core/txpool, eth/catalyst: clear transaction pool in Rollback (#30534)

This adds an API method `Clear` to the legacy pool, the blob pool and the
txpool interface. The method removes all transactions currently tracked in
the pools.

It modifies the simulated beacon to use the new method in `Rollback`,
replacing the previous hacky implementation, which also erroneously reset
the gas tip to 1 gwei.

---------

Co-authored-by: Felix Lange <fjl@twurst.com>
jwasinger 2024-11-19 19:35:52 +07:00 committed by GitHub
parent 61ff3a1186
commit 581e2140f2
5 changed files with 102 additions and 8 deletions
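
As a rough sketch of the new API surface (the helper below is illustrative and not part of the change; only Clear, Status and TxStatusUnknown come from the txpool package shown in the diffs):

// clearAndCheck is a hypothetical helper: it drops everything a subpool
// tracks and then asks about a hash that was previously pending. After
// Clear, the pool should report the hash as unknown.
func clearAndCheck(sp txpool.SubPool, hash common.Hash) txpool.TxStatus {
	sp.Clear()
	return sp.Status(hash) // expected: txpool.TxStatusUnknown
}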

core/txpool/blobpool/blobpool.go

@@ -1714,3 +1714,53 @@ func (p *BlobPool) Status(hash common.Hash) txpool.TxStatus {
	}
	return txpool.TxStatusUnknown
}
// Clear implements txpool.SubPool, removing all tracked transactions
// from the blob pool and persistent store.
func (p *BlobPool) Clear() {
	p.lock.Lock()
	defer p.lock.Unlock()

	// Manually iterating and deleting every entry is super sub-optimal.
	// However, Clear is not currently used in production, so performance
	// is not critical at the moment.
	for hash := range p.lookup.txIndex {
		id, _ := p.lookup.storeidOfTx(hash)
		if err := p.store.Delete(id); err != nil {
			log.Warn("failed to delete blob tx from backing store", "err", err)
		}
	}
	for hash := range p.lookup.blobIndex {
		id, _ := p.lookup.storeidOfBlob(hash)
		if err := p.store.Delete(id); err != nil {
			log.Warn("failed to delete blob from backing store", "err", err)
		}
	}

	// Unreserve each tracked account. Ideally, we could just clear the
	// reservation map in the parent txpool context. However, if we clear in
	// the parent context, to avoid exposing the subpool lock, we have to
	// lock the reservations and then lock each subpool.
	//
	// This creates the potential for a deadlock situation:
	//
	// * TxPool.Clear locks the reservations
	// * a new transaction is received which locks the subpool mutex
	// * TxPool.Clear attempts to lock the subpool mutex
	//
	// The transaction addition may attempt to reserve the sender address,
	// which can't happen until Clear releases the reservation lock. Clear
	// cannot acquire the subpool lock until the transaction addition has
	// completed.
	for acct := range p.index {
		p.reserve(acct, false)
	}
	p.lookup = newLookup()
	p.index = make(map[common.Address][]*blobTxMeta)
	p.spent = make(map[common.Address]*uint256.Int)

	var (
		basefee = uint256.MustFromBig(eip1559.CalcBaseFee(p.chain.Config(), p.head))
		blobfee = uint256.NewInt(params.BlobTxMinBlobGasprice)
	)
	p.evict = newPriceHeap(basefee, blobfee, p.index)
}
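
The deadlock the comment above guards against is a classic lock-ordering cycle. A simplified model (the mutexes below are stand-ins using only the standard sync package, not the actual geth fields) shows why clearing reservations from the parent pool while subpools take their own locks could wedge:

// Illustrative only: two code paths acquiring the same pair of locks in
// opposite order can block each other forever.
var (
	reservationsMu sync.Mutex // stand-in for the parent pool's reservation lock
	subpoolMu      sync.Mutex // stand-in for a subpool's internal mutex
)

// clearLikePath models a hypothetical TxPool.Clear that locks the
// reservations first and then needs the subpool lock.
func clearLikePath() {
	reservationsMu.Lock()
	defer reservationsMu.Unlock()
	subpoolMu.Lock()
	defer subpoolMu.Unlock()
}

// addLikePath models a transaction addition that locks the subpool first
// and then needs to reserve the sender address.
func addLikePath() {
	subpoolMu.Lock()
	defer subpoolMu.Unlock()
	reservationsMu.Lock()
	defer reservationsMu.Unlock()
}

Having each subpool unreserve its own senders while holding only its own lock sidesteps this ordering problem, which is why the loop above lives in the subpool rather than in the parent pool.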

core/txpool/legacypool/legacypool.go

@@ -1961,3 +1961,44 @@ func (t *lookup) RemotesBelowTip(threshold *big.Int) types.Transactions {
func numSlots(tx *types.Transaction) int {
	return int((tx.Size() + txSlotSize - 1) / txSlotSize)
}
// Clear implements txpool.SubPool, removing all tracked txs from the pool
// and rotating the journal.
func (pool *LegacyPool) Clear() {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	// Unreserve each tracked account. Ideally, we could just clear the
	// reservation map in the parent txpool context. However, if we clear in
	// the parent context, to avoid exposing the subpool lock, we have to
	// lock the reservations and then lock each subpool.
	//
	// This creates the potential for a deadlock situation:
	//
	// * TxPool.Clear locks the reservations
	// * a new transaction is received which locks the subpool mutex
	// * TxPool.Clear attempts to lock the subpool mutex
	//
	// The transaction addition may attempt to reserve the sender address,
	// which can't happen until Clear releases the reservation lock. Clear
	// cannot acquire the subpool lock until the transaction addition has
	// completed.
	for _, tx := range pool.all.remotes {
		senderAddr, _ := types.Sender(pool.signer, tx)
		pool.reserve(senderAddr, false)
	}
	for localSender := range pool.locals.accounts {
		pool.reserve(localSender, false)
	}

	pool.all = newLookup()
	pool.priced = newPricedList(pool.all)
	pool.pending = make(map[common.Address]*list)
	pool.queue = make(map[common.Address]*list)

	if !pool.config.NoLocals && pool.config.Journal != "" {
		pool.journal = newTxJournal(pool.config.Journal)
		if err := pool.journal.rotate(pool.local()); err != nil {
			log.Warn("Failed to rotate transaction journal", "err", err)
		}
	}
}
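
The journal branch above only runs when local-transaction tracking is enabled and a journal file is configured. A hedged configuration sketch (assuming the legacypool.DefaultConfig value and its NoLocals/Journal fields; the journal file name is illustrative):

// journalEnabledConfig is a sketch: with NoLocals set or Journal left
// empty, Clear skips the journal rotation entirely.
func journalEnabledConfig() legacypool.Config {
	cfg := legacypool.DefaultConfig
	cfg.NoLocals = false             // keep tracking locally submitted transactions
	cfg.Journal = "transactions.rlp" // on-disk journal; an empty path disables it
	return cfg
}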

core/txpool/subpool.go

@@ -168,4 +168,7 @@ type SubPool interface {
	// Status returns the known status (unknown/pending/queued) of a transaction
	// identified by its hash.
	Status(hash common.Hash) TxStatus

	// Clear removes all tracked transactions from the pool.
	Clear()
}
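
Any test double that implements txpool.SubPool now needs a Clear method as well. One possible sketch (the fakeSubPool type is hypothetical, not part of this change) embeds the interface so only the methods a test actually exercises have to be written out:

// fakeSubPool is a hypothetical test double. Embedding txpool.SubPool
// satisfies the interface at compile time; methods that are not overridden
// would panic if invoked, which is fine for a narrow test.
type fakeSubPool struct {
	txpool.SubPool
	cleared bool
}

func (f *fakeSubPool) Clear() { f.cleared = true }

func (f *fakeSubPool) Status(common.Hash) txpool.TxStatus {
	return txpool.TxStatusUnknown
}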

core/txpool/txpool.go

@@ -497,3 +497,10 @@ func (p *TxPool) Sync() error {
		return errors.New("pool already terminated")
	}
}

// Clear removes all tracked txs from the subpools.
func (p *TxPool) Clear() {
	for _, subpool := range p.subpools {
		subpool.Clear()
	}
}

eth/catalyst/simulated_beacon.go

@@ -21,7 +21,6 @@ import (
	"crypto/sha256"
	"errors"
	"fmt"
-	"math/big"
	"sync"
	"time"
@@ -34,7 +33,6 @@ import (
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/node"
-	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/rpc"
)
@@ -287,12 +285,7 @@ func (c *SimulatedBeacon) Commit() common.Hash {
// Rollback un-sends previously added transactions.
func (c *SimulatedBeacon) Rollback() {
	// Flush all transactions from the transaction pools
-	maxUint256 := new(big.Int).Sub(new(big.Int).Lsh(common.Big1, 256), common.Big1)
-	c.eth.TxPool().SetGasTip(maxUint256)
-	// Set the gas tip back to accept new transactions
-	// TODO (Marius van der Wijden): set gas tip to parameter passed by config
-	c.eth.TxPool().SetGasTip(big.NewInt(params.GWei))
+	c.eth.TxPool().Clear()
}
// Fork sets the head to the provided hash.
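
In practice this path is typically reached through the simulated backend used in tests, whose Rollback forwards to the beacon. A hedged sketch (the ethclient/simulated wrapper types and methods referenced here are assumptions about that package, not part of this diff):

// rollbackExample is a sketch: it submits a signed transaction through a
// simulated backend and then un-sends it.
func rollbackExample(backend *simulated.Backend, signedTx *types.Transaction) error {
	client := backend.Client()
	if err := client.SendTransaction(context.Background(), signedTx); err != nil {
		return err
	}
	// Rollback now simply clears the transaction pools; the gas tip
	// configuration is left untouched.
	backend.Rollback()
	return nil
}

After the call, the pool no longer tracks signedTx, and because the gas tip is untouched, new transactions are accepted under the same tip policy as before.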