Merge branch 'master' into local_pool_pt2

This commit is contained in:
Martin HS 2025-01-17 14:24:18 +01:00 committed by GitHub
commit a0283d1e12
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
85 changed files with 882 additions and 459 deletions

View File

@ -214,7 +214,9 @@ const (
// of starting any background processes such as automatic key derivation. // of starting any background processes such as automatic key derivation.
WalletOpened WalletOpened
// WalletDropped // WalletDropped is fired when a wallet is removed or disconnected, either via USB
// or due to a filesystem event in the keystore. This event indicates that the wallet
// is no longer available for operations.
WalletDropped WalletDropped
) )

View File

@ -13,6 +13,7 @@
// //
// You should have received a copy of the GNU General Public License // You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>. // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package ethtest package ethtest
import ( import (

View File

@ -194,7 +194,7 @@ func PingExtraData(t *utesting.T) {
} }
} }
// This test sends a PING packet with additional data and wrong 'from' field // PingExtraDataWrongFrom sends a PING packet with additional data and wrong 'from' field
// and expects a PONG response. // and expects a PONG response.
func PingExtraDataWrongFrom(t *utesting.T) { func PingExtraDataWrongFrom(t *utesting.T) {
te := newTestEnv(Remote, Listen1, Listen2) te := newTestEnv(Remote, Listen1, Listen2)
@ -215,7 +215,7 @@ func PingExtraDataWrongFrom(t *utesting.T) {
} }
} }
// This test sends a PING packet with an expiration in the past. // PingPastExpiration sends a PING packet with an expiration in the past.
// The remote node should not respond. // The remote node should not respond.
func PingPastExpiration(t *utesting.T) { func PingPastExpiration(t *utesting.T) {
te := newTestEnv(Remote, Listen1, Listen2) te := newTestEnv(Remote, Listen1, Listen2)
@ -234,7 +234,7 @@ func PingPastExpiration(t *utesting.T) {
} }
} }
// This test sends an invalid packet. The remote node should not respond. // WrongPacketType sends an invalid packet. The remote node should not respond.
func WrongPacketType(t *utesting.T) { func WrongPacketType(t *utesting.T) {
te := newTestEnv(Remote, Listen1, Listen2) te := newTestEnv(Remote, Listen1, Listen2)
defer te.close() defer te.close()
@ -252,7 +252,7 @@ func WrongPacketType(t *utesting.T) {
} }
} }
// This test verifies that the default behaviour of ignoring 'from' fields is unaffected by // BondThenPingWithWrongFrom verifies that the default behaviour of ignoring 'from' fields is unaffected by
// the bonding process. After bonding, it pings the target with a different from endpoint. // the bonding process. After bonding, it pings the target with a different from endpoint.
func BondThenPingWithWrongFrom(t *utesting.T) { func BondThenPingWithWrongFrom(t *utesting.T) {
te := newTestEnv(Remote, Listen1, Listen2) te := newTestEnv(Remote, Listen1, Listen2)
@ -289,7 +289,7 @@ waitForPong:
} }
} }
// This test just sends FINDNODE. The remote node should not reply // FindnodeWithoutEndpointProof sends FINDNODE. The remote node should not reply
// because the endpoint proof has not completed. // because the endpoint proof has not completed.
func FindnodeWithoutEndpointProof(t *utesting.T) { func FindnodeWithoutEndpointProof(t *utesting.T) {
te := newTestEnv(Remote, Listen1, Listen2) te := newTestEnv(Remote, Listen1, Listen2)
@ -332,7 +332,7 @@ func BasicFindnode(t *utesting.T) {
} }
} }
// This test sends an unsolicited NEIGHBORS packet after the endpoint proof, then sends // UnsolicitedNeighbors sends an unsolicited NEIGHBORS packet after the endpoint proof, then sends
// FINDNODE to read the remote table. The remote node should not return the node contained // FINDNODE to read the remote table. The remote node should not return the node contained
// in the unsolicited NEIGHBORS packet. // in the unsolicited NEIGHBORS packet.
func UnsolicitedNeighbors(t *utesting.T) { func UnsolicitedNeighbors(t *utesting.T) {
@ -373,7 +373,7 @@ func UnsolicitedNeighbors(t *utesting.T) {
} }
} }
// This test sends FINDNODE with an expiration timestamp in the past. // FindnodePastExpiration sends FINDNODE with an expiration timestamp in the past.
// The remote node should not respond. // The remote node should not respond.
func FindnodePastExpiration(t *utesting.T) { func FindnodePastExpiration(t *utesting.T) {
te := newTestEnv(Remote, Listen1, Listen2) te := newTestEnv(Remote, Listen1, Listen2)
@ -426,7 +426,7 @@ func bond(t *utesting.T, te *testenv) {
} }
} }
// This test attempts to perform a traffic amplification attack against a // FindnodeAmplificationInvalidPongHash attempts to perform a traffic amplification attack against a
// 'victim' endpoint using FINDNODE. In this attack scenario, the attacker // 'victim' endpoint using FINDNODE. In this attack scenario, the attacker
// attempts to complete the endpoint proof non-interactively by sending a PONG // attempts to complete the endpoint proof non-interactively by sending a PONG
// with mismatching reply token from the 'victim' endpoint. The attack works if // with mismatching reply token from the 'victim' endpoint. The attack works if
@ -478,7 +478,7 @@ func FindnodeAmplificationInvalidPongHash(t *utesting.T) {
} }
} }
// This test attempts to perform a traffic amplification attack using FINDNODE. // FindnodeAmplificationWrongIP attempts to perform a traffic amplification attack using FINDNODE.
// The attack works if the remote node does not verify the IP address of FINDNODE // The attack works if the remote node does not verify the IP address of FINDNODE
// against the endpoint verification proof done by PING/PONG. // against the endpoint verification proof done by PING/PONG.
func FindnodeAmplificationWrongIP(t *utesting.T) { func FindnodeAmplificationWrongIP(t *utesting.T) {

View File

@ -379,7 +379,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
} }
// Commit block // Commit block
root, err := statedb.Commit(vmContext.BlockNumber.Uint64(), chainConfig.IsEIP158(vmContext.BlockNumber)) root, err := statedb.Commit(vmContext.BlockNumber.Uint64(), chainConfig.IsEIP158(vmContext.BlockNumber), chainConfig.IsCancun(vmContext.BlockNumber, vmContext.Time))
if err != nil { if err != nil {
return nil, nil, nil, NewError(ErrorEVM, fmt.Errorf("could not commit state: %v", err)) return nil, nil, nil, NewError(ErrorEVM, fmt.Errorf("could not commit state: %v", err))
} }
@ -437,7 +437,7 @@ func MakePreState(db ethdb.Database, accounts types.GenesisAlloc) *state.StateDB
} }
} }
// Commit and re-open to start with a clean state. // Commit and re-open to start with a clean state.
root, _ := statedb.Commit(0, false) root, _ := statedb.Commit(0, false, false)
statedb, _ = state.New(root, sdb) statedb, _ = state.New(root, sdb)
return statedb return statedb
} }

View File

@ -336,7 +336,7 @@ func runCmd(ctx *cli.Context) error {
output, stats, err := timedExec(bench, execFunc) output, stats, err := timedExec(bench, execFunc)
if ctx.Bool(DumpFlag.Name) { if ctx.Bool(DumpFlag.Name) {
root, err := runtimeConfig.State.Commit(genesisConfig.Number, true) root, err := runtimeConfig.State.Commit(genesisConfig.Number, true, false)
if err != nil { if err != nil {
fmt.Printf("Failed to commit changes %v\n", err) fmt.Printf("Failed to commit changes %v\n", err)
return err return err

View File

@ -230,11 +230,10 @@ func initGenesis(ctx *cli.Context) error {
triedb := utils.MakeTrieDatabase(ctx, chaindb, ctx.Bool(utils.CachePreimagesFlag.Name), false, genesis.IsVerkle()) triedb := utils.MakeTrieDatabase(ctx, chaindb, ctx.Bool(utils.CachePreimagesFlag.Name), false, genesis.IsVerkle())
defer triedb.Close() defer triedb.Close()
_, hash, err := core.SetupGenesisBlockWithOverride(chaindb, triedb, genesis, &overrides) _, hash, _, err := core.SetupGenesisBlockWithOverride(chaindb, triedb, genesis, &overrides)
if err != nil { if err != nil {
utils.Fatalf("Failed to write genesis block: %v", err) utils.Fatalf("Failed to write genesis block: %v", err)
} }
log.Info("Successfully wrote genesis state", "database", "chaindata", "hash", hash) log.Info("Successfully wrote genesis state", "database", "chaindata", "hash", hash)
return nil return nil

View File

@ -829,8 +829,7 @@ func inspectAccount(db *triedb.Database, start uint64, end uint64, address commo
func inspectStorage(db *triedb.Database, start uint64, end uint64, address common.Address, slot common.Hash, raw bool) error { func inspectStorage(db *triedb.Database, start uint64, end uint64, address common.Address, slot common.Hash, raw bool) error {
// The hash of storage slot key is utilized in the history // The hash of storage slot key is utilized in the history
// rather than the raw slot key, make the conversion. // rather than the raw slot key, make the conversion.
slotHash := crypto.Keccak256Hash(slot.Bytes()) stats, err := db.StorageHistory(address, slot, start, end)
stats, err := db.StorageHistory(address, slotHash, start, end)
if err != nil { if err != nil {
return err return err
} }

View File

@ -170,7 +170,7 @@ func TestHistoryImportAndExport(t *testing.T) {
db2.Close() db2.Close()
}) })
genesis.MustCommit(db2, triedb.NewDatabase(db, triedb.HashDefaults)) genesis.MustCommit(db2, triedb.NewDatabase(db2, triedb.HashDefaults))
imported, err := core.NewBlockChain(db2, nil, genesis, nil, ethash.NewFaker(), vm.Config{}, nil) imported, err := core.NewBlockChain(db2, nil, genesis, nil, ethash.NewFaker(), vm.Config{}, nil)
if err != nil { if err != nil {
t.Fatalf("unable to initialize chain: %v", err) t.Fatalf("unable to initialize chain: %v", err)

View File

@ -467,7 +467,6 @@ func (tt *cliqueTest) run(t *testing.T) {
for j := 0; j < len(batches)-1; j++ { for j := 0; j < len(batches)-1; j++ {
if k, err := chain.InsertChain(batches[j]); err != nil { if k, err := chain.InsertChain(batches[j]); err != nil {
t.Fatalf("failed to import batch %d, block %d: %v", j, k, err) t.Fatalf("failed to import batch %d, block %d: %v", j, k, err)
break
} }
} }
if _, err = chain.InsertChain(batches[len(batches)-1]); err != tt.failure { if _, err = chain.InsertChain(batches[len(batches)-1]); err != tt.failure {

View File

@ -269,14 +269,19 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
cacheConfig = defaultCacheConfig cacheConfig = defaultCacheConfig
} }
// Open trie database with provided config // Open trie database with provided config
triedb := triedb.NewDatabase(db, cacheConfig.triedbConfig(genesis != nil && genesis.IsVerkle())) enableVerkle, err := EnableVerkleAtGenesis(db, genesis)
if err != nil {
return nil, err
}
triedb := triedb.NewDatabase(db, cacheConfig.triedbConfig(enableVerkle))
// Setup the genesis block, commit the provided genesis specification // Write the supplied genesis to the database if it has not been initialized
// to database if the genesis block is not present yet, or load the // yet. The corresponding chain config will be returned, either from the
// stored one from database. // provided genesis or from the locally stored configuration if the genesis
chainConfig, genesisHash, genesisErr := SetupGenesisBlockWithOverride(db, triedb, genesis, overrides) // has already been initialized.
if _, ok := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !ok { chainConfig, genesisHash, compatErr, err := SetupGenesisBlockWithOverride(db, triedb, genesis, overrides)
return nil, genesisErr if err != nil {
return nil, err
} }
log.Info("") log.Info("")
log.Info(strings.Repeat("-", 153)) log.Info(strings.Repeat("-", 153))
@ -303,7 +308,6 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
vmConfig: vmConfig, vmConfig: vmConfig,
logger: vmConfig.Tracer, logger: vmConfig.Tracer,
} }
var err error
bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.insertStopped) bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.insertStopped)
if err != nil { if err != nil {
return nil, err return nil, err
@ -453,16 +457,15 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
} }
// Rewind the chain in case of an incompatible config upgrade. // Rewind the chain in case of an incompatible config upgrade.
if compat, ok := genesisErr.(*params.ConfigCompatError); ok { if compatErr != nil {
log.Warn("Rewinding chain to upgrade configuration", "err", compat) log.Warn("Rewinding chain to upgrade configuration", "err", compatErr)
if compat.RewindToTime > 0 { if compatErr.RewindToTime > 0 {
bc.SetHeadWithTimestamp(compat.RewindToTime) bc.SetHeadWithTimestamp(compatErr.RewindToTime)
} else { } else {
bc.SetHead(compat.RewindToBlock) bc.SetHead(compatErr.RewindToBlock)
} }
rawdb.WriteChainConfig(db, genesisHash, chainConfig) rawdb.WriteChainConfig(db, genesisHash, chainConfig)
} }
// Start tx indexer if it's enabled. // Start tx indexer if it's enabled.
if txLookupLimit != nil { if txLookupLimit != nil {
bc.txIndexer = newTxIndexer(*txLookupLimit, bc) bc.txIndexer = newTxIndexer(*txLookupLimit, bc)
@ -1468,7 +1471,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
log.Crit("Failed to write block into disk", "err", err) log.Crit("Failed to write block into disk", "err", err)
} }
// Commit all cached state changes into underlying memory database. // Commit all cached state changes into underlying memory database.
root, err := statedb.Commit(block.NumberU64(), bc.chainConfig.IsEIP158(block.Number())) root, err := statedb.Commit(block.NumberU64(), bc.chainConfig.IsEIP158(block.Number()), bc.chainConfig.IsCancun(block.Number(), block.Time()))
if err != nil { if err != nil {
return err return err
} }
@ -1616,7 +1619,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool, makeWitness
return nil, 0, nil return nil, 0, nil
} }
// Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss) // Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
SenderCacher.RecoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number(), chain[0].Time()), chain) SenderCacher().RecoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number(), chain[0].Time()), chain)
var ( var (
stats = insertStats{startTime: mclock.Now()} stats = insertStats{startTime: mclock.Now()}

View File

@ -181,7 +181,7 @@ func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
blockchain.chainmu.MustLock() blockchain.chainmu.MustLock()
rawdb.WriteTd(blockchain.db, block.Hash(), block.NumberU64(), new(big.Int).Add(block.Difficulty(), blockchain.GetTd(block.ParentHash(), block.NumberU64()-1))) rawdb.WriteTd(blockchain.db, block.Hash(), block.NumberU64(), new(big.Int).Add(block.Difficulty(), blockchain.GetTd(block.ParentHash(), block.NumberU64()-1)))
rawdb.WriteBlock(blockchain.db, block) rawdb.WriteBlock(blockchain.db, block)
statedb.Commit(block.NumberU64(), false) statedb.Commit(block.NumberU64(), false, false)
blockchain.chainmu.Unlock() blockchain.chainmu.Unlock()
} }
return nil return nil
@ -4265,12 +4265,11 @@ func TestEIP7702(t *testing.T) {
// 2. addr1:0xaaaa calls into addr2:0xbbbb // 2. addr1:0xaaaa calls into addr2:0xbbbb
// 3. addr2:0xbbbb writes to storage // 3. addr2:0xbbbb writes to storage
auth1, _ := types.SignSetCode(key1, types.SetCodeAuthorization{ auth1, _ := types.SignSetCode(key1, types.SetCodeAuthorization{
ChainID: gspec.Config.ChainID.Uint64(), ChainID: *uint256.MustFromBig(gspec.Config.ChainID),
Address: aa, Address: aa,
Nonce: 1, Nonce: 1,
}) })
auth2, _ := types.SignSetCode(key2, types.SetCodeAuthorization{ auth2, _ := types.SignSetCode(key2, types.SetCodeAuthorization{
ChainID: 0,
Address: bb, Address: bb,
Nonce: 0, Nonce: 0,
}) })
@ -4278,7 +4277,7 @@ func TestEIP7702(t *testing.T) {
_, blocks, _ := GenerateChainWithGenesis(gspec, engine, 1, func(i int, b *BlockGen) { _, blocks, _ := GenerateChainWithGenesis(gspec, engine, 1, func(i int, b *BlockGen) {
b.SetCoinbase(aa) b.SetCoinbase(aa)
txdata := &types.SetCodeTx{ txdata := &types.SetCodeTx{
ChainID: gspec.Config.ChainID.Uint64(), ChainID: uint256.MustFromBig(gspec.Config.ChainID),
Nonce: 0, Nonce: 0,
To: addr1, To: addr1,
Gas: 500000, Gas: 500000,

View File

@ -405,7 +405,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
} }
// Write state changes to db // Write state changes to db
root, err := statedb.Commit(b.header.Number.Uint64(), config.IsEIP158(b.header.Number)) root, err := statedb.Commit(b.header.Number.Uint64(), config.IsEIP158(b.header.Number), config.IsCancun(b.header.Number, b.header.Time))
if err != nil { if err != nil {
panic(fmt.Sprintf("state write error: %v", err)) panic(fmt.Sprintf("state write error: %v", err))
} }
@ -510,7 +510,7 @@ func GenerateVerkleChain(config *params.ChainConfig, parent *types.Block, engine
} }
// Write state changes to DB. // Write state changes to DB.
root, err := statedb.Commit(b.header.Number.Uint64(), config.IsEIP158(b.header.Number)) root, err := statedb.Commit(b.header.Number.Uint64(), config.IsEIP158(b.header.Number), config.IsCancun(b.header.Number, b.header.Time))
if err != nil { if err != nil {
panic(fmt.Sprintf("state write error: %v", err)) panic(fmt.Sprintf("state write error: %v", err))
} }

View File

@ -146,7 +146,7 @@ func hashAlloc(ga *types.GenesisAlloc, isVerkle bool) (common.Hash, error) {
statedb.SetState(addr, key, value) statedb.SetState(addr, key, value)
} }
} }
return statedb.Commit(0, false) return statedb.Commit(0, false, false)
} }
// flushAlloc is very similar with hash, but the main difference is all the // flushAlloc is very similar with hash, but the main difference is all the
@ -172,7 +172,7 @@ func flushAlloc(ga *types.GenesisAlloc, triedb *triedb.Database) (common.Hash, e
statedb.SetState(addr, key, value) statedb.SetState(addr, key, value)
} }
} }
root, err := statedb.Commit(0, false) root, err := statedb.Commit(0, false, false)
if err != nil { if err != nil {
return common.Hash{}, err return common.Hash{}, err
} }
@ -247,6 +247,24 @@ type ChainOverrides struct {
OverrideVerkle *uint64 OverrideVerkle *uint64
} }
// apply applies the chain overrides on the supplied chain config.
func (o *ChainOverrides) apply(cfg *params.ChainConfig) (*params.ChainConfig, error) {
if o == nil || cfg == nil {
return cfg, nil
}
cpy := *cfg
if o.OverrideCancun != nil {
cpy.CancunTime = o.OverrideCancun
}
if o.OverrideVerkle != nil {
cpy.VerkleTime = o.OverrideVerkle
}
if err := cpy.CheckConfigForkOrder(); err != nil {
return nil, err
}
return &cpy, nil
}
// SetupGenesisBlock writes or updates the genesis block in db. // SetupGenesisBlock writes or updates the genesis block in db.
// The block that will be used is: // The block that will be used is:
// //
@ -258,109 +276,102 @@ type ChainOverrides struct {
// The stored chain configuration will be updated if it is compatible (i.e. does not // The stored chain configuration will be updated if it is compatible (i.e. does not
// specify a fork block below the local head block). In case of a conflict, the // specify a fork block below the local head block). In case of a conflict, the
// error is a *params.ConfigCompatError and the new, unwritten config is returned. // error is a *params.ConfigCompatError and the new, unwritten config is returned.
// func SetupGenesisBlock(db ethdb.Database, triedb *triedb.Database, genesis *Genesis) (*params.ChainConfig, common.Hash, *params.ConfigCompatError, error) {
// The returned chain configuration is never nil.
func SetupGenesisBlock(db ethdb.Database, triedb *triedb.Database, genesis *Genesis) (*params.ChainConfig, common.Hash, error) {
return SetupGenesisBlockWithOverride(db, triedb, genesis, nil) return SetupGenesisBlockWithOverride(db, triedb, genesis, nil)
} }
func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *triedb.Database, genesis *Genesis, overrides *ChainOverrides) (*params.ChainConfig, common.Hash, error) { func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *triedb.Database, genesis *Genesis, overrides *ChainOverrides) (*params.ChainConfig, common.Hash, *params.ConfigCompatError, error) {
// Sanitize the supplied genesis, ensuring it has the associated chain
// config attached.
if genesis != nil && genesis.Config == nil { if genesis != nil && genesis.Config == nil {
return params.AllEthashProtocolChanges, common.Hash{}, errGenesisNoConfig return nil, common.Hash{}, nil, errGenesisNoConfig
} }
applyOverrides := func(config *params.ChainConfig) { // Commit the genesis if the database is empty
if config != nil { ghash := rawdb.ReadCanonicalHash(db, 0)
if overrides != nil && overrides.OverrideCancun != nil { if (ghash == common.Hash{}) {
config.CancunTime = overrides.OverrideCancun
}
if overrides != nil && overrides.OverrideVerkle != nil {
config.VerkleTime = overrides.OverrideVerkle
}
}
}
// Just commit the new block if there is no stored genesis block.
stored := rawdb.ReadCanonicalHash(db, 0)
if (stored == common.Hash{}) {
if genesis == nil { if genesis == nil {
log.Info("Writing default main-net genesis block") log.Info("Writing default main-net genesis block")
genesis = DefaultGenesisBlock() genesis = DefaultGenesisBlock()
} else { } else {
log.Info("Writing custom genesis block") log.Info("Writing custom genesis block")
} }
chainCfg, err := overrides.apply(genesis.Config)
if err != nil {
return nil, common.Hash{}, nil, err
}
genesis.Config = chainCfg
applyOverrides(genesis.Config)
block, err := genesis.Commit(db, triedb) block, err := genesis.Commit(db, triedb)
if err != nil { if err != nil {
return genesis.Config, common.Hash{}, err return nil, common.Hash{}, nil, err
} }
return genesis.Config, block.Hash(), nil return chainCfg, block.Hash(), nil, nil
} }
// The genesis block is present(perhaps in ancient database) while the // Commit the genesis if the genesis block exists in the ancient database
// state database is not initialized yet. It can happen that the node // but the key-value database is empty without initializing the genesis
// is initialized with an external ancient store. Commit genesis state // fields. This scenario can occur when the node is created from scratch
// in this case. // with an existing ancient store.
header := rawdb.ReadHeader(db, stored, 0) storedCfg := rawdb.ReadChainConfig(db, ghash)
if header.Root != types.EmptyRootHash && !triedb.Initialized(header.Root) { if storedCfg == nil {
// Ensure the stored genesis block matches with the given genesis. Private
// networks must explicitly specify the genesis in the config file, mainnet
// genesis will be used as default and the initialization will always fail.
if genesis == nil { if genesis == nil {
log.Info("Writing default main-net genesis block")
genesis = DefaultGenesisBlock() genesis = DefaultGenesisBlock()
} else {
log.Info("Writing custom genesis block")
} }
applyOverrides(genesis.Config) chainCfg, err := overrides.apply(genesis.Config)
// Ensure the stored genesis matches with the given one. if err != nil {
hash := genesis.ToBlock().Hash() return nil, common.Hash{}, nil, err
if hash != stored { }
return genesis.Config, hash, &GenesisMismatchError{stored, hash} genesis.Config = chainCfg
if hash := genesis.ToBlock().Hash(); hash != ghash {
return nil, common.Hash{}, nil, &GenesisMismatchError{ghash, hash}
} }
block, err := genesis.Commit(db, triedb) block, err := genesis.Commit(db, triedb)
if err != nil { if err != nil {
return genesis.Config, hash, err return nil, common.Hash{}, nil, err
} }
return genesis.Config, block.Hash(), nil return chainCfg, block.Hash(), nil, nil
} }
// Check whether the genesis block is already written. // The genesis block has already been committed previously. Verify that the
// provided genesis with chain overrides matches the existing one, and update
// the stored chain config if necessary.
if genesis != nil { if genesis != nil {
applyOverrides(genesis.Config) chainCfg, err := overrides.apply(genesis.Config)
hash := genesis.ToBlock().Hash() if err != nil {
if hash != stored { return nil, common.Hash{}, nil, err
return genesis.Config, hash, &GenesisMismatchError{stored, hash}
} }
genesis.Config = chainCfg
if hash := genesis.ToBlock().Hash(); hash != ghash {
return nil, common.Hash{}, nil, &GenesisMismatchError{ghash, hash}
} }
// Get the existing chain configuration.
newcfg := genesis.configOrDefault(stored)
applyOverrides(newcfg)
if err := newcfg.CheckConfigForkOrder(); err != nil {
return newcfg, common.Hash{}, err
}
storedcfg := rawdb.ReadChainConfig(db, stored)
if storedcfg == nil {
log.Warn("Found genesis block without chain config")
rawdb.WriteChainConfig(db, stored, newcfg)
return newcfg, stored, nil
}
storedData, _ := json.Marshal(storedcfg)
// Special case: if a private network is being used (no genesis and also no
// mainnet hash in the database), we must not apply the `configOrDefault`
// chain config as that would be AllProtocolChanges (applying any new fork
// on top of an existing private network genesis block). In that case, only
// apply the overrides.
if genesis == nil && stored != params.MainnetGenesisHash {
newcfg = storedcfg
applyOverrides(newcfg)
} }
// Check config compatibility and write the config. Compatibility errors // Check config compatibility and write the config. Compatibility errors
// are returned to the caller unless we're already at block zero. // are returned to the caller unless we're already at block zero.
head := rawdb.ReadHeadHeader(db) head := rawdb.ReadHeadHeader(db)
if head == nil { if head == nil {
return newcfg, stored, errors.New("missing head header") return nil, common.Hash{}, nil, errors.New("missing head header")
} }
compatErr := storedcfg.CheckCompatible(newcfg, head.Number.Uint64(), head.Time) newCfg := genesis.chainConfigOrDefault(ghash, storedCfg)
// TODO(rjl493456442) better to define the comparator of chain config
// and short circuit if the chain config is not changed.
compatErr := storedCfg.CheckCompatible(newCfg, head.Number.Uint64(), head.Time)
if compatErr != nil && ((head.Number.Uint64() != 0 && compatErr.RewindToBlock != 0) || (head.Time != 0 && compatErr.RewindToTime != 0)) { if compatErr != nil && ((head.Number.Uint64() != 0 && compatErr.RewindToBlock != 0) || (head.Time != 0 && compatErr.RewindToTime != 0)) {
return newcfg, stored, compatErr return newCfg, ghash, compatErr, nil
} }
// Don't overwrite if the old is identical to the new // Don't overwrite if the old is identical to the new. It's useful
if newData, _ := json.Marshal(newcfg); !bytes.Equal(storedData, newData) { // for the scenarios that database is opened in the read-only mode.
rawdb.WriteChainConfig(db, stored, newcfg) storedData, _ := json.Marshal(storedCfg)
if newData, _ := json.Marshal(newCfg); !bytes.Equal(storedData, newData) {
rawdb.WriteChainConfig(db, ghash, newCfg)
} }
return newcfg, stored, nil return newCfg, ghash, nil, nil
} }
// LoadChainConfig loads the stored chain config if it is already present in // LoadChainConfig loads the stored chain config if it is already present in
@ -396,7 +407,10 @@ func LoadChainConfig(db ethdb.Database, genesis *Genesis) (*params.ChainConfig,
return params.MainnetChainConfig, nil return params.MainnetChainConfig, nil
} }
func (g *Genesis) configOrDefault(ghash common.Hash) *params.ChainConfig { // chainConfigOrDefault retrieves the attached chain configuration. If the genesis
// object is null, it returns the default chain configuration based on the given
// genesis hash, or the locally stored config if it's not a pre-defined network.
func (g *Genesis) chainConfigOrDefault(ghash common.Hash, stored *params.ChainConfig) *params.ChainConfig {
switch { switch {
case g != nil: case g != nil:
return g.Config return g.Config
@ -407,14 +421,14 @@ func (g *Genesis) configOrDefault(ghash common.Hash) *params.ChainConfig {
case ghash == params.SepoliaGenesisHash: case ghash == params.SepoliaGenesisHash:
return params.SepoliaChainConfig return params.SepoliaChainConfig
default: default:
return params.AllEthashProtocolChanges return stored
} }
} }
// IsVerkle indicates whether the state is already stored in a verkle // IsVerkle indicates whether the state is already stored in a verkle
// tree at genesis time. // tree at genesis time.
func (g *Genesis) IsVerkle() bool { func (g *Genesis) IsVerkle() bool {
return g.Config.IsVerkle(new(big.Int).SetUint64(g.Number), g.Timestamp) return g.Config.IsVerkleGenesis()
} }
// ToBlock returns the genesis block according to genesis specification. // ToBlock returns the genesis block according to genesis specification.
@ -494,7 +508,7 @@ func (g *Genesis) Commit(db ethdb.Database, triedb *triedb.Database) (*types.Blo
} }
config := g.Config config := g.Config
if config == nil { if config == nil {
config = params.AllEthashProtocolChanges return nil, errors.New("invalid genesis without chain config")
} }
if err := config.CheckConfigForkOrder(); err != nil { if err := config.CheckConfigForkOrder(); err != nil {
return nil, err return nil, err
@ -514,16 +528,17 @@ func (g *Genesis) Commit(db ethdb.Database, triedb *triedb.Database) (*types.Blo
if err != nil { if err != nil {
return nil, err return nil, err
} }
rawdb.WriteGenesisStateSpec(db, block.Hash(), blob) batch := db.NewBatch()
rawdb.WriteTd(db, block.Hash(), block.NumberU64(), block.Difficulty()) rawdb.WriteGenesisStateSpec(batch, block.Hash(), blob)
rawdb.WriteBlock(db, block) rawdb.WriteTd(batch, block.Hash(), block.NumberU64(), block.Difficulty())
rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), nil) rawdb.WriteBlock(batch, block)
rawdb.WriteCanonicalHash(db, block.Hash(), block.NumberU64()) rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), nil)
rawdb.WriteHeadBlockHash(db, block.Hash()) rawdb.WriteCanonicalHash(batch, block.Hash(), block.NumberU64())
rawdb.WriteHeadFastBlockHash(db, block.Hash()) rawdb.WriteHeadBlockHash(batch, block.Hash())
rawdb.WriteHeadHeaderHash(db, block.Hash()) rawdb.WriteHeadFastBlockHash(batch, block.Hash())
rawdb.WriteChainConfig(db, block.Hash(), config) rawdb.WriteHeadHeaderHash(batch, block.Hash())
return block, nil rawdb.WriteChainConfig(batch, block.Hash(), config)
return block, batch.Write()
} }
// MustCommit writes the genesis block and state to db, panicking on error. // MustCommit writes the genesis block and state to db, panicking on error.
@ -536,6 +551,29 @@ func (g *Genesis) MustCommit(db ethdb.Database, triedb *triedb.Database) *types.
return block return block
} }
// EnableVerkleAtGenesis indicates whether the verkle fork should be activated
// at genesis. This is a temporary solution only for verkle devnet testing, where
// verkle fork is activated at genesis, and the configured activation date has
// already passed.
//
// In production networks (mainnet and public testnets), verkle activation always
// occurs after the genesis block, making this function irrelevant in those cases.
func EnableVerkleAtGenesis(db ethdb.Database, genesis *Genesis) (bool, error) {
if genesis != nil {
if genesis.Config == nil {
return false, errGenesisNoConfig
}
return genesis.Config.EnableVerkleAtGenesis, nil
}
if ghash := rawdb.ReadCanonicalHash(db, 0); ghash != (common.Hash{}) {
chainCfg := rawdb.ReadChainConfig(db, ghash)
if chainCfg != nil {
return chainCfg.EnableVerkleAtGenesis, nil
}
}
return false, nil
}
// DefaultGenesisBlock returns the Ethereum main net genesis block. // DefaultGenesisBlock returns the Ethereum main net genesis block.
func DefaultGenesisBlock() *Genesis { func DefaultGenesisBlock() *Genesis {
return &Genesis{ return &Genesis{

View File

@ -55,22 +55,22 @@ func testSetupGenesis(t *testing.T, scheme string) {
tests := []struct { tests := []struct {
name string name string
fn func(ethdb.Database) (*params.ChainConfig, common.Hash, error) fn func(ethdb.Database) (*params.ChainConfig, common.Hash, *params.ConfigCompatError, error)
wantConfig *params.ChainConfig wantConfig *params.ChainConfig
wantHash common.Hash wantHash common.Hash
wantErr error wantErr error
wantCompactErr *params.ConfigCompatError
}{ }{
{ {
name: "genesis without ChainConfig", name: "genesis without ChainConfig",
fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) { fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, *params.ConfigCompatError, error) {
return SetupGenesisBlock(db, triedb.NewDatabase(db, newDbConfig(scheme)), new(Genesis)) return SetupGenesisBlock(db, triedb.NewDatabase(db, newDbConfig(scheme)), new(Genesis))
}, },
wantErr: errGenesisNoConfig, wantErr: errGenesisNoConfig,
wantConfig: params.AllEthashProtocolChanges,
}, },
{ {
name: "no block in DB, genesis == nil", name: "no block in DB, genesis == nil",
fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) { fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, *params.ConfigCompatError, error) {
return SetupGenesisBlock(db, triedb.NewDatabase(db, newDbConfig(scheme)), nil) return SetupGenesisBlock(db, triedb.NewDatabase(db, newDbConfig(scheme)), nil)
}, },
wantHash: params.MainnetGenesisHash, wantHash: params.MainnetGenesisHash,
@ -78,7 +78,7 @@ func testSetupGenesis(t *testing.T, scheme string) {
}, },
{ {
name: "mainnet block in DB, genesis == nil", name: "mainnet block in DB, genesis == nil",
fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) { fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, *params.ConfigCompatError, error) {
DefaultGenesisBlock().MustCommit(db, triedb.NewDatabase(db, newDbConfig(scheme))) DefaultGenesisBlock().MustCommit(db, triedb.NewDatabase(db, newDbConfig(scheme)))
return SetupGenesisBlock(db, triedb.NewDatabase(db, newDbConfig(scheme)), nil) return SetupGenesisBlock(db, triedb.NewDatabase(db, newDbConfig(scheme)), nil)
}, },
@ -87,7 +87,7 @@ func testSetupGenesis(t *testing.T, scheme string) {
}, },
{ {
name: "custom block in DB, genesis == nil", name: "custom block in DB, genesis == nil",
fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) { fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, *params.ConfigCompatError, error) {
tdb := triedb.NewDatabase(db, newDbConfig(scheme)) tdb := triedb.NewDatabase(db, newDbConfig(scheme))
customg.Commit(db, tdb) customg.Commit(db, tdb)
return SetupGenesisBlock(db, tdb, nil) return SetupGenesisBlock(db, tdb, nil)
@ -97,18 +97,16 @@ func testSetupGenesis(t *testing.T, scheme string) {
}, },
{ {
name: "custom block in DB, genesis == sepolia", name: "custom block in DB, genesis == sepolia",
fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) { fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, *params.ConfigCompatError, error) {
tdb := triedb.NewDatabase(db, newDbConfig(scheme)) tdb := triedb.NewDatabase(db, newDbConfig(scheme))
customg.Commit(db, tdb) customg.Commit(db, tdb)
return SetupGenesisBlock(db, tdb, DefaultSepoliaGenesisBlock()) return SetupGenesisBlock(db, tdb, DefaultSepoliaGenesisBlock())
}, },
wantErr: &GenesisMismatchError{Stored: customghash, New: params.SepoliaGenesisHash}, wantErr: &GenesisMismatchError{Stored: customghash, New: params.SepoliaGenesisHash},
wantHash: params.SepoliaGenesisHash,
wantConfig: params.SepoliaChainConfig,
}, },
{ {
name: "compatible config in DB", name: "compatible config in DB",
fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) { fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, *params.ConfigCompatError, error) {
tdb := triedb.NewDatabase(db, newDbConfig(scheme)) tdb := triedb.NewDatabase(db, newDbConfig(scheme))
oldcustomg.Commit(db, tdb) oldcustomg.Commit(db, tdb)
return SetupGenesisBlock(db, tdb, &customg) return SetupGenesisBlock(db, tdb, &customg)
@ -118,7 +116,7 @@ func testSetupGenesis(t *testing.T, scheme string) {
}, },
{ {
name: "incompatible config in DB", name: "incompatible config in DB",
fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) { fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, *params.ConfigCompatError, error) {
// Commit the 'old' genesis block with Homestead transition at #2. // Commit the 'old' genesis block with Homestead transition at #2.
// Advance to block #4, past the homestead transition block of customg. // Advance to block #4, past the homestead transition block of customg.
tdb := triedb.NewDatabase(db, newDbConfig(scheme)) tdb := triedb.NewDatabase(db, newDbConfig(scheme))
@ -135,7 +133,7 @@ func testSetupGenesis(t *testing.T, scheme string) {
}, },
wantHash: customghash, wantHash: customghash,
wantConfig: customg.Config, wantConfig: customg.Config,
wantErr: &params.ConfigCompatError{ wantCompactErr: &params.ConfigCompatError{
What: "Homestead fork block", What: "Homestead fork block",
StoredBlock: big.NewInt(2), StoredBlock: big.NewInt(2),
NewBlock: big.NewInt(3), NewBlock: big.NewInt(3),
@ -146,12 +144,16 @@ func testSetupGenesis(t *testing.T, scheme string) {
for _, test := range tests { for _, test := range tests {
db := rawdb.NewMemoryDatabase() db := rawdb.NewMemoryDatabase()
config, hash, err := test.fn(db) config, hash, compatErr, err := test.fn(db)
// Check the return values. // Check the return values.
if !reflect.DeepEqual(err, test.wantErr) { if !reflect.DeepEqual(err, test.wantErr) {
spew := spew.ConfigState{DisablePointerAddresses: true, DisableCapacities: true} spew := spew.ConfigState{DisablePointerAddresses: true, DisableCapacities: true}
t.Errorf("%s: returned error %#v, want %#v", test.name, spew.NewFormatter(err), spew.NewFormatter(test.wantErr)) t.Errorf("%s: returned error %#v, want %#v", test.name, spew.NewFormatter(err), spew.NewFormatter(test.wantErr))
} }
if !reflect.DeepEqual(compatErr, test.wantCompactErr) {
spew := spew.ConfigState{DisablePointerAddresses: true, DisableCapacities: true}
t.Errorf("%s: returned error %#v, want %#v", test.name, spew.NewFormatter(compatErr), spew.NewFormatter(test.wantCompactErr))
}
if !reflect.DeepEqual(config, test.wantConfig) { if !reflect.DeepEqual(config, test.wantConfig) {
t.Errorf("%s:\nreturned %v\nwant %v", test.name, config, test.wantConfig) t.Errorf("%s:\nreturned %v\nwant %v", test.name, config, test.wantConfig)
} }
@ -279,6 +281,7 @@ func TestVerkleGenesisCommit(t *testing.T) {
PragueTime: &verkleTime, PragueTime: &verkleTime,
VerkleTime: &verkleTime, VerkleTime: &verkleTime,
TerminalTotalDifficulty: big.NewInt(0), TerminalTotalDifficulty: big.NewInt(0),
EnableVerkleAtGenesis: true,
Ethash: nil, Ethash: nil,
Clique: nil, Clique: nil,
} }

View File

@ -18,12 +18,21 @@ package core
import ( import (
"runtime" "runtime"
"sync"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
) )
// SenderCacher is a concurrent transaction sender recoverer and cacher. // senderCacherOnce is used to ensure that the SenderCacher is initialized only once.
var SenderCacher = newTxSenderCacher(runtime.NumCPU()) var senderCacherOnce = sync.OnceValue(func() *txSenderCacher {
return newTxSenderCacher(runtime.NumCPU())
})
// SenderCacher returns the process-wide transaction sender cacher, creating it
// on first use. Initialization is guarded by sync.OnceValue (see
// senderCacherOnce), so concurrent callers are safe and the underlying
// txSenderCacher is constructed exactly once.
func SenderCacher() *txSenderCacher {
	return senderCacherOnce()
}
// txSenderCacherRequest is a request for recovering transaction senders with a // txSenderCacherRequest is a request for recovering transaction senders with a
// specific signature scheme and caching it into the transactions themselves. // specific signature scheme and caching it into the transactions themselves.

View File

@ -399,10 +399,16 @@ func (s *stateObject) commitStorage(op *accountUpdate) {
op.storages = make(map[common.Hash][]byte) op.storages = make(map[common.Hash][]byte)
} }
op.storages[hash] = encode(val) op.storages[hash] = encode(val)
if op.storagesOrigin == nil {
op.storagesOrigin = make(map[common.Hash][]byte) if op.storagesOriginByKey == nil {
op.storagesOriginByKey = make(map[common.Hash][]byte)
} }
op.storagesOrigin[hash] = encode(s.originStorage[key]) if op.storagesOriginByHash == nil {
op.storagesOriginByHash = make(map[common.Hash][]byte)
}
origin := encode(s.originStorage[key])
op.storagesOriginByKey[key] = origin
op.storagesOriginByHash[hash] = origin
// Overwrite the clean value of storage slots // Overwrite the clean value of storage slots
s.originStorage[key] = val s.originStorage[key] = val

View File

@ -56,7 +56,7 @@ func TestDump(t *testing.T) {
// write some of them to the trie // write some of them to the trie
s.state.updateStateObject(obj1) s.state.updateStateObject(obj1)
s.state.updateStateObject(obj2) s.state.updateStateObject(obj2)
root, _ := s.state.Commit(0, false) root, _ := s.state.Commit(0, false, false)
// check that DumpToCollector contains the state objects that are in trie // check that DumpToCollector contains the state objects that are in trie
s.state, _ = New(root, tdb) s.state, _ = New(root, tdb)
@ -116,7 +116,7 @@ func TestIterativeDump(t *testing.T) {
// write some of them to the trie // write some of them to the trie
s.state.updateStateObject(obj1) s.state.updateStateObject(obj1)
s.state.updateStateObject(obj2) s.state.updateStateObject(obj2)
root, _ := s.state.Commit(0, false) root, _ := s.state.Commit(0, false, false)
s.state, _ = New(root, tdb) s.state, _ = New(root, tdb)
b := &bytes.Buffer{} b := &bytes.Buffer{}
@ -142,7 +142,7 @@ func TestNull(t *testing.T) {
var value common.Hash var value common.Hash
s.state.SetState(address, common.Hash{}, value) s.state.SetState(address, common.Hash{}, value)
s.state.Commit(0, false) s.state.Commit(0, false, false)
if value := s.state.GetState(address, common.Hash{}); value != (common.Hash{}) { if value := s.state.GetState(address, common.Hash{}); value != (common.Hash{}) {
t.Errorf("expected empty current value, got %x", value) t.Errorf("expected empty current value, got %x", value)

View File

@ -1051,7 +1051,7 @@ func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root
// with their values be tracked as original value. // with their values be tracked as original value.
// In case (d), **original** account along with its storages should be deleted, // In case (d), **original** account along with its storages should be deleted,
// with their values be tracked as original value. // with their values be tracked as original value.
func (s *StateDB) handleDestruction() (map[common.Hash]*accountDelete, []*trienode.NodeSet, error) { func (s *StateDB) handleDestruction(noStorageWiping bool) (map[common.Hash]*accountDelete, []*trienode.NodeSet, error) {
var ( var (
nodes []*trienode.NodeSet nodes []*trienode.NodeSet
buf = crypto.NewKeccakState() buf = crypto.NewKeccakState()
@ -1080,6 +1080,9 @@ func (s *StateDB) handleDestruction() (map[common.Hash]*accountDelete, []*trieno
if prev.Root == types.EmptyRootHash || s.db.TrieDB().IsVerkle() { if prev.Root == types.EmptyRootHash || s.db.TrieDB().IsVerkle() {
continue continue
} }
if noStorageWiping {
return nil, nil, fmt.Errorf("unexpected storage wiping, %x", addr)
}
// Remove storage slots belonging to the account. // Remove storage slots belonging to the account.
storages, storagesOrigin, set, err := s.deleteStorage(addr, addrHash, prev.Root) storages, storagesOrigin, set, err := s.deleteStorage(addr, addrHash, prev.Root)
if err != nil { if err != nil {
@ -1101,7 +1104,7 @@ func (s *StateDB) GetTrie() Trie {
// commit gathers the state mutations accumulated along with the associated // commit gathers the state mutations accumulated along with the associated
// trie changes, resetting all internal flags with the new state as the base. // trie changes, resetting all internal flags with the new state as the base.
func (s *StateDB) commit(deleteEmptyObjects bool) (*stateUpdate, error) { func (s *StateDB) commit(deleteEmptyObjects bool, noStorageWiping bool) (*stateUpdate, error) {
// Short circuit in case any database failure occurred earlier. // Short circuit in case any database failure occurred earlier.
if s.dbErr != nil { if s.dbErr != nil {
return nil, fmt.Errorf("commit aborted due to earlier error: %v", s.dbErr) return nil, fmt.Errorf("commit aborted due to earlier error: %v", s.dbErr)
@ -1155,7 +1158,7 @@ func (s *StateDB) commit(deleteEmptyObjects bool) (*stateUpdate, error) {
// the same block, account deletions must be processed first. This ensures // the same block, account deletions must be processed first. This ensures
// that the storage trie nodes deleted during destruction and recreated // that the storage trie nodes deleted during destruction and recreated
// during subsequent resurrection can be combined correctly. // during subsequent resurrection can be combined correctly.
deletes, delNodes, err := s.handleDestruction() deletes, delNodes, err := s.handleDestruction(noStorageWiping)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -1252,13 +1255,14 @@ func (s *StateDB) commit(deleteEmptyObjects bool) (*stateUpdate, error) {
origin := s.originalRoot origin := s.originalRoot
s.originalRoot = root s.originalRoot = root
return newStateUpdate(origin, root, deletes, updates, nodes), nil
return newStateUpdate(noStorageWiping, origin, root, deletes, updates, nodes), nil
} }
// commitAndFlush is a wrapper of commit which also commits the state mutations // commitAndFlush is a wrapper of commit which also commits the state mutations
// to the configured data stores. // to the configured data stores.
func (s *StateDB) commitAndFlush(block uint64, deleteEmptyObjects bool) (*stateUpdate, error) { func (s *StateDB) commitAndFlush(block uint64, deleteEmptyObjects bool, noStorageWiping bool) (*stateUpdate, error) {
ret, err := s.commit(deleteEmptyObjects) ret, err := s.commit(deleteEmptyObjects, noStorageWiping)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -1310,8 +1314,13 @@ func (s *StateDB) commitAndFlush(block uint64, deleteEmptyObjects bool) (*stateU
// //
// The associated block number of the state transition is also provided // The associated block number of the state transition is also provided
// for more chain context. // for more chain context.
func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, error) { //
ret, err := s.commitAndFlush(block, deleteEmptyObjects) // noStorageWiping is a flag indicating whether storage wiping is permitted.
// Since self-destruction was deprecated with the Cancun fork and there are
// no empty accounts left that could be deleted by EIP-158, storage wiping
// should not occur.
func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool, noStorageWiping bool) (common.Hash, error) {
ret, err := s.commitAndFlush(block, deleteEmptyObjects, noStorageWiping)
if err != nil { if err != nil {
return common.Hash{}, err return common.Hash{}, err
} }

View File

@ -228,7 +228,7 @@ func (test *stateTest) run() bool {
} else { } else {
state.IntermediateRoot(true) // call intermediateRoot at the transaction boundary state.IntermediateRoot(true) // call intermediateRoot at the transaction boundary
} }
ret, err := state.commitAndFlush(0, true) // call commit at the block boundary ret, err := state.commitAndFlush(0, true, false) // call commit at the block boundary
if err != nil { if err != nil {
panic(err) panic(err)
} }

View File

@ -71,7 +71,7 @@ func TestBurn(t *testing.T) {
hooked.AddBalance(addC, uint256.NewInt(200), tracing.BalanceChangeUnspecified) hooked.AddBalance(addC, uint256.NewInt(200), tracing.BalanceChangeUnspecified)
hooked.Finalise(true) hooked.Finalise(true)
s.Commit(0, false) s.Commit(0, false, false)
if have, want := burned, uint256.NewInt(600); !have.Eq(want) { if have, want := burned, uint256.NewInt(600); !have.Eq(want) {
t.Fatalf("burn-count wrong, have %v want %v", have, want) t.Fatalf("burn-count wrong, have %v want %v", have, want)
} }

View File

@ -119,7 +119,7 @@ func TestIntermediateLeaks(t *testing.T) {
} }
// Commit and cross check the databases. // Commit and cross check the databases.
transRoot, err := transState.Commit(0, false) transRoot, err := transState.Commit(0, false, false)
if err != nil { if err != nil {
t.Fatalf("failed to commit transition state: %v", err) t.Fatalf("failed to commit transition state: %v", err)
} }
@ -127,7 +127,7 @@ func TestIntermediateLeaks(t *testing.T) {
t.Errorf("can not commit trie %v to persistent database", transRoot.Hex()) t.Errorf("can not commit trie %v to persistent database", transRoot.Hex())
} }
finalRoot, err := finalState.Commit(0, false) finalRoot, err := finalState.Commit(0, false, false)
if err != nil { if err != nil {
t.Fatalf("failed to commit final state: %v", err) t.Fatalf("failed to commit final state: %v", err)
} }
@ -240,7 +240,7 @@ func TestCopyWithDirtyJournal(t *testing.T) {
obj.data.Root = common.HexToHash("0xdeadbeef") obj.data.Root = common.HexToHash("0xdeadbeef")
orig.updateStateObject(obj) orig.updateStateObject(obj)
} }
root, _ := orig.Commit(0, true) root, _ := orig.Commit(0, true, false)
orig, _ = New(root, db) orig, _ = New(root, db)
// modify all in memory without finalizing // modify all in memory without finalizing
@ -293,7 +293,7 @@ func TestCopyObjectState(t *testing.T) {
t.Fatalf("Error in test itself, the 'done' flag should not be set before Commit, have %v want %v", have, want) t.Fatalf("Error in test itself, the 'done' flag should not be set before Commit, have %v want %v", have, want)
} }
} }
orig.Commit(0, true) orig.Commit(0, true, false)
for _, op := range cpy.mutations { for _, op := range cpy.mutations {
if have, want := op.applied, false; have != want { if have, want := op.applied, false; have != want {
t.Fatalf("Error: original state affected copy, have %v want %v", have, want) t.Fatalf("Error: original state affected copy, have %v want %v", have, want)
@ -696,7 +696,7 @@ func (test *snapshotTest) checkEqual(state, checkstate *StateDB) error {
func TestTouchDelete(t *testing.T) { func TestTouchDelete(t *testing.T) {
s := newStateEnv() s := newStateEnv()
s.state.getOrNewStateObject(common.Address{}) s.state.getOrNewStateObject(common.Address{})
root, _ := s.state.Commit(0, false) root, _ := s.state.Commit(0, false, false)
s.state, _ = New(root, s.state.db) s.state, _ = New(root, s.state.db)
snapshot := s.state.Snapshot() snapshot := s.state.Snapshot()
@ -784,7 +784,7 @@ func TestCopyCommitCopy(t *testing.T) {
t.Fatalf("second copy committed storage slot mismatch: have %x, want %x", val, sval) t.Fatalf("second copy committed storage slot mismatch: have %x, want %x", val, sval)
} }
// Commit state, ensure states can be loaded from disk // Commit state, ensure states can be loaded from disk
root, _ := state.Commit(0, false) root, _ := state.Commit(0, false, false)
state, _ = New(root, tdb) state, _ = New(root, tdb)
if balance := state.GetBalance(addr); balance.Cmp(uint256.NewInt(42)) != 0 { if balance := state.GetBalance(addr); balance.Cmp(uint256.NewInt(42)) != 0 {
t.Fatalf("state post-commit balance mismatch: have %v, want %v", balance, 42) t.Fatalf("state post-commit balance mismatch: have %v, want %v", balance, 42)
@ -898,11 +898,11 @@ func TestCommitCopy(t *testing.T) {
if val := state.GetCommittedState(addr, skey1); val != (common.Hash{}) { if val := state.GetCommittedState(addr, skey1); val != (common.Hash{}) {
t.Fatalf("initial committed storage slot mismatch: have %x, want %x", val, common.Hash{}) t.Fatalf("initial committed storage slot mismatch: have %x, want %x", val, common.Hash{})
} }
root, _ := state.Commit(0, true) root, _ := state.Commit(0, true, false)
state, _ = New(root, db) state, _ = New(root, db)
state.SetState(addr, skey2, sval2) state.SetState(addr, skey2, sval2)
state.Commit(1, true) state.Commit(1, true, false)
// Copy the committed state database, the copied one is not fully functional. // Copy the committed state database, the copied one is not fully functional.
copied := state.Copy() copied := state.Copy()
@ -943,7 +943,7 @@ func TestDeleteCreateRevert(t *testing.T) {
addr := common.BytesToAddress([]byte("so")) addr := common.BytesToAddress([]byte("so"))
state.SetBalance(addr, uint256.NewInt(1), tracing.BalanceChangeUnspecified) state.SetBalance(addr, uint256.NewInt(1), tracing.BalanceChangeUnspecified)
root, _ := state.Commit(0, false) root, _ := state.Commit(0, false, false)
state, _ = New(root, state.db) state, _ = New(root, state.db)
// Simulate self-destructing in one transaction, then create-reverting in another // Simulate self-destructing in one transaction, then create-reverting in another
@ -955,7 +955,7 @@ func TestDeleteCreateRevert(t *testing.T) {
state.RevertToSnapshot(id) state.RevertToSnapshot(id)
// Commit the entire state and make sure we don't crash and have the correct state // Commit the entire state and make sure we don't crash and have the correct state
root, _ = state.Commit(0, true) root, _ = state.Commit(0, true, false)
state, _ = New(root, state.db) state, _ = New(root, state.db)
if state.getStateObject(addr) != nil { if state.getStateObject(addr) != nil {
@ -998,7 +998,7 @@ func testMissingTrieNodes(t *testing.T, scheme string) {
a2 := common.BytesToAddress([]byte("another")) a2 := common.BytesToAddress([]byte("another"))
state.SetBalance(a2, uint256.NewInt(100), tracing.BalanceChangeUnspecified) state.SetBalance(a2, uint256.NewInt(100), tracing.BalanceChangeUnspecified)
state.SetCode(a2, []byte{1, 2, 4}) state.SetCode(a2, []byte{1, 2, 4})
root, _ = state.Commit(0, false) root, _ = state.Commit(0, false, false)
t.Logf("root: %x", root) t.Logf("root: %x", root)
// force-flush // force-flush
tdb.Commit(root, false) tdb.Commit(root, false)
@ -1022,7 +1022,7 @@ func testMissingTrieNodes(t *testing.T, scheme string) {
} }
// Modify the state // Modify the state
state.SetBalance(addr, uint256.NewInt(2), tracing.BalanceChangeUnspecified) state.SetBalance(addr, uint256.NewInt(2), tracing.BalanceChangeUnspecified)
root, err := state.Commit(0, false) root, err := state.Commit(0, false, false)
if err == nil { if err == nil {
t.Fatalf("expected error, got root :%x", root) t.Fatalf("expected error, got root :%x", root)
} }
@ -1213,7 +1213,7 @@ func TestFlushOrderDataLoss(t *testing.T) {
state.SetState(common.Address{a}, common.Hash{a, s}, common.Hash{a, s}) state.SetState(common.Address{a}, common.Hash{a, s}, common.Hash{a, s})
} }
} }
root, err := state.Commit(0, false) root, err := state.Commit(0, false, false)
if err != nil { if err != nil {
t.Fatalf("failed to commit state trie: %v", err) t.Fatalf("failed to commit state trie: %v", err)
} }
@ -1288,8 +1288,7 @@ func TestDeleteStorage(t *testing.T) {
value := common.Hash(uint256.NewInt(uint64(10 * i)).Bytes32()) value := common.Hash(uint256.NewInt(uint64(10 * i)).Bytes32())
state.SetState(addr, slot, value) state.SetState(addr, slot, value)
} }
root, _ := state.Commit(0, true) root, _ := state.Commit(0, true, false)
// Init phase done, create two states, one with snap and one without // Init phase done, create two states, one with snap and one without
fastState, _ := New(root, NewDatabase(tdb, snaps)) fastState, _ := New(root, NewDatabase(tdb, snaps))
slowState, _ := New(root, NewDatabase(tdb, nil)) slowState, _ := New(root, NewDatabase(tdb, nil))

View File

@ -34,8 +34,14 @@ type contractCode struct {
type accountDelete struct { type accountDelete struct {
address common.Address // address is the unique account identifier address common.Address // address is the unique account identifier
origin []byte // origin is the original value of account data in slim-RLP encoding. origin []byte // origin is the original value of account data in slim-RLP encoding.
storages map[common.Hash][]byte // storages stores mutated slots, the value should be nil.
storagesOrigin map[common.Hash][]byte // storagesOrigin stores the original values of mutated slots in prefix-zero-trimmed RLP format. // storages stores mutated slots, the value should be nil.
storages map[common.Hash][]byte
// storagesOrigin stores the original values of mutated slots in
// prefix-zero-trimmed RLP format. The map key refers to the **HASH**
// of the raw storage slot key.
storagesOrigin map[common.Hash][]byte
} }
// accountUpdate represents an operation for updating an Ethereum account. // accountUpdate represents an operation for updating an Ethereum account.
@ -45,7 +51,13 @@ type accountUpdate struct {
origin []byte // origin is the original value of account data in slim-RLP encoding. origin []byte // origin is the original value of account data in slim-RLP encoding.
code *contractCode // code represents mutated contract code; nil means it's not modified. code *contractCode // code represents mutated contract code; nil means it's not modified.
storages map[common.Hash][]byte // storages stores mutated slots in prefix-zero-trimmed RLP format. storages map[common.Hash][]byte // storages stores mutated slots in prefix-zero-trimmed RLP format.
storagesOrigin map[common.Hash][]byte // storagesOrigin stores the original values of mutated slots in prefix-zero-trimmed RLP format.
// storagesOriginByKey and storagesOriginByHash both store the original values
// of mutated slots in prefix-zero-trimmed RLP format. The difference is that
// storagesOriginByKey uses the **raw** storage slot key as the map ID, while
// storagesOriginByHash uses the **hash** of the storage slot key instead.
storagesOriginByKey map[common.Hash][]byte
storagesOriginByHash map[common.Hash][]byte
} }
// stateUpdate represents the difference between two states resulting from state // stateUpdate represents the difference between two states resulting from state
@ -56,8 +68,18 @@ type stateUpdate struct {
root common.Hash // hash of the state after applying mutation root common.Hash // hash of the state after applying mutation
accounts map[common.Hash][]byte // accounts stores mutated accounts in 'slim RLP' encoding accounts map[common.Hash][]byte // accounts stores mutated accounts in 'slim RLP' encoding
accountsOrigin map[common.Address][]byte // accountsOrigin stores the original values of mutated accounts in 'slim RLP' encoding accountsOrigin map[common.Address][]byte // accountsOrigin stores the original values of mutated accounts in 'slim RLP' encoding
storages map[common.Hash]map[common.Hash][]byte // storages stores mutated slots in 'prefix-zero-trimmed' RLP format
storagesOrigin map[common.Address]map[common.Hash][]byte // storagesOrigin stores the original values of mutated slots in 'prefix-zero-trimmed' RLP format // storages stores mutated slots in 'prefix-zero-trimmed' RLP format.
// The value is keyed by account hash and **storage slot key hash**.
storages map[common.Hash]map[common.Hash][]byte
// storagesOrigin stores the original values of mutated slots in
// 'prefix-zero-trimmed' RLP format.
// (a) the value is keyed by account hash and **storage slot key** if rawStorageKey is true;
// (b) the value is keyed by account hash and **storage slot key hash** if rawStorageKey is false;
storagesOrigin map[common.Address]map[common.Hash][]byte
rawStorageKey bool
codes map[common.Address]contractCode // codes contains the set of dirty codes codes map[common.Address]contractCode // codes contains the set of dirty codes
nodes *trienode.MergedNodeSet // Aggregated dirty nodes caused by state changes nodes *trienode.MergedNodeSet // Aggregated dirty nodes caused by state changes
} }
@ -67,10 +89,13 @@ func (sc *stateUpdate) empty() bool {
return sc.originRoot == sc.root return sc.originRoot == sc.root
} }
// newStateUpdate constructs a state update object, representing the differences // newStateUpdate constructs a state update object by identifying the differences
// between two states by performing state execution. It aggregates the given // between two states through state execution. It combines the specified account
// account deletions and account updates to form a comprehensive state update. // deletions and account updates to create a complete state update.
func newStateUpdate(originRoot common.Hash, root common.Hash, deletes map[common.Hash]*accountDelete, updates map[common.Hash]*accountUpdate, nodes *trienode.MergedNodeSet) *stateUpdate { //
// rawStorageKey is a flag indicating whether to use the raw storage slot key or
// the hash of the slot key for constructing state update object.
func newStateUpdate(rawStorageKey bool, originRoot common.Hash, root common.Hash, deletes map[common.Hash]*accountDelete, updates map[common.Hash]*accountUpdate, nodes *trienode.MergedNodeSet) *stateUpdate {
var ( var (
accounts = make(map[common.Hash][]byte) accounts = make(map[common.Hash][]byte)
accountsOrigin = make(map[common.Address][]byte) accountsOrigin = make(map[common.Address][]byte)
@ -78,13 +103,14 @@ func newStateUpdate(originRoot common.Hash, root common.Hash, deletes map[common
storagesOrigin = make(map[common.Address]map[common.Hash][]byte) storagesOrigin = make(map[common.Address]map[common.Hash][]byte)
codes = make(map[common.Address]contractCode) codes = make(map[common.Address]contractCode)
) )
// Due to the fact that some accounts could be destructed and resurrected // Since some accounts might be destroyed and recreated within the same
// within the same block, the deletions must be aggregated first. // block, deletions must be aggregated first.
for addrHash, op := range deletes { for addrHash, op := range deletes {
addr := op.address addr := op.address
accounts[addrHash] = nil accounts[addrHash] = nil
accountsOrigin[addr] = op.origin accountsOrigin[addr] = op.origin
// If storage wiping exists, the hash of the storage slot key must be used
if len(op.storages) > 0 { if len(op.storages) > 0 {
storages[addrHash] = op.storages storages[addrHash] = op.storages
} }
@ -118,12 +144,16 @@ func newStateUpdate(originRoot common.Hash, root common.Hash, deletes map[common
} }
// Aggregate the storage original values. If the slot is already present // Aggregate the storage original values. If the slot is already present
// in aggregated storagesOrigin set, skip it. // in aggregated storagesOrigin set, skip it.
if len(op.storagesOrigin) > 0 { storageOriginSet := op.storagesOriginByHash
if rawStorageKey {
storageOriginSet = op.storagesOriginByKey
}
if len(storageOriginSet) > 0 {
origin, exist := storagesOrigin[addr] origin, exist := storagesOrigin[addr]
if !exist { if !exist {
storagesOrigin[addr] = op.storagesOrigin storagesOrigin[addr] = storageOriginSet
} else { } else {
for key, slot := range op.storagesOrigin { for key, slot := range storageOriginSet {
if _, found := origin[key]; !found { if _, found := origin[key]; !found {
origin[key] = slot origin[key] = slot
} }
@ -138,6 +168,7 @@ func newStateUpdate(originRoot common.Hash, root common.Hash, deletes map[common
accountsOrigin: accountsOrigin, accountsOrigin: accountsOrigin,
storages: storages, storages: storages,
storagesOrigin: storagesOrigin, storagesOrigin: storagesOrigin,
rawStorageKey: rawStorageKey,
codes: codes, codes: codes,
nodes: nodes, nodes: nodes,
} }
@ -153,5 +184,6 @@ func (sc *stateUpdate) stateSet() *triedb.StateSet {
AccountsOrigin: sc.accountsOrigin, AccountsOrigin: sc.accountsOrigin,
Storages: sc.storages, Storages: sc.storages,
StoragesOrigin: sc.storagesOrigin, StoragesOrigin: sc.storagesOrigin,
RawStorageKey: sc.rawStorageKey,
} }
} }

View File

@ -79,7 +79,7 @@ func makeTestState(scheme string) (ethdb.Database, Database, *triedb.Database, c
} }
accounts = append(accounts, acc) accounts = append(accounts, acc)
} }
root, _ := state.Commit(0, false) root, _ := state.Commit(0, false, false)
// Return the generated state // Return the generated state
return db, sdb, nodeDb, root, accounts return db, sdb, nodeDb, root, accounts

View File

@ -83,7 +83,7 @@ func TestVerklePrefetcher(t *testing.T) {
state.SetBalance(addr, uint256.NewInt(42), tracing.BalanceChangeUnspecified) // Change the account trie state.SetBalance(addr, uint256.NewInt(42), tracing.BalanceChangeUnspecified) // Change the account trie
state.SetCode(addr, []byte("hello")) // Change an external metadata state.SetCode(addr, []byte("hello")) // Change an external metadata
state.SetState(addr, skey, sval) // Change the storage trie state.SetState(addr, skey, sval) // Change the storage trie
root, _ := state.Commit(0, true) root, _ := state.Commit(0, true, false)
state, _ = New(root, sdb) state, _ = New(root, sdb)
sRoot := state.GetStorageRoot(addr) sRoot := state.GetStorageRoot(addr)

View File

@ -529,8 +529,8 @@ func (st *stateTransition) execute() (*ExecutionResult, error) {
// validateAuthorization validates an EIP-7702 authorization against the state. // validateAuthorization validates an EIP-7702 authorization against the state.
func (st *stateTransition) validateAuthorization(auth *types.SetCodeAuthorization) (authority common.Address, err error) { func (st *stateTransition) validateAuthorization(auth *types.SetCodeAuthorization) (authority common.Address, err error) {
// Verify chain ID is 0 or equal to current chain ID. // Verify chain ID is null or equal to current chain ID.
if auth.ChainID != 0 && st.evm.ChainConfig().ChainID.Uint64() != auth.ChainID { if !auth.ChainID.IsZero() && auth.ChainID.CmpBig(st.evm.ChainConfig().ChainID) != 0 {
return authority, ErrAuthorizationWrongChainID return authority, ErrAuthorizationWrongChainID
} }
// Limit nonce to 2^64-1 per EIP-2681. // Limit nonce to 2^64-1 per EIP-2681.

View File

@ -9,6 +9,14 @@ All notable changes to the tracing interface will be documented in this file.
- `GasChangeReason` has been extended with the following reasons which will be enabled only post-Verkle. There shouldn't be any gas changes with those reasons prior to the fork. - `GasChangeReason` has been extended with the following reasons which will be enabled only post-Verkle. There shouldn't be any gas changes with those reasons prior to the fork.
- `GasChangeWitnessContractCollisionCheck` flags the event of adding to the witness when checking for contract address collision. - `GasChangeWitnessContractCollisionCheck` flags the event of adding to the witness when checking for contract address collision.
## [v1.14.12]
This release contains a change in behavior for `OnCodeChange` hook.
### `OnCodeChange` change
The `OnCodeChange` hook is now called when the code of a contract is removed due to a selfdestruct. Previously, no code change was emitted on such occasions.
## [v1.14.4] ## [v1.14.4]
This release contained only minor extensions to the tracing interface. This release contained only minor extensions to the tracing interface.

View File

@ -293,7 +293,7 @@ const (
GasChangeCallLeftOverRefunded GasChangeReason = 7 GasChangeCallLeftOverRefunded GasChangeReason = 7
// GasChangeCallContractCreation is the amount of gas that will be burned for a CREATE. // GasChangeCallContractCreation is the amount of gas that will be burned for a CREATE.
GasChangeCallContractCreation GasChangeReason = 8 GasChangeCallContractCreation GasChangeReason = 8
// GasChangeContractCreation is the amount of gas that will be burned for a CREATE2. // GasChangeCallContractCreation2 is the amount of gas that will be burned for a CREATE2.
GasChangeCallContractCreation2 GasChangeReason = 9 GasChangeCallContractCreation2 GasChangeReason = 9
// GasChangeCallCodeStorage is the amount of gas that will be charged for code storage. // GasChangeCallCodeStorage is the amount of gas that will be charged for code storage.
GasChangeCallCodeStorage GasChangeReason = 10 GasChangeCallCodeStorage GasChangeReason = 10

View File

@ -650,7 +650,7 @@ func TestOpenDrops(t *testing.T) {
statedb.AddBalance(crypto.PubkeyToAddress(overcapper.PublicKey), uint256.NewInt(10000000), tracing.BalanceChangeUnspecified) statedb.AddBalance(crypto.PubkeyToAddress(overcapper.PublicKey), uint256.NewInt(10000000), tracing.BalanceChangeUnspecified)
statedb.AddBalance(crypto.PubkeyToAddress(duplicater.PublicKey), uint256.NewInt(1000000), tracing.BalanceChangeUnspecified) statedb.AddBalance(crypto.PubkeyToAddress(duplicater.PublicKey), uint256.NewInt(1000000), tracing.BalanceChangeUnspecified)
statedb.AddBalance(crypto.PubkeyToAddress(repeater.PublicKey), uint256.NewInt(1000000), tracing.BalanceChangeUnspecified) statedb.AddBalance(crypto.PubkeyToAddress(repeater.PublicKey), uint256.NewInt(1000000), tracing.BalanceChangeUnspecified)
statedb.Commit(0, true) statedb.Commit(0, true, false)
chain := &testBlockChain{ chain := &testBlockChain{
config: params.MainnetChainConfig, config: params.MainnetChainConfig,
@ -769,7 +769,7 @@ func TestOpenIndex(t *testing.T) {
// Create a blob pool out of the pre-seeded data // Create a blob pool out of the pre-seeded data
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting()) statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
statedb.AddBalance(addr, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified) statedb.AddBalance(addr, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
statedb.Commit(0, true) statedb.Commit(0, true, false)
chain := &testBlockChain{ chain := &testBlockChain{
config: params.MainnetChainConfig, config: params.MainnetChainConfig,
@ -871,7 +871,7 @@ func TestOpenHeap(t *testing.T) {
statedb.AddBalance(addr1, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified) statedb.AddBalance(addr1, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
statedb.AddBalance(addr2, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified) statedb.AddBalance(addr2, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
statedb.AddBalance(addr3, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified) statedb.AddBalance(addr3, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
statedb.Commit(0, true) statedb.Commit(0, true, false)
chain := &testBlockChain{ chain := &testBlockChain{
config: params.MainnetChainConfig, config: params.MainnetChainConfig,
@ -951,7 +951,7 @@ func TestOpenCap(t *testing.T) {
statedb.AddBalance(addr1, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified) statedb.AddBalance(addr1, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
statedb.AddBalance(addr2, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified) statedb.AddBalance(addr2, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
statedb.AddBalance(addr3, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified) statedb.AddBalance(addr3, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
statedb.Commit(0, true) statedb.Commit(0, true, false)
chain := &testBlockChain{ chain := &testBlockChain{
config: params.MainnetChainConfig, config: params.MainnetChainConfig,
@ -1393,7 +1393,7 @@ func TestAdd(t *testing.T) {
store.Put(blob) store.Put(blob)
} }
} }
statedb.Commit(0, true) statedb.Commit(0, true, false)
store.Close() store.Close()
// Create a blob pool out of the pre-seeded dats // Create a blob pool out of the pre-seeded dats
@ -1519,7 +1519,7 @@ func benchmarkPoolPending(b *testing.B, datacap uint64) {
statedb.AddBalance(addr, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified) statedb.AddBalance(addr, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
pool.add(tx) pool.add(tx)
} }
statedb.Commit(0, true) statedb.Commit(0, true, false)
defer pool.Close() defer pool.Close()
// Benchmark assembling the pending // Benchmark assembling the pending

View File

@ -1313,7 +1313,7 @@ func (pool *LegacyPool) reset(oldHead, newHead *types.Header) {
// Inject any transactions discarded due to reorgs // Inject any transactions discarded due to reorgs
log.Debug("Reinjecting stale transactions", "count", len(reinject)) log.Debug("Reinjecting stale transactions", "count", len(reinject))
core.SenderCacher.Recover(pool.signer, reinject) core.SenderCacher().Recover(pool.signer, reinject)
pool.addTxsLocked(reinject) pool.addTxsLocked(reinject)
} }
@ -1758,4 +1758,5 @@ func (pool *LegacyPool) Clear() {
pool.priced = newPricedList(pool.all) pool.priced = newPricedList(pool.all)
pool.pending = make(map[common.Address]*list) pool.pending = make(map[common.Address]*list)
pool.queue = make(map[common.Address]*list) pool.queue = make(map[common.Address]*list)
pool.pendingNonces = newNoncer(pool.currentState)
} }

View File

@ -162,12 +162,12 @@ func TestTransactionZAttack(t *testing.T) {
var ivpendingNum int var ivpendingNum int
pendingtxs, _ := pool.Content() pendingtxs, _ := pool.Content()
for account, txs := range pendingtxs { for account, txs := range pendingtxs {
cur_balance := new(big.Int).Set(pool.currentState.GetBalance(account).ToBig()) curBalance := new(big.Int).Set(pool.currentState.GetBalance(account).ToBig())
for _, tx := range txs { for _, tx := range txs {
if cur_balance.Cmp(tx.Value()) <= 0 { if curBalance.Cmp(tx.Value()) <= 0 {
ivpendingNum++ ivpendingNum++
} else { } else {
cur_balance.Sub(cur_balance, tx.Value()) curBalance.Sub(curBalance, tx.Value())
} }
} }
} }

View File

@ -24,7 +24,7 @@ const (
depositRequestSize = 192 depositRequestSize = 192
) )
// UnpackIntoDeposit unpacks a serialized DepositEvent. // DepositLogToRequest unpacks a serialized DepositEvent.
func DepositLogToRequest(data []byte) ([]byte, error) { func DepositLogToRequest(data []byte) ([]byte, error) {
if len(data) != 576 { if len(data) != 576 {
return nil, fmt.Errorf("deposit wrong length: want 576, have %d", len(data)) return nil, fmt.Errorf("deposit wrong length: want 576, have %d", len(data))

View File

@ -16,7 +16,7 @@ var _ = (*authorizationMarshaling)(nil)
// MarshalJSON marshals as JSON. // MarshalJSON marshals as JSON.
func (s SetCodeAuthorization) MarshalJSON() ([]byte, error) { func (s SetCodeAuthorization) MarshalJSON() ([]byte, error) {
type SetCodeAuthorization struct { type SetCodeAuthorization struct {
ChainID hexutil.Uint64 `json:"chainId" gencodec:"required"` ChainID hexutil.U256 `json:"chainId" gencodec:"required"`
Address common.Address `json:"address" gencodec:"required"` Address common.Address `json:"address" gencodec:"required"`
Nonce hexutil.Uint64 `json:"nonce" gencodec:"required"` Nonce hexutil.Uint64 `json:"nonce" gencodec:"required"`
V hexutil.Uint64 `json:"yParity" gencodec:"required"` V hexutil.Uint64 `json:"yParity" gencodec:"required"`
@ -24,7 +24,7 @@ func (s SetCodeAuthorization) MarshalJSON() ([]byte, error) {
S hexutil.U256 `json:"s" gencodec:"required"` S hexutil.U256 `json:"s" gencodec:"required"`
} }
var enc SetCodeAuthorization var enc SetCodeAuthorization
enc.ChainID = hexutil.Uint64(s.ChainID) enc.ChainID = hexutil.U256(s.ChainID)
enc.Address = s.Address enc.Address = s.Address
enc.Nonce = hexutil.Uint64(s.Nonce) enc.Nonce = hexutil.Uint64(s.Nonce)
enc.V = hexutil.Uint64(s.V) enc.V = hexutil.Uint64(s.V)
@ -36,7 +36,7 @@ func (s SetCodeAuthorization) MarshalJSON() ([]byte, error) {
// UnmarshalJSON unmarshals from JSON. // UnmarshalJSON unmarshals from JSON.
func (s *SetCodeAuthorization) UnmarshalJSON(input []byte) error { func (s *SetCodeAuthorization) UnmarshalJSON(input []byte) error {
type SetCodeAuthorization struct { type SetCodeAuthorization struct {
ChainID *hexutil.Uint64 `json:"chainId" gencodec:"required"` ChainID *hexutil.U256 `json:"chainId" gencodec:"required"`
Address *common.Address `json:"address" gencodec:"required"` Address *common.Address `json:"address" gencodec:"required"`
Nonce *hexutil.Uint64 `json:"nonce" gencodec:"required"` Nonce *hexutil.Uint64 `json:"nonce" gencodec:"required"`
V *hexutil.Uint64 `json:"yParity" gencodec:"required"` V *hexutil.Uint64 `json:"yParity" gencodec:"required"`
@ -50,7 +50,7 @@ func (s *SetCodeAuthorization) UnmarshalJSON(input []byte) error {
if dec.ChainID == nil { if dec.ChainID == nil {
return errors.New("missing required field 'chainId' for SetCodeAuthorization") return errors.New("missing required field 'chainId' for SetCodeAuthorization")
} }
s.ChainID = uint64(*dec.ChainID) s.ChainID = uint256.Int(*dec.ChainID)
if dec.Address == nil { if dec.Address == nil {
return errors.New("missing required field 'address' for SetCodeAuthorization") return errors.New("missing required field 'address' for SetCodeAuthorization")
} }

View File

@ -155,7 +155,7 @@ func (tx *Transaction) MarshalJSON() ([]byte, error) {
enc.Proofs = itx.Sidecar.Proofs enc.Proofs = itx.Sidecar.Proofs
} }
case *SetCodeTx: case *SetCodeTx:
enc.ChainID = (*hexutil.Big)(new(big.Int).SetUint64(itx.ChainID)) enc.ChainID = (*hexutil.Big)(itx.ChainID.ToBig())
enc.Nonce = (*hexutil.Uint64)(&itx.Nonce) enc.Nonce = (*hexutil.Uint64)(&itx.Nonce)
enc.To = tx.To() enc.To = tx.To()
enc.Gas = (*hexutil.Uint64)(&itx.Gas) enc.Gas = (*hexutil.Uint64)(&itx.Gas)
@ -353,7 +353,11 @@ func (tx *Transaction) UnmarshalJSON(input []byte) error {
if dec.ChainID == nil { if dec.ChainID == nil {
return errors.New("missing required field 'chainId' in transaction") return errors.New("missing required field 'chainId' in transaction")
} }
itx.ChainID = uint256.MustFromBig((*big.Int)(dec.ChainID)) var overflow bool
itx.ChainID, overflow = uint256.FromBig(dec.ChainID.ToInt())
if overflow {
return errors.New("'chainId' value overflows uint256")
}
if dec.Nonce == nil { if dec.Nonce == nil {
return errors.New("missing required field 'nonce' in transaction") return errors.New("missing required field 'nonce' in transaction")
} }
@ -395,7 +399,6 @@ func (tx *Transaction) UnmarshalJSON(input []byte) error {
itx.BlobHashes = dec.BlobVersionedHashes itx.BlobHashes = dec.BlobVersionedHashes
// signature R // signature R
var overflow bool
if dec.R == nil { if dec.R == nil {
return errors.New("missing required field 'r' in transaction") return errors.New("missing required field 'r' in transaction")
} }
@ -432,7 +435,11 @@ func (tx *Transaction) UnmarshalJSON(input []byte) error {
if dec.ChainID == nil { if dec.ChainID == nil {
return errors.New("missing required field 'chainId' in transaction") return errors.New("missing required field 'chainId' in transaction")
} }
itx.ChainID = dec.ChainID.ToInt().Uint64() var overflow bool
itx.ChainID, overflow = uint256.FromBig(dec.ChainID.ToInt())
if overflow {
return errors.New("'chainId' value overflows uint256")
}
if dec.Nonce == nil { if dec.Nonce == nil {
return errors.New("missing required field 'nonce' in transaction") return errors.New("missing required field 'nonce' in transaction")
} }
@ -470,7 +477,6 @@ func (tx *Transaction) UnmarshalJSON(input []byte) error {
itx.AuthList = dec.AuthorizationList itx.AuthList = dec.AuthorizationList
// signature R // signature R
var overflow bool
if dec.R == nil { if dec.R == nil {
return errors.New("missing required field 'r' in transaction") return errors.New("missing required field 'r' in transaction")
} }

View File

@ -219,7 +219,7 @@ func (s pragueSigner) SignatureValues(tx *Transaction, sig []byte) (R, S, V *big
} }
// Check that chain ID of tx matches the signer. We also accept ID zero here, // Check that chain ID of tx matches the signer. We also accept ID zero here,
// because it indicates that the chain ID was not specified in the tx. // because it indicates that the chain ID was not specified in the tx.
if txdata.ChainID != 0 && new(big.Int).SetUint64(txdata.ChainID).Cmp(s.chainId) != 0 { if txdata.ChainID != nil && txdata.ChainID.CmpBig(s.chainId) != 0 {
return nil, nil, nil, fmt.Errorf("%w: have %d want %d", ErrInvalidChainId, txdata.ChainID, s.chainId) return nil, nil, nil, fmt.Errorf("%w: have %d want %d", ErrInvalidChainId, txdata.ChainID, s.chainId)
} }
R, S, _ = decodeSignature(sig) R, S, _ = decodeSignature(sig)

View File

@ -47,9 +47,9 @@ type BlobTx struct {
Sidecar *BlobTxSidecar `rlp:"-"` Sidecar *BlobTxSidecar `rlp:"-"`
// Signature values // Signature values
V *uint256.Int `json:"v" gencodec:"required"` V *uint256.Int
R *uint256.Int `json:"r" gencodec:"required"` R *uint256.Int
S *uint256.Int `json:"s" gencodec:"required"` S *uint256.Int
} }
// BlobTxSidecar contains the blobs of a blob transaction. // BlobTxSidecar contains the blobs of a blob transaction.

View File

@ -37,9 +37,9 @@ type DynamicFeeTx struct {
AccessList AccessList AccessList AccessList
// Signature values // Signature values
V *big.Int `json:"v" gencodec:"required"` V *big.Int
R *big.Int `json:"r" gencodec:"required"` R *big.Int
S *big.Int `json:"s" gencodec:"required"` S *big.Int
} }
// copy creates a deep copy of the transaction data and initializes all fields. // copy creates a deep copy of the transaction data and initializes all fields.

View File

@ -49,7 +49,7 @@ func AddressToDelegation(addr common.Address) []byte {
// SetCodeTx implements the EIP-7702 transaction type which temporarily installs // SetCodeTx implements the EIP-7702 transaction type which temporarily installs
// the code at the signer's address. // the code at the signer's address.
type SetCodeTx struct { type SetCodeTx struct {
ChainID uint64 ChainID *uint256.Int
Nonce uint64 Nonce uint64
GasTipCap *uint256.Int // a.k.a. maxPriorityFeePerGas GasTipCap *uint256.Int // a.k.a. maxPriorityFeePerGas
GasFeeCap *uint256.Int // a.k.a. maxFeePerGas GasFeeCap *uint256.Int // a.k.a. maxFeePerGas
@ -61,16 +61,16 @@ type SetCodeTx struct {
AuthList []SetCodeAuthorization AuthList []SetCodeAuthorization
// Signature values // Signature values
V *uint256.Int `json:"v" gencodec:"required"` V *uint256.Int
R *uint256.Int `json:"r" gencodec:"required"` R *uint256.Int
S *uint256.Int `json:"s" gencodec:"required"` S *uint256.Int
} }
//go:generate go run github.com/fjl/gencodec -type SetCodeAuthorization -field-override authorizationMarshaling -out gen_authorization.go //go:generate go run github.com/fjl/gencodec -type SetCodeAuthorization -field-override authorizationMarshaling -out gen_authorization.go
// SetCodeAuthorization is an authorization from an account to deploy code at its address. // SetCodeAuthorization is an authorization from an account to deploy code at its address.
type SetCodeAuthorization struct { type SetCodeAuthorization struct {
ChainID uint64 `json:"chainId" gencodec:"required"` ChainID uint256.Int `json:"chainId" gencodec:"required"`
Address common.Address `json:"address" gencodec:"required"` Address common.Address `json:"address" gencodec:"required"`
Nonce uint64 `json:"nonce" gencodec:"required"` Nonce uint64 `json:"nonce" gencodec:"required"`
V uint8 `json:"yParity" gencodec:"required"` V uint8 `json:"yParity" gencodec:"required"`
@ -80,7 +80,7 @@ type SetCodeAuthorization struct {
// field type overrides for gencodec // field type overrides for gencodec
type authorizationMarshaling struct { type authorizationMarshaling struct {
ChainID hexutil.Uint64 ChainID hexutil.U256
Nonce hexutil.Uint64 Nonce hexutil.Uint64
V hexutil.Uint64 V hexutil.Uint64
R hexutil.U256 R hexutil.U256
@ -180,7 +180,7 @@ func (tx *SetCodeTx) copy() TxData {
// accessors for innerTx. // accessors for innerTx.
func (tx *SetCodeTx) txType() byte { return SetCodeTxType } func (tx *SetCodeTx) txType() byte { return SetCodeTxType }
func (tx *SetCodeTx) chainID() *big.Int { return big.NewInt(int64(tx.ChainID)) } func (tx *SetCodeTx) chainID() *big.Int { return tx.ChainID.ToBig() }
func (tx *SetCodeTx) accessList() AccessList { return tx.AccessList } func (tx *SetCodeTx) accessList() AccessList { return tx.AccessList }
func (tx *SetCodeTx) data() []byte { return tx.Data } func (tx *SetCodeTx) data() []byte { return tx.Data }
func (tx *SetCodeTx) gas() uint64 { return tx.Gas } func (tx *SetCodeTx) gas() uint64 { return tx.Gas }
@ -207,7 +207,7 @@ func (tx *SetCodeTx) rawSignatureValues() (v, r, s *big.Int) {
} }
func (tx *SetCodeTx) setSignatureValues(chainID, v, r, s *big.Int) { func (tx *SetCodeTx) setSignatureValues(chainID, v, r, s *big.Int) {
tx.ChainID = chainID.Uint64() tx.ChainID = uint256.MustFromBig(chainID)
tx.V.SetFromBig(v) tx.V.SetFromBig(v)
tx.R.SetFromBig(r) tx.R.SetFromBig(r)
tx.S.SetFromBig(s) tx.S.SetFromBig(s)

View File

@ -57,6 +57,7 @@ var (
ShanghaiTime: u64(0), ShanghaiTime: u64(0),
VerkleTime: u64(0), VerkleTime: u64(0),
TerminalTotalDifficulty: common.Big0, TerminalTotalDifficulty: common.Big0,
EnableVerkleAtGenesis: true,
// TODO uncomment when proof generation is merged // TODO uncomment when proof generation is merged
// ProofInBlocks: true, // ProofInBlocks: true,
} }
@ -77,6 +78,7 @@ var (
ShanghaiTime: u64(0), ShanghaiTime: u64(0),
VerkleTime: u64(0), VerkleTime: u64(0),
TerminalTotalDifficulty: common.Big0, TerminalTotalDifficulty: common.Big0,
EnableVerkleAtGenesis: true,
} }
) )

View File

@ -109,8 +109,8 @@ func validateCode(code []byte, section int, container *Container, jt *JumpTable,
return nil, err return nil, err
} }
case RJUMPV: case RJUMPV:
max_size := int(code[i+1]) maxSize := int(code[i+1])
length := max_size + 1 length := maxSize + 1
if len(code) <= i+length { if len(code) <= i+length {
return nil, fmt.Errorf("%w: jump table truncated, op %s, pos %d", errTruncatedImmediate, op, i) return nil, fmt.Errorf("%w: jump table truncated, op %s, pos %d", errTruncatedImmediate, op, i)
} }
@ -120,7 +120,7 @@ func validateCode(code []byte, section int, container *Container, jt *JumpTable,
return nil, err return nil, err
} }
} }
i += 2 * max_size i += 2 * maxSize
case CALLF: case CALLF:
arg, _ := parseUint16(code[i+1:]) arg, _ := parseUint16(code[i+1:])
if arg >= len(container.types) { if arg >= len(container.types) {

View File

@ -19,6 +19,7 @@
// - There are not package guarantees. We might iterate heavily on this package, and do backwards-incompatible changes without warning // - There are not package guarantees. We might iterate heavily on this package, and do backwards-incompatible changes without warning
// - There are no quality-guarantees. These utilities may produce evm-code that is non-functional. YMMV. // - There are no quality-guarantees. These utilities may produce evm-code that is non-functional. YMMV.
// - There are no stability-guarantees. The utility will `panic` if the inputs do not align / make sense. // - There are no stability-guarantees. The utility will `panic` if the inputs do not align / make sense.
package program package program
import ( import (
@ -204,7 +205,7 @@ func (p *Program) StaticCall(gas *uint256.Int, address, inOffset, inSize, outOff
return p.Op(vm.STATICCALL) return p.Op(vm.STATICCALL)
} }
// StaticCall is a convenience function to make a callcode. If 'gas' is nil, the opcode GAS will // CallCode is a convenience function to make a callcode. If 'gas' is nil, the opcode GAS will
// be used to provide all gas. // be used to provide all gas.
func (p *Program) CallCode(gas *uint256.Int, address, value, inOffset, inSize, outOffset, outSize any) *Program { func (p *Program) CallCode(gas *uint256.Int, address, value, inOffset, inSize, outOffset, outSize any) *Program {
if outOffset == outSize && inSize == outSize && inOffset == outSize { if outOffset == outSize && inSize == outSize && inOffset == outSize {
@ -263,7 +264,7 @@ func (p *Program) InputAddressToStack(inputOffset uint32) *Program {
return p.Op(vm.AND) return p.Op(vm.AND)
} }
// MStore stores the provided data (into the memory area starting at memStart). // Mstore stores the provided data (into the memory area starting at memStart).
func (p *Program) Mstore(data []byte, memStart uint32) *Program { func (p *Program) Mstore(data []byte, memStart uint32) *Program {
var idx = 0 var idx = 0
// We need to store it in chunks of 32 bytes // We need to store it in chunks of 32 bytes

View File

@ -23,13 +23,13 @@ import (
) )
const ( const (
// The blocksize of BLAKE2b in bytes. // BlockSize the blocksize of BLAKE2b in bytes.
BlockSize = 128 BlockSize = 128
// The hash size of BLAKE2b-512 in bytes. // Size the hash size of BLAKE2b-512 in bytes.
Size = 64 Size = 64
// The hash size of BLAKE2b-384 in bytes. // Size384 the hash size of BLAKE2b-384 in bytes.
Size384 = 48 Size384 = 48
// The hash size of BLAKE2b-256 in bytes. // Size256 the hash size of BLAKE2b-256 in bytes.
Size256 = 32 Size256 = 32
) )

View File

@ -4,7 +4,7 @@ import (
"github.com/consensys/gnark-crypto/ecc/bn254" "github.com/consensys/gnark-crypto/ecc/bn254"
) )
// Computes the following relation: ∏ᵢ e(Pᵢ, Qᵢ) =? 1 // PairingCheck computes the following relation: ∏ᵢ e(Pᵢ, Qᵢ) =? 1
// //
// To explain why gnark returns a (bool, error): // To explain why gnark returns a (bool, error):
// //

View File

@ -60,12 +60,12 @@ type PublicKey struct {
Params *ECIESParams Params *ECIESParams
} }
// Export an ECIES public key as an ECDSA public key. // ExportECDSA exports an ECIES public key as an ECDSA public key.
func (pub *PublicKey) ExportECDSA() *ecdsa.PublicKey { func (pub *PublicKey) ExportECDSA() *ecdsa.PublicKey {
return &ecdsa.PublicKey{Curve: pub.Curve, X: pub.X, Y: pub.Y} return &ecdsa.PublicKey{Curve: pub.Curve, X: pub.X, Y: pub.Y}
} }
// Import an ECDSA public key as an ECIES public key. // ImportECDSAPublic imports an ECDSA public key as an ECIES public key.
func ImportECDSAPublic(pub *ecdsa.PublicKey) *PublicKey { func ImportECDSAPublic(pub *ecdsa.PublicKey) *PublicKey {
return &PublicKey{ return &PublicKey{
X: pub.X, X: pub.X,
@ -81,20 +81,20 @@ type PrivateKey struct {
D *big.Int D *big.Int
} }
// Export an ECIES private key as an ECDSA private key. // ExportECDSA exports an ECIES private key as an ECDSA private key.
func (prv *PrivateKey) ExportECDSA() *ecdsa.PrivateKey { func (prv *PrivateKey) ExportECDSA() *ecdsa.PrivateKey {
pub := &prv.PublicKey pub := &prv.PublicKey
pubECDSA := pub.ExportECDSA() pubECDSA := pub.ExportECDSA()
return &ecdsa.PrivateKey{PublicKey: *pubECDSA, D: prv.D} return &ecdsa.PrivateKey{PublicKey: *pubECDSA, D: prv.D}
} }
// Import an ECDSA private key as an ECIES private key. // ImportECDSA imports an ECDSA private key as an ECIES private key.
func ImportECDSA(prv *ecdsa.PrivateKey) *PrivateKey { func ImportECDSA(prv *ecdsa.PrivateKey) *PrivateKey {
pub := ImportECDSAPublic(&prv.PublicKey) pub := ImportECDSAPublic(&prv.PublicKey)
return &PrivateKey{*pub, prv.D} return &PrivateKey{*pub, prv.D}
} }
// Generate an elliptic curve public / private keypair. If params is nil, // GenerateKey generates an elliptic curve public / private keypair. If params is nil,
// the recommended default parameters for the key will be chosen. // the recommended default parameters for the key will be chosen.
func GenerateKey(rand io.Reader, curve elliptic.Curve, params *ECIESParams) (prv *PrivateKey, err error) { func GenerateKey(rand io.Reader, curve elliptic.Curve, params *ECIESParams) (prv *PrivateKey, err error) {
sk, err := ecdsa.GenerateKey(curve, rand) sk, err := ecdsa.GenerateKey(curve, rand)
@ -119,7 +119,7 @@ func MaxSharedKeyLength(pub *PublicKey) int {
return (pub.Curve.Params().BitSize + 7) / 8 return (pub.Curve.Params().BitSize + 7) / 8
} }
// ECDH key agreement method used to establish secret keys for encryption. // GenerateShared ECDH key agreement method used to establish secret keys for encryption.
func (prv *PrivateKey) GenerateShared(pub *PublicKey, skLen, macLen int) (sk []byte, err error) { func (prv *PrivateKey) GenerateShared(pub *PublicKey, skLen, macLen int) (sk []byte, err error) {
if prv.PublicKey.Curve != pub.Curve { if prv.PublicKey.Curve != pub.Curve {
return nil, ErrInvalidCurve return nil, ErrInvalidCurve

View File

@ -82,7 +82,7 @@ func TestAccountRange(t *testing.T) {
m[addr] = true m[addr] = true
} }
} }
root, _ := sdb.Commit(0, true) root, _ := sdb.Commit(0, true, false)
sdb, _ = state.New(root, statedb) sdb, _ = state.New(root, statedb)
trie, err := statedb.OpenTrie(root) trie, err := statedb.OpenTrie(root)
@ -140,7 +140,7 @@ func TestEmptyAccountRange(t *testing.T) {
st, _ = state.New(types.EmptyRootHash, statedb) st, _ = state.New(types.EmptyRootHash, statedb)
) )
// Commit(although nothing to flush) and re-init the statedb // Commit(although nothing to flush) and re-init the statedb
st.Commit(0, true) st.Commit(0, true, false)
st, _ = state.New(types.EmptyRootHash, statedb) st, _ = state.New(types.EmptyRootHash, statedb)
results := st.RawDump(&state.DumpConfig{ results := st.RawDump(&state.DumpConfig{
@ -183,7 +183,7 @@ func TestStorageRangeAt(t *testing.T) {
for _, entry := range storage { for _, entry := range storage {
sdb.SetState(addr, *entry.Key, entry.Value) sdb.SetState(addr, *entry.Key, entry.Value)
} }
root, _ := sdb.Commit(0, false) root, _ := sdb.Commit(0, false, false)
sdb, _ = state.New(root, db) sdb, _ = state.New(root, db)
// Check a few combinations of limit and start/end. // Check a few combinations of limit and start/end.

View File

@ -638,6 +638,9 @@ func (api *ConsensusAPI) NewPayloadV4(params engine.ExecutableData, versionedHas
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.UnsupportedFork.With(errors.New("newPayloadV4 must only be called for prague payloads")) return engine.PayloadStatusV1{Status: engine.INVALID}, engine.UnsupportedFork.With(errors.New("newPayloadV4 must only be called for prague payloads"))
} }
requests := convertRequests(executionRequests) requests := convertRequests(executionRequests)
if err := validateRequests(requests); err != nil {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(err)
}
return api.newPayload(params, versionedHashes, beaconRoot, requests, false) return api.newPayload(params, versionedHashes, beaconRoot, requests, false)
} }
@ -727,6 +730,9 @@ func (api *ConsensusAPI) NewPayloadWithWitnessV4(params engine.ExecutableData, v
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.UnsupportedFork.With(errors.New("newPayloadWithWitnessV4 must only be called for prague payloads")) return engine.PayloadStatusV1{Status: engine.INVALID}, engine.UnsupportedFork.With(errors.New("newPayloadWithWitnessV4 must only be called for prague payloads"))
} }
requests := convertRequests(executionRequests) requests := convertRequests(executionRequests)
if err := validateRequests(requests); err != nil {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(err)
}
return api.newPayload(params, versionedHashes, beaconRoot, requests, true) return api.newPayload(params, versionedHashes, beaconRoot, requests, true)
} }
@ -1287,3 +1293,20 @@ func convertRequests(hex []hexutil.Bytes) [][]byte {
} }
return req return req
} }
// validateRequests checks that requests are ordered by their type and are not empty.
func validateRequests(requests [][]byte) error {
var last byte
for _, req := range requests {
// No empty requests.
if len(req) < 2 {
return fmt.Errorf("empty request: %v", req)
}
// Check that requests are ordered by their type.
if req[0] < last {
return fmt.Errorf("invalid request order: %v", req)
}
last = req[0]
}
return nil
}

View File

@ -120,16 +120,23 @@ func NewOracle(backend OracleBackend, params Config, startPrice *big.Int) *Oracl
cache := lru.NewCache[cacheKey, processedFees](2048) cache := lru.NewCache[cacheKey, processedFees](2048)
headEvent := make(chan core.ChainHeadEvent, 1) headEvent := make(chan core.ChainHeadEvent, 1)
backend.SubscribeChainHeadEvent(headEvent) sub := backend.SubscribeChainHeadEvent(headEvent)
if sub != nil { // the gasprice testBackend doesn't support subscribing to head events
go func() { go func() {
var lastHead common.Hash var lastHead common.Hash
for ev := range headEvent { for {
select {
case ev := <-headEvent:
if ev.Header.ParentHash != lastHead { if ev.Header.ParentHash != lastHead {
cache.Purge() cache.Purge()
} }
lastHead = ev.Header.Hash() lastHead = ev.Header.Hash()
case <-sub.Err():
return
}
} }
}() }()
}
return &Oracle{ return &Oracle{
backend: backend, backend: backend,

View File

@ -152,7 +152,7 @@ func (eth *Ethereum) hashState(ctx context.Context, block *types.Block, reexec u
return nil, nil, fmt.Errorf("processing block %d failed: %v", current.NumberU64(), err) return nil, nil, fmt.Errorf("processing block %d failed: %v", current.NumberU64(), err)
} }
// Finalize the state so any modifications are written to the trie // Finalize the state so any modifications are written to the trie
root, err := statedb.Commit(current.NumberU64(), eth.blockchain.Config().IsEIP158(current.Number())) root, err := statedb.Commit(current.NumberU64(), eth.blockchain.Config().IsEIP158(current.Number()), eth.blockchain.Config().IsCancun(current.Number(), current.Time()))
if err != nil { if err != nil {
return nil, nil, fmt.Errorf("stateAtBlock commit failed, number %d root %v: %w", return nil, nil, fmt.Errorf("stateAtBlock commit failed, number %d root %v: %w",
current.NumberU64(), current.Root().Hex(), err) current.NumberU64(), current.Root().Hex(), err)

View File

@ -34,7 +34,7 @@ type Context struct {
TxHash common.Hash // Hash of the transaction being traced (zero if dangling call) TxHash common.Hash // Hash of the transaction being traced (zero if dangling call)
} }
// The set of methods that must be exposed by a tracer // Tracer represents the set of methods that must be exposed by a tracer
// for it to be available through the RPC interface. // for it to be available through the RPC interface.
// This involves a method to retrieve results and one to // This involves a method to retrieve results and one to
// stop tracing. // stop tracing.

View File

@ -13,6 +13,7 @@
// //
// You should have received a copy of the GNU Lesser General Public License // You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package internal package internal
import ( import (

View File

@ -19,11 +19,15 @@ package simulated
import ( import (
"context" "context"
"crypto/ecdsa" "crypto/ecdsa"
"crypto/sha256"
"math/big" "math/big"
"math/rand" "math/rand"
"testing" "testing"
"time" "time"
"github.com/ethereum/go-ethereum/crypto/kzg4844"
"github.com/holiman/uint256"
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
@ -36,6 +40,8 @@ var _ bind.ContractBackend = (Client)(nil)
var ( var (
testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
testAddr = crypto.PubkeyToAddress(testKey.PublicKey) testAddr = crypto.PubkeyToAddress(testKey.PublicKey)
testKey2, _ = crypto.HexToECDSA("7ee346e3f7efc685250053bfbafbfc880d58dc6145247053d4fb3cb0f66dfcb2")
testAddr2 = crypto.PubkeyToAddress(testKey2.PublicKey)
) )
func simTestBackend(testAddr common.Address) *Backend { func simTestBackend(testAddr common.Address) *Backend {
@ -46,6 +52,46 @@ func simTestBackend(testAddr common.Address) *Backend {
) )
} }
// newBlobTx creates a signed EIP-4844 blob transaction from the given key,
// sending a minimal single-blob payload back to the sender's own address.
// The blob sidecar (blob, commitment and proof) is attached to the transaction.
// Unlike before, all intermediate errors are propagated instead of being
// silently discarded, avoiding nil dereferences on head/chainid on failure.
func newBlobTx(sim *Backend, key *ecdsa.PrivateKey) (*types.Transaction, error) {
	client := sim.Client()

	// Build a trivial blob and its KZG commitment/proof artifacts.
	testBlob := &kzg4844.Blob{0x00}
	testBlobCommit, err := kzg4844.BlobToCommitment(testBlob)
	if err != nil {
		return nil, err
	}
	testBlobProof, err := kzg4844.ComputeBlobProof(testBlob, testBlobCommit)
	if err != nil {
		return nil, err
	}
	testBlobVHash := kzg4844.CalcBlobHashV1(sha256.New(), &testBlobCommit)

	head, err := client.HeaderByNumber(context.Background(), nil) // Should be child's, good enough
	if err != nil {
		return nil, err
	}
	gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(params.GWei))
	gasPriceU256, _ := uint256.FromBig(gasPrice)
	gasTipCapU256, _ := uint256.FromBig(big.NewInt(params.GWei))

	addr := crypto.PubkeyToAddress(key.PublicKey)
	chainid, err := client.ChainID(context.Background())
	if err != nil {
		return nil, err
	}
	nonce, err := client.PendingNonceAt(context.Background(), addr)
	if err != nil {
		return nil, err
	}

	chainidU256, _ := uint256.FromBig(chainid)
	tx := types.NewTx(&types.BlobTx{
		ChainID:    chainidU256,
		GasTipCap:  gasTipCapU256,
		GasFeeCap:  gasPriceU256,
		BlobFeeCap: uint256.NewInt(1),
		Gas:        21000,
		Nonce:      nonce,
		To:         addr,
		AccessList: nil,
		BlobHashes: []common.Hash{testBlobVHash},
		Sidecar: &types.BlobTxSidecar{
			Blobs:       []kzg4844.Blob{*testBlob},
			Commitments: []kzg4844.Commitment{testBlobCommit},
			Proofs:      []kzg4844.Proof{testBlobProof},
		},
	})
	return types.SignTx(tx, types.LatestSignerForChainID(chainid), key)
}
func newTx(sim *Backend, key *ecdsa.PrivateKey) (*types.Transaction, error) { func newTx(sim *Backend, key *ecdsa.PrivateKey) (*types.Transaction, error) {
client := sim.Client() client := sim.Client()
@ -66,6 +112,7 @@ func newTx(sim *Backend, key *ecdsa.PrivateKey) (*types.Transaction, error) {
Gas: 21000, Gas: 21000,
To: &addr, To: &addr,
}) })
return types.SignTx(tx, types.LatestSignerForChainID(chainid), key) return types.SignTx(tx, types.LatestSignerForChainID(chainid), key)
} }

View File

@ -0,0 +1,102 @@
package simulated
import (
"context"
"crypto/ecdsa"
"math/big"
"testing"
"time"
"github.com/ethereum/go-ethereum/core/types"
)
// TestTransactionRollbackBehavior tests that calling Rollback on the simulated backend doesn't prevent subsequent
// addition of new transactions
func TestTransactionRollbackBehavior(t *testing.T) {
	sim := NewBackend(
		types.GenesisAlloc{
			testAddr:  {Balance: big.NewInt(10000000000000000)},
			testAddr2: {Balance: big.NewInt(10000000000000000)},
		},
	)
	defer sim.Close()
	client := sim.Client()

	// Submit one blob tx and two plain txs, then discard them all via Rollback.
	droppedBlob := testSendSignedTx(t, testKey, sim, true)
	droppedTx0 := testSendSignedTx(t, testKey2, sim, false)
	droppedTx1 := testSendSignedTx(t, testKey2, sim, false)

	sim.Rollback()

	if pendingStateHasTx(client, droppedBlob) || pendingStateHasTx(client, droppedTx0) || pendingStateHasTx(client, droppedTx1) {
		t.Fatalf("all transactions were not rolled back")
	}

	// New submissions after the rollback must still be includable.
	keptBlob := testSendSignedTx(t, testKey, sim, true)
	keptTx0 := testSendSignedTx(t, testKey2, sim, false)
	keptTx1 := testSendSignedTx(t, testKey2, sim, false)

	sim.Commit()

	if !pendingStateHasTx(client, keptBlob) || !pendingStateHasTx(client, keptTx0) || !pendingStateHasTx(client, keptTx1) {
		t.Fatalf("all post-rollback transactions were not included")
	}
}
// testSendSignedTx builds, signs and submits a transaction (blob or plain,
// depending on isBlobTx) on behalf of key to the simulated backend.
// It does not commit the block; any creation or submission error fails the test.
func testSendSignedTx(t *testing.T, key *ecdsa.PrivateKey, sim *Backend, isBlobTx bool) *types.Transaction {
	t.Helper()

	// Pick the transaction constructor matching the requested flavor.
	build := newTx
	if isBlobTx {
		build = newBlobTx
	}
	signedTx, err := build(sim, key)
	if err != nil {
		t.Fatalf("failed to create transaction: %v", err)
	}
	if err := sim.Client().SendTransaction(context.Background(), signedTx); err != nil {
		t.Fatalf("failed to send transaction: %v", err)
	}
	return signedTx
}
// pendingStateHasTx returns true if a given transaction was successfully included as of the latest pending state.
// It polls for the receipt every 100ms for up to 2 seconds and reports whether a
// successful receipt was observed within that window.
func pendingStateHasTx(client Client, tx *types.Transaction) bool {
	ctx := context.Background()
	deadline := time.Now().Add(2 * time.Second)

	// Poll until a receipt shows up or the deadline expires.
	for time.Now().Before(deadline) {
		receipt, err := client.TransactionReceipt(ctx, tx.Hash())
		if err == nil && receipt != nil {
			return receipt.Status == types.ReceiptStatusSuccessful
		}
		time.Sleep(100 * time.Millisecond)
	}
	return false
}

View File

@ -12,7 +12,8 @@
// GNU General Public License for more details. // GNU General Public License for more details.
// //
// You should have received a copy of the GNU General Public License // You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>. // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package era package era
import ( import (

View File

@ -503,7 +503,7 @@ func (args *TransactionArgs) ToTransaction(defaultType int) *types.Transaction {
} }
data = &types.SetCodeTx{ data = &types.SetCodeTx{
To: *args.To, To: *args.To,
ChainID: args.ChainID.ToInt().Uint64(), ChainID: uint256.MustFromBig(args.ChainID.ToInt()),
Nonce: uint64(*args.Nonce), Nonce: uint64(*args.Nonce),
Gas: uint64(*args.Gas), Gas: uint64(*args.Gas),
GasFeeCap: uint256.MustFromBig((*big.Int)(args.MaxFeePerGas)), GasFeeCap: uint256.MustFromBig((*big.Int)(args.MaxFeePerGas)),

View File

@ -67,7 +67,19 @@ type evalReq struct {
done chan bool done chan bool
} }
// runtime must be stopped with Stop() after use and cannot be used after stopping // New creates and initializes a new JavaScript runtime environment (JSRE).
// The runtime is configured with the provided assetPath for loading scripts and
// an output writer for logging or printing results.
//
// The returned JSRE must be stopped by calling Stop() after use to release resources.
// Attempting to use the JSRE after stopping it will result in undefined behavior.
//
// Parameters:
// - assetPath: The path to the directory containing script assets.
// - output: The writer used for logging or printing runtime output.
//
// Returns:
// - A pointer to the newly created JSRE instance.
func New(assetPath string, output io.Writer) *JSRE { func New(assetPath string, output io.Writer) *JSRE {
re := &JSRE{ re := &JSRE{
assetPath: assetPath, assetPath: assetPath,
@ -251,8 +263,15 @@ func (re *JSRE) Stop(waitForCallbacks bool) {
} }
} }
// Exec(file) loads and runs the contents of a file // Exec loads and executes the contents of a JavaScript file.
// if a relative path is given, the jsre's assetPath is used // If a relative path is provided, the file is resolved relative to the JSRE's assetPath.
// The file is read, compiled, and executed in the JSRE's runtime environment.
//
// Parameters:
// - file: The path to the JavaScript file to execute. Can be an absolute path or relative to assetPath.
//
// Returns:
// - error: An error if the file cannot be read, compiled, or executed.
func (re *JSRE) Exec(file string) error { func (re *JSRE) Exec(file string) error {
code, err := os.ReadFile(common.AbsolutePath(re.assetPath, file)) code, err := os.ReadFile(common.AbsolutePath(re.assetPath, file))
if err != nil { if err != nil {

View File

@ -7,6 +7,7 @@
// we require because of the forking limitations of using Go. Handlers can be // we require because of the forking limitations of using Go. Handlers can be
// registered with a name and the argv 0 of the exec of the binary will be used // registered with a name and the argv 0 of the exec of the binary will be used
// to find and execute custom init paths. // to find and execute custom init paths.
package reexec package reexec
import ( import (

View File

@ -1,5 +1,6 @@
// Hook go-metrics into expvar // Hook go-metrics into expvar
// on any /debug/metrics request, load all vars from the registry into expvar, and execute regular expvar handler // on any /debug/metrics request, load all vars from the registry into expvar, and execute regular expvar handler
package exp package exp
import ( import (

View File

@ -39,7 +39,7 @@ func NewRegisteredGaugeInfo(name string, r Registry) *GaugeInfo {
return c return c
} }
// gaugeInfoSnapshot is a read-only copy of another GaugeInfo. // GaugeInfoSnapshot is a read-only copy of another GaugeInfo.
type GaugeInfoSnapshot GaugeInfoValue type GaugeInfoSnapshot GaugeInfoValue
// Value returns the value at the time the snapshot was taken. // Value returns the value at the time the snapshot was taken.

View File

@ -12,7 +12,7 @@ func Log(r Registry, freq time.Duration, l Logger) {
LogScaled(r, freq, time.Nanosecond, l) LogScaled(r, freq, time.Nanosecond, l)
} }
// Output each metric in the given registry periodically using the given // LogScaled outputs each metric in the given registry periodically using the given
// logger. Print timings in `scale` units (eg time.Millisecond) rather than nanos. // logger. Print timings in `scale` units (eg time.Millisecond) rather than nanos.
func LogScaled(r Registry, freq time.Duration, scale time.Duration, l Logger) { func LogScaled(r Registry, freq time.Duration, scale time.Duration, l Logger) {
du := float64(scale) du := float64(scale)

View File

@ -3,6 +3,7 @@
// <https://github.com/rcrowley/go-metrics> // <https://github.com/rcrowley/go-metrics>
// //
// Coda Hale's original work: <https://github.com/codahale/metrics> // Coda Hale's original work: <https://github.com/codahale/metrics>
package metrics package metrics
import ( import (

View File

@ -53,14 +53,14 @@ func (t *ResettingTimer) Snapshot() *ResettingTimerSnapshot {
return snapshot return snapshot
} }
// Record the duration of the execution of the given function. // Time records the duration of the execution of the given function.
func (t *ResettingTimer) Time(f func()) { func (t *ResettingTimer) Time(f func()) {
ts := time.Now() ts := time.Now()
f() f()
t.Update(time.Since(ts)) t.Update(time.Since(ts))
} }
// Record the duration of an event. // Update records the duration of an event.
func (t *ResettingTimer) Update(d time.Duration) { func (t *ResettingTimer) Update(d time.Duration) {
if !metricsEnabled { if !metricsEnabled {
return return
@ -71,7 +71,7 @@ func (t *ResettingTimer) Update(d time.Duration) {
t.sum += int64(d) t.sum += int64(d)
} }
// Record the duration of an event that started at a time and ends now. // UpdateSince records the duration of an event that started at a time and ends now.
func (t *ResettingTimer) UpdateSince(ts time.Time) { func (t *ResettingTimer) UpdateSince(ts time.Time) {
t.Update(time.Since(ts)) t.Update(time.Since(ts))
} }

View File

@ -9,7 +9,7 @@ import (
"time" "time"
) )
// Output each metric in the given registry to syslog periodically using // Syslog outputs each metric in the given registry to syslog periodically using
// the given syslogger. // the given syslogger.
func Syslog(r Registry, d time.Duration, w *syslog.Writer) { func Syslog(r Registry, d time.Duration, w *syslog.Writer) {
for range time.Tick(d) { for range time.Tick(d) {

View File

@ -145,7 +145,7 @@ func createMiner(t *testing.T) *Miner {
chainDB := rawdb.NewMemoryDatabase() chainDB := rawdb.NewMemoryDatabase()
triedb := triedb.NewDatabase(chainDB, nil) triedb := triedb.NewDatabase(chainDB, nil)
genesis := minerTestGenesisBlock(15, 11_500_000, common.HexToAddress("12345")) genesis := minerTestGenesisBlock(15, 11_500_000, common.HexToAddress("12345"))
chainConfig, _, err := core.SetupGenesisBlock(chainDB, triedb, genesis) chainConfig, _, _, err := core.SetupGenesisBlock(chainDB, triedb, genesis)
if err != nil { if err != nil {
t.Fatalf("can't create new chain config: %v", err) t.Fatalf("can't create new chain config: %v", err)
} }

View File

@ -249,20 +249,20 @@ func TestHandshake_BadHandshakeAttack(t *testing.T) {
net.nodeA.expectDecode(t, WhoareyouPacket, whoareyou) net.nodeA.expectDecode(t, WhoareyouPacket, whoareyou)
// A -> B FINDNODE // A -> B FINDNODE
incorrect_challenge := &Whoareyou{ incorrectChallenge := &Whoareyou{
IDNonce: [16]byte{5, 6, 7, 8, 9, 6, 11, 12}, IDNonce: [16]byte{5, 6, 7, 8, 9, 6, 11, 12},
RecordSeq: challenge.RecordSeq, RecordSeq: challenge.RecordSeq,
Node: challenge.Node, Node: challenge.Node,
sent: challenge.sent, sent: challenge.sent,
} }
incorrect_findnode, _ := net.nodeA.encodeWithChallenge(t, net.nodeB, incorrect_challenge, &Findnode{}) incorrectFindNode, _ := net.nodeA.encodeWithChallenge(t, net.nodeB, incorrectChallenge, &Findnode{})
incorrect_findnode2 := make([]byte, len(incorrect_findnode)) incorrectFindNode2 := make([]byte, len(incorrectFindNode))
copy(incorrect_findnode2, incorrect_findnode) copy(incorrectFindNode2, incorrectFindNode)
net.nodeB.expectDecodeErr(t, errInvalidNonceSig, incorrect_findnode) net.nodeB.expectDecodeErr(t, errInvalidNonceSig, incorrectFindNode)
// Reject new findnode as previous handshake is now deleted. // Reject new findnode as previous handshake is now deleted.
net.nodeB.expectDecodeErr(t, errUnexpectedHandshake, incorrect_findnode2) net.nodeB.expectDecodeErr(t, errUnexpectedHandshake, incorrectFindNode2)
// The findnode packet is again rejected even with a valid challenge this time. // The findnode packet is again rejected even with a valid challenge this time.
findnode, _ := net.nodeA.encodeWithChallenge(t, net.nodeB, challenge, &Findnode{}) findnode, _ := net.nodeA.encodeWithChallenge(t, net.nodeB, challenge, &Findnode{})

View File

@ -326,6 +326,19 @@ type ChainConfig struct {
DepositContractAddress common.Address `json:"depositContractAddress,omitempty"` DepositContractAddress common.Address `json:"depositContractAddress,omitempty"`
// EnableVerkleAtGenesis is a flag that specifies whether the network uses
// the Verkle tree starting from the genesis block. If set to true, the
// genesis state will be committed using the Verkle tree, eliminating the
// need for any Verkle transition later.
//
// This is a temporary flag only for verkle devnet testing, where verkle is
// activated at genesis, and the configured activation date has already passed.
//
// In production networks (mainnet and public testnets), verkle activation
// always occurs after the genesis block, making this flag irrelevant in
// those cases.
EnableVerkleAtGenesis bool `json:"enableVerkleAtGenesis,omitempty"`
// Various consensus engines // Various consensus engines
Ethash *EthashConfig `json:"ethash,omitempty"` Ethash *EthashConfig `json:"ethash,omitempty"`
Clique *CliqueConfig `json:"clique,omitempty"` Clique *CliqueConfig `json:"clique,omitempty"`
@ -525,6 +538,20 @@ func (c *ChainConfig) IsVerkle(num *big.Int, time uint64) bool {
return c.IsLondon(num) && isTimestampForked(c.VerkleTime, time) return c.IsLondon(num) && isTimestampForked(c.VerkleTime, time)
} }
// IsVerkleGenesis checks whether the verkle fork is activated at the genesis block.
//
// It reports the value of the EnableVerkleAtGenesis configuration flag: when
// set, the genesis state is committed using the Verkle tree, regardless of
// whether the local time has surpassed any configured fork activation time.
// This is a temporary workaround for verkle devnet testing, where verkle is
// activated at genesis, and the configured activation date has already passed.
//
// In production networks (mainnet and public testnets), verkle activation
// always occurs after the genesis block, making this function irrelevant in
// those cases.
func (c *ChainConfig) IsVerkleGenesis() bool {
	return c.EnableVerkleAtGenesis
}
// IsEIP4762 returns whether eip 4762 has been activated at given block. // IsEIP4762 returns whether eip 4762 has been activated at given block.
func (c *ChainConfig) IsEIP4762(num *big.Int, time uint64) bool { func (c *ChainConfig) IsEIP4762(num *big.Int, time uint64) bool {
return c.IsVerkle(num, time) return c.IsVerkle(num, time)

View File

@ -179,7 +179,7 @@ const (
HistoryServeWindow = 8192 // Number of blocks to serve historical block hashes for, EIP-2935. HistoryServeWindow = 8192 // Number of blocks to serve historical block hashes for, EIP-2935.
) )
// Gas discount table for BLS12-381 G1 and G2 multi exponentiation operations // Bls12381MultiExpDiscountTable gas discount table for BLS12-381 G1 and G2 multi exponentiation operations
var Bls12381MultiExpDiscountTable = [128]uint64{1200, 888, 764, 641, 594, 547, 500, 453, 438, 423, 408, 394, 379, 364, 349, 334, 330, 326, 322, 318, 314, 310, 306, 302, 298, 294, 289, 285, 281, 277, 273, 269, 268, 266, 265, 263, 262, 260, 259, 257, 256, 254, 253, 251, 250, 248, 247, 245, 244, 242, 241, 239, 238, 236, 235, 233, 232, 231, 229, 228, 226, 225, 223, 222, 221, 220, 219, 219, 218, 217, 216, 216, 215, 214, 213, 213, 212, 211, 211, 210, 209, 208, 208, 207, 206, 205, 205, 204, 203, 202, 202, 201, 200, 199, 199, 198, 197, 196, 196, 195, 194, 193, 193, 192, 191, 191, 190, 189, 188, 188, 187, 186, 185, 185, 184, 183, 182, 182, 181, 180, 179, 179, 178, 177, 176, 176, 175, 174} var Bls12381MultiExpDiscountTable = [128]uint64{1200, 888, 764, 641, 594, 547, 500, 453, 438, 423, 408, 394, 379, 364, 349, 334, 330, 326, 322, 318, 314, 310, 306, 302, 298, 294, 289, 285, 281, 277, 273, 269, 268, 266, 265, 263, 262, 260, 259, 257, 256, 254, 253, 251, 250, 248, 247, 245, 244, 242, 241, 239, 238, 236, 235, 233, 232, 231, 229, 228, 226, 225, 223, 222, 221, 220, 219, 219, 218, 217, 216, 216, 215, 214, 213, 213, 212, 211, 211, 210, 209, 208, 208, 207, 206, 205, 205, 204, 203, 202, 202, 201, 200, 199, 199, 198, 197, 196, 196, 195, 194, 193, 193, 192, 191, 191, 190, 189, 188, 188, 187, 186, 185, 185, 184, 183, 182, 182, 181, 180, 179, 179, 178, 177, 176, 176, 175, 174}
// Difficulty parameters. // Difficulty parameters.

View File

@ -664,7 +664,7 @@ func (api *SignerAPI) SignGnosisSafeTx(ctx context.Context, signerAddress common
return &gnosisTx, nil return &gnosisTx, nil
} }
// Returns the external api version. This method does not require user acceptance. Available methods are // Version returns the external api version. This method does not require user acceptance. Available methods are
// available via enumeration anyway, and this info does not contain user-specific data // available via enumeration anyway, and this info does not contain user-specific data
func (api *SignerAPI) Version(ctx context.Context) (string, error) { func (api *SignerAPI) Version(ctx context.Context) (string, error) {
return ExternalAPIVersion, nil return ExternalAPIVersion, nil

View File

@ -48,7 +48,7 @@ func NewUIServerAPI(extapi *SignerAPI) *UIServerAPI {
return &UIServerAPI{extapi, extapi.am} return &UIServerAPI{extapi, extapi.am}
} }
// List available accounts. As opposed to the external API definition, this method delivers // ListAccounts lists available accounts. As opposed to the external API definition, this method delivers
// the full Account object and not only Address. // the full Account object and not only Address.
// Example call // Example call
// {"jsonrpc":"2.0","method":"clef_listAccounts","params":[], "id":4} // {"jsonrpc":"2.0","method":"clef_listAccounts","params":[], "id":4}

View File

@ -16,7 +16,7 @@ var _ = (*stAuthorizationMarshaling)(nil)
// MarshalJSON marshals as JSON. // MarshalJSON marshals as JSON.
func (s stAuthorization) MarshalJSON() ([]byte, error) { func (s stAuthorization) MarshalJSON() ([]byte, error) {
type stAuthorization struct { type stAuthorization struct {
ChainID math.HexOrDecimal64 ChainID *math.HexOrDecimal256 `json:"chainId" gencodec:"required"`
Address common.Address `json:"address" gencodec:"required"` Address common.Address `json:"address" gencodec:"required"`
Nonce math.HexOrDecimal64 `json:"nonce" gencodec:"required"` Nonce math.HexOrDecimal64 `json:"nonce" gencodec:"required"`
V math.HexOrDecimal64 `json:"v" gencodec:"required"` V math.HexOrDecimal64 `json:"v" gencodec:"required"`
@ -24,7 +24,7 @@ func (s stAuthorization) MarshalJSON() ([]byte, error) {
S *math.HexOrDecimal256 `json:"s" gencodec:"required"` S *math.HexOrDecimal256 `json:"s" gencodec:"required"`
} }
var enc stAuthorization var enc stAuthorization
enc.ChainID = math.HexOrDecimal64(s.ChainID) enc.ChainID = (*math.HexOrDecimal256)(s.ChainID)
enc.Address = s.Address enc.Address = s.Address
enc.Nonce = math.HexOrDecimal64(s.Nonce) enc.Nonce = math.HexOrDecimal64(s.Nonce)
enc.V = math.HexOrDecimal64(s.V) enc.V = math.HexOrDecimal64(s.V)
@ -36,7 +36,7 @@ func (s stAuthorization) MarshalJSON() ([]byte, error) {
// UnmarshalJSON unmarshals from JSON. // UnmarshalJSON unmarshals from JSON.
func (s *stAuthorization) UnmarshalJSON(input []byte) error { func (s *stAuthorization) UnmarshalJSON(input []byte) error {
type stAuthorization struct { type stAuthorization struct {
ChainID *math.HexOrDecimal64 ChainID *math.HexOrDecimal256 `json:"chainId" gencodec:"required"`
Address *common.Address `json:"address" gencodec:"required"` Address *common.Address `json:"address" gencodec:"required"`
Nonce *math.HexOrDecimal64 `json:"nonce" gencodec:"required"` Nonce *math.HexOrDecimal64 `json:"nonce" gencodec:"required"`
V *math.HexOrDecimal64 `json:"v" gencodec:"required"` V *math.HexOrDecimal64 `json:"v" gencodec:"required"`
@ -47,9 +47,10 @@ func (s *stAuthorization) UnmarshalJSON(input []byte) error {
if err := json.Unmarshal(input, &dec); err != nil { if err := json.Unmarshal(input, &dec); err != nil {
return err return err
} }
if dec.ChainID != nil { if dec.ChainID == nil {
s.ChainID = uint64(*dec.ChainID) return errors.New("missing required field 'chainId' for stAuthorization")
} }
s.ChainID = (*big.Int)(dec.ChainID)
if dec.Address == nil { if dec.Address == nil {
return errors.New("missing required field 'address' for stAuthorization") return errors.New("missing required field 'address' for stAuthorization")
} }

View File

@ -140,7 +140,7 @@ type stTransactionMarshaling struct {
// Authorization is an authorization from an account to deploy code at it's address. // Authorization is an authorization from an account to deploy code at it's address.
type stAuthorization struct { type stAuthorization struct {
ChainID uint64 ChainID *big.Int `json:"chainId" gencodec:"required"`
Address common.Address `json:"address" gencodec:"required"` Address common.Address `json:"address" gencodec:"required"`
Nonce uint64 `json:"nonce" gencodec:"required"` Nonce uint64 `json:"nonce" gencodec:"required"`
V uint8 `json:"v" gencodec:"required"` V uint8 `json:"v" gencodec:"required"`
@ -150,7 +150,7 @@ type stAuthorization struct {
// field type overrides for gencodec // field type overrides for gencodec
type stAuthorizationMarshaling struct { type stAuthorizationMarshaling struct {
ChainID math.HexOrDecimal64 ChainID *math.HexOrDecimal256
Nonce math.HexOrDecimal64 Nonce math.HexOrDecimal64
V math.HexOrDecimal64 V math.HexOrDecimal64
R *math.HexOrDecimal256 R *math.HexOrDecimal256
@ -339,7 +339,7 @@ func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapsh
st.StateDB.AddBalance(block.Coinbase(), new(uint256.Int), tracing.BalanceChangeUnspecified) st.StateDB.AddBalance(block.Coinbase(), new(uint256.Int), tracing.BalanceChangeUnspecified)
// Commit state mutations into database. // Commit state mutations into database.
root, _ = st.StateDB.Commit(block.NumberU64(), config.IsEIP158(block.Number())) root, _ = st.StateDB.Commit(block.NumberU64(), config.IsEIP158(block.Number()), config.IsCancun(block.Number(), block.Time()))
if tracer := evm.Config.Tracer; tracer != nil && tracer.OnTxEnd != nil { if tracer := evm.Config.Tracer; tracer != nil && tracer.OnTxEnd != nil {
receipt := &types.Receipt{GasUsed: vmRet.UsedGas} receipt := &types.Receipt{GasUsed: vmRet.UsedGas}
tracer.OnTxEnd(receipt, nil) tracer.OnTxEnd(receipt, nil)
@ -446,7 +446,7 @@ func (tx *stTransaction) toMessage(ps stPostState, baseFee *big.Int) (*core.Mess
authList = make([]types.SetCodeAuthorization, len(tx.AuthorizationList)) authList = make([]types.SetCodeAuthorization, len(tx.AuthorizationList))
for i, auth := range tx.AuthorizationList { for i, auth := range tx.AuthorizationList {
authList[i] = types.SetCodeAuthorization{ authList[i] = types.SetCodeAuthorization{
ChainID: auth.ChainID, ChainID: *uint256.MustFromBig(auth.ChainID),
Address: auth.Address, Address: auth.Address,
Nonce: auth.Nonce, Nonce: auth.Nonce,
V: auth.V, V: auth.V,
@ -512,7 +512,7 @@ func MakePreState(db ethdb.Database, accounts types.GenesisAlloc, snapshotter bo
} }
} }
// Commit and re-open to start with a clean state. // Commit and re-open to start with a clean state.
root, _ := statedb.Commit(0, false) root, _ := statedb.Commit(0, false, false)
// If snapshot is requested, initialize the snapshotter and use it in state. // If snapshot is requested, initialize the snapshotter and use it in state.
var snaps *snapshot.Tree var snaps *snapshot.Tree

View File

@ -64,10 +64,6 @@ type backend interface {
// state. An error will be returned if the specified state is not available. // state. An error will be returned if the specified state is not available.
StateReader(root common.Hash) (database.StateReader, error) StateReader(root common.Hash) (database.StateReader, error)
// Initialized returns an indicator if the state data is already initialized
// according to the state scheme.
Initialized(genesisRoot common.Hash) bool
// Size returns the current storage size of the diff layers on top of the // Size returns the current storage size of the diff layers on top of the
// disk layer and the storage size of the nodes cached in the disk layer. // disk layer and the storage size of the nodes cached in the disk layer.
// //
@ -178,12 +174,6 @@ func (db *Database) Size() (common.StorageSize, common.StorageSize, common.Stora
return diffs, nodes, preimages return diffs, nodes, preimages
} }
// Initialized returns an indicator if the state data is already initialized
// according to the state scheme.
func (db *Database) Initialized(genesisRoot common.Hash) bool {
return db.backend.Initialized(genesisRoot)
}
// Scheme returns the node scheme used in the database. // Scheme returns the node scheme used in the database.
func (db *Database) Scheme() string { func (db *Database) Scheme() string {
if db.config.PathDB != nil { if db.config.PathDB != nil {

View File

@ -532,12 +532,6 @@ func (c *cleaner) Delete(key []byte) error {
panic("not implemented") panic("not implemented")
} }
// Initialized returns an indicator if state data is already initialized
// in hash-based scheme by checking the presence of genesis state.
func (db *Database) Initialized(genesisRoot common.Hash) bool {
return rawdb.HasLegacyTrieNode(db.diskdb, genesisRoot)
}
// Update inserts the dirty nodes in provided nodeset into database and link the // Update inserts the dirty nodes in provided nodeset into database and link the
// account trie with multiple storage tries if necessary. // account trie with multiple storage tries if necessary.
func (db *Database) Update(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet) error { func (db *Database) Update(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet) error {

View File

@ -46,7 +46,7 @@ func newBuffer(limit int, nodes *nodeSet, states *stateSet, layers uint64) *buff
nodes = newNodeSet(nil) nodes = newNodeSet(nil)
} }
if states == nil { if states == nil {
states = newStates(nil, nil) states = newStates(nil, nil, false)
} }
return &buffer{ return &buffer{
layers: layers, layers: layers,

View File

@ -529,21 +529,6 @@ func (db *Database) Size() (diffs common.StorageSize, nodes common.StorageSize)
return diffs, nodes return diffs, nodes
} }
// Initialized returns an indicator if the state data is already
// initialized in path-based scheme.
func (db *Database) Initialized(genesisRoot common.Hash) bool {
var inited bool
db.tree.forEach(func(layer layer) {
if layer.rootHash() != types.EmptyRootHash {
inited = true
}
})
if !inited {
inited = rawdb.ReadSnapSyncStatusFlag(db.diskdb) != rawdb.StateSyncUnknown
}
return inited
}
// modifyAllowed returns the indicator if mutation is allowed. This function // modifyAllowed returns the indicator if mutation is allowed. This function
// assumes the db.lock is already held. // assumes the db.lock is already held.
func (db *Database) modifyAllowed() error { func (db *Database) modifyAllowed() error {

View File

@ -91,10 +91,28 @@ func newCtx(stateRoot common.Hash) *genctx {
} }
} }
// storageOriginSet returns the recorded storage-origin data. When rawStorageKey
// is set, every hashed slot key is translated back to its preimage via the
// tester's preimage table; otherwise the hashed-key map is returned unchanged.
func (ctx *genctx) storageOriginSet(rawStorageKey bool, t *tester) map[common.Address]map[common.Hash][]byte {
	if !rawStorageKey {
		return ctx.storageOrigin
	}
	resolved := make(map[common.Address]map[common.Hash][]byte, len(ctx.storageOrigin))
	for addr, slots := range ctx.storageOrigin {
		sub := make(map[common.Hash][]byte, len(slots))
		for hashed, val := range slots {
			sub[t.hashPreimage(hashed)] = val
		}
		resolved[addr] = sub
	}
	return resolved
}
type tester struct { type tester struct {
db *Database db *Database
roots []common.Hash roots []common.Hash
preimages map[common.Hash]common.Address preimages map[common.Hash][]byte
// current state set
accounts map[common.Hash][]byte accounts map[common.Hash][]byte
storages map[common.Hash]map[common.Hash][]byte storages map[common.Hash]map[common.Hash][]byte
@ -103,17 +121,17 @@ type tester struct {
snapStorages map[common.Hash]map[common.Hash]map[common.Hash][]byte snapStorages map[common.Hash]map[common.Hash]map[common.Hash][]byte
} }
func newTester(t *testing.T, historyLimit uint64) *tester { func newTester(t *testing.T, historyLimit uint64, isVerkle bool) *tester {
var ( var (
disk, _ = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false) disk, _ = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
db = New(disk, &Config{ db = New(disk, &Config{
StateHistory: historyLimit, StateHistory: historyLimit,
CleanCacheSize: 16 * 1024, CleanCacheSize: 16 * 1024,
WriteBufferSize: 16 * 1024, WriteBufferSize: 16 * 1024,
}, false) }, isVerkle)
obj = &tester{ obj = &tester{
db: db, db: db,
preimages: make(map[common.Hash]common.Address), preimages: make(map[common.Hash][]byte),
accounts: make(map[common.Hash][]byte), accounts: make(map[common.Hash][]byte),
storages: make(map[common.Hash]map[common.Hash][]byte), storages: make(map[common.Hash]map[common.Hash][]byte),
snapAccounts: make(map[common.Hash]map[common.Hash][]byte), snapAccounts: make(map[common.Hash]map[common.Hash][]byte),
@ -125,7 +143,8 @@ func newTester(t *testing.T, historyLimit uint64) *tester {
if len(obj.roots) != 0 { if len(obj.roots) != 0 {
parent = obj.roots[len(obj.roots)-1] parent = obj.roots[len(obj.roots)-1]
} }
root, nodes, states := obj.generate(parent) root, nodes, states := obj.generate(parent, i > 6)
if err := db.Update(root, parent, uint64(i), nodes, states); err != nil { if err := db.Update(root, parent, uint64(i), nodes, states); err != nil {
panic(fmt.Errorf("failed to update state changes, err: %w", err)) panic(fmt.Errorf("failed to update state changes, err: %w", err))
} }
@ -134,6 +153,14 @@ func newTester(t *testing.T, historyLimit uint64) *tester {
return obj return obj
} }
func (t *tester) accountPreimage(hash common.Hash) common.Address {
return common.BytesToAddress(t.preimages[hash])
}
func (t *tester) hashPreimage(hash common.Hash) common.Hash {
return common.BytesToHash(t.preimages[hash])
}
func (t *tester) release() { func (t *tester) release() {
t.db.Close() t.db.Close()
t.db.diskdb.Close() t.db.diskdb.Close()
@ -141,7 +168,7 @@ func (t *tester) release() {
func (t *tester) randAccount() (common.Address, []byte) { func (t *tester) randAccount() (common.Address, []byte) {
for addrHash, account := range t.accounts { for addrHash, account := range t.accounts {
return t.preimages[addrHash], account return t.accountPreimage(addrHash), account
} }
return common.Address{}, nil return common.Address{}, nil
} }
@ -154,7 +181,9 @@ func (t *tester) generateStorage(ctx *genctx, addr common.Address) common.Hash {
) )
for i := 0; i < 10; i++ { for i := 0; i < 10; i++ {
v, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(testrand.Bytes(32))) v, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(testrand.Bytes(32)))
hash := testrand.Hash() key := testrand.Bytes(32)
hash := crypto.Keccak256Hash(key)
t.preimages[hash] = key
storage[hash] = v storage[hash] = v
origin[hash] = nil origin[hash] = nil
@ -183,7 +212,9 @@ func (t *tester) mutateStorage(ctx *genctx, addr common.Address, root common.Has
} }
for i := 0; i < 3; i++ { for i := 0; i < 3; i++ {
v, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(testrand.Bytes(32))) v, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(testrand.Bytes(32)))
hash := testrand.Hash() key := testrand.Bytes(32)
hash := crypto.Keccak256Hash(key)
t.preimages[hash] = key
storage[hash] = v storage[hash] = v
origin[hash] = nil origin[hash] = nil
@ -216,7 +247,7 @@ func (t *tester) clearStorage(ctx *genctx, addr common.Address, root common.Hash
return root return root
} }
func (t *tester) generate(parent common.Hash) (common.Hash, *trienode.MergedNodeSet, *StateSetWithOrigin) { func (t *tester) generate(parent common.Hash, rawStorageKey bool) (common.Hash, *trienode.MergedNodeSet, *StateSetWithOrigin) {
var ( var (
ctx = newCtx(parent) ctx = newCtx(parent)
dirties = make(map[common.Hash]struct{}) dirties = make(map[common.Hash]struct{})
@ -232,9 +263,12 @@ func (t *tester) generate(parent common.Hash) (common.Hash, *trienode.MergedNode
// account creation // account creation
addr := testrand.Address() addr := testrand.Address()
addrHash := crypto.Keccak256Hash(addr.Bytes()) addrHash := crypto.Keccak256Hash(addr.Bytes())
// short circuit if the account was already existent
if _, ok := t.accounts[addrHash]; ok { if _, ok := t.accounts[addrHash]; ok {
continue continue
} }
// short circuit if the account has been modified within the same transition
if _, ok := dirties[addrHash]; ok { if _, ok := dirties[addrHash]; ok {
continue continue
} }
@ -243,7 +277,7 @@ func (t *tester) generate(parent common.Hash) (common.Hash, *trienode.MergedNode
root := t.generateStorage(ctx, addr) root := t.generateStorage(ctx, addr)
ctx.accounts[addrHash] = types.SlimAccountRLP(generateAccount(root)) ctx.accounts[addrHash] = types.SlimAccountRLP(generateAccount(root))
ctx.accountOrigin[addr] = nil ctx.accountOrigin[addr] = nil
t.preimages[addrHash] = addr t.preimages[addrHash] = addr.Bytes()
case modifyAccountOp: case modifyAccountOp:
// account mutation // account mutation
@ -252,6 +286,8 @@ func (t *tester) generate(parent common.Hash) (common.Hash, *trienode.MergedNode
continue continue
} }
addrHash := crypto.Keccak256Hash(addr.Bytes()) addrHash := crypto.Keccak256Hash(addr.Bytes())
// short circuit if the account has been modified within the same transition
if _, ok := dirties[addrHash]; ok { if _, ok := dirties[addrHash]; ok {
continue continue
} }
@ -271,6 +307,8 @@ func (t *tester) generate(parent common.Hash) (common.Hash, *trienode.MergedNode
continue continue
} }
addrHash := crypto.Keccak256Hash(addr.Bytes()) addrHash := crypto.Keccak256Hash(addr.Bytes())
// short circuit if the account has been modified within the same transition
if _, ok := dirties[addrHash]; ok { if _, ok := dirties[addrHash]; ok {
continue continue
} }
@ -314,7 +352,8 @@ func (t *tester) generate(parent common.Hash) (common.Hash, *trienode.MergedNode
delete(t.storages, addrHash) delete(t.storages, addrHash)
} }
} }
return root, ctx.nodes, NewStateSetWithOrigin(ctx.accounts, ctx.storages, ctx.accountOrigin, ctx.storageOrigin) storageOrigin := ctx.storageOriginSet(rawStorageKey, t)
return root, ctx.nodes, NewStateSetWithOrigin(ctx.accounts, ctx.storages, ctx.accountOrigin, storageOrigin, rawStorageKey)
} }
// lastHash returns the latest root hash, or empty if nothing is cached. // lastHash returns the latest root hash, or empty if nothing is cached.
@ -409,7 +448,7 @@ func TestDatabaseRollback(t *testing.T) {
}() }()
// Verify state histories // Verify state histories
tester := newTester(t, 0) tester := newTester(t, 0, false)
defer tester.release() defer tester.release()
if err := tester.verifyHistory(); err != nil { if err := tester.verifyHistory(); err != nil {
@ -443,7 +482,7 @@ func TestDatabaseRecoverable(t *testing.T) {
}() }()
var ( var (
tester = newTester(t, 0) tester = newTester(t, 0, false)
index = tester.bottomIndex() index = tester.bottomIndex()
) )
defer tester.release() defer tester.release()
@ -487,7 +526,7 @@ func TestDisable(t *testing.T) {
maxDiffLayers = 128 maxDiffLayers = 128
}() }()
tester := newTester(t, 0) tester := newTester(t, 0, false)
defer tester.release() defer tester.release()
stored := crypto.Keccak256Hash(rawdb.ReadAccountTrieNode(tester.db.diskdb, nil)) stored := crypto.Keccak256Hash(rawdb.ReadAccountTrieNode(tester.db.diskdb, nil))
@ -529,7 +568,7 @@ func TestCommit(t *testing.T) {
maxDiffLayers = 128 maxDiffLayers = 128
}() }()
tester := newTester(t, 0) tester := newTester(t, 0, false)
defer tester.release() defer tester.release()
if err := tester.db.Commit(tester.lastHash(), false); err != nil { if err := tester.db.Commit(tester.lastHash(), false); err != nil {
@ -559,7 +598,7 @@ func TestJournal(t *testing.T) {
maxDiffLayers = 128 maxDiffLayers = 128
}() }()
tester := newTester(t, 0) tester := newTester(t, 0, false)
defer tester.release() defer tester.release()
if err := tester.db.Journal(tester.lastHash()); err != nil { if err := tester.db.Journal(tester.lastHash()); err != nil {
@ -589,7 +628,7 @@ func TestCorruptedJournal(t *testing.T) {
maxDiffLayers = 128 maxDiffLayers = 128
}() }()
tester := newTester(t, 0) tester := newTester(t, 0, false)
defer tester.release() defer tester.release()
if err := tester.db.Journal(tester.lastHash()); err != nil { if err := tester.db.Journal(tester.lastHash()); err != nil {
@ -637,7 +676,7 @@ func TestTailTruncateHistory(t *testing.T) {
maxDiffLayers = 128 maxDiffLayers = 128
}() }()
tester := newTester(t, 10) tester := newTester(t, 10, false)
defer tester.release() defer tester.release()
tester.db.Close() tester.db.Close()

View File

@ -76,7 +76,7 @@ func benchmarkSearch(b *testing.B, depth int, total int) {
nblob = common.CopyBytes(blob) nblob = common.CopyBytes(blob)
} }
} }
return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil, nil, nil)) return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil, nil, nil, false))
} }
var layer layer var layer layer
layer = emptyLayer() layer = emptyLayer()
@ -118,7 +118,7 @@ func BenchmarkPersist(b *testing.B) {
) )
nodes[common.Hash{}][string(path)] = node nodes[common.Hash{}][string(path)] = node
} }
return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil, nil, nil)) return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil, nil, nil, false))
} }
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
b.StopTimer() b.StopTimer()
@ -156,7 +156,7 @@ func BenchmarkJournal(b *testing.B) {
) )
nodes[common.Hash{}][string(path)] = node nodes[common.Hash{}][string(path)] = node
} }
return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil, nil, nil)) return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil, nil, nil, false))
} }
var layer layer var layer layer
layer = emptyLayer() layer = emptyLayer()

View File

@ -316,7 +316,7 @@ func (dl *diskLayer) revert(h *history) (*diskLayer, error) {
// Apply the reverse state changes upon the current state. This must // Apply the reverse state changes upon the current state. This must
// be done before holding the lock in order to access state in "this" // be done before holding the lock in order to access state in "this"
// layer. // layer.
nodes, err := apply(dl.db, h.meta.parent, h.meta.root, h.accounts, h.storages) nodes, err := apply(dl.db, h.meta.parent, h.meta.root, h.meta.version != stateHistoryV0, h.accounts, h.storages)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -35,6 +35,7 @@ type context struct {
accounts map[common.Address][]byte accounts map[common.Address][]byte
storages map[common.Address]map[common.Hash][]byte storages map[common.Address]map[common.Hash][]byte
nodes *trienode.MergedNodeSet nodes *trienode.MergedNodeSet
rawStorageKey bool
// TODO (rjl493456442) abstract out the state hasher // TODO (rjl493456442) abstract out the state hasher
// for supporting verkle tree. // for supporting verkle tree.
@ -43,7 +44,7 @@ type context struct {
// apply processes the given state diffs, updates the corresponding post-state // apply processes the given state diffs, updates the corresponding post-state
// and returns the trie nodes that have been modified. // and returns the trie nodes that have been modified.
func apply(db database.NodeDatabase, prevRoot common.Hash, postRoot common.Hash, accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte) (map[common.Hash]map[string]*trienode.Node, error) { func apply(db database.NodeDatabase, prevRoot common.Hash, postRoot common.Hash, rawStorageKey bool, accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte) (map[common.Hash]map[string]*trienode.Node, error) {
tr, err := trie.New(trie.TrieID(postRoot), db) tr, err := trie.New(trie.TrieID(postRoot), db)
if err != nil { if err != nil {
return nil, err return nil, err
@ -54,6 +55,7 @@ func apply(db database.NodeDatabase, prevRoot common.Hash, postRoot common.Hash,
accounts: accounts, accounts: accounts,
storages: storages, storages: storages,
accountTrie: tr, accountTrie: tr,
rawStorageKey: rawStorageKey,
nodes: trienode.NewMergedNodeSet(), nodes: trienode.NewMergedNodeSet(),
} }
for addr, account := range accounts { for addr, account := range accounts {
@ -109,11 +111,15 @@ func updateAccount(ctx *context, db database.NodeDatabase, addr common.Address)
return err return err
} }
for key, val := range ctx.storages[addr] { for key, val := range ctx.storages[addr] {
tkey := key
if ctx.rawStorageKey {
tkey = h.hash(key.Bytes())
}
var err error var err error
if len(val) == 0 { if len(val) == 0 {
err = st.Delete(key.Bytes()) err = st.Delete(tkey.Bytes())
} else { } else {
err = st.Update(key.Bytes(), val) err = st.Update(tkey.Bytes(), val)
} }
if err != nil { if err != nil {
return err return err
@ -166,7 +172,11 @@ func deleteAccount(ctx *context, db database.NodeDatabase, addr common.Address)
if len(val) != 0 { if len(val) != 0 {
return errors.New("expect storage deletion") return errors.New("expect storage deletion")
} }
if err := st.Delete(key.Bytes()); err != nil { tkey := key
if ctx.rawStorageKey {
tkey = h.hash(key.Bytes())
}
if err := st.Delete(tkey.Bytes()); err != nil {
return err return err
} }
} }

View File

@ -68,7 +68,8 @@ const (
slotIndexSize = common.HashLength + 5 // The length of encoded slot index slotIndexSize = common.HashLength + 5 // The length of encoded slot index
historyMetaSize = 9 + 2*common.HashLength // The length of encoded history meta historyMetaSize = 9 + 2*common.HashLength // The length of encoded history meta
stateHistoryVersion = uint8(0) // initial version of state history structure. stateHistoryV0 = uint8(0) // initial version of state history structure
stateHistoryV1 = uint8(1) // use the storage slot raw key as the identifier instead of the key hash
) )
// Each state history entry is consisted of five elements: // Each state history entry is consisted of five elements:
@ -169,7 +170,10 @@ func (i *accountIndex) decode(blob []byte) {
// slotIndex describes the metadata belonging to a storage slot. // slotIndex describes the metadata belonging to a storage slot.
type slotIndex struct { type slotIndex struct {
hash common.Hash // The hash of slot key // the identifier of the storage slot. Specifically
// in v0, it's the hash of the raw storage slot key (32 bytes);
// in v1, it's the raw storage slot key (32 bytes);
id common.Hash
length uint8 // The length of storage slot, up to 32 bytes defined in protocol length uint8 // The length of storage slot, up to 32 bytes defined in protocol
offset uint32 // The offset of item in storage slot data table offset uint32 // The offset of item in storage slot data table
} }
@ -177,7 +181,7 @@ type slotIndex struct {
// encode packs slot index into byte stream. // encode packs slot index into byte stream.
func (i *slotIndex) encode() []byte { func (i *slotIndex) encode() []byte {
var buf [slotIndexSize]byte var buf [slotIndexSize]byte
copy(buf[:common.HashLength], i.hash.Bytes()) copy(buf[:common.HashLength], i.id.Bytes())
buf[common.HashLength] = i.length buf[common.HashLength] = i.length
binary.BigEndian.PutUint32(buf[common.HashLength+1:], i.offset) binary.BigEndian.PutUint32(buf[common.HashLength+1:], i.offset)
return buf[:] return buf[:]
@ -185,7 +189,7 @@ func (i *slotIndex) encode() []byte {
// decode unpack slot index from the byte stream. // decode unpack slot index from the byte stream.
func (i *slotIndex) decode(blob []byte) { func (i *slotIndex) decode(blob []byte) {
i.hash = common.BytesToHash(blob[:common.HashLength]) i.id = common.BytesToHash(blob[:common.HashLength])
i.length = blob[common.HashLength] i.length = blob[common.HashLength]
i.offset = binary.BigEndian.Uint32(blob[common.HashLength+1:]) i.offset = binary.BigEndian.Uint32(blob[common.HashLength+1:])
} }
@ -214,7 +218,7 @@ func (m *meta) decode(blob []byte) error {
return errors.New("no version tag") return errors.New("no version tag")
} }
switch blob[0] { switch blob[0] {
case stateHistoryVersion: case stateHistoryV0, stateHistoryV1:
if len(blob) != historyMetaSize { if len(blob) != historyMetaSize {
return fmt.Errorf("invalid state history meta, len: %d", len(blob)) return fmt.Errorf("invalid state history meta, len: %d", len(blob))
} }
@ -242,7 +246,7 @@ type history struct {
} }
// newHistory constructs the state history object with provided state change set. // newHistory constructs the state history object with provided state change set.
func newHistory(root common.Hash, parent common.Hash, block uint64, accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte) *history { func newHistory(root common.Hash, parent common.Hash, block uint64, accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte, rawStorageKey bool) *history {
var ( var (
accountList = maps.Keys(accounts) accountList = maps.Keys(accounts)
storageList = make(map[common.Address][]common.Hash) storageList = make(map[common.Address][]common.Hash)
@ -254,9 +258,13 @@ func newHistory(root common.Hash, parent common.Hash, block uint64, accounts map
slices.SortFunc(slist, common.Hash.Cmp) slices.SortFunc(slist, common.Hash.Cmp)
storageList[addr] = slist storageList[addr] = slist
} }
version := stateHistoryV0
if rawStorageKey {
version = stateHistoryV1
}
return &history{ return &history{
meta: &meta{ meta: &meta{
version: stateHistoryVersion, version: version,
parent: parent, parent: parent,
root: root, root: root,
block: block, block: block,
@ -289,7 +297,7 @@ func (h *history) encode() ([]byte, []byte, []byte, []byte) {
// Encode storage slots in order // Encode storage slots in order
for _, slotHash := range h.storageList[addr] { for _, slotHash := range h.storageList[addr] {
sIndex := slotIndex{ sIndex := slotIndex{
hash: slotHash, id: slotHash,
length: uint8(len(slots[slotHash])), length: uint8(len(slots[slotHash])),
offset: uint32(len(storageData)), offset: uint32(len(storageData)),
} }
@ -377,7 +385,7 @@ func (r *decoder) readAccount(pos int) (accountIndex, []byte, error) {
// readStorage parses the storage slots from the byte stream with specified account. // readStorage parses the storage slots from the byte stream with specified account.
func (r *decoder) readStorage(accIndex accountIndex) ([]common.Hash, map[common.Hash][]byte, error) { func (r *decoder) readStorage(accIndex accountIndex) ([]common.Hash, map[common.Hash][]byte, error) {
var ( var (
last common.Hash last *common.Hash
count = int(accIndex.storageSlots) count = int(accIndex.storageSlots)
list = make([]common.Hash, 0, count) list = make([]common.Hash, 0, count)
storage = make(map[common.Hash][]byte, count) storage = make(map[common.Hash][]byte, count)
@ -402,8 +410,10 @@ func (r *decoder) readStorage(accIndex accountIndex) ([]common.Hash, map[common.
} }
index.decode(r.storageIndexes[start:end]) index.decode(r.storageIndexes[start:end])
if bytes.Compare(last.Bytes(), index.hash.Bytes()) >= 0 { if last != nil {
return nil, nil, errors.New("storage slot is not in order") if bytes.Compare(last.Bytes(), index.id.Bytes()) >= 0 {
return nil, nil, fmt.Errorf("storage slot is not in order, last: %x, current: %x", *last, index.id)
}
} }
if index.offset != r.lastSlotDataRead { if index.offset != r.lastSlotDataRead {
return nil, nil, errors.New("storage data buffer is gapped") return nil, nil, errors.New("storage data buffer is gapped")
@ -412,10 +422,10 @@ func (r *decoder) readStorage(accIndex accountIndex) ([]common.Hash, map[common.
if uint32(len(r.storageData)) < sEnd { if uint32(len(r.storageData)) < sEnd {
return nil, nil, errors.New("storage data buffer is corrupted") return nil, nil, errors.New("storage data buffer is corrupted")
} }
storage[index.hash] = r.storageData[r.lastSlotDataRead:sEnd] storage[index.id] = r.storageData[r.lastSlotDataRead:sEnd]
list = append(list, index.hash) list = append(list, index.id)
last = index.hash last = &index.id
r.lastSlotIndexRead = end r.lastSlotIndexRead = end
r.lastSlotDataRead = sEnd r.lastSlotDataRead = sEnd
} }
@ -498,7 +508,7 @@ func writeHistory(writer ethdb.AncientWriter, dl *diffLayer) error {
} }
var ( var (
start = time.Now() start = time.Now()
history = newHistory(dl.rootHash(), dl.parentLayer().rootHash(), dl.block, dl.states.accountOrigin, dl.states.storageOrigin) history = newHistory(dl.rootHash(), dl.parentLayer().rootHash(), dl.block, dl.states.accountOrigin, dl.states.storageOrigin, dl.states.rawStorageKey)
) )
accountData, storageData, accountIndex, storageIndex := history.encode() accountData, storageData, accountIndex, storageIndex := history.encode()
dataSize := common.StorageSize(len(accountData) + len(storageData)) dataSize := common.StorageSize(len(accountData) + len(storageData))

View File

@ -21,6 +21,7 @@ import (
"time" "time"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
) )
@ -109,12 +110,17 @@ func accountHistory(freezer ethdb.AncientReader, address common.Address, start,
// storageHistory inspects the storage history within the range. // storageHistory inspects the storage history within the range.
func storageHistory(freezer ethdb.AncientReader, address common.Address, slot common.Hash, start uint64, end uint64) (*HistoryStats, error) { func storageHistory(freezer ethdb.AncientReader, address common.Address, slot common.Hash, start uint64, end uint64) (*HistoryStats, error) {
slotHash := crypto.Keccak256Hash(slot.Bytes())
return inspectHistory(freezer, start, end, func(h *history, stats *HistoryStats) { return inspectHistory(freezer, start, end, func(h *history, stats *HistoryStats) {
slots, exists := h.storages[address] slots, exists := h.storages[address]
if !exists { if !exists {
return return
} }
blob, exists := slots[slot] key := slotHash
if h.meta.version != stateHistoryV0 {
key = slot
}
blob, exists := slots[key]
if !exists { if !exists {
return return
} }

View File

@ -49,9 +49,9 @@ func randomStateSet(n int) (map[common.Address][]byte, map[common.Address]map[co
return accounts, storages return accounts, storages
} }
func makeHistory() *history { func makeHistory(rawStorageKey bool) *history {
accounts, storages := randomStateSet(3) accounts, storages := randomStateSet(3)
return newHistory(testrand.Hash(), types.EmptyRootHash, 0, accounts, storages) return newHistory(testrand.Hash(), types.EmptyRootHash, 0, accounts, storages, rawStorageKey)
} }
func makeHistories(n int) []*history { func makeHistories(n int) []*history {
@ -62,7 +62,7 @@ func makeHistories(n int) []*history {
for i := 0; i < n; i++ { for i := 0; i < n; i++ {
root := testrand.Hash() root := testrand.Hash()
accounts, storages := randomStateSet(3) accounts, storages := randomStateSet(3)
h := newHistory(root, parent, uint64(i), accounts, storages) h := newHistory(root, parent, uint64(i), accounts, storages, false)
parent = root parent = root
result = append(result, h) result = append(result, h)
} }
@ -70,10 +70,15 @@ func makeHistories(n int) []*history {
} }
func TestEncodeDecodeHistory(t *testing.T) { func TestEncodeDecodeHistory(t *testing.T) {
testEncodeDecodeHistory(t, false)
testEncodeDecodeHistory(t, true)
}
func testEncodeDecodeHistory(t *testing.T, rawStorageKey bool) {
var ( var (
m meta m meta
dec history dec history
obj = makeHistory() obj = makeHistory(rawStorageKey)
) )
// check if meta data can be correctly encode/decode // check if meta data can be correctly encode/decode
blob := obj.meta.encode() blob := obj.meta.encode()

View File

@ -131,7 +131,7 @@ func TestAccountIteratorBasics(t *testing.T) {
storage[hash] = accStorage storage[hash] = accStorage
} }
} }
states := newStates(accounts, storage) states := newStates(accounts, storage, false)
it := newDiffAccountIterator(common.Hash{}, states, nil) it := newDiffAccountIterator(common.Hash{}, states, nil)
verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator
@ -171,7 +171,7 @@ func TestStorageIteratorBasics(t *testing.T) {
storage[hash] = accStorage storage[hash] = accStorage
nilStorage[hash] = nilstorage nilStorage[hash] = nilstorage
} }
states := newStates(accounts, storage) states := newStates(accounts, storage, false)
for account := range accounts { for account := range accounts {
it := newDiffStorageIterator(account, common.Hash{}, states, nil) it := newDiffStorageIterator(account, common.Hash{}, states, nil)
verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator
@ -267,13 +267,13 @@ func TestAccountIteratorTraversal(t *testing.T) {
// Stack three diff layers on top with various overlaps // Stack three diff layers on top with various overlaps
db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 0, trienode.NewMergedNodeSet(), db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 0, trienode.NewMergedNodeSet(),
NewStateSetWithOrigin(randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil, nil, nil)) NewStateSetWithOrigin(randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil, nil, nil, false))
db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 0, trienode.NewMergedNodeSet(), db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 0, trienode.NewMergedNodeSet(),
NewStateSetWithOrigin(randomAccountSet("0xbb", "0xdd", "0xf0"), nil, nil, nil)) NewStateSetWithOrigin(randomAccountSet("0xbb", "0xdd", "0xf0"), nil, nil, nil, false))
db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 0, trienode.NewMergedNodeSet(), db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 0, trienode.NewMergedNodeSet(),
NewStateSetWithOrigin(randomAccountSet("0xcc", "0xf0", "0xff"), nil, nil, nil)) NewStateSetWithOrigin(randomAccountSet("0xcc", "0xf0", "0xff"), nil, nil, nil, false))
// Verify the single and multi-layer iterators // Verify the single and multi-layer iterators
head := db.tree.get(common.HexToHash("0x04")) head := db.tree.get(common.HexToHash("0x04"))
@ -314,13 +314,13 @@ func TestStorageIteratorTraversal(t *testing.T) {
// Stack three diff layers on top with various overlaps // Stack three diff layers on top with various overlaps
db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 0, trienode.NewMergedNodeSet(), db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 0, trienode.NewMergedNodeSet(),
NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil), nil, nil)) NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil), nil, nil, false))
db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 0, trienode.NewMergedNodeSet(), db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 0, trienode.NewMergedNodeSet(),
NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x04", "0x05", "0x06"}}, nil), nil, nil)) NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x04", "0x05", "0x06"}}, nil), nil, nil, false))
db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 0, trienode.NewMergedNodeSet(), db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 0, trienode.NewMergedNodeSet(),
NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil), nil, nil)) NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil), nil, nil, false))
// Verify the single and multi-layer iterators // Verify the single and multi-layer iterators
head := db.tree.get(common.HexToHash("0x04")) head := db.tree.get(common.HexToHash("0x04"))
@ -395,14 +395,14 @@ func TestAccountIteratorTraversalValues(t *testing.T) {
} }
} }
// Assemble a stack of snapshots from the account layers // Assemble a stack of snapshots from the account layers
db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(a, nil, nil, nil)) db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(a, nil, nil, nil, false))
db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(b, nil, nil, nil)) db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(b, nil, nil, nil, false))
db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 4, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(c, nil, nil, nil)) db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 4, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(c, nil, nil, nil, false))
db.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), 5, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(d, nil, nil, nil)) db.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), 5, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(d, nil, nil, nil, false))
db.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), 6, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(e, nil, nil, nil)) db.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), 6, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(e, nil, nil, nil, false))
db.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), 7, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(f, nil, nil, nil)) db.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), 7, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(f, nil, nil, nil, false))
db.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), 8, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(g, nil, nil, nil)) db.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), 8, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(g, nil, nil, nil, false))
db.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), 9, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(h, nil, nil, nil)) db.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), 9, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(h, nil, nil, nil, false))
// binaryIterator // binaryIterator
r, _ := db.StateReader(common.HexToHash("0x09")) r, _ := db.StateReader(common.HexToHash("0x09"))
@ -504,14 +504,14 @@ func TestStorageIteratorTraversalValues(t *testing.T) {
} }
} }
// Assemble a stack of snapshots from the account layers // Assemble a stack of snapshots from the account layers
db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(a), nil, nil)) db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(a), nil, nil, false))
db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(b), nil, nil)) db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(b), nil, nil, false))
db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 4, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(c), nil, nil)) db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 4, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(c), nil, nil, false))
db.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), 5, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(d), nil, nil)) db.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), 5, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(d), nil, nil, false))
db.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), 6, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(e), nil, nil)) db.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), 6, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(e), nil, nil, false))
db.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), 7, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(f), nil, nil)) db.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), 7, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(f), nil, nil, false))
db.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), 8, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(g), nil, nil)) db.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), 8, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(g), nil, nil, false))
db.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), 9, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(h), nil, nil)) db.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), 9, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(h), nil, nil, false))
// binaryIterator // binaryIterator
r, _ := db.StateReader(common.HexToHash("0x09")) r, _ := db.StateReader(common.HexToHash("0x09"))
@ -588,7 +588,7 @@ func TestAccountIteratorLargeTraversal(t *testing.T) {
parent = common.HexToHash(fmt.Sprintf("0x%02x", i)) parent = common.HexToHash(fmt.Sprintf("0x%02x", i))
} }
db.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), parent, uint64(i), trienode.NewMergedNodeSet(), db.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), parent, uint64(i), trienode.NewMergedNodeSet(),
NewStateSetWithOrigin(makeAccounts(200), nil, nil, nil)) NewStateSetWithOrigin(makeAccounts(200), nil, nil, nil, false))
} }
// Iterate the entire stack and ensure everything is hit only once // Iterate the entire stack and ensure everything is hit only once
head := db.tree.get(common.HexToHash("0x80")) head := db.tree.get(common.HexToHash("0x80"))
@ -626,13 +626,13 @@ func TestAccountIteratorFlattening(t *testing.T) {
// Create a stack of diffs on top // Create a stack of diffs on top
db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(), db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(),
NewStateSetWithOrigin(randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil, nil, nil)) NewStateSetWithOrigin(randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil, nil, nil, false))
db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 2, trienode.NewMergedNodeSet(), db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 2, trienode.NewMergedNodeSet(),
NewStateSetWithOrigin(randomAccountSet("0xbb", "0xdd", "0xf0"), nil, nil, nil)) NewStateSetWithOrigin(randomAccountSet("0xbb", "0xdd", "0xf0"), nil, nil, nil, false))
db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 3, trienode.NewMergedNodeSet(), db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 3, trienode.NewMergedNodeSet(),
NewStateSetWithOrigin(randomAccountSet("0xcc", "0xf0", "0xff"), nil, nil, nil)) NewStateSetWithOrigin(randomAccountSet("0xcc", "0xf0", "0xff"), nil, nil, nil, false))
// Create a binary iterator and flatten the data from underneath it // Create a binary iterator and flatten the data from underneath it
head := db.tree.get(common.HexToHash("0x04")) head := db.tree.get(common.HexToHash("0x04"))
@ -658,13 +658,13 @@ func TestAccountIteratorSeek(t *testing.T) {
// db.WaitGeneration() // db.WaitGeneration()
db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(), db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(),
NewStateSetWithOrigin(randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil, nil, nil)) NewStateSetWithOrigin(randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil, nil, nil, false))
db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 2, trienode.NewMergedNodeSet(), db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 2, trienode.NewMergedNodeSet(),
NewStateSetWithOrigin(randomAccountSet("0xbb", "0xdd", "0xf0"), nil, nil, nil)) NewStateSetWithOrigin(randomAccountSet("0xbb", "0xdd", "0xf0"), nil, nil, nil, false))
db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 3, trienode.NewMergedNodeSet(), db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 3, trienode.NewMergedNodeSet(),
NewStateSetWithOrigin(randomAccountSet("0xcc", "0xf0", "0xff"), nil, nil, nil)) NewStateSetWithOrigin(randomAccountSet("0xcc", "0xf0", "0xff"), nil, nil, nil, false))
// Account set is now // Account set is now
// 02: aa, ee, f0, ff // 02: aa, ee, f0, ff
@ -731,13 +731,13 @@ func testStorageIteratorSeek(t *testing.T, newIterator func(db *Database, root,
// Stack three diff layers on top with various overlaps // Stack three diff layers on top with various overlaps
db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(), db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(),
NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil), nil, nil)) NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil), nil, nil, false))
db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 2, trienode.NewMergedNodeSet(), db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 2, trienode.NewMergedNodeSet(),
NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x05", "0x06"}}, nil), nil, nil)) NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x05", "0x06"}}, nil), nil, nil, false))
db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 3, trienode.NewMergedNodeSet(), db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 3, trienode.NewMergedNodeSet(),
NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x05", "0x08"}}, nil), nil, nil)) NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x05", "0x08"}}, nil), nil, nil, false))
// Account set is now // Account set is now
// 02: 01, 03, 05 // 02: 01, 03, 05
@ -803,16 +803,16 @@ func testAccountIteratorDeletions(t *testing.T, newIterator func(db *Database, r
// Stack three diff layers on top with various overlaps // Stack three diff layers on top with various overlaps
db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(), db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(),
NewStateSetWithOrigin(randomAccountSet("0x11", "0x22", "0x33"), nil, nil, nil)) NewStateSetWithOrigin(randomAccountSet("0x11", "0x22", "0x33"), nil, nil, nil, false))
deleted := common.HexToHash("0x22") deleted := common.HexToHash("0x22")
accounts := randomAccountSet("0x11", "0x33") accounts := randomAccountSet("0x11", "0x33")
accounts[deleted] = nil accounts[deleted] = nil
db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 2, trienode.NewMergedNodeSet(), db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 2, trienode.NewMergedNodeSet(),
NewStateSetWithOrigin(accounts, nil, nil, nil)) NewStateSetWithOrigin(accounts, nil, nil, nil, false))
db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 3, trienode.NewMergedNodeSet(), db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 3, trienode.NewMergedNodeSet(),
NewStateSetWithOrigin(randomAccountSet("0x33", "0x44", "0x55"), nil, nil, nil)) NewStateSetWithOrigin(randomAccountSet("0x33", "0x44", "0x55"), nil, nil, nil, false))
// The output should be 11,33,44,55 // The output should be 11,33,44,55
it := newIterator(db, common.HexToHash("0x04"), common.Hash{}) it := newIterator(db, common.HexToHash("0x04"), common.Hash{})
@ -843,10 +843,10 @@ func TestStorageIteratorDeletions(t *testing.T) {
// Stack three diff layers on top with various overlaps // Stack three diff layers on top with various overlaps
db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(), db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(),
NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil), nil, nil)) NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil), nil, nil, false))
db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 2, trienode.NewMergedNodeSet(), db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 2, trienode.NewMergedNodeSet(),
NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x04", "0x06"}}, [][]string{{"0x01", "0x03"}}), nil, nil)) NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x04", "0x06"}}, [][]string{{"0x01", "0x03"}}), nil, nil, false))
// The output should be 02,04,05,06 // The output should be 02,04,05,06
it, _ := db.StorageIterator(common.HexToHash("0x03"), common.HexToHash("0xaa"), common.Hash{}) it, _ := db.StorageIterator(common.HexToHash("0x03"), common.HexToHash("0xaa"), common.Hash{})
@ -863,7 +863,7 @@ func TestStorageIteratorDeletions(t *testing.T) {
common.HexToHash("0xaa"): nil, common.HexToHash("0xaa"): nil,
} }
db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 3, trienode.NewMergedNodeSet(), db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 3, trienode.NewMergedNodeSet(),
NewStateSetWithOrigin(accounts, randomStorageSet([]string{"0xaa"}, nil, [][]string{{"0x02", "0x04", "0x05", "0x06"}}), nil, nil)) NewStateSetWithOrigin(accounts, randomStorageSet([]string{"0xaa"}, nil, [][]string{{"0x02", "0x04", "0x05", "0x06"}}), nil, nil, false))
it, _ = db.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.Hash{}) it, _ = db.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.Hash{})
verifyIterator(t, 0, it, verifyStorage) verifyIterator(t, 0, it, verifyStorage)
@ -871,7 +871,7 @@ func TestStorageIteratorDeletions(t *testing.T) {
// Re-insert the slots of the same account // Re-insert the slots of the same account
db.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), 4, trienode.NewMergedNodeSet(), db.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), 4, trienode.NewMergedNodeSet(),
NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x07", "0x08", "0x09"}}, nil), nil, nil)) NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x07", "0x08", "0x09"}}, nil), nil, nil, false))
// The output should be 07,08,09 // The output should be 07,08,09
it, _ = db.StorageIterator(common.HexToHash("0x05"), common.HexToHash("0xaa"), common.Hash{}) it, _ = db.StorageIterator(common.HexToHash("0x05"), common.HexToHash("0xaa"), common.Hash{})
@ -880,7 +880,7 @@ func TestStorageIteratorDeletions(t *testing.T) {
// Destruct the whole storage but re-create the account in the same layer // Destruct the whole storage but re-create the account in the same layer
db.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), 5, trienode.NewMergedNodeSet(), db.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), 5, trienode.NewMergedNodeSet(),
NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x11", "0x12"}}, [][]string{{"0x07", "0x08", "0x09"}}), nil, nil)) NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x11", "0x12"}}, [][]string{{"0x07", "0x08", "0x09"}}), nil, nil, false))
it, _ = db.StorageIterator(common.HexToHash("0x06"), common.HexToHash("0xaa"), common.Hash{}) it, _ = db.StorageIterator(common.HexToHash("0x06"), common.HexToHash("0xaa"), common.Hash{})
verifyIterator(t, 2, it, verifyStorage) // The output should be 11,12 verifyIterator(t, 2, it, verifyStorage) // The output should be 11,12
@ -911,19 +911,19 @@ func testStaleIterator(t *testing.T, newIter func(db *Database, hash common.Hash
// [02 (disk), 03] // [02 (disk), 03]
db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(), db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(),
NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01"}}, nil), nil, nil)) NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01"}}, nil), nil, nil, false))
db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 2, trienode.NewMergedNodeSet(), db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 2, trienode.NewMergedNodeSet(),
NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02"}}, nil), nil, nil)) NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02"}}, nil), nil, nil, false))
db.tree.cap(common.HexToHash("0x03"), 1) db.tree.cap(common.HexToHash("0x03"), 1)
// [02 (disk), 03, 04] // [02 (disk), 03, 04]
db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 3, trienode.NewMergedNodeSet(), db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 3, trienode.NewMergedNodeSet(),
NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x03"}}, nil), nil, nil)) NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x03"}}, nil), nil, nil, false))
iter := newIter(db, common.HexToHash("0x04")) iter := newIter(db, common.HexToHash("0x04"))
// [04 (disk), 05] // [04 (disk), 05]
db.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), 3, trienode.NewMergedNodeSet(), db.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), 3, trienode.NewMergedNodeSet(),
NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x04"}}, nil), nil, nil)) NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x04"}}, nil), nil, nil, false))
db.tree.cap(common.HexToHash("0x05"), 1) db.tree.cap(common.HexToHash("0x05"), 1)
// Iterator can't finish the traversal as the layer 02 has becoming stale. // Iterator can't finish the traversal as the layer 02 has becoming stale.
@ -969,7 +969,7 @@ func BenchmarkAccountIteratorTraversal(b *testing.B) {
if i == 1 { if i == 1 {
parent = common.HexToHash(fmt.Sprintf("0x%02x", i)) parent = common.HexToHash(fmt.Sprintf("0x%02x", i))
} }
db.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), parent, uint64(i), trienode.NewMergedNodeSet(), NewStateSetWithOrigin(makeAccounts(200), nil, nil, nil)) db.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), parent, uint64(i), trienode.NewMergedNodeSet(), NewStateSetWithOrigin(makeAccounts(200), nil, nil, nil, false))
} }
// We call this once before the benchmark, so the creation of // We call this once before the benchmark, so the creation of
// sorted accountlists are not included in the results. // sorted accountlists are not included in the results.
@ -1059,9 +1059,9 @@ func BenchmarkAccountIteratorLargeBaselayer(b *testing.B) {
db := New(rawdb.NewMemoryDatabase(), config, false) db := New(rawdb.NewMemoryDatabase(), config, false)
// db.WaitGeneration() // db.WaitGeneration()
db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(makeAccounts(2000), nil, nil, nil)) db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(makeAccounts(2000), nil, nil, nil, false))
for i := 2; i <= 100; i++ { for i := 2; i <= 100; i++ {
db.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), uint64(i), trienode.NewMergedNodeSet(), NewStateSetWithOrigin(makeAccounts(20), nil, nil, nil)) db.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), uint64(i), trienode.NewMergedNodeSet(), NewStateSetWithOrigin(makeAccounts(20), nil, nil, nil, false))
} }
// We call this once before the benchmark, so the creation of // We call this once before the benchmark, so the creation of
// sorted accountlists are not included in the results. // sorted accountlists are not included in the results.

View File

@ -45,7 +45,8 @@ var (
// - Version 0: initial version // - Version 0: initial version
// - Version 1: storage.Incomplete field is removed // - Version 1: storage.Incomplete field is removed
// - Version 2: add post-modification state values // - Version 2: add post-modification state values
const journalVersion uint64 = 2 // - Version 3: a flag has been added to indicate whether the storage slot key is the raw key or a hash
const journalVersion uint64 = 3
// loadJournal tries to parse the layer journal from the disk. // loadJournal tries to parse the layer journal from the disk.
func (db *Database) loadJournal(diskRoot common.Hash) (layer, error) { func (db *Database) loadJournal(diskRoot common.Hash) (layer, error) {

View File

@ -65,6 +65,8 @@ type stateSet struct {
accountListSorted []common.Hash // List of account for iteration. If it exists, it's sorted, otherwise it's nil accountListSorted []common.Hash // List of account for iteration. If it exists, it's sorted, otherwise it's nil
storageListSorted map[common.Hash][]common.Hash // List of storage slots for iterated retrievals, one per account. Any existing lists are sorted if non-nil storageListSorted map[common.Hash][]common.Hash // List of storage slots for iterated retrievals, one per account. Any existing lists are sorted if non-nil
rawStorageKey bool // indicates whether the storage set uses the raw slot key or the hash
// Lock for guarding the two lists above. These lists might be accessed // Lock for guarding the two lists above. These lists might be accessed
// concurrently and lock protection is essential to avoid concurrent // concurrently and lock protection is essential to avoid concurrent
// slice or map read/write. // slice or map read/write.
@ -72,7 +74,7 @@ type stateSet struct {
} }
// newStates constructs the state set with the provided account and storage data. // newStates constructs the state set with the provided account and storage data.
func newStates(accounts map[common.Hash][]byte, storages map[common.Hash]map[common.Hash][]byte) *stateSet { func newStates(accounts map[common.Hash][]byte, storages map[common.Hash]map[common.Hash][]byte, rawStorageKey bool) *stateSet {
// Don't panic for the lazy callers, initialize the nil maps instead. // Don't panic for the lazy callers, initialize the nil maps instead.
if accounts == nil { if accounts == nil {
accounts = make(map[common.Hash][]byte) accounts = make(map[common.Hash][]byte)
@ -83,6 +85,7 @@ func newStates(accounts map[common.Hash][]byte, storages map[common.Hash]map[com
s := &stateSet{ s := &stateSet{
accountData: accounts, accountData: accounts,
storageData: storages, storageData: storages,
rawStorageKey: rawStorageKey,
storageListSorted: make(map[common.Hash][]common.Hash), storageListSorted: make(map[common.Hash][]common.Hash),
} }
s.size = s.check() s.size = s.check()
@ -330,6 +333,9 @@ func (s *stateSet) updateSize(delta int) {
// encode serializes the content of state set into the provided writer. // encode serializes the content of state set into the provided writer.
func (s *stateSet) encode(w io.Writer) error { func (s *stateSet) encode(w io.Writer) error {
// Encode accounts // Encode accounts
if err := rlp.Encode(w, s.rawStorageKey); err != nil {
return err
}
type accounts struct { type accounts struct {
AddrHashes []common.Hash AddrHashes []common.Hash
Accounts [][]byte Accounts [][]byte
@ -367,6 +373,9 @@ func (s *stateSet) encode(w io.Writer) error {
// decode deserializes the content from the rlp stream into the state set. // decode deserializes the content from the rlp stream into the state set.
func (s *stateSet) decode(r *rlp.Stream) error { func (s *stateSet) decode(r *rlp.Stream) error {
if err := r.Decode(&s.rawStorageKey); err != nil {
return fmt.Errorf("load diff raw storage key flag: %v", err)
}
type accounts struct { type accounts struct {
AddrHashes []common.Hash AddrHashes []common.Hash
Accounts [][]byte Accounts [][]byte
@ -435,23 +444,23 @@ func (s *stateSet) dbsize() int {
type StateSetWithOrigin struct { type StateSetWithOrigin struct {
*stateSet *stateSet
// AccountOrigin represents the account data before the state transition, // accountOrigin represents the account data before the state transition,
// corresponding to both the accountData and destructSet. It's keyed by the // corresponding to both the accountData and destructSet. It's keyed by the
// account address. The nil value means the account was not present before. // account address. The nil value means the account was not present before.
accountOrigin map[common.Address][]byte accountOrigin map[common.Address][]byte
// StorageOrigin represents the storage data before the state transition, // storageOrigin represents the storage data before the state transition,
// corresponding to storageData and deleted slots of destructSet. It's keyed // corresponding to storageData and deleted slots of destructSet. It's keyed
// by the account address and slot key hash. The nil value means the slot was // by the account address and slot key hash. The nil value means the slot was
// not present. // not present.
storageOrigin map[common.Address]map[common.Hash][]byte storageOrigin map[common.Address]map[common.Hash][]byte
// Memory size of the state data (accountOrigin and storageOrigin) // memory size of the state data (accountOrigin and storageOrigin)
size uint64 size uint64
} }
// NewStateSetWithOrigin constructs the state set with the provided data. // NewStateSetWithOrigin constructs the state set with the provided data.
func NewStateSetWithOrigin(accounts map[common.Hash][]byte, storages map[common.Hash]map[common.Hash][]byte, accountOrigin map[common.Address][]byte, storageOrigin map[common.Address]map[common.Hash][]byte) *StateSetWithOrigin { func NewStateSetWithOrigin(accounts map[common.Hash][]byte, storages map[common.Hash]map[common.Hash][]byte, accountOrigin map[common.Address][]byte, storageOrigin map[common.Address]map[common.Hash][]byte, rawStorageKey bool) *StateSetWithOrigin {
// Don't panic for the lazy callers, initialize the nil maps instead. // Don't panic for the lazy callers, initialize the nil maps instead.
if accountOrigin == nil { if accountOrigin == nil {
accountOrigin = make(map[common.Address][]byte) accountOrigin = make(map[common.Address][]byte)
@ -471,7 +480,7 @@ func NewStateSetWithOrigin(accounts map[common.Hash][]byte, storages map[common.
size += 2*common.HashLength + len(data) size += 2*common.HashLength + len(data)
} }
} }
set := newStates(accounts, storages) set := newStates(accounts, storages, rawStorageKey)
return &StateSetWithOrigin{ return &StateSetWithOrigin{
stateSet: set, stateSet: set,
accountOrigin: accountOrigin, accountOrigin: accountOrigin,

View File

@ -44,6 +44,7 @@ func TestStatesMerge(t *testing.T) {
common.Hash{0x1}: {0x10}, common.Hash{0x1}: {0x10},
}, },
}, },
false,
) )
b := newStates( b := newStates(
map[common.Hash][]byte{ map[common.Hash][]byte{
@ -64,6 +65,7 @@ func TestStatesMerge(t *testing.T) {
common.Hash{0x1}: nil, // delete slot common.Hash{0x1}: nil, // delete slot
}, },
}, },
false,
) )
a.merge(b) a.merge(b)
@ -132,6 +134,7 @@ func TestStatesRevert(t *testing.T) {
common.Hash{0x1}: {0x10}, common.Hash{0x1}: {0x10},
}, },
}, },
false,
) )
b := newStates( b := newStates(
map[common.Hash][]byte{ map[common.Hash][]byte{
@ -152,6 +155,7 @@ func TestStatesRevert(t *testing.T) {
common.Hash{0x1}: nil, common.Hash{0x1}: nil,
}, },
}, },
false,
) )
a.merge(b) a.merge(b)
a.revertTo( a.revertTo(
@ -224,12 +228,13 @@ func TestStatesRevert(t *testing.T) {
// before and was created during transition w, reverting w will retain an x=nil // before and was created during transition w, reverting w will retain an x=nil
// entry in the set. // entry in the set.
func TestStateRevertAccountNullMarker(t *testing.T) { func TestStateRevertAccountNullMarker(t *testing.T) {
a := newStates(nil, nil) // empty initial state a := newStates(nil, nil, false) // empty initial state
b := newStates( b := newStates(
map[common.Hash][]byte{ map[common.Hash][]byte{
{0xa}: {0xa}, {0xa}: {0xa},
}, },
nil, nil,
false,
) )
a.merge(b) // create account 0xa a.merge(b) // create account 0xa
a.revertTo( a.revertTo(
@ -254,7 +259,7 @@ func TestStateRevertAccountNullMarker(t *testing.T) {
func TestStateRevertStorageNullMarker(t *testing.T) { func TestStateRevertStorageNullMarker(t *testing.T) {
a := newStates(map[common.Hash][]byte{ a := newStates(map[common.Hash][]byte{
{0xa}: {0xa}, {0xa}: {0xa},
}, nil) // initial state with account 0xa }, nil, false) // initial state with account 0xa
b := newStates( b := newStates(
nil, nil,
@ -263,6 +268,7 @@ func TestStateRevertStorageNullMarker(t *testing.T) {
common.Hash{0x1}: {0x1}, common.Hash{0x1}: {0x1},
}, },
}, },
false,
) )
a.merge(b) // create slot 0x1 a.merge(b) // create slot 0x1
a.revertTo( a.revertTo(
@ -284,6 +290,11 @@ func TestStateRevertStorageNullMarker(t *testing.T) {
} }
func TestStatesEncode(t *testing.T) { func TestStatesEncode(t *testing.T) {
testStatesEncode(t, false)
testStatesEncode(t, true)
}
func testStatesEncode(t *testing.T, rawStorageKey bool) {
s := newStates( s := newStates(
map[common.Hash][]byte{ map[common.Hash][]byte{
{0x1}: {0x1}, {0x1}: {0x1},
@ -293,6 +304,7 @@ func TestStatesEncode(t *testing.T) {
common.Hash{0x1}: {0x1}, common.Hash{0x1}: {0x1},
}, },
}, },
rawStorageKey,
) )
buf := bytes.NewBuffer(nil) buf := bytes.NewBuffer(nil)
if err := s.encode(buf); err != nil { if err := s.encode(buf); err != nil {
@ -308,9 +320,17 @@ func TestStatesEncode(t *testing.T) {
if !reflect.DeepEqual(s.storageData, dec.storageData) { if !reflect.DeepEqual(s.storageData, dec.storageData) {
t.Fatal("Unexpected storage data") t.Fatal("Unexpected storage data")
} }
if s.rawStorageKey != dec.rawStorageKey {
t.Fatal("Unexpected rawStorageKey flag")
}
} }
func TestStateWithOriginEncode(t *testing.T) { func TestStateWithOriginEncode(t *testing.T) {
testStateWithOriginEncode(t, false)
testStateWithOriginEncode(t, true)
}
func testStateWithOriginEncode(t *testing.T, rawStorageKey bool) {
s := NewStateSetWithOrigin( s := NewStateSetWithOrigin(
map[common.Hash][]byte{ map[common.Hash][]byte{
{0x1}: {0x1}, {0x1}: {0x1},
@ -328,6 +348,7 @@ func TestStateWithOriginEncode(t *testing.T) {
common.Hash{0x1}: {0x1}, common.Hash{0x1}: {0x1},
}, },
}, },
rawStorageKey,
) )
buf := bytes.NewBuffer(nil) buf := bytes.NewBuffer(nil)
if err := s.encode(buf); err != nil { if err := s.encode(buf); err != nil {
@ -349,6 +370,9 @@ func TestStateWithOriginEncode(t *testing.T) {
if !reflect.DeepEqual(s.storageOrigin, dec.storageOrigin) { if !reflect.DeepEqual(s.storageOrigin, dec.storageOrigin) {
t.Fatal("Unexpected storage origin data") t.Fatal("Unexpected storage origin data")
} }
if s.rawStorageKey != dec.rawStorageKey {
t.Fatal("Unexpected rawStorageKey flag")
}
} }
func TestStateSizeTracking(t *testing.T) { func TestStateSizeTracking(t *testing.T) {
@ -375,6 +399,7 @@ func TestStateSizeTracking(t *testing.T) {
common.Hash{0x1}: {0x10}, // 2*common.HashLength+1 common.Hash{0x1}: {0x10}, // 2*common.HashLength+1
}, },
}, },
false,
) )
if a.size != uint64(expSizeA) { if a.size != uint64(expSizeA) {
t.Fatalf("Unexpected size, want: %d, got: %d", expSizeA, a.size) t.Fatalf("Unexpected size, want: %d, got: %d", expSizeA, a.size)
@ -406,6 +431,7 @@ func TestStateSizeTracking(t *testing.T) {
common.Hash{0x3}: nil, // 2*common.HashLength, slot deletion common.Hash{0x3}: nil, // 2*common.HashLength, slot deletion
}, },
}, },
false,
) )
if b.size != uint64(expSizeB) { if b.size != uint64(expSizeB) {
t.Fatalf("Unexpected size, want: %d, got: %d", expSizeB, b.size) t.Fatalf("Unexpected size, want: %d, got: %d", expSizeB, b.size)

View File

@ -27,6 +27,7 @@ type StateSet struct {
AccountsOrigin map[common.Address][]byte // Original values of mutated accounts in 'slim RLP' encoding AccountsOrigin map[common.Address][]byte // Original values of mutated accounts in 'slim RLP' encoding
Storages map[common.Hash]map[common.Hash][]byte // Mutated storage slots in 'prefix-zero-trimmed' RLP format Storages map[common.Hash]map[common.Hash][]byte // Mutated storage slots in 'prefix-zero-trimmed' RLP format
StoragesOrigin map[common.Address]map[common.Hash][]byte // Original values of mutated storage slots in 'prefix-zero-trimmed' RLP format StoragesOrigin map[common.Address]map[common.Hash][]byte // Original values of mutated storage slots in 'prefix-zero-trimmed' RLP format
RawStorageKey bool // Flag whether the storage set uses the raw slot key or the hash
} }
// NewStateSet initializes an empty state set. // NewStateSet initializes an empty state set.
@ -45,5 +46,5 @@ func (set *StateSet) internal() *pathdb.StateSetWithOrigin {
if set == nil { if set == nil {
return nil return nil
} }
return pathdb.NewStateSetWithOrigin(set.Accounts, set.Storages, set.AccountsOrigin, set.StoragesOrigin) return pathdb.NewStateSetWithOrigin(set.Accounts, set.Storages, set.AccountsOrigin, set.StoragesOrigin, set.RawStorageKey)
} }