diff --git a/accounts/accounts.go b/accounts/accounts.go
index b995498a6d..7bd911577a 100644
--- a/accounts/accounts.go
+++ b/accounts/accounts.go
@@ -214,7 +214,9 @@ const (
// of starting any background processes such as automatic key derivation.
WalletOpened
- // WalletDropped
+ // WalletDropped is fired when a wallet is removed or disconnected, either via USB
+ // or due to a filesystem event in the keystore. This event indicates that the wallet
+ // is no longer available for operations.
WalletDropped
)
diff --git a/cmd/devp2p/internal/ethtest/protocol.go b/cmd/devp2p/internal/ethtest/protocol.go
index f5f5f7e489..5c2f7d9e48 100644
--- a/cmd/devp2p/internal/ethtest/protocol.go
+++ b/cmd/devp2p/internal/ethtest/protocol.go
@@ -13,6 +13,7 @@
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see .
+
package ethtest
import (
diff --git a/cmd/devp2p/internal/v4test/discv4tests.go b/cmd/devp2p/internal/v4test/discv4tests.go
index ca556851b4..963df6cdbc 100644
--- a/cmd/devp2p/internal/v4test/discv4tests.go
+++ b/cmd/devp2p/internal/v4test/discv4tests.go
@@ -194,7 +194,7 @@ func PingExtraData(t *utesting.T) {
}
}
-// This test sends a PING packet with additional data and wrong 'from' field
+// PingExtraDataWrongFrom sends a PING packet with additional data and wrong 'from' field
// and expects a PONG response.
func PingExtraDataWrongFrom(t *utesting.T) {
te := newTestEnv(Remote, Listen1, Listen2)
@@ -215,7 +215,7 @@ func PingExtraDataWrongFrom(t *utesting.T) {
}
}
-// This test sends a PING packet with an expiration in the past.
+// PingPastExpiration sends a PING packet with an expiration in the past.
// The remote node should not respond.
func PingPastExpiration(t *utesting.T) {
te := newTestEnv(Remote, Listen1, Listen2)
@@ -234,7 +234,7 @@ func PingPastExpiration(t *utesting.T) {
}
}
-// This test sends an invalid packet. The remote node should not respond.
+// WrongPacketType sends an invalid packet. The remote node should not respond.
func WrongPacketType(t *utesting.T) {
te := newTestEnv(Remote, Listen1, Listen2)
defer te.close()
@@ -252,7 +252,7 @@ func WrongPacketType(t *utesting.T) {
}
}
-// This test verifies that the default behaviour of ignoring 'from' fields is unaffected by
+// BondThenPingWithWrongFrom verifies that the default behaviour of ignoring 'from' fields is unaffected by
// the bonding process. After bonding, it pings the target with a different from endpoint.
func BondThenPingWithWrongFrom(t *utesting.T) {
te := newTestEnv(Remote, Listen1, Listen2)
@@ -289,7 +289,7 @@ waitForPong:
}
}
-// This test just sends FINDNODE. The remote node should not reply
+// FindnodeWithoutEndpointProof sends FINDNODE. The remote node should not reply
// because the endpoint proof has not completed.
func FindnodeWithoutEndpointProof(t *utesting.T) {
te := newTestEnv(Remote, Listen1, Listen2)
@@ -332,7 +332,7 @@ func BasicFindnode(t *utesting.T) {
}
}
-// This test sends an unsolicited NEIGHBORS packet after the endpoint proof, then sends
+// UnsolicitedNeighbors sends an unsolicited NEIGHBORS packet after the endpoint proof, then sends
// FINDNODE to read the remote table. The remote node should not return the node contained
// in the unsolicited NEIGHBORS packet.
func UnsolicitedNeighbors(t *utesting.T) {
@@ -373,7 +373,7 @@ func UnsolicitedNeighbors(t *utesting.T) {
}
}
-// This test sends FINDNODE with an expiration timestamp in the past.
+// FindnodePastExpiration sends FINDNODE with an expiration timestamp in the past.
// The remote node should not respond.
func FindnodePastExpiration(t *utesting.T) {
te := newTestEnv(Remote, Listen1, Listen2)
@@ -426,7 +426,7 @@ func bond(t *utesting.T, te *testenv) {
}
}
-// This test attempts to perform a traffic amplification attack against a
+// FindnodeAmplificationInvalidPongHash attempts to perform a traffic amplification attack against a
// 'victim' endpoint using FINDNODE. In this attack scenario, the attacker
// attempts to complete the endpoint proof non-interactively by sending a PONG
// with mismatching reply token from the 'victim' endpoint. The attack works if
@@ -478,7 +478,7 @@ func FindnodeAmplificationInvalidPongHash(t *utesting.T) {
}
}
-// This test attempts to perform a traffic amplification attack using FINDNODE.
+// FindnodeAmplificationWrongIP attempts to perform a traffic amplification attack using FINDNODE.
// The attack works if the remote node does not verify the IP address of FINDNODE
// against the endpoint verification proof done by PING/PONG.
func FindnodeAmplificationWrongIP(t *utesting.T) {
diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go
index aef497885e..7c17a251f0 100644
--- a/cmd/evm/internal/t8ntool/execution.go
+++ b/cmd/evm/internal/t8ntool/execution.go
@@ -379,7 +379,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
}
// Commit block
- root, err := statedb.Commit(vmContext.BlockNumber.Uint64(), chainConfig.IsEIP158(vmContext.BlockNumber))
+ root, err := statedb.Commit(vmContext.BlockNumber.Uint64(), chainConfig.IsEIP158(vmContext.BlockNumber), chainConfig.IsCancun(vmContext.BlockNumber, vmContext.Time))
if err != nil {
return nil, nil, nil, NewError(ErrorEVM, fmt.Errorf("could not commit state: %v", err))
}
@@ -437,7 +437,7 @@ func MakePreState(db ethdb.Database, accounts types.GenesisAlloc) *state.StateDB
}
}
// Commit and re-open to start with a clean state.
- root, _ := statedb.Commit(0, false)
+ root, _ := statedb.Commit(0, false, false)
statedb, _ = state.New(root, sdb)
return statedb
}
diff --git a/cmd/evm/runner.go b/cmd/evm/runner.go
index c67d3657e2..b2cf28353b 100644
--- a/cmd/evm/runner.go
+++ b/cmd/evm/runner.go
@@ -336,7 +336,7 @@ func runCmd(ctx *cli.Context) error {
output, stats, err := timedExec(bench, execFunc)
if ctx.Bool(DumpFlag.Name) {
- root, err := runtimeConfig.State.Commit(genesisConfig.Number, true)
+ root, err := runtimeConfig.State.Commit(genesisConfig.Number, true, false)
if err != nil {
fmt.Printf("Failed to commit changes %v\n", err)
return err
diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go
index 48564eb5eb..bbadb1cc19 100644
--- a/cmd/geth/chaincmd.go
+++ b/cmd/geth/chaincmd.go
@@ -230,11 +230,10 @@ func initGenesis(ctx *cli.Context) error {
triedb := utils.MakeTrieDatabase(ctx, chaindb, ctx.Bool(utils.CachePreimagesFlag.Name), false, genesis.IsVerkle())
defer triedb.Close()
- _, hash, err := core.SetupGenesisBlockWithOverride(chaindb, triedb, genesis, &overrides)
+ _, hash, _, err := core.SetupGenesisBlockWithOverride(chaindb, triedb, genesis, &overrides)
if err != nil {
utils.Fatalf("Failed to write genesis block: %v", err)
}
-
log.Info("Successfully wrote genesis state", "database", "chaindata", "hash", hash)
return nil
diff --git a/cmd/geth/dbcmd.go b/cmd/geth/dbcmd.go
index 7622246050..cd41c57c75 100644
--- a/cmd/geth/dbcmd.go
+++ b/cmd/geth/dbcmd.go
@@ -829,8 +829,7 @@ func inspectAccount(db *triedb.Database, start uint64, end uint64, address commo
func inspectStorage(db *triedb.Database, start uint64, end uint64, address common.Address, slot common.Hash, raw bool) error {
// The hash of storage slot key is utilized in the history
// rather than the raw slot key, make the conversion.
- slotHash := crypto.Keccak256Hash(slot.Bytes())
- stats, err := db.StorageHistory(address, slotHash, start, end)
+ stats, err := db.StorageHistory(address, slot, start, end)
if err != nil {
return err
}
diff --git a/cmd/utils/history_test.go b/cmd/utils/history_test.go
index 1074a358ec..07cf71234e 100644
--- a/cmd/utils/history_test.go
+++ b/cmd/utils/history_test.go
@@ -170,7 +170,7 @@ func TestHistoryImportAndExport(t *testing.T) {
db2.Close()
})
- genesis.MustCommit(db2, triedb.NewDatabase(db, triedb.HashDefaults))
+ genesis.MustCommit(db2, triedb.NewDatabase(db2, triedb.HashDefaults))
imported, err := core.NewBlockChain(db2, nil, genesis, nil, ethash.NewFaker(), vm.Config{}, nil)
if err != nil {
t.Fatalf("unable to initialize chain: %v", err)
diff --git a/consensus/clique/snapshot_test.go b/consensus/clique/snapshot_test.go
index 6c46d1db4f..a83d6ca736 100644
--- a/consensus/clique/snapshot_test.go
+++ b/consensus/clique/snapshot_test.go
@@ -467,7 +467,6 @@ func (tt *cliqueTest) run(t *testing.T) {
for j := 0; j < len(batches)-1; j++ {
if k, err := chain.InsertChain(batches[j]); err != nil {
t.Fatalf("failed to import batch %d, block %d: %v", j, k, err)
- break
}
}
if _, err = chain.InsertChain(batches[len(batches)-1]); err != tt.failure {
diff --git a/core/blockchain.go b/core/blockchain.go
index 0fe4812626..6aac541ba0 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -269,14 +269,19 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
cacheConfig = defaultCacheConfig
}
// Open trie database with provided config
- triedb := triedb.NewDatabase(db, cacheConfig.triedbConfig(genesis != nil && genesis.IsVerkle()))
+ enableVerkle, err := EnableVerkleAtGenesis(db, genesis)
+ if err != nil {
+ return nil, err
+ }
+ triedb := triedb.NewDatabase(db, cacheConfig.triedbConfig(enableVerkle))
- // Setup the genesis block, commit the provided genesis specification
- // to database if the genesis block is not present yet, or load the
- // stored one from database.
- chainConfig, genesisHash, genesisErr := SetupGenesisBlockWithOverride(db, triedb, genesis, overrides)
- if _, ok := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !ok {
- return nil, genesisErr
+ // Write the supplied genesis to the database if it has not been initialized
+ // yet. The corresponding chain config will be returned, either from the
+ // provided genesis or from the locally stored configuration if the genesis
+ // has already been initialized.
+ chainConfig, genesisHash, compatErr, err := SetupGenesisBlockWithOverride(db, triedb, genesis, overrides)
+ if err != nil {
+ return nil, err
}
log.Info("")
log.Info(strings.Repeat("-", 153))
@@ -303,7 +308,6 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
vmConfig: vmConfig,
logger: vmConfig.Tracer,
}
- var err error
bc.hc, err = NewHeaderChain(db, chainConfig, engine, bc.insertStopped)
if err != nil {
return nil, err
@@ -453,16 +457,15 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
}
// Rewind the chain in case of an incompatible config upgrade.
- if compat, ok := genesisErr.(*params.ConfigCompatError); ok {
- log.Warn("Rewinding chain to upgrade configuration", "err", compat)
- if compat.RewindToTime > 0 {
- bc.SetHeadWithTimestamp(compat.RewindToTime)
+ if compatErr != nil {
+ log.Warn("Rewinding chain to upgrade configuration", "err", compatErr)
+ if compatErr.RewindToTime > 0 {
+ bc.SetHeadWithTimestamp(compatErr.RewindToTime)
} else {
- bc.SetHead(compat.RewindToBlock)
+ bc.SetHead(compatErr.RewindToBlock)
}
rawdb.WriteChainConfig(db, genesisHash, chainConfig)
}
-
// Start tx indexer if it's enabled.
if txLookupLimit != nil {
bc.txIndexer = newTxIndexer(*txLookupLimit, bc)
@@ -1468,7 +1471,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
log.Crit("Failed to write block into disk", "err", err)
}
// Commit all cached state changes into underlying memory database.
- root, err := statedb.Commit(block.NumberU64(), bc.chainConfig.IsEIP158(block.Number()))
+ root, err := statedb.Commit(block.NumberU64(), bc.chainConfig.IsEIP158(block.Number()), bc.chainConfig.IsCancun(block.Number(), block.Time()))
if err != nil {
return err
}
@@ -1616,7 +1619,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool, makeWitness
return nil, 0, nil
}
// Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
- SenderCacher.RecoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number(), chain[0].Time()), chain)
+ SenderCacher().RecoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number(), chain[0].Time()), chain)
var (
stats = insertStats{startTime: mclock.Now()}
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index f2a9b953a1..84f1b9740c 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -181,7 +181,7 @@ func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
blockchain.chainmu.MustLock()
rawdb.WriteTd(blockchain.db, block.Hash(), block.NumberU64(), new(big.Int).Add(block.Difficulty(), blockchain.GetTd(block.ParentHash(), block.NumberU64()-1)))
rawdb.WriteBlock(blockchain.db, block)
- statedb.Commit(block.NumberU64(), false)
+ statedb.Commit(block.NumberU64(), false, false)
blockchain.chainmu.Unlock()
}
return nil
@@ -4265,12 +4265,11 @@ func TestEIP7702(t *testing.T) {
// 2. addr1:0xaaaa calls into addr2:0xbbbb
// 3. addr2:0xbbbb writes to storage
auth1, _ := types.SignSetCode(key1, types.SetCodeAuthorization{
- ChainID: gspec.Config.ChainID.Uint64(),
+ ChainID: *uint256.MustFromBig(gspec.Config.ChainID),
Address: aa,
Nonce: 1,
})
auth2, _ := types.SignSetCode(key2, types.SetCodeAuthorization{
- ChainID: 0,
Address: bb,
Nonce: 0,
})
@@ -4278,7 +4277,7 @@ func TestEIP7702(t *testing.T) {
_, blocks, _ := GenerateChainWithGenesis(gspec, engine, 1, func(i int, b *BlockGen) {
b.SetCoinbase(aa)
txdata := &types.SetCodeTx{
- ChainID: gspec.Config.ChainID.Uint64(),
+ ChainID: uint256.MustFromBig(gspec.Config.ChainID),
Nonce: 0,
To: addr1,
Gas: 500000,
diff --git a/core/chain_makers.go b/core/chain_makers.go
index 26714845eb..5298874a40 100644
--- a/core/chain_makers.go
+++ b/core/chain_makers.go
@@ -405,7 +405,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
}
// Write state changes to db
- root, err := statedb.Commit(b.header.Number.Uint64(), config.IsEIP158(b.header.Number))
+ root, err := statedb.Commit(b.header.Number.Uint64(), config.IsEIP158(b.header.Number), config.IsCancun(b.header.Number, b.header.Time))
if err != nil {
panic(fmt.Sprintf("state write error: %v", err))
}
@@ -510,7 +510,7 @@ func GenerateVerkleChain(config *params.ChainConfig, parent *types.Block, engine
}
// Write state changes to DB.
- root, err := statedb.Commit(b.header.Number.Uint64(), config.IsEIP158(b.header.Number))
+ root, err := statedb.Commit(b.header.Number.Uint64(), config.IsEIP158(b.header.Number), config.IsCancun(b.header.Number, b.header.Time))
if err != nil {
panic(fmt.Sprintf("state write error: %v", err))
}
diff --git a/core/genesis.go b/core/genesis.go
index 347789cf0c..68d945e37e 100644
--- a/core/genesis.go
+++ b/core/genesis.go
@@ -146,7 +146,7 @@ func hashAlloc(ga *types.GenesisAlloc, isVerkle bool) (common.Hash, error) {
statedb.SetState(addr, key, value)
}
}
- return statedb.Commit(0, false)
+ return statedb.Commit(0, false, false)
}
// flushAlloc is very similar with hash, but the main difference is all the
@@ -172,7 +172,7 @@ func flushAlloc(ga *types.GenesisAlloc, triedb *triedb.Database) (common.Hash, e
statedb.SetState(addr, key, value)
}
}
- root, err := statedb.Commit(0, false)
+ root, err := statedb.Commit(0, false, false)
if err != nil {
return common.Hash{}, err
}
@@ -247,6 +247,24 @@ type ChainOverrides struct {
OverrideVerkle *uint64
}
+// apply applies the chain overrides on the supplied chain config.
+func (o *ChainOverrides) apply(cfg *params.ChainConfig) (*params.ChainConfig, error) {
+ if o == nil || cfg == nil {
+ return cfg, nil
+ }
+ cpy := *cfg
+ if o.OverrideCancun != nil {
+ cpy.CancunTime = o.OverrideCancun
+ }
+ if o.OverrideVerkle != nil {
+ cpy.VerkleTime = o.OverrideVerkle
+ }
+ if err := cpy.CheckConfigForkOrder(); err != nil {
+ return nil, err
+ }
+ return &cpy, nil
+}
+
// SetupGenesisBlock writes or updates the genesis block in db.
// The block that will be used is:
//
@@ -258,109 +276,102 @@ type ChainOverrides struct {
// The stored chain configuration will be updated if it is compatible (i.e. does not
// specify a fork block below the local head block). In case of a conflict, the
// error is a *params.ConfigCompatError and the new, unwritten config is returned.
-//
-// The returned chain configuration is never nil.
-func SetupGenesisBlock(db ethdb.Database, triedb *triedb.Database, genesis *Genesis) (*params.ChainConfig, common.Hash, error) {
+func SetupGenesisBlock(db ethdb.Database, triedb *triedb.Database, genesis *Genesis) (*params.ChainConfig, common.Hash, *params.ConfigCompatError, error) {
return SetupGenesisBlockWithOverride(db, triedb, genesis, nil)
}
-func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *triedb.Database, genesis *Genesis, overrides *ChainOverrides) (*params.ChainConfig, common.Hash, error) {
+func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *triedb.Database, genesis *Genesis, overrides *ChainOverrides) (*params.ChainConfig, common.Hash, *params.ConfigCompatError, error) {
+ // Sanitize the supplied genesis, ensuring it has the associated chain
+ // config attached.
if genesis != nil && genesis.Config == nil {
- return params.AllEthashProtocolChanges, common.Hash{}, errGenesisNoConfig
+ return nil, common.Hash{}, nil, errGenesisNoConfig
}
- applyOverrides := func(config *params.ChainConfig) {
- if config != nil {
- if overrides != nil && overrides.OverrideCancun != nil {
- config.CancunTime = overrides.OverrideCancun
- }
- if overrides != nil && overrides.OverrideVerkle != nil {
- config.VerkleTime = overrides.OverrideVerkle
- }
- }
- }
- // Just commit the new block if there is no stored genesis block.
- stored := rawdb.ReadCanonicalHash(db, 0)
- if (stored == common.Hash{}) {
+ // Commit the genesis if the database is empty
+ ghash := rawdb.ReadCanonicalHash(db, 0)
+ if (ghash == common.Hash{}) {
if genesis == nil {
log.Info("Writing default main-net genesis block")
genesis = DefaultGenesisBlock()
} else {
log.Info("Writing custom genesis block")
}
+ chainCfg, err := overrides.apply(genesis.Config)
+ if err != nil {
+ return nil, common.Hash{}, nil, err
+ }
+ genesis.Config = chainCfg
- applyOverrides(genesis.Config)
block, err := genesis.Commit(db, triedb)
if err != nil {
- return genesis.Config, common.Hash{}, err
+ return nil, common.Hash{}, nil, err
}
- return genesis.Config, block.Hash(), nil
+ return chainCfg, block.Hash(), nil, nil
}
- // The genesis block is present(perhaps in ancient database) while the
- // state database is not initialized yet. It can happen that the node
- // is initialized with an external ancient store. Commit genesis state
- // in this case.
- header := rawdb.ReadHeader(db, stored, 0)
- if header.Root != types.EmptyRootHash && !triedb.Initialized(header.Root) {
+ // Commit the genesis if the genesis block exists in the ancient database
+ // but the key-value database is empty without initializing the genesis
+ // fields. This scenario can occur when the node is created from scratch
+ // with an existing ancient store.
+ storedCfg := rawdb.ReadChainConfig(db, ghash)
+ if storedCfg == nil {
+		// Ensure the stored genesis block matches the supplied genesis. Private
+		// networks must explicitly specify their genesis in the config file;
+		// otherwise the mainnet default is used and initialization will always fail.
if genesis == nil {
+ log.Info("Writing default main-net genesis block")
genesis = DefaultGenesisBlock()
+ } else {
+ log.Info("Writing custom genesis block")
}
- applyOverrides(genesis.Config)
- // Ensure the stored genesis matches with the given one.
- hash := genesis.ToBlock().Hash()
- if hash != stored {
- return genesis.Config, hash, &GenesisMismatchError{stored, hash}
+ chainCfg, err := overrides.apply(genesis.Config)
+ if err != nil {
+ return nil, common.Hash{}, nil, err
+ }
+ genesis.Config = chainCfg
+
+ if hash := genesis.ToBlock().Hash(); hash != ghash {
+ return nil, common.Hash{}, nil, &GenesisMismatchError{ghash, hash}
}
block, err := genesis.Commit(db, triedb)
if err != nil {
- return genesis.Config, hash, err
+ return nil, common.Hash{}, nil, err
}
- return genesis.Config, block.Hash(), nil
+ return chainCfg, block.Hash(), nil, nil
}
- // Check whether the genesis block is already written.
+ // The genesis block has already been committed previously. Verify that the
+ // provided genesis with chain overrides matches the existing one, and update
+ // the stored chain config if necessary.
if genesis != nil {
- applyOverrides(genesis.Config)
- hash := genesis.ToBlock().Hash()
- if hash != stored {
- return genesis.Config, hash, &GenesisMismatchError{stored, hash}
+ chainCfg, err := overrides.apply(genesis.Config)
+ if err != nil {
+ return nil, common.Hash{}, nil, err
+ }
+ genesis.Config = chainCfg
+
+ if hash := genesis.ToBlock().Hash(); hash != ghash {
+ return nil, common.Hash{}, nil, &GenesisMismatchError{ghash, hash}
}
- }
- // Get the existing chain configuration.
- newcfg := genesis.configOrDefault(stored)
- applyOverrides(newcfg)
- if err := newcfg.CheckConfigForkOrder(); err != nil {
- return newcfg, common.Hash{}, err
- }
- storedcfg := rawdb.ReadChainConfig(db, stored)
- if storedcfg == nil {
- log.Warn("Found genesis block without chain config")
- rawdb.WriteChainConfig(db, stored, newcfg)
- return newcfg, stored, nil
- }
- storedData, _ := json.Marshal(storedcfg)
- // Special case: if a private network is being used (no genesis and also no
- // mainnet hash in the database), we must not apply the `configOrDefault`
- // chain config as that would be AllProtocolChanges (applying any new fork
- // on top of an existing private network genesis block). In that case, only
- // apply the overrides.
- if genesis == nil && stored != params.MainnetGenesisHash {
- newcfg = storedcfg
- applyOverrides(newcfg)
}
// Check config compatibility and write the config. Compatibility errors
// are returned to the caller unless we're already at block zero.
head := rawdb.ReadHeadHeader(db)
if head == nil {
- return newcfg, stored, errors.New("missing head header")
+ return nil, common.Hash{}, nil, errors.New("missing head header")
}
- compatErr := storedcfg.CheckCompatible(newcfg, head.Number.Uint64(), head.Time)
+ newCfg := genesis.chainConfigOrDefault(ghash, storedCfg)
+
+ // TODO(rjl493456442) better to define the comparator of chain config
+ // and short circuit if the chain config is not changed.
+ compatErr := storedCfg.CheckCompatible(newCfg, head.Number.Uint64(), head.Time)
if compatErr != nil && ((head.Number.Uint64() != 0 && compatErr.RewindToBlock != 0) || (head.Time != 0 && compatErr.RewindToTime != 0)) {
- return newcfg, stored, compatErr
+ return newCfg, ghash, compatErr, nil
}
- // Don't overwrite if the old is identical to the new
- if newData, _ := json.Marshal(newcfg); !bytes.Equal(storedData, newData) {
- rawdb.WriteChainConfig(db, stored, newcfg)
+	// Don't overwrite if the old is identical to the new. This is useful
+	// for scenarios where the database is opened in read-only mode.
+ storedData, _ := json.Marshal(storedCfg)
+ if newData, _ := json.Marshal(newCfg); !bytes.Equal(storedData, newData) {
+ rawdb.WriteChainConfig(db, ghash, newCfg)
}
- return newcfg, stored, nil
+ return newCfg, ghash, nil, nil
}
// LoadChainConfig loads the stored chain config if it is already present in
@@ -396,7 +407,10 @@ func LoadChainConfig(db ethdb.Database, genesis *Genesis) (*params.ChainConfig,
return params.MainnetChainConfig, nil
}
-func (g *Genesis) configOrDefault(ghash common.Hash) *params.ChainConfig {
+// chainConfigOrDefault retrieves the attached chain configuration. If the genesis
+// object is nil, it returns the default chain configuration based on the given
+// genesis hash, or the locally stored config if it's not a pre-defined network.
+func (g *Genesis) chainConfigOrDefault(ghash common.Hash, stored *params.ChainConfig) *params.ChainConfig {
switch {
case g != nil:
return g.Config
@@ -407,14 +421,14 @@ func (g *Genesis) configOrDefault(ghash common.Hash) *params.ChainConfig {
case ghash == params.SepoliaGenesisHash:
return params.SepoliaChainConfig
default:
- return params.AllEthashProtocolChanges
+ return stored
}
}
// IsVerkle indicates whether the state is already stored in a verkle
// tree at genesis time.
func (g *Genesis) IsVerkle() bool {
- return g.Config.IsVerkle(new(big.Int).SetUint64(g.Number), g.Timestamp)
+ return g.Config.IsVerkleGenesis()
}
// ToBlock returns the genesis block according to genesis specification.
@@ -494,7 +508,7 @@ func (g *Genesis) Commit(db ethdb.Database, triedb *triedb.Database) (*types.Blo
}
config := g.Config
if config == nil {
- config = params.AllEthashProtocolChanges
+ return nil, errors.New("invalid genesis without chain config")
}
if err := config.CheckConfigForkOrder(); err != nil {
return nil, err
@@ -514,16 +528,17 @@ func (g *Genesis) Commit(db ethdb.Database, triedb *triedb.Database) (*types.Blo
if err != nil {
return nil, err
}
- rawdb.WriteGenesisStateSpec(db, block.Hash(), blob)
- rawdb.WriteTd(db, block.Hash(), block.NumberU64(), block.Difficulty())
- rawdb.WriteBlock(db, block)
- rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), nil)
- rawdb.WriteCanonicalHash(db, block.Hash(), block.NumberU64())
- rawdb.WriteHeadBlockHash(db, block.Hash())
- rawdb.WriteHeadFastBlockHash(db, block.Hash())
- rawdb.WriteHeadHeaderHash(db, block.Hash())
- rawdb.WriteChainConfig(db, block.Hash(), config)
- return block, nil
+ batch := db.NewBatch()
+ rawdb.WriteGenesisStateSpec(batch, block.Hash(), blob)
+ rawdb.WriteTd(batch, block.Hash(), block.NumberU64(), block.Difficulty())
+ rawdb.WriteBlock(batch, block)
+ rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), nil)
+ rawdb.WriteCanonicalHash(batch, block.Hash(), block.NumberU64())
+ rawdb.WriteHeadBlockHash(batch, block.Hash())
+ rawdb.WriteHeadFastBlockHash(batch, block.Hash())
+ rawdb.WriteHeadHeaderHash(batch, block.Hash())
+ rawdb.WriteChainConfig(batch, block.Hash(), config)
+ return block, batch.Write()
}
// MustCommit writes the genesis block and state to db, panicking on error.
@@ -536,6 +551,29 @@ func (g *Genesis) MustCommit(db ethdb.Database, triedb *triedb.Database) *types.
return block
}
+// EnableVerkleAtGenesis indicates whether the verkle fork should be activated
+// at genesis. This is a temporary solution intended only for verkle devnet
+// testing, where the verkle fork is activated at genesis and the configured
+// activation date has already passed.
+//
+// In production networks (mainnet and public testnets), verkle activation always
+// occurs after the genesis block, making this function irrelevant in those cases.
+func EnableVerkleAtGenesis(db ethdb.Database, genesis *Genesis) (bool, error) {
+ if genesis != nil {
+ if genesis.Config == nil {
+ return false, errGenesisNoConfig
+ }
+ return genesis.Config.EnableVerkleAtGenesis, nil
+ }
+ if ghash := rawdb.ReadCanonicalHash(db, 0); ghash != (common.Hash{}) {
+ chainCfg := rawdb.ReadChainConfig(db, ghash)
+ if chainCfg != nil {
+ return chainCfg.EnableVerkleAtGenesis, nil
+ }
+ }
+ return false, nil
+}
+
// DefaultGenesisBlock returns the Ethereum main net genesis block.
func DefaultGenesisBlock() *Genesis {
return &Genesis{
diff --git a/core/genesis_test.go b/core/genesis_test.go
index 3ec87474e5..964ef928c7 100644
--- a/core/genesis_test.go
+++ b/core/genesis_test.go
@@ -54,23 +54,23 @@ func testSetupGenesis(t *testing.T, scheme string) {
oldcustomg.Config = ¶ms.ChainConfig{HomesteadBlock: big.NewInt(2)}
tests := []struct {
- name string
- fn func(ethdb.Database) (*params.ChainConfig, common.Hash, error)
- wantConfig *params.ChainConfig
- wantHash common.Hash
- wantErr error
+ name string
+ fn func(ethdb.Database) (*params.ChainConfig, common.Hash, *params.ConfigCompatError, error)
+ wantConfig *params.ChainConfig
+ wantHash common.Hash
+ wantErr error
+ wantCompactErr *params.ConfigCompatError
}{
{
name: "genesis without ChainConfig",
- fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
+ fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, *params.ConfigCompatError, error) {
return SetupGenesisBlock(db, triedb.NewDatabase(db, newDbConfig(scheme)), new(Genesis))
},
- wantErr: errGenesisNoConfig,
- wantConfig: params.AllEthashProtocolChanges,
+ wantErr: errGenesisNoConfig,
},
{
name: "no block in DB, genesis == nil",
- fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
+ fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, *params.ConfigCompatError, error) {
return SetupGenesisBlock(db, triedb.NewDatabase(db, newDbConfig(scheme)), nil)
},
wantHash: params.MainnetGenesisHash,
@@ -78,7 +78,7 @@ func testSetupGenesis(t *testing.T, scheme string) {
},
{
name: "mainnet block in DB, genesis == nil",
- fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
+ fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, *params.ConfigCompatError, error) {
DefaultGenesisBlock().MustCommit(db, triedb.NewDatabase(db, newDbConfig(scheme)))
return SetupGenesisBlock(db, triedb.NewDatabase(db, newDbConfig(scheme)), nil)
},
@@ -87,7 +87,7 @@ func testSetupGenesis(t *testing.T, scheme string) {
},
{
name: "custom block in DB, genesis == nil",
- fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
+ fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, *params.ConfigCompatError, error) {
tdb := triedb.NewDatabase(db, newDbConfig(scheme))
customg.Commit(db, tdb)
return SetupGenesisBlock(db, tdb, nil)
@@ -97,18 +97,16 @@ func testSetupGenesis(t *testing.T, scheme string) {
},
{
name: "custom block in DB, genesis == sepolia",
- fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
+ fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, *params.ConfigCompatError, error) {
tdb := triedb.NewDatabase(db, newDbConfig(scheme))
customg.Commit(db, tdb)
return SetupGenesisBlock(db, tdb, DefaultSepoliaGenesisBlock())
},
- wantErr: &GenesisMismatchError{Stored: customghash, New: params.SepoliaGenesisHash},
- wantHash: params.SepoliaGenesisHash,
- wantConfig: params.SepoliaChainConfig,
+ wantErr: &GenesisMismatchError{Stored: customghash, New: params.SepoliaGenesisHash},
},
{
name: "compatible config in DB",
- fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
+ fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, *params.ConfigCompatError, error) {
tdb := triedb.NewDatabase(db, newDbConfig(scheme))
oldcustomg.Commit(db, tdb)
return SetupGenesisBlock(db, tdb, &customg)
@@ -118,7 +116,7 @@ func testSetupGenesis(t *testing.T, scheme string) {
},
{
name: "incompatible config in DB",
- fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) {
+ fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, *params.ConfigCompatError, error) {
// Commit the 'old' genesis block with Homestead transition at #2.
// Advance to block #4, past the homestead transition block of customg.
tdb := triedb.NewDatabase(db, newDbConfig(scheme))
@@ -135,7 +133,7 @@ func testSetupGenesis(t *testing.T, scheme string) {
},
wantHash: customghash,
wantConfig: customg.Config,
- wantErr: ¶ms.ConfigCompatError{
+ wantCompactErr: ¶ms.ConfigCompatError{
What: "Homestead fork block",
StoredBlock: big.NewInt(2),
NewBlock: big.NewInt(3),
@@ -146,12 +144,16 @@ func testSetupGenesis(t *testing.T, scheme string) {
for _, test := range tests {
db := rawdb.NewMemoryDatabase()
- config, hash, err := test.fn(db)
+ config, hash, compatErr, err := test.fn(db)
// Check the return values.
if !reflect.DeepEqual(err, test.wantErr) {
spew := spew.ConfigState{DisablePointerAddresses: true, DisableCapacities: true}
t.Errorf("%s: returned error %#v, want %#v", test.name, spew.NewFormatter(err), spew.NewFormatter(test.wantErr))
}
+ if !reflect.DeepEqual(compatErr, test.wantCompactErr) {
+ spew := spew.ConfigState{DisablePointerAddresses: true, DisableCapacities: true}
+ t.Errorf("%s: returned error %#v, want %#v", test.name, spew.NewFormatter(compatErr), spew.NewFormatter(test.wantCompactErr))
+ }
if !reflect.DeepEqual(config, test.wantConfig) {
t.Errorf("%s:\nreturned %v\nwant %v", test.name, config, test.wantConfig)
}
@@ -279,6 +281,7 @@ func TestVerkleGenesisCommit(t *testing.T) {
PragueTime: &verkleTime,
VerkleTime: &verkleTime,
TerminalTotalDifficulty: big.NewInt(0),
+ EnableVerkleAtGenesis: true,
Ethash: nil,
Clique: nil,
}
diff --git a/core/sender_cacher.go b/core/sender_cacher.go
index 4be53619eb..73bd5c85f2 100644
--- a/core/sender_cacher.go
+++ b/core/sender_cacher.go
@@ -18,12 +18,21 @@ package core
import (
"runtime"
+ "sync"
"github.com/ethereum/go-ethereum/core/types"
)
-// SenderCacher is a concurrent transaction sender recoverer and cacher.
-var SenderCacher = newTxSenderCacher(runtime.NumCPU())
+// senderCacherOnce is used to ensure that the SenderCacher is initialized only once.
+var senderCacherOnce = sync.OnceValue(func() *txSenderCacher {
+ return newTxSenderCacher(runtime.NumCPU())
+})
+
+// SenderCacher returns the singleton instance of SenderCacher, initializing it if called for the first time.
+// This function is thread-safe and ensures that initialization happens only once.
+func SenderCacher() *txSenderCacher {
+ return senderCacherOnce()
+}
// txSenderCacherRequest is a request for recovering transaction senders with a
// specific signature scheme and caching it into the transactions themselves.
diff --git a/core/state/state_object.go b/core/state/state_object.go
index 76a3aba92c..a6979bd361 100644
--- a/core/state/state_object.go
+++ b/core/state/state_object.go
@@ -399,10 +399,16 @@ func (s *stateObject) commitStorage(op *accountUpdate) {
op.storages = make(map[common.Hash][]byte)
}
op.storages[hash] = encode(val)
- if op.storagesOrigin == nil {
- op.storagesOrigin = make(map[common.Hash][]byte)
+
+ if op.storagesOriginByKey == nil {
+ op.storagesOriginByKey = make(map[common.Hash][]byte)
}
- op.storagesOrigin[hash] = encode(s.originStorage[key])
+ if op.storagesOriginByHash == nil {
+ op.storagesOriginByHash = make(map[common.Hash][]byte)
+ }
+ origin := encode(s.originStorage[key])
+ op.storagesOriginByKey[key] = origin
+ op.storagesOriginByHash[hash] = origin
// Overwrite the clean value of storage slots
s.originStorage[key] = val
diff --git a/core/state/state_test.go b/core/state/state_test.go
index 6f54300c37..b443411f1b 100644
--- a/core/state/state_test.go
+++ b/core/state/state_test.go
@@ -56,7 +56,7 @@ func TestDump(t *testing.T) {
// write some of them to the trie
s.state.updateStateObject(obj1)
s.state.updateStateObject(obj2)
- root, _ := s.state.Commit(0, false)
+ root, _ := s.state.Commit(0, false, false)
// check that DumpToCollector contains the state objects that are in trie
s.state, _ = New(root, tdb)
@@ -116,7 +116,7 @@ func TestIterativeDump(t *testing.T) {
// write some of them to the trie
s.state.updateStateObject(obj1)
s.state.updateStateObject(obj2)
- root, _ := s.state.Commit(0, false)
+ root, _ := s.state.Commit(0, false, false)
s.state, _ = New(root, tdb)
b := &bytes.Buffer{}
@@ -142,7 +142,7 @@ func TestNull(t *testing.T) {
var value common.Hash
s.state.SetState(address, common.Hash{}, value)
- s.state.Commit(0, false)
+ s.state.Commit(0, false, false)
if value := s.state.GetState(address, common.Hash{}); value != (common.Hash{}) {
t.Errorf("expected empty current value, got %x", value)
diff --git a/core/state/statedb.go b/core/state/statedb.go
index d279ccfdfe..0310ee6973 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -1051,7 +1051,7 @@ func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root
// with their values be tracked as original value.
// In case (d), **original** account along with its storages should be deleted,
// with their values be tracked as original value.
-func (s *StateDB) handleDestruction() (map[common.Hash]*accountDelete, []*trienode.NodeSet, error) {
+func (s *StateDB) handleDestruction(noStorageWiping bool) (map[common.Hash]*accountDelete, []*trienode.NodeSet, error) {
var (
nodes []*trienode.NodeSet
buf = crypto.NewKeccakState()
@@ -1080,6 +1080,9 @@ func (s *StateDB) handleDestruction() (map[common.Hash]*accountDelete, []*trieno
if prev.Root == types.EmptyRootHash || s.db.TrieDB().IsVerkle() {
continue
}
+ if noStorageWiping {
+ return nil, nil, fmt.Errorf("unexpected storage wiping, %x", addr)
+ }
// Remove storage slots belonging to the account.
storages, storagesOrigin, set, err := s.deleteStorage(addr, addrHash, prev.Root)
if err != nil {
@@ -1101,7 +1104,7 @@ func (s *StateDB) GetTrie() Trie {
// commit gathers the state mutations accumulated along with the associated
// trie changes, resetting all internal flags with the new state as the base.
-func (s *StateDB) commit(deleteEmptyObjects bool) (*stateUpdate, error) {
+func (s *StateDB) commit(deleteEmptyObjects bool, noStorageWiping bool) (*stateUpdate, error) {
// Short circuit in case any database failure occurred earlier.
if s.dbErr != nil {
return nil, fmt.Errorf("commit aborted due to earlier error: %v", s.dbErr)
@@ -1155,7 +1158,7 @@ func (s *StateDB) commit(deleteEmptyObjects bool) (*stateUpdate, error) {
// the same block, account deletions must be processed first. This ensures
// that the storage trie nodes deleted during destruction and recreated
// during subsequent resurrection can be combined correctly.
- deletes, delNodes, err := s.handleDestruction()
+ deletes, delNodes, err := s.handleDestruction(noStorageWiping)
if err != nil {
return nil, err
}
@@ -1252,13 +1255,14 @@ func (s *StateDB) commit(deleteEmptyObjects bool) (*stateUpdate, error) {
origin := s.originalRoot
s.originalRoot = root
- return newStateUpdate(origin, root, deletes, updates, nodes), nil
+
+ return newStateUpdate(noStorageWiping, origin, root, deletes, updates, nodes), nil
}
// commitAndFlush is a wrapper of commit which also commits the state mutations
// to the configured data stores.
-func (s *StateDB) commitAndFlush(block uint64, deleteEmptyObjects bool) (*stateUpdate, error) {
- ret, err := s.commit(deleteEmptyObjects)
+func (s *StateDB) commitAndFlush(block uint64, deleteEmptyObjects bool, noStorageWiping bool) (*stateUpdate, error) {
+ ret, err := s.commit(deleteEmptyObjects, noStorageWiping)
if err != nil {
return nil, err
}
@@ -1310,8 +1314,13 @@ func (s *StateDB) commitAndFlush(block uint64, deleteEmptyObjects bool) (*stateU
//
// The associated block number of the state transition is also provided
// for more chain context.
-func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, error) {
- ret, err := s.commitAndFlush(block, deleteEmptyObjects)
+//
+// noStorageWiping is a flag indicating whether storage wiping is permitted.
+// Since self-destruction was deprecated with the Cancun fork and there are
+// no empty accounts left that could be deleted by EIP-158, storage wiping
+// should not occur.
+func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool, noStorageWiping bool) (common.Hash, error) {
+ ret, err := s.commitAndFlush(block, deleteEmptyObjects, noStorageWiping)
if err != nil {
return common.Hash{}, err
}
diff --git a/core/state/statedb_fuzz_test.go b/core/state/statedb_fuzz_test.go
index 7cbfd9b9d7..ed99cf687c 100644
--- a/core/state/statedb_fuzz_test.go
+++ b/core/state/statedb_fuzz_test.go
@@ -228,7 +228,7 @@ func (test *stateTest) run() bool {
} else {
state.IntermediateRoot(true) // call intermediateRoot at the transaction boundary
}
- ret, err := state.commitAndFlush(0, true) // call commit at the block boundary
+ ret, err := state.commitAndFlush(0, true, false) // call commit at the block boundary
if err != nil {
panic(err)
}
diff --git a/core/state/statedb_hooked_test.go b/core/state/statedb_hooked_test.go
index 5f82ed06d0..874a275993 100644
--- a/core/state/statedb_hooked_test.go
+++ b/core/state/statedb_hooked_test.go
@@ -71,7 +71,7 @@ func TestBurn(t *testing.T) {
hooked.AddBalance(addC, uint256.NewInt(200), tracing.BalanceChangeUnspecified)
hooked.Finalise(true)
- s.Commit(0, false)
+ s.Commit(0, false, false)
if have, want := burned, uint256.NewInt(600); !have.Eq(want) {
t.Fatalf("burn-count wrong, have %v want %v", have, want)
}
diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go
index 37141e90b0..67eb9cbdc6 100644
--- a/core/state/statedb_test.go
+++ b/core/state/statedb_test.go
@@ -119,7 +119,7 @@ func TestIntermediateLeaks(t *testing.T) {
}
// Commit and cross check the databases.
- transRoot, err := transState.Commit(0, false)
+ transRoot, err := transState.Commit(0, false, false)
if err != nil {
t.Fatalf("failed to commit transition state: %v", err)
}
@@ -127,7 +127,7 @@ func TestIntermediateLeaks(t *testing.T) {
t.Errorf("can not commit trie %v to persistent database", transRoot.Hex())
}
- finalRoot, err := finalState.Commit(0, false)
+ finalRoot, err := finalState.Commit(0, false, false)
if err != nil {
t.Fatalf("failed to commit final state: %v", err)
}
@@ -240,7 +240,7 @@ func TestCopyWithDirtyJournal(t *testing.T) {
obj.data.Root = common.HexToHash("0xdeadbeef")
orig.updateStateObject(obj)
}
- root, _ := orig.Commit(0, true)
+ root, _ := orig.Commit(0, true, false)
orig, _ = New(root, db)
// modify all in memory without finalizing
@@ -293,7 +293,7 @@ func TestCopyObjectState(t *testing.T) {
t.Fatalf("Error in test itself, the 'done' flag should not be set before Commit, have %v want %v", have, want)
}
}
- orig.Commit(0, true)
+ orig.Commit(0, true, false)
for _, op := range cpy.mutations {
if have, want := op.applied, false; have != want {
t.Fatalf("Error: original state affected copy, have %v want %v", have, want)
@@ -696,7 +696,7 @@ func (test *snapshotTest) checkEqual(state, checkstate *StateDB) error {
func TestTouchDelete(t *testing.T) {
s := newStateEnv()
s.state.getOrNewStateObject(common.Address{})
- root, _ := s.state.Commit(0, false)
+ root, _ := s.state.Commit(0, false, false)
s.state, _ = New(root, s.state.db)
snapshot := s.state.Snapshot()
@@ -784,7 +784,7 @@ func TestCopyCommitCopy(t *testing.T) {
t.Fatalf("second copy committed storage slot mismatch: have %x, want %x", val, sval)
}
// Commit state, ensure states can be loaded from disk
- root, _ := state.Commit(0, false)
+ root, _ := state.Commit(0, false, false)
state, _ = New(root, tdb)
if balance := state.GetBalance(addr); balance.Cmp(uint256.NewInt(42)) != 0 {
t.Fatalf("state post-commit balance mismatch: have %v, want %v", balance, 42)
@@ -898,11 +898,11 @@ func TestCommitCopy(t *testing.T) {
if val := state.GetCommittedState(addr, skey1); val != (common.Hash{}) {
t.Fatalf("initial committed storage slot mismatch: have %x, want %x", val, common.Hash{})
}
- root, _ := state.Commit(0, true)
+ root, _ := state.Commit(0, true, false)
state, _ = New(root, db)
state.SetState(addr, skey2, sval2)
- state.Commit(1, true)
+ state.Commit(1, true, false)
// Copy the committed state database, the copied one is not fully functional.
copied := state.Copy()
@@ -943,7 +943,7 @@ func TestDeleteCreateRevert(t *testing.T) {
addr := common.BytesToAddress([]byte("so"))
state.SetBalance(addr, uint256.NewInt(1), tracing.BalanceChangeUnspecified)
- root, _ := state.Commit(0, false)
+ root, _ := state.Commit(0, false, false)
state, _ = New(root, state.db)
// Simulate self-destructing in one transaction, then create-reverting in another
@@ -955,7 +955,7 @@ func TestDeleteCreateRevert(t *testing.T) {
state.RevertToSnapshot(id)
// Commit the entire state and make sure we don't crash and have the correct state
- root, _ = state.Commit(0, true)
+ root, _ = state.Commit(0, true, false)
state, _ = New(root, state.db)
if state.getStateObject(addr) != nil {
@@ -998,7 +998,7 @@ func testMissingTrieNodes(t *testing.T, scheme string) {
a2 := common.BytesToAddress([]byte("another"))
state.SetBalance(a2, uint256.NewInt(100), tracing.BalanceChangeUnspecified)
state.SetCode(a2, []byte{1, 2, 4})
- root, _ = state.Commit(0, false)
+ root, _ = state.Commit(0, false, false)
t.Logf("root: %x", root)
// force-flush
tdb.Commit(root, false)
@@ -1022,7 +1022,7 @@ func testMissingTrieNodes(t *testing.T, scheme string) {
}
// Modify the state
state.SetBalance(addr, uint256.NewInt(2), tracing.BalanceChangeUnspecified)
- root, err := state.Commit(0, false)
+ root, err := state.Commit(0, false, false)
if err == nil {
t.Fatalf("expected error, got root :%x", root)
}
@@ -1213,7 +1213,7 @@ func TestFlushOrderDataLoss(t *testing.T) {
state.SetState(common.Address{a}, common.Hash{a, s}, common.Hash{a, s})
}
}
- root, err := state.Commit(0, false)
+ root, err := state.Commit(0, false, false)
if err != nil {
t.Fatalf("failed to commit state trie: %v", err)
}
@@ -1288,8 +1288,7 @@ func TestDeleteStorage(t *testing.T) {
value := common.Hash(uint256.NewInt(uint64(10 * i)).Bytes32())
state.SetState(addr, slot, value)
}
- root, _ := state.Commit(0, true)
-
+ root, _ := state.Commit(0, true, false)
// Init phase done, create two states, one with snap and one without
fastState, _ := New(root, NewDatabase(tdb, snaps))
slowState, _ := New(root, NewDatabase(tdb, nil))
diff --git a/core/state/stateupdate.go b/core/state/stateupdate.go
index 45de660ca5..75c4ca028c 100644
--- a/core/state/stateupdate.go
+++ b/core/state/stateupdate.go
@@ -32,34 +32,56 @@ type contractCode struct {
// accountDelete represents an operation for deleting an Ethereum account.
type accountDelete struct {
- address common.Address // address is the unique account identifier
- origin []byte // origin is the original value of account data in slim-RLP encoding.
- storages map[common.Hash][]byte // storages stores mutated slots, the value should be nil.
- storagesOrigin map[common.Hash][]byte // storagesOrigin stores the original values of mutated slots in prefix-zero-trimmed RLP format.
+ address common.Address // address is the unique account identifier
+ origin []byte // origin is the original value of account data in slim-RLP encoding.
+
+ // storages stores mutated slots, the value should be nil.
+ storages map[common.Hash][]byte
+
+ // storagesOrigin stores the original values of mutated slots in
+ // prefix-zero-trimmed RLP format. The map key refers to the **HASH**
+ // of the raw storage slot key.
+ storagesOrigin map[common.Hash][]byte
}
// accountUpdate represents an operation for updating an Ethereum account.
type accountUpdate struct {
- address common.Address // address is the unique account identifier
- data []byte // data is the slim-RLP encoded account data.
- origin []byte // origin is the original value of account data in slim-RLP encoding.
- code *contractCode // code represents mutated contract code; nil means it's not modified.
- storages map[common.Hash][]byte // storages stores mutated slots in prefix-zero-trimmed RLP format.
- storagesOrigin map[common.Hash][]byte // storagesOrigin stores the original values of mutated slots in prefix-zero-trimmed RLP format.
+ address common.Address // address is the unique account identifier
+ data []byte // data is the slim-RLP encoded account data.
+ origin []byte // origin is the original value of account data in slim-RLP encoding.
+ code *contractCode // code represents mutated contract code; nil means it's not modified.
+ storages map[common.Hash][]byte // storages stores mutated slots in prefix-zero-trimmed RLP format.
+
+ // storagesOriginByKey and storagesOriginByHash both store the original values
+ // of mutated slots in prefix-zero-trimmed RLP format. The difference is that
+ // storagesOriginByKey uses the **raw** storage slot key as the map ID, while
+ // storagesOriginByHash uses the **hash** of the storage slot key instead.
+ storagesOriginByKey map[common.Hash][]byte
+ storagesOriginByHash map[common.Hash][]byte
}
// stateUpdate represents the difference between two states resulting from state
// execution. It contains information about mutated contract codes, accounts,
// and storage slots, along with their original values.
type stateUpdate struct {
- originRoot common.Hash // hash of the state before applying mutation
- root common.Hash // hash of the state after applying mutation
- accounts map[common.Hash][]byte // accounts stores mutated accounts in 'slim RLP' encoding
- accountsOrigin map[common.Address][]byte // accountsOrigin stores the original values of mutated accounts in 'slim RLP' encoding
- storages map[common.Hash]map[common.Hash][]byte // storages stores mutated slots in 'prefix-zero-trimmed' RLP format
- storagesOrigin map[common.Address]map[common.Hash][]byte // storagesOrigin stores the original values of mutated slots in 'prefix-zero-trimmed' RLP format
- codes map[common.Address]contractCode // codes contains the set of dirty codes
- nodes *trienode.MergedNodeSet // Aggregated dirty nodes caused by state changes
+ originRoot common.Hash // hash of the state before applying mutation
+ root common.Hash // hash of the state after applying mutation
+ accounts map[common.Hash][]byte // accounts stores mutated accounts in 'slim RLP' encoding
+ accountsOrigin map[common.Address][]byte // accountsOrigin stores the original values of mutated accounts in 'slim RLP' encoding
+
+ // storages stores mutated slots in 'prefix-zero-trimmed' RLP format.
+ // The value is keyed by account hash and **storage slot key hash**.
+ storages map[common.Hash]map[common.Hash][]byte
+
+ // storagesOrigin stores the original values of mutated slots in
+ // 'prefix-zero-trimmed' RLP format.
+ // (a) the value is keyed by account hash and **storage slot key** if rawStorageKey is true;
+ // (b) the value is keyed by account hash and **storage slot key hash** if rawStorageKey is false;
+ storagesOrigin map[common.Address]map[common.Hash][]byte
+ rawStorageKey bool
+
+ codes map[common.Address]contractCode // codes contains the set of dirty codes
+ nodes *trienode.MergedNodeSet // Aggregated dirty nodes caused by state changes
}
// empty returns a flag indicating the state transition is empty or not.
@@ -67,10 +89,13 @@ func (sc *stateUpdate) empty() bool {
return sc.originRoot == sc.root
}
-// newStateUpdate constructs a state update object, representing the differences
-// between two states by performing state execution. It aggregates the given
-// account deletions and account updates to form a comprehensive state update.
-func newStateUpdate(originRoot common.Hash, root common.Hash, deletes map[common.Hash]*accountDelete, updates map[common.Hash]*accountUpdate, nodes *trienode.MergedNodeSet) *stateUpdate {
+// newStateUpdate constructs a state update object by identifying the differences
+// between two states through state execution. It combines the specified account
+// deletions and account updates to create a complete state update.
+//
+// rawStorageKey is a flag indicating whether to use the raw storage slot key or
+// the hash of the slot key for constructing state update object.
+func newStateUpdate(rawStorageKey bool, originRoot common.Hash, root common.Hash, deletes map[common.Hash]*accountDelete, updates map[common.Hash]*accountUpdate, nodes *trienode.MergedNodeSet) *stateUpdate {
var (
accounts = make(map[common.Hash][]byte)
accountsOrigin = make(map[common.Address][]byte)
@@ -78,13 +103,14 @@ func newStateUpdate(originRoot common.Hash, root common.Hash, deletes map[common
storagesOrigin = make(map[common.Address]map[common.Hash][]byte)
codes = make(map[common.Address]contractCode)
)
- // Due to the fact that some accounts could be destructed and resurrected
- // within the same block, the deletions must be aggregated first.
+ // Since some accounts might be destroyed and recreated within the same
+ // block, deletions must be aggregated first.
for addrHash, op := range deletes {
addr := op.address
accounts[addrHash] = nil
accountsOrigin[addr] = op.origin
+ // If storage wiping exists, the hash of the storage slot key must be used
if len(op.storages) > 0 {
storages[addrHash] = op.storages
}
@@ -118,12 +144,16 @@ func newStateUpdate(originRoot common.Hash, root common.Hash, deletes map[common
}
// Aggregate the storage original values. If the slot is already present
// in aggregated storagesOrigin set, skip it.
- if len(op.storagesOrigin) > 0 {
+ storageOriginSet := op.storagesOriginByHash
+ if rawStorageKey {
+ storageOriginSet = op.storagesOriginByKey
+ }
+ if len(storageOriginSet) > 0 {
origin, exist := storagesOrigin[addr]
if !exist {
- storagesOrigin[addr] = op.storagesOrigin
+ storagesOrigin[addr] = storageOriginSet
} else {
- for key, slot := range op.storagesOrigin {
+ for key, slot := range storageOriginSet {
if _, found := origin[key]; !found {
origin[key] = slot
}
@@ -138,6 +168,7 @@ func newStateUpdate(originRoot common.Hash, root common.Hash, deletes map[common
accountsOrigin: accountsOrigin,
storages: storages,
storagesOrigin: storagesOrigin,
+ rawStorageKey: rawStorageKey,
codes: codes,
nodes: nodes,
}
@@ -153,5 +184,6 @@ func (sc *stateUpdate) stateSet() *triedb.StateSet {
AccountsOrigin: sc.accountsOrigin,
Storages: sc.storages,
StoragesOrigin: sc.storagesOrigin,
+ RawStorageKey: sc.rawStorageKey,
}
}
diff --git a/core/state/sync_test.go b/core/state/sync_test.go
index efa56f8860..5c8b5a90f7 100644
--- a/core/state/sync_test.go
+++ b/core/state/sync_test.go
@@ -79,7 +79,7 @@ func makeTestState(scheme string) (ethdb.Database, Database, *triedb.Database, c
}
accounts = append(accounts, acc)
}
- root, _ := state.Commit(0, false)
+ root, _ := state.Commit(0, false, false)
// Return the generated state
return db, sdb, nodeDb, root, accounts
diff --git a/core/state/trie_prefetcher_test.go b/core/state/trie_prefetcher_test.go
index d96727704c..4d1b627c4d 100644
--- a/core/state/trie_prefetcher_test.go
+++ b/core/state/trie_prefetcher_test.go
@@ -83,7 +83,7 @@ func TestVerklePrefetcher(t *testing.T) {
state.SetBalance(addr, uint256.NewInt(42), tracing.BalanceChangeUnspecified) // Change the account trie
state.SetCode(addr, []byte("hello")) // Change an external metadata
state.SetState(addr, skey, sval) // Change the storage trie
- root, _ := state.Commit(0, true)
+ root, _ := state.Commit(0, true, false)
state, _ = New(root, sdb)
sRoot := state.GetStorageRoot(addr)
diff --git a/core/state_transition.go b/core/state_transition.go
index 009b679b27..b6203e6aae 100644
--- a/core/state_transition.go
+++ b/core/state_transition.go
@@ -529,8 +529,8 @@ func (st *stateTransition) execute() (*ExecutionResult, error) {
// validateAuthorization validates an EIP-7702 authorization against the state.
func (st *stateTransition) validateAuthorization(auth *types.SetCodeAuthorization) (authority common.Address, err error) {
- // Verify chain ID is 0 or equal to current chain ID.
- if auth.ChainID != 0 && st.evm.ChainConfig().ChainID.Uint64() != auth.ChainID {
+ // Verify chain ID is null or equal to current chain ID.
+ if !auth.ChainID.IsZero() && auth.ChainID.CmpBig(st.evm.ChainConfig().ChainID) != 0 {
return authority, ErrAuthorizationWrongChainID
}
// Limit nonce to 2^64-1 per EIP-2681.
diff --git a/core/tracing/CHANGELOG.md b/core/tracing/CHANGELOG.md
index e8aa3a9e2e..270e0a30bf 100644
--- a/core/tracing/CHANGELOG.md
+++ b/core/tracing/CHANGELOG.md
@@ -9,6 +9,14 @@ All notable changes to the tracing interface will be documented in this file.
- `GasChangeReason` has been extended with the following reasons which will be enabled only post-Verkle. There shouldn't be any gas changes with those reasons prior to the fork.
- `GasChangeWitnessContractCollisionCheck` flags the event of adding to the witness when checking for contract address collision.
+## [v1.14.12]
+
+This release contains a change in behavior for `OnCodeChange` hook.
+
+### `OnCodeChange` change
+
+The `OnCodeChange` hook is now called when the code of a contract is removed due to a selfdestruct. Previously, no code change was emitted on such occasions.
+
## [v1.14.4]
This release contained only minor extensions to the tracing interface.
diff --git a/core/tracing/hooks.go b/core/tracing/hooks.go
index 728f15069b..4343edfe86 100644
--- a/core/tracing/hooks.go
+++ b/core/tracing/hooks.go
@@ -293,7 +293,7 @@ const (
GasChangeCallLeftOverRefunded GasChangeReason = 7
// GasChangeCallContractCreation is the amount of gas that will be burned for a CREATE.
GasChangeCallContractCreation GasChangeReason = 8
- // GasChangeContractCreation is the amount of gas that will be burned for a CREATE2.
+ // GasChangeCallContractCreation2 is the amount of gas that will be burned for a CREATE2.
GasChangeCallContractCreation2 GasChangeReason = 9
// GasChangeCallCodeStorage is the amount of gas that will be charged for code storage.
GasChangeCallCodeStorage GasChangeReason = 10
diff --git a/core/txpool/blobpool/blobpool_test.go b/core/txpool/blobpool/blobpool_test.go
index e4441bec5d..3d90ec4412 100644
--- a/core/txpool/blobpool/blobpool_test.go
+++ b/core/txpool/blobpool/blobpool_test.go
@@ -650,7 +650,7 @@ func TestOpenDrops(t *testing.T) {
statedb.AddBalance(crypto.PubkeyToAddress(overcapper.PublicKey), uint256.NewInt(10000000), tracing.BalanceChangeUnspecified)
statedb.AddBalance(crypto.PubkeyToAddress(duplicater.PublicKey), uint256.NewInt(1000000), tracing.BalanceChangeUnspecified)
statedb.AddBalance(crypto.PubkeyToAddress(repeater.PublicKey), uint256.NewInt(1000000), tracing.BalanceChangeUnspecified)
- statedb.Commit(0, true)
+ statedb.Commit(0, true, false)
chain := &testBlockChain{
config: params.MainnetChainConfig,
@@ -769,7 +769,7 @@ func TestOpenIndex(t *testing.T) {
// Create a blob pool out of the pre-seeded data
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
statedb.AddBalance(addr, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
- statedb.Commit(0, true)
+ statedb.Commit(0, true, false)
chain := &testBlockChain{
config: params.MainnetChainConfig,
@@ -871,7 +871,7 @@ func TestOpenHeap(t *testing.T) {
statedb.AddBalance(addr1, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
statedb.AddBalance(addr2, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
statedb.AddBalance(addr3, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
- statedb.Commit(0, true)
+ statedb.Commit(0, true, false)
chain := &testBlockChain{
config: params.MainnetChainConfig,
@@ -951,7 +951,7 @@ func TestOpenCap(t *testing.T) {
statedb.AddBalance(addr1, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
statedb.AddBalance(addr2, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
statedb.AddBalance(addr3, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
- statedb.Commit(0, true)
+ statedb.Commit(0, true, false)
chain := &testBlockChain{
config: params.MainnetChainConfig,
@@ -1393,7 +1393,7 @@ func TestAdd(t *testing.T) {
store.Put(blob)
}
}
- statedb.Commit(0, true)
+ statedb.Commit(0, true, false)
store.Close()
// Create a blob pool out of the pre-seeded dats
@@ -1519,7 +1519,7 @@ func benchmarkPoolPending(b *testing.B, datacap uint64) {
statedb.AddBalance(addr, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
pool.add(tx)
}
- statedb.Commit(0, true)
+ statedb.Commit(0, true, false)
defer pool.Close()
// Benchmark assembling the pending
diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go
index cca8816271..48673a5f3c 100644
--- a/core/txpool/legacypool/legacypool.go
+++ b/core/txpool/legacypool/legacypool.go
@@ -1313,7 +1313,7 @@ func (pool *LegacyPool) reset(oldHead, newHead *types.Header) {
// Inject any transactions discarded due to reorgs
log.Debug("Reinjecting stale transactions", "count", len(reinject))
- core.SenderCacher.Recover(pool.signer, reinject)
+ core.SenderCacher().Recover(pool.signer, reinject)
pool.addTxsLocked(reinject)
}
@@ -1758,4 +1758,5 @@ func (pool *LegacyPool) Clear() {
pool.priced = newPricedList(pool.all)
pool.pending = make(map[common.Address]*list)
pool.queue = make(map[common.Address]*list)
+ pool.pendingNonces = newNoncer(pool.currentState)
}
diff --git a/core/txpool/legacypool/legacypool2_test.go b/core/txpool/legacypool/legacypool2_test.go
index 1377479da1..8af9624994 100644
--- a/core/txpool/legacypool/legacypool2_test.go
+++ b/core/txpool/legacypool/legacypool2_test.go
@@ -162,12 +162,12 @@ func TestTransactionZAttack(t *testing.T) {
var ivpendingNum int
pendingtxs, _ := pool.Content()
for account, txs := range pendingtxs {
- cur_balance := new(big.Int).Set(pool.currentState.GetBalance(account).ToBig())
+ curBalance := new(big.Int).Set(pool.currentState.GetBalance(account).ToBig())
for _, tx := range txs {
- if cur_balance.Cmp(tx.Value()) <= 0 {
+ if curBalance.Cmp(tx.Value()) <= 0 {
ivpendingNum++
} else {
- cur_balance.Sub(cur_balance, tx.Value())
+ curBalance.Sub(curBalance, tx.Value())
}
}
}
diff --git a/core/types/deposit.go b/core/types/deposit.go
index 3bba2c7aa4..8015f29ca7 100644
--- a/core/types/deposit.go
+++ b/core/types/deposit.go
@@ -24,7 +24,7 @@ const (
depositRequestSize = 192
)
-// UnpackIntoDeposit unpacks a serialized DepositEvent.
+// DepositLogToRequest unpacks a serialized DepositEvent.
func DepositLogToRequest(data []byte) ([]byte, error) {
if len(data) != 576 {
return nil, fmt.Errorf("deposit wrong length: want 576, have %d", len(data))
diff --git a/core/types/gen_authorization.go b/core/types/gen_authorization.go
index be5467c50d..57069cbb1f 100644
--- a/core/types/gen_authorization.go
+++ b/core/types/gen_authorization.go
@@ -16,7 +16,7 @@ var _ = (*authorizationMarshaling)(nil)
// MarshalJSON marshals as JSON.
func (s SetCodeAuthorization) MarshalJSON() ([]byte, error) {
type SetCodeAuthorization struct {
- ChainID hexutil.Uint64 `json:"chainId" gencodec:"required"`
+ ChainID hexutil.U256 `json:"chainId" gencodec:"required"`
Address common.Address `json:"address" gencodec:"required"`
Nonce hexutil.Uint64 `json:"nonce" gencodec:"required"`
V hexutil.Uint64 `json:"yParity" gencodec:"required"`
@@ -24,7 +24,7 @@ func (s SetCodeAuthorization) MarshalJSON() ([]byte, error) {
S hexutil.U256 `json:"s" gencodec:"required"`
}
var enc SetCodeAuthorization
- enc.ChainID = hexutil.Uint64(s.ChainID)
+ enc.ChainID = hexutil.U256(s.ChainID)
enc.Address = s.Address
enc.Nonce = hexutil.Uint64(s.Nonce)
enc.V = hexutil.Uint64(s.V)
@@ -36,7 +36,7 @@ func (s SetCodeAuthorization) MarshalJSON() ([]byte, error) {
// UnmarshalJSON unmarshals from JSON.
func (s *SetCodeAuthorization) UnmarshalJSON(input []byte) error {
type SetCodeAuthorization struct {
- ChainID *hexutil.Uint64 `json:"chainId" gencodec:"required"`
+ ChainID *hexutil.U256 `json:"chainId" gencodec:"required"`
Address *common.Address `json:"address" gencodec:"required"`
Nonce *hexutil.Uint64 `json:"nonce" gencodec:"required"`
V *hexutil.Uint64 `json:"yParity" gencodec:"required"`
@@ -50,7 +50,7 @@ func (s *SetCodeAuthorization) UnmarshalJSON(input []byte) error {
if dec.ChainID == nil {
return errors.New("missing required field 'chainId' for SetCodeAuthorization")
}
- s.ChainID = uint64(*dec.ChainID)
+ s.ChainID = uint256.Int(*dec.ChainID)
if dec.Address == nil {
return errors.New("missing required field 'address' for SetCodeAuthorization")
}
diff --git a/core/types/transaction_marshalling.go b/core/types/transaction_marshalling.go
index 993d633c6f..1bbb97a3ec 100644
--- a/core/types/transaction_marshalling.go
+++ b/core/types/transaction_marshalling.go
@@ -155,7 +155,7 @@ func (tx *Transaction) MarshalJSON() ([]byte, error) {
enc.Proofs = itx.Sidecar.Proofs
}
case *SetCodeTx:
- enc.ChainID = (*hexutil.Big)(new(big.Int).SetUint64(itx.ChainID))
+ enc.ChainID = (*hexutil.Big)(itx.ChainID.ToBig())
enc.Nonce = (*hexutil.Uint64)(&itx.Nonce)
enc.To = tx.To()
enc.Gas = (*hexutil.Uint64)(&itx.Gas)
@@ -353,7 +353,11 @@ func (tx *Transaction) UnmarshalJSON(input []byte) error {
if dec.ChainID == nil {
return errors.New("missing required field 'chainId' in transaction")
}
- itx.ChainID = uint256.MustFromBig((*big.Int)(dec.ChainID))
+ var overflow bool
+ itx.ChainID, overflow = uint256.FromBig(dec.ChainID.ToInt())
+ if overflow {
+ return errors.New("'chainId' value overflows uint256")
+ }
if dec.Nonce == nil {
return errors.New("missing required field 'nonce' in transaction")
}
@@ -395,7 +399,6 @@ func (tx *Transaction) UnmarshalJSON(input []byte) error {
itx.BlobHashes = dec.BlobVersionedHashes
// signature R
- var overflow bool
if dec.R == nil {
return errors.New("missing required field 'r' in transaction")
}
@@ -432,7 +435,11 @@ func (tx *Transaction) UnmarshalJSON(input []byte) error {
if dec.ChainID == nil {
return errors.New("missing required field 'chainId' in transaction")
}
- itx.ChainID = dec.ChainID.ToInt().Uint64()
+ var overflow bool
+ itx.ChainID, overflow = uint256.FromBig(dec.ChainID.ToInt())
+ if overflow {
+ return errors.New("'chainId' value overflows uint256")
+ }
if dec.Nonce == nil {
return errors.New("missing required field 'nonce' in transaction")
}
@@ -470,7 +477,6 @@ func (tx *Transaction) UnmarshalJSON(input []byte) error {
itx.AuthList = dec.AuthorizationList
// signature R
- var overflow bool
if dec.R == nil {
return errors.New("missing required field 'r' in transaction")
}
diff --git a/core/types/transaction_signing.go b/core/types/transaction_signing.go
index d72643b4a8..4d70f37bd3 100644
--- a/core/types/transaction_signing.go
+++ b/core/types/transaction_signing.go
@@ -219,7 +219,7 @@ func (s pragueSigner) SignatureValues(tx *Transaction, sig []byte) (R, S, V *big
}
// Check that chain ID of tx matches the signer. We also accept ID zero here,
// because it indicates that the chain ID was not specified in the tx.
- if txdata.ChainID != 0 && new(big.Int).SetUint64(txdata.ChainID).Cmp(s.chainId) != 0 {
+ if txdata.ChainID != nil && txdata.ChainID.CmpBig(s.chainId) != 0 {
return nil, nil, nil, fmt.Errorf("%w: have %d want %d", ErrInvalidChainId, txdata.ChainID, s.chainId)
}
R, S, _ = decodeSignature(sig)
diff --git a/core/types/tx_blob.go b/core/types/tx_blob.go
index ce1f287caa..88251ab957 100644
--- a/core/types/tx_blob.go
+++ b/core/types/tx_blob.go
@@ -47,9 +47,9 @@ type BlobTx struct {
Sidecar *BlobTxSidecar `rlp:"-"`
// Signature values
- V *uint256.Int `json:"v" gencodec:"required"`
- R *uint256.Int `json:"r" gencodec:"required"`
- S *uint256.Int `json:"s" gencodec:"required"`
+ V *uint256.Int
+ R *uint256.Int
+ S *uint256.Int
}
// BlobTxSidecar contains the blobs of a blob transaction.
diff --git a/core/types/tx_dynamic_fee.go b/core/types/tx_dynamic_fee.go
index 8b5b514fde..981755cf70 100644
--- a/core/types/tx_dynamic_fee.go
+++ b/core/types/tx_dynamic_fee.go
@@ -37,9 +37,9 @@ type DynamicFeeTx struct {
AccessList AccessList
// Signature values
- V *big.Int `json:"v" gencodec:"required"`
- R *big.Int `json:"r" gencodec:"required"`
- S *big.Int `json:"s" gencodec:"required"`
+ V *big.Int
+ R *big.Int
+ S *big.Int
}
// copy creates a deep copy of the transaction data and initializes all fields.
diff --git a/core/types/tx_setcode.go b/core/types/tx_setcode.go
index f14ae3bc9d..0fb5362c26 100644
--- a/core/types/tx_setcode.go
+++ b/core/types/tx_setcode.go
@@ -49,7 +49,7 @@ func AddressToDelegation(addr common.Address) []byte {
// SetCodeTx implements the EIP-7702 transaction type which temporarily installs
// the code at the signer's address.
type SetCodeTx struct {
- ChainID uint64
+ ChainID *uint256.Int
Nonce uint64
GasTipCap *uint256.Int // a.k.a. maxPriorityFeePerGas
GasFeeCap *uint256.Int // a.k.a. maxFeePerGas
@@ -61,16 +61,16 @@ type SetCodeTx struct {
AuthList []SetCodeAuthorization
// Signature values
- V *uint256.Int `json:"v" gencodec:"required"`
- R *uint256.Int `json:"r" gencodec:"required"`
- S *uint256.Int `json:"s" gencodec:"required"`
+ V *uint256.Int
+ R *uint256.Int
+ S *uint256.Int
}
//go:generate go run github.com/fjl/gencodec -type SetCodeAuthorization -field-override authorizationMarshaling -out gen_authorization.go
// SetCodeAuthorization is an authorization from an account to deploy code at its address.
type SetCodeAuthorization struct {
- ChainID uint64 `json:"chainId" gencodec:"required"`
+ ChainID uint256.Int `json:"chainId" gencodec:"required"`
Address common.Address `json:"address" gencodec:"required"`
Nonce uint64 `json:"nonce" gencodec:"required"`
V uint8 `json:"yParity" gencodec:"required"`
@@ -80,7 +80,7 @@ type SetCodeAuthorization struct {
// field type overrides for gencodec
type authorizationMarshaling struct {
- ChainID hexutil.Uint64
+ ChainID hexutil.U256
Nonce hexutil.Uint64
V hexutil.Uint64
R hexutil.U256
@@ -180,7 +180,7 @@ func (tx *SetCodeTx) copy() TxData {
// accessors for innerTx.
func (tx *SetCodeTx) txType() byte { return SetCodeTxType }
-func (tx *SetCodeTx) chainID() *big.Int { return big.NewInt(int64(tx.ChainID)) }
+func (tx *SetCodeTx) chainID() *big.Int { return tx.ChainID.ToBig() }
func (tx *SetCodeTx) accessList() AccessList { return tx.AccessList }
func (tx *SetCodeTx) data() []byte { return tx.Data }
func (tx *SetCodeTx) gas() uint64 { return tx.Gas }
@@ -207,7 +207,7 @@ func (tx *SetCodeTx) rawSignatureValues() (v, r, s *big.Int) {
}
func (tx *SetCodeTx) setSignatureValues(chainID, v, r, s *big.Int) {
- tx.ChainID = chainID.Uint64()
+ tx.ChainID = uint256.MustFromBig(chainID)
tx.V.SetFromBig(v)
tx.R.SetFromBig(r)
tx.S.SetFromBig(s)
diff --git a/core/verkle_witness_test.go b/core/verkle_witness_test.go
index 5088231207..02e94963c4 100644
--- a/core/verkle_witness_test.go
+++ b/core/verkle_witness_test.go
@@ -57,6 +57,7 @@ var (
ShanghaiTime: u64(0),
VerkleTime: u64(0),
TerminalTotalDifficulty: common.Big0,
+ EnableVerkleAtGenesis: true,
// TODO uncomment when proof generation is merged
// ProofInBlocks: true,
}
@@ -77,6 +78,7 @@ var (
ShanghaiTime: u64(0),
VerkleTime: u64(0),
TerminalTotalDifficulty: common.Big0,
+ EnableVerkleAtGenesis: true,
}
)
diff --git a/core/vm/eof_validation.go b/core/vm/eof_validation.go
index fa534edce9..514f9fb58c 100644
--- a/core/vm/eof_validation.go
+++ b/core/vm/eof_validation.go
@@ -109,8 +109,8 @@ func validateCode(code []byte, section int, container *Container, jt *JumpTable,
return nil, err
}
case RJUMPV:
- max_size := int(code[i+1])
- length := max_size + 1
+ maxSize := int(code[i+1])
+ length := maxSize + 1
if len(code) <= i+length {
return nil, fmt.Errorf("%w: jump table truncated, op %s, pos %d", errTruncatedImmediate, op, i)
}
@@ -120,7 +120,7 @@ func validateCode(code []byte, section int, container *Container, jt *JumpTable,
return nil, err
}
}
- i += 2 * max_size
+ i += 2 * maxSize
case CALLF:
arg, _ := parseUint16(code[i+1:])
if arg >= len(container.types) {
diff --git a/core/vm/program/program.go b/core/vm/program/program.go
index acc7fd25fc..3b00bbae6f 100644
--- a/core/vm/program/program.go
+++ b/core/vm/program/program.go
@@ -19,6 +19,7 @@
// - There are not package guarantees. We might iterate heavily on this package, and do backwards-incompatible changes without warning
// - There are no quality-guarantees. These utilities may produce evm-code that is non-functional. YMMV.
// - There are no stability-guarantees. The utility will `panic` if the inputs do not align / make sense.
+
package program
import (
@@ -204,7 +205,7 @@ func (p *Program) StaticCall(gas *uint256.Int, address, inOffset, inSize, outOff
return p.Op(vm.STATICCALL)
}
-// StaticCall is a convenience function to make a callcode. If 'gas' is nil, the opcode GAS will
+// CallCode is a convenience function to make a callcode. If 'gas' is nil, the opcode GAS will
// be used to provide all gas.
func (p *Program) CallCode(gas *uint256.Int, address, value, inOffset, inSize, outOffset, outSize any) *Program {
if outOffset == outSize && inSize == outSize && inOffset == outSize {
@@ -263,7 +264,7 @@ func (p *Program) InputAddressToStack(inputOffset uint32) *Program {
return p.Op(vm.AND)
}
-// MStore stores the provided data (into the memory area starting at memStart).
+// Mstore stores the provided data (into the memory area starting at memStart).
func (p *Program) Mstore(data []byte, memStart uint32) *Program {
var idx = 0
// We need to store it in chunks of 32 bytes
diff --git a/crypto/blake2b/blake2b.go b/crypto/blake2b/blake2b.go
index 7ecaab8139..c24a88b99d 100644
--- a/crypto/blake2b/blake2b.go
+++ b/crypto/blake2b/blake2b.go
@@ -23,13 +23,13 @@ import (
)
const (
- // The blocksize of BLAKE2b in bytes.
+ // BlockSize is the blocksize of BLAKE2b in bytes.
BlockSize = 128
- // The hash size of BLAKE2b-512 in bytes.
+ // Size is the hash size of BLAKE2b-512 in bytes.
Size = 64
- // The hash size of BLAKE2b-384 in bytes.
+ // Size384 is the hash size of BLAKE2b-384 in bytes.
Size384 = 48
- // The hash size of BLAKE2b-256 in bytes.
+ // Size256 is the hash size of BLAKE2b-256 in bytes.
Size256 = 32
)
diff --git a/crypto/bn256/gnark/pairing.go b/crypto/bn256/gnark/pairing.go
index 39e8a657f4..439ce0a39d 100644
--- a/crypto/bn256/gnark/pairing.go
+++ b/crypto/bn256/gnark/pairing.go
@@ -4,7 +4,7 @@ import (
"github.com/consensys/gnark-crypto/ecc/bn254"
)
-// Computes the following relation: ∏ᵢ e(Pᵢ, Qᵢ) =? 1
+// PairingCheck computes the following relation: ∏ᵢ e(Pᵢ, Qᵢ) =? 1
//
// To explain why gnark returns a (bool, error):
//
diff --git a/crypto/ecies/ecies.go b/crypto/ecies/ecies.go
index 1b6c9e97c1..76f934c72d 100644
--- a/crypto/ecies/ecies.go
+++ b/crypto/ecies/ecies.go
@@ -60,12 +60,12 @@ type PublicKey struct {
Params *ECIESParams
}
-// Export an ECIES public key as an ECDSA public key.
+// ExportECDSA exports an ECIES public key as an ECDSA public key.
func (pub *PublicKey) ExportECDSA() *ecdsa.PublicKey {
return &ecdsa.PublicKey{Curve: pub.Curve, X: pub.X, Y: pub.Y}
}
-// Import an ECDSA public key as an ECIES public key.
+// ImportECDSAPublic imports an ECDSA public key as an ECIES public key.
func ImportECDSAPublic(pub *ecdsa.PublicKey) *PublicKey {
return &PublicKey{
X: pub.X,
@@ -81,20 +81,20 @@ type PrivateKey struct {
D *big.Int
}
-// Export an ECIES private key as an ECDSA private key.
+// ExportECDSA exports an ECIES private key as an ECDSA private key.
func (prv *PrivateKey) ExportECDSA() *ecdsa.PrivateKey {
pub := &prv.PublicKey
pubECDSA := pub.ExportECDSA()
return &ecdsa.PrivateKey{PublicKey: *pubECDSA, D: prv.D}
}
-// Import an ECDSA private key as an ECIES private key.
+// ImportECDSA imports an ECDSA private key as an ECIES private key.
func ImportECDSA(prv *ecdsa.PrivateKey) *PrivateKey {
pub := ImportECDSAPublic(&prv.PublicKey)
return &PrivateKey{*pub, prv.D}
}
-// Generate an elliptic curve public / private keypair. If params is nil,
+// GenerateKey generates an elliptic curve public / private keypair. If params is nil,
// the recommended default parameters for the key will be chosen.
func GenerateKey(rand io.Reader, curve elliptic.Curve, params *ECIESParams) (prv *PrivateKey, err error) {
sk, err := ecdsa.GenerateKey(curve, rand)
@@ -119,7 +119,7 @@ func MaxSharedKeyLength(pub *PublicKey) int {
return (pub.Curve.Params().BitSize + 7) / 8
}
-// ECDH key agreement method used to establish secret keys for encryption.
+// GenerateShared performs the ECDH key agreement used to establish secret keys for encryption.
func (prv *PrivateKey) GenerateShared(pub *PublicKey, skLen, macLen int) (sk []byte, err error) {
if prv.PublicKey.Curve != pub.Curve {
return nil, ErrInvalidCurve
diff --git a/eth/api_debug_test.go b/eth/api_debug_test.go
index cfb8829b5c..02b85f69fd 100644
--- a/eth/api_debug_test.go
+++ b/eth/api_debug_test.go
@@ -82,7 +82,7 @@ func TestAccountRange(t *testing.T) {
m[addr] = true
}
}
- root, _ := sdb.Commit(0, true)
+ root, _ := sdb.Commit(0, true, false)
sdb, _ = state.New(root, statedb)
trie, err := statedb.OpenTrie(root)
@@ -140,7 +140,7 @@ func TestEmptyAccountRange(t *testing.T) {
st, _ = state.New(types.EmptyRootHash, statedb)
)
// Commit(although nothing to flush) and re-init the statedb
- st.Commit(0, true)
+ st.Commit(0, true, false)
st, _ = state.New(types.EmptyRootHash, statedb)
results := st.RawDump(&state.DumpConfig{
@@ -183,7 +183,7 @@ func TestStorageRangeAt(t *testing.T) {
for _, entry := range storage {
sdb.SetState(addr, *entry.Key, entry.Value)
}
- root, _ := sdb.Commit(0, false)
+ root, _ := sdb.Commit(0, false, false)
sdb, _ = state.New(root, db)
// Check a few combinations of limit and start/end.
diff --git a/eth/catalyst/api.go b/eth/catalyst/api.go
index 3e45ad9e4f..91b6511f71 100644
--- a/eth/catalyst/api.go
+++ b/eth/catalyst/api.go
@@ -638,6 +638,9 @@ func (api *ConsensusAPI) NewPayloadV4(params engine.ExecutableData, versionedHas
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.UnsupportedFork.With(errors.New("newPayloadV4 must only be called for prague payloads"))
}
requests := convertRequests(executionRequests)
+ if err := validateRequests(requests); err != nil {
+ return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(err)
+ }
return api.newPayload(params, versionedHashes, beaconRoot, requests, false)
}
@@ -727,6 +730,9 @@ func (api *ConsensusAPI) NewPayloadWithWitnessV4(params engine.ExecutableData, v
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.UnsupportedFork.With(errors.New("newPayloadWithWitnessV4 must only be called for prague payloads"))
}
requests := convertRequests(executionRequests)
+ if err := validateRequests(requests); err != nil {
+ return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(err)
+ }
return api.newPayload(params, versionedHashes, beaconRoot, requests, true)
}
@@ -1287,3 +1293,20 @@ func convertRequests(hex []hexutil.Bytes) [][]byte {
}
return req
}
+
+// validateRequests checks that requests are ordered by their type and are not empty.
+func validateRequests(requests [][]byte) error {
+ var last byte
+ for _, req := range requests {
+ // No empty requests.
+ if len(req) < 2 {
+ return fmt.Errorf("empty request: %v", req)
+ }
+ // Check that requests are ordered by their type.
+ if req[0] < last {
+ return fmt.Errorf("invalid request order: %v", req)
+ }
+ last = req[0]
+ }
+ return nil
+}
diff --git a/eth/gasprice/gasprice.go b/eth/gasprice/gasprice.go
index fe2e4d408a..4fd3df7428 100644
--- a/eth/gasprice/gasprice.go
+++ b/eth/gasprice/gasprice.go
@@ -120,16 +120,23 @@ func NewOracle(backend OracleBackend, params Config, startPrice *big.Int) *Oracl
cache := lru.NewCache[cacheKey, processedFees](2048)
headEvent := make(chan core.ChainHeadEvent, 1)
- backend.SubscribeChainHeadEvent(headEvent)
- go func() {
- var lastHead common.Hash
- for ev := range headEvent {
- if ev.Header.ParentHash != lastHead {
- cache.Purge()
+ sub := backend.SubscribeChainHeadEvent(headEvent)
+ if sub != nil { // the gasprice testBackend doesn't support subscribing to head events
+ go func() {
+ var lastHead common.Hash
+ for {
+ select {
+ case ev := <-headEvent:
+ if ev.Header.ParentHash != lastHead {
+ cache.Purge()
+ }
+ lastHead = ev.Header.Hash()
+ case <-sub.Err():
+ return
+ }
}
- lastHead = ev.Header.Hash()
- }
- }()
+ }()
+ }
return &Oracle{
backend: backend,
diff --git a/eth/state_accessor.go b/eth/state_accessor.go
index 0749d73791..99ed28d96a 100644
--- a/eth/state_accessor.go
+++ b/eth/state_accessor.go
@@ -152,7 +152,7 @@ func (eth *Ethereum) hashState(ctx context.Context, block *types.Block, reexec u
return nil, nil, fmt.Errorf("processing block %d failed: %v", current.NumberU64(), err)
}
// Finalize the state so any modifications are written to the trie
- root, err := statedb.Commit(current.NumberU64(), eth.blockchain.Config().IsEIP158(current.Number()))
+ root, err := statedb.Commit(current.NumberU64(), eth.blockchain.Config().IsEIP158(current.Number()), eth.blockchain.Config().IsCancun(current.Number(), current.Time()))
if err != nil {
return nil, nil, fmt.Errorf("stateAtBlock commit failed, number %d root %v: %w",
current.NumberU64(), current.Root().Hex(), err)
diff --git a/eth/tracers/dir.go b/eth/tracers/dir.go
index 55bcb44d23..1cdfab5454 100644
--- a/eth/tracers/dir.go
+++ b/eth/tracers/dir.go
@@ -34,7 +34,7 @@ type Context struct {
TxHash common.Hash // Hash of the transaction being traced (zero if dangling call)
}
-// The set of methods that must be exposed by a tracer
+// Tracer represents the set of methods that must be exposed by a tracer
// for it to be available through the RPC interface.
// This involves a method to retrieve results and one to
// stop tracing.
diff --git a/eth/tracers/internal/util.go b/eth/tracers/internal/util.go
index 347af43d51..cff6295566 100644
--- a/eth/tracers/internal/util.go
+++ b/eth/tracers/internal/util.go
@@ -13,6 +13,7 @@
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see .
+
package internal
import (
diff --git a/ethclient/simulated/backend_test.go b/ethclient/simulated/backend_test.go
index 37f4dbbf7b..8efe93e243 100644
--- a/ethclient/simulated/backend_test.go
+++ b/ethclient/simulated/backend_test.go
@@ -19,11 +19,15 @@ package simulated
import (
"context"
"crypto/ecdsa"
+ "crypto/sha256"
"math/big"
"math/rand"
"testing"
"time"
+ "github.com/ethereum/go-ethereum/crypto/kzg4844"
+ "github.com/holiman/uint256"
+
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
@@ -34,8 +38,10 @@ import (
var _ bind.ContractBackend = (Client)(nil)
var (
- testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- testAddr = crypto.PubkeyToAddress(testKey.PublicKey)
+ testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+ testAddr = crypto.PubkeyToAddress(testKey.PublicKey)
+ testKey2, _ = crypto.HexToECDSA("7ee346e3f7efc685250053bfbafbfc880d58dc6145247053d4fb3cb0f66dfcb2")
+ testAddr2 = crypto.PubkeyToAddress(testKey2.PublicKey)
)
func simTestBackend(testAddr common.Address) *Backend {
@@ -46,6 +52,46 @@ func simTestBackend(testAddr common.Address) *Backend {
)
}
+func newBlobTx(sim *Backend, key *ecdsa.PrivateKey) (*types.Transaction, error) {
+ client := sim.Client()
+
+ testBlob := &kzg4844.Blob{0x00}
+ testBlobCommit, _ := kzg4844.BlobToCommitment(testBlob)
+ testBlobProof, _ := kzg4844.ComputeBlobProof(testBlob, testBlobCommit)
+ testBlobVHash := kzg4844.CalcBlobHashV1(sha256.New(), &testBlobCommit)
+
+ head, _ := client.HeaderByNumber(context.Background(), nil) // Should be child's, good enough
+ gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(params.GWei))
+ gasPriceU256, _ := uint256.FromBig(gasPrice)
+ gasTipCapU256, _ := uint256.FromBig(big.NewInt(params.GWei))
+
+ addr := crypto.PubkeyToAddress(key.PublicKey)
+ chainid, _ := client.ChainID(context.Background())
+ nonce, err := client.PendingNonceAt(context.Background(), addr)
+ if err != nil {
+ return nil, err
+ }
+
+ chainidU256, _ := uint256.FromBig(chainid)
+ tx := types.NewTx(&types.BlobTx{
+ ChainID: chainidU256,
+ GasTipCap: gasTipCapU256,
+ GasFeeCap: gasPriceU256,
+ BlobFeeCap: uint256.NewInt(1),
+ Gas: 21000,
+ Nonce: nonce,
+ To: addr,
+ AccessList: nil,
+ BlobHashes: []common.Hash{testBlobVHash},
+ Sidecar: &types.BlobTxSidecar{
+ Blobs: []kzg4844.Blob{*testBlob},
+ Commitments: []kzg4844.Commitment{testBlobCommit},
+ Proofs: []kzg4844.Proof{testBlobProof},
+ },
+ })
+ return types.SignTx(tx, types.LatestSignerForChainID(chainid), key)
+}
+
func newTx(sim *Backend, key *ecdsa.PrivateKey) (*types.Transaction, error) {
client := sim.Client()
@@ -66,6 +112,7 @@ func newTx(sim *Backend, key *ecdsa.PrivateKey) (*types.Transaction, error) {
Gas: 21000,
To: &addr,
})
+
return types.SignTx(tx, types.LatestSignerForChainID(chainid), key)
}
diff --git a/ethclient/simulated/rollback_test.go b/ethclient/simulated/rollback_test.go
new file mode 100644
index 0000000000..8fc9f5bc86
--- /dev/null
+++ b/ethclient/simulated/rollback_test.go
@@ -0,0 +1,102 @@
+package simulated
+
+import (
+ "context"
+ "crypto/ecdsa"
+ "math/big"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/core/types"
+)
+
+// TestTransactionRollbackBehavior tests that calling Rollback on the simulated backend doesn't prevent subsequent
+// addition of new transactions
+func TestTransactionRollbackBehavior(t *testing.T) {
+ sim := NewBackend(
+ types.GenesisAlloc{
+ testAddr: {Balance: big.NewInt(10000000000000000)},
+ testAddr2: {Balance: big.NewInt(10000000000000000)},
+ },
+ )
+ defer sim.Close()
+ client := sim.Client()
+
+ btx0 := testSendSignedTx(t, testKey, sim, true)
+ tx0 := testSendSignedTx(t, testKey2, sim, false)
+ tx1 := testSendSignedTx(t, testKey2, sim, false)
+
+ sim.Rollback()
+
+ if pendingStateHasTx(client, btx0) || pendingStateHasTx(client, tx0) || pendingStateHasTx(client, tx1) {
+ t.Fatalf("all transactions were not rolled back")
+ }
+
+ btx2 := testSendSignedTx(t, testKey, sim, true)
+ tx2 := testSendSignedTx(t, testKey2, sim, false)
+ tx3 := testSendSignedTx(t, testKey2, sim, false)
+
+ sim.Commit()
+
+ if !pendingStateHasTx(client, btx2) || !pendingStateHasTx(client, tx2) || !pendingStateHasTx(client, tx3) {
+ t.Fatalf("all post-rollback transactions were not included")
+ }
+}
+
+// testSendSignedTx sends a signed transaction to the simulated backend.
+// It does not commit the block.
+func testSendSignedTx(t *testing.T, key *ecdsa.PrivateKey, sim *Backend, isBlobTx bool) *types.Transaction {
+ t.Helper()
+ client := sim.Client()
+ ctx := context.Background()
+
+ var (
+ err error
+ signedTx *types.Transaction
+ )
+ if isBlobTx {
+ signedTx, err = newBlobTx(sim, key)
+ } else {
+ signedTx, err = newTx(sim, key)
+ }
+ if err != nil {
+ t.Fatalf("failed to create transaction: %v", err)
+ }
+
+ if err = client.SendTransaction(ctx, signedTx); err != nil {
+ t.Fatalf("failed to send transaction: %v", err)
+ }
+
+ return signedTx
+}
+
+// pendingStateHasTx returns true if a given transaction was successfully included as of the latest pending state.
+func pendingStateHasTx(client Client, tx *types.Transaction) bool {
+ ctx := context.Background()
+
+ var (
+ receipt *types.Receipt
+ err error
+ )
+
+ // Poll for receipt with timeout
+ deadline := time.Now().Add(2 * time.Second)
+ for time.Now().Before(deadline) {
+ receipt, err = client.TransactionReceipt(ctx, tx.Hash())
+ if err == nil && receipt != nil {
+ break
+ }
+ time.Sleep(100 * time.Millisecond)
+ }
+
+ if err != nil {
+ return false
+ }
+ if receipt == nil {
+ return false
+ }
+ if receipt.Status != types.ReceiptStatusSuccessful {
+ return false
+ }
+ return true
+}
diff --git a/internal/era/builder.go b/internal/era/builder.go
index 75782a08c2..33261555ba 100644
--- a/internal/era/builder.go
+++ b/internal/era/builder.go
@@ -12,7 +12,8 @@
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
-// along with go-ethereum. If not, see .
+// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+
package era
import (
diff --git a/internal/ethapi/transaction_args.go b/internal/ethapi/transaction_args.go
index a39a6666f4..175ac13a0f 100644
--- a/internal/ethapi/transaction_args.go
+++ b/internal/ethapi/transaction_args.go
@@ -503,7 +503,7 @@ func (args *TransactionArgs) ToTransaction(defaultType int) *types.Transaction {
}
data = &types.SetCodeTx{
To: *args.To,
- ChainID: args.ChainID.ToInt().Uint64(),
+ ChainID: uint256.MustFromBig(args.ChainID.ToInt()),
Nonce: uint64(*args.Nonce),
Gas: uint64(*args.Gas),
GasFeeCap: uint256.MustFromBig((*big.Int)(args.MaxFeePerGas)),
diff --git a/internal/jsre/jsre.go b/internal/jsre/jsre.go
index f6e21d2ef7..0dfeae8e1b 100644
--- a/internal/jsre/jsre.go
+++ b/internal/jsre/jsre.go
@@ -67,7 +67,19 @@ type evalReq struct {
done chan bool
}
-// runtime must be stopped with Stop() after use and cannot be used after stopping
+// New creates and initializes a new JavaScript runtime environment (JSRE).
+// The runtime is configured with the provided assetPath for loading scripts and
+// an output writer for logging or printing results.
+//
+// The returned JSRE must be stopped by calling Stop() after use to release resources.
+// Attempting to use the JSRE after stopping it will result in undefined behavior.
+//
+// Parameters:
+// - assetPath: The path to the directory containing script assets.
+// - output: The writer used for logging or printing runtime output.
+//
+// Returns:
+// - A pointer to the newly created JSRE instance.
func New(assetPath string, output io.Writer) *JSRE {
re := &JSRE{
assetPath: assetPath,
@@ -251,8 +263,15 @@ func (re *JSRE) Stop(waitForCallbacks bool) {
}
}
-// Exec(file) loads and runs the contents of a file
-// if a relative path is given, the jsre's assetPath is used
+// Exec loads and executes the contents of a JavaScript file.
+// If a relative path is provided, the file is resolved relative to the JSRE's assetPath.
+// The file is read, compiled, and executed in the JSRE's runtime environment.
+//
+// Parameters:
+// - file: The path to the JavaScript file to execute. Can be an absolute path or relative to assetPath.
+//
+// Returns:
+// - error: An error if the file cannot be read, compiled, or executed.
func (re *JSRE) Exec(file string) error {
code, err := os.ReadFile(common.AbsolutePath(re.assetPath, file))
if err != nil {
diff --git a/internal/reexec/reexec.go b/internal/reexec/reexec.go
index af8d347986..7dc6d9222e 100644
--- a/internal/reexec/reexec.go
+++ b/internal/reexec/reexec.go
@@ -7,6 +7,7 @@
// we require because of the forking limitations of using Go. Handlers can be
// registered with a name and the argv 0 of the exec of the binary will be used
// to find and execute custom init paths.
+
package reexec
import (
diff --git a/metrics/exp/exp.go b/metrics/exp/exp.go
index 5213979aa2..85cabca6b1 100644
--- a/metrics/exp/exp.go
+++ b/metrics/exp/exp.go
@@ -1,5 +1,6 @@
// Hook go-metrics into expvar
// on any /debug/metrics request, load all vars from the registry into expvar, and execute regular expvar handler
+
package exp
import (
diff --git a/metrics/gauge_info.go b/metrics/gauge_info.go
index 2f78455649..1862ed55c5 100644
--- a/metrics/gauge_info.go
+++ b/metrics/gauge_info.go
@@ -39,7 +39,7 @@ func NewRegisteredGaugeInfo(name string, r Registry) *GaugeInfo {
return c
}
-// gaugeInfoSnapshot is a read-only copy of another GaugeInfo.
+// GaugeInfoSnapshot is a read-only copy of another GaugeInfo.
type GaugeInfoSnapshot GaugeInfoValue
// Value returns the value at the time the snapshot was taken.
diff --git a/metrics/log.go b/metrics/log.go
index 3380bbf9c4..08f3effb81 100644
--- a/metrics/log.go
+++ b/metrics/log.go
@@ -12,7 +12,7 @@ func Log(r Registry, freq time.Duration, l Logger) {
LogScaled(r, freq, time.Nanosecond, l)
}
-// Output each metric in the given registry periodically using the given
+// LogScaled outputs each metric in the given registry periodically using the given
// logger. Print timings in `scale` units (eg time.Millisecond) rather than nanos.
func LogScaled(r Registry, freq time.Duration, scale time.Duration, l Logger) {
du := float64(scale)
diff --git a/metrics/metrics.go b/metrics/metrics.go
index a9d6623173..c4c43b7576 100644
--- a/metrics/metrics.go
+++ b/metrics/metrics.go
@@ -3,6 +3,7 @@
//
//
// Coda Hale's original work:
+
package metrics
import (
diff --git a/metrics/resetting_timer.go b/metrics/resetting_timer.go
index 1b3e87bc3d..66458bdb91 100644
--- a/metrics/resetting_timer.go
+++ b/metrics/resetting_timer.go
@@ -53,14 +53,14 @@ func (t *ResettingTimer) Snapshot() *ResettingTimerSnapshot {
return snapshot
}
-// Record the duration of the execution of the given function.
+// Time records the duration of the execution of the given function.
func (t *ResettingTimer) Time(f func()) {
ts := time.Now()
f()
t.Update(time.Since(ts))
}
-// Record the duration of an event.
+// Update records the duration of an event.
func (t *ResettingTimer) Update(d time.Duration) {
if !metricsEnabled {
return
@@ -71,7 +71,7 @@ func (t *ResettingTimer) Update(d time.Duration) {
t.sum += int64(d)
}
-// Record the duration of an event that started at a time and ends now.
+// UpdateSince records the duration of an event that started at a time and ends now.
func (t *ResettingTimer) UpdateSince(ts time.Time) {
t.Update(time.Since(ts))
}
diff --git a/metrics/syslog.go b/metrics/syslog.go
index 0bc4ed0da5..b265328f87 100644
--- a/metrics/syslog.go
+++ b/metrics/syslog.go
@@ -9,7 +9,7 @@ import (
"time"
)
-// Output each metric in the given registry to syslog periodically using
+// Syslog outputs each metric in the given registry to syslog periodically using
// the given syslogger.
func Syslog(r Registry, d time.Duration, w *syslog.Writer) {
for range time.Tick(d) {
diff --git a/miner/miner_test.go b/miner/miner_test.go
index b92febdd12..04d84e2e1d 100644
--- a/miner/miner_test.go
+++ b/miner/miner_test.go
@@ -145,7 +145,7 @@ func createMiner(t *testing.T) *Miner {
chainDB := rawdb.NewMemoryDatabase()
triedb := triedb.NewDatabase(chainDB, nil)
genesis := minerTestGenesisBlock(15, 11_500_000, common.HexToAddress("12345"))
- chainConfig, _, err := core.SetupGenesisBlock(chainDB, triedb, genesis)
+ chainConfig, _, _, err := core.SetupGenesisBlock(chainDB, triedb, genesis)
if err != nil {
t.Fatalf("can't create new chain config: %v", err)
}
diff --git a/p2p/discover/v5wire/encoding_test.go b/p2p/discover/v5wire/encoding_test.go
index c66a0da9d3..df97e40e89 100644
--- a/p2p/discover/v5wire/encoding_test.go
+++ b/p2p/discover/v5wire/encoding_test.go
@@ -249,20 +249,20 @@ func TestHandshake_BadHandshakeAttack(t *testing.T) {
net.nodeA.expectDecode(t, WhoareyouPacket, whoareyou)
// A -> B FINDNODE
- incorrect_challenge := &Whoareyou{
+ incorrectChallenge := &Whoareyou{
IDNonce: [16]byte{5, 6, 7, 8, 9, 6, 11, 12},
RecordSeq: challenge.RecordSeq,
Node: challenge.Node,
sent: challenge.sent,
}
- incorrect_findnode, _ := net.nodeA.encodeWithChallenge(t, net.nodeB, incorrect_challenge, &Findnode{})
- incorrect_findnode2 := make([]byte, len(incorrect_findnode))
- copy(incorrect_findnode2, incorrect_findnode)
+ incorrectFindNode, _ := net.nodeA.encodeWithChallenge(t, net.nodeB, incorrectChallenge, &Findnode{})
+ incorrectFindNode2 := make([]byte, len(incorrectFindNode))
+ copy(incorrectFindNode2, incorrectFindNode)
- net.nodeB.expectDecodeErr(t, errInvalidNonceSig, incorrect_findnode)
+ net.nodeB.expectDecodeErr(t, errInvalidNonceSig, incorrectFindNode)
// Reject new findnode as previous handshake is now deleted.
- net.nodeB.expectDecodeErr(t, errUnexpectedHandshake, incorrect_findnode2)
+ net.nodeB.expectDecodeErr(t, errUnexpectedHandshake, incorrectFindNode2)
// The findnode packet is again rejected even with a valid challenge this time.
findnode, _ := net.nodeA.encodeWithChallenge(t, net.nodeB, challenge, &Findnode{})
diff --git a/params/config.go b/params/config.go
index 9b3b92484a..f1e139608c 100644
--- a/params/config.go
+++ b/params/config.go
@@ -326,6 +326,19 @@ type ChainConfig struct {
DepositContractAddress common.Address `json:"depositContractAddress,omitempty"`
+ // EnableVerkleAtGenesis is a flag that specifies whether the network uses
+ // the Verkle tree starting from the genesis block. If set to true, the
+ // genesis state will be committed using the Verkle tree, eliminating the
+ // need for any Verkle transition later.
+ //
+ // This is a temporary flag only for verkle devnet testing, where verkle is
+ // activated at genesis, and the configured activation date has already passed.
+ //
+ // In production networks (mainnet and public testnets), verkle activation
+ // always occurs after the genesis block, making this flag irrelevant in
+ // those cases.
+ EnableVerkleAtGenesis bool `json:"enableVerkleAtGenesis,omitempty"`
+
// Various consensus engines
Ethash *EthashConfig `json:"ethash,omitempty"`
Clique *CliqueConfig `json:"clique,omitempty"`
@@ -525,6 +538,20 @@ func (c *ChainConfig) IsVerkle(num *big.Int, time uint64) bool {
return c.IsLondon(num) && isTimestampForked(c.VerkleTime, time)
}
+// IsVerkleGenesis checks whether the verkle fork is activated at the genesis block.
+//
+// Verkle mode is considered enabled if the verkle fork time is configured,
+// regardless of whether the local time has surpassed the fork activation time.
+// This is a temporary workaround for verkle devnet testing, where verkle is
+// activated at genesis, and the configured activation date has already passed.
+//
+// In production networks (mainnet and public testnets), verkle activation
+// always occurs after the genesis block, making this function irrelevant in
+// those cases.
+func (c *ChainConfig) IsVerkleGenesis() bool {
+ return c.EnableVerkleAtGenesis
+}
+
// IsEIP4762 returns whether eip 4762 has been activated at given block.
func (c *ChainConfig) IsEIP4762(num *big.Int, time uint64) bool {
return c.IsVerkle(num, time)
diff --git a/params/protocol_params.go b/params/protocol_params.go
index b46e8d66b2..030083aa9a 100644
--- a/params/protocol_params.go
+++ b/params/protocol_params.go
@@ -179,7 +179,7 @@ const (
HistoryServeWindow = 8192 // Number of blocks to serve historical block hashes for, EIP-2935.
)
-// Gas discount table for BLS12-381 G1 and G2 multi exponentiation operations
+// Bls12381MultiExpDiscountTable is the gas discount table for BLS12-381 G1 and G2 multi exponentiation operations
var Bls12381MultiExpDiscountTable = [128]uint64{1200, 888, 764, 641, 594, 547, 500, 453, 438, 423, 408, 394, 379, 364, 349, 334, 330, 326, 322, 318, 314, 310, 306, 302, 298, 294, 289, 285, 281, 277, 273, 269, 268, 266, 265, 263, 262, 260, 259, 257, 256, 254, 253, 251, 250, 248, 247, 245, 244, 242, 241, 239, 238, 236, 235, 233, 232, 231, 229, 228, 226, 225, 223, 222, 221, 220, 219, 219, 218, 217, 216, 216, 215, 214, 213, 213, 212, 211, 211, 210, 209, 208, 208, 207, 206, 205, 205, 204, 203, 202, 202, 201, 200, 199, 199, 198, 197, 196, 196, 195, 194, 193, 193, 192, 191, 191, 190, 189, 188, 188, 187, 186, 185, 185, 184, 183, 182, 182, 181, 180, 179, 179, 178, 177, 176, 176, 175, 174}
// Difficulty parameters.
diff --git a/signer/core/api.go b/signer/core/api.go
index def2d6041f..12acf925f0 100644
--- a/signer/core/api.go
+++ b/signer/core/api.go
@@ -664,7 +664,7 @@ func (api *SignerAPI) SignGnosisSafeTx(ctx context.Context, signerAddress common
return &gnosisTx, nil
}
-// Returns the external api version. This method does not require user acceptance. Available methods are
+// Version returns the external api version. This method does not require user acceptance. Available methods are
// available via enumeration anyway, and this info does not contain user-specific data
func (api *SignerAPI) Version(ctx context.Context) (string, error) {
return ExternalAPIVersion, nil
diff --git a/signer/core/uiapi.go b/signer/core/uiapi.go
index 43edfe7d97..2f511c7e19 100644
--- a/signer/core/uiapi.go
+++ b/signer/core/uiapi.go
@@ -48,7 +48,7 @@ func NewUIServerAPI(extapi *SignerAPI) *UIServerAPI {
return &UIServerAPI{extapi, extapi.am}
}
-// List available accounts. As opposed to the external API definition, this method delivers
+// ListAccounts lists available accounts. As opposed to the external API definition, this method delivers
// the full Account object and not only Address.
// Example call
// {"jsonrpc":"2.0","method":"clef_listAccounts","params":[], "id":4}
diff --git a/tests/gen_stauthorization.go b/tests/gen_stauthorization.go
index fbafd6fdea..4f2c50bd9f 100644
--- a/tests/gen_stauthorization.go
+++ b/tests/gen_stauthorization.go
@@ -16,7 +16,7 @@ var _ = (*stAuthorizationMarshaling)(nil)
// MarshalJSON marshals as JSON.
func (s stAuthorization) MarshalJSON() ([]byte, error) {
type stAuthorization struct {
- ChainID math.HexOrDecimal64
+ ChainID *math.HexOrDecimal256 `json:"chainId" gencodec:"required"`
Address common.Address `json:"address" gencodec:"required"`
Nonce math.HexOrDecimal64 `json:"nonce" gencodec:"required"`
V math.HexOrDecimal64 `json:"v" gencodec:"required"`
@@ -24,7 +24,7 @@ func (s stAuthorization) MarshalJSON() ([]byte, error) {
S *math.HexOrDecimal256 `json:"s" gencodec:"required"`
}
var enc stAuthorization
- enc.ChainID = math.HexOrDecimal64(s.ChainID)
+ enc.ChainID = (*math.HexOrDecimal256)(s.ChainID)
enc.Address = s.Address
enc.Nonce = math.HexOrDecimal64(s.Nonce)
enc.V = math.HexOrDecimal64(s.V)
@@ -36,7 +36,7 @@ func (s stAuthorization) MarshalJSON() ([]byte, error) {
// UnmarshalJSON unmarshals from JSON.
func (s *stAuthorization) UnmarshalJSON(input []byte) error {
type stAuthorization struct {
- ChainID *math.HexOrDecimal64
+ ChainID *math.HexOrDecimal256 `json:"chainId" gencodec:"required"`
Address *common.Address `json:"address" gencodec:"required"`
Nonce *math.HexOrDecimal64 `json:"nonce" gencodec:"required"`
V *math.HexOrDecimal64 `json:"v" gencodec:"required"`
@@ -47,9 +47,10 @@ func (s *stAuthorization) UnmarshalJSON(input []byte) error {
if err := json.Unmarshal(input, &dec); err != nil {
return err
}
- if dec.ChainID != nil {
- s.ChainID = uint64(*dec.ChainID)
+ if dec.ChainID == nil {
+ return errors.New("missing required field 'chainId' for stAuthorization")
}
+ s.ChainID = (*big.Int)(dec.ChainID)
if dec.Address == nil {
return errors.New("missing required field 'address' for stAuthorization")
}
diff --git a/tests/state_test_util.go b/tests/state_test_util.go
index 6e66bbaa72..e658b62ebf 100644
--- a/tests/state_test_util.go
+++ b/tests/state_test_util.go
@@ -140,7 +140,7 @@ type stTransactionMarshaling struct {
// Authorization is an authorization from an account to deploy code at it's address.
type stAuthorization struct {
- ChainID uint64
+ ChainID *big.Int `json:"chainId" gencodec:"required"`
Address common.Address `json:"address" gencodec:"required"`
Nonce uint64 `json:"nonce" gencodec:"required"`
V uint8 `json:"v" gencodec:"required"`
@@ -150,7 +150,7 @@ type stAuthorization struct {
// field type overrides for gencodec
type stAuthorizationMarshaling struct {
- ChainID math.HexOrDecimal64
+ ChainID *math.HexOrDecimal256
Nonce math.HexOrDecimal64
V math.HexOrDecimal64
R *math.HexOrDecimal256
@@ -339,7 +339,7 @@ func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapsh
st.StateDB.AddBalance(block.Coinbase(), new(uint256.Int), tracing.BalanceChangeUnspecified)
// Commit state mutations into database.
- root, _ = st.StateDB.Commit(block.NumberU64(), config.IsEIP158(block.Number()))
+ root, _ = st.StateDB.Commit(block.NumberU64(), config.IsEIP158(block.Number()), config.IsCancun(block.Number(), block.Time()))
if tracer := evm.Config.Tracer; tracer != nil && tracer.OnTxEnd != nil {
receipt := &types.Receipt{GasUsed: vmRet.UsedGas}
tracer.OnTxEnd(receipt, nil)
@@ -446,7 +446,7 @@ func (tx *stTransaction) toMessage(ps stPostState, baseFee *big.Int) (*core.Mess
authList = make([]types.SetCodeAuthorization, len(tx.AuthorizationList))
for i, auth := range tx.AuthorizationList {
authList[i] = types.SetCodeAuthorization{
- ChainID: auth.ChainID,
+ ChainID: *uint256.MustFromBig(auth.ChainID),
Address: auth.Address,
Nonce: auth.Nonce,
V: auth.V,
@@ -512,7 +512,7 @@ func MakePreState(db ethdb.Database, accounts types.GenesisAlloc, snapshotter bo
}
}
// Commit and re-open to start with a clean state.
- root, _ := statedb.Commit(0, false)
+ root, _ := statedb.Commit(0, false, false)
// If snapshot is requested, initialize the snapshotter and use it in state.
var snaps *snapshot.Tree
diff --git a/triedb/database.go b/triedb/database.go
index b448d7cd07..f8ccc5ad33 100644
--- a/triedb/database.go
+++ b/triedb/database.go
@@ -64,10 +64,6 @@ type backend interface {
// state. An error will be returned if the specified state is not available.
StateReader(root common.Hash) (database.StateReader, error)
- // Initialized returns an indicator if the state data is already initialized
- // according to the state scheme.
- Initialized(genesisRoot common.Hash) bool
-
// Size returns the current storage size of the diff layers on top of the
// disk layer and the storage size of the nodes cached in the disk layer.
//
@@ -178,12 +174,6 @@ func (db *Database) Size() (common.StorageSize, common.StorageSize, common.Stora
return diffs, nodes, preimages
}
-// Initialized returns an indicator if the state data is already initialized
-// according to the state scheme.
-func (db *Database) Initialized(genesisRoot common.Hash) bool {
- return db.backend.Initialized(genesisRoot)
-}
-
// Scheme returns the node scheme used in the database.
func (db *Database) Scheme() string {
if db.config.PathDB != nil {
diff --git a/triedb/hashdb/database.go b/triedb/hashdb/database.go
index fb718f4e74..38392aa519 100644
--- a/triedb/hashdb/database.go
+++ b/triedb/hashdb/database.go
@@ -532,12 +532,6 @@ func (c *cleaner) Delete(key []byte) error {
panic("not implemented")
}
-// Initialized returns an indicator if state data is already initialized
-// in hash-based scheme by checking the presence of genesis state.
-func (db *Database) Initialized(genesisRoot common.Hash) bool {
- return rawdb.HasLegacyTrieNode(db.diskdb, genesisRoot)
-}
-
// Update inserts the dirty nodes in provided nodeset into database and link the
// account trie with multiple storage tries if necessary.
func (db *Database) Update(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet) error {
diff --git a/triedb/pathdb/buffer.go b/triedb/pathdb/buffer.go
index 68e136f193..dea8875bda 100644
--- a/triedb/pathdb/buffer.go
+++ b/triedb/pathdb/buffer.go
@@ -46,7 +46,7 @@ func newBuffer(limit int, nodes *nodeSet, states *stateSet, layers uint64) *buff
nodes = newNodeSet(nil)
}
if states == nil {
- states = newStates(nil, nil)
+ states = newStates(nil, nil, false)
}
return &buffer{
layers: layers,
diff --git a/triedb/pathdb/database.go b/triedb/pathdb/database.go
index c31f1d44f4..b0d84eb879 100644
--- a/triedb/pathdb/database.go
+++ b/triedb/pathdb/database.go
@@ -529,21 +529,6 @@ func (db *Database) Size() (diffs common.StorageSize, nodes common.StorageSize)
return diffs, nodes
}
-// Initialized returns an indicator if the state data is already
-// initialized in path-based scheme.
-func (db *Database) Initialized(genesisRoot common.Hash) bool {
- var inited bool
- db.tree.forEach(func(layer layer) {
- if layer.rootHash() != types.EmptyRootHash {
- inited = true
- }
- })
- if !inited {
- inited = rawdb.ReadSnapSyncStatusFlag(db.diskdb) != rawdb.StateSyncUnknown
- }
- return inited
-}
-
// modifyAllowed returns the indicator if mutation is allowed. This function
// assumes the db.lock is already held.
func (db *Database) modifyAllowed() error {
diff --git a/triedb/pathdb/database_test.go b/triedb/pathdb/database_test.go
index a6b1d3c045..f4b3fcec23 100644
--- a/triedb/pathdb/database_test.go
+++ b/triedb/pathdb/database_test.go
@@ -91,29 +91,47 @@ func newCtx(stateRoot common.Hash) *genctx {
}
}
+func (ctx *genctx) storageOriginSet(rawStorageKey bool, t *tester) map[common.Address]map[common.Hash][]byte {
+ if !rawStorageKey {
+ return ctx.storageOrigin
+ }
+ set := make(map[common.Address]map[common.Hash][]byte)
+ for addr, storage := range ctx.storageOrigin {
+ subset := make(map[common.Hash][]byte)
+ for hash, val := range storage {
+ key := t.hashPreimage(hash)
+ subset[key] = val
+ }
+ set[addr] = subset
+ }
+ return set
+}
+
type tester struct {
db *Database
roots []common.Hash
- preimages map[common.Hash]common.Address
- accounts map[common.Hash][]byte
- storages map[common.Hash]map[common.Hash][]byte
+ preimages map[common.Hash][]byte
+
+ // current state set
+ accounts map[common.Hash][]byte
+ storages map[common.Hash]map[common.Hash][]byte
// state snapshots
snapAccounts map[common.Hash]map[common.Hash][]byte
snapStorages map[common.Hash]map[common.Hash]map[common.Hash][]byte
}
-func newTester(t *testing.T, historyLimit uint64) *tester {
+func newTester(t *testing.T, historyLimit uint64, isVerkle bool) *tester {
var (
disk, _ = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
db = New(disk, &Config{
StateHistory: historyLimit,
CleanCacheSize: 16 * 1024,
WriteBufferSize: 16 * 1024,
- }, false)
+ }, isVerkle)
obj = &tester{
db: db,
- preimages: make(map[common.Hash]common.Address),
+ preimages: make(map[common.Hash][]byte),
accounts: make(map[common.Hash][]byte),
storages: make(map[common.Hash]map[common.Hash][]byte),
snapAccounts: make(map[common.Hash]map[common.Hash][]byte),
@@ -125,7 +143,8 @@ func newTester(t *testing.T, historyLimit uint64) *tester {
if len(obj.roots) != 0 {
parent = obj.roots[len(obj.roots)-1]
}
- root, nodes, states := obj.generate(parent)
+ root, nodes, states := obj.generate(parent, i > 6)
+
if err := db.Update(root, parent, uint64(i), nodes, states); err != nil {
panic(fmt.Errorf("failed to update state changes, err: %w", err))
}
@@ -134,6 +153,14 @@ func newTester(t *testing.T, historyLimit uint64) *tester {
return obj
}
+func (t *tester) accountPreimage(hash common.Hash) common.Address {
+ return common.BytesToAddress(t.preimages[hash])
+}
+
+func (t *tester) hashPreimage(hash common.Hash) common.Hash {
+ return common.BytesToHash(t.preimages[hash])
+}
+
func (t *tester) release() {
t.db.Close()
t.db.diskdb.Close()
@@ -141,7 +168,7 @@ func (t *tester) release() {
func (t *tester) randAccount() (common.Address, []byte) {
for addrHash, account := range t.accounts {
- return t.preimages[addrHash], account
+ return t.accountPreimage(addrHash), account
}
return common.Address{}, nil
}
@@ -154,7 +181,9 @@ func (t *tester) generateStorage(ctx *genctx, addr common.Address) common.Hash {
)
for i := 0; i < 10; i++ {
v, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(testrand.Bytes(32)))
- hash := testrand.Hash()
+ key := testrand.Bytes(32)
+ hash := crypto.Keccak256Hash(key)
+ t.preimages[hash] = key
storage[hash] = v
origin[hash] = nil
@@ -183,7 +212,9 @@ func (t *tester) mutateStorage(ctx *genctx, addr common.Address, root common.Has
}
for i := 0; i < 3; i++ {
v, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(testrand.Bytes(32)))
- hash := testrand.Hash()
+ key := testrand.Bytes(32)
+ hash := crypto.Keccak256Hash(key)
+ t.preimages[hash] = key
storage[hash] = v
origin[hash] = nil
@@ -216,7 +247,7 @@ func (t *tester) clearStorage(ctx *genctx, addr common.Address, root common.Hash
return root
}
-func (t *tester) generate(parent common.Hash) (common.Hash, *trienode.MergedNodeSet, *StateSetWithOrigin) {
+func (t *tester) generate(parent common.Hash, rawStorageKey bool) (common.Hash, *trienode.MergedNodeSet, *StateSetWithOrigin) {
var (
ctx = newCtx(parent)
dirties = make(map[common.Hash]struct{})
@@ -232,9 +263,12 @@ func (t *tester) generate(parent common.Hash) (common.Hash, *trienode.MergedNode
// account creation
addr := testrand.Address()
addrHash := crypto.Keccak256Hash(addr.Bytes())
+
+ // short circuit if the account already exists
if _, ok := t.accounts[addrHash]; ok {
continue
}
+ // short circuit if the account has been modified within the same transition
if _, ok := dirties[addrHash]; ok {
continue
}
@@ -243,7 +277,7 @@ func (t *tester) generate(parent common.Hash) (common.Hash, *trienode.MergedNode
root := t.generateStorage(ctx, addr)
ctx.accounts[addrHash] = types.SlimAccountRLP(generateAccount(root))
ctx.accountOrigin[addr] = nil
- t.preimages[addrHash] = addr
+ t.preimages[addrHash] = addr.Bytes()
case modifyAccountOp:
// account mutation
@@ -252,6 +286,8 @@ func (t *tester) generate(parent common.Hash) (common.Hash, *trienode.MergedNode
continue
}
addrHash := crypto.Keccak256Hash(addr.Bytes())
+
+ // short circuit if the account has been modified within the same transition
if _, ok := dirties[addrHash]; ok {
continue
}
@@ -271,6 +307,8 @@ func (t *tester) generate(parent common.Hash) (common.Hash, *trienode.MergedNode
continue
}
addrHash := crypto.Keccak256Hash(addr.Bytes())
+
+ // short circuit if the account has been modified within the same transition
if _, ok := dirties[addrHash]; ok {
continue
}
@@ -314,7 +352,8 @@ func (t *tester) generate(parent common.Hash) (common.Hash, *trienode.MergedNode
delete(t.storages, addrHash)
}
}
- return root, ctx.nodes, NewStateSetWithOrigin(ctx.accounts, ctx.storages, ctx.accountOrigin, ctx.storageOrigin)
+ storageOrigin := ctx.storageOriginSet(rawStorageKey, t)
+ return root, ctx.nodes, NewStateSetWithOrigin(ctx.accounts, ctx.storages, ctx.accountOrigin, storageOrigin, rawStorageKey)
}
// lastHash returns the latest root hash, or empty if nothing is cached.
@@ -409,7 +448,7 @@ func TestDatabaseRollback(t *testing.T) {
}()
// Verify state histories
- tester := newTester(t, 0)
+ tester := newTester(t, 0, false)
defer tester.release()
if err := tester.verifyHistory(); err != nil {
@@ -443,7 +482,7 @@ func TestDatabaseRecoverable(t *testing.T) {
}()
var (
- tester = newTester(t, 0)
+ tester = newTester(t, 0, false)
index = tester.bottomIndex()
)
defer tester.release()
@@ -487,7 +526,7 @@ func TestDisable(t *testing.T) {
maxDiffLayers = 128
}()
- tester := newTester(t, 0)
+ tester := newTester(t, 0, false)
defer tester.release()
stored := crypto.Keccak256Hash(rawdb.ReadAccountTrieNode(tester.db.diskdb, nil))
@@ -529,7 +568,7 @@ func TestCommit(t *testing.T) {
maxDiffLayers = 128
}()
- tester := newTester(t, 0)
+ tester := newTester(t, 0, false)
defer tester.release()
if err := tester.db.Commit(tester.lastHash(), false); err != nil {
@@ -559,7 +598,7 @@ func TestJournal(t *testing.T) {
maxDiffLayers = 128
}()
- tester := newTester(t, 0)
+ tester := newTester(t, 0, false)
defer tester.release()
if err := tester.db.Journal(tester.lastHash()); err != nil {
@@ -589,7 +628,7 @@ func TestCorruptedJournal(t *testing.T) {
maxDiffLayers = 128
}()
- tester := newTester(t, 0)
+ tester := newTester(t, 0, false)
defer tester.release()
if err := tester.db.Journal(tester.lastHash()); err != nil {
@@ -637,7 +676,7 @@ func TestTailTruncateHistory(t *testing.T) {
maxDiffLayers = 128
}()
- tester := newTester(t, 10)
+ tester := newTester(t, 10, false)
defer tester.release()
tester.db.Close()
diff --git a/triedb/pathdb/difflayer_test.go b/triedb/pathdb/difflayer_test.go
index 7176d9964d..83ed833486 100644
--- a/triedb/pathdb/difflayer_test.go
+++ b/triedb/pathdb/difflayer_test.go
@@ -76,7 +76,7 @@ func benchmarkSearch(b *testing.B, depth int, total int) {
nblob = common.CopyBytes(blob)
}
}
- return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil, nil, nil))
+ return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil, nil, nil, false))
}
var layer layer
layer = emptyLayer()
@@ -118,7 +118,7 @@ func BenchmarkPersist(b *testing.B) {
)
nodes[common.Hash{}][string(path)] = node
}
- return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil, nil, nil))
+ return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil, nil, nil, false))
}
for i := 0; i < b.N; i++ {
b.StopTimer()
@@ -156,7 +156,7 @@ func BenchmarkJournal(b *testing.B) {
)
nodes[common.Hash{}][string(path)] = node
}
- return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil, nil, nil))
+ return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil, nil, nil, false))
}
var layer layer
layer = emptyLayer()
diff --git a/triedb/pathdb/disklayer.go b/triedb/pathdb/disklayer.go
index 003431b19b..5e678dbdee 100644
--- a/triedb/pathdb/disklayer.go
+++ b/triedb/pathdb/disklayer.go
@@ -316,7 +316,7 @@ func (dl *diskLayer) revert(h *history) (*diskLayer, error) {
// Apply the reverse state changes upon the current state. This must
// be done before holding the lock in order to access state in "this"
// layer.
- nodes, err := apply(dl.db, h.meta.parent, h.meta.root, h.accounts, h.storages)
+ nodes, err := apply(dl.db, h.meta.parent, h.meta.root, h.meta.version != stateHistoryV0, h.accounts, h.storages)
if err != nil {
return nil, err
}
diff --git a/triedb/pathdb/execute.go b/triedb/pathdb/execute.go
index e24d0710f3..80cecb82e7 100644
--- a/triedb/pathdb/execute.go
+++ b/triedb/pathdb/execute.go
@@ -30,11 +30,12 @@ import (
// context wraps all fields for executing state diffs.
type context struct {
- prevRoot common.Hash
- postRoot common.Hash
- accounts map[common.Address][]byte
- storages map[common.Address]map[common.Hash][]byte
- nodes *trienode.MergedNodeSet
+ prevRoot common.Hash
+ postRoot common.Hash
+ accounts map[common.Address][]byte
+ storages map[common.Address]map[common.Hash][]byte
+ nodes *trienode.MergedNodeSet
+ rawStorageKey bool
// TODO (rjl493456442) abstract out the state hasher
// for supporting verkle tree.
@@ -43,18 +44,19 @@ type context struct {
// apply processes the given state diffs, updates the corresponding post-state
// and returns the trie nodes that have been modified.
-func apply(db database.NodeDatabase, prevRoot common.Hash, postRoot common.Hash, accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte) (map[common.Hash]map[string]*trienode.Node, error) {
+func apply(db database.NodeDatabase, prevRoot common.Hash, postRoot common.Hash, rawStorageKey bool, accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte) (map[common.Hash]map[string]*trienode.Node, error) {
tr, err := trie.New(trie.TrieID(postRoot), db)
if err != nil {
return nil, err
}
ctx := &context{
- prevRoot: prevRoot,
- postRoot: postRoot,
- accounts: accounts,
- storages: storages,
- accountTrie: tr,
- nodes: trienode.NewMergedNodeSet(),
+ prevRoot: prevRoot,
+ postRoot: postRoot,
+ accounts: accounts,
+ storages: storages,
+ accountTrie: tr,
+ rawStorageKey: rawStorageKey,
+ nodes: trienode.NewMergedNodeSet(),
}
for addr, account := range accounts {
var err error
@@ -109,11 +111,15 @@ func updateAccount(ctx *context, db database.NodeDatabase, addr common.Address)
return err
}
for key, val := range ctx.storages[addr] {
+ tkey := key
+ if ctx.rawStorageKey {
+ tkey = h.hash(key.Bytes())
+ }
var err error
if len(val) == 0 {
- err = st.Delete(key.Bytes())
+ err = st.Delete(tkey.Bytes())
} else {
- err = st.Update(key.Bytes(), val)
+ err = st.Update(tkey.Bytes(), val)
}
if err != nil {
return err
@@ -166,7 +172,11 @@ func deleteAccount(ctx *context, db database.NodeDatabase, addr common.Address)
if len(val) != 0 {
return errors.New("expect storage deletion")
}
- if err := st.Delete(key.Bytes()); err != nil {
+ tkey := key
+ if ctx.rawStorageKey {
+ tkey = h.hash(key.Bytes())
+ }
+ if err := st.Delete(tkey.Bytes()); err != nil {
return err
}
}
diff --git a/triedb/pathdb/history.go b/triedb/pathdb/history.go
index e1cd981153..9fb7d9e153 100644
--- a/triedb/pathdb/history.go
+++ b/triedb/pathdb/history.go
@@ -68,7 +68,8 @@ const (
slotIndexSize = common.HashLength + 5 // The length of encoded slot index
historyMetaSize = 9 + 2*common.HashLength // The length of encoded history meta
- stateHistoryVersion = uint8(0) // initial version of state history structure.
+ stateHistoryV0 = uint8(0) // initial version of state history structure
+ stateHistoryV1 = uint8(1) // use the storage slot raw key as the identifier instead of the key hash
)
// Each state history entry is consisted of five elements:
@@ -169,15 +170,18 @@ func (i *accountIndex) decode(blob []byte) {
// slotIndex describes the metadata belonging to a storage slot.
type slotIndex struct {
- hash common.Hash // The hash of slot key
- length uint8 // The length of storage slot, up to 32 bytes defined in protocol
- offset uint32 // The offset of item in storage slot data table
+ // the identifier of the storage slot. Specifically
+ // in v0, it's the hash of the raw storage slot key (32 bytes);
+ // in v1, it's the raw storage slot key (32 bytes);
+ id common.Hash
+ length uint8 // The length of storage slot, up to 32 bytes defined in protocol
+ offset uint32 // The offset of item in storage slot data table
}
// encode packs slot index into byte stream.
func (i *slotIndex) encode() []byte {
var buf [slotIndexSize]byte
- copy(buf[:common.HashLength], i.hash.Bytes())
+ copy(buf[:common.HashLength], i.id.Bytes())
buf[common.HashLength] = i.length
binary.BigEndian.PutUint32(buf[common.HashLength+1:], i.offset)
return buf[:]
@@ -185,7 +189,7 @@ func (i *slotIndex) encode() []byte {
// decode unpack slot index from the byte stream.
func (i *slotIndex) decode(blob []byte) {
- i.hash = common.BytesToHash(blob[:common.HashLength])
+ i.id = common.BytesToHash(blob[:common.HashLength])
i.length = blob[common.HashLength]
i.offset = binary.BigEndian.Uint32(blob[common.HashLength+1:])
}
@@ -214,7 +218,7 @@ func (m *meta) decode(blob []byte) error {
return errors.New("no version tag")
}
switch blob[0] {
- case stateHistoryVersion:
+ case stateHistoryV0, stateHistoryV1:
if len(blob) != historyMetaSize {
return fmt.Errorf("invalid state history meta, len: %d", len(blob))
}
@@ -242,7 +246,7 @@ type history struct {
}
// newHistory constructs the state history object with provided state change set.
-func newHistory(root common.Hash, parent common.Hash, block uint64, accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte) *history {
+func newHistory(root common.Hash, parent common.Hash, block uint64, accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte, rawStorageKey bool) *history {
var (
accountList = maps.Keys(accounts)
storageList = make(map[common.Address][]common.Hash)
@@ -254,9 +258,13 @@ func newHistory(root common.Hash, parent common.Hash, block uint64, accounts map
slices.SortFunc(slist, common.Hash.Cmp)
storageList[addr] = slist
}
+ version := stateHistoryV0
+ if rawStorageKey {
+ version = stateHistoryV1
+ }
return &history{
meta: &meta{
- version: stateHistoryVersion,
+ version: version,
parent: parent,
root: root,
block: block,
@@ -289,7 +297,7 @@ func (h *history) encode() ([]byte, []byte, []byte, []byte) {
// Encode storage slots in order
for _, slotHash := range h.storageList[addr] {
sIndex := slotIndex{
- hash: slotHash,
+ id: slotHash,
length: uint8(len(slots[slotHash])),
offset: uint32(len(storageData)),
}
@@ -377,7 +385,7 @@ func (r *decoder) readAccount(pos int) (accountIndex, []byte, error) {
// readStorage parses the storage slots from the byte stream with specified account.
func (r *decoder) readStorage(accIndex accountIndex) ([]common.Hash, map[common.Hash][]byte, error) {
var (
- last common.Hash
+ last *common.Hash
count = int(accIndex.storageSlots)
list = make([]common.Hash, 0, count)
storage = make(map[common.Hash][]byte, count)
@@ -402,8 +410,10 @@ func (r *decoder) readStorage(accIndex accountIndex) ([]common.Hash, map[common.
}
index.decode(r.storageIndexes[start:end])
- if bytes.Compare(last.Bytes(), index.hash.Bytes()) >= 0 {
- return nil, nil, errors.New("storage slot is not in order")
+ if last != nil {
+ if bytes.Compare(last.Bytes(), index.id.Bytes()) >= 0 {
+ return nil, nil, fmt.Errorf("storage slot is not in order, last: %x, current: %x", *last, index.id)
+ }
}
if index.offset != r.lastSlotDataRead {
return nil, nil, errors.New("storage data buffer is gapped")
@@ -412,10 +422,10 @@ func (r *decoder) readStorage(accIndex accountIndex) ([]common.Hash, map[common.
if uint32(len(r.storageData)) < sEnd {
return nil, nil, errors.New("storage data buffer is corrupted")
}
- storage[index.hash] = r.storageData[r.lastSlotDataRead:sEnd]
- list = append(list, index.hash)
+ storage[index.id] = r.storageData[r.lastSlotDataRead:sEnd]
+ list = append(list, index.id)
- last = index.hash
+ last = &index.id
r.lastSlotIndexRead = end
r.lastSlotDataRead = sEnd
}
@@ -498,7 +508,7 @@ func writeHistory(writer ethdb.AncientWriter, dl *diffLayer) error {
}
var (
start = time.Now()
- history = newHistory(dl.rootHash(), dl.parentLayer().rootHash(), dl.block, dl.states.accountOrigin, dl.states.storageOrigin)
+ history = newHistory(dl.rootHash(), dl.parentLayer().rootHash(), dl.block, dl.states.accountOrigin, dl.states.storageOrigin, dl.states.rawStorageKey)
)
accountData, storageData, accountIndex, storageIndex := history.encode()
dataSize := common.StorageSize(len(accountData) + len(storageData))
diff --git a/triedb/pathdb/history_inspect.go b/triedb/pathdb/history_inspect.go
index 240474da37..7dbe5959dc 100644
--- a/triedb/pathdb/history_inspect.go
+++ b/triedb/pathdb/history_inspect.go
@@ -21,6 +21,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
)
@@ -109,12 +110,17 @@ func accountHistory(freezer ethdb.AncientReader, address common.Address, start,
// storageHistory inspects the storage history within the range.
func storageHistory(freezer ethdb.AncientReader, address common.Address, slot common.Hash, start uint64, end uint64) (*HistoryStats, error) {
+ slotHash := crypto.Keccak256Hash(slot.Bytes())
return inspectHistory(freezer, start, end, func(h *history, stats *HistoryStats) {
slots, exists := h.storages[address]
if !exists {
return
}
- blob, exists := slots[slot]
+ key := slotHash
+ if h.meta.version != stateHistoryV0 {
+ key = slot
+ }
+ blob, exists := slots[key]
if !exists {
return
}
diff --git a/triedb/pathdb/history_test.go b/triedb/pathdb/history_test.go
index d430706dee..953f023530 100644
--- a/triedb/pathdb/history_test.go
+++ b/triedb/pathdb/history_test.go
@@ -49,9 +49,9 @@ func randomStateSet(n int) (map[common.Address][]byte, map[common.Address]map[co
return accounts, storages
}
-func makeHistory() *history {
+func makeHistory(rawStorageKey bool) *history {
accounts, storages := randomStateSet(3)
- return newHistory(testrand.Hash(), types.EmptyRootHash, 0, accounts, storages)
+ return newHistory(testrand.Hash(), types.EmptyRootHash, 0, accounts, storages, rawStorageKey)
}
func makeHistories(n int) []*history {
@@ -62,7 +62,7 @@ func makeHistories(n int) []*history {
for i := 0; i < n; i++ {
root := testrand.Hash()
accounts, storages := randomStateSet(3)
- h := newHistory(root, parent, uint64(i), accounts, storages)
+ h := newHistory(root, parent, uint64(i), accounts, storages, false)
parent = root
result = append(result, h)
}
@@ -70,10 +70,15 @@ func makeHistories(n int) []*history {
}
func TestEncodeDecodeHistory(t *testing.T) {
+ testEncodeDecodeHistory(t, false)
+ testEncodeDecodeHistory(t, true)
+}
+
+func testEncodeDecodeHistory(t *testing.T, rawStorageKey bool) {
var (
m meta
dec history
- obj = makeHistory()
+ obj = makeHistory(rawStorageKey)
)
// check if meta data can be correctly encode/decode
blob := obj.meta.encode()
diff --git a/triedb/pathdb/iterator_test.go b/triedb/pathdb/iterator_test.go
index 48b5870b5b..05a166d1b6 100644
--- a/triedb/pathdb/iterator_test.go
+++ b/triedb/pathdb/iterator_test.go
@@ -131,7 +131,7 @@ func TestAccountIteratorBasics(t *testing.T) {
storage[hash] = accStorage
}
}
- states := newStates(accounts, storage)
+ states := newStates(accounts, storage, false)
it := newDiffAccountIterator(common.Hash{}, states, nil)
verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator
@@ -171,7 +171,7 @@ func TestStorageIteratorBasics(t *testing.T) {
storage[hash] = accStorage
nilStorage[hash] = nilstorage
}
- states := newStates(accounts, storage)
+ states := newStates(accounts, storage, false)
for account := range accounts {
it := newDiffStorageIterator(account, common.Hash{}, states, nil)
verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator
@@ -267,13 +267,13 @@ func TestAccountIteratorTraversal(t *testing.T) {
// Stack three diff layers on top with various overlaps
db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 0, trienode.NewMergedNodeSet(),
- NewStateSetWithOrigin(randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil, nil, nil))
+ NewStateSetWithOrigin(randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil, nil, nil, false))
db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 0, trienode.NewMergedNodeSet(),
- NewStateSetWithOrigin(randomAccountSet("0xbb", "0xdd", "0xf0"), nil, nil, nil))
+ NewStateSetWithOrigin(randomAccountSet("0xbb", "0xdd", "0xf0"), nil, nil, nil, false))
db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 0, trienode.NewMergedNodeSet(),
- NewStateSetWithOrigin(randomAccountSet("0xcc", "0xf0", "0xff"), nil, nil, nil))
+ NewStateSetWithOrigin(randomAccountSet("0xcc", "0xf0", "0xff"), nil, nil, nil, false))
// Verify the single and multi-layer iterators
head := db.tree.get(common.HexToHash("0x04"))
@@ -314,13 +314,13 @@ func TestStorageIteratorTraversal(t *testing.T) {
// Stack three diff layers on top with various overlaps
db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 0, trienode.NewMergedNodeSet(),
- NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil), nil, nil))
+ NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil), nil, nil, false))
db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 0, trienode.NewMergedNodeSet(),
- NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x04", "0x05", "0x06"}}, nil), nil, nil))
+ NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x04", "0x05", "0x06"}}, nil), nil, nil, false))
db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 0, trienode.NewMergedNodeSet(),
- NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil), nil, nil))
+ NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil), nil, nil, false))
// Verify the single and multi-layer iterators
head := db.tree.get(common.HexToHash("0x04"))
@@ -395,14 +395,14 @@ func TestAccountIteratorTraversalValues(t *testing.T) {
}
}
// Assemble a stack of snapshots from the account layers
- db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(a, nil, nil, nil))
- db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(b, nil, nil, nil))
- db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 4, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(c, nil, nil, nil))
- db.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), 5, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(d, nil, nil, nil))
- db.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), 6, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(e, nil, nil, nil))
- db.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), 7, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(f, nil, nil, nil))
- db.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), 8, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(g, nil, nil, nil))
- db.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), 9, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(h, nil, nil, nil))
+ db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(a, nil, nil, nil, false))
+ db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(b, nil, nil, nil, false))
+ db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 4, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(c, nil, nil, nil, false))
+ db.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), 5, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(d, nil, nil, nil, false))
+ db.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), 6, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(e, nil, nil, nil, false))
+ db.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), 7, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(f, nil, nil, nil, false))
+ db.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), 8, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(g, nil, nil, nil, false))
+ db.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), 9, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(h, nil, nil, nil, false))
// binaryIterator
r, _ := db.StateReader(common.HexToHash("0x09"))
@@ -504,14 +504,14 @@ func TestStorageIteratorTraversalValues(t *testing.T) {
}
}
// Assemble a stack of snapshots from the account layers
- db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(a), nil, nil))
- db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(b), nil, nil))
- db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 4, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(c), nil, nil))
- db.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), 5, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(d), nil, nil))
- db.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), 6, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(e), nil, nil))
- db.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), 7, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(f), nil, nil))
- db.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), 8, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(g), nil, nil))
- db.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), 9, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(h), nil, nil))
+ db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(a), nil, nil, false))
+ db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(b), nil, nil, false))
+ db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 4, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(c), nil, nil, false))
+ db.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), 5, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(d), nil, nil, false))
+ db.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), 6, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(e), nil, nil, false))
+ db.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), 7, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(f), nil, nil, false))
+ db.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), 8, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(g), nil, nil, false))
+ db.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), 9, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(randomAccountSet("0xaa"), wrapStorage(h), nil, nil, false))
// binaryIterator
r, _ := db.StateReader(common.HexToHash("0x09"))
@@ -588,7 +588,7 @@ func TestAccountIteratorLargeTraversal(t *testing.T) {
parent = common.HexToHash(fmt.Sprintf("0x%02x", i))
}
db.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), parent, uint64(i), trienode.NewMergedNodeSet(),
- NewStateSetWithOrigin(makeAccounts(200), nil, nil, nil))
+ NewStateSetWithOrigin(makeAccounts(200), nil, nil, nil, false))
}
// Iterate the entire stack and ensure everything is hit only once
head := db.tree.get(common.HexToHash("0x80"))
@@ -626,13 +626,13 @@ func TestAccountIteratorFlattening(t *testing.T) {
// Create a stack of diffs on top
db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(),
- NewStateSetWithOrigin(randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil, nil, nil))
+ NewStateSetWithOrigin(randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil, nil, nil, false))
db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 2, trienode.NewMergedNodeSet(),
- NewStateSetWithOrigin(randomAccountSet("0xbb", "0xdd", "0xf0"), nil, nil, nil))
+ NewStateSetWithOrigin(randomAccountSet("0xbb", "0xdd", "0xf0"), nil, nil, nil, false))
db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 3, trienode.NewMergedNodeSet(),
- NewStateSetWithOrigin(randomAccountSet("0xcc", "0xf0", "0xff"), nil, nil, nil))
+ NewStateSetWithOrigin(randomAccountSet("0xcc", "0xf0", "0xff"), nil, nil, nil, false))
// Create a binary iterator and flatten the data from underneath it
head := db.tree.get(common.HexToHash("0x04"))
@@ -658,13 +658,13 @@ func TestAccountIteratorSeek(t *testing.T) {
// db.WaitGeneration()
db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(),
- NewStateSetWithOrigin(randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil, nil, nil))
+ NewStateSetWithOrigin(randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil, nil, nil, false))
db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 2, trienode.NewMergedNodeSet(),
- NewStateSetWithOrigin(randomAccountSet("0xbb", "0xdd", "0xf0"), nil, nil, nil))
+ NewStateSetWithOrigin(randomAccountSet("0xbb", "0xdd", "0xf0"), nil, nil, nil, false))
db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 3, trienode.NewMergedNodeSet(),
- NewStateSetWithOrigin(randomAccountSet("0xcc", "0xf0", "0xff"), nil, nil, nil))
+ NewStateSetWithOrigin(randomAccountSet("0xcc", "0xf0", "0xff"), nil, nil, nil, false))
// Account set is now
// 02: aa, ee, f0, ff
@@ -731,13 +731,13 @@ func testStorageIteratorSeek(t *testing.T, newIterator func(db *Database, root,
// Stack three diff layers on top with various overlaps
db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(),
- NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil), nil, nil))
+ NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil), nil, nil, false))
db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 2, trienode.NewMergedNodeSet(),
- NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x05", "0x06"}}, nil), nil, nil))
+ NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x05", "0x06"}}, nil), nil, nil, false))
db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 3, trienode.NewMergedNodeSet(),
- NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x05", "0x08"}}, nil), nil, nil))
+ NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x05", "0x08"}}, nil), nil, nil, false))
// Account set is now
// 02: 01, 03, 05
@@ -803,16 +803,16 @@ func testAccountIteratorDeletions(t *testing.T, newIterator func(db *Database, r
// Stack three diff layers on top with various overlaps
db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(),
- NewStateSetWithOrigin(randomAccountSet("0x11", "0x22", "0x33"), nil, nil, nil))
+ NewStateSetWithOrigin(randomAccountSet("0x11", "0x22", "0x33"), nil, nil, nil, false))
deleted := common.HexToHash("0x22")
accounts := randomAccountSet("0x11", "0x33")
accounts[deleted] = nil
db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 2, trienode.NewMergedNodeSet(),
- NewStateSetWithOrigin(accounts, nil, nil, nil))
+ NewStateSetWithOrigin(accounts, nil, nil, nil, false))
db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 3, trienode.NewMergedNodeSet(),
- NewStateSetWithOrigin(randomAccountSet("0x33", "0x44", "0x55"), nil, nil, nil))
+ NewStateSetWithOrigin(randomAccountSet("0x33", "0x44", "0x55"), nil, nil, nil, false))
// The output should be 11,33,44,55
it := newIterator(db, common.HexToHash("0x04"), common.Hash{})
@@ -843,10 +843,10 @@ func TestStorageIteratorDeletions(t *testing.T) {
// Stack three diff layers on top with various overlaps
db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(),
- NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil), nil, nil))
+ NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil), nil, nil, false))
db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 2, trienode.NewMergedNodeSet(),
- NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x04", "0x06"}}, [][]string{{"0x01", "0x03"}}), nil, nil))
+ NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x04", "0x06"}}, [][]string{{"0x01", "0x03"}}), nil, nil, false))
// The output should be 02,04,05,06
it, _ := db.StorageIterator(common.HexToHash("0x03"), common.HexToHash("0xaa"), common.Hash{})
@@ -863,7 +863,7 @@ func TestStorageIteratorDeletions(t *testing.T) {
common.HexToHash("0xaa"): nil,
}
db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 3, trienode.NewMergedNodeSet(),
- NewStateSetWithOrigin(accounts, randomStorageSet([]string{"0xaa"}, nil, [][]string{{"0x02", "0x04", "0x05", "0x06"}}), nil, nil))
+ NewStateSetWithOrigin(accounts, randomStorageSet([]string{"0xaa"}, nil, [][]string{{"0x02", "0x04", "0x05", "0x06"}}), nil, nil, false))
it, _ = db.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.Hash{})
verifyIterator(t, 0, it, verifyStorage)
@@ -871,7 +871,7 @@ func TestStorageIteratorDeletions(t *testing.T) {
// Re-insert the slots of the same account
db.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), 4, trienode.NewMergedNodeSet(),
- NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x07", "0x08", "0x09"}}, nil), nil, nil))
+ NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x07", "0x08", "0x09"}}, nil), nil, nil, false))
// The output should be 07,08,09
it, _ = db.StorageIterator(common.HexToHash("0x05"), common.HexToHash("0xaa"), common.Hash{})
@@ -880,7 +880,7 @@ func TestStorageIteratorDeletions(t *testing.T) {
// Destruct the whole storage but re-create the account in the same layer
db.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), 5, trienode.NewMergedNodeSet(),
- NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x11", "0x12"}}, [][]string{{"0x07", "0x08", "0x09"}}), nil, nil))
+ NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x11", "0x12"}}, [][]string{{"0x07", "0x08", "0x09"}}), nil, nil, false))
it, _ = db.StorageIterator(common.HexToHash("0x06"), common.HexToHash("0xaa"), common.Hash{})
verifyIterator(t, 2, it, verifyStorage) // The output should be 11,12
@@ -911,19 +911,19 @@ func testStaleIterator(t *testing.T, newIter func(db *Database, hash common.Hash
// [02 (disk), 03]
db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(),
- NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01"}}, nil), nil, nil))
+ NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01"}}, nil), nil, nil, false))
db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 2, trienode.NewMergedNodeSet(),
- NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02"}}, nil), nil, nil))
+ NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02"}}, nil), nil, nil, false))
db.tree.cap(common.HexToHash("0x03"), 1)
// [02 (disk), 03, 04]
db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 3, trienode.NewMergedNodeSet(),
- NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x03"}}, nil), nil, nil))
+ NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x03"}}, nil), nil, nil, false))
iter := newIter(db, common.HexToHash("0x04"))
// [04 (disk), 05]
db.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), 3, trienode.NewMergedNodeSet(),
- NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x04"}}, nil), nil, nil))
+ NewStateSetWithOrigin(randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x04"}}, nil), nil, nil, false))
db.tree.cap(common.HexToHash("0x05"), 1)
// Iterator can't finish the traversal as the layer 02 has becoming stale.
@@ -969,7 +969,7 @@ func BenchmarkAccountIteratorTraversal(b *testing.B) {
if i == 1 {
parent = common.HexToHash(fmt.Sprintf("0x%02x", i))
}
- db.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), parent, uint64(i), trienode.NewMergedNodeSet(), NewStateSetWithOrigin(makeAccounts(200), nil, nil, nil))
+ db.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), parent, uint64(i), trienode.NewMergedNodeSet(), NewStateSetWithOrigin(makeAccounts(200), nil, nil, nil, false))
}
// We call this once before the benchmark, so the creation of
// sorted accountlists are not included in the results.
@@ -1059,9 +1059,9 @@ func BenchmarkAccountIteratorLargeBaselayer(b *testing.B) {
db := New(rawdb.NewMemoryDatabase(), config, false)
// db.WaitGeneration()
- db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(makeAccounts(2000), nil, nil, nil))
+ db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(makeAccounts(2000), nil, nil, nil, false))
for i := 2; i <= 100; i++ {
- db.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), uint64(i), trienode.NewMergedNodeSet(), NewStateSetWithOrigin(makeAccounts(20), nil, nil, nil))
+ db.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), uint64(i), trienode.NewMergedNodeSet(), NewStateSetWithOrigin(makeAccounts(20), nil, nil, nil, false))
}
// We call this once before the benchmark, so the creation of
// sorted accountlists are not included in the results.
diff --git a/triedb/pathdb/journal.go b/triedb/pathdb/journal.go
index 267d675bc2..79a7a22e0b 100644
--- a/triedb/pathdb/journal.go
+++ b/triedb/pathdb/journal.go
@@ -45,7 +45,8 @@ var (
// - Version 0: initial version
// - Version 1: storage.Incomplete field is removed
// - Version 2: add post-modification state values
-const journalVersion uint64 = 2
+// - Version 3: add a flag to indicate whether the storage slot key is the raw key or a hash
+const journalVersion uint64 = 3
// loadJournal tries to parse the layer journal from the disk.
func (db *Database) loadJournal(diskRoot common.Hash) (layer, error) {
diff --git a/triedb/pathdb/states.go b/triedb/pathdb/states.go
index 81d34da5df..969782e3c4 100644
--- a/triedb/pathdb/states.go
+++ b/triedb/pathdb/states.go
@@ -65,6 +65,8 @@ type stateSet struct {
accountListSorted []common.Hash // List of account for iteration. If it exists, it's sorted, otherwise it's nil
storageListSorted map[common.Hash][]common.Hash // List of storage slots for iterated retrievals, one per account. Any existing lists are sorted if non-nil
+ rawStorageKey bool // indicates whether the storage set uses the raw slot key or the hash
+
// Lock for guarding the two lists above. These lists might be accessed
// concurrently and lock protection is essential to avoid concurrent
// slice or map read/write.
@@ -72,7 +74,7 @@ type stateSet struct {
}
// newStates constructs the state set with the provided account and storage data.
-func newStates(accounts map[common.Hash][]byte, storages map[common.Hash]map[common.Hash][]byte) *stateSet {
+func newStates(accounts map[common.Hash][]byte, storages map[common.Hash]map[common.Hash][]byte, rawStorageKey bool) *stateSet {
// Don't panic for the lazy callers, initialize the nil maps instead.
if accounts == nil {
accounts = make(map[common.Hash][]byte)
@@ -83,6 +85,7 @@ func newStates(accounts map[common.Hash][]byte, storages map[common.Hash]map[com
s := &stateSet{
accountData: accounts,
storageData: storages,
+ rawStorageKey: rawStorageKey,
storageListSorted: make(map[common.Hash][]common.Hash),
}
s.size = s.check()
@@ -330,6 +333,9 @@ func (s *stateSet) updateSize(delta int) {
// encode serializes the content of state set into the provided writer.
func (s *stateSet) encode(w io.Writer) error {
// Encode accounts
+ if err := rlp.Encode(w, s.rawStorageKey); err != nil {
+ return err
+ }
type accounts struct {
AddrHashes []common.Hash
Accounts [][]byte
@@ -367,6 +373,9 @@ func (s *stateSet) encode(w io.Writer) error {
// decode deserializes the content from the rlp stream into the state set.
func (s *stateSet) decode(r *rlp.Stream) error {
+ if err := r.Decode(&s.rawStorageKey); err != nil {
+ return fmt.Errorf("load diff raw storage key flag: %v", err)
+ }
type accounts struct {
AddrHashes []common.Hash
Accounts [][]byte
@@ -435,23 +444,23 @@ func (s *stateSet) dbsize() int {
type StateSetWithOrigin struct {
*stateSet
- // AccountOrigin represents the account data before the state transition,
+ // accountOrigin represents the account data before the state transition,
// corresponding to both the accountData and destructSet. It's keyed by the
// account address. The nil value means the account was not present before.
accountOrigin map[common.Address][]byte
- // StorageOrigin represents the storage data before the state transition,
+ // storageOrigin represents the storage data before the state transition,
// corresponding to storageData and deleted slots of destructSet. It's keyed
// by the account address and slot key hash. The nil value means the slot was
// not present.
storageOrigin map[common.Address]map[common.Hash][]byte
- // Memory size of the state data (accountOrigin and storageOrigin)
+ // size is the memory size of the state data (accountOrigin and storageOrigin)
size uint64
}
// NewStateSetWithOrigin constructs the state set with the provided data.
-func NewStateSetWithOrigin(accounts map[common.Hash][]byte, storages map[common.Hash]map[common.Hash][]byte, accountOrigin map[common.Address][]byte, storageOrigin map[common.Address]map[common.Hash][]byte) *StateSetWithOrigin {
+func NewStateSetWithOrigin(accounts map[common.Hash][]byte, storages map[common.Hash]map[common.Hash][]byte, accountOrigin map[common.Address][]byte, storageOrigin map[common.Address]map[common.Hash][]byte, rawStorageKey bool) *StateSetWithOrigin {
// Don't panic for the lazy callers, initialize the nil maps instead.
if accountOrigin == nil {
accountOrigin = make(map[common.Address][]byte)
@@ -471,7 +480,7 @@ func NewStateSetWithOrigin(accounts map[common.Hash][]byte, storages map[common.
size += 2*common.HashLength + len(data)
}
}
- set := newStates(accounts, storages)
+ set := newStates(accounts, storages, rawStorageKey)
return &StateSetWithOrigin{
stateSet: set,
accountOrigin: accountOrigin,
diff --git a/triedb/pathdb/states_test.go b/triedb/pathdb/states_test.go
index f097e90e81..30eb6ad6c8 100644
--- a/triedb/pathdb/states_test.go
+++ b/triedb/pathdb/states_test.go
@@ -44,6 +44,7 @@ func TestStatesMerge(t *testing.T) {
common.Hash{0x1}: {0x10},
},
},
+ false,
)
b := newStates(
map[common.Hash][]byte{
@@ -64,6 +65,7 @@ func TestStatesMerge(t *testing.T) {
common.Hash{0x1}: nil, // delete slot
},
},
+ false,
)
a.merge(b)
@@ -132,6 +134,7 @@ func TestStatesRevert(t *testing.T) {
common.Hash{0x1}: {0x10},
},
},
+ false,
)
b := newStates(
map[common.Hash][]byte{
@@ -152,6 +155,7 @@ func TestStatesRevert(t *testing.T) {
common.Hash{0x1}: nil,
},
},
+ false,
)
a.merge(b)
a.revertTo(
@@ -224,12 +228,13 @@ func TestStatesRevert(t *testing.T) {
// before and was created during transition w, reverting w will retain an x=nil
// entry in the set.
func TestStateRevertAccountNullMarker(t *testing.T) {
- a := newStates(nil, nil) // empty initial state
+ a := newStates(nil, nil, false) // empty initial state
b := newStates(
map[common.Hash][]byte{
{0xa}: {0xa},
},
nil,
+ false,
)
a.merge(b) // create account 0xa
a.revertTo(
@@ -254,7 +259,7 @@ func TestStateRevertAccountNullMarker(t *testing.T) {
func TestStateRevertStorageNullMarker(t *testing.T) {
a := newStates(map[common.Hash][]byte{
{0xa}: {0xa},
- }, nil) // initial state with account 0xa
+ }, nil, false) // initial state with account 0xa
b := newStates(
nil,
@@ -263,6 +268,7 @@ func TestStateRevertStorageNullMarker(t *testing.T) {
common.Hash{0x1}: {0x1},
},
},
+ false,
)
a.merge(b) // create slot 0x1
a.revertTo(
@@ -284,6 +290,11 @@ func TestStateRevertStorageNullMarker(t *testing.T) {
}
func TestStatesEncode(t *testing.T) {
+ testStatesEncode(t, false)
+ testStatesEncode(t, true)
+}
+
+func testStatesEncode(t *testing.T, rawStorageKey bool) {
s := newStates(
map[common.Hash][]byte{
{0x1}: {0x1},
@@ -293,6 +304,7 @@ func TestStatesEncode(t *testing.T) {
common.Hash{0x1}: {0x1},
},
},
+ rawStorageKey,
)
buf := bytes.NewBuffer(nil)
if err := s.encode(buf); err != nil {
@@ -308,9 +320,17 @@ func TestStatesEncode(t *testing.T) {
if !reflect.DeepEqual(s.storageData, dec.storageData) {
t.Fatal("Unexpected storage data")
}
+ if s.rawStorageKey != dec.rawStorageKey {
+ t.Fatal("Unexpected rawStorageKey flag")
+ }
}
func TestStateWithOriginEncode(t *testing.T) {
+ testStateWithOriginEncode(t, false)
+ testStateWithOriginEncode(t, true)
+}
+
+func testStateWithOriginEncode(t *testing.T, rawStorageKey bool) {
s := NewStateSetWithOrigin(
map[common.Hash][]byte{
{0x1}: {0x1},
@@ -328,6 +348,7 @@ func TestStateWithOriginEncode(t *testing.T) {
common.Hash{0x1}: {0x1},
},
},
+ rawStorageKey,
)
buf := bytes.NewBuffer(nil)
if err := s.encode(buf); err != nil {
@@ -349,6 +370,9 @@ func TestStateWithOriginEncode(t *testing.T) {
if !reflect.DeepEqual(s.storageOrigin, dec.storageOrigin) {
t.Fatal("Unexpected storage origin data")
}
+ if s.rawStorageKey != dec.rawStorageKey {
+ t.Fatal("Unexpected rawStorageKey flag")
+ }
}
func TestStateSizeTracking(t *testing.T) {
@@ -375,6 +399,7 @@ func TestStateSizeTracking(t *testing.T) {
common.Hash{0x1}: {0x10}, // 2*common.HashLength+1
},
},
+ false,
)
if a.size != uint64(expSizeA) {
t.Fatalf("Unexpected size, want: %d, got: %d", expSizeA, a.size)
@@ -406,6 +431,7 @@ func TestStateSizeTracking(t *testing.T) {
common.Hash{0x3}: nil, // 2*common.HashLength, slot deletion
},
},
+ false,
)
if b.size != uint64(expSizeB) {
t.Fatalf("Unexpected size, want: %d, got: %d", expSizeB, b.size)
diff --git a/triedb/states.go b/triedb/states.go
index fa432e0704..9fabdb088d 100644
--- a/triedb/states.go
+++ b/triedb/states.go
@@ -27,6 +27,7 @@ type StateSet struct {
AccountsOrigin map[common.Address][]byte // Original values of mutated accounts in 'slim RLP' encoding
Storages map[common.Hash]map[common.Hash][]byte // Mutated storage slots in 'prefix-zero-trimmed' RLP format
StoragesOrigin map[common.Address]map[common.Hash][]byte // Original values of mutated storage slots in 'prefix-zero-trimmed' RLP format
+ RawStorageKey bool // Flag whether the storage set uses the raw slot key or the hash
}
// NewStateSet initializes an empty state set.
@@ -45,5 +46,5 @@ func (set *StateSet) internal() *pathdb.StateSetWithOrigin {
if set == nil {
return nil
}
- return pathdb.NewStateSetWithOrigin(set.Accounts, set.Storages, set.AccountsOrigin, set.StoragesOrigin)
+ return pathdb.NewStateSetWithOrigin(set.Accounts, set.Storages, set.AccountsOrigin, set.StoragesOrigin, set.RawStorageKey)
}