Compare commits
9 Commits
88cd3c5133 ... 8d947a8732

Author | SHA1
---|---
rjl493456442 | 8d947a8732
Gary Rong | 09e833b07d
Gary Rong | bac4b4b1be
Gary Rong | c0ee47e3c5
Gary Rong | 715e1e1fe8
Gary Rong | 1c84b6ccaf
Gary Rong | 0a8af7177a
Felix Lange | db8eed860d
Ng Wei Han | 2406305175
```diff
@@ -265,15 +265,7 @@ func ExecutableDataToBlockNoHash(data ExecutableData, versionedHashes []common.H
     var requestsHash *common.Hash
     if requests != nil {
-        // Put back request type byte.
-        typedRequests := make([][]byte, len(requests))
-        for i, reqdata := range requests {
-            typedReqdata := make([]byte, len(reqdata)+1)
-            typedReqdata[0] = byte(i)
-            copy(typedReqdata[1:], reqdata)
-            typedRequests[i] = typedReqdata
-        }
-        h := types.CalcRequestsHash(typedRequests)
+        h := types.CalcRequestsHash(requests)
         requestsHash = &h
     }
```
```diff
@@ -343,20 +335,11 @@ func BlockToExecutableData(block *types.Block, fees *big.Int, sidecars []*types.
         }
     }
 
-    // Remove type byte in requests.
-    var plainRequests [][]byte
-    if requests != nil {
-        plainRequests = make([][]byte, len(requests))
-        for i, reqdata := range requests {
-            plainRequests[i] = reqdata[1:]
-        }
-    }
     return &ExecutionPayloadEnvelope{
         ExecutionPayload: data,
         BlockValue:       fees,
         BlobsBundle:      &bundle,
-        Requests:         plainRequests,
+        Requests:         requests,
         Override:         false,
     }
 }
```
```diff
@@ -366,21 +366,19 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
     // Gather the execution-layer triggered requests.
     var requests [][]byte
     if chainConfig.IsPrague(vmContext.BlockNumber, vmContext.Time) {
-        // EIP-6110 deposits
+        requests = [][]byte{}
+        // EIP-6110
         var allLogs []*types.Log
         for _, receipt := range receipts {
             allLogs = append(allLogs, receipt.Logs...)
         }
-        depositRequests, err := core.ParseDepositLogs(allLogs, chainConfig)
-        if err != nil {
+        if err := core.ParseDepositLogs(&requests, allLogs, chainConfig); err != nil {
             return nil, nil, nil, NewError(ErrorEVM, fmt.Errorf("could not parse requests logs: %v", err))
         }
-        requests = append(requests, depositRequests)
-
-        // EIP-7002 withdrawals
-        requests = append(requests, core.ProcessWithdrawalQueue(evm))
-        // EIP-7251 consolidations
-        requests = append(requests, core.ProcessConsolidationQueue(evm))
+        // EIP-7002
+        core.ProcessWithdrawalQueue(&requests, evm)
+        // EIP-7251
+        core.ProcessConsolidationQueue(&requests, evm)
     }
 
     // Commit block
```
```diff
@@ -349,25 +349,22 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
 
         var requests [][]byte
         if config.IsPrague(b.header.Number, b.header.Time) {
+            requests = [][]byte{}
             // EIP-6110 deposits
             var blockLogs []*types.Log
             for _, r := range b.receipts {
                 blockLogs = append(blockLogs, r.Logs...)
             }
-            depositRequests, err := ParseDepositLogs(blockLogs, config)
-            if err != nil {
+            if err := ParseDepositLogs(&requests, blockLogs, config); err != nil {
                 panic(fmt.Sprintf("failed to parse deposit log: %v", err))
             }
-            requests = append(requests, depositRequests)
             // create EVM for system calls
             blockContext := NewEVMBlockContext(b.header, cm, &b.header.Coinbase)
             evm := vm.NewEVM(blockContext, statedb, cm.config, vm.Config{})
-            // EIP-7002 withdrawals
-            withdrawalRequests := ProcessWithdrawalQueue(evm)
-            requests = append(requests, withdrawalRequests)
-            // EIP-7251 consolidations
-            consolidationRequests := ProcessConsolidationQueue(evm)
-            requests = append(requests, consolidationRequests)
+            // EIP-7002
+            ProcessWithdrawalQueue(&requests, evm)
+            // EIP-7251
+            ProcessConsolidationQueue(&requests, evm)
         }
         if requests != nil {
             reqHash := types.CalcRequestsHash(requests)
```
```diff
@@ -472,9 +472,7 @@ func (g *Genesis) toBlockWithRoot(root common.Hash) *types.Block {
             }
         }
         if conf.IsPrague(num, g.Timestamp) {
-            emptyRequests := [][]byte{{0x00}, {0x01}, {0x02}}
-            rhash := types.CalcRequestsHash(emptyRequests)
-            head.RequestsHash = &rhash
+            head.RequestsHash = &types.EmptyRequestsHash
         }
     }
     return types.NewBlock(head, &types.Body{Withdrawals: withdrawals}, nil, trie.NewStackTrie(nil))
```
```diff
@@ -106,18 +106,15 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
     // Read requests if Prague is enabled.
     var requests [][]byte
     if p.config.IsPrague(block.Number(), block.Time()) {
-        // EIP-6110 deposits
-        depositRequests, err := ParseDepositLogs(allLogs, p.config)
-        if err != nil {
+        requests = [][]byte{}
+        // EIP-6110
+        if err := ParseDepositLogs(&requests, allLogs, p.config); err != nil {
             return nil, err
         }
-        requests = append(requests, depositRequests)
-        // EIP-7002 withdrawals
-        withdrawalRequests := ProcessWithdrawalQueue(evm)
-        requests = append(requests, withdrawalRequests)
-        // EIP-7251 consolidations
-        consolidationRequests := ProcessConsolidationQueue(evm)
-        requests = append(requests, consolidationRequests)
+        // EIP-7002
+        ProcessWithdrawalQueue(&requests, evm)
+        // EIP-7251
+        ProcessConsolidationQueue(&requests, evm)
     }
 
     // Finalize the block, applying any consensus engine specific extras (e.g. block rewards)
```
```diff
@@ -271,17 +268,17 @@ func ProcessParentBlockHash(prevHash common.Hash, evm *vm.EVM) {
 
 // ProcessWithdrawalQueue calls the EIP-7002 withdrawal queue contract.
 // It returns the opaque request data returned by the contract.
-func ProcessWithdrawalQueue(evm *vm.EVM) []byte {
-    return processRequestsSystemCall(evm, 0x01, params.WithdrawalQueueAddress)
+func ProcessWithdrawalQueue(requests *[][]byte, evm *vm.EVM) {
+    processRequestsSystemCall(requests, evm, 0x01, params.WithdrawalQueueAddress)
 }
 
 // ProcessConsolidationQueue calls the EIP-7251 consolidation queue contract.
 // It returns the opaque request data returned by the contract.
-func ProcessConsolidationQueue(evm *vm.EVM) []byte {
-    return processRequestsSystemCall(evm, 0x02, params.ConsolidationQueueAddress)
+func ProcessConsolidationQueue(requests *[][]byte, evm *vm.EVM) {
+    processRequestsSystemCall(requests, evm, 0x02, params.ConsolidationQueueAddress)
 }
 
-func processRequestsSystemCall(evm *vm.EVM, requestType byte, addr common.Address) []byte {
+func processRequestsSystemCall(requests *[][]byte, evm *vm.EVM, requestType byte, addr common.Address) {
     if tracer := evm.Config.Tracer; tracer != nil {
         if tracer.OnSystemCallStart != nil {
             tracer.OnSystemCallStart()
```
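Reviewer note: the hunks above and below change the request helpers from value-returning functions into appenders over a shared slice. As a reference for the new calling convention, here is a hedged sketch of a combined call site (my own composition; it only uses functions whose signatures appear in this diff):

```go
package requestsdemo

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/params"
)

// collectRequests mirrors the pattern now used at every call site: start from
// an empty (non-nil) slice, let each helper append its type-prefixed blob,
// and commit to the result with CalcRequestsHash.
func collectRequests(evm *vm.EVM, logs []*types.Log, config *params.ChainConfig) (common.Hash, error) {
	requests := [][]byte{}
	// EIP-6110: appends one 0x00-prefixed blob when deposit logs are present.
	if err := core.ParseDepositLogs(&requests, logs, config); err != nil {
		return common.Hash{}, err
	}
	// EIP-7002 and EIP-7251: append 0x01-/0x02-prefixed blobs, skipping empty output.
	core.ProcessWithdrawalQueue(&requests, evm)
	core.ProcessConsolidationQueue(&requests, evm)
	return types.CalcRequestsHash(requests), nil
}
```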
```diff
@@ -302,26 +299,32 @@ func processRequestsSystemCall(evm *vm.EVM, requestType byte, addr common.Addres
     evm.StateDB.AddAddressToAccessList(addr)
     ret, _, _ := evm.Call(vm.AccountRef(msg.From), *msg.To, msg.Data, 30_000_000, common.U2560)
     evm.StateDB.Finalise(true)
+    if len(ret) == 0 {
+        return // skip empty output
+    }
 
-    // Create withdrawals requestsData with prefix 0x01
+    // Append prefixed requestsData to the requests list.
     requestsData := make([]byte, len(ret)+1)
     requestsData[0] = requestType
     copy(requestsData[1:], ret)
-    return requestsData
+    *requests = append(*requests, requestsData)
 }
 
 // ParseDepositLogs extracts the EIP-6110 deposit values from logs emitted by
 // BeaconDepositContract.
-func ParseDepositLogs(logs []*types.Log, config *params.ChainConfig) ([]byte, error) {
+func ParseDepositLogs(requests *[][]byte, logs []*types.Log, config *params.ChainConfig) error {
     deposits := make([]byte, 1) // note: first byte is 0x00 (== deposit request type)
     for _, log := range logs {
         if log.Address == config.DepositContractAddress {
             request, err := types.DepositLogToRequest(log.Data)
             if err != nil {
-                return nil, fmt.Errorf("unable to parse deposit data: %v", err)
+                return fmt.Errorf("unable to parse deposit data: %v", err)
             }
             deposits = append(deposits, request...)
         }
     }
-    return deposits, nil
+    if len(deposits) > 1 {
+        *requests = append(*requests, deposits)
+    }
+    return nil
 }
```
```diff
@@ -41,6 +41,9 @@ var (
     // EmptyWithdrawalsHash is the known hash of the empty withdrawal set.
     EmptyWithdrawalsHash = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
 
+    // EmptyRequestsHash is the known hash of an empty request set, sha256("").
+    EmptyRequestsHash = common.HexToHash("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
+
     // EmptyVerkleHash is the known hash of an empty verkle trie.
     EmptyVerkleHash = common.Hash{}
 )
```
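Reviewer note: the constant above follows from the EIP-7685 commitment, where the requests hash is the sha256 of the concatenated sha256 digests of each request, so an empty request list degenerates to sha256("") = e3b0c4…b855. A small illustrative sketch of that scheme (my own, not the geth implementation):

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// requestsHash sketches the EIP-7685 commitment: sha256 over the concatenated
// sha256 digests of the type-prefixed request blobs.
func requestsHash(requests [][]byte) [32]byte {
	h := sha256.New()
	for _, req := range requests {
		inner := sha256.Sum256(req)
		h.Write(inner[:])
	}
	var out [32]byte
	copy(out[:], h.Sum(nil))
	return out
}

func main() {
	// No requests: nothing is hashed in, so the commitment is sha256(""),
	// matching types.EmptyRequestsHash introduced above.
	fmt.Printf("%x\n", requestsHash(nil))
}
```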
```diff
@@ -86,7 +86,7 @@ func TestSupplyOmittedFields(t *testing.T) {
 
     expected := supplyInfo{
         Number:     0,
-        Hash:       common.HexToHash("0xc02ee8ee5b54a40e43f0fa827d431e1bd4f217e941790dda10b2521d1925a20b"),
+        Hash:       common.HexToHash("0x3055fc27d6b4a08eb07033a0d1ee755a4b2988086f28a6189eac1b507525eeb1"),
         ParentHash: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"),
     }
     actual := out[expected.Number]
```
```diff
@@ -2,7 +2,7 @@
 {
     "blobGasPrice": "0x1",
     "blobGasUsed": "0x20000",
-    "blockHash": "0x11e6318d77a45c01f89f76b56d36c6936c5250f4e2bd238cb7b09df73cf0cb7d",
+    "blockHash": "0x17124e31fb075a301b1d7d4135683b0a09fe4e6d453c54e2e734d5ee00744a49",
     "blockNumber": "0x6",
     "contractAddress": null,
     "cumulativeGasUsed": "0x5208",
```
```diff
@@ -1,6 +1,6 @@
 [
     {
-        "blockHash": "0x5526cd89bc188f20fd5e9bb50d8054dc5a51a81a74ed07eacf36a4a8b10de4b1",
+        "blockHash": "0xb3e447c77374fd285964cba692e96b1673a88a959726826b5b6e2dca15472b0a",
         "blockNumber": "0x2",
         "contractAddress": "0xae9bea628c4ce503dcfd7e305cab4e29e7476592",
         "cumulativeGasUsed": "0xcf50",
```
```diff
@@ -1,6 +1,6 @@
 [
     {
-        "blockHash": "0x3e946aa9e252873af511b257d9d89a1bcafa54ce7c6a6442f8407ecdf81e288d",
+        "blockHash": "0x102e50de30318ee99a03a09db74387e79cad3165bf6840cc84249806a2a302f3",
         "blockNumber": "0x4",
         "contractAddress": null,
         "cumulativeGasUsed": "0x538d",
```
```diff
@@ -1,6 +1,6 @@
 [
     {
-        "blockHash": "0xc281d4299fc4e8ce5bba7ecb8deb50f5403d604c806b36aa887dfe2ff84c064f",
+        "blockHash": "0xcc6225bf39327429a3d869af71182d619a354155187d0b5a8ecd6a9309cffcaa",
         "blockNumber": "0x3",
         "contractAddress": null,
         "cumulativeGasUsed": "0x5e28",
```
```diff
@@ -19,7 +19,7 @@
         "blockNumber": "0x3",
         "transactionHash": "0xeaf3921cbf03ba45bad4e6ab807b196ce3b2a0b5bacc355b6272fa96b11b4287",
         "transactionIndex": "0x0",
-        "blockHash": "0xc281d4299fc4e8ce5bba7ecb8deb50f5403d604c806b36aa887dfe2ff84c064f",
+        "blockHash": "0xcc6225bf39327429a3d869af71182d619a354155187d0b5a8ecd6a9309cffcaa",
         "logIndex": "0x0",
         "removed": false
     }
```
```diff
@@ -1,6 +1,6 @@
 [
     {
-        "blockHash": "0xda50d57d8802553b00bb8e4d777bd5c4114086941119ca04edb15429f4818ed9",
+        "blockHash": "0xe9bd1d8c303b1af5c704b9d78e62c54a34af47e0db04ac1389a5ef74a619b9da",
         "blockNumber": "0x1",
         "contractAddress": null,
         "cumulativeGasUsed": "0x5208",
```
```diff
@@ -2,7 +2,7 @@
 {
     "blobGasPrice": "0x1",
     "blobGasUsed": "0x20000",
-    "blockHash": "0x11e6318d77a45c01f89f76b56d36c6936c5250f4e2bd238cb7b09df73cf0cb7d",
+    "blockHash": "0x17124e31fb075a301b1d7d4135683b0a09fe4e6d453c54e2e734d5ee00744a49",
     "blockNumber": "0x6",
     "contractAddress": null,
     "cumulativeGasUsed": "0x5208",
```
```diff
@@ -1,7 +1,7 @@
 {
     "blobGasPrice": "0x1",
     "blobGasUsed": "0x20000",
-    "blockHash": "0x11e6318d77a45c01f89f76b56d36c6936c5250f4e2bd238cb7b09df73cf0cb7d",
+    "blockHash": "0x17124e31fb075a301b1d7d4135683b0a09fe4e6d453c54e2e734d5ee00744a49",
     "blockNumber": "0x6",
     "contractAddress": null,
     "cumulativeGasUsed": "0x5208",
```
```diff
@@ -1,5 +1,5 @@
 {
-    "blockHash": "0x5526cd89bc188f20fd5e9bb50d8054dc5a51a81a74ed07eacf36a4a8b10de4b1",
+    "blockHash": "0xb3e447c77374fd285964cba692e96b1673a88a959726826b5b6e2dca15472b0a",
     "blockNumber": "0x2",
     "contractAddress": "0xae9bea628c4ce503dcfd7e305cab4e29e7476592",
     "cumulativeGasUsed": "0xcf50",
```
```diff
@@ -1,5 +1,5 @@
 {
-    "blockHash": "0xa04ad6be58c45fe483991b89416572bc50426b0de44b769757e95c704250f874",
+    "blockHash": "0x53bffe54375c0a31fe7bc0db7455db7d48278234c2400efa4d40d1c57cbe868d",
     "blockNumber": "0x5",
     "contractAddress": "0xfdaa97661a584d977b4d3abb5370766ff5b86a18",
     "cumulativeGasUsed": "0xe01c",
```
```diff
@@ -1,5 +1,5 @@
 {
-    "blockHash": "0x3e946aa9e252873af511b257d9d89a1bcafa54ce7c6a6442f8407ecdf81e288d",
+    "blockHash": "0x102e50de30318ee99a03a09db74387e79cad3165bf6840cc84249806a2a302f3",
     "blockNumber": "0x4",
     "contractAddress": null,
     "cumulativeGasUsed": "0x538d",
```
```diff
@@ -1,5 +1,5 @@
 {
-    "blockHash": "0xda50d57d8802553b00bb8e4d777bd5c4114086941119ca04edb15429f4818ed9",
+    "blockHash": "0xe9bd1d8c303b1af5c704b9d78e62c54a34af47e0db04ac1389a5ef74a619b9da",
     "blockNumber": "0x1",
     "contractAddress": null,
     "cumulativeGasUsed": "0x5208",
```
```diff
@@ -1,5 +1,5 @@
 {
-    "blockHash": "0xc281d4299fc4e8ce5bba7ecb8deb50f5403d604c806b36aa887dfe2ff84c064f",
+    "blockHash": "0xcc6225bf39327429a3d869af71182d619a354155187d0b5a8ecd6a9309cffcaa",
     "blockNumber": "0x3",
     "contractAddress": null,
     "cumulativeGasUsed": "0x5e28",
```
```diff
@@ -18,7 +18,7 @@
     "blockNumber": "0x3",
     "transactionHash": "0xeaf3921cbf03ba45bad4e6ab807b196ce3b2a0b5bacc355b6272fa96b11b4287",
     "transactionIndex": "0x0",
-    "blockHash": "0xc281d4299fc4e8ce5bba7ecb8deb50f5403d604c806b36aa887dfe2ff84c064f",
+    "blockHash": "0xcc6225bf39327429a3d869af71182d619a354155187d0b5a8ecd6a9309cffcaa",
     "logIndex": "0x0",
     "removed": false
 }
```
```diff
@@ -121,18 +121,15 @@ func (miner *Miner) generateWork(params *generateParams, witness bool) *newPaylo
     // Collect consensus-layer requests if Prague is enabled.
     var requests [][]byte
     if miner.chainConfig.IsPrague(work.header.Number, work.header.Time) {
+        requests = [][]byte{}
         // EIP-6110 deposits
-        depositRequests, err := core.ParseDepositLogs(allLogs, miner.chainConfig)
-        if err != nil {
+        if err := core.ParseDepositLogs(&requests, allLogs, miner.chainConfig); err != nil {
             return &newPayloadResult{err: err}
         }
-        requests = append(requests, depositRequests)
-        // EIP-7002 withdrawals
-        withdrawalRequests := core.ProcessWithdrawalQueue(work.evm)
-        requests = append(requests, withdrawalRequests)
+        // EIP-7002
+        core.ProcessWithdrawalQueue(&requests, work.evm)
         // EIP-7251 consolidations
-        consolidationRequests := core.ProcessConsolidationQueue(work.evm)
-        requests = append(requests, consolidationRequests)
+        core.ProcessConsolidationQueue(&requests, work.evm)
     }
     if requests != nil {
         reqHash := types.CalcRequestsHash(requests)
```
```diff
@@ -486,13 +486,11 @@ func VerifyRangeProof(rootHash common.Hash, firstKey []byte, keys [][]byte, valu
         return false, fmt.Errorf("inconsistent proof data, keys: %d, values: %d", len(keys), len(values))
     }
     // Ensure the received batch is monotonic increasing and contains no deletions
-    for i := 0; i < len(keys)-1; i++ {
-        if bytes.Compare(keys[i], keys[i+1]) >= 0 {
+    for i := 0; i < len(keys); i++ {
+        if i < len(keys)-1 && bytes.Compare(keys[i], keys[i+1]) >= 0 {
             return false, errors.New("range is not monotonically increasing")
         }
-    }
-    for _, value := range values {
-        if len(value) == 0 {
+        if len(values[i]) == 0 {
             return false, errors.New("range contains deletion")
         }
     }
```
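Reviewer note: the change above fuses the two validation passes into a single loop over the same index, checking ordering against the next key and emptiness of the current value in one iteration. A standalone restatement of the fused check (identifiers are mine):

```go
package rangeproof

import (
	"bytes"
	"errors"
)

// checkRange restates the fused loop: one pass enforces strictly increasing
// keys and rejects empty (deleted) values.
func checkRange(keys, values [][]byte) error {
	if len(keys) != len(values) {
		return errors.New("inconsistent proof data")
	}
	for i := 0; i < len(keys); i++ {
		if i < len(keys)-1 && bytes.Compare(keys[i], keys[i+1]) >= 0 {
			return errors.New("range is not monotonically increasing")
		}
		if len(values[i]) == 0 {
			return errors.New("range contains deletion")
		}
	}
	return nil
}
```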
```diff
@@ -60,6 +60,10 @@ type backend interface {
     // An error will be returned if the specified state is not available.
     NodeReader(root common.Hash) (database.NodeReader, error)
 
+    // StateReader returns a reader for accessing flat states within the specified
+    // state. An error will be returned if the specified state is not available.
+    StateReader(root common.Hash) (database.StateReader, error)
+
     // Initialized returns an indicator if the state data is already initialized
     // according to the state scheme.
     Initialized(genesisRoot common.Hash) bool
```
```diff
@@ -122,6 +126,13 @@ func (db *Database) NodeReader(blockRoot common.Hash) (database.NodeReader, erro
     return db.backend.NodeReader(blockRoot)
 }
 
+// StateReader returns a reader that allows access to the state data associated
+// with the specified state. An error will be returned if the specified state is
+// not available.
+func (db *Database) StateReader(blockRoot common.Hash) (database.StateReader, error) {
+    return db.backend.StateReader(blockRoot)
+}
+
 // Update performs a state transition by committing dirty nodes contained in the
 // given set in order to update state from the specified parent to the specified
 // root. The held pre-images accumulated up to this point will be flushed in case
```
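Reviewer note: with the forwarding method above, a triedb handle can hand out flat-state readers. A hedged usage sketch (readAccount is hypothetical; the reader's Account method and SlimAccount decoding appear later in this diff):

```go
package statereader

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/triedb"
)

// readAccount resolves an account through the flat-state reader rather than
// by descending trie nodes. addrHash is the keccak256 hash of the address.
func readAccount(db *triedb.Database, root, addrHash common.Hash) error {
	reader, err := db.StateReader(root)
	if err != nil {
		return fmt.Errorf("state %#x unavailable: %w", root, err)
	}
	account, err := reader.Account(addrHash)
	if err != nil {
		return err
	}
	if account == nil {
		fmt.Println("account does not exist")
		return nil
	}
	fmt.Println("balance:", account.Balance)
	return nil
}
```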
```diff
@@ -635,3 +635,9 @@ func (reader *reader) Node(owner common.Hash, path []byte, hash common.Hash) ([]
     blob, _ := reader.db.node(hash)
     return blob, nil
 }
+
+// StateReader returns a reader that allows access to the state data associated
+// with the specified state.
+func (db *Database) StateReader(root common.Hash) (database.StateReader, error) {
+    return nil, errors.New("not implemented")
+}
```
```diff
@@ -33,40 +33,56 @@ import (
 // must be checked before diving into disk (since it basically is not yet written
 // data).
 type buffer struct {
-    layers uint64   // The number of diff layers aggregated inside
-    limit  uint64   // The maximum memory allowance in bytes
-    nodes  *nodeSet // Aggregated trie node set
+    layers uint64    // The number of diff layers aggregated inside
+    limit  uint64    // The maximum memory allowance in bytes
+    nodes  *nodeSet  // Aggregated trie node set
+    states *stateSet // Aggregated state set
 }
 
 // newBuffer initializes the buffer with the provided states and trie nodes.
-func newBuffer(limit int, nodes *nodeSet, layers uint64) *buffer {
+func newBuffer(limit int, nodes *nodeSet, states *stateSet, layers uint64) *buffer {
     // Don't panic for lazy users if any provided set is nil
     if nodes == nil {
         nodes = newNodeSet(nil)
     }
+    if states == nil {
+        states = newStates(nil, nil)
+    }
     return &buffer{
         layers: layers,
         limit:  uint64(limit),
         nodes:  nodes,
+        states: states,
     }
 }
 
+// account retrieves the account blob with account address hash.
+func (b *buffer) account(hash common.Hash) ([]byte, bool) {
+    return b.states.account(hash)
+}
+
+// storage retrieves the storage slot with account address hash and slot key.
+func (b *buffer) storage(addrHash common.Hash, storageHash common.Hash) ([]byte, bool) {
+    return b.states.storage(addrHash, storageHash)
+}
+
 // node retrieves the trie node with node path and its trie identifier.
 func (b *buffer) node(owner common.Hash, path []byte) (*trienode.Node, bool) {
     return b.nodes.node(owner, path)
 }
 
 // commit merges the provided states and trie nodes into the buffer.
-func (b *buffer) commit(nodes *nodeSet) *buffer {
+func (b *buffer) commit(nodes *nodeSet, states *stateSet) *buffer {
     b.layers++
     b.nodes.merge(nodes)
+    b.states.merge(states)
     return b
 }
 
-// revert is the reverse operation of commit. It also merges the provided states
+// revertTo is the reverse operation of commit. It also merges the provided states
 // and trie nodes into the buffer. The key difference is that the provided state
 // set should reverse the changes made by the most recent state transition.
-func (b *buffer) revert(db ethdb.KeyValueReader, nodes map[common.Hash]map[string]*trienode.Node) error {
+func (b *buffer) revertTo(db ethdb.KeyValueReader, nodes map[common.Hash]map[string]*trienode.Node, accounts map[common.Hash][]byte, storages map[common.Hash]map[common.Hash][]byte) error {
     // Short circuit if no embedded state transition to revert
     if b.layers == 0 {
         return errStateUnrecoverable
```
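Reviewer note: to summarize the widened buffer API, here is a hypothetical same-package sketch (it would only compile inside triedb/pathdb, since every identifier is unexported):

```go
package pathdb

import "github.com/ethereum/go-ethereum/common"

// exampleBufferUsage is a hypothetical sketch of the extended buffer: it now
// aggregates flat states alongside trie nodes and answers state lookups.
func exampleBufferUsage(nodes *nodeSet, states *stateSet, addrHash common.Hash) {
	buf := newBuffer(64*1024*1024, nil, nil, 0) // nil sets are lazily initialized
	buf = buf.commit(nodes, states)             // aggregate one diff layer
	if blob, found := buf.account(addrHash); found {
		_ = blob // dirty account data pending flush; empty blob means deleted
	}
	_ = buf.size() // memory accounting now covers nodes plus states
}
```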
```diff
@@ -78,7 +94,8 @@ func (b *buffer) revert(db ethdb.KeyValueReader, nodes map[common.Hash]map[strin
         b.reset()
         return nil
     }
-    b.nodes.revert(db, nodes)
+    b.nodes.revertTo(db, nodes)
+    b.states.revertTo(accounts, storages)
     return nil
 }
```
```diff
@@ -86,6 +103,7 @@ func (b *buffer) revert(db ethdb.KeyValueReader, nodes map[common.Hash]map[strin
 func (b *buffer) reset() {
     b.layers = 0
     b.nodes.reset()
+    b.states.reset()
 }
 
 // empty returns an indicator if buffer is empty.
```
```diff
@@ -101,7 +119,7 @@ func (b *buffer) full() bool {
 
 // size returns the approximate memory size of the held content.
 func (b *buffer) size() uint64 {
-    return b.nodes.size
+    return b.states.size + b.nodes.size
 }
 
 // flush persists the in-memory dirty trie node into the disk if the configured
```
```diff
@@ -68,6 +68,24 @@ type layer interface {
     // - no error will be returned if the requested node is not found in database.
     node(owner common.Hash, path []byte, depth int) ([]byte, common.Hash, *nodeLoc, error)
 
+    // account directly retrieves the account RLP associated with a particular
+    // hash in the slim data format. An error will be returned if the read
+    // operation exits abnormally. Specifically, if the layer is already stale.
+    //
+    // Note:
+    // - the returned account is not a copy, please don't modify it.
+    // - no error will be returned if the requested account is not found in database.
+    account(hash common.Hash, depth int) ([]byte, error)
+
+    // storage directly retrieves the storage data associated with a particular hash,
+    // within a particular account. An error will be returned if the read operation
+    // exits abnormally. Specifically, if the layer is already stale.
+    //
+    // Note:
+    // - the returned storage data is not a copy, please don't modify it.
+    // - no error will be returned if the requested slot is not found in database.
+    storage(accountHash, storageHash common.Hash, depth int) ([]byte, error)
+
     // rootHash returns the root hash for which this layer was made.
     rootHash() common.Hash
```
```diff
@@ -130,17 +148,18 @@ var Defaults = &Config{
 // ReadOnly is the config in order to open database in read only mode.
 var ReadOnly = &Config{ReadOnly: true}
 
-// Database is a multiple-layered structure for maintaining in-memory trie nodes.
-// It consists of one persistent base layer backed by a key-value store, on top
-// of which arbitrarily many in-memory diff layers are stacked. The memory diffs
-// can form a tree with branching, but the disk layer is singleton and common to
-// all. If a reorg goes deeper than the disk layer, a batch of reverse diffs can
-// be applied to rollback. The deepest reorg that can be handled depends on the
-// amount of state histories tracked in the disk.
+// Database is a multiple-layered structure for maintaining in-memory states
+// along with its dirty trie nodes. It consists of one persistent base layer
+// backed by a key-value store, on top of which arbitrarily many in-memory diff
+// layers are stacked. The memory diffs can form a tree with branching, but the
+// disk layer is singleton and common to all. If a reorg goes deeper than the
+// disk layer, a batch of reverse diffs can be applied to rollback. The deepest
+// reorg that can be handled depends on the amount of state histories tracked
+// in the disk.
 //
 // At most one readable and writable database can be opened at the same time in
-// the whole system which ensures that only one database writer can operate disk
-// state. Unexpected open operations can cause the system to panic.
+// the whole system which ensures that only one database writer can operate the
+// persistent state. Unexpected open operations can cause the system to panic.
 type Database struct {
     // readOnly is the flag whether the mutation is allowed to be applied.
     // It will be set automatically when the database is journaled during
```
```diff
@@ -358,7 +377,7 @@ func (db *Database) Enable(root common.Hash) error {
     }
     // Re-construct a new disk layer backed by persistent state
     // with **empty clean cache and node buffer**.
-    db.tree.reset(newDiskLayer(root, 0, db, nil, newBuffer(db.config.WriteBufferSize, nil, 0)))
+    db.tree.reset(newDiskLayer(root, 0, db, nil, newBuffer(db.config.WriteBufferSize, nil, nil, 0)))
 
     // Re-enable the database as the final step.
     db.waitSync = false
```
```diff
@@ -309,7 +309,7 @@ func (t *tester) generate(parent common.Hash) (common.Hash, *trienode.MergedNode
             delete(t.storages, addrHash)
         }
     }
-    return root, ctx.nodes, NewStateSetWithOrigin(ctx.accountOrigin, ctx.storageOrigin)
+    return root, ctx.nodes, NewStateSetWithOrigin(ctx.accounts, ctx.storages, ctx.accountOrigin, ctx.storageOrigin)
 }
 
 // lastHash returns the latest root hash, or empty if nothing is cached.
```
```diff
@@ -52,6 +52,7 @@ func newDiffLayer(parent layer, root common.Hash, id uint64, block uint64, nodes
         states: states,
     }
     dirtyNodeWriteMeter.Mark(int64(nodes.size))
+    dirtyStateWriteMeter.Mark(int64(states.size))
     log.Debug("Created new diff layer", "id", id, "block", block, "nodesize", common.StorageSize(nodes.size), "statesize", common.StorageSize(states.size))
     return dl
 }
```
```diff
@@ -96,6 +97,58 @@ func (dl *diffLayer) node(owner common.Hash, path []byte, depth int) ([]byte, co
     return dl.parent.node(owner, path, depth+1)
 }
 
+// account directly retrieves the account RLP associated with a particular
+// hash in the slim data format.
+//
+// Note the returned account is not a copy, please don't modify it.
+func (dl *diffLayer) account(hash common.Hash, depth int) ([]byte, error) {
+    // Hold the lock, ensure the parent won't be changed during the
+    // state accessing.
+    dl.lock.RLock()
+    defer dl.lock.RUnlock()
+
+    if blob, found := dl.states.account(hash); found {
+        dirtyStateHitMeter.Mark(1)
+        dirtyStateHitDepthHist.Update(int64(depth))
+        dirtyStateReadMeter.Mark(int64(len(blob)))
+
+        if len(blob) == 0 {
+            stateAccountInexMeter.Mark(1)
+        } else {
+            stateAccountExistMeter.Mark(1)
+        }
+        return blob, nil
+    }
+    // Account is unknown to this layer, resolve from parent
+    return dl.parent.account(hash, depth+1)
+}
+
+// storage directly retrieves the storage data associated with a particular hash,
+// within a particular account.
+//
+// Note the returned storage slot is not a copy, please don't modify it.
+func (dl *diffLayer) storage(accountHash, storageHash common.Hash, depth int) ([]byte, error) {
+    // Hold the lock, ensure the parent won't be changed during the
+    // state accessing.
+    dl.lock.RLock()
+    defer dl.lock.RUnlock()
+
+    if blob, found := dl.states.storage(accountHash, storageHash); found {
+        dirtyStateHitMeter.Mark(1)
+        dirtyStateHitDepthHist.Update(int64(depth))
+        dirtyStateReadMeter.Mark(int64(len(blob)))
+
+        if len(blob) == 0 {
+            stateStorageInexMeter.Mark(1)
+        } else {
+            stateStorageExistMeter.Mark(1)
+        }
+        return blob, nil
+    }
+    // storage slot is unknown to this layer, resolve from parent
+    return dl.parent.storage(accountHash, storageHash, depth+1)
+}
+
 // update implements the layer interface, creating a new layer on top of the
 // existing layer tree with the specified data items.
 func (dl *diffLayer) update(root common.Hash, id uint64, block uint64, nodes *nodeSet, states *StateSetWithOrigin) *diffLayer {
```
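Reviewer note: these methods make account and storage lookups walk the layer tree the same way node lookups do: each diff layer checks its own state set and recurses into its parent with an incremented depth, bottoming out at the disk layer. A toy model of that resolution chain (simplified types, my own naming):

```go
package layers

import "errors"

// layer is a toy version of the lookup chain added in this diff: diff layers
// consult their own data and defer to the parent; the disk layer checks its
// dirty buffer and, for now, has no persistent fallback (a TODO upstream).
type layer interface {
	account(hash string, depth int) ([]byte, error)
}

type diffLayer struct {
	data   map[string][]byte
	parent layer
}

func (dl *diffLayer) account(hash string, depth int) ([]byte, error) {
	if blob, ok := dl.data[hash]; ok {
		return blob, nil // hit; depth records how many layers were traversed
	}
	return dl.parent.account(hash, depth+1)
}

type diskLayer struct {
	buffer map[string][]byte
}

func (dl *diskLayer) account(hash string, depth int) ([]byte, error) {
	if blob, ok := dl.buffer[hash]; ok {
		return blob, nil
	}
	return nil, errors.New("not supported") // persistent read: TODO upstream
}
```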
```diff
@@ -30,7 +30,7 @@ import (
 func emptyLayer() *diskLayer {
     return &diskLayer{
         db:     New(rawdb.NewMemoryDatabase(), nil, false),
-        buffer: newBuffer(defaultBufferSize, nil, 0),
+        buffer: newBuffer(defaultBufferSize, nil, nil, 0),
     }
 }
```
```diff
@@ -76,7 +76,7 @@ func benchmarkSearch(b *testing.B, depth int, total int) {
             nblob = common.CopyBytes(blob)
         }
     }
-    return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil))
+    return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil, nil, nil))
 }
 var layer layer
 layer = emptyLayer()
```
```diff
@@ -118,7 +118,7 @@ func BenchmarkPersist(b *testing.B) {
     )
     nodes[common.Hash{}][string(path)] = node
 }
-    return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil))
+    return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil, nil, nil))
 }
 for i := 0; i < b.N; i++ {
     b.StopTimer()
```
```diff
@@ -156,7 +156,7 @@ func BenchmarkJournal(b *testing.B) {
     )
     nodes[common.Hash{}][string(path)] = node
 }
-    return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), new(StateSetWithOrigin))
+    return newDiffLayer(parent, common.Hash{}, 0, 0, newNodeSet(nodes), NewStateSetWithOrigin(nil, nil, nil, nil))
 }
 var layer layer
 layer = emptyLayer()
```
```diff
@@ -17,6 +17,7 @@
 package pathdb
 
 import (
+    "errors"
     "fmt"
     "sync"
```
```diff
@@ -33,7 +34,7 @@ type diskLayer struct {
     id     uint64           // Immutable, corresponding state id
     db     *Database        // Path-based trie database
     nodes  *fastcache.Cache // GC friendly memory cache of clean nodes
-    buffer *buffer          // Dirty buffer to aggregate writes of nodes
+    buffer *buffer          // Dirty buffer to aggregate writes of nodes and states
     stale  bool             // Signals that the layer became stale (state progressed)
     lock   sync.RWMutex     // Lock used to protect stale flag
 }
```
```diff
@@ -140,6 +141,75 @@ func (dl *diskLayer) node(owner common.Hash, path []byte, depth int) ([]byte, co
     return blob, h.hash(blob), &nodeLoc{loc: locDiskLayer, depth: depth}, nil
 }
 
+// account directly retrieves the account RLP associated with a particular
+// hash in the slim data format.
+//
+// Note the returned account is not a copy, please don't modify it.
+func (dl *diskLayer) account(hash common.Hash, depth int) ([]byte, error) {
+    dl.lock.RLock()
+    defer dl.lock.RUnlock()
+
+    if dl.stale {
+        return nil, errSnapshotStale
+    }
+    // Try to retrieve the account from the not-yet-written
+    // node buffer first. Note the buffer is lock free since
+    // it's impossible to mutate the buffer before tagging the
+    // layer as stale.
+    blob, found := dl.buffer.account(hash)
+    if found {
+        dirtyStateHitMeter.Mark(1)
+        dirtyStateReadMeter.Mark(int64(len(blob)))
+        dirtyStateHitDepthHist.Update(int64(depth))
+
+        if len(blob) == 0 {
+            stateAccountInexMeter.Mark(1)
+        } else {
+            stateAccountExistMeter.Mark(1)
+        }
+        return blob, nil
+    }
+    dirtyStateMissMeter.Mark(1)
+
+    // TODO(rjl493456442) support persistent state retrieval
+    return nil, errors.New("not supported")
+}
+
+// storage directly retrieves the storage data associated with a particular hash,
+// within a particular account.
+//
+// Note the returned account is not a copy, please don't modify it.
+func (dl *diskLayer) storage(accountHash, storageHash common.Hash, depth int) ([]byte, error) {
+    // Hold the lock, ensure the parent won't be changed during the
+    // state accessing.
+    dl.lock.RLock()
+    defer dl.lock.RUnlock()
+
+    if dl.stale {
+        return nil, errSnapshotStale
+    }
+    // Try to retrieve the storage slot from the not-yet-written
+    // node buffer first. Note the buffer is lock free since
+    // it's impossible to mutate the buffer before tagging the
+    // layer as stale.
+    if blob, found := dl.buffer.storage(accountHash, storageHash); found {
+        dirtyStateHitMeter.Mark(1)
+        dirtyStateReadMeter.Mark(int64(len(blob)))
+        dirtyStateHitDepthHist.Update(int64(depth))
+
+        if len(blob) == 0 {
+            stateStorageInexMeter.Mark(1)
+        } else {
+            stateStorageExistMeter.Mark(1)
+        }
+        return blob, nil
+    }
+    dirtyStateMissMeter.Mark(1)
+
+    // TODO(rjl493456442) support persistent state retrieval
+    return nil, errors.New("not supported")
+}
+
 // update implements the layer interface, returning a new diff layer on top
 // with the given state set.
 func (dl *diskLayer) update(root common.Hash, id uint64, block uint64, nodes *nodeSet, states *StateSetWithOrigin) *diffLayer {
```
```diff
@@ -190,14 +260,14 @@ func (dl *diskLayer) commit(bottom *diffLayer, force bool) (*diskLayer, error) {
 
     // In a unique scenario where the ID of the oldest history object (after tail
     // truncation) surpasses the persisted state ID, we take the necessary action
-    // of forcibly committing the cached dirty nodes to ensure that the persisted
+    // of forcibly committing the cached dirty states to ensure that the persisted
     // state ID remains higher.
     if !force && rawdb.ReadPersistentStateID(dl.db.diskdb) < oldest {
         force = true
     }
-    // Merge the trie nodes of the bottom-most diff layer into the buffer as the
-    // combined layer.
-    combined := dl.buffer.commit(bottom.nodes)
+    // Merge the trie nodes and flat states of the bottom-most diff layer into the
+    // buffer as the combined layer.
+    combined := dl.buffer.commit(bottom.nodes, bottom.states.stateSet)
     if combined.full() || force {
         if err := combined.flush(dl.db.diskdb, dl.db.freezer, dl.nodes, bottom.stateID()); err != nil {
             return nil, err
```
```diff
@@ -225,6 +295,24 @@ func (dl *diskLayer) revert(h *history) (*diskLayer, error) {
     if dl.id == 0 {
         return nil, fmt.Errorf("%w: zero state id", errStateUnrecoverable)
     }
+    var (
+        buff     = crypto.NewKeccakState()
+        hashes   = make(map[common.Address]common.Hash)
+        accounts = make(map[common.Hash][]byte)
+        storages = make(map[common.Hash]map[common.Hash][]byte)
+    )
+    for addr, blob := range h.accounts {
+        hash := crypto.HashData(buff, addr.Bytes())
+        hashes[addr] = hash
+        accounts[hash] = blob
+    }
+    for addr, storage := range h.storages {
+        hash, ok := hashes[addr]
+        if !ok {
+            panic(fmt.Errorf("storage history with no account %x", addr))
+        }
+        storages[hash] = storage
+    }
     // Apply the reverse state changes upon the current state. This must
     // be done before holding the lock in order to access state in "this"
     // layer.
```
```diff
@@ -244,7 +332,7 @@ func (dl *diskLayer) revert(h *history) (*diskLayer, error) {
     // needs to be reverted is not yet flushed and cached in node
     // buffer, otherwise, manipulate persistent state directly.
     if !dl.buffer.empty() {
-        err := dl.buffer.revert(dl.db.diskdb, nodes)
+        err := dl.buffer.revertTo(dl.db.diskdb, nodes, accounts, storages)
         if err != nil {
             return nil, err
         }
```
```diff
@@ -45,7 +45,8 @@ var (
 //
 // - Version 0: initial version
 // - Version 1: storage.Incomplete field is removed
-const journalVersion uint64 = 1
+// - Version 2: add post-modification state values
+const journalVersion uint64 = 2
 
 // loadJournal tries to parse the layer journal from the disk.
 func (db *Database) loadJournal(diskRoot common.Hash) (layer, error) {
```
```diff
@@ -108,7 +109,7 @@ func (db *Database) loadLayers() layer {
         log.Info("Failed to load journal, discard it", "err", err)
     }
     // Return single layer with persistent state.
-    return newDiskLayer(root, rawdb.ReadPersistentStateID(db.diskdb), db, nil, newBuffer(db.config.WriteBufferSize, nil, 0))
+    return newDiskLayer(root, rawdb.ReadPersistentStateID(db.diskdb), db, nil, newBuffer(db.config.WriteBufferSize, nil, nil, 0))
 }
 
 // loadDiskLayer reads the binary blob from the layer journal, reconstructing
```
```diff
@@ -135,7 +136,12 @@ func (db *Database) loadDiskLayer(r *rlp.Stream) (layer, error) {
     if err := nodes.decode(r); err != nil {
         return nil, err
     }
-    return newDiskLayer(root, id, db, nil, newBuffer(db.config.WriteBufferSize, &nodes, id-stored)), nil
+    // Resolve flat state sets in aggregated buffer
+    var states stateSet
+    if err := states.decode(r); err != nil {
+        return nil, err
+    }
+    return newDiskLayer(root, id, db, nil, newBuffer(db.config.WriteBufferSize, &nodes, &states, id-stored)), nil
 }
 
 // loadDiffLayer reads the next sections of a layer journal, reconstructing a new
```
```diff
@@ -189,6 +195,10 @@ func (dl *diskLayer) journal(w io.Writer) error {
     if err := dl.buffer.nodes.encode(w); err != nil {
         return err
     }
+    // Step four, write the accumulated flat states into the journal
+    if err := dl.buffer.states.encode(w); err != nil {
+        return err
+    }
     log.Debug("Journaled pathdb disk layer", "root", dl.root)
     return nil
 }
```
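Reviewer note: the journal thus gains a fourth section: after the root, the state id and the node set, the aggregated flat state set is written, which is why journalVersion is bumped to 2 and older journals are discarded on load. For reference, an illustrative round-trip of the parallel-list encoding style used by stateSet.encode later in this diff (struct shape copied from the diff, demo values mine):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/rlp"
)

// accounts mirrors the journal shape used by stateSet.encode: parallel lists
// of account hashes and their slim-RLP blobs.
type accounts struct {
	AddrHashes []common.Hash
	Accounts   [][]byte
}

func main() {
	enc := accounts{
		AddrHashes: []common.Hash{common.HexToHash("0x01")},
		Accounts:   [][]byte{{0xde, 0xad}},
	}
	var buf bytes.Buffer
	if err := rlp.Encode(&buf, enc); err != nil {
		panic(err)
	}
	var dec accounts
	if err := rlp.DecodeBytes(buf.Bytes(), &dec); err != nil {
		panic(err)
	}
	fmt.Printf("round-trip ok: %x -> %x\n", enc.Accounts[0], dec.Accounts[0])
}
```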
```diff
@@ -30,10 +30,21 @@ var (
     dirtyNodeWriteMeter   = metrics.NewRegisteredMeter("pathdb/dirty/node/write", nil)
     dirtyNodeHitDepthHist = metrics.NewRegisteredHistogram("pathdb/dirty/node/depth", nil, metrics.NewExpDecaySample(1028, 0.015))
 
-    cleanFalseMeter = metrics.NewRegisteredMeter("pathdb/clean/false", nil)
-    dirtyFalseMeter = metrics.NewRegisteredMeter("pathdb/dirty/false", nil)
-    diskFalseMeter  = metrics.NewRegisteredMeter("pathdb/disk/false", nil)
-    diffFalseMeter  = metrics.NewRegisteredMeter("pathdb/diff/false", nil)
+    stateAccountInexMeter  = metrics.NewRegisteredMeter("pathdb/state/account/inex/total", nil)
+    stateStorageInexMeter  = metrics.NewRegisteredMeter("pathdb/state/storage/inex/total", nil)
+    stateAccountExistMeter = metrics.NewRegisteredMeter("pathdb/state/account/exist/total", nil)
+    stateStorageExistMeter = metrics.NewRegisteredMeter("pathdb/state/storage/exist/total", nil)
+
+    dirtyStateHitMeter     = metrics.NewRegisteredMeter("pathdb/dirty/state/hit", nil)
+    dirtyStateMissMeter    = metrics.NewRegisteredMeter("pathdb/dirty/state/miss", nil)
+    dirtyStateReadMeter    = metrics.NewRegisteredMeter("pathdb/dirty/state/read", nil)
+    dirtyStateWriteMeter   = metrics.NewRegisteredMeter("pathdb/dirty/state/write", nil)
+    dirtyStateHitDepthHist = metrics.NewRegisteredHistogram("pathdb/dirty/state/depth", nil, metrics.NewExpDecaySample(1028, 0.015))
+
+    nodeCleanFalseMeter = metrics.NewRegisteredMeter("pathdb/clean/false", nil)
+    nodeDirtyFalseMeter = metrics.NewRegisteredMeter("pathdb/dirty/false", nil)
+    nodeDiskFalseMeter  = metrics.NewRegisteredMeter("pathdb/disk/false", nil)
+    nodeDiffFalseMeter  = metrics.NewRegisteredMeter("pathdb/diff/false", nil)
 
     commitTimeTimer  = metrics.NewRegisteredTimer("pathdb/commit/time", nil)
     commitNodesMeter = metrics.NewRegisteredMeter("pathdb/commit/nodes", nil)
```
```diff
@@ -41,6 +52,10 @@ var (
 
     gcTrieNodeMeter      = metrics.NewRegisteredMeter("pathdb/gc/node/count", nil)
     gcTrieNodeBytesMeter = metrics.NewRegisteredMeter("pathdb/gc/node/bytes", nil)
+    gcAccountMeter       = metrics.NewRegisteredMeter("pathdb/gc/account/count", nil)
+    gcAccountBytesMeter  = metrics.NewRegisteredMeter("pathdb/gc/account/bytes", nil)
+    gcStorageMeter       = metrics.NewRegisteredMeter("pathdb/gc/storage/count", nil)
+    gcStorageBytesMeter  = metrics.NewRegisteredMeter("pathdb/gc/storage/bytes", nil)
 
     historyBuildTimeMeter = metrics.NewRegisteredTimer("pathdb/history/time", nil)
     historyDataBytesMeter = metrics.NewRegisteredMeter("pathdb/history/bytes/data", nil)
```
```diff
@@ -131,9 +131,9 @@ func (s *nodeSet) merge(set *nodeSet) {
     s.updateSize(delta)
 }
 
-// revert merges the provided trie nodes into the set. This should reverse the
+// revertTo merges the provided trie nodes into the set. This should reverse the
 // changes made by the most recent state transition.
-func (s *nodeSet) revert(db ethdb.KeyValueReader, nodes map[common.Hash]map[string]*trienode.Node) {
+func (s *nodeSet) revertTo(db ethdb.KeyValueReader, nodes map[common.Hash]map[string]*trienode.Node) {
     var delta int64
     for owner, subset := range nodes {
         current, ok := s.nodes[owner]
```
```diff
@@ -21,7 +21,9 @@ import (
 
     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/common/hexutil"
+    "github.com/ethereum/go-ethereum/core/types"
     "github.com/ethereum/go-ethereum/log"
+    "github.com/ethereum/go-ethereum/rlp"
     "github.com/ethereum/go-ethereum/triedb/database"
 )
```
```diff
@@ -66,13 +68,13 @@ func (r *reader) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte,
     // is not found.
     switch loc.loc {
     case locCleanCache:
-        cleanFalseMeter.Mark(1)
+        nodeCleanFalseMeter.Mark(1)
     case locDirtyCache:
-        dirtyFalseMeter.Mark(1)
+        nodeDirtyFalseMeter.Mark(1)
    case locDiffLayer:
-        diffFalseMeter.Mark(1)
+        nodeDiffFalseMeter.Mark(1)
     case locDiskLayer:
-        diskFalseMeter.Mark(1)
+        nodeDiskFalseMeter.Mark(1)
     }
     blobHex := "nil"
     if len(blob) > 0 {
```
```diff
@@ -84,6 +86,39 @@ func (r *reader) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte,
     return blob, nil
 }
 
+// Account directly retrieves the account associated with a particular hash in
+// the slim data format. An error will be returned if the read operation exits
+// abnormally. Specifically, if the layer is already stale.
+//
+// Note:
+// - the returned account object is safe to modify
+// - no error will be returned if the requested account is not found in database
+func (r *reader) Account(hash common.Hash) (*types.SlimAccount, error) {
+    blob, err := r.layer.account(hash, 0)
+    if err != nil {
+        return nil, err
+    }
+    if len(blob) == 0 {
+        return nil, nil
+    }
+    account := new(types.SlimAccount)
+    if err := rlp.DecodeBytes(blob, account); err != nil {
+        panic(err)
+    }
+    return account, nil
+}
+
+// Storage directly retrieves the storage data associated with a particular hash,
+// within a particular account. An error will be returned if the read operation
+// exits abnormally. Specifically, if the layer is already stale.
+//
+// Note:
+// - the returned storage data is not a copy, please don't modify it
+// - no error will be returned if the requested slot is not found in database
+func (r *reader) Storage(accountHash, storageHash common.Hash) ([]byte, error) {
+    return r.layer.storage(accountHash, storageHash, 0)
+}
+
 // NodeReader retrieves a layer belonging to the given state root.
 func (db *Database) NodeReader(root common.Hash) (database.NodeReader, error) {
     layer := db.tree.get(root)
```
```diff
@@ -92,3 +127,13 @@ func (db *Database) NodeReader(root common.Hash) (database.NodeReader, error) {
     }
     return &reader{layer: layer, noHashCheck: db.isVerkle}, nil
 }
+
+// StateReader returns a reader that allows access to the state data associated
+// with the specified state.
+func (db *Database) StateReader(root common.Hash) (database.StateReader, error) {
+    layer := db.tree.get(root)
+    if layer == nil {
+        return nil, fmt.Errorf("state %#x is not available", root)
+    }
+    return &reader{layer: layer}, nil
+}
```
```diff
@@ -19,10 +19,15 @@ package pathdb
 
 import (
     "fmt"
+    "io"
+    "slices"
+    "sync"
 
     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/core/rawdb"
     "github.com/ethereum/go-ethereum/log"
     "github.com/ethereum/go-ethereum/metrics"
+    "github.com/ethereum/go-ethereum/rlp"
+    "golang.org/x/exp/maps"
 )
 
 // counter helps in tracking items and their corresponding sizes.
```
@ -43,9 +48,383 @@ func (c *counter) report(count metrics.Meter, size metrics.Meter) {
|
|||
size.Mark(int64(c.size))
|
||||
}
|
||||
|
||||
// stateSet represents a collection of state modifications associated with a
|
||||
// transition (e.g., a block execution) or multiple aggregated transitions.
|
||||
//
|
||||
// A stateSet can only reside within a diffLayer or the buffer of a diskLayer,
|
||||
// serving as the envelope for the set. Lock protection is not required for
|
||||
// accessing or mutating the account set and storage set, as the associated
|
||||
// envelope is always marked as stale before any mutation is applied. Any
|
||||
// subsequent state access will be denied due to the stale flag. Therefore,
|
||||
// state access and mutation won't happen at the same time with guarantee.
|
||||
type stateSet struct {
|
||||
accountData map[common.Hash][]byte // Keyed accounts for direct retrieval (nil means deleted)
|
||||
storageData map[common.Hash]map[common.Hash][]byte // Keyed storage slots for direct retrieval. one per account (nil means deleted)
|
||||
size uint64 // Memory size of the state data (accountData and storageData)
|
||||
|
||||
accountListSorted []common.Hash // List of account for iteration. If it exists, it's sorted, otherwise it's nil
|
||||
storageListSorted map[common.Hash][]common.Hash // List of storage slots for iterated retrievals, one per account. Any existing lists are sorted if non-nil
|
||||
|
||||
// Lock for guarding the two lists above. These lists might be accessed
|
||||
// concurrently and lock protection is essential to avoid concurrent
|
||||
// slice or map read/write.
|
||||
listLock sync.RWMutex
|
||||
}
|
||||
|
||||
// newStates constructs the state set with the provided account and storage data.
|
||||
func newStates(accounts map[common.Hash][]byte, storages map[common.Hash]map[common.Hash][]byte) *stateSet {
|
||||
// Don't panic for the lazy callers, initialize the nil maps instead.
|
||||
if accounts == nil {
|
||||
accounts = make(map[common.Hash][]byte)
|
||||
}
|
||||
if storages == nil {
|
||||
storages = make(map[common.Hash]map[common.Hash][]byte)
|
||||
}
|
||||
s := &stateSet{
|
||||
accountData: accounts,
|
||||
storageData: storages,
|
||||
storageListSorted: make(map[common.Hash][]common.Hash),
|
||||
}
|
||||
s.size = s.check()
|
||||
return s
|
||||
}
|
||||
|
||||
// account returns the account data associated with the specified address hash.
|
||||
func (s *stateSet) account(hash common.Hash) ([]byte, bool) {
|
||||
// If the account is known locally, return it
|
||||
if data, ok := s.accountData[hash]; ok {
|
||||
return data, true
|
||||
}
|
||||
return nil, false // account is unknown in this set
|
||||
}
|
||||
|
||||
// storage returns the storage slot associated with the specified address hash
|
||||
// and storage key hash.
|
||||
func (s *stateSet) storage(accountHash, storageHash common.Hash) ([]byte, bool) {
|
||||
// If the account is known locally, try to resolve the slot locally
|
||||
if storage, ok := s.storageData[accountHash]; ok {
|
||||
if data, ok := storage[storageHash]; ok {
|
||||
return data, true
|
||||
}
|
||||
}
|
||||
return nil, false // storage is unknown in this set
|
||||
}
|
||||
|
||||
// check sanitizes accounts and storage slots to ensure the data validity.
|
||||
// Additionally, it computes the total memory size occupied by the maps.
|
||||
func (s *stateSet) check() uint64 {
|
||||
var size int
|
||||
for _, blob := range s.accountData {
|
||||
size += common.HashLength + len(blob)
|
||||
}
|
||||
for accountHash, slots := range s.storageData {
|
||||
if slots == nil {
|
||||
panic(fmt.Sprintf("storage %#x nil", accountHash)) // nil slots is not permitted
|
||||
}
|
||||
for _, blob := range slots {
|
||||
size += 2*common.HashLength + len(blob)
|
||||
}
|
||||
}
|
||||
return uint64(size)
|
||||
}
|
||||
|
||||
// accountList returns a sorted list of all accounts in this state set, including
|
||||
// the deleted ones.
|
||||
//
|
||||
// Note, the returned slice is not a copy, so do not modify it.
|
||||
//
|
||||
// nolint:unused
|
||||
func (s *stateSet) accountList() []common.Hash {
|
||||
// If an old list already exists, return it
|
||||
s.listLock.RLock()
|
||||
list := s.accountListSorted
|
||||
s.listLock.RUnlock()
|
||||
|
||||
if list != nil {
|
||||
return list
|
||||
}
|
||||
// No old sorted account list exists, generate a new one. It's possible that
|
||||
// multiple threads waiting for the write lock may regenerate the list
|
||||
// multiple times, which is acceptable.
|
||||
s.listLock.Lock()
|
||||
defer s.listLock.Unlock()
|
||||
|
||||
list = maps.Keys(s.accountData)
|
||||
slices.SortFunc(list, common.Hash.Cmp)
|
||||
s.accountListSorted = list
|
||||
return list
|
||||
}
|
||||
|
||||
// StorageList returns a sorted list of all storage slot hashes in this state set
|
||||
// for the given account. The returned list will include the hash of deleted
|
||||
// storage slot.
|
||||
//
|
||||
// Note, the returned slice is not a copy, so do not modify it.
|
||||
//
|
||||
// nolint:unused
|
||||
func (s *stateSet) storageList(accountHash common.Hash) []common.Hash {
|
||||
s.listLock.RLock()
|
||||
if _, ok := s.storageData[accountHash]; !ok {
|
||||
// Account not tracked by this layer
|
||||
s.listLock.RUnlock()
|
||||
return nil
|
||||
}
|
||||
// If an old list already exists, return it
|
||||
if list, exist := s.storageListSorted[accountHash]; exist {
|
||||
s.listLock.RUnlock()
|
||||
return list // the cached list can't be nil
|
||||
}
|
||||
s.listLock.RUnlock()
|
||||
|
||||
// No old sorted account list exists, generate a new one. It's possible that
|
||||
// multiple threads waiting for the write lock may regenerate the list
|
||||
// multiple times, which is acceptable.
|
||||
s.listLock.Lock()
|
||||
defer s.listLock.Unlock()
|
||||
|
||||
list := maps.Keys(s.storageData[accountHash])
|
||||
slices.SortFunc(list, common.Hash.Cmp)
|
||||
s.storageListSorted[accountHash] = list
|
||||
return list
|
||||
}
|
||||
|
||||
// clearCache invalidates the cached account list and storage lists.
|
||||
func (s *stateSet) clearCache() {
|
||||
s.listLock.Lock()
|
||||
defer s.listLock.Unlock()
|
||||
|
||||
s.accountListSorted = nil
|
||||
s.storageListSorted = make(map[common.Hash][]common.Hash)
|
||||
}
|
||||
|
||||
// merge integrates the accounts and storages from the external set into the
|
||||
// local set, ensuring the combined set reflects the combined state of both.
|
||||
//
|
||||
// The stateSet supplied as parameter set will not be mutated by this operation,
|
||||
// as it may still be referenced by other layers.
|
||||
func (s *stateSet) merge(other *stateSet) {
|
||||
var (
|
||||
delta int
|
||||
accountOverwrites counter
|
||||
storageOverwrites counter
|
||||
)
|
||||
// Apply the updated account data
|
||||
for accountHash, data := range other.accountData {
|
||||
if origin, ok := s.accountData[accountHash]; ok {
|
||||
delta += len(data) - len(origin)
|
||||
accountOverwrites.add(common.HashLength + len(origin))
|
||||
} else {
|
||||
delta += common.HashLength + len(data)
|
||||
}
|
||||
s.accountData[accountHash] = data
|
||||
}
|
||||
// Apply all the updated storage slots (individually)
|
||||
for accountHash, storage := range other.storageData {
|
||||
// If storage didn't exist in the set, overwrite blindly
|
||||
if _, ok := s.storageData[accountHash]; !ok {
|
||||
// To prevent potential concurrent map read/write issues, allocate a
|
||||
// new map for the storage instead of claiming it directly from the
|
||||
// passed external set. Even after merging, the slots belonging to the
|
||||
// external state set remain accessible, so ownership of the map should
|
||||
// not be taken, and any mutation on it should be avoided.
|
||||
slots := make(map[common.Hash][]byte, len(storage))
|
||||
for storageHash, data := range storage {
|
||||
slots[storageHash] = data
|
||||
delta += 2*common.HashLength + len(data)
|
||||
}
|
||||
s.storageData[accountHash] = slots
|
||||
continue
|
||||
}
|
||||
// Storage exists in both local and external set, merge the slots
|
||||
slots := s.storageData[accountHash]
|
||||
for storageHash, data := range storage {
|
||||
if origin, ok := slots[storageHash]; ok {
|
||||
delta += len(data) - len(origin)
|
||||
storageOverwrites.add(2*common.HashLength + len(origin))
|
||||
} else {
|
||||
delta += 2*common.HashLength + len(data)
|
||||
}
|
||||
slots[storageHash] = data
|
||||
}
|
||||
}
|
||||
accountOverwrites.report(gcAccountMeter, gcAccountBytesMeter)
|
||||
storageOverwrites.report(gcStorageMeter, gcStorageBytesMeter)
|
||||
s.clearCache()
|
||||
s.updateSize(delta)
|
||||
}
|
||||
|
||||
// revertTo takes the original value of accounts and storages as input and reverts
// the latest state transition applied on the state set.
//
// Notably, this operation may result in the set containing more entries after a
// revert. For example, if account x did not exist and was created during transition
// w, reverting w will retain an x=nil entry in the set. Likewise, if account x along
// with its storage slots was deleted in transition w, reverting w will retain the
// additional storage slots with their original values.
func (s *stateSet) revertTo(accountOrigin map[common.Hash][]byte, storageOrigin map[common.Hash]map[common.Hash][]byte) {
	var delta int // size tracking
	for addrHash, blob := range accountOrigin {
		data, ok := s.accountData[addrHash]
		if !ok {
			panic(fmt.Sprintf("non-existent account for reverting, %x", addrHash))
		}
		delta += len(blob) - len(data)

		if len(blob) != 0 {
			s.accountData[addrHash] = blob
		} else {
			if len(data) == 0 {
				panic(fmt.Sprintf("invalid account mutation (null to null), %x", addrHash))
			}
			s.accountData[addrHash] = nil
		}
	}
	// Overwrite the storage data with original value blindly
	for addrHash, storage := range storageOrigin {
		slots := s.storageData[addrHash]
		if len(slots) == 0 {
			panic(fmt.Sprintf("non-existent storage set for reverting, %x", addrHash))
		}
		for storageHash, blob := range storage {
			data, ok := slots[storageHash]
			if !ok {
				panic(fmt.Sprintf("non-existent storage slot for reverting, %x-%x", addrHash, storageHash))
			}
			delta += len(blob) - len(data)

			if len(blob) != 0 {
				slots[storageHash] = blob
			} else {
				if len(data) == 0 {
					panic(fmt.Sprintf("invalid storage slot mutation (null to null), %x-%x", addrHash, storageHash))
				}
				slots[storageHash] = nil
			}
		}
	}
	s.clearCache()
	s.updateSize(delta)
}

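To make the revert contract concrete, here is a hypothetical caller (values illustrative, using the newStates constructor exercised by the tests below) that records the pre-transition value and feeds it back to undo the change:

	set := newStates(nil, nil) // empty set
	// Transition: create account 0xa with a one-byte payload.
	set.merge(newStates(map[common.Hash][]byte{{0xa}: {0x1}}, nil))
	// Undo it. The pre-transition value was "absent" (nil), so the revert
	// leaves an explicit 0xa=nil deletion marker rather than dropping the key.
	set.revertTo(map[common.Hash][]byte{{0xa}: nil}, nil)
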
// updateSize updates the total cache size by the given delta.
func (s *stateSet) updateSize(delta int) {
	size := int64(s.size) + int64(delta)
	if size >= 0 {
		s.size = uint64(size)
		return
	}
	log.Error("Stateset size underflow", "prev", common.StorageSize(s.size), "delta", common.StorageSize(delta))
	s.size = 0
}

// encode serializes the content of state set into the provided writer.
func (s *stateSet) encode(w io.Writer) error {
	// Encode accounts
	type accounts struct {
		AddrHashes []common.Hash
		Accounts   [][]byte
	}
	var enc accounts
	for addrHash, blob := range s.accountData {
		enc.AddrHashes = append(enc.AddrHashes, addrHash)
		enc.Accounts = append(enc.Accounts, blob)
	}
	if err := rlp.Encode(w, enc); err != nil {
		return err
	}
	// Encode storages
	type Storage struct {
		AddrHash common.Hash
		Keys     []common.Hash
		Blobs    [][]byte
	}
	storages := make([]Storage, 0, len(s.storageData))
	for addrHash, slots := range s.storageData {
		keys := make([]common.Hash, 0, len(slots))
		vals := make([][]byte, 0, len(slots))
		for key, val := range slots {
			keys = append(keys, key)
			vals = append(vals, val)
		}
		storages = append(storages, Storage{
			AddrHash: addrHash,
			Keys:     keys,
			Blobs:    vals,
		})
	}
	return rlp.Encode(w, storages)
}

// decode deserializes the content from the rlp stream into the state set.
func (s *stateSet) decode(r *rlp.Stream) error {
	type accounts struct {
		AddrHashes []common.Hash
		Accounts   [][]byte
	}
	var (
		dec        accounts
		accountSet = make(map[common.Hash][]byte)
	)
	if err := r.Decode(&dec); err != nil {
		return fmt.Errorf("load diff accounts: %v", err)
	}
	for i := 0; i < len(dec.AddrHashes); i++ {
		accountSet[dec.AddrHashes[i]] = dec.Accounts[i]
	}
	s.accountData = accountSet

	// Decode storages
	type storage struct {
		AddrHash common.Hash
		Keys     []common.Hash
		Vals     [][]byte
	}
	var (
		storages   []storage
		storageSet = make(map[common.Hash]map[common.Hash][]byte)
	)
	if err := r.Decode(&storages); err != nil {
		return fmt.Errorf("load diff storage: %v", err)
	}
	for _, entry := range storages {
		storageSet[entry.AddrHash] = make(map[common.Hash][]byte, len(entry.Keys))
		for i := 0; i < len(entry.Keys); i++ {
			storageSet[entry.AddrHash][entry.Keys[i]] = entry.Vals[i]
		}
	}
	s.storageData = storageSet
	s.storageListSorted = make(map[common.Hash][]common.Hash)

	s.size = s.check()
	return nil
}

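Since encode walks Go maps, whose iteration order is randomized, the produced bytes are not canonical across runs; only the decoded content is stable. A minimal content-level round-trip sketch under that assumption (roundTrip is a hypothetical in-package helper, not part of this diff):

	func roundTrip(s *stateSet) (*stateSet, error) {
		var buf bytes.Buffer
		if err := s.encode(&buf); err != nil {
			return nil, err
		}
		dec := new(stateSet)
		if err := dec.decode(rlp.NewStream(&buf, 0)); err != nil {
			return nil, err
		}
		return dec, nil // compare maps against s, not the raw bytes
	}
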
// reset clears all cached state data, including any optional sorted lists that
// may have been generated.
func (s *stateSet) reset() {
	s.accountData = make(map[common.Hash][]byte)
	s.storageData = make(map[common.Hash]map[common.Hash][]byte)
	s.size = 0
	s.accountListSorted = nil
	s.storageListSorted = make(map[common.Hash][]common.Hash)
}

// dbsize returns the approximate size for db write.
//
// nolint:unused
func (s *stateSet) dbsize() int {
	m := len(s.accountData) * len(rawdb.SnapshotAccountPrefix)
	for _, slots := range s.storageData {
		m += len(slots) * len(rawdb.SnapshotStoragePrefix)
	}
	return m + int(s.size)
}

// StateSetWithOrigin wraps the state set with additional original values of the
// mutated states.
type StateSetWithOrigin struct {
	*stateSet

	// AccountOrigin represents the account data before the state transition,
	// corresponding to both the accountData and destructSet. It's keyed by the
	// account address. The nil value means the account was not present before.

@ -62,7 +441,7 @@ type StateSetWithOrigin struct {
}

// NewStateSetWithOrigin constructs the state set with the provided data.
func NewStateSetWithOrigin(accountOrigin map[common.Address][]byte, storageOrigin map[common.Address]map[common.Hash][]byte) *StateSetWithOrigin {
func NewStateSetWithOrigin(accounts map[common.Hash][]byte, storages map[common.Hash]map[common.Hash][]byte, accountOrigin map[common.Address][]byte, storageOrigin map[common.Address]map[common.Hash][]byte) *StateSetWithOrigin {
	// Don't panic for the lazy callers, initialize the nil maps instead.
	if accountOrigin == nil {
		accountOrigin = make(map[common.Address][]byte)

@ -82,15 +461,21 @@ func NewStateSetWithOrigin(accountOrigin map[common.Address][]byte, storageOrigi
			size += 2*common.HashLength + len(data)
		}
	}
	set := newStates(accounts, storages)
	return &StateSetWithOrigin{
		stateSet:      set,
		accountOrigin: accountOrigin,
		storageOrigin: storageOrigin,
		size:          uint64(size),
		size:          set.size + uint64(size),
	}
}
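With the widened constructor, callers now pass the flat states alongside the origin maps, and the aggregate size covers both. Note that the flat maps are keyed by address hash while the origin maps are keyed by plain address. A hypothetical call with illustrative values (mirroring the StateSet.internal change at the end of this diff):

	set := NewStateSetWithOrigin(
		map[common.Hash][]byte{{0x1}: {0x1}},        // accounts, keyed by address hash
		map[common.Hash]map[common.Hash][]byte{},    // storages, keyed by address hash
		map[common.Address][]byte{{0x1}: {0x1}},     // account origins, keyed by address
		map[common.Address]map[common.Hash][]byte{}, // storage origins, keyed by address
	)
	_ = set
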
// encode serializes the content of state set into the provided writer.
func (s *StateSetWithOrigin) encode(w io.Writer) error {
	// Encode state set
	if err := s.stateSet.encode(w); err != nil {
		return err
	}
	// Encode accounts
	type Accounts struct {
		Addresses []common.Address

@ -125,6 +510,12 @@ func (s *StateSetWithOrigin) encode(w io.Writer) error {

// decode deserializes the content from the rlp stream into the state set.
func (s *StateSetWithOrigin) decode(r *rlp.Stream) error {
	if s.stateSet == nil {
		s.stateSet = &stateSet{}
	}
	if err := s.stateSet.decode(r); err != nil {
		return err
	}
	// Decode account origin
	type Accounts struct {
		Addresses []common.Address

@ -0,0 +1,453 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>

package pathdb

import (
	"bytes"
	"reflect"
	"testing"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/rlp"
)

func TestStatesMerge(t *testing.T) {
	a := newStates(
		map[common.Hash][]byte{
			common.Hash{0xa}: {0xa0},
			common.Hash{0xb}: {0xb0},
			common.Hash{0xc}: {0xc0},
		},
		map[common.Hash]map[common.Hash][]byte{
			common.Hash{0xa}: {
				common.Hash{0x1}: {0x10},
				common.Hash{0x2}: {0x20},
			},
			common.Hash{0xb}: {
				common.Hash{0x1}: {0x10},
			},
			common.Hash{0xc}: {
				common.Hash{0x1}: {0x10},
			},
		},
	)
	b := newStates(
		map[common.Hash][]byte{
			common.Hash{0xa}: {0xa1},
			common.Hash{0xb}: {0xb1},
			common.Hash{0xc}: nil, // delete account
		},
		map[common.Hash]map[common.Hash][]byte{
			common.Hash{0xa}: {
				common.Hash{0x1}: {0x11},
				common.Hash{0x2}: nil, // delete slot
				common.Hash{0x3}: {0x31},
			},
			common.Hash{0xb}: {
				common.Hash{0x1}: {0x11},
			},
			common.Hash{0xc}: {
				common.Hash{0x1}: nil, // delete slot
			},
		},
	)
	a.merge(b)

	blob, exist := a.account(common.Hash{0xa})
	if !exist || !bytes.Equal(blob, []byte{0xa1}) {
		t.Error("Unexpected value for account a")
	}
	blob, exist = a.account(common.Hash{0xb})
	if !exist || !bytes.Equal(blob, []byte{0xb1}) {
		t.Error("Unexpected value for account b")
	}
	blob, exist = a.account(common.Hash{0xc})
	if !exist || len(blob) != 0 {
		t.Error("Unexpected value for account c")
	}
	// unknown account
	blob, exist = a.account(common.Hash{0xd})
	if exist || len(blob) != 0 {
		t.Error("Unexpected value for account d")
	}

	blob, exist = a.storage(common.Hash{0xa}, common.Hash{0x1})
	if !exist || !bytes.Equal(blob, []byte{0x11}) {
		t.Error("Unexpected value for a's storage")
	}
	blob, exist = a.storage(common.Hash{0xa}, common.Hash{0x2})
	if !exist || len(blob) != 0 {
		t.Error("Unexpected value for a's storage")
	}
	blob, exist = a.storage(common.Hash{0xa}, common.Hash{0x3})
	if !exist || !bytes.Equal(blob, []byte{0x31}) {
		t.Error("Unexpected value for a's storage")
	}
	blob, exist = a.storage(common.Hash{0xb}, common.Hash{0x1})
	if !exist || !bytes.Equal(blob, []byte{0x11}) {
		t.Error("Unexpected value for b's storage")
	}
	blob, exist = a.storage(common.Hash{0xc}, common.Hash{0x1})
	if !exist || len(blob) != 0 {
		t.Error("Unexpected value for c's storage")
	}

	// unknown storage slots
	blob, exist = a.storage(common.Hash{0xd}, common.Hash{0x1})
	if exist || len(blob) != 0 {
		t.Error("Unexpected value for d's storage")
	}
}

func TestStatesRevert(t *testing.T) {
	a := newStates(
		map[common.Hash][]byte{
			common.Hash{0xa}: {0xa0},
			common.Hash{0xb}: {0xb0},
			common.Hash{0xc}: {0xc0},
		},
		map[common.Hash]map[common.Hash][]byte{
			common.Hash{0xa}: {
				common.Hash{0x1}: {0x10},
				common.Hash{0x2}: {0x20},
			},
			common.Hash{0xb}: {
				common.Hash{0x1}: {0x10},
			},
			common.Hash{0xc}: {
				common.Hash{0x1}: {0x10},
			},
		},
	)
	b := newStates(
		map[common.Hash][]byte{
			common.Hash{0xa}: {0xa1},
			common.Hash{0xb}: {0xb1},
			common.Hash{0xc}: nil,
		},
		map[common.Hash]map[common.Hash][]byte{
			common.Hash{0xa}: {
				common.Hash{0x1}: {0x11},
				common.Hash{0x2}: nil,
				common.Hash{0x3}: {0x31},
			},
			common.Hash{0xb}: {
				common.Hash{0x1}: {0x11},
			},
			common.Hash{0xc}: {
				common.Hash{0x1}: nil,
			},
		},
	)
	a.merge(b)
	a.revertTo(
		map[common.Hash][]byte{
			common.Hash{0xa}: {0xa0},
			common.Hash{0xb}: {0xb0},
			common.Hash{0xc}: {0xc0},
		},
		map[common.Hash]map[common.Hash][]byte{
			common.Hash{0xa}: {
				common.Hash{0x1}: {0x10},
				common.Hash{0x2}: {0x20},
				common.Hash{0x3}: nil,
			},
			common.Hash{0xb}: {
				common.Hash{0x1}: {0x10},
			},
			common.Hash{0xc}: {
				common.Hash{0x1}: {0x10},
			},
		},
	)

	blob, exist := a.account(common.Hash{0xa})
	if !exist || !bytes.Equal(blob, []byte{0xa0}) {
		t.Error("Unexpected value for account a")
	}
	blob, exist = a.account(common.Hash{0xb})
	if !exist || !bytes.Equal(blob, []byte{0xb0}) {
		t.Error("Unexpected value for account b")
	}
	blob, exist = a.account(common.Hash{0xc})
	if !exist || !bytes.Equal(blob, []byte{0xc0}) {
		t.Error("Unexpected value for account c")
	}
	// unknown account
	blob, exist = a.account(common.Hash{0xd})
	if exist || len(blob) != 0 {
		t.Error("Unexpected value for account d")
	}

	blob, exist = a.storage(common.Hash{0xa}, common.Hash{0x1})
	if !exist || !bytes.Equal(blob, []byte{0x10}) {
		t.Error("Unexpected value for a's storage")
	}
	blob, exist = a.storage(common.Hash{0xa}, common.Hash{0x2})
	if !exist || !bytes.Equal(blob, []byte{0x20}) {
		t.Error("Unexpected value for a's storage")
	}
	blob, exist = a.storage(common.Hash{0xa}, common.Hash{0x3})
	if !exist || len(blob) != 0 {
		t.Error("Unexpected value for a's storage")
	}
	blob, exist = a.storage(common.Hash{0xb}, common.Hash{0x1})
	if !exist || !bytes.Equal(blob, []byte{0x10}) {
		t.Error("Unexpected value for b's storage")
	}
	blob, exist = a.storage(common.Hash{0xc}, common.Hash{0x1})
	if !exist || !bytes.Equal(blob, []byte{0x10}) {
		t.Error("Unexpected value for c's storage")
	}
	// unknown storage slots
	blob, exist = a.storage(common.Hash{0xd}, common.Hash{0x1})
	if exist || len(blob) != 0 {
		t.Error("Unexpected value for d's storage")
	}
}

// TestStateRevertAccountNullMarker tests the scenario where account x did not
// exist before and was created during transition w; reverting w should retain
// an x=nil entry in the set.
func TestStateRevertAccountNullMarker(t *testing.T) {
	a := newStates(nil, nil) // empty initial state
	b := newStates(
		map[common.Hash][]byte{
			common.Hash{0xa}: {0xa},
		},
		nil,
	)
	a.merge(b) // create account 0xa
	a.revertTo(
		map[common.Hash][]byte{
			common.Hash{0xa}: nil,
		},
		nil,
	) // revert the transition b

	blob, exist := a.account(common.Hash{0xa})
	if !exist {
		t.Fatal("null marker is not found")
	}
	if len(blob) != 0 {
		t.Fatalf("Unexpected value for account, %v", blob)
	}
}

// TestStateRevertStorageNullMarker tests the scenario where slot x did not
// exist before and was created during transition w; reverting w should retain
// an x=nil entry in the set.
func TestStateRevertStorageNullMarker(t *testing.T) {
	a := newStates(map[common.Hash][]byte{
		common.Hash{0xa}: {0xa},
	}, nil) // initial state with account 0xa

	b := newStates(
		nil,
		map[common.Hash]map[common.Hash][]byte{
			common.Hash{0xa}: {
				common.Hash{0x1}: {0x1},
			},
		},
	)
	a.merge(b) // create slot 0x1
	a.revertTo(
		nil,
		map[common.Hash]map[common.Hash][]byte{
			common.Hash{0xa}: {
				common.Hash{0x1}: nil,
			},
		},
	) // revert the transition b

	blob, exist := a.storage(common.Hash{0xa}, common.Hash{0x1})
	if !exist {
		t.Fatal("null marker is not found")
	}
	if len(blob) != 0 {
		t.Fatalf("Unexpected value for storage slot, %v", blob)
	}
}

func TestStatesEncode(t *testing.T) {
	s := newStates(
		map[common.Hash][]byte{
			common.Hash{0x1}: {0x1},
		},
		map[common.Hash]map[common.Hash][]byte{
			common.Hash{0x1}: {
				common.Hash{0x1}: {0x1},
			},
		},
	)
	buf := bytes.NewBuffer(nil)
	if err := s.encode(buf); err != nil {
		t.Fatalf("Failed to encode states, %v", err)
	}
	var dec stateSet
	if err := dec.decode(rlp.NewStream(buf, 0)); err != nil {
		t.Fatalf("Failed to decode states, %v", err)
	}
	if !reflect.DeepEqual(s.accountData, dec.accountData) {
		t.Fatal("Unexpected account data")
	}
	if !reflect.DeepEqual(s.storageData, dec.storageData) {
		t.Fatal("Unexpected storage data")
	}
}

func TestStateWithOriginEncode(t *testing.T) {
	s := NewStateSetWithOrigin(
		map[common.Hash][]byte{
			common.Hash{0x1}: {0x1},
		},
		map[common.Hash]map[common.Hash][]byte{
			common.Hash{0x1}: {
				common.Hash{0x1}: {0x1},
			},
		},
		map[common.Address][]byte{
			common.Address{0x1}: {0x1},
		},
		map[common.Address]map[common.Hash][]byte{
			common.Address{0x1}: {
				common.Hash{0x1}: {0x1},
			},
		},
	)
	buf := bytes.NewBuffer(nil)
	if err := s.encode(buf); err != nil {
		t.Fatalf("Failed to encode states, %v", err)
	}
	var dec StateSetWithOrigin
	if err := dec.decode(rlp.NewStream(buf, 0)); err != nil {
		t.Fatalf("Failed to decode states, %v", err)
	}
	if !reflect.DeepEqual(s.accountData, dec.accountData) {
		t.Fatal("Unexpected account data")
	}
	if !reflect.DeepEqual(s.storageData, dec.storageData) {
		t.Fatal("Unexpected storage data")
	}
	if !reflect.DeepEqual(s.accountOrigin, dec.accountOrigin) {
		t.Fatal("Unexpected account origin data")
	}
	if !reflect.DeepEqual(s.storageOrigin, dec.storageOrigin) {
		t.Fatal("Unexpected storage origin data")
	}
}

func TestStateSizeTracking(t *testing.T) {
	expSizeA := 3*(common.HashLength+1) + /* account data */
		2*(2*common.HashLength+1) + /* storage data of 0xa */
		2*common.HashLength + 3 + /* storage data of 0xb */
		2*common.HashLength + 1 /* storage data of 0xc */

	a := newStates(
		map[common.Hash][]byte{
			common.Hash{0xa}: {0xa0}, // common.HashLength+1
			common.Hash{0xb}: {0xb0}, // common.HashLength+1
			common.Hash{0xc}: {0xc0}, // common.HashLength+1
		},
		map[common.Hash]map[common.Hash][]byte{
			common.Hash{0xa}: {
				common.Hash{0x1}: {0x10}, // 2*common.HashLength+1
				common.Hash{0x2}: {0x20}, // 2*common.HashLength+1
			},
			common.Hash{0xb}: {
				common.Hash{0x1}: {0x10, 0x11, 0x12}, // 2*common.HashLength+3
			},
			common.Hash{0xc}: {
				common.Hash{0x1}: {0x10}, // 2*common.HashLength+1
			},
		},
	)
	if a.size != uint64(expSizeA) {
		t.Fatalf("Unexpected size, want: %d, got: %d", expSizeA, a.size)
	}

	expSizeB := common.HashLength + 2 + common.HashLength + 3 + common.HashLength + /* account data */
		2*common.HashLength + 3 + 2*common.HashLength + 2 + /* storage data of 0xa */
		2*common.HashLength + 2 + 2*common.HashLength + 2 + /* storage data of 0xb */
		3*2*common.HashLength /* storage data of 0xc */
	b := newStates(
		map[common.Hash][]byte{
			common.Hash{0xa}: {0xa1, 0xa1},       // common.HashLength+2
			common.Hash{0xb}: {0xb1, 0xb1, 0xb1}, // common.HashLength+3
			common.Hash{0xc}: nil,                // common.HashLength, account deletion
		},
		map[common.Hash]map[common.Hash][]byte{
			common.Hash{0xa}: {
				common.Hash{0x1}: {0x11, 0x11, 0x11}, // 2*common.HashLength+3
				common.Hash{0x3}: {0x31, 0x31},       // 2*common.HashLength+2, slot creation
			},
			common.Hash{0xb}: {
				common.Hash{0x1}: {0x11, 0x11}, // 2*common.HashLength+2
				common.Hash{0x2}: {0x22, 0x22}, // 2*common.HashLength+2, slot creation
			},
			// The storage of 0xc is entirely removed
			common.Hash{0xc}: {
				common.Hash{0x1}: nil, // 2*common.HashLength, slot deletion
				common.Hash{0x2}: nil, // 2*common.HashLength, slot deletion
				common.Hash{0x3}: nil, // 2*common.HashLength, slot deletion
			},
		},
	)
	if b.size != uint64(expSizeB) {
		t.Fatalf("Unexpected size, want: %d, got: %d", expSizeB, b.size)
	}

	a.merge(b)
	mergeSize := expSizeA + 1 /* account a data change */ + 2 /* account b data change */ - 1 /* account c data change */
	mergeSize += 2*common.HashLength + 2 + 2 /* storage a change */
	mergeSize += 2*common.HashLength + 2 - 1 /* storage b change */
	mergeSize += 2*2*common.HashLength - 1 /* storage data removal of 0xc */

	if a.size != uint64(mergeSize) {
		t.Fatalf("Unexpected size, want: %d, got: %d", mergeSize, a.size)
	}

	// Revert the set to original status
	a.revertTo(
		map[common.Hash][]byte{
			common.Hash{0xa}: {0xa0},
			common.Hash{0xb}: {0xb0},
			common.Hash{0xc}: {0xc0},
		},
		map[common.Hash]map[common.Hash][]byte{
			common.Hash{0xa}: {
				common.Hash{0x1}: {0x10},
				common.Hash{0x2}: {0x20},
				common.Hash{0x3}: nil, // revert slot creation
			},
			common.Hash{0xb}: {
				common.Hash{0x1}: {0x10, 0x11, 0x12},
				common.Hash{0x2}: nil, // revert slot creation
			},
			common.Hash{0xc}: {
				common.Hash{0x1}: {0x10},
				common.Hash{0x2}: {0x20}, // resurrected slot
				common.Hash{0x3}: {0x30}, // resurrected slot
			},
		},
	)
	revertSize := expSizeA + 2*common.HashLength + 2*common.HashLength // delete-marker of a.3 and b.2 slot
	revertSize += 2 * (2*common.HashLength + 1)                       // resurrected slot, c.2, c.3
	if a.size != uint64(revertSize) {
		t.Fatalf("Unexpected size, want: %d, got: %d", revertSize, a.size)
	}
}

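As a worked check of the size accounting above, expSizeA expands as follows with common.HashLength = 32 (illustrative arithmetic, not part of the test):

	const hashLen = 32 // common.HashLength
	// 3 accounts at 1 byte each, 2 slots of 0xa at 1 byte each,
	// 1 slot of 0xb at 3 bytes, 1 slot of 0xc at 1 byte:
	const expSizeA = 3*(hashLen+1) + // 99
		2*(2*hashLen+1) + // 130
		2*hashLen + 3 + // 67
		2*hashLen + 1 // 65, for a total of 361 bytes
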
@ -45,5 +45,5 @@ func (set *StateSet) internal() *pathdb.StateSetWithOrigin {
	if set == nil {
		return nil
	}
	return pathdb.NewStateSetWithOrigin(set.AccountsOrigin, set.StoragesOrigin)
	return pathdb.NewStateSetWithOrigin(set.Accounts, set.Storages, set.AccountsOrigin, set.StoragesOrigin)
}