// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package pathdb

import (
	"bytes"
	"fmt"
	"reflect"
	"testing"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/internal/testrand"
	"github.com/ethereum/go-ethereum/rlp"
)

// randomStateSet generates a random state change set.
func randomStateSet(n int) (map[common.Address][]byte, map[common.Address]map[common.Hash][]byte) {
	var (
		accounts = make(map[common.Address][]byte)
		storages = make(map[common.Address]map[common.Hash][]byte)
	)
	for i := 0; i < n; i++ {
		addr := testrand.Address()
		storages[addr] = make(map[common.Hash][]byte)
		for j := 0; j < 3; j++ {
			v, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(testrand.Bytes(32)))
			storages[addr][testrand.Hash()] = v
		}
		account := generateAccount(types.EmptyRootHash)
		accounts[addr] = types.SlimAccountRLP(account)
	}
	return accounts, storages
}

// makeHistory constructs a single state history with a random state set.
// If rawStorageKey is true, storage slots are identified by their raw keys
// rather than by key hashes.
func makeHistory(rawStorageKey bool) *history {
	accounts, storages := randomStateSet(3)
	return newHistory(testrand.Hash(), types.EmptyRootHash, 0, accounts, storages, rawStorageKey)
}

// makeHistories constructs a list of n state histories, each chained to the
// previous one via its parent root.
func makeHistories(n int) []*history {
	var (
		parent = types.EmptyRootHash
		result []*history
	)
	for i := 0; i < n; i++ {
		root := testrand.Hash()
		accounts, storages := randomStateSet(3)
		h := newHistory(root, parent, uint64(i), accounts, storages, false)
		parent = root
		result = append(result, h)
	}
	return result
}

func TestEncodeDecodeHistory(t *testing.T) {
	testEncodeDecodeHistory(t, false)
	testEncodeDecodeHistory(t, true)
}

func testEncodeDecodeHistory(t *testing.T, rawStorageKey bool) {
	var (
		m   meta
		dec history
		obj = makeHistory(rawStorageKey)
	)
	// check if the metadata can be correctly encoded/decoded
	blob := obj.meta.encode()
	if err := m.decode(blob); err != nil {
		t.Fatalf("Failed to decode %v", err)
	}
	if !reflect.DeepEqual(&m, obj.meta) {
		t.Fatal("meta is mismatched")
	}

	// check if the account/storage data can be correctly encoded/decoded
	accountData, storageData, accountIndexes, storageIndexes := obj.encode()
	if err := dec.decode(accountData, storageData, accountIndexes, storageIndexes); err != nil {
		t.Fatalf("Failed to decode, err: %v", err)
	}
	if !compareSet(dec.accounts, obj.accounts) {
		t.Fatal("account data is mismatched")
	}
	if !compareStorages(dec.storages, obj.storages) {
		t.Fatal("storage data is mismatched")
	}
	if !compareList(dec.accountList, obj.accountList) {
		t.Fatal("account list is mismatched")
	}
	if !compareStorageList(dec.storageList, obj.storageList) {
		t.Fatal("storage list is mismatched")
	}
}

// checkHistory asserts that the state history with the given id, along with
// its root->id mapping, is present (or absent) as indicated by exist.
func checkHistory(t *testing.T, db ethdb.KeyValueReader, freezer ethdb.AncientReader, id uint64, root common.Hash, exist bool) {
	blob := rawdb.ReadStateHistoryMeta(freezer, id)
	if exist && len(blob) == 0 {
		t.Fatalf("Failed to load trie history, %d", id)
	}
	if !exist && len(blob) != 0 {
		t.Fatalf("Unexpected trie history, %d", id)
	}
	if exist && rawdb.ReadStateID(db, root) == nil {
		t.Fatalf("Root->ID mapping is not found, %d", id)
	}
	if !exist && rawdb.ReadStateID(db, root) != nil {
		t.Fatalf("Unexpected root->ID mapping, %d", id)
	}
}

// checkHistoriesInRange asserts the presence (or absence) of the state
// histories with ids in the range [from, to].
func checkHistoriesInRange(t *testing.T, db ethdb.KeyValueReader, freezer ethdb.AncientReader, from, to uint64, roots []common.Hash, exist bool) {
	for i, j := from, 0; i <= to; i, j = i+1, j+1 {
		checkHistory(t, db, freezer, i, roots[j], exist)
	}
}

func TestTruncateHeadHistory(t *testing.T) {
	var (
		roots      []common.Hash
		hs         = makeHistories(10)
		db         = rawdb.NewMemoryDatabase()
		freezer, _ = rawdb.NewStateFreezer(t.TempDir(), false, false)
	)
	defer freezer.Close()

	for i := 0; i < len(hs); i++ {
		accountData, storageData, accountIndex, storageIndex := hs[i].encode()
		rawdb.WriteStateHistory(freezer, uint64(i+1), hs[i].meta.encode(), accountIndex, storageIndex, accountData, storageData)
		rawdb.WriteStateID(db, hs[i].meta.root, uint64(i+1))
		roots = append(roots, hs[i].meta.root)
	}
	for size := len(hs); size > 0; size-- {
		pruned, err := truncateFromHead(db, freezer, uint64(size-1))
		if err != nil {
			t.Fatalf("Failed to truncate from head %v", err)
		}
		if pruned != 1 {
			t.Error("Unexpected pruned items", "want", 1, "got", pruned)
		}
		checkHistoriesInRange(t, db, freezer, uint64(size), uint64(10), roots[size-1:], false)
		checkHistoriesInRange(t, db, freezer, uint64(1), uint64(size-1), roots[:size-1], true)
	}
}

func TestTruncateTailHistory(t *testing.T) {
	var (
		roots      []common.Hash
		hs         = makeHistories(10)
		db         = rawdb.NewMemoryDatabase()
		freezer, _ = rawdb.NewStateFreezer(t.TempDir(), false, false)
	)
	defer freezer.Close()

	for i := 0; i < len(hs); i++ {
		accountData, storageData, accountIndex, storageIndex := hs[i].encode()
		rawdb.WriteStateHistory(freezer, uint64(i+1), hs[i].meta.encode(), accountIndex, storageIndex, accountData, storageData)
		rawdb.WriteStateID(db, hs[i].meta.root, uint64(i+1))
		roots = append(roots, hs[i].meta.root)
	}
	for newTail := 1; newTail < len(hs); newTail++ {
		pruned, _ := truncateFromTail(db, freezer, uint64(newTail))
		if pruned != 1 {
			t.Error("Unexpected pruned items", "want", 1, "got", pruned)
		}
		checkHistoriesInRange(t, db, freezer, uint64(1), uint64(newTail), roots[:newTail], false)
		checkHistoriesInRange(t, db, freezer, uint64(newTail+1), uint64(10), roots[newTail:], true)
	}
}

func TestTruncateTailHistories(t *testing.T) {
	var cases = []struct {
		limit       uint64
		expPruned   int
		maxPruned   uint64
		minUnpruned uint64
		empty       bool
	}{
		{
			1, 9, 9, 10, false,
		},
		{
			0, 10, 10, 0 /* no meaning */, true,
		},
		{
			10, 0, 0, 1, false,
		},
	}
	for i, c := range cases {
		var (
			roots      []common.Hash
			hs         = makeHistories(10)
			db         = rawdb.NewMemoryDatabase()
			freezer, _ = rawdb.NewStateFreezer(t.TempDir()+fmt.Sprintf("%d", i), false, false)
		)
		defer freezer.Close()

		for i := 0; i < len(hs); i++ {
			accountData, storageData, accountIndex, storageIndex := hs[i].encode()
			rawdb.WriteStateHistory(freezer, uint64(i+1), hs[i].meta.encode(), accountIndex, storageIndex, accountData, storageData)
			rawdb.WriteStateID(db, hs[i].meta.root, uint64(i+1))
			roots = append(roots, hs[i].meta.root)
		}
		pruned, _ := truncateFromTail(db, freezer, uint64(10)-c.limit)
		if pruned != c.expPruned {
			t.Error("Unexpected pruned items", "want", c.expPruned, "got", pruned)
		}
		if c.empty {
			checkHistoriesInRange(t, db, freezer, uint64(1), uint64(10), roots, false)
		} else {
			tail := 10 - int(c.limit)
			checkHistoriesInRange(t, db, freezer, uint64(1), c.maxPruned, roots[:tail], false)
			checkHistoriesInRange(t, db, freezer, c.minUnpruned, uint64(10), roots[tail:], true)
		}
	}
}

func TestTruncateOutOfRange(t *testing.T) {
	var (
		hs         = makeHistories(10)
		db         = rawdb.NewMemoryDatabase()
		freezer, _ = rawdb.NewStateFreezer(t.TempDir(), false, false)
	)
	defer freezer.Close()

	for i := 0; i < len(hs); i++ {
		accountData, storageData, accountIndex, storageIndex := hs[i].encode()
		rawdb.WriteStateHistory(freezer, uint64(i+1), hs[i].meta.encode(), accountIndex, storageIndex, accountData, storageData)
		rawdb.WriteStateID(db, hs[i].meta.root, uint64(i+1))
	}
	truncateFromTail(db, freezer, uint64(len(hs)/2))

	// Ensure out-of-range truncations are rejected correctly.
	head, _ := freezer.Ancients()
	tail, _ := freezer.Tail()

	cases := []struct {
		mode   int
		target uint64
		expErr error
	}{
		{0, head, nil}, // nothing to delete
		{0, head + 1, fmt.Errorf("out of range, tail: %d, head: %d, target: %d", tail, head, head+1)},
		{0, tail - 1, fmt.Errorf("out of range, tail: %d, head: %d, target: %d", tail, head, tail-1)},
		{1, tail, nil}, // nothing to delete
		{1, head + 1, fmt.Errorf("out of range, tail: %d, head: %d, target: %d", tail, head, head+1)},
		{1, tail - 1, fmt.Errorf("out of range, tail: %d, head: %d, target: %d", tail, head, tail-1)},
	}
	for _, c := range cases {
		var gotErr error
		if c.mode == 0 {
			_, gotErr = truncateFromHead(db, freezer, c.target)
		} else {
			_, gotErr = truncateFromTail(db, freezer, c.target)
		}
		if !reflect.DeepEqual(gotErr, c.expErr) {
			t.Errorf("Unexpected error, want: %v, got: %v", c.expErr, gotErr)
		}
	}
}

// compareSet reports whether two maps of byte slices hold identical contents.
func compareSet[k comparable](a, b map[k][]byte) bool {
	if len(a) != len(b) {
		return false
	}
	for key, valA := range a {
		valB, ok := b[key]
		if !ok {
			return false
		}
		if !bytes.Equal(valA, valB) {
			return false
		}
	}
	return true
}

// compareList reports whether two slices hold identical elements in order.
func compareList[k comparable](a, b []k) bool {
	if len(a) != len(b) {
		return false
	}
	for i := 0; i < len(a); i++ {
		if a[i] != b[i] {
			return false
		}
	}
	return true
}

// compareStorages reports whether two storage sets hold identical contents.
func compareStorages(a, b map[common.Address]map[common.Hash][]byte) bool {
	if len(a) != len(b) {
		return false
	}
	for h, subA := range a {
		subB, ok := b[h]
		if !ok {
			return false
		}
		if !compareSet(subA, subB) {
			return false
		}
	}
	return true
}

// compareStorageList reports whether two storage key lists hold identical
// contents.
func compareStorageList(a, b map[common.Address][]common.Hash) bool {
	if len(a) != len(b) {
		return false
	}
	for h, la := range a {
		lb, ok := b[h]
		if !ok {
			return false
		}
		if !compareList(la, lb) {
			return false
		}
	}
	return true
}