trie/trienode: avoid unnecessary copy (#30019)

* avoid unnecessary copy

* delete the never-used function ProofList

* eth/protocols/snap, trie/trienode: polish the code

---------

Co-authored-by: Gary Rong <garyrong0905@gmail.com>
maskpp 2024-06-20 11:47:29 +08:00 committed by GitHub
parent 27008408a5
commit 00675c5876
3 changed files with 13 additions and 30 deletions
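
The change is the same in every hunk below: rather than copying proof.List() element by element into a fresh [][]byte, the handlers either return the list directly or splice it in with a single variadic append. A minimal, self-contained sketch of the before/after pattern (proofBlobs is an illustrative stand-in, not a function from the repository):

package main

import "fmt"

// proofBlobs stands in for the [][]byte returned by ProofSet.List();
// the name and contents are illustrative only.
func proofBlobs() [][]byte {
	return [][]byte{{0x01}, {0x02, 0x03}}
}

func main() {
	// Old pattern: build an intermediate slice one element at a time,
	// growing (and reallocating) the backing array as it goes.
	var copied [][]byte
	for _, blob := range proofBlobs() {
		copied = append(copied, blob)
	}

	// New pattern: return the slice directly, or splice it into an
	// existing one with a single variadic append.
	direct := proofBlobs()

	var proofs [][]byte
	proofs = append(proofs, proofBlobs()...)

	fmt.Println(len(copied), len(direct), len(proofs)) // 2 2 2
}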

eth/protocols/snap/handler.go

@@ -332,11 +332,7 @@ func ServiceGetAccountRangeQuery(chain *core.BlockChain, req *GetAccountRangePac
 			return nil, nil
 		}
 	}
-	var proofs [][]byte
-	for _, blob := range proof.List() {
-		proofs = append(proofs, blob)
-	}
-	return accounts, proofs
+	return accounts, proof.List()
 }
 
 func ServiceGetStorageRangesQuery(chain *core.BlockChain, req *GetStorageRangesPacket) ([][]*StorageData, [][]byte) {
@@ -438,9 +434,7 @@ func ServiceGetStorageRangesQuery(chain *core.BlockChain, req *GetStorageRangesP
 					return nil, nil
 				}
 			}
-			for _, blob := range proof.List() {
-				proofs = append(proofs, blob)
-			}
+			proofs = append(proofs, proof.List()...)
 			// Proof terminates the reply as proofs are only added if a node
 			// refuses to serve more data (exception when a contract fetch is
 			// finishing, but that's that).
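
Worth noting: the removed loops never deep-copied the proof data in the first place, since each element of a [][]byte is just a slice header pointing at the same underlying bytes. Dropping the loop therefore only saves allocating and growing the outer slice; the blobs themselves are shared either way. A tiny sketch of that assumption:

package main

import "fmt"

func main() {
	src := [][]byte{[]byte("proof-node")}

	// The element-wise append only copied slice headers...
	var dst [][]byte
	for _, blob := range src {
		dst = append(dst, blob)
	}

	// ...so the "copy" still aliased the original bytes; returning
	// src directly is observably the same, minus one allocation.
	fmt.Println(&src[0][0] == &dst[0][0]) // true
}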

eth/protocols/snap/sync_test.go

@@ -286,10 +286,7 @@ func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.H
 			t.logger.Error("Could not prove last item", "error", err)
 		}
 	}
-	for _, blob := range proof.List() {
-		proofs = append(proofs, blob)
-	}
-	return keys, vals, proofs
+	return keys, vals, proof.List()
 }
 
 // defaultStorageRequestHandler is a well-behaving storage request handler
@@ -371,9 +368,7 @@ func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []comm
 					t.logger.Error("Could not prove last item", "error", err)
 				}
 			}
-			for _, blob := range proof.List() {
-				proofs = append(proofs, blob)
-			}
+			proofs = append(proofs, proof.List()...)
 			break
 		}
 	}
@@ -430,9 +425,7 @@ func createStorageRequestResponseAlwaysProve(t *testPeer, root common.Hash, acco
 					t.logger.Error("Could not prove last item", "error", err)
 				}
 			}
-			for _, blob := range proof.List() {
-				proofs = append(proofs, blob)
-			}
+			proofs = append(proofs, proof.List()...)
 			break
 		}
 	}
@@ -586,9 +579,8 @@ func testSyncBloatedProof(t *testing.T, scheme string) {
 	source.accountRequestHandler = func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {
 		var (
-			proofs [][]byte
-			keys   []common.Hash
-			vals   [][]byte
+			keys []common.Hash
+			vals [][]byte
 		)
 		// The values
 		for _, entry := range t.accountValues {
@@ -618,10 +610,7 @@ func testSyncBloatedProof(t *testing.T, scheme string) {
 			keys = append(keys[:1], keys[2:]...)
 			vals = append(vals[:1], vals[2:]...)
 		}
-		for _, blob := range proof.List() {
-			proofs = append(proofs, blob)
-		}
-		if err := t.remote.OnAccounts(t, requestId, keys, vals, proofs); err != nil {
+		if err := t.remote.OnAccounts(t, requestId, keys, vals, proof.List()); err != nil {
 			t.logger.Info("remote error on delivery (as expected)", "error", err)
 			t.term()
 			// This is actually correct, signal to exit the test successfully

trie/trienode/proof.go

@@ -102,14 +102,14 @@ func (db *ProofSet) DataSize() int {
 	return db.dataSize
 }
 
-// List converts the node set to a ProofList
-func (db *ProofSet) List() ProofList {
+// List converts the node set to a slice of bytes.
+func (db *ProofSet) List() [][]byte {
 	db.lock.RLock()
 	defer db.lock.RUnlock()
 
-	var values ProofList
-	for _, key := range db.order {
-		values = append(values, db.nodes[key])
+	values := make([][]byte, len(db.order))
+	for i, key := range db.order {
+		values[i] = db.nodes[key]
 	}
 	return values
 }