all: replace slice manipulation with slices package for improved readability
parent 31c972febf
commit 0b28df8e7b
@@ -136,7 +136,7 @@ func (ac *accountCache) deleteByFile(path string) {
     if i < len(ac.all) && ac.all[i].URL.Path == path {
         removed := ac.all[i]
-        ac.all = append(ac.all[:i], ac.all[i+1:]...)
+        ac.all = slices.Delete(ac.all, i, i+1)
         if ba := removeAccount(ac.byAddr[removed.Address], removed); len(ba) == 0 {
             delete(ac.byAddr, removed.Address)
         } else {
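Note for reviewers: a minimal standalone sketch (names below are illustrative, not from this codebase) of the equivalence this commit relies on. slices.Delete shifts the tail left in place and returns the shortened slice, exactly like the old append splice; since Go 1.22 it additionally zeroes the vacated tail slots, which lets pointer-valued elements be collected sooner.

    package main

    import (
        "fmt"
        "slices"
    )

    func main() {
        oldStyle := []string{"a", "b", "c", "d"}
        newStyle := []string{"a", "b", "c", "d"}

        // Old idiom: splice element 1 out by appending around it.
        oldStyle = append(oldStyle[:1], oldStyle[2:]...)

        // New idiom: delete the half-open index range [1, 2).
        newStyle = slices.Delete(newStyle, 1, 2)

        fmt.Println(oldStyle) // [a c d]
        fmt.Println(newStyle) // [a c d]
    }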
@@ -156,7 +156,7 @@ func (ac *accountCache) watcherStarted() bool {
 func removeAccount(slice []accounts.Account, elem accounts.Account) []accounts.Account {
     for i := range slice {
         if slice[i] == elem {
-            return append(slice[:i], slice[i+1:]...)
+            return slices.Delete(slice, i, i+1)
         }
     }
     return slice
@@ -23,6 +23,7 @@ import (
     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/event"
+    "slices"
 )
 
 // managerSubBufferSize determines how many incoming wallet events
@@ -259,7 +260,7 @@ func drop(slice []Wallet, wallets ...Wallet) []Wallet {
             // Wallet not found, may happen during startup
             continue
         }
-        slice = append(slice[:n], slice[n+1:]...)
+        slice = slices.Delete(slice, n, n+1)
     }
     return slice
 }
@@ -41,6 +41,7 @@ import (
     "github.com/ethereum/go-ethereum/log"
     pcsc "github.com/gballet/go-libpcsclite"
     "github.com/status-im/keycard-go/derivationpath"
+    "slices"
 )
 
 // ErrPairingPasswordNeeded is returned if opening the smart card requires pairing with a pairing
@@ -478,8 +479,8 @@ func (w *Wallet) selfDerive() {
         paths   []accounts.DerivationPath
         nextAcc accounts.Account
 
-        nextPaths = append([]accounts.DerivationPath{}, w.deriveNextPaths...)
-        nextAddrs = append([]common.Address{}, w.deriveNextAddrs...)
+        nextPaths = slices.Clone(w.deriveNextPaths)
+        nextAddrs = slices.Clone(w.deriveNextAddrs)
 
         context = context.Background()
     )
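One behavioral nuance worth flagging (a hedged aside; nothing in the surrounding hunk appears to depend on it): append([]T{}, src...) always yields a non-nil slice, while slices.Clone(nil) returns nil. That only matters if a caller distinguishes nil from empty.

    package main

    import (
        "fmt"
        "slices"
    )

    func main() {
        var src []int // nil source

        viaAppend := append([]int{}, src...) // non-nil, length 0
        viaClone := slices.Clone(src)        // nil

        fmt.Println(viaAppend == nil, len(viaAppend)) // false 0
        fmt.Println(viaClone == nil, len(viaClone))   // true 0
    }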
@@ -32,6 +32,7 @@ import (
     "github.com/ethereum/go-ethereum/crypto"
     "github.com/ethereum/go-ethereum/log"
     "github.com/karalabe/hid"
+    "slices"
 )
 
 // Maximum time between wallet health checks to detect USB unplugs.
@@ -341,8 +342,8 @@ func (w *wallet) selfDerive() {
         accs  []accounts.Account
         paths []accounts.DerivationPath
 
-        nextPaths = append([]accounts.DerivationPath{}, w.deriveNextPaths...)
-        nextAddrs = append([]common.Address{}, w.deriveNextAddrs...)
+        nextPaths = slices.Clone(w.deriveNextPaths)
+        nextAddrs = slices.Clone(w.deriveNextAddrs)
 
         context = context.Background()
     )
@@ -24,6 +24,7 @@ import (
     "github.com/ethereum/go-ethereum/ethdb"
     "github.com/ethereum/go-ethereum/log"
     "github.com/ethereum/go-ethereum/rlp"
+    "slices"
 )
 
 // canonicalStore stores instances of the given type in a database and caches
@@ -69,7 +70,7 @@ func newCanonicalStore[T any](db ethdb.Iteratee, keyPrefix []byte) (*canonicalSt
 
 // databaseKey returns the database key belonging to the given period.
 func (cs *canonicalStore[T]) databaseKey(period uint64) []byte {
-    return binary.BigEndian.AppendUint64(append([]byte{}, cs.keyPrefix...), period)
+    return binary.BigEndian.AppendUint64(slices.Clone(cs.keyPrefix), period)
 }
 
 // add adds the given item to the database. It also ensures that the range remains
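The clone here is what keeps cs.keyPrefix immutable: AppendUint64 may write into spare capacity of whatever slice it is handed. A minimal sketch with an illustrative prefix (not the real key layout):

    package main

    import (
        "encoding/binary"
        "fmt"
        "slices"
    )

    func main() {
        prefix := []byte("prefix-") // illustrative only
        key := binary.BigEndian.AppendUint64(slices.Clone(prefix), 42)

        fmt.Printf("%q\n", key[:7]) // "prefix-"
        fmt.Println(key[7:])        // [0 0 0 0 0 0 0 42]
        fmt.Printf("%q\n", prefix)  // unchanged: "prefix-"
    }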
@@ -34,6 +34,7 @@ import (
     "github.com/ethereum/go-ethereum/p2p"
     "github.com/ethereum/go-ethereum/p2p/enode"
     "github.com/holiman/uint256"
+    "slices"
 )
 
 // Suite represents a structure used to test a node's conformance
@@ -837,9 +838,9 @@ func (s *Suite) TestBlobViolations(t *utesting.T) {
 func mangleSidecar(tx *types.Transaction) *types.Transaction {
     sidecar := tx.BlobTxSidecar()
     copy := types.BlobTxSidecar{
-        Blobs:       append([]kzg4844.Blob{}, sidecar.Blobs...),
-        Commitments: append([]kzg4844.Commitment{}, sidecar.Commitments...),
-        Proofs:      append([]kzg4844.Proof{}, sidecar.Proofs...),
+        Blobs:       slices.Clone(sidecar.Blobs),
+        Commitments: slices.Clone(sidecar.Commitments),
+        Proofs:      slices.Clone(sidecar.Proofs),
     }
     // zero the first commitment to alter the sidecar hash
     copy.Commitments[0] = kzg4844.Commitment{}
@@ -221,7 +221,7 @@ func (s *Snapshot) apply(headers []*types.Header) (*Snapshot, error) {
                 snap.uncast(vote.Address, vote.Authorize)
 
                 // Uncast the vote from the chronological list
-                snap.Votes = append(snap.Votes[:i], snap.Votes[i+1:]...)
+                snap.Votes = slices.Delete(snap.Votes, i, i+1)
                 break // only one vote allowed
             }
         }
@@ -261,7 +261,7 @@ func (s *Snapshot) apply(headers []*types.Header) (*Snapshot, error) {
                 snap.uncast(snap.Votes[i].Address, snap.Votes[i].Authorize)
 
                 // Uncast the vote from the chronological list
-                snap.Votes = append(snap.Votes[:i], snap.Votes[i+1:]...)
+                snap.Votes = slices.Delete(snap.Votes, i, i+1)
 
                 i--
             }
@@ -270,7 +270,7 @@ func (s *Snapshot) apply(headers []*types.Header) (*Snapshot, error) {
         // Discard any previous votes around the just changed account
         for i := 0; i < len(snap.Votes); i++ {
             if snap.Votes[i].Address == header.Coinbase {
-                snap.Votes = append(snap.Votes[:i], snap.Votes[i+1:]...)
+                snap.Votes = slices.Delete(snap.Votes, i, i+1)
                 i--
             }
         }
@@ -135,7 +135,7 @@ func newFilter(config *params.ChainConfig, genesis *types.Block, headfn func() (
     // Calculate the all the valid fork hash and fork next combos
     var (
         forksByBlock, forksByTime = gatherForks(config, genesis.Time())
-        forks                     = append(append([]uint64{}, forksByBlock...), forksByTime...)
+        forks                     = slices.Concat(forksByBlock, forksByTime)
         sums                      = make([][4]byte, len(forks)+1) // 0th is the genesis
     )
     hash := crc32.ChecksumIEEE(genesis.Hash().Bytes())
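slices.Concat (Go 1.22) allocates a fresh result and never aliases its inputs, matching the nested append it replaces. A sketch with made-up fork numbers:

    package main

    import (
        "fmt"
        "slices"
    )

    func main() {
        byBlock := []uint64{1_150_000, 2_463_000} // illustrative values
        byTime := []uint64{1_681_338_455}

        forks := slices.Concat(byBlock, byTime)
        forks[0] = 0 // writing to the result leaves the inputs intact

        fmt.Println(forks)   // [0 2463000 1681338455]
        fmt.Println(byBlock) // [1150000 2463000]
    }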
@@ -275,13 +275,13 @@ func gatherForks(config *params.ChainConfig, genesis uint64) ([]uint64, []uint64
     // Deduplicate fork identifiers applying multiple forks
     for i := 1; i < len(forksByBlock); i++ {
         if forksByBlock[i] == forksByBlock[i-1] {
-            forksByBlock = append(forksByBlock[:i], forksByBlock[i+1:]...)
+            forksByBlock = slices.Delete(forksByBlock, i, i+1)
             i--
         }
     }
     for i := 1; i < len(forksByTime); i++ {
         if forksByTime[i] == forksByTime[i-1] {
-            forksByTime = append(forksByTime[:i], forksByTime[i+1:]...)
+            forksByTime = slices.Delete(forksByTime, i, i+1)
             i--
         }
     }
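Arguably these two loops could shrink further: removing consecutive duplicates from a sorted slice is exactly what slices.Compact does. A hedged suggestion only; the commit keeps the explicit loops.

    package main

    import (
        "fmt"
        "slices"
    )

    func main() {
        forksByBlock := []uint64{100, 100, 200, 300, 300}

        // Equivalent to the delete-in-a-loop above for sorted input.
        forksByBlock = slices.Compact(forksByBlock)

        fmt.Println(forksByBlock) // [100 200 300]
    }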
@@ -26,6 +26,7 @@ import (
     "github.com/ethereum/go-ethereum/ethdb"
     "github.com/ethereum/go-ethereum/log"
     "github.com/ethereum/go-ethereum/params"
+    "slices"
 )
 
 const (
@@ -258,7 +259,7 @@ func (f *chainFreezer) freeze(db ethdb.KeyValueStore) {
                 continue
             }
             if _, ok := drop[child.ParentHash]; !ok {
-                children = append(children[:i], children[i+1:]...)
+                children = slices.Delete(children, i, i+1)
                 i--
                 continue
             }
@@ -216,7 +216,7 @@ func (fi *fastIterator) next(idx int) bool {
     if it := fi.iterators[idx].it; !it.Next() {
         it.Release()
 
-        fi.iterators = append(fi.iterators[:idx], fi.iterators[idx+1:]...)
+        fi.iterators = slices.Delete(fi.iterators, idx, idx+1)
         return len(fi.iterators) > 0
     }
     // If there's no one left to cascade into, return
@@ -44,6 +44,7 @@ import (
     "github.com/ethereum/go-ethereum/rlp"
     "github.com/holiman/billy"
     "github.com/holiman/uint256"
+    "slices"
 )
 
 const (
@@ -642,7 +643,7 @@ func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint6
             if err := p.store.Delete(id); err != nil {
                 log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
             }
-            txs = append(txs[:i], txs[i+1:]...)
+            txs = slices.Delete(txs, i, i+1)
             p.index[addr] = txs
 
             i--
@@ -1811,7 +1811,7 @@ func (t *lookup) removeAuthorities(tx *types.Transaction) {
         list := t.auths[addr]
         // Remove tx from tracker.
         if i := slices.Index(list, hash); i >= 0 {
-            list = append(list[:i], list[i+1:]...)
+            list = slices.Delete(list, i, i+1)
         } else {
             log.Error("Authority with untracked tx", "addr", addr, "hash", hash)
         }
@@ -27,6 +27,7 @@ import (
     "github.com/ethereum/go-ethereum/params"
     "github.com/ethereum/go-ethereum/rlp"
     "github.com/holiman/uint256"
+    "slices"
 )
 
 // BlobTx represents an EIP-4844 transaction.
@@ -158,9 +159,9 @@ func (tx *BlobTx) copy() TxData {
     }
     if tx.Sidecar != nil {
         cpy.Sidecar = &BlobTxSidecar{
-            Blobs:       append([]kzg4844.Blob(nil), tx.Sidecar.Blobs...),
-            Commitments: append([]kzg4844.Commitment(nil), tx.Sidecar.Commitments...),
-            Proofs:      append([]kzg4844.Proof(nil), tx.Sidecar.Proofs...),
+            Blobs:       slices.Clone(tx.Sidecar.Blobs),
+            Commitments: slices.Clone(tx.Sidecar.Commitments),
+            Proofs:      slices.Clone(tx.Sidecar.Proofs),
         }
     }
     return cpy
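slices.Clone is a shallow, element-wise copy. It still gives BlobTx.copy() full isolation here because, to the best of my knowledge of the kzg4844 package, Blob, Commitment and Proof are fixed-size byte arrays, i.e. value types copied wholesale. A stand-in sketch:

    package main

    import (
        "fmt"
        "slices"
    )

    // blob stands in for kzg4844.Blob, assumed to be a fixed-size byte array.
    type blob [4]byte

    func main() {
        src := []blob{{1, 2, 3, 4}}
        cpy := slices.Clone(src)

        cpy[0][0] = 9 // mutate the copy only

        fmt.Println(src[0][0]) // 1: the array element was copied by value
    }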
@@ -38,6 +38,7 @@ import (
     "github.com/ethereum/go-ethereum/params"
     "github.com/ethereum/go-ethereum/rlp"
     "github.com/ethereum/go-ethereum/trie"
+    "slices"
 )
 
 // downloadTester is a test simulator for mocking out local block chain.
@@ -228,8 +229,8 @@ func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash, sink chan *et
     for i, body := range bodies {
         hash := types.DeriveSha(types.Transactions(body.Transactions), hasher)
         if _, ok := dlp.withholdBodies[hash]; ok {
-            txsHashes = append(txsHashes[:i], txsHashes[i+1:]...)
-            uncleHashes = append(uncleHashes[:i], uncleHashes[i+1:]...)
+            txsHashes = slices.Delete(txsHashes, i, i+1)
+            uncleHashes = slices.Delete(uncleHashes, i, i+1)
             continue
         }
         txsHashes[i] = hash
@@ -30,6 +30,7 @@ import (
     "github.com/ethereum/go-ethereum/eth/protocols/eth"
     "github.com/ethereum/go-ethereum/ethdb"
     "github.com/ethereum/go-ethereum/log"
+    "slices"
 )
 
 // scratchHeaders is the number of headers to store in a scratch space to allow
@@ -1070,7 +1071,7 @@ func (s *skeleton) processResponse(res *headerResponse) (linked bool, merged boo
         if s.progress.Subchains[1].Tail >= s.progress.Subchains[0].Tail {
             // Fully overwritten, get rid of the subchain as a whole
             log.Debug("Previous subchain fully overwritten", "head", head, "tail", tail, "next", next)
-            s.progress.Subchains = append(s.progress.Subchains[:1], s.progress.Subchains[2:]...)
+            s.progress.Subchains = slices.Delete(s.progress.Subchains, 1, 2)
             continue
         } else {
             // Partially overwritten, trim the head to the overwritten size
@@ -1084,7 +1085,7 @@ func (s *skeleton) processResponse(res *headerResponse) (linked bool, merged boo
             s.progress.Subchains[0].Tail = s.progress.Subchains[1].Tail
             s.progress.Subchains[0].Next = s.progress.Subchains[1].Next
 
-            s.progress.Subchains = append(s.progress.Subchains[:1], s.progress.Subchains[2:]...)
+            s.progress.Subchains = slices.Delete(s.progress.Subchains, 1, 2)
             merged = true
         }
     }
@@ -31,6 +31,7 @@ import (
     "github.com/ethereum/go-ethereum/eth/protocols/eth"
     "github.com/ethereum/go-ethereum/ethdb"
     "github.com/ethereum/go-ethereum/log"
+    "slices"
 )
 
 // hookedBackfiller is a tester backfiller with all interface methods mocked and
@@ -604,7 +605,7 @@ func TestSkeletonSyncRetrievals(t *testing.T) {
         {
             head: chain[requestHeaders+100],
             peers: []*skeletonTestPeer{
-                newSkeletonTestPeer("header-skipper", append(append(append([]*types.Header{}, chain[:99]...), nil), chain[100:]...)),
+                newSkeletonTestPeer("header-skipper", append(append(slices.Clone(chain[:99]), nil), chain[100:]...)),
             },
             mid: skeletonExpect{
                 state: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
@@ -627,7 +628,7 @@ func TestSkeletonSyncRetrievals(t *testing.T) {
         {
             head: chain[requestHeaders+100],
             peers: []*skeletonTestPeer{
-                newSkeletonTestPeer("header-skipper", append(append(append([]*types.Header{}, chain[:50]...), nil), chain[51:]...)),
+                newSkeletonTestPeer("header-skipper", append(append(slices.Clone(chain[:50]), nil), chain[51:]...)),
             },
             mid: skeletonExpect{
                 state: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
@@ -650,7 +651,7 @@ func TestSkeletonSyncRetrievals(t *testing.T) {
         {
             head: chain[requestHeaders+100], // We want to force the 100th header to be a request boundary
             peers: []*skeletonTestPeer{
-                newSkeletonTestPeer("header-duper", append(append(append([]*types.Header{}, chain[:99]...), chain[98]), chain[100:]...)),
+                newSkeletonTestPeer("header-duper", append(append(slices.Clone(chain[:99]), chain[98]), chain[100:]...)),
             },
             mid: skeletonExpect{
                 state: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
@@ -673,7 +674,7 @@ func TestSkeletonSyncRetrievals(t *testing.T) {
         {
             head: chain[requestHeaders+100], // We want to force the 100th header to be a request boundary
             peers: []*skeletonTestPeer{
-                newSkeletonTestPeer("header-duper", append(append(append([]*types.Header{}, chain[:50]...), chain[49]), chain[51:]...)),
+                newSkeletonTestPeer("header-duper", append(append(slices.Clone(chain[:50]), chain[49]), chain[51:]...)),
             },
             mid: skeletonExpect{
                 state: []*subchain{{Head: requestHeaders + 100, Tail: 100}},
@@ -699,7 +700,7 @@ func TestSkeletonSyncRetrievals(t *testing.T) {
                 newSkeletonTestPeer("header-changer",
                     append(
                         append(
-                            append([]*types.Header{}, chain[:99]...),
+                            slices.Clone(chain[:99]),
                             &types.Header{
                                 ParentHash: chain[98].Hash(),
                                 Number:     big.NewInt(int64(99)),
@@ -733,7 +734,7 @@ func TestSkeletonSyncRetrievals(t *testing.T) {
                 newSkeletonTestPeer("header-changer",
                     append(
                         append(
-                            append([]*types.Header{}, chain[:50]...),
+                            slices.Clone(chain[:50]),
                             &types.Header{
                                 ParentHash: chain[49].Hash(),
                                 Number:     big.NewInt(int64(50)),
@@ -31,6 +31,7 @@ import (
     "github.com/ethereum/go-ethereum/crypto"
     "github.com/ethereum/go-ethereum/params"
     "github.com/ethereum/go-ethereum/triedb"
+    "slices"
 )
 
 // Test chain parameters.
@@ -150,7 +151,7 @@ func (tc *testChain) copy(newlen int) *testChain {
         newlen = len(tc.blocks)
     }
     cpy := &testChain{
-        blocks: append([]*types.Block{}, tc.blocks[:newlen]...),
+        blocks: slices.Clone(tc.blocks[:newlen]),
     }
     return cpy
 }
@@ -95,7 +95,7 @@ func (t *pathTrie) onTrieNode(path []byte, hash common.Hash, blob []byte) {
         // Memorize the path of first committed node, which is regarded
         // as left boundary. Deep-copy is necessary as the path given
         // is volatile.
-        t.first = append([]byte{}, path...)
+        t.first = bytes.Clone(path)
 
         // The left boundary can be uniquely determined by the first committed node
         // from stackTrie (e.g., N_1), as the shared path prefix between the first
@@ -146,7 +146,7 @@ func (t *pathTrie) onTrieNode(path []byte, hash common.Hash, blob []byte) {
 
     // Update the last flag. Deep-copy is necessary as the provided path is volatile.
     if t.last == nil {
-        t.last = append([]byte{}, path...)
+        t.last = bytes.Clone(path)
     } else {
         t.last = append(t.last[:0], path...)
     }
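bytes.Clone(b) is the []byte-specialized twin of slices.Clone: a fresh backing array and, per its documentation, nil in, nil out. That is what makes it a safe deep copy of the volatile path buffer:

    package main

    import (
        "bytes"
        "fmt"
    )

    func main() {
        path := []byte{0x0a, 0x0b} // caller-owned, will be reused
        first := bytes.Clone(path)

        path[0] = 0xff // the caller recycles its buffer

        fmt.Println(first) // [10 11]: the clone is unaffected
    }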
@@ -42,6 +42,7 @@ import (
     "github.com/ethereum/go-ethereum/rlp"
     "github.com/ethereum/go-ethereum/trie"
     "github.com/ethereum/go-ethereum/trie/trienode"
+    "slices"
 )
 
 const (
@@ -943,7 +944,7 @@ func (s *Syncer) cleanAccountTasks() {
     // Sync wasn't finished previously, check for any task that can be finalized
     for i := 0; i < len(s.tasks); i++ {
         if s.tasks[i].done {
-            s.tasks = append(s.tasks[:i], s.tasks[i+1:]...)
+            s.tasks = slices.Delete(s.tasks, i, i+1)
             i--
         }
     }
|
@ -966,7 +967,7 @@ func (s *Syncer) cleanStorageTasks() {
|
|||
// Remove storage range retrieval tasks that completed
|
||||
for j := 0; j < len(subtasks); j++ {
|
||||
if subtasks[j].done {
|
||||
subtasks = append(subtasks[:j], subtasks[j+1:]...)
|
||||
subtasks = slices.Delete(subtasks, j, j+1)
|
||||
j--
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -607,8 +607,8 @@ func testSyncBloatedProof(t *testing.T, scheme string) {
     }
     // And remove one item from the elements
     if len(keys) > 2 {
-        keys = append(keys[:1], keys[2:]...)
-        vals = append(vals[:1], vals[2:]...)
+        keys = slices.Delete(keys, 1, 2)
+        vals = slices.Delete(vals, 1, 2)
     }
     if err := t.remote.OnAccounts(t, requestId, keys, vals, proof.List()); err != nil {
         t.logger.Info("remote error on delivery (as expected)", "error", err)
@@ -28,6 +28,7 @@ import (
     "github.com/ethereum/go-ethereum/core/vm"
     "github.com/ethereum/go-ethereum/eth/tracers"
     "github.com/ethereum/go-ethereum/params"
+    "slices"
 )
 
 func init() {
@@ -75,12 +76,7 @@ func newFourByteTracer(ctx *tracers.Context, cfg json.RawMessage, chainConfig *p
 
 // isPrecompiled returns whether the addr is a precompile. Logic borrowed from newJsTracer in eth/tracers/js/tracer.go
 func (t *fourByteTracer) isPrecompiled(addr common.Address) bool {
-    for _, p := range t.activePrecompiles {
-        if p == addr {
-            return true
-        }
-    }
-    return false
+    return slices.Contains(t.activePrecompiles, addr)
 }
 
 // store saves the given identifier and datasize.
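slices.Contains works for any comparable element type; common.Address qualifies since, as far as I know, it is a fixed-size byte array. The one-liner is behaviorally identical to the removed loop. A generic sketch with a stand-in type:

    package main

    import (
        "fmt"
        "slices"
    )

    func main() {
        // [20]byte mirrors common.Address being a comparable array type.
        type address [20]byte

        precompiles := []address{{0x01}, {0x02}}

        fmt.Println(slices.Contains(precompiles, address{0x02})) // true
        fmt.Println(slices.Contains(precompiles, address{0x09})) // false
    }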
@@ -19,6 +19,7 @@ package event
 import (
     "errors"
     "reflect"
+    "slices"
     "sync"
 )
@@ -211,7 +212,7 @@ func (cs caseList) find(channel interface{}) int {
 
 // delete removes the given case from cs.
 func (cs caseList) delete(index int) caseList {
-    return append(cs[:index], cs[index+1:]...)
+    return slices.Delete(cs, index, index+1)
 }
 
 // deactivate moves the case at index into the non-accessible portion of the cs slice.
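A subtle upside of slices.Delete in this spot, assuming Go 1.22+ where Delete zeroes the vacated tail: caseList elements are reflect.SelectCase values holding channel references, and the old append splice left a stale duplicate in the tail slot, keeping that channel reachable for the garbage collector. Illustration with pointers:

    package main

    import (
        "fmt"
        "slices"
    )

    func main() {
        a, b, c := 1, 2, 3
        cases := []*int{&a, &b, &c}

        trimmed := slices.Delete(cases, 0, 1)

        fmt.Println(len(trimmed))    // 2
        fmt.Println(cases[2] == nil) // true on Go 1.22+: tail slot zeroed
    }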
@@ -182,7 +182,7 @@ func (t *pingRecorder) waitPing(timeout time.Duration) *enode.Node {
         }
         if len(t.pinged) > 0 {
             n := t.pinged[0]
-            t.pinged = append(t.pinged[:0], t.pinged[1:]...)
+            t.pinged = slices.Delete(t.pinged, 0, 1)
             return n
         }
         t.cond.Wait()
@@ -37,6 +37,7 @@ import (
     "github.com/ethereum/go-ethereum/p2p/discover/v4wire"
     "github.com/ethereum/go-ethereum/p2p/enode"
     "github.com/ethereum/go-ethereum/p2p/enr"
+    "slices"
 )
 
 // shared test variables
@@ -348,7 +349,7 @@ func TestUDPv4_findnodeMultiReply(t *testing.T) {
     // check that the sent neighbors are all returned by findnode
     select {
     case result := <-resultc:
-        want := append(list[:2], list[3:]...)
+        want := slices.Delete(list, 2, 3)
         if !reflect.DeepEqual(result, want) {
             t.Errorf("neighbors mismatch:\n got: %v\n want: %v", result, want)
         }
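Worth noting: slices.Delete, like the append splice it replaces, rearranges list's backing array in place, so list itself must not be reused afterwards expecting its original contents; the test only uses want, which is fine. A sketch of the aliasing:

    package main

    import (
        "fmt"
        "slices"
    )

    func main() {
        list := []string{"n0", "n1", "n2", "n3"}
        want := slices.Delete(list, 2, 3)

        fmt.Println(want) // [n0 n1 n3]
        // The shared backing array was shifted in place; on Go 1.22+
        // the leftover tail slot is zeroed to the empty string.
        fmt.Println(list) // [n0 n1 n3 ]
    }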
@@ -182,10 +182,8 @@ func (p *Peer) Caps() []Cap {
 // versions is supported by both this node and the peer p.
 func (p *Peer) RunningCap(protocol string, versions []uint) bool {
     if proto, ok := p.running[protocol]; ok {
-        for _, ver := range versions {
-            if proto.Version == ver {
-                return true
-            }
+        if slices.Contains(versions, proto.Version) {
+            return true
         }
     }
     return false
@@ -27,6 +27,7 @@ import (
     "mime"
     "net/http"
     "net/url"
+    "slices"
     "strconv"
     "sync"
     "time"
@@ -351,10 +352,8 @@ func (s *Server) validateRequest(r *http.Request) (int, error) {
     }
     // Check content-type
     if mt, _, err := mime.ParseMediaType(r.Header.Get("content-type")); err == nil {
-        for _, accepted := range acceptedContentTypes {
-            if accepted == mt {
-                return 0, nil
-            }
+        if slices.Contains(acceptedContentTypes, mt) {
+            return 0, nil
         }
     }
     // Invalid content-type
@@ -141,8 +141,8 @@ func (f *fuzzer) fuzz() int {
         case 2:
             // Gapped entry slice
             index = index % len(keys)
-            keys = append(keys[:index], keys[index+1:]...)
-            vals = append(vals[:index], vals[index+1:]...)
+            keys = slices.Delete(keys, index, index+1)
+            vals = slices.Delete(vals, index, index+1)
         case 3:
             // Out of order
             index1 := index % len(keys)
@@ -490,8 +490,8 @@ func TestBadRangeProof(t *testing.T) {
             if (index == 0 && start < 100) || (index == end-start-1) {
                 continue
             }
-            keys = append(keys[:index], keys[index+1:]...)
-            vals = append(vals[:index], vals[index+1:]...)
+            keys = slices.Delete(keys, index, index+1)
+            vals = slices.Delete(vals, index, index+1)
         case 3:
             // Out of order
             index1 := mrand.Intn(end - start)
@@ -741,7 +741,7 @@ func TestEmptyValueRangeProof(t *testing.T) {
         }
     }
     noop := &kv{key, []byte{}, false}
-    entries = append(append(append([]*kv{}, entries[:mid]...), noop), entries[mid:]...)
+    entries = append(append(slices.Clone(entries[:mid]), noop), entries[mid:]...)
 
     start, end := 1, len(entries)-1
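The clone-and-splice insertion kept here could arguably become slices.Insert(entries, mid, noop); hedged, because unlike the explicit clone, Insert may grow entries' existing backing array rather than always allocating a fresh one, which is fine when the old slice value is discarded as it is here. An equivalence sketch:

    package main

    import (
        "fmt"
        "slices"
    )

    func main() {
        entries := []string{"e0", "e1", "e2"}
        noop := "noop"
        mid := 1

        // The idiom kept by the commit: clone the head, splice the rest.
        spliced := append(append(slices.Clone(entries[:mid]), noop), entries[mid:]...)
        // The possible alternative (cloning first to keep entries pristine).
        inserted := slices.Insert(slices.Clone(entries), mid, noop)

        fmt.Println(spliced)  // [e0 noop e1 e2]
        fmt.Println(inserted) // [e0 noop e1 e2]
    }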
@@ -785,7 +785,7 @@ func TestAllElementsEmptyValueRangeProof(t *testing.T) {
         }
     }
     noop := &kv{key, []byte{}, false}
-    entries = append(append(append([]*kv{}, entries[:mid]...), noop), entries[mid:]...)
+    entries = append(append(slices.Clone(entries[:mid]), noop), entries[mid:]...)
 
     var keys [][]byte
     var vals [][]byte
@@ -84,7 +84,7 @@ func (t *StackTrie) Update(key, value []byte) error {
         return errors.New("non-ascending key order")
     }
     if t.last == nil {
-        t.last = append([]byte{}, k...) // allocate key slice
+        t.last = bytes.Clone(k) // allocate key slice
     } else {
         t.last = append(t.last[:0], k...) // reuse key slice
     }
@@ -30,6 +30,7 @@ import (
     "github.com/ethereum/go-ethereum/ethdb"
     "github.com/ethereum/go-ethereum/log"
     "github.com/ethereum/go-ethereum/metrics"
+    "slices"
 )
 
 // ErrNotRequested is returned by the trie sync when it's requested to process a
@@ -553,7 +554,7 @@ func (s *Sync) children(req *nodeRequest, object node) ([]*nodeRequest, error) {
         }
         children = []childNode{{
             node: node.Val,
-            path: append(append([]byte(nil), req.path...), key...),
+            path: slices.Concat(req.path, key),
         }}
         // Mark all internal nodes between shortNode and its **in disk**
         // child as invalid. This is essential in the case of path mode
@@ -595,7 +596,7 @@ func (s *Sync) children(req *nodeRequest, object node) ([]*nodeRequest, error) {
             if node.Children[i] != nil {
                 children = append(children, childNode{
                     node: node.Children[i],
-                    path: append(append([]byte(nil), req.path...), byte(i)),
+                    path: append(slices.Clone(req.path), byte(i)),
                 })
             }
         }
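The two sibling call sites end up with different idioms because slices.Concat only concatenates slices: joining a path with a key slice fits Concat, while tacking on a single byte needs an explicit clone first. Both produce a fresh slice that does not alias req.path. A sketch:

    package main

    import (
        "fmt"
        "slices"
    )

    func main() {
        reqPath := []byte{0x01}
        key := []byte{0x02, 0x03}

        full := slices.Concat(reqPath, key)          // slice + slice
        child := append(slices.Clone(reqPath), 0x07) // slice + single byte

        full[0], child[0] = 9, 9 // neither write touches reqPath

        fmt.Println(full, child, reqPath) // [9 2 3] [9 7] [1]
    }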
@@ -260,7 +260,7 @@ func (fi *fastIterator) next(idx int) bool {
     if it := fi.iterators[idx].it; !it.Next() {
         it.Release()
 
-        fi.iterators = append(fi.iterators[:idx], fi.iterators[idx+1:]...)
+        fi.iterators = slices.Delete(fi.iterators, idx, idx+1)
         return len(fi.iterators) > 0
     }
     // If there's no one left to cascade into, return