all: simplify length calculations using min/max functions

islishude 2025-03-01 23:27:55 +08:00
parent 31c972febf
commit f18fcf4776
33 changed files with 59 additions and 205 deletions
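
For context on the pattern applied throughout: min and max became built-in generic
functions in Go 1.21, so the classic compare-and-clamp block collapses to a single
call. A minimal, self-contained before/after sketch (names are illustrative, not
taken from the diff):

    package main

    import "fmt"

    // clampOld is the pre-Go-1.21 idiom removed throughout this commit.
    func clampOld(a, b []byte) int {
        n := len(a)
        if len(b) < n {
            n = len(b)
        }
        return n
    }

    // clampNew is the equivalent single call using the built-in min.
    func clampNew(a, b []byte) int {
        return min(len(a), len(b))
    }

    func main() {
        a, b := make([]byte, 5), make([]byte, 3)
        fmt.Println(clampOld(a, b), clampNew(a, b)) // 3 3
    }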

View File

@@ -292,10 +292,10 @@ func (s *CommitteeChain) deleteFixedCommitteeRootsFrom(period uint64) error {
 		// get unfixed but are still proven by the update chain. If there were
 		// committees present after the range proven by updates, those should be
 		// removed if the belonging fixed roots are also removed.
-		fromPeriod := s.updates.periods.End + 1 // not proven by updates
-		if period > fromPeriod {
-			fromPeriod = period // also not justified by fixed roots
-		}
+		fromPeriod := max(
+			s.updates.periods.End+1, // not proven by updates
+			period,                  // also not justified by fixed roots
+		)
 		s.deleteCommitteesFrom(batch, fromPeriod)
 	}
 	if err := batch.Write(); err != nil {

View File

@@ -415,10 +415,7 @@ func isSubdomain(name, domain string) bool {
 func splitTXT(value string) string {
 	var result strings.Builder
 	for len(value) > 0 {
-		rlen := len(value)
-		if rlen > 253 {
-			rlen = 253
-		}
+		rlen := min(len(value), 253)
 		result.WriteString(strconv.Quote(value[:rlen]))
 		value = value[rlen:]
 	}
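
DNS TXT character-strings are limited to 255 octets; the 253 here likely leaves
room for the two quote characters that strconv.Quote adds. A self-contained
sketch of the chunking behavior (splitTXT re-typed here for illustration, not
the package's exported API):

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    func splitTXT(value string) string {
        var result strings.Builder
        for len(value) > 0 {
            rlen := min(len(value), 253)
            result.WriteString(strconv.Quote(value[:rlen]))
            value = value[rlen:]
        }
        return result.String()
    }

    func main() {
        long := strings.Repeat("x", 300)
        fmt.Println(splitTXT(long)) // two quoted chunks: 253 bytes, then 47
    }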

View File

@@ -27,10 +27,7 @@ func XORBytes(dst, a, b []byte) int {
 // fastXORBytes xors in bulk. It only works on architectures that support
 // unaligned read/writes.
 func fastXORBytes(dst, a, b []byte) int {
-	n := len(a)
-	if len(b) < n {
-		n = len(b)
-	}
+	n := min(len(b), len(a))
 	w := n / wordSize
 	if w > 0 {
 		dw := *(*[]uintptr)(unsafe.Pointer(&dst))
@@ -49,10 +46,7 @@ func fastXORBytes(dst, a, b []byte) int {
 // safeXORBytes xors one by one. It works on all architectures, independent if
 // it supports unaligned read/writes or not.
 func safeXORBytes(dst, a, b []byte) int {
-	n := len(a)
-	if len(b) < n {
-		n = len(b)
-	}
+	n := min(len(b), len(a))
 	for i := 0; i < n; i++ {
 		dst[i] = a[i] ^ b[i]
 	}
@@ -71,10 +65,7 @@ func ANDBytes(dst, a, b []byte) int {
 // fastANDBytes ands in bulk. It only works on architectures that support
 // unaligned read/writes.
 func fastANDBytes(dst, a, b []byte) int {
-	n := len(a)
-	if len(b) < n {
-		n = len(b)
-	}
+	n := min(len(b), len(a))
 	w := n / wordSize
 	if w > 0 {
 		dw := *(*[]uintptr)(unsafe.Pointer(&dst))
@@ -93,10 +84,7 @@ func fastANDBytes(dst, a, b []byte) int {
 // safeANDBytes ands one by one. It works on all architectures, independent if
 // it supports unaligned read/writes or not.
 func safeANDBytes(dst, a, b []byte) int {
-	n := len(a)
-	if len(b) < n {
-		n = len(b)
-	}
+	n := min(len(b), len(a))
 	for i := 0; i < n; i++ {
 		dst[i] = a[i] & b[i]
 	}
@@ -115,10 +103,7 @@ func ORBytes(dst, a, b []byte) int {
 // fastORBytes ors in bulk. It only works on architectures that support
 // unaligned read/writes.
 func fastORBytes(dst, a, b []byte) int {
-	n := len(a)
-	if len(b) < n {
-		n = len(b)
-	}
+	n := min(len(b), len(a))
 	w := n / wordSize
 	if w > 0 {
 		dw := *(*[]uintptr)(unsafe.Pointer(&dst))
@@ -137,10 +122,7 @@ func fastORBytes(dst, a, b []byte) int {
 // safeORBytes ors one by one. It works on all architectures, independent if
 // it supports unaligned read/writes or not.
 func safeORBytes(dst, a, b []byte) int {
-	n := len(a)
-	if len(b) < n {
-		n = len(b)
-	}
+	n := min(len(b), len(a))
 	for i := 0; i < n; i++ {
 		dst[i] = a[i] | b[i]
 	}
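
The shared contract across these helpers: process min(len(a), len(b)) bytes and
return that count. A minimal standalone sketch (safeXORBytes re-typed here for
illustration):

    package main

    import "fmt"

    // safeXORBytes xors the overlapping prefix of a and b into dst and
    // reports how many bytes were written.
    func safeXORBytes(dst, a, b []byte) int {
        n := min(len(b), len(a))
        for i := 0; i < n; i++ {
            dst[i] = a[i] ^ b[i]
        }
        return n
    }

    func main() {
        dst := make([]byte, 4)
        n := safeXORBytes(dst, []byte{0xff, 0x0f, 0xf0, 0xaa}, []byte{0x0f, 0x0f})
        fmt.Println(n, dst) // 2 [240 0 0 0]
    }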

View File

@@ -31,10 +31,7 @@ func Raise(max uint64) (uint64, error) {
 		return 0, err
 	}
 	// Try to update the limit to the max allowance
-	limit.Cur = limit.Max
-	if limit.Cur > max {
-		limit.Cur = max
-	}
+	limit.Cur = min(limit.Max, max)
 	if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
 		return 0, err
 	}

View File

@@ -147,10 +147,7 @@ func DecodeBig(input string) (*big.Int, error) {
 	words := make([]big.Word, len(raw)/bigWordNibbles+1)
 	end := len(raw)
 	for i := range words {
-		start := end - bigWordNibbles
-		if start < 0 {
-			start = 0
-		}
+		start := max(end-bigWordNibbles, 0)
 		for ri := start; ri < end; ri++ {
 			nib := decodeNibble(raw[ri])
 			if nib == badNibble {

View File

@@ -179,10 +179,7 @@ func (b *Big) UnmarshalText(input []byte) error {
 	words := make([]big.Word, len(raw)/bigWordNibbles+1)
 	end := len(raw)
 	for i := range words {
-		start := end - bigWordNibbles
-		if start < 0 {
-			start = 0
-		}
+		start := max(end-bigWordNibbles, 0)
 		for ri := start; ri < end; ri++ {
 			nib := decodeNibble(raw[ri])
 			if nib == badNibble {

View File

@@ -565,10 +565,7 @@ func (c *Clique) Prepare(chain consensus.ChainHeaderReader, header *types.Header
 	if parent == nil {
 		return consensus.ErrUnknownAncestor
 	}
-	header.Time = parent.Time + c.config.Period
-	if header.Time < uint64(time.Now().Unix()) {
-		header.Time = uint64(time.Now().Unix())
-	}
+	header.Time = max(parent.Time+c.config.Period, uint64(time.Now().Unix()))
 	return nil
 }

View File

@@ -175,17 +175,11 @@ func CalcGasLimit(parentGasLimit, desiredLimit uint64) uint64 {
 	}
 	// If we're outside our allowed gas range, we try to hone towards them
 	if limit < desiredLimit {
-		limit = parentGasLimit + delta
-		if limit > desiredLimit {
-			limit = desiredLimit
-		}
+		limit = min(parentGasLimit+delta, desiredLimit)
 		return limit
 	}
 	if limit > desiredLimit {
-		limit = parentGasLimit - delta
-		if limit < desiredLimit {
-			limit = desiredLimit
-		}
+		limit = max(parentGasLimit-delta, desiredLimit)
 	}
 	return limit
 }
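
A worked sketch of the honing behavior, with delta passed in directly so the
example stays self-contained (in the real function delta is derived from
parentGasLimit and a protocol bound divisor):

    package main

    import "fmt"

    func hone(parentGasLimit, limit, desiredLimit, delta uint64) uint64 {
        if limit < desiredLimit {
            return min(parentGasLimit+delta, desiredLimit) // step up, never overshoot
        }
        if limit > desiredLimit {
            limit = max(parentGasLimit-delta, desiredLimit) // step down, never undershoot
        }
        return limit
    }

    func main() {
        // Far below target: move up by a full delta.
        fmt.Println(hone(30_000_000, 30_000_000, 36_000_000, 29_000)) // 30029000
        // One small step away: clamp exactly onto the target.
        fmt.Println(hone(35_995_000, 35_995_000, 36_000_000, 29_000)) // 36000000
    }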

View File

@@ -181,14 +181,8 @@ func (m *Matcher) Start(ctx context.Context, begin, end uint64, results chan uin
 			// Calculate the first and last blocks of the section
 			sectionStart := res.section * m.sectionSize
-			first := sectionStart
-			if begin > first {
-				first = begin
-			}
-			last := sectionStart + m.sectionSize - 1
-			if end < last {
-				last = end
-			}
+			first := max(begin, sectionStart)
+			last := min(end, sectionStart+m.sectionSize-1)
 			// Iterate over all the blocks in the section and return the matching ones
 			for i := first; i <= last; i++ {
 				// Skip the entire byte if no matches are found inside (and we're processing an entire byte!)
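
The max/min pair computes the intersection of the requested block range
[begin, end] with one section's own range; a hedged standalone sketch:

    package main

    import "fmt"

    func intersect(begin, end, sectionStart, sectionSize uint64) (first, last uint64) {
        first = max(begin, sectionStart)
        last = min(end, sectionStart+sectionSize-1)
        return first, last
    }

    func main() {
        // Section covering blocks [4096, 8191], query for [5000, 20000]:
        fmt.Println(intersect(5000, 20000, 4096, 4096)) // 5000 8191
    }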

View File

@@ -450,12 +450,9 @@ func (c *ChainIndexer) AddChildIndexer(indexer *ChainIndexer) {
 	c.children = append(c.children, indexer)
 	// Cascade any pending updates to new children too
-	sections := c.storedSections
-	if c.knownSections < sections {
-		// if a section is "stored" but not "known" then it is a checkpoint without
-		// available chain data so we should not cascade it yet
-		sections = c.knownSections
-	}
+	// if a section is "stored" but not "known" then it is a checkpoint without
+	// available chain data so we should not cascade it yet
+	sections := min(c.knownSections, c.storedSections)
 	if sections > 0 {
 		indexer.newHead(sections*c.sectionSize-1, false)
 	}

View File

@@ -251,10 +251,7 @@ func (ae *AccessEvents) CodeChunksRangeGas(contractAddr common.Address, startPC,
 		return 0
 	}
-	endPC := startPC + size
-	if endPC > codeLen {
-		endPC = codeLen
-	}
+	endPC := min(startPC+size, codeLen)
 	if endPC > 0 {
 		endPC -= 1 // endPC is the last bytecode that will be touched.
 	}

View File

@@ -101,10 +101,7 @@ func (indexer *txIndexer) run(tail *uint64, head uint64, stop chan struct{}, don
 		// It can happen when chain is rewound to a historical point which
 		// is even lower than the indexes tail, recap the indexing target
 		// to new head to avoid reading non-existent block bodies.
-		end := *tail
-		if end > head+1 {
-			end = head + 1
-		}
+		end := min(*tail, head+1)
 		rawdb.IndexTransactions(indexer.db, 0, end, stop, true)
 	}
 	return

View File

@@ -613,14 +613,8 @@ func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint6
 			if txs[i].evictionExecTip.Cmp(txs[i].execTipCap) > 0 {
 				txs[i].evictionExecTip = txs[i].execTipCap
 			}
-			txs[i].evictionExecFeeJumps = txs[i-1].evictionExecFeeJumps
-			if txs[i].evictionExecFeeJumps > txs[i].basefeeJumps {
-				txs[i].evictionExecFeeJumps = txs[i].basefeeJumps
-			}
-			txs[i].evictionBlobFeeJumps = txs[i-1].evictionBlobFeeJumps
-			if txs[i].evictionBlobFeeJumps > txs[i].blobfeeJumps {
-				txs[i].evictionBlobFeeJumps = txs[i].blobfeeJumps
-			}
+			txs[i].evictionExecFeeJumps = min(txs[i-1].evictionExecFeeJumps, txs[i].basefeeJumps)
+			txs[i].evictionBlobFeeJumps = min(txs[i-1].evictionBlobFeeJumps, txs[i].blobfeeJumps)
 			continue
 		}
 		// Sanity check that there's no double nonce. This case would generally
@@ -1418,14 +1412,8 @@ func (p *BlobPool) add(tx *types.Transaction) (err error) {
 			if txs[i].evictionExecTip.Cmp(txs[i].execTipCap) > 0 {
 				txs[i].evictionExecTip = txs[i].execTipCap
 			}
-			txs[i].evictionExecFeeJumps = txs[i-1].evictionExecFeeJumps
-			if txs[i].evictionExecFeeJumps > txs[i].basefeeJumps {
-				txs[i].evictionExecFeeJumps = txs[i].basefeeJumps
-			}
-			txs[i].evictionBlobFeeJumps = txs[i-1].evictionBlobFeeJumps
-			if txs[i].evictionBlobFeeJumps > txs[i].blobfeeJumps {
-				txs[i].evictionBlobFeeJumps = txs[i].blobfeeJumps
-			}
+			txs[i].evictionExecFeeJumps = min(txs[i-1].evictionExecFeeJumps, txs[i].basefeeJumps)
+			txs[i].evictionBlobFeeJumps = min(txs[i-1].evictionBlobFeeJumps, txs[i].blobfeeJumps)
 		}
 		// Update the eviction heap with the new information:
 		// - If the transaction is from a new account, add it to the heap
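
The eviction fee jumps form a running minimum along an account's nonce-sorted
transactions: a later transaction is only as evictable as the weakest one before
it. A hedged sketch of that propagation with plain float64 values (pool fields
dropped for brevity):

    package main

    import "fmt"

    func main() {
        basefeeJumps := []float64{12.5, 8.0, 10.0, 6.5}
        evictionJumps := make([]float64, len(basefeeJumps))
        evictionJumps[0] = basefeeJumps[0]
        for i := 1; i < len(basefeeJumps); i++ {
            // Each entry carries the tightest constraint seen so far.
            evictionJumps[i] = min(evictionJumps[i-1], basefeeJumps[i])
        }
        fmt.Println(evictionJumps) // [12.5 8 8 6.5]
    }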

View File

@@ -345,18 +345,12 @@ func verifyPoolInternals(t *testing.T, pool *BlobPool) {
 				t.Errorf("addr %v, tx %d eviction execution tip mismatch: have %d, want %d", addr, i, txs[i].evictionExecTip, wantExecTip)
 			}
-			wantExecFeeJumps := txs[i-1].evictionExecFeeJumps
-			if wantExecFeeJumps > txs[i].basefeeJumps {
-				wantExecFeeJumps = txs[i].basefeeJumps
-			}
+			wantExecFeeJumps := min(txs[i-1].evictionExecFeeJumps, txs[i].basefeeJumps)
 			if math.Abs(txs[i].evictionExecFeeJumps-wantExecFeeJumps) > 0.001 {
 				t.Errorf("addr %v, tx %d eviction execution fee jumps mismatch: have %f, want %f", addr, i, txs[i].evictionExecFeeJumps, wantExecFeeJumps)
 			}
-			wantBlobFeeJumps := txs[i-1].evictionBlobFeeJumps
-			if wantBlobFeeJumps > txs[i].blobfeeJumps {
-				wantBlobFeeJumps = txs[i].blobfeeJumps
-			}
+			wantBlobFeeJumps := min(txs[i-1].evictionBlobFeeJumps, txs[i].blobfeeJumps)
 			if math.Abs(txs[i].evictionBlobFeeJumps-wantBlobFeeJumps) > 0.001 {
 				t.Errorf("addr %v, tx %d eviction blob fee jumps mismatch: have %f, want %f", addr, i, txs[i].evictionBlobFeeJumps, wantBlobFeeJumps)
 			}

View File

@@ -94,14 +94,8 @@ func (h *evictHeap) Less(i, j int) bool {
 	lastI := txsI[len(txsI)-1]
 	lastJ := txsJ[len(txsJ)-1]
-	prioI := evictionPriority(h.basefeeJumps, lastI.evictionExecFeeJumps, h.blobfeeJumps, lastI.evictionBlobFeeJumps)
-	if prioI > 0 {
-		prioI = 0
-	}
-	prioJ := evictionPriority(h.basefeeJumps, lastJ.evictionExecFeeJumps, h.blobfeeJumps, lastJ.evictionBlobFeeJumps)
-	if prioJ > 0 {
-		prioJ = 0
-	}
+	prioI := min(evictionPriority(h.basefeeJumps, lastI.evictionExecFeeJumps, h.blobfeeJumps, lastI.evictionBlobFeeJumps), 0)
+	prioJ := min(evictionPriority(h.basefeeJumps, lastJ.evictionExecFeeJumps, h.blobfeeJumps, lastJ.evictionBlobFeeJumps), 0)
 	if prioI == prioJ {
 		return lastI.evictionExecTip.Lt(lastJ.evictionExecTip)
 	}

View File

@@ -57,10 +57,7 @@ func getData(data []byte, start uint64, size uint64) []byte {
 	if start > length {
 		start = length
 	}
-	end := start + size
-	if end > length {
-		end = length
-	}
+	end := min(start+size, length)
 	return common.RightPadBytes(data[start:end], int(size))
 }
@@ -69,10 +66,7 @@ func getDataAndAdjustedBounds(data []byte, start uint64, size uint64) (codeCopyP
 	if start > length {
 		start = length
 	}
-	end := start + size
-	if end > length {
-		end = length
-	}
+	end := min(start+size, length)
 	return common.RightPadBytes(data[start:end], int(size)), start, end - start
 }

View File

@@ -170,10 +170,7 @@ func benchmarkPrecompiled(addr string, test precompiledTest, bench *testing.B) {
 		res, _, err = RunPrecompiledContract(p, data, reqGas, nil)
 	}
 	bench.StopTimer()
-	elapsed := uint64(time.Since(start))
-	if elapsed < 1 {
-		elapsed = 1
-	}
+	elapsed := max(uint64(time.Since(start)), 1)
 	gasUsed := reqGas * uint64(bench.N)
 	bench.ReportMetric(float64(reqGas), "gas/op")
 	// Keep it as uint64, multiply 100 to get two digit float later

View File

@@ -1225,10 +1225,7 @@ func (api *ConsensusAPI) getBodiesByRange(start, count hexutil.Uint64) ([]*engin
 	}
 	// limit count up until current
 	current := api.eth.BlockChain().CurrentBlock().Number.Uint64()
-	last := uint64(start) + uint64(count) - 1
-	if last > current {
-		last = current
-	}
+	last := min(uint64(start)+uint64(count)-1, current)
 	bodies := make([]*engine.ExecutionPayloadBody, 0, uint64(count))
 	for i := uint64(start); i <= last; i++ {
 		block := api.eth.BlockChain().GetBlockByNumber(i)

View File

@@ -676,10 +676,7 @@ func (d *Downloader) processHeaders(origin uint64) error {
 		default:
 		}
 		// Select the next chunk of headers to import
-		limit := maxHeadersProcess
-		if limit > len(headers) {
-			limit = len(headers)
-		}
+		limit := min(maxHeadersProcess, len(headers))
 		chunkHeaders := headers[:limit]
 		chunkHashes := hashes[:limit]
View File

@@ -103,30 +103,21 @@ func (p *peerConnection) UpdateReceiptRate(delivered int, elapsed time.Duration)
 // HeaderCapacity retrieves the peer's header download allowance based on its
 // previously discovered throughput.
 func (p *peerConnection) HeaderCapacity(targetRTT time.Duration) int {
-	cap := p.rates.Capacity(eth.BlockHeadersMsg, targetRTT)
-	if cap > MaxHeaderFetch {
-		cap = MaxHeaderFetch
-	}
+	cap := min(p.rates.Capacity(eth.BlockHeadersMsg, targetRTT), MaxHeaderFetch)
 	return cap
 }
 
 // BodyCapacity retrieves the peer's body download allowance based on its
 // previously discovered throughput.
 func (p *peerConnection) BodyCapacity(targetRTT time.Duration) int {
-	cap := p.rates.Capacity(eth.BlockBodiesMsg, targetRTT)
-	if cap > MaxBlockFetch {
-		cap = MaxBlockFetch
-	}
+	cap := min(p.rates.Capacity(eth.BlockBodiesMsg, targetRTT), MaxBlockFetch)
 	return cap
 }
 
 // ReceiptCapacity retrieves the peers receipt download allowance based on its
 // previously discovered throughput.
 func (p *peerConnection) ReceiptCapacity(targetRTT time.Duration) int {
-	cap := p.rates.Capacity(eth.ReceiptsMsg, targetRTT)
-	if cap > MaxReceiptFetch {
-		cap = MaxReceiptFetch
-	}
+	cap := min(p.rates.Capacity(eth.ReceiptsMsg, targetRTT), MaxReceiptFetch)
 	return cap
 }

View File

@@ -328,10 +328,7 @@ func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool)
 	)
 	// proceed in batches
 	for i := 0; i < len(txs); i += 128 {
-		end := i + 128
-		if end > len(txs) {
-			end = len(txs)
-		}
+		end := min(i+128, len(txs))
 		var (
 			duplicate   int64
 			underpriced int64
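
The min keeps the final batch from running past the slice; a minimal sketch of
the fixed-size batching idiom:

    package main

    import "fmt"

    func main() {
        items := make([]int, 300)
        const batch = 128
        for i := 0; i < len(items); i += batch {
            end := min(i+batch, len(items))
            fmt.Printf("batch [%d:%d] size %d\n", i, end, end-i)
        }
        // batch [0:128] size 128
        // batch [128:256] size 128
        // batch [256:300] size 44
    }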

View File

@@ -170,13 +170,10 @@ func Estimate(ctx context.Context, call *core.Message, opts *Options, gasCap uin
 			break
 		}
 	}
-	mid := (hi + lo) / 2
-	if mid > lo*2 {
-		// Most txs don't need much higher gas limit than their gas used, and most txs don't
-		// require near the full block limit of gas, so the selection of where to bisect the
-		// range here is skewed to favor the low side.
-		mid = lo * 2
-	}
+	// Most txs don't need much higher gas limit than their gas used, and most txs don't
+	// require near the full block limit of gas, so the selection of where to bisect the
+	// range here is skewed to favor the low side.
+	mid := min((hi+lo)/2, lo*2)
 	failed, _, err = execute(ctx, call, opts, mid)
 	if err != nil {
 		// This should not happen under normal conditions since if we make it this far the
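
Worked numbers for the skewed bisection: with lo = 21000 gas used and hi at a
30M cap, a plain midpoint would probe roughly 15M gas, while the skewed pick
probes only 42000, converging much faster for typical transactions. A sketch:

    package main

    import "fmt"

    func main() {
        lo, hi := uint64(21_000), uint64(30_000_000)
        fmt.Println((hi + lo) / 2)        // 15010500: plain bisection
        fmt.Println(min((hi+lo)/2, lo*2)) // 42000: skewed toward the low side
    }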

View File

@@ -143,10 +143,7 @@ func serviceNonContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBloc
 }
 
 func serviceContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHeadersRequest) []rlp.RawValue {
-	count := query.Amount
-	if count > maxHeadersServe {
-		count = maxHeadersServe
-	}
+	count := min(query.Amount, maxHeadersServe)
 	if query.Origin.Hash == (common.Hash{}) {
 		// Number mode, just return the canon chain segment. The backend
 		// delivers in [N, N-1, N-2..] descending order, so we need to

View File

@@ -248,10 +248,7 @@ func (api *API) traceChain(start, end *types.Block, config *TraceConfig, closed
 		reexec = *config.Reexec
 	}
 	blocks := int(end.NumberU64() - start.NumberU64())
-	threads := runtime.NumCPU()
-	if threads > blocks {
-		threads = blocks
-	}
+	threads := min(runtime.NumCPU(), blocks)
 	var (
 		pend = new(sync.WaitGroup)
 		ctx  = context.Background()
@@ -653,10 +650,7 @@ func (api *API) traceBlockParallel(ctx context.Context, block *types.Block, stat
 		results = make([]*txTraceResult, len(txs))
 		pend    sync.WaitGroup
 	)
-	threads := runtime.NumCPU()
-	if threads > len(txs) {
-		threads = len(txs)
-	}
+	threads := min(runtime.NumCPU(), len(txs))
 	jobs := make(chan *txTraceTask, threads)
 	for th := 0; th < threads; th++ {
 		pend.Add(1)
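
Capping the worker count at the job count avoids spawning goroutines that would
sit idle; a minimal standalone sketch of the pattern both hunks use:

    package main

    import (
        "fmt"
        "runtime"
        "sync"
    )

    func main() {
        jobs := make(chan int, 8)
        for i := 0; i < 3; i++ {
            jobs <- i
        }
        close(jobs)

        threads := min(runtime.NumCPU(), 3) // never more workers than jobs
        var wg sync.WaitGroup
        for t := 0; t < threads; t++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                for j := range jobs {
                    fmt.Println("traced job", j)
                }
            }()
        }
        wg.Wait()
    }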

View File

@@ -685,10 +685,7 @@ func (s *Service) reportHistory(conn *connWrapper, list []uint64) error {
 	} else {
 		// No indexes requested, send back the top ones
 		head := s.backend.CurrentHeader().Number.Int64()
-		start := head - historyUpdateRange + 1
-		if start < 0 {
-			start = 0
-		}
+		start := max(head-historyUpdateRange+1, 0)
 		for i := uint64(start); i <= uint64(head); i++ {
 			indexes = append(indexes, i)
 		}

View File

@@ -365,10 +365,7 @@ func (d *dialScheduler) expireHistory() {
 // freeDialSlots returns the number of free dial slots. The result can be negative
 // when peers are connected while their task is still running.
 func (d *dialScheduler) freeDialSlots() int {
-	slots := (d.maxDialPeers - d.dialPeers) * 2
-	if slots > d.maxActiveDials {
-		slots = d.maxActiveDials
-	}
+	slots := min((d.maxDialPeers-d.dialPeers)*2, d.maxActiveDials)
 	free := slots - len(d.dialing)
 	return free
 }

View File

@@ -202,10 +202,7 @@ func (t *Tree) build(entries []entry) entry {
 	}
 	var subtrees []entry
 	for len(entries) > 0 {
-		n := maxChildren
-		if len(entries) < n {
-			n = len(entries)
-		}
+		n := min(len(entries), maxChildren)
 		sub := t.build(entries[:n])
 		entries = entries[n:]
 		subtrees = append(subtrees, sub)

View File

@@ -136,10 +136,7 @@ func (r *eofSignal) Read(buf []byte) (int, error) {
 		return 0, io.EOF
 	}
 
-	max := len(buf)
-	if int(r.count) < len(buf) {
-		max = int(r.count)
-	}
+	max := min(int(r.count), len(buf))
 	n, err := r.wrapped.Read(buf[:max])
 	r.count -= uint32(n)
 	if (err != nil || r.count == 0) && r.eof != nil {

View File

@@ -378,10 +378,7 @@ func (t *Trackers) TargetTimeout() time.Duration {
 // targetTimeout is the internal lockless version of TargetTimeout to be used
 // during QoS tuning.
 func (t *Trackers) targetTimeout() time.Duration {
-	timeout := time.Duration(ttlScaling * float64(t.roundtrip) / t.confidence)
-	if timeout > t.OverrideTTLLimit {
-		timeout = t.OverrideTTLLimit
-	}
+	timeout := min(time.Duration(ttlScaling*float64(t.roundtrip)/t.confidence), t.OverrideTTLLimit)
 	return timeout
 }
@@ -433,10 +430,7 @@ func (t *Trackers) detune() {
 	// Otherwise drop the confidence factor
 	peers := float64(len(t.trackers))
-	t.confidence = t.confidence * (peers - 1) / peers
-	if t.confidence < rttMinConfidence {
-		t.confidence = rttMinConfidence
-	}
+	t.confidence = max(t.confidence*(peers-1)/peers, rttMinConfidence)
 	t.log.Debug("Relaxed msgrate QoS values", "rtt", t.roundtrip, "confidence", t.confidence, "ttl", t.targetTimeout())
 }

View File

@@ -344,10 +344,7 @@ func (s *sharedUDPConn) ReadFromUDPAddrPort(b []byte) (n int, addr netip.AddrPor
 	if !ok {
 		return 0, netip.AddrPort{}, errors.New("connection was closed")
 	}
-	l := len(packet.Data)
-	if l > len(b) {
-		l = len(b)
-	}
+	l := min(len(packet.Data), len(b))
 	copy(b[:l], packet.Data[:l])
 	return l, packet.Addr, nil
 }

View File

@@ -314,10 +314,7 @@ func decodeSliceElems(s *Stream, val reflect.Value, elemdec decoder) error {
 	for ; ; i++ {
 		// grow slice if necessary
 		if i >= val.Cap() {
-			newcap := val.Cap() + val.Cap()/2
-			if newcap < 4 {
-				newcap = 4
-			}
+			newcap := max(val.Cap()+val.Cap()/2, 4)
 			newv := reflect.MakeSlice(val.Type(), val.Len(), newcap)
 			reflect.Copy(newv, val)
 			val.Set(newv)
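
The 1.5x growth with a floor of 4 is a standard dynamic-array resize policy; a
sketch of how the capacity sequence evolves from an empty slice:

    package main

    import "fmt"

    func main() {
        c := 0
        for i := 0; i < 5; i++ {
            c = max(c+c/2, 4)
            fmt.Println(c) // 4, 6, 9, 13, 19
        }
    }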

View File

@@ -351,10 +351,7 @@ func ChunkifyCode(code []byte) ChunkedCode {
 	chunks := make([]byte, chunkCount*32)
 	for i := 0; i < chunkCount; i++ {
 		// number of bytes to copy, 31 unless the end of the code has been reached.
-		end := 31 * (i + 1)
-		if len(code) < end {
-			end = len(code)
-		}
+		end := min(len(code), 31*(i+1))
 		copy(chunks[i*32+1:], code[31*i:end]) // copy the code itself
 
 		// chunk offset = taken from the last chunk.
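
Each verkle code chunk stores 31 code bytes behind a 1-byte header, so only the
final chunk's copy is shortened by the min. A hedged sketch of the chunk bounds
(ceiling division for the chunk count is my assumption, not shown in the hunk):

    package main

    import "fmt"

    func main() {
        code := make([]byte, 70) // 70 bytes -> chunks of 31, 31, 8
        chunkCount := (len(code) + 30) / 31
        for i := 0; i < chunkCount; i++ {
            end := min(len(code), 31*(i+1))
            fmt.Printf("chunk %d copies code[%d:%d] (%d bytes)\n", i, 31*i, end, end-31*i)
        }
    }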

View File

@@ -556,10 +556,8 @@ func writeHistory(writer ethdb.AncientWriter, dl *diffLayer) error {
 // and performs the callback on each item.
 func checkHistories(reader ethdb.AncientReader, start, count uint64, check func(*meta) error) error {
 	for count > 0 {
-		number := count
-		if number > 10000 {
-			number = 10000 // split the big read into small chunks
-		}
+		// Read a batch of meta objects at maximum 10000 items.
+		number := min(count, 10_000)
 		blobs, err := rawdb.ReadStateHistoryMetaList(reader, start, number)
 		if err != nil {
 			return err