diff --git a/beacon/light/committee_chain.go b/beacon/light/committee_chain.go
index 4fa87785c0..1be8b355db 100644
--- a/beacon/light/committee_chain.go
+++ b/beacon/light/committee_chain.go
@@ -292,10 +292,10 @@ func (s *CommitteeChain) deleteFixedCommitteeRootsFrom(period uint64) error {
 		// get unfixed but are still proven by the update chain. If there were
 		// committees present after the range proven by updates, those should be
 		// removed if the belonging fixed roots are also removed.
-		fromPeriod := s.updates.periods.End + 1 // not proven by updates
-		if period > fromPeriod {
-			fromPeriod = period // also not justified by fixed roots
-		}
+		fromPeriod := max(
+			s.updates.periods.End+1, // not proven by updates
+			period,                  // also not justified by fixed roots
+		)
 		s.deleteCommitteesFrom(batch, fromPeriod)
 	}
 	if err := batch.Write(); err != nil {
diff --git a/cmd/devp2p/dns_route53.go b/cmd/devp2p/dns_route53.go
index 86907688f3..d09c9cdcad 100644
--- a/cmd/devp2p/dns_route53.go
+++ b/cmd/devp2p/dns_route53.go
@@ -415,10 +415,7 @@ func isSubdomain(name, domain string) bool {
 func splitTXT(value string) string {
 	var result strings.Builder
 	for len(value) > 0 {
-		rlen := len(value)
-		if rlen > 253 {
-			rlen = 253
-		}
+		rlen := min(len(value), 253)
 		result.WriteString(strconv.Quote(value[:rlen]))
 		value = value[rlen:]
 	}
diff --git a/common/bitutil/bitutil.go b/common/bitutil/bitutil.go
index a18a6d18ee..944f1f3b24 100644
--- a/common/bitutil/bitutil.go
+++ b/common/bitutil/bitutil.go
@@ -27,10 +27,7 @@ func XORBytes(dst, a, b []byte) int {
 // fastXORBytes xors in bulk. It only works on architectures that support
 // unaligned read/writes.
 func fastXORBytes(dst, a, b []byte) int {
-	n := len(a)
-	if len(b) < n {
-		n = len(b)
-	}
+	n := min(len(b), len(a))
 	w := n / wordSize
 	if w > 0 {
 		dw := *(*[]uintptr)(unsafe.Pointer(&dst))
@@ -49,10 +46,7 @@ func fastXORBytes(dst, a, b []byte) int {
 // safeXORBytes xors one by one. It works on all architectures, independent if
 // it supports unaligned read/writes or not.
 func safeXORBytes(dst, a, b []byte) int {
-	n := len(a)
-	if len(b) < n {
-		n = len(b)
-	}
+	n := min(len(b), len(a))
 	for i := 0; i < n; i++ {
 		dst[i] = a[i] ^ b[i]
 	}
@@ -71,10 +65,7 @@ func ANDBytes(dst, a, b []byte) int {
 // fastANDBytes ands in bulk. It only works on architectures that support
 // unaligned read/writes.
 func fastANDBytes(dst, a, b []byte) int {
-	n := len(a)
-	if len(b) < n {
-		n = len(b)
-	}
+	n := min(len(b), len(a))
 	w := n / wordSize
 	if w > 0 {
 		dw := *(*[]uintptr)(unsafe.Pointer(&dst))
@@ -93,10 +84,7 @@ func fastANDBytes(dst, a, b []byte) int {
 // safeANDBytes ands one by one. It works on all architectures, independent if
 // it supports unaligned read/writes or not.
 func safeANDBytes(dst, a, b []byte) int {
-	n := len(a)
-	if len(b) < n {
-		n = len(b)
-	}
+	n := min(len(b), len(a))
 	for i := 0; i < n; i++ {
 		dst[i] = a[i] & b[i]
 	}
@@ -115,10 +103,7 @@ func ORBytes(dst, a, b []byte) int {
 // fastORBytes ors in bulk. It only works on architectures that support
 // unaligned read/writes.
 func fastORBytes(dst, a, b []byte) int {
-	n := len(a)
-	if len(b) < n {
-		n = len(b)
-	}
+	n := min(len(b), len(a))
 	w := n / wordSize
 	if w > 0 {
 		dw := *(*[]uintptr)(unsafe.Pointer(&dst))
@@ -137,10 +122,7 @@ func fastORBytes(dst, a, b []byte) int {
 // safeORBytes ors one by one. It works on all architectures, independent if
 // it supports unaligned read/writes or not.
 func safeORBytes(dst, a, b []byte) int {
-	n := len(a)
-	if len(b) < n {
-		n = len(b)
-	}
+	n := min(len(b), len(a))
 	for i := 0; i < n; i++ {
 		dst[i] = a[i] | b[i]
 	}
diff --git a/common/fdlimit/fdlimit_darwin.go b/common/fdlimit/fdlimit_darwin.go
index 6b26fa00f1..e5c5db6b19 100644
--- a/common/fdlimit/fdlimit_darwin.go
+++ b/common/fdlimit/fdlimit_darwin.go
@@ -31,10 +31,7 @@ func Raise(max uint64) (uint64, error) {
 		return 0, err
 	}
 	// Try to update the limit to the max allowance
-	limit.Cur = limit.Max
-	if limit.Cur > max {
-		limit.Cur = max
-	}
+	limit.Cur = min(limit.Max, max)
 	if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
 		return 0, err
 	}
diff --git a/common/hexutil/hexutil.go b/common/hexutil/hexutil.go
index d3201850a8..c9258ea32c 100644
--- a/common/hexutil/hexutil.go
+++ b/common/hexutil/hexutil.go
@@ -147,10 +147,7 @@ func DecodeBig(input string) (*big.Int, error) {
 	words := make([]big.Word, len(raw)/bigWordNibbles+1)
 	end := len(raw)
 	for i := range words {
-		start := end - bigWordNibbles
-		if start < 0 {
-			start = 0
-		}
+		start := max(end-bigWordNibbles, 0)
 		for ri := start; ri < end; ri++ {
 			nib := decodeNibble(raw[ri])
 			if nib == badNibble {
diff --git a/common/hexutil/json.go b/common/hexutil/json.go
index e0ac98f52d..c423791430 100644
--- a/common/hexutil/json.go
+++ b/common/hexutil/json.go
@@ -179,10 +179,7 @@ func (b *Big) UnmarshalText(input []byte) error {
 	words := make([]big.Word, len(raw)/bigWordNibbles+1)
 	end := len(raw)
 	for i := range words {
-		start := end - bigWordNibbles
-		if start < 0 {
-			start = 0
-		}
+		start := max(end-bigWordNibbles, 0)
 		for ri := start; ri < end; ri++ {
 			nib := decodeNibble(raw[ri])
 			if nib == badNibble {
diff --git a/consensus/clique/clique.go b/consensus/clique/clique.go
index d31efd7445..bb2fbed797 100644
--- a/consensus/clique/clique.go
+++ b/consensus/clique/clique.go
@@ -565,10 +565,7 @@ func (c *Clique) Prepare(chain consensus.ChainHeaderReader, header *types.Header
 	if parent == nil {
 		return consensus.ErrUnknownAncestor
 	}
-	header.Time = parent.Time + c.config.Period
-	if header.Time < uint64(time.Now().Unix()) {
-		header.Time = uint64(time.Now().Unix())
-	}
+	header.Time = max(parent.Time+c.config.Period, uint64(time.Now().Unix()))
 	return nil
 }
 
diff --git a/core/block_validator.go b/core/block_validator.go
index 591e472bc1..30bb3810ca 100644
--- a/core/block_validator.go
+++ b/core/block_validator.go
@@ -175,17 +175,11 @@ func CalcGasLimit(parentGasLimit, desiredLimit uint64) uint64 {
 	}
 	// If we're outside our allowed gas range, we try to hone towards them
 	if limit < desiredLimit {
-		limit = parentGasLimit + delta
-		if limit > desiredLimit {
-			limit = desiredLimit
-		}
+		limit = min(parentGasLimit+delta, desiredLimit)
 		return limit
 	}
 	if limit > desiredLimit {
-		limit = parentGasLimit - delta
-		if limit < desiredLimit {
-			limit = desiredLimit
-		}
+		limit = max(parentGasLimit-delta, desiredLimit)
 	}
 	return limit
 }
diff --git a/core/bloombits/matcher.go b/core/bloombits/matcher.go
index 486581fe23..ce5fdc96d3 100644
--- a/core/bloombits/matcher.go
+++ b/core/bloombits/matcher.go
@@ -181,14 +181,8 @@ func (m *Matcher) Start(ctx context.Context, begin, end uint64, results chan uin
 				// Calculate the first and last blocks of the section
 				sectionStart := res.section * m.sectionSize
 
-				first := sectionStart
-				if begin > first {
-					first = begin
-				}
-				last := sectionStart + m.sectionSize - 1
-				if end < last {
-					last = end
-				}
+				first := max(begin, sectionStart)
+				last := min(end, sectionStart+m.sectionSize-1)
 				// Iterate over all the blocks in the section and return the matching ones
 				for i := first; i <= last; i++ {
 					// Skip the entire byte if no matches are found inside (and we're processing an entire byte!)
diff --git a/core/chain_indexer.go b/core/chain_indexer.go
index 2865daa1ff..aef4467b90 100644
--- a/core/chain_indexer.go
+++ b/core/chain_indexer.go
@@ -450,12 +450,9 @@ func (c *ChainIndexer) AddChildIndexer(indexer *ChainIndexer) {
 	c.children = append(c.children, indexer)
 
 	// Cascade any pending updates to new children too
-	sections := c.storedSections
-	if c.knownSections < sections {
-		// if a section is "stored" but not "known" then it is a checkpoint without
-		// available chain data so we should not cascade it yet
-		sections = c.knownSections
-	}
+	// if a section is "stored" but not "known" then it is a checkpoint without
+	// available chain data so we should not cascade it yet
+	sections := min(c.knownSections, c.storedSections)
 	if sections > 0 {
 		indexer.newHead(sections*c.sectionSize-1, false)
 	}
diff --git a/core/state/access_events.go b/core/state/access_events.go
index b745c383b1..3424ae0a79 100644
--- a/core/state/access_events.go
+++ b/core/state/access_events.go
@@ -251,10 +251,7 @@ func (ae *AccessEvents) CodeChunksRangeGas(contractAddr common.Address, startPC,
 		return 0
 	}
 
-	endPC := startPC + size
-	if endPC > codeLen {
-		endPC = codeLen
-	}
+	endPC := min(startPC+size, codeLen)
 	if endPC > 0 {
 		endPC -= 1 // endPC is the last bytecode that will be touched.
 	}
diff --git a/core/txindexer.go b/core/txindexer.go
index 293124f681..e841b2e22b 100644
--- a/core/txindexer.go
+++ b/core/txindexer.go
@@ -101,10 +101,7 @@ func (indexer *txIndexer) run(tail *uint64, head uint64, stop chan struct{}, don
 			// It can happen when chain is rewound to a historical point which
 			// is even lower than the indexes tail, recap the indexing target
 			// to new head to avoid reading non-existent block bodies.
-			end := *tail
-			if end > head+1 {
-				end = head + 1
-			}
+			end := min(*tail, head+1)
 			rawdb.IndexTransactions(indexer.db, 0, end, stop, true)
 		}
 		return
diff --git a/core/txpool/blobpool/blobpool.go b/core/txpool/blobpool/blobpool.go
index 7ad95612bf..5692b85a6f 100644
--- a/core/txpool/blobpool/blobpool.go
+++ b/core/txpool/blobpool/blobpool.go
@@ -613,14 +613,8 @@ func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint6
 			if txs[i].evictionExecTip.Cmp(txs[i].execTipCap) > 0 {
 				txs[i].evictionExecTip = txs[i].execTipCap
 			}
-			txs[i].evictionExecFeeJumps = txs[i-1].evictionExecFeeJumps
-			if txs[i].evictionExecFeeJumps > txs[i].basefeeJumps {
-				txs[i].evictionExecFeeJumps = txs[i].basefeeJumps
-			}
-			txs[i].evictionBlobFeeJumps = txs[i-1].evictionBlobFeeJumps
-			if txs[i].evictionBlobFeeJumps > txs[i].blobfeeJumps {
-				txs[i].evictionBlobFeeJumps = txs[i].blobfeeJumps
-			}
+			txs[i].evictionExecFeeJumps = min(txs[i-1].evictionExecFeeJumps, txs[i].basefeeJumps)
+			txs[i].evictionBlobFeeJumps = min(txs[i-1].evictionBlobFeeJumps, txs[i].blobfeeJumps)
 			continue
 		}
 		// Sanity check that there's no double nonce. This case would generally
@@ -1418,14 +1412,8 @@ func (p *BlobPool) add(tx *types.Transaction) (err error) {
 		if txs[i].evictionExecTip.Cmp(txs[i].execTipCap) > 0 {
 			txs[i].evictionExecTip = txs[i].execTipCap
 		}
-		txs[i].evictionExecFeeJumps = txs[i-1].evictionExecFeeJumps
-		if txs[i].evictionExecFeeJumps > txs[i].basefeeJumps {
-			txs[i].evictionExecFeeJumps = txs[i].basefeeJumps
-		}
-		txs[i].evictionBlobFeeJumps = txs[i-1].evictionBlobFeeJumps
-		if txs[i].evictionBlobFeeJumps > txs[i].blobfeeJumps {
-			txs[i].evictionBlobFeeJumps = txs[i].blobfeeJumps
-		}
+		txs[i].evictionExecFeeJumps = min(txs[i-1].evictionExecFeeJumps, txs[i].basefeeJumps)
+		txs[i].evictionBlobFeeJumps = min(txs[i-1].evictionBlobFeeJumps, txs[i].blobfeeJumps)
 	}
 	// Update the eviction heap with the new information:
 	// - If the transaction is from a new account, add it to the heap
diff --git a/core/txpool/blobpool/blobpool_test.go b/core/txpool/blobpool/blobpool_test.go
index d9137cb679..df2ece143e 100644
--- a/core/txpool/blobpool/blobpool_test.go
+++ b/core/txpool/blobpool/blobpool_test.go
@@ -345,18 +345,12 @@ func verifyPoolInternals(t *testing.T, pool *BlobPool) {
 				t.Errorf("addr %v, tx %d eviction execution tip mismatch: have %d, want %d", addr, i, txs[i].evictionExecTip, wantExecTip)
 			}
 
-			wantExecFeeJumps := txs[i-1].evictionExecFeeJumps
-			if wantExecFeeJumps > txs[i].basefeeJumps {
-				wantExecFeeJumps = txs[i].basefeeJumps
-			}
+			wantExecFeeJumps := min(txs[i-1].evictionExecFeeJumps, txs[i].basefeeJumps)
 			if math.Abs(txs[i].evictionExecFeeJumps-wantExecFeeJumps) > 0.001 {
 				t.Errorf("addr %v, tx %d eviction execution fee jumps mismatch: have %f, want %f", addr, i, txs[i].evictionExecFeeJumps, wantExecFeeJumps)
 			}
 
-			wantBlobFeeJumps := txs[i-1].evictionBlobFeeJumps
-			if wantBlobFeeJumps > txs[i].blobfeeJumps {
-				wantBlobFeeJumps = txs[i].blobfeeJumps
-			}
+			wantBlobFeeJumps := min(txs[i-1].evictionBlobFeeJumps, txs[i].blobfeeJumps)
 			if math.Abs(txs[i].evictionBlobFeeJumps-wantBlobFeeJumps) > 0.001 {
 				t.Errorf("addr %v, tx %d eviction blob fee jumps mismatch: have %f, want %f", addr, i, txs[i].evictionBlobFeeJumps, wantBlobFeeJumps)
 			}
diff --git a/core/txpool/blobpool/evictheap.go b/core/txpool/blobpool/evictheap.go
index 722a71bc9b..1ae57f450f 100644
--- a/core/txpool/blobpool/evictheap.go
+++ b/core/txpool/blobpool/evictheap.go
@@ -94,14 +94,8 @@ func (h *evictHeap) Less(i, j int) bool {
 	lastI := txsI[len(txsI)-1]
 	lastJ := txsJ[len(txsJ)-1]
 
-	prioI := evictionPriority(h.basefeeJumps, lastI.evictionExecFeeJumps, h.blobfeeJumps, lastI.evictionBlobFeeJumps)
-	if prioI > 0 {
-		prioI = 0
-	}
-	prioJ := evictionPriority(h.basefeeJumps, lastJ.evictionExecFeeJumps, h.blobfeeJumps, lastJ.evictionBlobFeeJumps)
-	if prioJ > 0 {
-		prioJ = 0
-	}
+	prioI := min(evictionPriority(h.basefeeJumps, lastI.evictionExecFeeJumps, h.blobfeeJumps, lastI.evictionBlobFeeJumps), 0)
+	prioJ := min(evictionPriority(h.basefeeJumps, lastJ.evictionExecFeeJumps, h.blobfeeJumps, lastJ.evictionBlobFeeJumps), 0)
 	if prioI == prioJ {
 		return lastI.evictionExecTip.Lt(lastJ.evictionExecTip)
 	}
diff --git a/core/vm/common.go b/core/vm/common.go
index 658803b820..8f039b39ec 100644
--- a/core/vm/common.go
+++ b/core/vm/common.go
@@ -57,10 +57,7 @@ func getData(data []byte, start uint64, size uint64) []byte {
 	if start > length {
 		start = length
 	}
-	end := start + size
-	if end > length {
-		end = length
-	}
+	end := min(start+size, length)
 	return common.RightPadBytes(data[start:end], int(size))
 }
 
@@ -69,10 +66,7 @@ func getDataAndAdjustedBounds(data []byte, start uint64, size uint64) (codeCopyP
 	if start > length {
 		start = length
 	}
-	end := start + size
-	if end > length {
-		end = length
-	}
+	end := min(start+size, length)
 	return common.RightPadBytes(data[start:end], int(size)), start, end - start
 }
 
diff --git a/core/vm/contracts_test.go b/core/vm/contracts_test.go
index b627f2ada5..2ca82d0a6e 100644
--- a/core/vm/contracts_test.go
+++ b/core/vm/contracts_test.go
@@ -170,10 +170,7 @@ func benchmarkPrecompiled(addr string, test precompiledTest, bench *testing.B) {
 			res, _, err = RunPrecompiledContract(p, data, reqGas, nil)
 		}
 		bench.StopTimer()
-		elapsed := uint64(time.Since(start))
-		if elapsed < 1 {
-			elapsed = 1
-		}
+		elapsed := max(uint64(time.Since(start)), 1)
 		gasUsed := reqGas * uint64(bench.N)
 		bench.ReportMetric(float64(reqGas), "gas/op")
 		// Keep it as uint64, multiply 100 to get two digit float later
diff --git a/eth/catalyst/api.go b/eth/catalyst/api.go
index e6f29c970b..ebd9904b24 100644
--- a/eth/catalyst/api.go
+++ b/eth/catalyst/api.go
@@ -1225,10 +1225,7 @@ func (api *ConsensusAPI) getBodiesByRange(start, count hexutil.Uint64) ([]*engin
 	}
 	// limit count up until current
 	current := api.eth.BlockChain().CurrentBlock().Number.Uint64()
-	last := uint64(start) + uint64(count) - 1
-	if last > current {
-		last = current
-	}
+	last := min(uint64(start)+uint64(count)-1, current)
 	bodies := make([]*engine.ExecutionPayloadBody, 0, uint64(count))
 	for i := uint64(start); i <= last; i++ {
 		block := api.eth.BlockChain().GetBlockByNumber(i)
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index 3f3f9b7f0c..69ba5b04b5 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -676,10 +676,7 @@ func (d *Downloader) processHeaders(origin uint64) error {
 				default:
 				}
 				// Select the next chunk of headers to import
-				limit := maxHeadersProcess
-				if limit > len(headers) {
-					limit = len(headers)
-				}
+				limit := min(maxHeadersProcess, len(headers))
 				chunkHeaders := headers[:limit]
 				chunkHashes := hashes[:limit]
 
diff --git a/eth/downloader/peer.go b/eth/downloader/peer.go
index 0848e92a26..13cead573f 100644
--- a/eth/downloader/peer.go
+++ b/eth/downloader/peer.go
@@ -103,30 +103,21 @@ func (p *peerConnection) UpdateReceiptRate(delivered int, elapsed time.Duration)
 // HeaderCapacity retrieves the peer's header download allowance based on its
 // previously discovered throughput.
 func (p *peerConnection) HeaderCapacity(targetRTT time.Duration) int {
-	cap := p.rates.Capacity(eth.BlockHeadersMsg, targetRTT)
-	if cap > MaxHeaderFetch {
-		cap = MaxHeaderFetch
-	}
+	cap := min(p.rates.Capacity(eth.BlockHeadersMsg, targetRTT), MaxHeaderFetch)
 	return cap
 }
 
 // BodyCapacity retrieves the peer's body download allowance based on its
 // previously discovered throughput.
 func (p *peerConnection) BodyCapacity(targetRTT time.Duration) int {
-	cap := p.rates.Capacity(eth.BlockBodiesMsg, targetRTT)
-	if cap > MaxBlockFetch {
-		cap = MaxBlockFetch
-	}
+	cap := min(p.rates.Capacity(eth.BlockBodiesMsg, targetRTT), MaxBlockFetch)
 	return cap
 }
 
 // ReceiptCapacity retrieves the peers receipt download allowance based on its
 // previously discovered throughput.
 func (p *peerConnection) ReceiptCapacity(targetRTT time.Duration) int {
-	cap := p.rates.Capacity(eth.ReceiptsMsg, targetRTT)
-	if cap > MaxReceiptFetch {
-		cap = MaxReceiptFetch
-	}
+	cap := min(p.rates.Capacity(eth.ReceiptsMsg, targetRTT), MaxReceiptFetch)
 	return cap
 }
 
diff --git a/eth/fetcher/tx_fetcher.go b/eth/fetcher/tx_fetcher.go
index 97d1e29862..fe681abab7 100644
--- a/eth/fetcher/tx_fetcher.go
+++ b/eth/fetcher/tx_fetcher.go
@@ -328,10 +328,7 @@ func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool)
 	)
 	// proceed in batches
 	for i := 0; i < len(txs); i += 128 {
-		end := i + 128
-		if end > len(txs) {
-			end = len(txs)
-		}
+		end := min(i+128, len(txs))
 		var (
 			duplicate   int64
 			underpriced int64
diff --git a/eth/gasestimator/gasestimator.go b/eth/gasestimator/gasestimator.go
index a6c4718cf4..4bdaf15206 100644
--- a/eth/gasestimator/gasestimator.go
+++ b/eth/gasestimator/gasestimator.go
@@ -170,13 +170,10 @@ func Estimate(ctx context.Context, call *core.Message, opts *Options, gasCap uin
 				break
 			}
 		}
-		mid := (hi + lo) / 2
-		if mid > lo*2 {
-			// Most txs don't need much higher gas limit than their gas used, and most txs don't
-			// require near the full block limit of gas, so the selection of where to bisect the
-			// range here is skewed to favor the low side.
-			mid = lo * 2
-		}
+		// Most txs don't need much higher gas limit than their gas used, and most txs don't
+		// require near the full block limit of gas, so the selection of where to bisect the
+		// range here is skewed to favor the low side.
+		mid := min((hi+lo)/2, lo*2)
 		failed, _, err = execute(ctx, call, opts, mid)
 		if err != nil {
 			// This should not happen under normal conditions since if we make it this far the
diff --git a/eth/protocols/eth/handlers.go b/eth/protocols/eth/handlers.go
index b3886270f3..11847b8af6 100644
--- a/eth/protocols/eth/handlers.go
+++ b/eth/protocols/eth/handlers.go
@@ -143,10 +143,7 @@ func serviceNonContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBloc
 }
 
 func serviceContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHeadersRequest) []rlp.RawValue {
-	count := query.Amount
-	if count > maxHeadersServe {
-		count = maxHeadersServe
-	}
+	count := min(query.Amount, maxHeadersServe)
 	if query.Origin.Hash == (common.Hash{}) {
 		// Number mode, just return the canon chain segment. The backend
 		// delivers in [N, N-1, N-2..] descending order, so we need to
diff --git a/eth/tracers/api.go b/eth/tracers/api.go
index d13aee555f..ee395fc574 100644
--- a/eth/tracers/api.go
+++ b/eth/tracers/api.go
@@ -248,10 +248,7 @@ func (api *API) traceChain(start, end *types.Block, config *TraceConfig, closed
 		reexec = *config.Reexec
 	}
 	blocks := int(end.NumberU64() - start.NumberU64())
-	threads := runtime.NumCPU()
-	if threads > blocks {
-		threads = blocks
-	}
+	threads := min(runtime.NumCPU(), blocks)
 	var (
 		pend = new(sync.WaitGroup)
 		ctx  = context.Background()
@@ -653,10 +650,7 @@ func (api *API) traceBlockParallel(ctx context.Context, block *types.Block, stat
 		results = make([]*txTraceResult, len(txs))
 		pend    sync.WaitGroup
 	)
-	threads := runtime.NumCPU()
-	if threads > len(txs) {
-		threads = len(txs)
-	}
+	threads := min(runtime.NumCPU(), len(txs))
 	jobs := make(chan *txTraceTask, threads)
 	for th := 0; th < threads; th++ {
 		pend.Add(1)
diff --git a/ethstats/ethstats.go b/ethstats/ethstats.go
index 0090a7d4c1..3059f5c83c 100644
--- a/ethstats/ethstats.go
+++ b/ethstats/ethstats.go
@@ -685,10 +685,7 @@ func (s *Service) reportHistory(conn *connWrapper, list []uint64) error {
 	} else {
 		// No indexes requested, send back the top ones
 		head := s.backend.CurrentHeader().Number.Int64()
-		start := head - historyUpdateRange + 1
-		if start < 0 {
-			start = 0
-		}
+		start := max(head-historyUpdateRange+1, 0)
 		for i := uint64(start); i <= uint64(head); i++ {
 			indexes = append(indexes, i)
 		}
diff --git a/p2p/dial.go b/p2p/dial.go
index 225709427c..607e346d55 100644
--- a/p2p/dial.go
+++ b/p2p/dial.go
@@ -365,10 +365,7 @@ func (d *dialScheduler) expireHistory() {
 // freeDialSlots returns the number of free dial slots. The result can be negative
 // when peers are connected while their task is still running.
 func (d *dialScheduler) freeDialSlots() int {
-	slots := (d.maxDialPeers - d.dialPeers) * 2
-	if slots > d.maxActiveDials {
-		slots = d.maxActiveDials
-	}
+	slots := min((d.maxDialPeers-d.dialPeers)*2, d.maxActiveDials)
 	free := slots - len(d.dialing)
 	return free
 }
diff --git a/p2p/dnsdisc/tree.go b/p2p/dnsdisc/tree.go
index a8295ac9eb..a462d4a56b 100644
--- a/p2p/dnsdisc/tree.go
+++ b/p2p/dnsdisc/tree.go
@@ -202,10 +202,7 @@ func (t *Tree) build(entries []entry) entry {
 	}
 	var subtrees []entry
 	for len(entries) > 0 {
-		n := maxChildren
-		if len(entries) < n {
-			n = len(entries)
-		}
+		n := min(len(entries), maxChildren)
 		sub := t.build(entries[:n])
 		entries = entries[n:]
 		subtrees = append(subtrees, sub)
diff --git a/p2p/message.go b/p2p/message.go
index 3ab56ee350..a5b04f9d56 100644
--- a/p2p/message.go
+++ b/p2p/message.go
@@ -136,10 +136,7 @@ func (r *eofSignal) Read(buf []byte) (int, error) {
 		return 0, io.EOF
 	}
 
-	max := len(buf)
-	if int(r.count) < len(buf) {
-		max = int(r.count)
-	}
+	max := min(int(r.count), len(buf))
 	n, err := r.wrapped.Read(buf[:max])
 	r.count -= uint32(n)
 	if (err != nil || r.count == 0) && r.eof != nil {
diff --git a/p2p/msgrate/msgrate.go b/p2p/msgrate/msgrate.go
index de1a3177db..48b60e5f65 100644
--- a/p2p/msgrate/msgrate.go
+++ b/p2p/msgrate/msgrate.go
@@ -378,10 +378,7 @@ func (t *Trackers) TargetTimeout() time.Duration {
 // targetTimeout is the internal lockless version of TargetTimeout to be used
 // during QoS tuning.
 func (t *Trackers) targetTimeout() time.Duration {
-	timeout := time.Duration(ttlScaling * float64(t.roundtrip) / t.confidence)
-	if timeout > t.OverrideTTLLimit {
-		timeout = t.OverrideTTLLimit
-	}
+	timeout := min(time.Duration(ttlScaling*float64(t.roundtrip)/t.confidence), t.OverrideTTLLimit)
 	return timeout
 }
 
@@ -433,10 +430,7 @@ func (t *Trackers) detune() {
 	// Otherwise drop the confidence factor
 	peers := float64(len(t.trackers))
 
-	t.confidence = t.confidence * (peers - 1) / peers
-	if t.confidence < rttMinConfidence {
-		t.confidence = rttMinConfidence
-	}
+	t.confidence = max(t.confidence*(peers-1)/peers, rttMinConfidence)
 	t.log.Debug("Relaxed msgrate QoS values", "rtt", t.roundtrip, "confidence", t.confidence, "ttl", t.targetTimeout())
 }
 
diff --git a/p2p/server.go b/p2p/server.go
index c1564352e5..75394edded 100644
--- a/p2p/server.go
+++ b/p2p/server.go
@@ -344,10 +344,7 @@ func (s *sharedUDPConn) ReadFromUDPAddrPort(b []byte) (n int, addr netip.AddrPor
 	if !ok {
 		return 0, netip.AddrPort{}, errors.New("connection was closed")
 	}
-	l := len(packet.Data)
-	if l > len(b) {
-		l = len(b)
-	}
+	l := min(len(packet.Data), len(b))
 	copy(b[:l], packet.Data[:l])
 	return l, packet.Addr, nil
 }
diff --git a/rlp/decode.go b/rlp/decode.go
index 0fbca243ee..63c37a8604 100644
--- a/rlp/decode.go
+++ b/rlp/decode.go
@@ -314,10 +314,7 @@ func decodeSliceElems(s *Stream, val reflect.Value, elemdec decoder) error {
 	for ; ; i++ {
 		// grow slice if necessary
 		if i >= val.Cap() {
-			newcap := val.Cap() + val.Cap()/2
-			if newcap < 4 {
-				newcap = 4
-			}
+			newcap := max(val.Cap()+val.Cap()/2, 4)
 			newv := reflect.MakeSlice(val.Type(), val.Len(), newcap)
 			reflect.Copy(newv, val)
 			val.Set(newv)
diff --git a/trie/verkle.go b/trie/verkle.go
index 015b8f6590..359ee33b9f 100644
--- a/trie/verkle.go
+++ b/trie/verkle.go
@@ -351,10 +351,7 @@ func ChunkifyCode(code []byte) ChunkedCode {
 	chunks := make([]byte, chunkCount*32)
 	for i := 0; i < chunkCount; i++ {
 		// number of bytes to copy, 31 unless the end of the code has been reached.
-		end := 31 * (i + 1)
-		if len(code) < end {
-			end = len(code)
-		}
+		end := min(len(code), 31*(i+1))
 		copy(chunks[i*32+1:], code[31*i:end]) // copy the code itself
 
 		// chunk offset = taken from the last chunk.
diff --git a/triedb/pathdb/history.go b/triedb/pathdb/history.go
index c063e45371..6bf0ea68aa 100644
--- a/triedb/pathdb/history.go
+++ b/triedb/pathdb/history.go
@@ -556,10 +556,8 @@ func writeHistory(writer ethdb.AncientWriter, dl *diffLayer) error {
 // and performs the callback on each item.
 func checkHistories(reader ethdb.AncientReader, start, count uint64, check func(*meta) error) error {
 	for count > 0 {
-		number := count
-		if number > 10000 {
-			number = 10000 // split the big read into small chunks
-		}
+		// Read a batch of meta objects at maximum 10000 items.
+		number := min(count, 10_000)
 		blobs, err := rawdb.ReadStateHistoryMetaList(reader, start, number)
 		if err != nil {
 			return err