all: simplify length calculations using min/max functions

commit f18fcf4776
parent 31c972febf
Author: islishude
Date:   2025-03-01 23:27:55 +08:00
33 changed files with 59 additions and 205 deletions

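Every change below leans on the generic min and max builtins added in Go 1.21, which collapse the usual multi-line clamp into a single expression. A minimal standalone sketch of the equivalence being applied throughout:

package main

import "fmt"

func main() {
    a, b := 300, 253

    // Old pattern: clamp with an explicit branch.
    n := a
    if b < n {
        n = b
    }

    // New pattern: the builtin expresses the same thing in one call.
    fmt.Println(n, min(a, b)) // 253 253
    fmt.Println(max(a, b))    // 300
}
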
@@ -292,10 +292,10 @@ func (s *CommitteeChain) deleteFixedCommitteeRootsFrom(period uint64) error {
// get unfixed but are still proven by the update chain. If there were
// committees present after the range proven by updates, those should be
// removed if the belonging fixed roots are also removed.
fromPeriod := s.updates.periods.End + 1 // not proven by updates
if period > fromPeriod {
fromPeriod = period // also not justified by fixed roots
}
fromPeriod := max(
s.updates.periods.End+1, // not proven by updates
period, // also not justified by fixed roots
)
s.deleteCommitteesFrom(batch, fromPeriod)
}
if err := batch.Write(); err != nil {

@@ -415,10 +415,7 @@ func isSubdomain(name, domain string) bool {
func splitTXT(value string) string {
var result strings.Builder
for len(value) > 0 {
rlen := len(value)
if rlen > 253 {
rlen = 253
}
rlen := min(len(value), 253)
result.WriteString(strconv.Quote(value[:rlen]))
value = value[rlen:]
}

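splitTXT breaks a long TXT value into quoted chunks of at most 253 characters each. The hunk ends before the function returns; a runnable sketch of the same loop, assuming the accumulated builder contents are what the caller gets back:

package main

import (
    "fmt"
    "strconv"
    "strings"
)

// splitTXTSketch mirrors the loop above: emit the value as a sequence of
// quoted strings no longer than 253 characters apiece.
func splitTXTSketch(value string) string {
    var result strings.Builder
    for len(value) > 0 {
        rlen := min(len(value), 253)
        result.WriteString(strconv.Quote(value[:rlen]))
        value = value[rlen:]
    }
    return result.String() // assumed return value; not shown in the hunk
}

func main() {
    out := splitTXTSketch(strings.Repeat("a", 300))
    fmt.Println(len(out)) // 304: 300 payload characters plus two pairs of quotes
}
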
@@ -27,10 +27,7 @@ func XORBytes(dst, a, b []byte) int {
// fastXORBytes xors in bulk. It only works on architectures that support
// unaligned read/writes.
func fastXORBytes(dst, a, b []byte) int {
n := len(a)
if len(b) < n {
n = len(b)
}
n := min(len(b), len(a))
w := n / wordSize
if w > 0 {
dw := *(*[]uintptr)(unsafe.Pointer(&dst))
@@ -49,10 +46,7 @@ func fastXORBytes(dst, a, b []byte) int {
// safeXORBytes xors one by one. It works on all architectures, independent if
// it supports unaligned read/writes or not.
func safeXORBytes(dst, a, b []byte) int {
n := len(a)
if len(b) < n {
n = len(b)
}
n := min(len(b), len(a))
for i := 0; i < n; i++ {
dst[i] = a[i] ^ b[i]
}
@@ -71,10 +65,7 @@ func ANDBytes(dst, a, b []byte) int {
// fastANDBytes ands in bulk. It only works on architectures that support
// unaligned read/writes.
func fastANDBytes(dst, a, b []byte) int {
n := len(a)
if len(b) < n {
n = len(b)
}
n := min(len(b), len(a))
w := n / wordSize
if w > 0 {
dw := *(*[]uintptr)(unsafe.Pointer(&dst))
@@ -93,10 +84,7 @@ func fastANDBytes(dst, a, b []byte) int {
// safeANDBytes ands one by one. It works on all architectures, independent if
// it supports unaligned read/writes or not.
func safeANDBytes(dst, a, b []byte) int {
n := len(a)
if len(b) < n {
n = len(b)
}
n := min(len(b), len(a))
for i := 0; i < n; i++ {
dst[i] = a[i] & b[i]
}
@@ -115,10 +103,7 @@ func ORBytes(dst, a, b []byte) int {
// fastORBytes ors in bulk. It only works on architectures that support
// unaligned read/writes.
func fastORBytes(dst, a, b []byte) int {
n := len(a)
if len(b) < n {
n = len(b)
}
n := min(len(b), len(a))
w := n / wordSize
if w > 0 {
dw := *(*[]uintptr)(unsafe.Pointer(&dst))
@@ -137,10 +122,7 @@ func fastORBytes(dst, a, b []byte) int {
// safeORBytes ors one by one. It works on all architectures, independent if
// it supports unaligned read/writes or not.
func safeORBytes(dst, a, b []byte) int {
n := len(a)
if len(b) < n {
n = len(b)
}
n := min(len(b), len(a))
for i := 0; i < n; i++ {
dst[i] = a[i] | b[i]
}

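All six helpers in this file share one shape: clamp the working length to the shorter input, then combine byte by byte (safe path) or word by word (fast path). A self-contained sketch of the safe XOR variant with the new min-based clamp:

package main

import "fmt"

// safeXOR mirrors safeXORBytes above: XOR a and b byte by byte into dst,
// touching only as many bytes as the shorter input provides.
func safeXOR(dst, a, b []byte) int {
    n := min(len(b), len(a))
    for i := 0; i < n; i++ {
        dst[i] = a[i] ^ b[i]
    }
    return n
}

func main() {
    a := []byte{0xff, 0x0f, 0xf0}
    b := []byte{0x0f, 0x0f} // shorter operand: only two bytes are combined
    dst := make([]byte, len(a))
    fmt.Println(safeXOR(dst, a, b), dst) // 2 [240 0 0]
}
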
@@ -31,10 +31,7 @@ func Raise(max uint64) (uint64, error) {
return 0, err
}
// Try to update the limit to the max allowance
limit.Cur = limit.Max
if limit.Cur > max {
limit.Cur = max
}
limit.Cur = min(limit.Max, max)
if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
return 0, err
}

@@ -147,10 +147,7 @@ func DecodeBig(input string) (*big.Int, error) {
words := make([]big.Word, len(raw)/bigWordNibbles+1)
end := len(raw)
for i := range words {
start := end - bigWordNibbles
if start < 0 {
start = 0
}
start := max(end-bigWordNibbles, 0)
for ri := start; ri < end; ri++ {
nib := decodeNibble(raw[ri])
if nib == badNibble {

@@ -179,10 +179,7 @@ func (b *Big) UnmarshalText(input []byte) error {
words := make([]big.Word, len(raw)/bigWordNibbles+1)
end := len(raw)
for i := range words {
start := end - bigWordNibbles
if start < 0 {
start = 0
}
start := max(end-bigWordNibbles, 0)
for ri := start; ri < end; ri++ {
nib := decodeNibble(raw[ri])
if nib == badNibble {

@@ -565,10 +565,7 @@ func (c *Clique) Prepare(chain consensus.ChainHeaderReader, header *types.Header
if parent == nil {
return consensus.ErrUnknownAncestor
}
header.Time = parent.Time + c.config.Period
if header.Time < uint64(time.Now().Unix()) {
header.Time = uint64(time.Now().Unix())
}
header.Time = max(parent.Time+c.config.Period, uint64(time.Now().Unix()))
return nil
}

@@ -175,17 +175,11 @@ func CalcGasLimit(parentGasLimit, desiredLimit uint64) uint64 {
}
// If we're outside our allowed gas range, we try to hone towards them
if limit < desiredLimit {
limit = parentGasLimit + delta
if limit > desiredLimit {
limit = desiredLimit
}
limit = min(parentGasLimit+delta, desiredLimit)
return limit
}
if limit > desiredLimit {
limit = parentGasLimit - delta
if limit < desiredLimit {
limit = desiredLimit
}
limit = max(parentGasLimit-delta, desiredLimit)
}
return limit
}

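CalcGasLimit hones the limit toward the desired target by at most delta per block, never overshooting: min caps the upward step, max caps the downward one. A hedged sketch of that pattern with a hypothetical delta (geth derives the real delta from the parent limit):

package main

import "fmt"

// honeTowards moves the limit from parent toward desired by at most delta
// without overshooting, the same clamp pair as CalcGasLimit above.
func honeTowards(parent, desired, delta uint64) uint64 {
    limit := parent
    if limit < desired {
        return min(parent+delta, desired)
    }
    if limit > desired {
        return max(parent-delta, desired)
    }
    return limit
}

func main() {
    fmt.Println(honeTowards(30_000_000, 36_000_000, 29_296)) // 30029296: one step up, still below the target
    fmt.Println(honeTowards(30_000_000, 30_010_000, 29_296)) // 30010000: clamped exactly at the target
}
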
@@ -181,14 +181,8 @@ func (m *Matcher) Start(ctx context.Context, begin, end uint64, results chan uin
// Calculate the first and last blocks of the section
sectionStart := res.section * m.sectionSize
first := sectionStart
if begin > first {
first = begin
}
last := sectionStart + m.sectionSize - 1
if end < last {
last = end
}
first := max(begin, sectionStart)
last := min(end, sectionStart+m.sectionSize-1)
// Iterate over all the blocks in the section and return the matching ones
for i := first; i <= last; i++ {
// Skip the entire byte if no matches are found inside (and we're processing an entire byte!)

@@ -450,12 +450,9 @@ func (c *ChainIndexer) AddChildIndexer(indexer *ChainIndexer) {
c.children = append(c.children, indexer)
// Cascade any pending updates to new children too
sections := c.storedSections
if c.knownSections < sections {
// if a section is "stored" but not "known" then it is a checkpoint without
// available chain data so we should not cascade it yet
sections = c.knownSections
}
sections := min(c.knownSections, c.storedSections)
if sections > 0 {
indexer.newHead(sections*c.sectionSize-1, false)
}

@@ -251,10 +251,7 @@ func (ae *AccessEvents) CodeChunksRangeGas(contractAddr common.Address, startPC,
return 0
}
endPC := startPC + size
if endPC > codeLen {
endPC = codeLen
}
endPC := min(startPC+size, codeLen)
if endPC > 0 {
endPC -= 1 // endPC is the last bytecode that will be touched.
}

@@ -101,10 +101,7 @@ func (indexer *txIndexer) run(tail *uint64, head uint64, stop chan struct{}, don
// It can happen when chain is rewound to a historical point which
// is even lower than the indexes tail, recap the indexing target
// to new head to avoid reading non-existent block bodies.
end := *tail
if end > head+1 {
end = head + 1
}
end := min(*tail, head+1)
rawdb.IndexTransactions(indexer.db, 0, end, stop, true)
}
return

@@ -613,14 +613,8 @@ func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint6
if txs[i].evictionExecTip.Cmp(txs[i].execTipCap) > 0 {
txs[i].evictionExecTip = txs[i].execTipCap
}
txs[i].evictionExecFeeJumps = txs[i-1].evictionExecFeeJumps
if txs[i].evictionExecFeeJumps > txs[i].basefeeJumps {
txs[i].evictionExecFeeJumps = txs[i].basefeeJumps
}
txs[i].evictionBlobFeeJumps = txs[i-1].evictionBlobFeeJumps
if txs[i].evictionBlobFeeJumps > txs[i].blobfeeJumps {
txs[i].evictionBlobFeeJumps = txs[i].blobfeeJumps
}
txs[i].evictionExecFeeJumps = min(txs[i-1].evictionExecFeeJumps, txs[i].basefeeJumps)
txs[i].evictionBlobFeeJumps = min(txs[i-1].evictionBlobFeeJumps, txs[i].blobfeeJumps)
continue
}
// Sanity check that there's no double nonce. This case would generally
@@ -1418,14 +1412,8 @@ func (p *BlobPool) add(tx *types.Transaction) (err error) {
if txs[i].evictionExecTip.Cmp(txs[i].execTipCap) > 0 {
txs[i].evictionExecTip = txs[i].execTipCap
}
txs[i].evictionExecFeeJumps = txs[i-1].evictionExecFeeJumps
if txs[i].evictionExecFeeJumps > txs[i].basefeeJumps {
txs[i].evictionExecFeeJumps = txs[i].basefeeJumps
}
txs[i].evictionBlobFeeJumps = txs[i-1].evictionBlobFeeJumps
if txs[i].evictionBlobFeeJumps > txs[i].blobfeeJumps {
txs[i].evictionBlobFeeJumps = txs[i].blobfeeJumps
}
txs[i].evictionExecFeeJumps = min(txs[i-1].evictionExecFeeJumps, txs[i].basefeeJumps)
txs[i].evictionBlobFeeJumps = min(txs[i-1].evictionBlobFeeJumps, txs[i].blobfeeJumps)
}
// Update the eviction heap with the new information:
// - If the transaction is from a new account, add it to the heap

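The eviction metadata is a running minimum: each transaction's effective fee jumps are the smaller of its own value and its predecessor's in the nonce sequence, so the whole account is judged by its weakest link. A small sketch of that idea in isolation (not the pool's actual data structures):

package main

import "fmt"

// runningMin mirrors the eviction update above: entry i carries the minimum
// of its own jumps and everything before it in the nonce sequence.
func runningMin(jumps []float64) []float64 {
    out := make([]float64, len(jumps))
    for i, j := range jumps {
        if i == 0 {
            out[i] = j
            continue
        }
        out[i] = min(out[i-1], j)
    }
    return out
}

func main() {
    fmt.Println(runningMin([]float64{10.5, 12.0, 8.25, 9.0})) // [10.5 10.5 8.25 8.25]
}
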
@@ -345,18 +345,12 @@ func verifyPoolInternals(t *testing.T, pool *BlobPool) {
t.Errorf("addr %v, tx %d eviction execution tip mismatch: have %d, want %d", addr, i, txs[i].evictionExecTip, wantExecTip)
}
wantExecFeeJumps := txs[i-1].evictionExecFeeJumps
if wantExecFeeJumps > txs[i].basefeeJumps {
wantExecFeeJumps = txs[i].basefeeJumps
}
wantExecFeeJumps := min(txs[i-1].evictionExecFeeJumps, txs[i].basefeeJumps)
if math.Abs(txs[i].evictionExecFeeJumps-wantExecFeeJumps) > 0.001 {
t.Errorf("addr %v, tx %d eviction execution fee jumps mismatch: have %f, want %f", addr, i, txs[i].evictionExecFeeJumps, wantExecFeeJumps)
}
wantBlobFeeJumps := txs[i-1].evictionBlobFeeJumps
if wantBlobFeeJumps > txs[i].blobfeeJumps {
wantBlobFeeJumps = txs[i].blobfeeJumps
}
wantBlobFeeJumps := min(txs[i-1].evictionBlobFeeJumps, txs[i].blobfeeJumps)
if math.Abs(txs[i].evictionBlobFeeJumps-wantBlobFeeJumps) > 0.001 {
t.Errorf("addr %v, tx %d eviction blob fee jumps mismatch: have %f, want %f", addr, i, txs[i].evictionBlobFeeJumps, wantBlobFeeJumps)
}

@@ -94,14 +94,8 @@ func (h *evictHeap) Less(i, j int) bool {
lastI := txsI[len(txsI)-1]
lastJ := txsJ[len(txsJ)-1]
prioI := evictionPriority(h.basefeeJumps, lastI.evictionExecFeeJumps, h.blobfeeJumps, lastI.evictionBlobFeeJumps)
if prioI > 0 {
prioI = 0
}
prioJ := evictionPriority(h.basefeeJumps, lastJ.evictionExecFeeJumps, h.blobfeeJumps, lastJ.evictionBlobFeeJumps)
if prioJ > 0 {
prioJ = 0
}
prioI := min(evictionPriority(h.basefeeJumps, lastI.evictionExecFeeJumps, h.blobfeeJumps, lastI.evictionBlobFeeJumps), 0)
prioJ := min(evictionPriority(h.basefeeJumps, lastJ.evictionExecFeeJumps, h.blobfeeJumps, lastJ.evictionBlobFeeJumps), 0)
if prioI == prioJ {
return lastI.evictionExecTip.Lt(lastJ.evictionExecTip)
}

@@ -57,10 +57,7 @@ func getData(data []byte, start uint64, size uint64) []byte {
if start > length {
start = length
}
end := start + size
if end > length {
end = length
}
end := min(start+size, length)
return common.RightPadBytes(data[start:end], int(size))
}
@@ -69,10 +66,7 @@ func getDataAndAdjustedBounds(data []byte, start uint64, size uint64) (codeCopyP
if start > length {
start = length
}
end := start + size
if end > length {
end = length
}
end := min(start+size, length)
return common.RightPadBytes(data[start:end], int(size)), start, end - start
}

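getData implements EVM-style reads: the start is clamped to the input length, the end is clamped with min, and the result is right-padded with zero bytes back to the requested size, so out-of-range reads yield zeroes instead of a panic. A self-contained sketch with a local helper standing in for common.RightPadBytes:

package main

import "fmt"

// rightPad stands in for common.RightPadBytes: extend b with zero bytes to length l.
func rightPad(b []byte, l int) []byte {
    if len(b) >= l {
        return b
    }
    padded := make([]byte, l)
    copy(padded, b)
    return padded
}

// getDataSketch mirrors getData above: reads past the end come back as zeroes.
func getDataSketch(data []byte, start, size uint64) []byte {
    length := uint64(len(data))
    if start > length {
        start = length
    }
    end := min(start+size, length)
    return rightPad(data[start:end], int(size))
}

func main() {
    fmt.Println(getDataSketch([]byte{1, 2, 3}, 2, 4)) // [3 0 0 0]: one real byte, three padded zeroes
}
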
@@ -170,10 +170,7 @@ func benchmarkPrecompiled(addr string, test precompiledTest, bench *testing.B) {
res, _, err = RunPrecompiledContract(p, data, reqGas, nil)
}
bench.StopTimer()
elapsed := uint64(time.Since(start))
if elapsed < 1 {
elapsed = 1
}
elapsed := max(uint64(time.Since(start)), 1)
gasUsed := reqGas * uint64(bench.N)
bench.ReportMetric(float64(reqGas), "gas/op")
// Keep it as uint64, multiply 100 to get two digit float later

@@ -1225,10 +1225,7 @@ func (api *ConsensusAPI) getBodiesByRange(start, count hexutil.Uint64) ([]*engin
}
// limit count up until current
current := api.eth.BlockChain().CurrentBlock().Number.Uint64()
last := uint64(start) + uint64(count) - 1
if last > current {
last = current
}
last := min(uint64(start)+uint64(count)-1, current)
bodies := make([]*engine.ExecutionPayloadBody, 0, uint64(count))
for i := uint64(start); i <= last; i++ {
block := api.eth.BlockChain().GetBlockByNumber(i)

@@ -676,10 +676,7 @@ func (d *Downloader) processHeaders(origin uint64) error {
default:
}
// Select the next chunk of headers to import
limit := maxHeadersProcess
if limit > len(headers) {
limit = len(headers)
}
limit := min(maxHeadersProcess, len(headers))
chunkHeaders := headers[:limit]
chunkHashes := hashes[:limit]

@@ -103,30 +103,21 @@ func (p *peerConnection) UpdateReceiptRate(delivered int, elapsed time.Duration)
// HeaderCapacity retrieves the peer's header download allowance based on its
// previously discovered throughput.
func (p *peerConnection) HeaderCapacity(targetRTT time.Duration) int {
cap := p.rates.Capacity(eth.BlockHeadersMsg, targetRTT)
if cap > MaxHeaderFetch {
cap = MaxHeaderFetch
}
cap := min(p.rates.Capacity(eth.BlockHeadersMsg, targetRTT), MaxHeaderFetch)
return cap
}
// BodyCapacity retrieves the peer's body download allowance based on its
// previously discovered throughput.
func (p *peerConnection) BodyCapacity(targetRTT time.Duration) int {
cap := p.rates.Capacity(eth.BlockBodiesMsg, targetRTT)
if cap > MaxBlockFetch {
cap = MaxBlockFetch
}
cap := min(p.rates.Capacity(eth.BlockBodiesMsg, targetRTT), MaxBlockFetch)
return cap
}
// ReceiptCapacity retrieves the peers receipt download allowance based on its
// previously discovered throughput.
func (p *peerConnection) ReceiptCapacity(targetRTT time.Duration) int {
cap := p.rates.Capacity(eth.ReceiptsMsg, targetRTT)
if cap > MaxReceiptFetch {
cap = MaxReceiptFetch
}
cap := min(p.rates.Capacity(eth.ReceiptsMsg, targetRTT), MaxReceiptFetch)
return cap
}

@@ -328,10 +328,7 @@ func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool)
)
// proceed in batches
for i := 0; i < len(txs); i += 128 {
end := i + 128
if end > len(txs) {
end = len(txs)
}
end := min(i+128, len(txs))
var (
duplicate int64
underpriced int64

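Enqueue walks the delivered transactions in fixed batches of 128, and min keeps the final batch from running past the end of the slice. The pattern as a standalone sketch:

package main

import "fmt"

func main() {
    items := make([]int, 300) // stand-in for the delivered transactions
    const batchSize = 128

    for i := 0; i < len(items); i += batchSize {
        end := min(i+batchSize, len(items)) // the last batch is whatever remains
        fmt.Printf("batch items[%d:%d] (%d items)\n", i, end, end-i)
    }
    // batch items[0:128] (128 items)
    // batch items[128:256] (128 items)
    // batch items[256:300] (44 items)
}
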
@@ -170,13 +170,10 @@ func Estimate(ctx context.Context, call *core.Message, opts *Options, gasCap uin
break
}
}
mid := (hi + lo) / 2
if mid > lo*2 {
// Most txs don't need much higher gas limit than their gas used, and most txs don't
// require near the full block limit of gas, so the selection of where to bisect the
// range here is skewed to favor the low side.
mid = lo * 2
}
mid := min((hi+lo)/2, lo*2)
failed, _, err = execute(ctx, call, opts, mid)
if err != nil {
// This should not happen under normal conditions since if we make it this far the

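The comment above carries the reasoning that survives the rewrite: most transactions need little more gas than they actually consume, so the bisection midpoint is capped at twice the known lower bound rather than the arithmetic middle. A toy sketch of that low-skewed binary search (the bounds and the success threshold are invented for illustration):

package main

import "fmt"

func main() {
    const needed = 60_000 // toy gas requirement: "execution" succeeds iff the limit covers it

    lo, hi := uint64(21_000), uint64(30_000_000) // failing floor, block-limit ceiling
    for lo+1 < hi {
        // Skew the probe toward the low side: never jump past twice the current lower bound.
        mid := min((hi+lo)/2, lo*2)
        if mid >= needed { // stands in for "execution succeeded with this limit"
            hi = mid
        } else {
            lo = mid
        }
    }
    fmt.Println(hi) // 60000: converges on the smallest sufficient limit
}
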
@@ -143,10 +143,7 @@ func serviceNonContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBloc
}
func serviceContiguousBlockHeaderQuery(chain *core.BlockChain, query *GetBlockHeadersRequest) []rlp.RawValue {
count := query.Amount
if count > maxHeadersServe {
count = maxHeadersServe
}
count := min(query.Amount, maxHeadersServe)
if query.Origin.Hash == (common.Hash{}) {
// Number mode, just return the canon chain segment. The backend
// delivers in [N, N-1, N-2..] descending order, so we need to

@@ -248,10 +248,7 @@ func (api *API) traceChain(start, end *types.Block, config *TraceConfig, closed
reexec = *config.Reexec
}
blocks := int(end.NumberU64() - start.NumberU64())
threads := runtime.NumCPU()
if threads > blocks {
threads = blocks
}
threads := min(runtime.NumCPU(), blocks)
var (
pend = new(sync.WaitGroup)
ctx = context.Background()
@@ -653,10 +650,7 @@ func (api *API) traceBlockParallel(ctx context.Context, block *types.Block, stat
results = make([]*txTraceResult, len(txs))
pend sync.WaitGroup
)
threads := runtime.NumCPU()
if threads > len(txs) {
threads = len(txs)
}
threads := min(runtime.NumCPU(), len(txs))
jobs := make(chan *txTraceTask, threads)
for th := 0; th < threads; th++ {
pend.Add(1)

@@ -685,10 +685,7 @@ func (s *Service) reportHistory(conn *connWrapper, list []uint64) error {
} else {
// No indexes requested, send back the top ones
head := s.backend.CurrentHeader().Number.Int64()
start := head - historyUpdateRange + 1
if start < 0 {
start = 0
}
start := max(head-historyUpdateRange+1, 0)
for i := uint64(start); i <= uint64(head); i++ {
indexes = append(indexes, i)
}

@@ -365,10 +365,7 @@ func (d *dialScheduler) expireHistory() {
// freeDialSlots returns the number of free dial slots. The result can be negative
// when peers are connected while their task is still running.
func (d *dialScheduler) freeDialSlots() int {
slots := (d.maxDialPeers - d.dialPeers) * 2
if slots > d.maxActiveDials {
slots = d.maxActiveDials
}
slots := min((d.maxDialPeers-d.dialPeers)*2, d.maxActiveDials)
free := slots - len(d.dialing)
return free
}

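The scheduler allows two concurrent dial attempts per missing peer, capped at maxActiveDials; subtracting the dials already in flight can legitimately go negative, as the comment above notes. A small arithmetic sketch with hypothetical dialer state:

package main

import "fmt"

func main() {
    // Hypothetical state: 3 peers short of the target, a cap of 16
    // concurrent dials, and 8 dial tasks still running.
    maxDialPeers, dialPeers := 10, 7
    maxActiveDials := 16
    dialing := 8

    slots := min((maxDialPeers-dialPeers)*2, maxActiveDials) // 6: two slots per missing peer
    free := slots - dialing
    fmt.Println(free) // -2: more dials in flight than currently needed
}
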
@@ -202,10 +202,7 @@ func (t *Tree) build(entries []entry) entry {
}
var subtrees []entry
for len(entries) > 0 {
n := maxChildren
if len(entries) < n {
n = len(entries)
}
n := min(len(entries), maxChildren)
sub := t.build(entries[:n])
entries = entries[n:]
subtrees = append(subtrees, sub)

@@ -136,10 +136,7 @@ func (r *eofSignal) Read(buf []byte) (int, error) {
return 0, io.EOF
}
max := len(buf)
if int(r.count) < len(buf) {
max = int(r.count)
}
max := min(int(r.count), len(buf))
n, err := r.wrapped.Read(buf[:max])
r.count -= uint32(n)
if (err != nil || r.count == 0) && r.eof != nil {

@@ -378,10 +378,7 @@ func (t *Trackers) TargetTimeout() time.Duration {
// targetTimeout is the internal lockless version of TargetTimeout to be used
// during QoS tuning.
func (t *Trackers) targetTimeout() time.Duration {
timeout := time.Duration(ttlScaling * float64(t.roundtrip) / t.confidence)
if timeout > t.OverrideTTLLimit {
timeout = t.OverrideTTLLimit
}
timeout := min(time.Duration(ttlScaling*float64(t.roundtrip)/t.confidence), t.OverrideTTLLimit)
return timeout
}
@@ -433,10 +430,7 @@ func (t *Trackers) detune() {
// Otherwise drop the confidence factor
peers := float64(len(t.trackers))
t.confidence = t.confidence * (peers - 1) / peers
if t.confidence < rttMinConfidence {
t.confidence = rttMinConfidence
}
t.confidence = max(t.confidence*(peers-1)/peers, rttMinConfidence)
t.log.Debug("Relaxed msgrate QoS values", "rtt", t.roundtrip, "confidence", t.confidence, "ttl", t.targetTimeout())
}

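The tuned timeout scales the measured round-trip time and divides by the confidence in that estimate, then caps the result at a hard ceiling; detuning decays confidence toward, but never below, a minimum floor. A small arithmetic sketch with placeholder constants (ttlScaling, the ceiling and the floor here are illustrative, not geth's actual values):

package main

import (
    "fmt"
    "time"
)

func main() {
    const (
        ttlScaling       = 3.0              // placeholder scaling factor
        overrideTTLLimit = 10 * time.Second // placeholder hard ceiling
        rttMinConfidence = 0.1              // placeholder confidence floor
    )
    roundtrip := 500 * time.Millisecond
    confidence := 0.25 // low confidence stretches the timeout

    timeout := min(time.Duration(ttlScaling*float64(roundtrip)/confidence), overrideTTLLimit)
    fmt.Println(timeout) // 6s: 3 * 0.5s / 0.25, still under the ceiling

    // A detune step shrinks confidence further, but never below the floor.
    peers := 2.0
    confidence = max(confidence*(peers-1)/peers, rttMinConfidence)
    fmt.Println(confidence) // 0.125
}
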
@@ -344,10 +344,7 @@ func (s *sharedUDPConn) ReadFromUDPAddrPort(b []byte) (n int, addr netip.AddrPor
if !ok {
return 0, netip.AddrPort{}, errors.New("connection was closed")
}
l := len(packet.Data)
if l > len(b) {
l = len(b)
}
l := min(len(packet.Data), len(b))
copy(b[:l], packet.Data[:l])
return l, packet.Addr, nil
}

@@ -314,10 +314,7 @@ func decodeSliceElems(s *Stream, val reflect.Value, elemdec decoder) error {
for ; ; i++ {
// grow slice if necessary
if i >= val.Cap() {
newcap := val.Cap() + val.Cap()/2
if newcap < 4 {
newcap = 4
}
newcap := max(val.Cap()+val.Cap()/2, 4)
newv := reflect.MakeSlice(val.Type(), val.Len(), newcap)
reflect.Copy(newv, val)
val.Set(newv)

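When the slice being decoded into runs out of capacity, the decoder grows it by half again, with a floor of 4 so small slices do not reallocate on every element. A sketch of the capacity sequence this rule produces:

package main

import "fmt"

func main() {
    // Capacity growth rule used above: half-again growth with a floor of 4.
    capacity := 0
    for i := 0; i < 6; i++ {
        capacity = max(capacity+capacity/2, 4)
        fmt.Print(capacity, " ")
    }
    fmt.Println() // 4 6 9 13 19 28
}
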
@@ -351,10 +351,7 @@ func ChunkifyCode(code []byte) ChunkedCode {
chunks := make([]byte, chunkCount*32)
for i := 0; i < chunkCount; i++ {
// number of bytes to copy, 31 unless the end of the code has been reached.
end := 31 * (i + 1)
if len(code) < end {
end = len(code)
}
end := min(len(code), 31*(i+1))
copy(chunks[i*32+1:], code[31*i:end]) // copy the code itself
// chunk offset = taken from the last chunk.

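ChunkifyCode lays contract code out in 32-byte chunks: one leading metadata byte followed by up to 31 bytes of code, with min clamping the shorter final chunk. A self-contained sketch of just the copy layout (the metadata byte is left at zero here; the real implementation fills it from the preceding chunk's push data, which is what the trailing comment refers to):

package main

import "fmt"

// chunkify copies code into 32-byte chunks: a metadata byte (zero in this
// sketch) followed by up to 31 bytes of code.
func chunkify(code []byte) []byte {
    chunkCount := (len(code) + 30) / 31 // ceiling division
    chunks := make([]byte, chunkCount*32)
    for i := 0; i < chunkCount; i++ {
        end := min(len(code), 31*(i+1)) // the last chunk may hold fewer than 31 bytes
        copy(chunks[i*32+1:], code[31*i:end])
    }
    return chunks
}

func main() {
    code := make([]byte, 40)         // 40 bytes of code
    fmt.Println(len(chunkify(code))) // 64: two 32-byte chunks
}
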
@@ -556,10 +556,8 @@ func writeHistory(writer ethdb.AncientWriter, dl *diffLayer) error {
// and performs the callback on each item.
func checkHistories(reader ethdb.AncientReader, start, count uint64, check func(*meta) error) error {
for count > 0 {
number := count
if number > 10000 {
number = 10000 // split the big read into small chunks
}
// Read a batch of meta objects at maximum 10000 items.
number := min(count, 10_000)
blobs, err := rawdb.ReadStateHistoryMetaList(reader, start, number)
if err != nil {
return err