// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package flowcontrol implements a client side flow control mechanism
package flowcontrol

import (
	"fmt"
	"math"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/log"
)

const (
	// fcTimeConst is the time constant applied for MinRecharge during linear
	// buffer recharge period
	fcTimeConst = time.Millisecond
	// DecParamDelay is applied at server side when decreasing capacity in order to
	// avoid a buffer underrun error due to requests sent by the client before
	// receiving the capacity update announcement
	DecParamDelay = time.Second * 2
	// keepLogs is the duration of keeping logs; logging is not used if zero
	keepLogs = 0
)

// ServerParams are the flow control parameters specified by a server for a client
//
// Note: a server can assign different amounts of capacity to each client by giving
// different parameters to them.
type ServerParams struct {
	BufLimit, MinRecharge uint64
}
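
// As an illustration (the figures are arbitrary, not defaults): with
// BufLimit = 300000 and MinRecharge = 50 a client may burst up to 300000 cost
// units at once, while its buffer refills linearly at 50 cost units per
// fcTimeConst (one millisecond), so a fully drained buffer recharges in
// 300000/50 = 6000 ms.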

// scheduledUpdate represents a delayed flow control parameter update
type scheduledUpdate struct {
	time   mclock.AbsTime
	params ServerParams
}

// ClientNode is the flow control system's representation of a client
// (used in server mode only)
type ClientNode struct {
	params         ServerParams
	bufValue       int64
	lastTime       mclock.AbsTime
	updateSchedule []scheduledUpdate
	sumCost        uint64            // sum of req costs received from this client
	accepted       map[uint64]uint64 // value = sumCost after accepting the given req
	connected      bool
	lock           sync.Mutex
	cm             *ClientManager
	log            *logger
	cmNodeFields
}

// NewClientNode returns a new ClientNode
func NewClientNode(cm *ClientManager, params ServerParams) *ClientNode {
	node := &ClientNode{
		cm:        cm,
		params:    params,
		bufValue:  int64(params.BufLimit),
		lastTime:  cm.clock.Now(),
		accepted:  make(map[uint64]uint64),
		connected: true,
	}
	if keepLogs > 0 {
		node.log = newLogger(keepLogs)
	}
	cm.connect(node)
	return node
}

// Disconnect should be called when a client is disconnected
func (node *ClientNode) Disconnect() {
	node.lock.Lock()
	defer node.lock.Unlock()

	node.connected = false
	node.cm.disconnect(node)
}

// BufferStatus returns the current buffer value and limit
func (node *ClientNode) BufferStatus() (uint64, uint64) {
	node.lock.Lock()
	defer node.lock.Unlock()

	if !node.connected {
		return 0, 0
	}
	now := node.cm.clock.Now()
	node.update(now)
	node.cm.updateBuffer(node, 0, now)
	bv := node.bufValue
	if bv < 0 {
		bv = 0
	}
	return uint64(bv), node.params.BufLimit
}

// OneTimeCost subtracts the given amount from the node's buffer.
//
// Note: this call can take the buffer into the negative region internally.
// In this case zero buffer value is returned by exported calls and no requests
// are accepted.
func (node *ClientNode) OneTimeCost(cost uint64) {
	node.lock.Lock()
	defer node.lock.Unlock()

	now := node.cm.clock.Now()
	node.update(now)
	node.bufValue -= int64(cost)
	node.cm.updateBuffer(node, -int64(cost), now)
}

// Freeze notifies the client manager about a client freeze event in which case
// the total capacity allowance is slightly reduced.
func (node *ClientNode) Freeze() {
	node.lock.Lock()
	frozenCap := node.params.MinRecharge
	node.lock.Unlock()
	node.cm.reduceTotalCapacity(frozenCap)
}

// update recalculates the buffer value at a specified time while also performing
// scheduled flow control parameter updates if necessary
func (node *ClientNode) update(now mclock.AbsTime) {
	for len(node.updateSchedule) > 0 && node.updateSchedule[0].time <= now {
		node.recalcBV(node.updateSchedule[0].time)
		node.updateParams(node.updateSchedule[0].params, now)
		node.updateSchedule = node.updateSchedule[1:]
	}
	node.recalcBV(now)
}

// recalcBV recalculates the buffer value at a specified time
func (node *ClientNode) recalcBV(now mclock.AbsTime) {
	dt := uint64(now - node.lastTime)
	if now < node.lastTime {
		dt = 0
	}
	node.bufValue += int64(node.params.MinRecharge * dt / uint64(fcTimeConst))
	if node.bufValue > int64(node.params.BufLimit) {
		node.bufValue = int64(node.params.BufLimit)
	}
	if node.log != nil {
		node.log.add(now, fmt.Sprintf("updated bv=%d MRR=%d BufLimit=%d", node.bufValue, node.params.MinRecharge, node.params.BufLimit))
	}
	node.lastTime = now
}
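
// Worked example for recalcBV (illustrative numbers): mclock.AbsTime is measured in
// nanoseconds and fcTimeConst is one millisecond, so with MinRecharge = 100 and an
// elapsed dt of 10 ms (10,000,000 ns) the buffer grows by 100*10,000,000/1,000,000 =
// 1000 cost units, capped at BufLimit.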

// UpdateParams updates the flow control parameters of a client node
func (node *ClientNode) UpdateParams(params ServerParams) {
	node.lock.Lock()
	defer node.lock.Unlock()

	now := node.cm.clock.Now()
	node.update(now)
	if params.MinRecharge >= node.params.MinRecharge {
		node.updateSchedule = nil
		node.updateParams(params, now)
	} else {
		for i, s := range node.updateSchedule {
			if params.MinRecharge >= s.params.MinRecharge {
				s.params = params
				node.updateSchedule = node.updateSchedule[:i+1]
				return
			}
		}
		node.updateSchedule = append(node.updateSchedule, scheduledUpdate{time: now.Add(DecParamDelay), params: params})
	}
}
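
// Note: as a consequence of the branching above, an update that does not decrease
// MinRecharge takes effect immediately, while a decrease is queued and only applied
// DecParamDelay (2 s) later, so requests the client has already sent against the
// old, more generous parameters cannot cause a spurious buffer underrun before it
// learns about the change.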

// updateParams updates the flow control parameters of the node
func (node *ClientNode) updateParams(params ServerParams, now mclock.AbsTime) {
	diff := int64(params.BufLimit - node.params.BufLimit)
	if diff > 0 {
		node.bufValue += diff
	} else if node.bufValue > int64(params.BufLimit) {
		node.bufValue = int64(params.BufLimit)
	}
	node.cm.updateParams(node, params, now)
}

// AcceptRequest returns whether a new request can be accepted and the missing
// buffer amount if it was rejected due to a buffer underrun. If accepted, maxCost
// is deducted from the flow control buffer.
func (node *ClientNode) AcceptRequest(reqID, index, maxCost uint64) (accepted bool, bufShort uint64, priority int64) {
	node.lock.Lock()
	defer node.lock.Unlock()

	now := node.cm.clock.Now()
	node.update(now)
	if int64(maxCost) > node.bufValue {
		if node.log != nil {
			node.log.add(now, fmt.Sprintf("rejected reqID=%d bv=%d maxCost=%d", reqID, node.bufValue, maxCost))
			node.log.dump(now)
		}
		return false, maxCost - uint64(node.bufValue), 0
	}
	node.bufValue -= int64(maxCost)
	node.sumCost += maxCost
	if node.log != nil {
		node.log.add(now, fmt.Sprintf("accepted reqID=%d bv=%d maxCost=%d sumCost=%d", reqID, node.bufValue, maxCost, node.sumCost))
	}
	node.accepted[index] = node.sumCost
	return true, 0, node.cm.accepted(node, maxCost, now)
}

// RequestProcessed should be called when the request has been processed
func (node *ClientNode) RequestProcessed(reqID, index, maxCost, realCost uint64) uint64 {
	node.lock.Lock()
	defer node.lock.Unlock()

	now := node.cm.clock.Now()
	node.update(now)
	node.cm.processed(node, maxCost, realCost, now)
	bv := node.bufValue + int64(node.sumCost-node.accepted[index])
	if node.log != nil {
		node.log.add(now, fmt.Sprintf("processed reqID=%d bv=%d maxCost=%d realCost=%d sumCost=%d oldSumCost=%d reportedBV=%d", reqID, node.bufValue, maxCost, realCost, node.sumCost, node.accepted[index], bv))
	}
	delete(node.accepted, index)
	if bv < 0 {
		return 0
	}
	return uint64(bv)
}
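
// handleRequestSketch is an illustrative sketch, not used by the real server code,
// showing the intended call order around ClientNode for a single request: the
// estimated maxCost is charged up front by AcceptRequest, the request is served,
// and the corrected buffer value returned by RequestProcessed is reported back to
// the client in the reply. The serve callback and its returned realCost are
// assumptions made for this example only.
func handleRequestSketch(node *ClientNode, reqID, index, maxCost uint64, serve func() (realCost uint64)) (uint64, bool) {
	accepted, bufShort, _ := node.AcceptRequest(reqID, index, maxCost)
	if !accepted {
		// The client exceeded its buffer; a real server would answer with a buffer
		// underrun error carrying bufShort, the amount of missing buffer.
		_ = bufShort
		return 0, false
	}
	realCost := serve()
	if realCost > maxCost {
		realCost = maxCost // never charge more than the announced maxCost
	}
	// RequestProcessed refunds maxCost-realCost and returns the buffer value that
	// should be included in the reply message.
	return node.RequestProcessed(reqID, index, maxCost, realCost), true
}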

// ServerNode is the flow control system's representation of a server
// (used in client mode only)
type ServerNode struct {
	clock       mclock.Clock
	bufEstimate uint64
	bufRecharge bool
	lastTime    mclock.AbsTime
	params      ServerParams
	sumCost     uint64            // sum of req costs sent to this server
	pending     map[uint64]uint64 // value = sumCost after sending the given req
	log         *logger
	lock        sync.RWMutex
}

// NewServerNode returns a new ServerNode
func NewServerNode(params ServerParams, clock mclock.Clock) *ServerNode {
	node := &ServerNode{
		clock:       clock,
		bufEstimate: params.BufLimit,
		bufRecharge: false,
		lastTime:    clock.Now(),
		params:      params,
		pending:     make(map[uint64]uint64),
	}
	if keepLogs > 0 {
		node.log = newLogger(keepLogs)
	}
	return node
}

// UpdateParams updates the flow control parameters of the node
func (node *ServerNode) UpdateParams(params ServerParams) {
	node.lock.Lock()
	defer node.lock.Unlock()

	node.recalcBLE(mclock.Now())
	if params.BufLimit > node.params.BufLimit {
		node.bufEstimate += params.BufLimit - node.params.BufLimit
	} else {
		if node.bufEstimate > params.BufLimit {
			node.bufEstimate = params.BufLimit
		}
	}
	node.params = params
}

// recalcBLE recalculates the lowest estimate for the client's buffer value at
// the given server at the specified time
func (node *ServerNode) recalcBLE(now mclock.AbsTime) {
	if now < node.lastTime {
		return
	}
	if node.bufRecharge {
		dt := uint64(now - node.lastTime)
		node.bufEstimate += node.params.MinRecharge * dt / uint64(fcTimeConst)
		if node.bufEstimate >= node.params.BufLimit {
			node.bufEstimate = node.params.BufLimit
			node.bufRecharge = false
		}
	}
	node.lastTime = now
	if node.log != nil {
		node.log.add(now, fmt.Sprintf("updated bufEst=%d MRR=%d BufLimit=%d", node.bufEstimate, node.params.MinRecharge, node.params.BufLimit))
	}
}

// safetyMargin is added to the flow control waiting time when estimated buffer value is low
const safetyMargin = time.Millisecond

// CanSend returns the minimum waiting time required before sending a request
// with the given maximum estimated cost. Second return value is the relative
// estimated buffer level after sending the request (divided by BufLimit).
func (node *ServerNode) CanSend(maxCost uint64) (time.Duration, float64) {
	node.lock.RLock()
	defer node.lock.RUnlock()

	if node.params.BufLimit == 0 {
		return time.Duration(math.MaxInt64), 0
	}
	now := node.clock.Now()
	node.recalcBLE(now)
	maxCost += uint64(safetyMargin) * node.params.MinRecharge / uint64(fcTimeConst)
	if maxCost > node.params.BufLimit {
		maxCost = node.params.BufLimit
	}
	if node.bufEstimate >= maxCost {
		relBuf := float64(node.bufEstimate-maxCost) / float64(node.params.BufLimit)
		if node.log != nil {
			node.log.add(now, fmt.Sprintf("canSend bufEst=%d maxCost=%d true relBuf=%f", node.bufEstimate, maxCost, relBuf))
		}
		return 0, relBuf
	}
	timeLeft := time.Duration((maxCost - node.bufEstimate) * uint64(fcTimeConst) / node.params.MinRecharge)
	if node.log != nil {
		node.log.add(now, fmt.Sprintf("canSend bufEst=%d maxCost=%d false timeLeft=%v", node.bufEstimate, maxCost, timeLeft))
	}
	return timeLeft, 0
}
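
// Worked example for CanSend above (illustrative numbers): safetyMargin equals
// fcTimeConst, so the call reserves exactly one extra MinRecharge worth of cost.
// With MinRecharge = 50, bufEstimate = 1000 and an estimated request cost of 6000
// (and a BufLimit large enough that the cap does not apply), the adjusted maxCost
// is 6050 and the suggested waiting time is (6050-1000)*1ms/50 = 101 ms.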

// QueuedRequest should be called when the request has been assigned to the given
// server node, before putting it in the send queue. It is mandatory that requests
// are sent in the same order as the QueuedRequest calls are made.
func (node *ServerNode) QueuedRequest(reqID, maxCost uint64) {
	node.lock.Lock()
	defer node.lock.Unlock()

	now := node.clock.Now()
	node.recalcBLE(now)
	// Note: we do not know when requests actually arrive to the server so bufRecharge
	// is not turned on here if buffer was full; in this case it is going to be turned
	// on by the first reply's bufValue feedback
	if node.bufEstimate >= maxCost {
		node.bufEstimate -= maxCost
	} else {
		log.Error("Queued request with insufficient buffer estimate")
		node.bufEstimate = 0
	}
	node.sumCost += maxCost
	node.pending[reqID] = node.sumCost
	if node.log != nil {
		node.log.add(now, fmt.Sprintf("queued reqID=%d bufEst=%d maxCost=%d sumCost=%d", reqID, node.bufEstimate, maxCost, node.sumCost))
	}
}

// ReceivedReply adjusts estimated buffer value according to the value included in
// the latest request reply.
func (node *ServerNode) ReceivedReply(reqID, bv uint64) {
	node.lock.Lock()
	defer node.lock.Unlock()

	now := node.clock.Now()
	node.recalcBLE(now)
	if bv > node.params.BufLimit {
		bv = node.params.BufLimit
	}
	sc, ok := node.pending[reqID]
	if !ok {
		return
	}
	delete(node.pending, reqID)
	cc := node.sumCost - sc
	newEstimate := uint64(0)
	if bv > cc {
		newEstimate = bv - cc
	}
	if newEstimate > node.bufEstimate {
		// Note: we never reduce the buffer estimate based on the reported value because
		// this can only happen because of the delayed delivery of the latest reply.
		// The lowest estimate based on the previous reply can still be considered valid.
		node.bufEstimate = newEstimate
	}

	node.bufRecharge = node.bufEstimate < node.params.BufLimit
	node.lastTime = now
	if node.log != nil {
		node.log.add(now, fmt.Sprintf("received reqID=%d bufEst=%d reportedBv=%d sumCost=%d oldSumCost=%d", reqID, node.bufEstimate, bv, node.sumCost, sc))
	}
}
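
// sendRequestSketch is an illustrative sketch, not part of the original API, showing
// the client-side call order around ServerNode: wait for the duration suggested by
// CanSend, reserve the estimated cost with QueuedRequest before the request goes out,
// then feed the buffer value reported in the reply back through ReceivedReply. The
// send callback, which transmits the request and returns the reply's bufValue field,
// is an assumption made for this example only; a real client would typically
// re-evaluate CanSend after waiting instead of sending unconditionally.
func sendRequestSketch(node *ServerNode, reqID, maxCost uint64, send func() (replyBV uint64)) {
	wait, _ := node.CanSend(maxCost)
	if wait > 0 {
		time.Sleep(wait) // simple blocking wait, for illustration only
	}
	node.QueuedRequest(reqID, maxCost)
	bv := send()
	node.ReceivedReply(reqID, bv)
}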

// ResumeFreeze cleans all pending requests and sets the buffer estimate to the
// reported value after resuming from a frozen state
func (node *ServerNode) ResumeFreeze(bv uint64) {
	node.lock.Lock()
	defer node.lock.Unlock()

	for reqID := range node.pending {
		delete(node.pending, reqID)
	}
	now := node.clock.Now()
	node.recalcBLE(now)
	if bv > node.params.BufLimit {
		bv = node.params.BufLimit
	}
	node.bufEstimate = bv
	node.bufRecharge = node.bufEstimate < node.params.BufLimit
	node.lastTime = now
	if node.log != nil {
		node.log.add(now, fmt.Sprintf("unfreeze bv=%d sumCost=%d", bv, node.sumCost))
	}
}

// DumpLogs dumps the event log if logging is used
func (node *ServerNode) DumpLogs() {
	node.lock.Lock()
	defer node.lock.Unlock()

	if node.log != nil {
		node.log.dump(node.clock.Now())
	}
}