Merge branch 'tor-gitlab/mr/613' into maint-0.4.7

David Goulet 2022-08-11 09:26:59 -04:00
commit 0564a19f6a
5 changed files with 287 additions and 135 deletions

changes/bug40642 Normal file
View File

@@ -0,0 +1,9 @@
o Major bugfixes (congestion control):
- Implement RFC3742 Limited Slow Start. Congestion control was
overshooting the congestion window during slow start, particularly for
onion service activity. With this fix, we now update the congestion
window more often during slow start, as well as dampen the exponential
growth when the congestion window grows above a capping parameter.
This should reduce the memory increases guard relays were seeing, as
well as allow us to set lower queue limits to defend against
ongoing DoS attacks. Fixes bug 40642; bugfix on 0.4.7.5-alpha.

View File

@@ -42,16 +42,19 @@
#define CC_ALG_DFLT_ALWAYS (CC_ALG_VEGAS)
#define CWND_INC_DFLT (TLS_RECORD_MAX_CELLS)
#define CWND_INC_PCT_SS_DFLT (50)
#define CWND_INC_PCT_SS_DFLT (100)
#define CWND_INC_RATE_DFLT (1)
#define CWND_MIN_DFLT (SENDME_INC_DFLT)
#define CWND_MIN_DFLT (2*SENDME_INC_DFLT)
#define CWND_MAX_DFLT (INT32_MAX)
#define BWE_SENDME_MIN_DFLT (5)
#define N_EWMA_CWND_PCT_DFLT (50)
#define N_EWMA_MAX_DFLT (10)
#define N_EWMA_SS_DFLT (2)
#define RTT_RESET_PCT_DFLT (100)
/* BDP algorithms for each congestion control algorithm use the piecewise
 * estimator. See section 3.1.4 of proposal 324. */
@@ -107,11 +110,22 @@ static uint8_t n_ewma_cwnd_pct;
*/
static uint8_t n_ewma_max;
/**
* Maximum number N for the N-count EWMA averaging of RTT in Slow Start.
*/
static uint8_t n_ewma_ss;
/**
* Minimum number of sendmes before we begin BDP estimates
*/
static uint8_t bwe_sendme_min;
/**
* Percentage of the current RTT to use when resetting the minimum RTT
* for a circuit. (RTT is reset when the cwnd hits cwnd_min).
*/
static uint8_t rtt_reset_pct;
/**
* Update global congestion-control-related consensus parameter values, on
* every consensus update.
@@ -157,6 +171,14 @@ congestion_control_new_consensus_params(const networkstatus_t *ns)
CWND_MAX_MIN,
CWND_MAX_MAX);
#define RTT_RESET_PCT_MIN (0)
#define RTT_RESET_PCT_MAX (100)
rtt_reset_pct =
networkstatus_get_param(NULL, "cc_rtt_reset_pct",
RTT_RESET_PCT_DFLT,
RTT_RESET_PCT_MIN,
RTT_RESET_PCT_MAX);
#define SENDME_INC_MIN 1
#define SENDME_INC_MAX (255)
cc_sendme_inc =
@@ -196,6 +218,14 @@ congestion_control_new_consensus_params(const networkstatus_t *ns)
N_EWMA_MAX_DFLT,
N_EWMA_MAX_MIN,
N_EWMA_MAX_MAX);
#define N_EWMA_SS_MIN 2
#define N_EWMA_SS_MAX (INT32_MAX)
n_ewma_ss =
networkstatus_get_param(NULL, "cc_ewma_ss",
N_EWMA_SS_DFLT,
N_EWMA_SS_MIN,
N_EWMA_SS_MAX);
}
/**
@@ -452,8 +482,18 @@ dequeue_timestamp(smartlist_t *timestamps_u64_usecs)
static inline uint64_t
n_ewma_count(const congestion_control_t *cc)
{
uint64_t ewma_cnt = MIN(CWND_UPDATE_RATE(cc)*n_ewma_cwnd_pct/100, n_ewma_max);
uint64_t ewma_cnt = 0;
if (cc->in_slow_start) {
/* In slow-start, we check the Vegas condition every sendme,
* so much lower ewma counts are needed. */
ewma_cnt = n_ewma_ss;
} else {
/* After slow-start, we check the Vegas condition only once per
* CWND, so it is better to average over longer periods. */
ewma_cnt = MIN(CWND_UPDATE_RATE(cc)*n_ewma_cwnd_pct/100,
n_ewma_max);
}
ewma_cnt = MAX(ewma_cnt, 2);
return ewma_cnt;
}
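As a rough illustration of the two branches (the update rates here are hypothetical; the parameter defaults are the ones defined earlier in this diff):
/* Worked example, assuming cc_ewma_ss=2, cc_ewma_cwnd_pct=50,
 * cc_ewma_max=10 (the defaults above):
 *   in slow start:              ewma_cnt = n_ewma_ss = 2
 *   CWND_UPDATE_RATE(cc) == 16: ewma_cnt = MIN(16*50/100, 10) = 8
 *   CWND_UPDATE_RATE(cc) == 40: ewma_cnt = MIN(40*50/100, 10) = 10 (capped)
 * The final MAX(ewma_cnt, 2) clamp ensures we always average at least
 * two sendmes. */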
@@ -833,8 +873,25 @@ congestion_control_update_circuit_rtt(congestion_control_t *cc,
cc->max_rtt_usec = rtt;
}
if (cc->min_rtt_usec == 0 || rtt < cc->min_rtt_usec) {
cc->min_rtt_usec = rtt;
if (cc->min_rtt_usec == 0) {
// If we do not have a min_rtt yet, use current ewma
cc->min_rtt_usec = cc->ewma_rtt_usec;
} else if (cc->cwnd == cc->cwnd_min) {
// Raise min rtt if cwnd hit cwnd_min. This gets us out of a wedge state
// if we hit cwnd_min due to an abnormally low rtt.
uint64_t new_rtt = percent_max_mix(cc->ewma_rtt_usec, cc->min_rtt_usec,
rtt_reset_pct);
static ratelim_t rtt_notice_limit = RATELIM_INIT(300);
log_fn_ratelim(&rtt_notice_limit, LOG_NOTICE, LD_CIRC,
"Resetting circ RTT from %"PRIu64" to %"PRIu64" due to low cwnd",
cc->min_rtt_usec/1000, new_rtt/1000);
cc->min_rtt_usec = new_rtt;
} else if (cc->ewma_rtt_usec < cc->min_rtt_usec) {
// Using the EWMA for min instead of current RTT helps average out
// effects from other conns
cc->min_rtt_usec = cc->ewma_rtt_usec;
}
return rtt;
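A worked example of the new cwnd_min reset branch, using invented RTT values and the percent_max_mix() helper added later in this diff:
/* Suppose min_rtt_usec == 10000, ewma_rtt_usec == 50000, and the cwnd
 * has collapsed to cwnd_min:
 *   rtt_reset_pct == 100 (default): new_rtt = MAX(50000, 10000) = 50000
 *   rtt_reset_pct == 50:            new_rtt = 50*50000/100 + 50*10000/100
 *                                           = 30000
 * With the default, the min RTT is fully reset to the current EWMA,
 * unsticking a circuit pinned at cwnd_min by a stale, abnormally low
 * minimum. */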

View File

@@ -145,6 +145,29 @@ n_count_ewma(uint64_t curr, uint64_t prev, uint64_t N)
return (2*curr + (N-1)*prev)/(N+1);
}
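For a concrete (invented) sample, one EWMA step with N == 4:
/* n_count_ewma(120, 100, 4) == (2*120 + 3*100)/5 == 108;
 * the new sample carries weight 2/(N+1), i.e. 40% here. */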
/**
* Helper function that computes a percentage-weighted average of
* two values. The pct_max argument specifies the percentage weight given
* to the maximum of a and b when computing this weighted average.
*
* This parameterization also lets the function act as MIN() or MAX():
* it is MIN() when pct_max==0; it is MAX() when pct_max==100; it is avg()
* when pct_max==50; and it is a weighted average for values in between.
*/
static inline uint64_t
percent_max_mix(uint64_t a, uint64_t b, uint8_t pct_max)
{
uint64_t max = MAX(a, b);
uint64_t min = MIN(a, b);
if (BUG(pct_max > 100)) {
return max;
}
return pct_max*max/100 + (100-pct_max)*min/100;
}
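A few hypothetical calls illustrate the MIN()/MAX()/avg() behavior described above:
/* percent_max_mix(20, 60, 0)   == 20  (MIN)
 * percent_max_mix(20, 60, 100) == 60  (MAX)
 * percent_max_mix(20, 60, 50)  == 40  (avg)
 * percent_max_mix(20, 60, 75)  == 50  (75% weight on the max) */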
/* Private section starts. */
#ifdef TOR_CONGESTION_CONTROL_PRIVATE

View File

@@ -97,6 +97,10 @@ struct westwood_params_t {
/** Vegas algorithm parameters. */
struct vegas_params_t {
/** The slow-start cwnd cap for RFC3742 */
uint32_t ss_cwnd_cap;
/** The maximum slow-start cwnd */
uint32_t ss_cwnd_max;
/** The queue use allowed before we exit slow start */
uint16_t gamma;
/** The queue use below which we increment cwnd */
@@ -227,6 +231,16 @@ static inline uint64_t CWND_UPDATE_RATE(const struct congestion_control_t *cc)
}
}
/**
* Gives us the number of SENDMEs in a CWND, rounded.
*/
static inline uint64_t SENDME_PER_CWND(const struct congestion_control_t *cc)
{
/* We add sendme_inc/2 before dividing so the result rounds to the
 * nearest integer number of acks */
return ((cc->cwnd + cc->sendme_inc/2)/cc->sendme_inc);
}
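For instance, with a hypothetical sendme_inc of 50 cells:
/* cwnd == 155: SENDME_PER_CWND == (155+25)/50 == 3  (3.1 rounds down)
 * cwnd == 180: SENDME_PER_CWND == (180+25)/50 == 4  (3.6 rounds up) */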
/**
* Returns the amount to increment the congestion window each update,
* during slow start.

View File

@@ -26,56 +26,36 @@
#define OUTBUF_CELLS (2*TLS_RECORD_MAX_CELLS)
#define SS_CWND_MAX_DFLT (5000)
/* sbws circs are two hops, so params are based on 2 outbufs of cells */
#define VEGAS_ALPHA_SBWS_DFLT (2*OUTBUF_CELLS-TLS_RECORD_MAX_CELLS)
#define VEGAS_BETA_SBWS_DFLT (2*OUTBUF_CELLS)
#define VEGAS_BETA_SBWS_DFLT (2*OUTBUF_CELLS+TLS_RECORD_MAX_CELLS)
#define VEGAS_GAMMA_SBWS_DFLT (2*OUTBUF_CELLS)
#define VEGAS_DELTA_SBWS_DFLT (4*OUTBUF_CELLS)
#define VEGAS_SSCAP_SBWS_DFLT (400)
/* Exits are three hops, so params are based on 3 outbufs of cells */
#define VEGAS_ALPHA_EXIT_DFLT (3*OUTBUF_CELLS-TLS_RECORD_MAX_CELLS)
#define VEGAS_BETA_EXIT_DFLT (3*OUTBUF_CELLS)
#define VEGAS_ALPHA_EXIT_DFLT (2*OUTBUF_CELLS)
#define VEGAS_BETA_EXIT_DFLT (4*OUTBUF_CELLS)
#define VEGAS_GAMMA_EXIT_DFLT (3*OUTBUF_CELLS)
#define VEGAS_DELTA_EXIT_DFLT (5*OUTBUF_CELLS)
#define VEGAS_DELTA_EXIT_DFLT (6*OUTBUF_CELLS)
#define VEGAS_SSCAP_EXIT_DFLT (500)
/* Onion rends are six hops, so params are based on 6 outbufs of cells */
#define VEGAS_ALPHA_ONION_DFLT (6*OUTBUF_CELLS-TLS_RECORD_MAX_CELLS)
#define VEGAS_BETA_ONION_DFLT (6*OUTBUF_CELLS)
#define VEGAS_GAMMA_ONION_DFLT (6*OUTBUF_CELLS)
#define VEGAS_DELTA_ONION_DFLT (8*OUTBUF_CELLS)
/* Single Onions are three hops, so params are based on 3 outbufs of cells */
#define VEGAS_ALPHA_SOS_DFLT (3*OUTBUF_CELLS-TLS_RECORD_MAX_CELLS)
#define VEGAS_BETA_SOS_DFLT (3*OUTBUF_CELLS)
#define VEGAS_GAMMA_SOS_DFLT (3*OUTBUF_CELLS)
#define VEGAS_DELTA_SOS_DFLT (5*OUTBUF_CELLS)
/* Vanguard Onions are 7 hops (or 8 if both sides use vanguards, but that
* should be rare), so params are based on 7 outbufs of cells */
#define VEGAS_ALPHA_VG_DFLT (7*OUTBUF_CELLS-TLS_RECORD_MAX_CELLS)
#define VEGAS_BETA_VG_DFLT (7*OUTBUF_CELLS)
#define VEGAS_GAMMA_VG_DFLT (7*OUTBUF_CELLS)
#define VEGAS_DELTA_VG_DFLT (9*OUTBUF_CELLS)
#define VEGAS_BDP_MIX_PCT 100
#define VEGAS_ALPHA_ONION_DFLT (3*OUTBUF_CELLS)
#define VEGAS_BETA_ONION_DFLT (7*OUTBUF_CELLS)
#define VEGAS_GAMMA_ONION_DFLT (5*OUTBUF_CELLS)
#define VEGAS_DELTA_ONION_DFLT (9*OUTBUF_CELLS)
#define VEGAS_SSCAP_ONION_DFLT (600)
/**
* The original TCP Vegas used only a congestion window BDP estimator. We
* believe that the piecewise estimator is likely to perform better, but
* for purposes of experimentation, we might as well have a way to blend
* them. It also lets us set Vegas to its original estimator while other
* algorithms on the same network use piecewise (by setting the
* 'vegas_bdp_mix_pct' consensus parameter to 100, while leaving the
* 'cc_bdp_alg' parameter set to piecewise).
*
* Returns a percentage weighted average between the CWND estimator and
* the specified consensus BDP estimator.
* The original TCP Vegas congestion window BDP estimator.
*/
static inline uint64_t
vegas_bdp_mix(const congestion_control_t *cc)
vegas_bdp(const congestion_control_t *cc)
{
return cc->vegas_params.bdp_mix_pct*cc->bdp[BDP_ALG_CWND_RTT]/100 +
(100-cc->vegas_params.bdp_mix_pct)*cc->bdp[cc->bdp_alg]/100;
return cc->bdp[BDP_ALG_CWND_RTT];
}
/**
@@ -87,8 +67,8 @@ congestion_control_vegas_set_params(congestion_control_t *cc,
{
tor_assert(cc->cc_alg == CC_ALG_VEGAS);
const char *alpha_str = NULL, *beta_str = NULL, *gamma_str = NULL;
const char *delta_str = NULL;
int alpha, beta, gamma, delta;
const char *delta_str = NULL, *sscap_str = NULL;
int alpha, beta, gamma, delta, ss_cwnd_cap;
switch (path) {
case CC_PATH_SBWS:
@@ -96,56 +76,56 @@
beta_str = "cc_vegas_beta_sbws";
gamma_str = "cc_vegas_gamma_sbws";
delta_str = "cc_vegas_delta_sbws";
sscap_str = "cc_sscap_sbws";
alpha = VEGAS_ALPHA_SBWS_DFLT;
beta = VEGAS_BETA_SBWS_DFLT;
gamma = VEGAS_GAMMA_SBWS_DFLT;
delta = VEGAS_DELTA_SBWS_DFLT;
ss_cwnd_cap = VEGAS_SSCAP_SBWS_DFLT;
break;
case CC_PATH_EXIT:
case CC_PATH_ONION_SOS:
alpha_str = "cc_vegas_alpha_exit";
beta_str = "cc_vegas_beta_exit";
gamma_str = "cc_vegas_gamma_exit";
delta_str = "cc_vegas_delta_exit";
sscap_str = "cc_sscap_exit";
alpha = VEGAS_ALPHA_EXIT_DFLT;
beta = VEGAS_BETA_EXIT_DFLT;
gamma = VEGAS_GAMMA_EXIT_DFLT;
delta = VEGAS_DELTA_EXIT_DFLT;
ss_cwnd_cap = VEGAS_SSCAP_EXIT_DFLT;
break;
case CC_PATH_ONION:
case CC_PATH_ONION_VG:
alpha_str = "cc_vegas_alpha_onion";
beta_str = "cc_vegas_beta_onion";
gamma_str = "cc_vegas_gamma_onion";
delta_str = "cc_vegas_delta_onion";
sscap_str = "cc_sscap_onion";
alpha = VEGAS_ALPHA_ONION_DFLT;
beta = VEGAS_BETA_ONION_DFLT;
gamma = VEGAS_GAMMA_ONION_DFLT;
delta = VEGAS_DELTA_ONION_DFLT;
break;
case CC_PATH_ONION_SOS:
alpha_str = "cc_vegas_alpha_sos";
beta_str = "cc_vegas_beta_sos";
gamma_str = "cc_vegas_gamma_sos";
delta_str = "cc_vegas_delta_sos";
alpha = VEGAS_ALPHA_SOS_DFLT;
beta = VEGAS_BETA_SOS_DFLT;
gamma = VEGAS_GAMMA_SOS_DFLT;
delta = VEGAS_DELTA_SOS_DFLT;
break;
case CC_PATH_ONION_VG:
alpha_str = "cc_vegas_alpha_vg";
beta_str = "cc_vegas_beta_vg";
gamma_str = "cc_vegas_gamma_vg";
delta_str = "cc_vegas_delta_vg";
alpha = VEGAS_ALPHA_VG_DFLT;
beta = VEGAS_BETA_VG_DFLT;
gamma = VEGAS_GAMMA_VG_DFLT;
delta = VEGAS_DELTA_VG_DFLT;
ss_cwnd_cap = VEGAS_SSCAP_ONION_DFLT;
break;
default:
tor_assert(0);
break;
}
cc->vegas_params.ss_cwnd_cap =
networkstatus_get_param(NULL, sscap_str,
ss_cwnd_cap,
100,
INT32_MAX);
cc->vegas_params.ss_cwnd_max =
networkstatus_get_param(NULL, "cc_ss_max",
SS_CWND_MAX_DFLT,
500,
INT32_MAX);
cc->vegas_params.alpha =
networkstatus_get_param(NULL, alpha_str,
alpha,
@@ -169,12 +149,107 @@ congestion_control_vegas_set_params(congestion_control_t *cc,
delta,
0,
INT32_MAX);
}
cc->vegas_params.bdp_mix_pct =
networkstatus_get_param(NULL, "cc_vegas_bdp_mix",
VEGAS_BDP_MIX_PCT,
0,
100);
/**
* Common log function for tracking all vegas state.
*/
static void
congestion_control_vegas_log(const circuit_t *circ,
const congestion_control_t *cc)
{
uint64_t queue_use = cc->cwnd - vegas_bdp(cc);
if (CIRCUIT_IS_ORIGIN(circ) &&
circ->purpose == CIRCUIT_PURPOSE_S_REND_JOINED) {
log_info(LD_CIRC,
"CC: TOR_VEGAS Onion Circuit %d "
"RTT: %"PRIu64", %"PRIu64", %"PRIu64", "
"CWND: %"PRIu64", "
"INFL: %"PRIu64", "
"VBDP: %"PRIu64", "
"QUSE: %"PRIu64", "
"BWE: %"PRIu64", "
"SS: %d",
CONST_TO_ORIGIN_CIRCUIT(circ)->global_identifier,
cc->min_rtt_usec/1000,
cc->ewma_rtt_usec/1000,
cc->max_rtt_usec/1000,
cc->cwnd,
cc->inflight,
vegas_bdp(cc),
queue_use,
cc->cwnd*CELL_MAX_NETWORK_SIZE*1000/
MAX(cc->min_rtt_usec,cc->ewma_rtt_usec),
cc->in_slow_start
);
} else {
log_info(LD_CIRC,
"CC: TOR_VEGAS "
"RTT: %"PRIu64", %"PRIu64", %"PRIu64", "
"CWND: %"PRIu64", "
"INFL: %"PRIu64", "
"VBDP: %"PRIu64", "
"QUSE: %"PRIu64", "
"BWE: %"PRIu64", "
"SS: %d",
cc->min_rtt_usec/1000,
cc->ewma_rtt_usec/1000,
cc->max_rtt_usec/1000,
cc->cwnd,
cc->inflight,
vegas_bdp(cc),
queue_use,
cc->cwnd*CELL_MAX_NETWORK_SIZE*1000/
MAX(cc->min_rtt_usec,cc->ewma_rtt_usec),
cc->in_slow_start
);
}
}
/**
* Implements RFC3742: Limited Slow Start.
* https://datatracker.ietf.org/doc/html/rfc3742#section-2
*/
static inline uint64_t
rfc3742_ss_inc(const congestion_control_t *cc)
{
if (cc->cwnd <= cc->vegas_params.ss_cwnd_cap) {
/* If less than the cap, round and always grow by at least 1 sendme_inc. */
return ((uint64_t)cc->cwnd_inc_pct_ss*cc->sendme_inc + 50)/100;
} else {
// K = int(cwnd/(0.5 max_ssthresh));
// => K = 2*cwnd/max_ssthresh
// cwnd += int(MSS/K);
// => cwnd += MSS*max_ssthresh/(2*cwnd)
return ((uint64_t)cc->sendme_inc*cc->vegas_params.ss_cwnd_cap + cc->cwnd)/
(2*cc->cwnd);
}
}
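Plugging in numbers shows the dampening; the sendme_inc of 31 cells is an assumption here, and the cap of 500 matches VEGAS_SSCAP_EXIT_DFLT above:
/* Worked example, assuming cwnd_inc_pct_ss == 100, sendme_inc == 31,
 * ss_cwnd_cap == 500:
 *   cwnd <= 500:  inc = (100*31 + 50)/100        == 31 cells per sendme
 *   cwnd == 1000: inc = (31*500 + 1000)/(2*1000) == 8
 *   cwnd == 2000: inc = (31*500 + 2000)/(2*2000) == 4
 * The per-sendme increment halves as cwnd doubles past the cap, so
 * growth becomes roughly linear (about ss_cwnd_cap/2 per cwnd) instead
 * of exponential. */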
/**
* Exit Vegas slow start.
*
* This function sets our slow-start state to 0, and emits logs
* and control port information signifying end of slow start.
* It also schedules the next CWND update for steady-state.
*/
static void
congestion_control_vegas_exit_slow_start(const circuit_t *circ,
congestion_control_t *cc)
{
congestion_control_vegas_log(circ, cc);
cc->in_slow_start = 0;
cc->next_cc_event = CWND_UPDATE_RATE(cc);
congestion_control_vegas_log(circ, cc);
/* We need to report that slow start has exited ASAP,
* for sbws bandwidth measurement. */
if (CIRCUIT_IS_ORIGIN(circ)) {
/* We must discard const here because the event modifies fields :/ */
control_event_circ_bandwidth_used_for_circ(
TO_ORIGIN_CIRCUIT((circuit_t*)circ));
}
}
/**
@@ -217,43 +292,45 @@ congestion_control_vegas_process_sendme(congestion_control_t *cc,
return 0;
}
/* We only update anything once per window */
if (cc->next_cc_event == 0) {
/* The queue use is the amount in which our cwnd is above BDP;
* if it is below, then 0 queue use. */
if (vegas_bdp_mix(cc) > cc->cwnd)
queue_use = 0;
else
queue_use = cc->cwnd - vegas_bdp_mix(cc);
/* The queue use is the amount in which our cwnd is above BDP;
* if it is below, then 0 queue use. */
if (vegas_bdp(cc) > cc->cwnd)
queue_use = 0; // This should not happen anymore.
else
queue_use = cc->cwnd - vegas_bdp(cc);
if (cc->in_slow_start) {
if (queue_use < cc->vegas_params.gamma && !cc->blocked_chan) {
/* Grow to BDP immediately, then exponential growth until
* congestion signal. Increment by at least 2 sendme's worth. */
cc->cwnd = MAX(cc->cwnd + MAX(CWND_INC_SS(cc), 2*cc->sendme_inc),
vegas_bdp_mix(cc));
if (cc->in_slow_start) {
if (queue_use < cc->vegas_params.gamma && !cc->blocked_chan) {
/* Get the "Limited Slow Start" increment */
uint64_t inc = rfc3742_ss_inc(cc);
// Check if inc is less than what we would do in steady-state
// congestion avoidance
if (inc*SENDME_PER_CWND(cc) <= CWND_INC(cc)) {
cc->cwnd += inc;
congestion_control_vegas_exit_slow_start(circ, cc);
} else {
/* Congestion signal: Set cwnd to gamma threshold */
cc->cwnd = vegas_bdp_mix(cc) + cc->vegas_params.gamma;
cc->in_slow_start = 0;
log_info(LD_CIRC, "CC: TOR_VEGAS exiting slow start");
/* We need to report that slow start has exited ASAP,
* for sbws bandwidth measurement. */
if (CIRCUIT_IS_ORIGIN(circ)) {
/* We must discard const here because the event modifies fields :/ */
control_event_circ_bandwidth_used_for_circ(
TO_ORIGIN_CIRCUIT((circuit_t*)circ));
}
cc->cwnd += inc;
cc->next_cc_event = 1; // Technically irrelevant, but kept for consistency
}
} else {
if (queue_use > cc->vegas_params.delta) {
cc->cwnd = vegas_bdp_mix(cc) + cc->vegas_params.delta - CWND_INC(cc);
} else if (queue_use > cc->vegas_params.beta || cc->blocked_chan) {
cc->cwnd -= CWND_INC(cc);
} else if (queue_use < cc->vegas_params.alpha) {
cc->cwnd += CWND_INC(cc);
}
/* Congestion signal: Set cwnd to gamma threshold */
cc->cwnd = vegas_bdp(cc) + cc->vegas_params.gamma;
congestion_control_vegas_exit_slow_start(circ, cc);
}
if (cc->cwnd >= cc->vegas_params.ss_cwnd_max) {
cc->cwnd = cc->vegas_params.ss_cwnd_max;
congestion_control_vegas_exit_slow_start(circ, cc);
}
/* After slow start, we only update once per window */
} else if (cc->next_cc_event == 0) {
if (queue_use > cc->vegas_params.delta) {
cc->cwnd = vegas_bdp(cc) + cc->vegas_params.delta - CWND_INC(cc);
} else if (queue_use > cc->vegas_params.beta || cc->blocked_chan) {
cc->cwnd -= CWND_INC(cc);
} else if (queue_use < cc->vegas_params.alpha) {
cc->cwnd += CWND_INC(cc);
}
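A sketch of these thresholds with the exit defaults from this diff, assuming TLS_RECORD_MAX_CELLS == 31 (so OUTBUF_CELLS == 62, alpha == 124, beta == 248, delta == 372) and CWND_INC(cc) == 31:
/* queue_use == 400 (> delta): cwnd = bdp + 372 - 31  (hard backoff)
 * queue_use == 300 (> beta):  cwnd -= 31
 * queue_use == 100 (< alpha): cwnd += 31
 * queue_use == 200:           cwnd unchanged (between alpha and beta) */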
/* cwnd can never fall below 1 increment */
@@ -262,41 +339,13 @@ congestion_control_vegas_process_sendme(congestion_control_t *cc,
/* Schedule next update */
cc->next_cc_event = CWND_UPDATE_RATE(cc);
if (CIRCUIT_IS_ORIGIN(circ)) {
congestion_control_vegas_log(circ, cc);
/* Log if we're above the ss_max */
if (cc->cwnd >= cc->vegas_params.ss_cwnd_max) {
log_info(LD_CIRC,
"CC: TOR_VEGAS Circuit %d "
"CWND: %"PRIu64", "
"INFL: %"PRIu64", "
"VBDP: %"PRIu64", "
"QUSE: %"PRIu64", "
"NCCE: %"PRIu64", "
"SS: %d",
CONST_TO_ORIGIN_CIRCUIT(circ)->global_identifier,
cc->cwnd,
cc->inflight,
vegas_bdp_mix(cc),
queue_use,
cc->next_cc_event,
cc->in_slow_start
);
} else {
log_info(LD_CIRC,
"CC: TOR_VEGAS Circuit %"PRIu64":%d "
"CWND: %"PRIu64", "
"INFL: %"PRIu64", "
"VBDP: %"PRIu64", "
"QUSE: %"PRIu64", "
"NCCE: %"PRIu64", "
"SS: %d",
CONST_TO_OR_CIRCUIT(circ)->p_chan->global_identifier,
CONST_TO_OR_CIRCUIT(circ)->p_circ_id,
cc->cwnd,
cc->inflight,
vegas_bdp_mix(cc),
queue_use,
cc->next_cc_event,
cc->in_slow_start
);
"CC: TOR_VEGAS above ss_max in steady state for circ %d: %"PRIu64,
circ->purpose, cc->cwnd);
}
}