Merge branch 'tor-gitlab/mr/490'
commit f64a88e72d
@@ -457,7 +457,7 @@ AM_CONDITIONAL(BUILD_HTML_DOCS, [test "x$enable_html_manual" != "xno"])
 AM_PROG_CC_C_O

 dnl Before autoconf 2.70, AC_PROG_CC_C99 is supposedly necessary for some
-dnl compilers if you wan't C99 support. Starting with 2.70, it is obsolete and
+dnl compilers if you want C99 support. Starting with 2.70, it is obsolete and
 dnl forbidden.
 m4_version_prereq([2.70], [:], [AC_PROG_CC_C99])

@@ -424,7 +424,7 @@ def bug_html(m):
     try:
         disp_prefix, url_prefix = ISSUE_PREFIX_MAP[prefix]
     except KeyError:
-        print("Can't figure out URL for {}{}".formt(prefix,bugno),
+        print("Can't figure out URL for {}{}".format(prefix,bugno),
               file=sys.stderr)
         return "{} {}{}".format(kind, prefix, bugno)

@@ -376,7 +376,7 @@ congestion_control_new(const circuit_params_t *params, cc_path_t path)
 }

 /**
- * Free a congestion control object and its asssociated state.
+ * Free a congestion control object and its associated state.
  */
 void
 congestion_control_free_(congestion_control_t *cc)
@@ -235,7 +235,7 @@ circuit_send_stream_xon(edge_connection_t *stream)
  * Process a stream XOFF, parsing it, and then stopping reading on
  * the edge connection.
  *
- * Record that we have recieved an xoff, so we know not to resume
+ * Record that we have received an xoff, so we know not to resume
  * reading on this edge conn until we get an XON.
  *
  * Returns false if the XOFF did not validate; true if it does.
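The XOFF handling described in this comment boils down to a latch: remember that an XOFF arrived and stop reading from the edge connection until an XON clears it. Below is a minimal sketch of that pattern with hypothetical type and callback names; it is an illustration of the idea, not Tor's flow-control code.

    #include <stdbool.h>

    struct edge_sketch {
      bool xoff_received;   /* set by XOFF, cleared by XON */
    };

    /* After a validated XOFF: latch the flag and stop reading. */
    static void
    handle_xoff_sketch(struct edge_sketch *stream, void (*stop_reading)(void))
    {
      stream->xoff_received = true;
      stop_reading();   /* reading stays off until an XON arrives */
    }

    /* On XON: clear the latch and resume reading. */
    static void
    handle_xon_sketch(struct edge_sketch *stream, void (*start_reading)(void))
    {
      stream->xoff_received = false;
      start_reading();
    }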
@@ -53,7 +53,7 @@ congestion_control_nola_set_params(congestion_control_t *cc)
  *
  * To handle the case where the local orconn blocks, TOR_NOLA uses
  * the 'piecewise' BDP estimate, which uses more a conservative BDP
- * estimate method when blocking occurrs, but a more aggressive BDP
+ * estimate method when blocking occurs, but a more aggressive BDP
  * estimate when there is no local blocking. This minimizes local
  * client queues.
  */
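To make the 'piecewise' idea concrete, here is a sketch under the assumption that two BDP estimates are available, one conservative and one aggressive; the struct and field names are invented for illustration and are not Tor's.

    #include <stdbool.h>
    #include <stdint.h>

    struct bdp_sketch {
      uint64_t bdp_conservative;  /* e.g. a SENDME-based estimate */
      uint64_t bdp_aggressive;    /* e.g. a CWND/RTT-based estimate */
      bool orconn_blocked;        /* is the local orconn currently blocked? */
    };

    static uint64_t
    piecewise_bdp_sketch(const struct bdp_sketch *s)
    {
      /* While the local orconn is blocked, trust the smaller, conservative
       * number so local queues stay short; otherwise use the aggressive one. */
      if (s->orconn_blocked) {
        return s->bdp_conservative < s->bdp_aggressive ?
               s->bdp_conservative : s->bdp_aggressive;
      }
      return s->bdp_aggressive;
    }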
@@ -19,12 +19,12 @@ typedef enum {
   CC_ALG_SENDME = 0,

   /**
-   * Prop#324 TOR_WESTWOOD - Deliberately agressive. Westwood may not even
+   * Prop#324 TOR_WESTWOOD - Deliberately aggressive. Westwood may not even
    * converge to fairness in some cases because max RTT will also increase
-   * on congesgtion, which boosts the Westwood RTT congestion threshhold. So it
+   * on congestion, which boosts the Westwood RTT congestion threshold. So it
    * can cause runaway queue bloat, which may or may not lead to a robot
    * uprising... Ok that's Westworld, not Westwood. Still, we need to test
-   * Vegas and NOLA against something more agressive to ensure they do not
+   * Vegas and NOLA against something more aggressive to ensure they do not
    * starve in the presence of cheaters. We also need to make sure cheaters
    * trigger the oomkiller in those cases.
    */
@@ -32,7 +32,7 @@ typedef enum {

   /**
    * Prop#324 TOR_VEGAS - TCP Vegas-style BDP tracker. Because Vegas backs off
-   * whenever it detects queue delay, it can be beaten out by more agressive
+   * whenever it detects queue delay, it can be beaten out by more aggressive
    * algs. However, in live network testing, it seems to do just fine against
    * current SENDMEs. It outperforms Westwood and does not stall. */
   CC_ALG_VEGAS = 2,
@@ -40,7 +40,7 @@ typedef enum {
   /**
    * Prop#324: TOR_NOLA - NOLA looks the BDP right in the eye and uses it
    * immediately as CWND. No slow start, no other congestion signals, no delay,
-   * no bullshit. Like TOR_VEGAS, it also uses agressive BDP estimates, to
+   * no bullshit. Like TOR_VEGAS, it also uses aggressive BDP estimates, to
    * avoid out-competition. It seems a bit better throughput than Vegas,
    * but its agressive BDP and rapid updates may lead to more queue latency. */
   CC_ALG_NOLA = 3,
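The "uses it immediately as CWND" behaviour amounts to one assignment per update. A hedged one-line sketch follows, under the assumption that the window is padded by a small overshoot margin; the margin and the names are assumptions, not taken from the source.

    #include <stdint.h>

    /* NOLA-style update sketch: the congestion window tracks the BDP estimate
     * directly, optionally plus a small fixed margin to avoid undershooting. */
    static inline uint64_t
    nola_cwnd_sketch(uint64_t bdp_estimate, uint64_t overshoot_cells)
    {
      return bdp_estimate + overshoot_cells;
    }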
@@ -147,7 +147,7 @@ struct congestion_control_t {
   /**
    * For steady-state: the number of sendme acks until we will acknowledge
    * a congestion event again. It starts out as the number of sendme acks
-   * in a congestion windowm and is decremented each ack. When this reaches
+   * in a congestion window and is decremented each ack. When this reaches
    * 0, it means we should examine our congestion algorithm conditions.
    * In this way, we only react to one congestion event per congestion window.
    *
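The counter described here is a simple re-armed countdown. A sketch of the pattern, with hypothetical field names and an illustrative reaction (halving the window) standing in for whatever the selected algorithm actually does:

    #include <stdbool.h>
    #include <stdint.h>

    struct cc_sketch {
      uint64_t cwnd;           /* congestion window, in cells */
      uint64_t sendme_inc;     /* cells acknowledged per SENDME */
      uint64_t next_cc_event;  /* acks remaining until we re-evaluate */
    };

    static void
    on_sendme_ack_sketch(struct cc_sketch *cc, bool congestion_signal)
    {
      if (cc->next_cc_event > 0)
        cc->next_cc_event--;

      if (cc->next_cc_event == 0) {
        if (congestion_signal)
          cc->cwnd -= cc->cwnd / 2;   /* placeholder reaction only */
        /* Re-arm: roughly one evaluation per cwnd worth of acks. */
        cc->next_cc_event = cc->cwnd / cc->sendme_inc;
        if (cc->next_cc_event == 0)
          cc->next_cc_event = 1;      /* always wait at least one ack */
      }
    }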
@@ -193,7 +193,7 @@ struct congestion_control_t {
   bdp_alg_t bdp_alg;

   /** Algorithm-specific parameters. The specific struct that is used
-   * depends upon the algoritghm selected by the cc_alg parameter.
+   * depends upon the algorithm selected by the cc_alg parameter.
    * These should not be accessed anywhere other than the algorithm-specific
    * files. */
   union {
@@ -204,7 +204,7 @@ struct congestion_control_t {
 };

 /**
- * Returns the number of sendme acks we will recieve before we update cwnd.
+ * Returns the number of sendme acks we will receive before we update cwnd.
  *
  * Congestion control literature recommends only one update of cwnd per
  * cwnd worth of acks. However, we can also tune this to be more frequent
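As the comment says, the usual rule is one cwnd update per congestion window worth of acks, optionally more often. Under the assumption that the window is measured in cells and each SENDME acknowledges a fixed increment of cells, the helper reduces to this arithmetic (names are illustrative):

    #include <stdint.h>

    static inline uint64_t
    sendme_acks_per_cc_update_sketch(uint64_t cwnd_cells,
                                     uint64_t sendme_inc_cells,
                                     uint64_t updates_per_cwnd)
    {
      /* One cwnd worth of acks is cwnd/sendme_inc; dividing by the desired
       * number of updates per cwnd makes updates more frequent. */
      uint64_t acks = (cwnd_cells / sendme_inc_cells) / updates_per_cwnd;
      return acks ? acks : 1;   /* always wait for at least one ack */
    }

    /* Example: a cwnd of 124 cells with 31 cells per SENDME and one update
     * per cwnd gives 4 acks between cwnd updates. */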
@@ -65,10 +65,10 @@ congestion_control_westwood_set_params(congestion_control_t *cc)
 }

 /**
- * Return the RTT threshhold that signals congestion.
+ * Return the RTT threshold that signals congestion.
  *
  * Computed from the threshold parameter that specifies a
- * percent between the min and max RTT obseved so far.
+ * percent between the min and max RTT observed so far.
  */
 static inline uint64_t
 westwood_rtt_signal(const congestion_control_t *cc)
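The "percent between the min and max RTT" description corresponds to a linear interpolation. A sketch of that computation in integer microseconds (illustrative, not necessarily the exact expression used by westwood_rtt_signal()):

    #include <stdint.h>

    static inline uint64_t
    rtt_signal_sketch(uint64_t min_rtt_usec, uint64_t max_rtt_usec,
                      uint64_t rtt_thresh_pct)
    {
      /* rtt_thresh_pct percent of the way from min_rtt to max_rtt. */
      return ((100 - rtt_thresh_pct) * min_rtt_usec +
              rtt_thresh_pct * max_rtt_usec) / 100;
    }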
@@ -111,7 +111,7 @@ westwood_is_congested(const congestion_control_t *cc)
       westwood_rtt_signal(cc) - cc->min_rtt_usec < USEC_ONE_MS)
     return false;

-  /* If the EWMA-smoothed RTT exceeds the westwood RTT threshhold,
+  /* If the EWMA-smoothed RTT exceeds the westwood RTT threshold,
    * then it is congestion. */
   if (cc->ewma_rtt_usec > westwood_rtt_signal(cc))
     return true;
@@ -123,11 +123,11 @@ westwood_is_congested(const congestion_control_t *cc)
  * Process a SENDME and update the congestion window according to the
  * rules specified in TOR_WESTWOOD of Proposal #324.
  *
- * Essentially, this algorithm uses a threshhold of 'rtt_thresh', which
+ * Essentially, this algorithm uses a threshold of 'rtt_thresh', which
  * is a midpoint between the min and max RTT. If the RTT exceeds this
- * threshhold, then queue delay due to congestion is assumed to be present,
- * and the algirithm reduces the congestion window. If the RTT is below the
- * threshhold, the circuit is not congested (ie: queue delay is low), and we
+ * threshold, then queue delay due to congestion is assumed to be present,
+ * and the algorithm reduces the congestion window. If the RTT is below the
+ * threshold, the circuit is not congested (ie: queue delay is low), and we
  * increase the congestion window.
  *
  * The congestion window is updated only once every congestion window worth of
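Putting the rule from this comment into code form, the per-update decision looks roughly like the sketch below; the backoff factor and increment are placeholders, not the parameters Westwood actually uses.

    #include <stdint.h>

    static uint64_t
    westwood_update_sketch(uint64_t cwnd, uint64_t curr_rtt_usec,
                           uint64_t rtt_thresh_usec, uint64_t increment)
    {
      if (curr_rtt_usec > rtt_thresh_usec) {
        /* RTT above the threshold: assume queue delay, back the window off. */
        cwnd = (cwnd * 3) / 4;        /* placeholder backoff factor */
      } else {
        /* RTT below the threshold: no congestion signal, grow the window. */
        cwnd += increment;
      }
      return cwnd;
    }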
@@ -513,7 +513,7 @@ conn_update_on_connect(conn_client_stats_t *stats, const tor_addr_t *addr)

   /* Assess connect counter. Mark it if counter is down to 0 and we haven't
    * marked it before or it was reset. This is to avoid to re-mark it over and
-   * over again extending continously the blocked time. */
+   * over again extending continuously the blocked time. */
   if (token_bucket_ctr_get(&stats->connect_count) == 0 &&
       stats->marked_until_ts == 0) {
     conn_mark_client(stats);
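The guard in this hunk exists so a client is marked only the first time its connect bucket runs dry, not on every later connect while it is still empty. A sketch of that behaviour with invented names:

    #include <stdint.h>

    struct client_stats_sketch {
      uint32_t connect_tokens_left;  /* remaining connect tokens */
      uint64_t marked_until_ts;      /* 0 means "not currently marked" */
    };

    static void
    assess_connect_sketch(struct client_stats_sketch *stats,
                          uint64_t now, uint64_t block_secs)
    {
      /* Mark only when the bucket is empty AND we have not marked already;
       * otherwise every extra connect would keep pushing the deadline out. */
      if (stats->connect_tokens_left == 0 && stats->marked_until_ts == 0)
        stats->marked_until_ts = now + block_secs;
    }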
@@ -108,7 +108,7 @@ struct edge_connection_t {

   /**
    * The following fields are used to count the total bytes sent on this
-   * stream, and compare them to the number of XON and XOFFs recieved, so
+   * stream, and compare them to the number of XON and XOFFs received, so
    * that clients can check rate limits of XOFF/XON to prevent dropmark
    * attacks. */
   uint32_t total_bytes_xmit;
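The counting these fields enable supports a simple ratio check: the number of XON/XOFF cells received should stay proportional to the data actually sent. The threshold below is invented purely for illustration; it is not Tor's dropmark defence.

    #include <stdbool.h>
    #include <stdint.h>

    static bool
    xoff_rate_plausible_sketch(uint64_t total_bytes_xmit, uint32_t num_xoff_recv,
                               uint64_t bytes_per_xoff_allowed)
    {
      /* Allow at most one XOFF per bytes_per_xoff_allowed bytes sent, plus
       * one to tolerate an early XOFF on a fresh stream. */
      uint64_t allowed = total_bytes_xmit / bytes_per_xoff_allowed + 1;
      return num_xoff_recv <= allowed;
    }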
@@ -32,7 +32,7 @@ typedef struct half_edge_t {
   int data_pending;

   /**
-   * Monotime timestamp of when the other end should have successfuly
+   * Monotime timestamp of when the other end should have successfully
    * shut down the stream and stop sending data, based on the larger
    * of circuit RTT and CBT. Used if 'used_ccontrol' is true, to expire
    * the half_edge at this monotime timestamp. */
@@ -30,7 +30,7 @@
  * Flow Control
  */

-/* Emitted everytime the flow_control_decide_xon() function is called. */
+/* Emitted every time the flow_control_decide_xon() function is called. */
 TRACEPOINT_EVENT(tor_cc, flow_decide_xon,
   TP_ARGS(const edge_connection_t *, stream, size_t, n_written),
   TP_FIELDS(
@@ -403,7 +403,7 @@ dump_desc_compare_fifo_entries(const void **a_v, const void **b_v)
     }
   } else {
     /*
-     * We shouldn't see this, but what the hell, NULLs precede everythin
+     * We shouldn't see this, but what the hell, NULLs precede everything
      * else
      */
     return 1;
@@ -712,7 +712,7 @@ ed_key_init_from_file(const char *fname, uint32_t flags,
 }

 /**
- * Create a new signing key and (optionally) certficiate; do not read or write
+ * Create a new signing key and (optionally) certificate; do not read or write
  * from disk. See ed_key_init_from_file() for more information.
  */
 ed25519_keypair_t *
@@ -190,7 +190,7 @@ describe_relay_port(const port_cfg_t *port)

 /** Return true iff port p1 is equal to p2.
  *
- * This does a field by field comparaison. */
+ * This does a field by field comparison. */
 static bool
 port_cfg_eq(const port_cfg_t *p1, const port_cfg_t *p2)
 {
@@ -60,7 +60,7 @@ get_output(const metrics_store_t *store, buf_t *data, fmt_driver_fn_t fmt)
   tor_assert(fmt);

   STRMAP_FOREACH(store->entries, key, const smartlist_t *, entries) {
-    /* Indicate that we've formatted the coment already for the entries. */
+    /* Indicate that we've formatted the comment already for the entries. */
     bool comment_formatted = false;
     SMARTLIST_FOREACH_BEGIN(entries, const metrics_store_entry_t *, entry) {
       fmt(entry, data, comment_formatted);
@@ -146,7 +146,7 @@ tor_cond_wait(tor_cond_t *cond, tor_mutex_t *lock_, const struct timeval *tv)
 {
   // recursive SRW locks are not supported because they need extra logic for
   // acquiring and releasing but SleepConditionVariableSRW will use the OS
-  // lock relase function which lacks our extra logic
+  // lock release function which lacks our extra logic
   tor_assert(lock_->type == NON_RECURSIVE);
   SRWLOCK *lock = &lock_->mutex;
   DWORD ms = INFINITE;
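For context on why the assert is needed: SleepConditionVariableSRW() releases and re-acquires the SRW lock inside the OS, so any recursion bookkeeping a wrapper keeps next to the lock would be bypassed during the wait. A bare-bones wait using the real Win32 calls, with simplified error handling, looks like this:

    #ifdef _WIN32
    #include <windows.h>

    /* Returns 0 on signal, 1 on timeout, -1 on error.  The SRW lock must be
     * held exclusively by the caller; the OS releases and re-acquires it
     * around the wait with no knowledge of any wrapper-level state. */
    static int
    cond_wait_sketch(CONDITION_VARIABLE *cond, SRWLOCK *lock, DWORD timeout_ms)
    {
      if (!SleepConditionVariableSRW(cond, lock, timeout_ms, 0))
        return (GetLastError() == ERROR_TIMEOUT) ? 1 : -1;
      return 0;
    }
    #endif /* _WIN32 */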