Merge branch 'maint-0.2.9'

This commit is contained in:
Nick Mathewson 2016-11-07 11:02:15 -05:00
commit 1934bf75ef
4 changed files with 30 additions and 14 deletions

View File

@ -2,5 +2,7 @@
- Remove the maximum delay on exponential-backoff scheduling.
Since we now allow an infinite number of failures (see ticket
20536), we must now allow the time to grow longer on each failure.
Fixes bug 20534; bugfix on 0.2.9.1-alpha.
Fixes part of bug 20534; bugfix on 0.2.9.1-alpha.
- Use initial delays and decrements in download scheduling closer to
those from 0.2.8. Fixes another part of bug 20534; bugfix on
0.2.9.1-alpha.

6
changes/bug20593 Normal file
View File

@ -0,0 +1,6 @@
o Minor bugfixes (client directory scheduling):
- Treat "relay too busy to answer request" as a failed request and a
reason to back off on our retry frequency. This is safe now that
exponential backoffs retry indefinitely, and avoids a bug where we would
reset our download schedule erroneously.
Fixes bug 20593; bugfix on 0.2.9.1-alpha.

View File

@ -550,7 +550,7 @@ static config_var_t option_vars_[] = {
* When clients have authorities and fallbacks available, they use these
* schedules: (we stagger the times to avoid thundering herds) */
V(ClientBootstrapConsensusAuthorityDownloadSchedule, CSV_INTERVAL,
"10, 11, 3600, 10800, 25200, 54000, 111600, 262800" /* 3 days + 1 hour */),
"6, 11, 3600, 10800, 25200, 54000, 111600, 262800" /* 3 days + 1 hour */),
V(ClientBootstrapConsensusFallbackDownloadSchedule, CSV_INTERVAL,
"0, 1, 4, 11, 3600, 10800, 25200, 54000, 111600, 262800"),
/* When clients only have authorities available, they use this schedule: */
@ -561,7 +561,7 @@ static config_var_t option_vars_[] = {
* blackholed. Clients will try 3 directories simultaneously.
* (Relays never use simultaneous connections.) */
V(ClientBootstrapConsensusMaxInProgressTries, UINT, "3"),
V(TestingBridgeDownloadSchedule, CSV_INTERVAL, "3600, 900, 900, 3600"),
V(TestingBridgeDownloadSchedule, CSV_INTERVAL, "1200, 900, 900, 3600"),
V(TestingClientMaxIntervalWithoutRequest, INTERVAL, "10 minutes"),
V(TestingDirConnectionMaxStall, INTERVAL, "5 minutes"),
V(TestingConsensusMaxDownloadTries, UINT, "8"),

View File

@ -3997,14 +3997,21 @@ next_random_exponential_delay(int delay, int max_delay)
/* How much are we willing to add to the delay? */
int max_increment;
const int multiplier = 3; /* no more than quadruple the previous delay */
if (delay)
max_increment = delay; /* no more than double. */
else
max_increment = 1; /* we're always willing to slow down a little. */
if (delay && delay < (INT_MAX-1) / multiplier) {
max_increment = delay * multiplier;
} else if (delay) {
max_increment = INT_MAX-1;
} else {
max_increment = 1;
}
/* the + 1 here is so that we include the end of the interval */
int increment = crypto_rand_int(max_increment+1);
if (BUG(max_increment < 1))
max_increment = 1;
/* the + 1 here is so that we always wait longer than last time. */
int increment = crypto_rand_int(max_increment)+1;
if (increment < max_delay - delay)
return delay + increment;
@ -4134,15 +4141,16 @@ time_t
download_status_increment_failure(download_status_t *dls, int status_code,
const char *item, int server, time_t now)
{
(void) status_code; // XXXX no longer used.
(void) server; // XXXX no longer used.
int increment = -1;
int min_delay = 0, max_delay = INT_MAX;
tor_assert(dls);
/* only count the failure if it's permanent, or we're a server */
if (status_code != 503 || server) {
if (dls->n_download_failures < IMPOSSIBLE_TO_DOWNLOAD-1)
++dls->n_download_failures;
/* count the failure */
if (dls->n_download_failures < IMPOSSIBLE_TO_DOWNLOAD-1) {
++dls->n_download_failures;
}
if (dls->increment_on == DL_SCHED_INCREMENT_FAILURE) {