Implement AIMD effort estimation.

Now, pow should auto-enable and auto-disable itself.
This commit is contained in:
Mike Perry 2022-07-13 23:33:07 +00:00 committed by Micah Elizabeth Scott
parent 5b3a067fe3
commit ec9e95cf1e
4 changed files with 54 additions and 8 deletions
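At a high level, the suggested effort now follows an additive-increase/multiplicative-decrease (AIMD) rule driven by three signals collected each HS_UPDATE_PERIOD: the largest effort of any trimmed request, whether the request queue ever backed up, and the average effort of the requests that were handled. The sketch below condenses the update rule from the diff, using a free-standing struct whose fields mirror hs_pow_service_state_t; it is illustrative only, and the divide-by-zero guard for an idle period is an assumption of the sketch rather than something the diff shows.

/* Illustrative sketch of the AIMD update, not the merged code. */
#include <stdbool.h>
#include <stdint.h>

struct pow_estimator {
  uint32_t suggested_effort;   /* effort the service currently suggests */
  uint32_t min_effort;         /* floor below which PoW is switched off */
  uint32_t max_trimmed_effort; /* highest effort trimmed this period */
  uint64_t total_effort;       /* sum of (clamped) efforts this period */
  uint64_t rend_handled;       /* valid requests handled this period */
  bool had_queue;              /* did the queue back up this period? */
};

static void
update_suggested_effort_sketch(struct pow_estimator *st)
{
  /* Average effort seen this period (guarded here for an idle period). */
  uint32_t avg = st->rend_handled ?
    (uint32_t)(st->total_effort / st->rend_handled) : st->suggested_effort;

  if (st->max_trimmed_effort > st->suggested_effort) {
    /* We dropped a request that paid more than we asked for: re-estimate. */
    st->suggested_effort = avg;
  } else if (st->had_queue) {
    /* The queue backed up; the real code also checks that the top of the
     * queue still meets the current suggestion before re-estimating. */
    st->suggested_effort = avg;
  } else {
    /* Queue stayed drained all period: multiplicative decrease to 2/3. */
    st->suggested_effort = 2 * st->suggested_effort / 3;
  }

  /* Auto-disable: below the minimum, advertise zero effort until a queue
   * builds up or a request gets trimmed again. */
  if (st->suggested_effort < st->min_effort)
    st->suggested_effort = 0;

  /* Start the next update period from a clean slate. */
  st->total_effort = 0;
  st->rend_handled = 0;
  st->max_trimmed_effort = 0;
  st->had_queue = false;
}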

View File

@@ -678,6 +678,10 @@ trim_rend_pqueue(hs_pow_service_state_t *pow_state, time_t now)
if (queued_rend_request_is_too_old(req, now)) {
log_info(LD_REND, "While trimming, rend request has been pending "
"for too long; discarding.");
if (req->rdv_data.pow_effort > pow_state->max_trimmed_effort)
pow_state->max_trimmed_effort = req->rdv_data.pow_effort;
free_pending_rend(req);
} else {
smartlist_pqueue_add(new_pqueue,
@@ -689,6 +693,9 @@ trim_rend_pqueue(hs_pow_service_state_t *pow_state, time_t now)
/* Ok, we have rescued all the entries we want to keep. The rest are
* all excess. */
SMARTLIST_FOREACH_BEGIN(old_pqueue, pending_rend_t *, req) {
if (req->rdv_data.pow_effort > pow_state->max_trimmed_effort)
pow_state->max_trimmed_effort = req->rdv_data.pow_effort;
free_pending_rend(req);
} SMARTLIST_FOREACH_END(req);
smartlist_free(old_pqueue);
@@ -719,7 +726,7 @@ count_service_rp_circuits_pending(hs_service_t *service)
* effort is at least what we're suggesting for that service right now,
* return 1, else return 0.
*/
static int
int
top_of_rend_pqueue_is_worthwhile(hs_pow_service_state_t *pow_state)
{
tor_assert(pow_state->rend_request_pqueue);
@@ -815,6 +822,15 @@ handle_rend_pqueue_cb(mainloop_event_t *ev, void *arg)
* reschedule the event in order to continue handling them. */
if (smartlist_len(pow_state->rend_request_pqueue) > 0) {
mainloop_event_activate(pow_state->pop_pqueue_ev);
// XXX: Is this a good threshold to decide that we have a significant
// queue? I just made it up.
if (smartlist_len(pow_state->rend_request_pqueue) >
2*MAX_REND_REQUEST_PER_MAINLOOP) {
/* Note the fact that we had multiple event loops' worth of queue
* to service, for effort estimation. */
pow_state->had_queue = 1;
}
}
}
@@ -1334,8 +1350,10 @@ hs_circ_handle_introduce2(const hs_service_t *service,
goto done;
}
/* Increase the total effort in valid requests received this period. */
service->state.pow_state->total_effort += data.rdv_data.pow_effort;
/* Increase the total effort in valid requests received this period,
* but count 0-effort as min-effort, for estimation purposes. */
service->state.pow_state->total_effort += MAX(data.rdv_data.pow_effort,
service->state.pow_state->min_effort);
/* Successfully added rend circuit to priority queue. */
ret = 0;
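The inputs to that estimate are gathered in this file: trim_rend_pqueue() records the largest effort it had to discard, handle_rend_pqueue_cb() sets had_queue once more than two mainloops' worth of requests are waiting, and every valid INTRODUCE2 adds at least min_effort to total_effort. A small, self-contained illustration of that clamping, with entirely made-up effort values and a hypothetical min_effort of 100:

#include <stdint.h>
#include <stdio.h>

/* Illustration only: why 0-effort requests are counted as min_effort when
 * summing total_effort.  All values here are invented for the example. */
int
main(void)
{
  const uint32_t min_effort = 100;                /* hypothetical floor */
  const uint32_t efforts[] = { 0, 0, 250, 600 };  /* four valid requests */
  uint64_t total_effort = 0;

  for (unsigned i = 0; i < 4; i++)
    total_effort += efforts[i] > min_effort ? efforts[i] : min_effort;

  /* 1050 / 4 = 262: counting the free requests as min_effort keeps the
   * period's average from collapsing toward zero (it would be 212 if they
   * counted as 0), so the next suggestion stays meaningful. */
  printf("average effort this period: %llu\n",
         (unsigned long long)(total_effort / 4));
  return 0;
}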

View File

@@ -32,6 +32,8 @@ typedef struct pending_rend_t {
time_t enqueued_ts;
} pending_rend_t;
int top_of_rend_pqueue_is_worthwhile(hs_pow_service_state_t *pow_state);
/* Cleanup function when the circuit is closed or freed. */
void hs_circ_cleanup_on_close(circuit_t *circ);
void hs_circ_cleanup_on_free(circuit_t *circ);

View File

@@ -87,6 +87,9 @@ typedef struct hs_pow_service_state_t {
* be serviced in a timely manner. */
uint32_t suggested_effort;
/* The maximum effort of a request we've had to trim, this update period */
uint32_t max_trimmed_effort;
/* The following values are used when calculating and updating the suggested
* effort every HS_UPDATE_PERIOD seconds. */
@@ -96,6 +99,8 @@ typedef struct hs_pow_service_state_t {
time_t next_effort_update;
/* Sum of effort of all valid requests received since the last update. */
uint64_t total_effort;
/* Did we have elements waiting in the queue during this period? */
bool had_queue;
} hs_pow_service_state_t;
/* Struct to store a solution to the PoW challenge. */

View File

@@ -2678,21 +2678,42 @@ update_suggested_effort(hs_service_t *service, time_t now)
/* Make life easier */
hs_pow_service_state_t *pow_state = service->state.pow_state;
/* Calculate the new suggested effort. */
/* TODO Check for overflow? */
pow_state->suggested_effort = (uint32_t)(pow_state->total_effort / pow_state->rend_handled);
/* Calculate the new suggested effort, using an additive-increase
* multiplicative-decrease estimation scheme. */
if (pow_state->max_trimmed_effort > pow_state->suggested_effort) {
/* If we trimmed a request above our suggested effort, re-estimate the
* effort */
pow_state->suggested_effort = (uint32_t)(pow_state->total_effort /
pow_state->rend_handled);
} else if (pow_state->had_queue) {
/* If we had a queue during this period, and the current top of queue
* is at or above the suggested effort, we should re-estimate the effort.
* Otherwise, it can stay the same (no change to effort). */
if (top_of_rend_pqueue_is_worthwhile(pow_state)) {
pow_state->suggested_effort = (uint32_t)(pow_state->total_effort /
pow_state->rend_handled);
}
} else {
/* If we were able to keep the queue drained for the entire update period,
* multiplicatively decrease the suggested effort to 2/3 of its value. */
pow_state->suggested_effort = 2*pow_state->suggested_effort/3;
}
log_debug(LD_REND, "Recalculated suggested effort: %u",
pow_state->suggested_effort);
/* Set suggested effort to max(min_effort, suggested_effort) */
/* If the suggested effort has been decreased below the minimum, set it
* to zero: no pow needed again until we queue or trim */
if (pow_state->suggested_effort < pow_state->min_effort) {
pow_state->suggested_effort = pow_state->min_effort;
// XXX: Verify this disables pow being done at all.
pow_state->suggested_effort = 0;
}
/* Reset the total effort sum and number of rends for this update period. */
pow_state->total_effort = 0;
pow_state->rend_handled = 0;
pow_state->max_trimmed_effort = 0;
pow_state->had_queue = 0;
pow_state->next_effort_update = now + HS_UPDATE_PERIOD;
}
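To make the three branches concrete with made-up numbers: suppose suggested_effort enters the period at 300 and 400 valid requests summing to total_effort = 160000 were handled (an average of 400). If a request with effort 500 had to be trimmed, 500 > 300 takes the first branch and the new suggestion is 160000 / 400 = 400. If instead nothing was trimmed but the queue backed up, the second branch applies the same average, provided the top of the queue still meets the current suggestion. If the queue stayed drained, the suggestion decays to 2*300/3 = 200, then 133, 88, 58, ... over successive idle periods, and once it falls below min_effort it is pinned to 0, which is what the commit message means by PoW auto-disabling (the XXX above notes this still needed verification) until queueing or trimming pushes the suggestion back up.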