/* Copyright (c) 2018-2018, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
* \file token_bucket.c
* \brief Functions to use and manipulate token buckets, used for
* rate-limiting on connections and globally.
*
* Tor uses these token buckets to keep track of bandwidth usage, and
* sometimes other things too.
*
* There are two layers of abstraction here: "raw" token buckets, in which all
* the pieces are decoupled, and "read-write" token buckets, which combine all
* the moving parts into one.
*
 * A token bucket's level may become negative, since decrements are allowed
 * to push it below zero.
**/
#define TOKEN_BUCKET_PRIVATE
#include "common/token_bucket.h"
#include "common/util_bug.h"
/**
* Set the rate and burst value in a token_bucket_cfg.
*
* Note that the rate value is in arbitrary units, but those units will
 * determine the units of token_bucket_raw_dec(),
 * token_bucket_raw_refill_steps(), and
* so on.
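 *
 * For example, the read-write layer below expresses the rate in bytes per
 * refill step, so token_bucket_raw_dec() is given a count of bytes and
 * token_bucket_raw_refill_steps() adds rate bytes for each elapsed step.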
*/
void
token_bucket_cfg_init(token_bucket_cfg_t *cfg,
uint32_t rate,
uint32_t burst)
{
tor_assert_nonfatal(rate > 0);
tor_assert_nonfatal(burst > 0);
if (burst > TOKEN_BUCKET_MAX_BURST)
burst = TOKEN_BUCKET_MAX_BURST;
cfg->rate = rate;
cfg->burst = burst;
}
/**
 * Initialize a raw token bucket to the "full" state, according to cfg.
 * (Resetting any associated timestamp is the caller's responsibility; see
 * token_bucket_rw_reset().)
*/
void
token_bucket_raw_reset(token_bucket_raw_t *bucket,
const token_bucket_cfg_t *cfg)
{
bucket->bucket = cfg->burst;
}
/**
 * Adjust a preexisting token bucket to respect the new configuration
* cfg, by decreasing its current level if needed. */
void
token_bucket_raw_adjust(token_bucket_raw_t *bucket,
const token_bucket_cfg_t *cfg)
{
bucket->bucket = MIN(bucket->bucket, cfg->burst);
}
/**
 * Given a number of elapsed time units and a bucket configuration
* cfg, refill the level of bucket accordingly. Note that the
* units of time in elapsed must correspond to those used to set the
* rate in cfg, or the result will be illogical.
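 *
 * For example, with a cfg of rate 10 and burst 100 (arbitrary units), a
 * bucket sitting at -5 has a gap of 105 tokens to full: an elapsed value of
 * 3 adds 30 tokens (bringing the level to 25), while an elapsed value of 20
 * would overshoot, so the level is simply clamped to the burst of 100.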
 *
 * Return 1 if the bucket was empty (zero or negative) before the refill
 * and is positive afterwards; return 0 otherwise.
 */
int
token_bucket_raw_refill_steps(token_bucket_raw_t *bucket,
const token_bucket_cfg_t *cfg,
const uint32_t elapsed)
{
const int was_empty = (bucket->bucket <= 0);
/* The casts here prevent an underflow.
*
* Note that even if the bucket value is negative, subtracting it from
* "burst" will still produce a correct result. If this result is
* ridiculously high, then the "elapsed > gap / rate" check below
* should catch it. */
const size_t gap = ((size_t)cfg->burst) - ((size_t)bucket->bucket);
if (elapsed > gap / cfg->rate) {
bucket->bucket = cfg->burst;
} else {
bucket->bucket += cfg->rate * elapsed;
}
return was_empty && bucket->bucket > 0;
}
/**
* Decrement a provided bucket by n units. Note that n
 * must be nonnegative.
 *
 * Return 1 if the bucket held a positive number of tokens beforehand and
 * this decrement made the level zero or negative; return 0 otherwise.
*/
int
token_bucket_raw_dec(token_bucket_raw_t *bucket,
ssize_t n)
{
if (BUG(n < 0))
return 0;
const int becomes_empty = bucket->bucket > 0 && n >= bucket->bucket;
bucket->bucket -= n;
return becomes_empty;
}
/** Convert a rate in bytes per second to a rate in bytes per step */
STATIC uint32_t
rate_per_sec_to_rate_per_step(uint32_t rate)
{
/*
The precise calculation we'd want to do is
(rate / 1000) * to_approximate_msec(TICKS_PER_STEP). But to minimize
rounding error, we do it this way instead, and divide last.
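    For instance, under the purely hypothetical assumption that one step
    corresponds to about 100 msec, a rate of 1500 bytes/sec should come out
    to roughly 150 bytes per step; computing (1500 / 1000) * 100 first would
    truncate that to 100 bytes per step.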
*/
uint64_t units = (uint64_t) rate * TICKS_PER_STEP;
uint32_t val = (uint32_t)
(monotime_coarse_stamp_units_to_approx_msec(units) / 1000);
return val ? val : 1;
}
/**
* Initialize a token bucket in *bucket, set up to allow rate
* bytes per second, with a maximum burst of burst bytes. The bucket
* is created such that now_ts is the current timestamp. The bucket
* starts out full.
*/
void
token_bucket_rw_init(token_bucket_rw_t *bucket,
uint32_t rate,
uint32_t burst,
uint32_t now_ts)
{
memset(bucket, 0, sizeof(token_bucket_rw_t));
token_bucket_rw_adjust(bucket, rate, burst);
token_bucket_rw_reset(bucket, now_ts);
}
/**
* Change the configured rate (in bytes per second) and burst (in bytes)
* for the token bucket in *bucket.
*/
void
token_bucket_rw_adjust(token_bucket_rw_t *bucket,
uint32_t rate,
uint32_t burst)
{
token_bucket_cfg_init(&bucket->cfg,
rate_per_sec_to_rate_per_step(rate),
burst);
token_bucket_raw_adjust(&bucket->read_bucket, &bucket->cfg);
token_bucket_raw_adjust(&bucket->write_bucket, &bucket->cfg);
}
/**
* Reset bucket to be full, as of timestamp now_ts.
*/
void
token_bucket_rw_reset(token_bucket_rw_t *bucket,
uint32_t now_ts)
{
token_bucket_raw_reset(&bucket->read_bucket, &bucket->cfg);
token_bucket_raw_reset(&bucket->write_bucket, &bucket->cfg);
bucket->last_refilled_at_timestamp = now_ts;
}
/**
* Refill bucket as appropriate, given that the current timestamp
* is now_ts.
*
 * Return a bitmask containing TB_READ iff the read bucket was empty and became
* nonempty, and TB_WRITE iff the write bucket was empty and became nonempty.
*/
int
token_bucket_rw_refill(token_bucket_rw_t *bucket,
uint32_t now_ts)
{
const uint32_t elapsed_ticks =
(now_ts - bucket->last_refilled_at_timestamp);
if (elapsed_ticks > UINT32_MAX-(300*1000)) {
/* Either about 48 days have passed since the last refill, or the
* monotonic clock has somehow moved backwards. (We're looking at you,
 * Windows.) We accept up to a 5-minute jump backwards as
* "unremarkable".
*/
return 0;
}
const uint32_t elapsed_steps = elapsed_ticks / TICKS_PER_STEP;
if (!elapsed_steps) {
/* Note that if less than one whole step elapsed, we don't advance the
 * time in last_refilled_at_timestamp. That's intentional: it ensures that
 * the partial step is still credited to the bucket by a later refill. */
return 0;
}
int flags = 0;
if (token_bucket_raw_refill_steps(&bucket->read_bucket,
&bucket->cfg, elapsed_steps))
flags |= TB_READ;
if (token_bucket_raw_refill_steps(&bucket->write_bucket,
&bucket->cfg, elapsed_steps))
flags |= TB_WRITE;
bucket->last_refilled_at_timestamp = now_ts;
return flags;
}
/**
* Decrement the read token bucket in bucket by n bytes.
*
* Return true if the bucket was nonempty and became empty; return false
* otherwise.
*/
int
token_bucket_rw_dec_read(token_bucket_rw_t *bucket,
ssize_t n)
{
return token_bucket_raw_dec(&bucket->read_bucket, n);
}
/**
* Decrement the write token bucket in bucket by n bytes.
*
* Return true if the bucket was nonempty and became empty; return false
* otherwise.
*/
int
token_bucket_rw_dec_write(token_bucket_rw_t *bucket,
ssize_t n)
{
return token_bucket_raw_dec(&bucket->write_bucket, n);
}
/**
* As token_bucket_rw_dec_read and token_bucket_rw_dec_write, in a single
* operation. Return a bitmask of TB_READ and TB_WRITE to indicate
* which buckets became empty.
*/
int
token_bucket_rw_dec(token_bucket_rw_t *bucket,
ssize_t n_read, ssize_t n_written)
{
int flags = 0;
if (token_bucket_rw_dec_read(bucket, n_read))
flags |= TB_READ;
if (token_bucket_rw_dec_write(bucket, n_written))
flags |= TB_WRITE;
return flags;
}