Remove bufferevents dead code

Signed-off-by: U+039b <*@0x39b.fr>
This commit is contained in:
U+039b 2016-07-14 18:46:37 +02:00
parent 3ac434104a
commit c735220a0b
No known key found for this signature in database
GPG Key ID: E5640E6E5683039D
9 changed files with 3 additions and 1129 deletions

View File

@ -914,97 +914,6 @@ fetch_var_cell_from_buf(buf_t *buf, var_cell_t **out, int linkproto)
return 1;
}
#ifdef USE_BUFFEREVENTS
/** Try to read <b>n</b> bytes from <b>buf</b> at <b>pos</b> (which may be
 * NULL for the start of the buffer), copying the data only if necessary. Set
 * *<b>data_out</b> to a pointer to the desired bytes. Set <b>free_out</b>
 * to 1 if we needed to malloc *<b>data</b> because the original bytes were
 * noncontiguous; 0 otherwise. Return the number of bytes actually available
 * at *<b>data_out</b>.
 */
static ssize_t
inspect_evbuffer(struct evbuffer *buf, char **data_out, size_t n,
                 int *free_out, struct evbuffer_ptr *pos)
{
  const size_t buffered = evbuffer_get_length(buf);
  int vec_count;

  /* Never ask for more bytes than the buffer actually holds. */
  if (n > buffered)
    n = buffered;
  if (n == 0)
    return 0;

  /* First, just count how many iovecs the requested range spans. */
  vec_count = evbuffer_peek(buf, n, pos, NULL, 0);
  tor_assert(vec_count > 0);

  if (vec_count > 1) {
    /* Noncontiguous: linearize the range into a fresh heap copy that the
     * caller must free. */
    ev_ssize_t n_copied;
    *free_out = 1;
    *data_out = tor_malloc(n);
    n_copied = evbuffer_copyout(buf, *data_out, n);
    tor_assert(n_copied >= 0 && (size_t)n_copied == n);
    return n_copied;
  } else {
    /* One contiguous extent: hand back a pointer straight into the buffer;
     * nothing to free. */
    struct evbuffer_iovec chunk;
    int r = evbuffer_peek(buf, n, pos, &chunk, 1);
    tor_assert(r == 1);
    *free_out = 0;
    *data_out = chunk.iov_base;
    return chunk.iov_len;
  }
}
/** As fetch_var_cell_from_buf, buf works on an evbuffer. */
int
fetch_var_cell_from_evbuffer(struct evbuffer *buf, var_cell_t **out,
                             int linkproto)
{
  char *hdr = NULL;
  int free_hdr = 0;     /* nonzero if inspect_evbuffer() allocated hdr */
  size_t n;
  size_t buf_len;
  uint8_t command;
  uint16_t cell_length;
  var_cell_t *cell;
  int result = 0;
  /* Newer link protocols use 4-byte circuit IDs, which changes both the
   * header layout and its total length. */
  const int wide_circ_ids = linkproto >= MIN_LINK_PROTO_FOR_WIDE_CIRC_IDS;
  const int circ_id_len = get_circ_id_size(wide_circ_ids);
  const unsigned header_len = get_var_cell_header_size(wide_circ_ids);
  *out = NULL;
  buf_len = evbuffer_get_length(buf);
  /* Not even a full header yet: wait for more data. */
  if (buf_len < header_len)
    return 0;
  /* Peek at the header, copying only if it is noncontiguous. */
  n = inspect_evbuffer(buf, &hdr, header_len, &free_hdr, NULL);
  tor_assert(n >= header_len);
  command = get_uint8(hdr + circ_id_len);
  if (!(cell_command_is_var_length(command, linkproto))) {
    /* Fixed-length cell: not ours to parse here. */
    goto done;
  }
  /* The two bytes after the command hold the payload length, big-endian. */
  cell_length = ntohs(get_uint16(hdr + circ_id_len + 1));
  if (buf_len < (size_t)(header_len+cell_length)) {
    result = 1; /* Not all here yet. */
    goto done;
  }
  cell = var_cell_new(cell_length);
  cell->command = command;
  if (wide_circ_ids)
    cell->circ_id = ntohl(get_uint32(hdr));
  else
    cell->circ_id = ntohs(get_uint16(hdr));
  /* Consume the header, then move the payload out into the new cell. */
  evbuffer_drain(buf, header_len);
  evbuffer_remove(buf, cell->payload, cell_length);
  *out = cell;
  result = 1;
 done:
  if (free_hdr && hdr)
    tor_free(hdr);
  return result;
}
#endif
/** Move up to *<b>buf_flushlen</b> bytes from <b>buf_in</b> to
* <b>buf_out</b>, and modify *<b>buf_flushlen</b> appropriately.
* Return the number of bytes actually copied.
@ -1247,94 +1156,6 @@ fetch_from_buf_http(buf_t *buf,
return 1;
}
#ifdef USE_BUFFEREVENTS
/** As fetch_from_buf_http, buf works on an evbuffer. */
int
fetch_from_evbuffer_http(struct evbuffer *buf,
                         char **headers_out, size_t max_headerlen,
                         char **body_out, size_t *body_used, size_t max_bodylen,
                         int force_complete)
{
  struct evbuffer_ptr crlf, content_length;
  size_t headerlen, bodylen, contentlen;
  /* Find the first \r\n\r\n in the buffer */
  crlf = evbuffer_search(buf, "\r\n\r\n", 4, NULL);
  if (crlf.pos < 0) {
    /* We didn't find one. */
    if (evbuffer_get_length(buf) > max_headerlen)
      return -1; /* Headers too long. */
    return 0; /* Headers not here yet. */
  } else if (crlf.pos > (int)max_headerlen) {
    return -1; /* Headers too long. */
  }
  headerlen = crlf.pos + 4; /* Skip over the \r\n\r\n */
  bodylen = evbuffer_get_length(buf) - headerlen;
  if (bodylen > max_bodylen)
    return -1; /* body too long */
  /* Look for the first occurrence of CONTENT_LENGTH inside buf before the
   * crlfcrlf */
  content_length = evbuffer_search_range(buf, CONTENT_LENGTH,
                                         strlen(CONTENT_LENGTH), NULL, &crlf);
  if (content_length.pos >= 0) {
    /* We found a content_length: parse it and figure out if the body is here
     * yet. */
    struct evbuffer_ptr eol;
    char *data = NULL;
    int free_data = 0;
    int n, i;
    /* Step past the "Content-Length: " tag itself... */
    n = evbuffer_ptr_set(buf, &content_length, strlen(CONTENT_LENGTH),
                         EVBUFFER_PTR_ADD);
    tor_assert(n == 0);
    /* ...so that [content_length, eol) brackets just the numeric value. */
    eol = evbuffer_search_eol(buf, &content_length, NULL, EVBUFFER_EOL_CRLF);
    tor_assert(eol.pos > content_length.pos);
    tor_assert(eol.pos <= crlf.pos);
    inspect_evbuffer(buf, &data, eol.pos - content_length.pos, &free_data,
                     &content_length);
    i = atoi(data);
    if (free_data)
      tor_free(data);
    if (i < 0) {
      log_warn(LD_PROTOCOL, "Content-Length is less than zero; it looks like "
               "someone is trying to crash us.");
      return -1;
    }
    contentlen = i;
    /* if content-length is malformed, then our body length is 0. fine. */
    log_debug(LD_HTTP,"Got a contentlen of %d.",(int)contentlen);
    if (bodylen < contentlen) {
      if (!force_complete) {
        log_debug(LD_HTTP,"body not all here yet.");
        return 0; /* not all there yet */
      }
    }
    if (bodylen > contentlen) {
      /* Ignore any bytes beyond the declared Content-Length. */
      bodylen = contentlen;
      log_debug(LD_HTTP,"bodylen reduced to %d.",(int)bodylen);
    }
  }
  if (headers_out) {
    /* NUL-terminated copy of the header block, drained from the buffer. */
    *headers_out = tor_malloc(headerlen+1);
    evbuffer_remove(buf, *headers_out, headerlen);
    (*headers_out)[headerlen] = '\0';
  }
  if (body_out) {
    tor_assert(headers_out);
    tor_assert(body_used);
    *body_used = bodylen;
    *body_out = tor_malloc(bodylen+1);
    evbuffer_remove(buf, *body_out, bodylen);
    (*body_out)[bodylen] = '\0';
  }
  return 1;
}
#endif
/**
* Wait this many seconds before warning the user about using SOCKS unsafely
* again (requires that WarnUnsafeSocks is turned on). */
@ -1454,86 +1275,6 @@ fetch_from_buf_socks(buf_t *buf, socks_request_t *req,
return res;
}
#ifdef USE_BUFFEREVENTS
/** As fetch_from_buf_socks(), but targets an evbuffer instead. */
int
fetch_from_evbuffer_socks(struct evbuffer *buf, socks_request_t *req,
                          int log_sockstype, int safe_socks)
{
  char *data;
  ssize_t n_drain;
  size_t datalen, buflen, want_length;
  int res;
  buflen = evbuffer_get_length(buf);
  if (buflen < 2)
    return 0;
  {
    /* See if we can find the socks request in the first chunk of the buffer.
     */
    struct evbuffer_iovec v;
    int i;
    n_drain = 0;
    /* Peek at the first extent only (count 1); no copy is made. */
    i = evbuffer_peek(buf, -1, NULL, &v, 1);
    tor_assert(i == 1);
    data = v.iov_base;
    datalen = v.iov_len;
    want_length = 0;
    res = parse_socks(data, datalen, req, log_sockstype,
                      safe_socks, &n_drain, &want_length);
    /* n_drain < 0 means "discard the whole buffer". */
    if (n_drain < 0)
      evbuffer_drain(buf, evbuffer_get_length(buf));
    else if (n_drain > 0)
      evbuffer_drain(buf, n_drain);
    if (res)
      return res;
  }
  /* Okay, the first chunk of the buffer didn't have a complete socks request.
   * That means that either we don't have a whole socks request at all, or
   * it's gotten split up. We're going to try passing parse_socks() bigger
   * and bigger chunks until either it says "Okay, I got it", or it says it
   * will need more data than we currently have. */
  /* Loop while we have more data that we haven't given parse_socks() yet. */
  do {
    int free_data = 0;
    const size_t last_wanted = want_length;
    n_drain = 0;
    data = NULL;
    datalen = inspect_evbuffer(buf, &data, want_length, &free_data, NULL);
    want_length = 0;
    res = parse_socks(data, datalen, req, log_sockstype,
                      safe_socks, &n_drain, &want_length);
    if (free_data)
      tor_free(data);
    if (n_drain < 0)
      evbuffer_drain(buf, evbuffer_get_length(buf));
    else if (n_drain > 0)
      evbuffer_drain(buf, n_drain);
    if (res == 0 && n_drain == 0 && want_length <= last_wanted) {
      /* If we drained nothing, and we didn't ask for more than last time,
       * then we probably wanted more data than the buffer actually had,
       * and we're finding out that we're not satisfied with it. It's
       * time to break until we have more data. */
      break;
    }
    buflen = evbuffer_get_length(buf);
  } while (res == 0 && want_length <= buflen && buflen >= 2);
  return res;
}
#endif
/** The size of the header of an Extended ORPort message: 2 bytes for
* COMMAND, 2 bytes for BODYLEN */
#define EXT_OR_CMD_HEADER_SIZE 4
@ -1564,34 +1305,6 @@ fetch_ext_or_command_from_buf(buf_t *buf, ext_or_cmd_t **out)
return 1;
}
#ifdef USE_BUFFEREVENTS
/** Read <b>buf</b>, which should contain an Extended ORPort message
 * from a transport proxy. If well-formed, create and populate
 * <b>out</b> with the Extended ORport message. Return 0 if the
 * buffer was incomplete, 1 if it was well-formed and -1 if we
 * encountered an error while parsing it. */
int
fetch_ext_or_command_from_evbuffer(struct evbuffer *buf, ext_or_cmd_t **out)
{
  char header[EXT_OR_CMD_HEADER_SIZE];
  uint16_t body_len;
  const size_t available = evbuffer_get_length(buf);

  /* Wait until the fixed-size header (2-byte COMMAND + 2-byte BODYLEN)
   * has arrived. */
  if (available < EXT_OR_CMD_HEADER_SIZE)
    return 0;
  evbuffer_copyout(buf, header, EXT_OR_CMD_HEADER_SIZE);

  /* Wait until the declared body is fully buffered as well. */
  body_len = ntohs(get_uint16(header+2));
  if (available < (unsigned)body_len + EXT_OR_CMD_HEADER_SIZE)
    return 0;

  *out = ext_or_cmd_new(body_len);
  (*out)->cmd = ntohs(get_uint16(header));
  (*out)->len = body_len;
  evbuffer_drain(buf, EXT_OR_CMD_HEADER_SIZE);
  evbuffer_remove(buf, (*out)->body, body_len);
  return 1;
}
#endif
/** Create a SOCKS5 reply message with <b>reason</b> in its REP field and
* have Tor send it as error response to <b>req</b>.
*/
@ -2036,34 +1749,6 @@ fetch_from_buf_socks_client(buf_t *buf, int state, char **reason)
return r;
}
#ifdef USE_BUFFEREVENTS
/** As fetch_from_buf_socks_client, buf works on an evbuffer */
int
fetch_from_evbuffer_socks_client(struct evbuffer *buf, int state,
                                 char **reason)
{
  uint8_t *linear;
  size_t len;
  ssize_t n_to_drain = 0;
  int result;

  /* Linearize the SOCKS response in the buffer, up to 128 bytes.
   * (parse_socks_client shouldn't need to see anything beyond that.) */
  len = evbuffer_get_length(buf);
  if (len > MAX_SOCKS_MESSAGE_LEN)
    len = MAX_SOCKS_MESSAGE_LEN;
  linear = evbuffer_pullup(buf, len);

  result = parse_socks_client(linear, len, state, reason, &n_to_drain);
  if (n_to_drain < 0) {
    /* A negative drain request means "discard everything". */
    evbuffer_drain(buf, evbuffer_get_length(buf));
  } else if (n_to_drain > 0) {
    evbuffer_drain(buf, n_to_drain);
  }
  return result;
}
#endif
/** Implementation logic for fetch_from_*_socks_client. */
static int
parse_socks_client(const uint8_t *data, size_t datalen,
@ -2194,27 +1879,6 @@ peek_buf_has_control0_command(buf_t *buf)
return 0;
}
#ifdef USE_BUFFEREVENTS
int
peek_evbuffer_has_control0_command(struct evbuffer *buf)
{
  /* A v0 control message starts with a 2-byte length and a 2-byte command;
   * v0 command values were all <= 0x14. Need at least 4 bytes to decide. */
  if (evbuffer_get_length(buf) < 4)
    return 0;
  {
    char *peeked = NULL;
    int must_free = 0;
    uint16_t command;
    int answer;
    size_t got = inspect_evbuffer(buf, &peeked, 4, &must_free, NULL);
    tor_assert(got >= 4);
    command = ntohs(get_uint16(peeked+2));
    answer = (command <= 0x14) ? 1 : 0;
    if (must_free)
      tor_free(peeked);
    return answer;
  }
}
#endif
/** Return the index within <b>buf</b> at which <b>ch</b> first appears,
* or -1 if <b>ch</b> does not appear on buf. */
static off_t
@ -2312,93 +1976,14 @@ write_to_buf_zlib(buf_t *buf, tor_zlib_state_t *state,
return 0;
}
#ifdef USE_BUFFEREVENTS
/** As write_to_buf_zlib, but appends the compressed/decompressed output of
 * <b>state</b> applied to the <b>data_len</b> bytes at <b>data</b> onto
 * evbuffer <b>buf</b>. If <b>done</b> is true, finish the stream. Return 0
 * on success, -1 on a zlib error. */
int
write_to_evbuffer_zlib(struct evbuffer *buf, tor_zlib_state_t *state,
                       const char *data, size_t data_len,
                       int done)
{
  char *next;
  size_t old_avail, avail;
  int over = 0, n;
  struct evbuffer_iovec vec[1];
  do {
    {
      /* Reserve an output region of at least 128 bytes, scaled with the
       * input size. */
      size_t cap = data_len / 4;
      if (cap < 128)
        cap = 128;
      /* XXXX NM this strategy is fragmentation-prone. We should really have
       * two iovecs, and write first into the one, and then into the
       * second if the first gets full. */
      n = evbuffer_reserve_space(buf, cap, vec, 1);
      tor_assert(n == 1);
    }
    next = vec[0].iov_base;
    avail = old_avail = vec[0].iov_len;
    switch (tor_zlib_process(state, &next, &avail, &data, &data_len, done)) {
      case TOR_ZLIB_DONE:
        over = 1;
        break;
      case TOR_ZLIB_ERR:
        return -1;
      case TOR_ZLIB_OK:
        if (data_len == 0)
          over = 1;
        break;
      case TOR_ZLIB_BUF_FULL:
        if (avail) {
          /* Zlib says we need more room (ZLIB_BUF_FULL). Start a new chunk
           * automatically, whether we're going to or not. */
        }
        break;
    }
    /* XXXX possible infinite loop on BUF_FULL. */
    /* Commit only the bytes zlib actually produced. */
    vec[0].iov_len = old_avail - avail;
    evbuffer_commit_space(buf, vec, 1);
  } while (!over);
  check();
  return 0;
}
#endif
/** Set *<b>output</b> to contain a copy of the data in *<b>input</b> */
int
generic_buffer_set_to_copy(generic_buffer_t **output,
                           const generic_buffer_t *input)
{
#ifdef USE_BUFFEREVENTS
  /* Evbuffer variant: clear (or create) *output, then walk input's extents
   * and append each one. */
  struct evbuffer_ptr ptr;
  size_t remaining = evbuffer_get_length(input);
  if (*output) {
    evbuffer_drain(*output, evbuffer_get_length(*output));
  } else {
    if (!(*output = evbuffer_new()))
      return -1;
  }
  /* Casts drop const: the evbuffer peek/ptr APIs take non-const buffers
   * even though we only read from input. */
  evbuffer_ptr_set((struct evbuffer*)input, &ptr, 0, EVBUFFER_PTR_SET);
  while (remaining) {
    struct evbuffer_iovec v[4];
    int n_used, i;
    /* Peek up to 4 extents at a time starting at ptr. */
    n_used = evbuffer_peek((struct evbuffer*)input, -1, &ptr, v, 4);
    if (n_used < 0)
      return -1;
    for (i=0;i<n_used;++i) {
      evbuffer_add(*output, v[i].iov_base, v[i].iov_len);
      tor_assert(v[i].iov_len <= remaining);
      remaining -= v[i].iov_len;
      /* Advance ptr past the extent we just copied. */
      evbuffer_ptr_set((struct evbuffer*)input,
                       &ptr, v[i].iov_len, EVBUFFER_PTR_ADD);
    }
  }
#else
  /* buf_t variant: discard any old *output and deep-copy input. */
  if (*output)
    buf_free(*output);
  *output = buf_copy(input);
#endif
  return 0;
}

View File

@ -56,35 +56,6 @@ int peek_buf_has_control0_command(buf_t *buf);
int fetch_ext_or_command_from_buf(buf_t *buf, ext_or_cmd_t **out);
#ifdef USE_BUFFEREVENTS
int fetch_var_cell_from_evbuffer(struct evbuffer *buf, var_cell_t **out,
int linkproto);
int fetch_from_evbuffer_socks(struct evbuffer *buf, socks_request_t *req,
int log_sockstype, int safe_socks);
int fetch_from_evbuffer_socks_client(struct evbuffer *buf, int state,
char **reason);
int fetch_from_evbuffer_http(struct evbuffer *buf,
char **headers_out, size_t max_headerlen,
char **body_out, size_t *body_used, size_t max_bodylen,
int force_complete);
int peek_evbuffer_has_control0_command(struct evbuffer *buf);
int write_to_evbuffer_zlib(struct evbuffer *buf, tor_zlib_state_t *state,
const char *data, size_t data_len,
int done);
int fetch_ext_or_command_from_evbuffer(struct evbuffer *buf,
ext_or_cmd_t **out);
#endif
#ifdef USE_BUFFEREVENTS
#define generic_buffer_new() evbuffer_new()
#define generic_buffer_len(b) evbuffer_get_length((b))
#define generic_buffer_add(b,dat,len) evbuffer_add((b),(dat),(len))
#define generic_buffer_get(b,buf,buflen) evbuffer_remove((b),(buf),(buflen))
#define generic_buffer_clear(b) evbuffer_drain((b), evbuffer_get_length((b)))
#define generic_buffer_free(b) evbuffer_free((b))
#define generic_buffer_fetch_ext_or_cmd(b, out) \
fetch_ext_or_command_from_evbuffer((b), (out))
#else
#define generic_buffer_new() buf_new()
#define generic_buffer_len(b) buf_datalen((b))
#define generic_buffer_add(b,dat,len) write_to_buf((dat),(len),(b))
@ -93,7 +64,6 @@ int fetch_ext_or_command_from_evbuffer(struct evbuffer *buf,
#define generic_buffer_free(b) buf_free((b))
#define generic_buffer_fetch_ext_or_cmd(b, out) \
fetch_ext_or_command_from_buf((b), (out))
#endif
int generic_buffer_set_to_copy(generic_buffer_t **output,
const generic_buffer_t *input);

View File

@ -1674,17 +1674,6 @@ options_act(const or_options_t *old_options)
if (accounting_is_enabled(options))
configure_accounting(time(NULL));
#ifdef USE_BUFFEREVENTS
/* If we're using the bufferevents implementation and our rate limits
* changed, we need to tell the rate-limiting system about it. */
if (!old_options ||
old_options->BandwidthRate != options->BandwidthRate ||
old_options->BandwidthBurst != options->BandwidthBurst ||
old_options->RelayBandwidthRate != options->RelayBandwidthRate ||
old_options->RelayBandwidthBurst != options->RelayBandwidthBurst)
connection_bucket_init();
#endif
old_ewma_enabled = cell_ewma_enabled();
/* Change the cell EWMA settings */
cell_ewma_set_scale_factor(options, networkstatus_get_latest_consensus());

View File

@ -52,10 +52,6 @@
#include "sandbox.h"
#include "transports.h"
#ifdef USE_BUFFEREVENTS
#include <event2/event.h>
#endif
#ifdef HAVE_PWD_H
#include <pwd.h>
#endif
@ -75,10 +71,8 @@ static void connection_init(time_t now, connection_t *conn, int type,
static int connection_init_accepted_conn(connection_t *conn,
const listener_connection_t *listener);
static int connection_handle_listener_read(connection_t *conn, int new_type);
#ifndef USE_BUFFEREVENTS
static int connection_bucket_should_increase(int bucket,
or_connection_t *conn);
#endif
static int connection_finished_flushing(connection_t *conn);
static int connection_flushed_some(connection_t *conn);
static int connection_finished_connecting(connection_t *conn);
@ -236,26 +230,6 @@ conn_state_to_string(int type, int state)
return buf;
}
#ifdef USE_BUFFEREVENTS
/** Return true iff the connection's type is one that can use a
    bufferevent-based implementation. */
int
connection_type_uses_bufferevent(connection_t *conn)
{
  /* Exactly these six connection types are bufferevent-capable; listeners
   * and everything else are not. */
  const int t = conn->type;
  return (t == CONN_TYPE_AP ||
          t == CONN_TYPE_EXIT ||
          t == CONN_TYPE_DIR ||
          t == CONN_TYPE_CONTROL ||
          t == CONN_TYPE_OR ||
          t == CONN_TYPE_EXT_OR) ? 1 : 0;
}
#endif
/** Allocate and return a new dir_connection_t, initialized as by
* connection_init(). */
dir_connection_t *
@ -427,13 +401,11 @@ connection_init(time_t now, connection_t *conn, int type, int socket_family)
conn->type = type;
conn->socket_family = socket_family;
#ifndef USE_BUFFEREVENTS
if (!connection_is_listener(conn)) {
/* listeners never use their buf */
conn->inbuf = buf_new();
conn->outbuf = buf_new();
}
#endif
conn->timestamp_created = now;
conn->timestamp_lastread = now;
@ -645,13 +617,6 @@ connection_free_(connection_t *conn)
tor_free(TO_OR_CONN(conn)->ext_or_transport);
}
#ifdef USE_BUFFEREVENTS
if (conn->type == CONN_TYPE_OR && TO_OR_CONN(conn)->bucket_cfg) {
ev_token_bucket_cfg_free(TO_OR_CONN(conn)->bucket_cfg);
TO_OR_CONN(conn)->bucket_cfg = NULL;
}
#endif
memwipe(mem, 0xCC, memlen); /* poison memory */
tor_free(mem);
}
@ -2694,21 +2659,15 @@ connection_is_rate_limited(connection_t *conn)
return 1;
}
#ifdef USE_BUFFEREVENTS
static struct bufferevent_rate_limit_group *global_rate_limit = NULL;
#else
/** Did either global write bucket run dry last second? If so,
* we are likely to run dry again this second, so be stingy with the
* tokens we just put in. */
static int write_buckets_empty_last_second = 0;
#endif
/** How many seconds of no active local circuits will make the
* connection revert to the "relayed" bandwidth class? */
#define CLIENT_IDLE_TIME_FOR_PRIORITY 30
#ifndef USE_BUFFEREVENTS
/** Return 1 if <b>conn</b> should use tokens from the "relayed"
* bandwidth rates, else 0. Currently, only OR conns with bandwidth
* class 1, and directory conns that are serving data out, count.
@ -2820,20 +2779,6 @@ connection_bucket_write_limit(connection_t *conn, time_t now)
return connection_bucket_round_robin(base, priority,
global_bucket, conn_bucket);
}
#else
/** Bufferevent variant: delegate the per-connection read limit to
 * libevent's rate-limiting machinery; <b>now</b> is unused. */
static ssize_t
connection_bucket_read_limit(connection_t *conn, time_t now)
{
  (void) now;
  return bufferevent_get_max_to_read(conn->bufev);
}
/** Bufferevent variant: delegate the per-connection write limit to
 * libevent's rate-limiting machinery; <b>now</b> is unused. */
ssize_t
connection_bucket_write_limit(connection_t *conn, time_t now)
{
  (void) now;
  return bufferevent_get_max_to_write(conn->bufev);
}
#endif
/** Return 1 if the global write buckets are low enough that we
* shouldn't send <b>attempt</b> bytes of low-priority directory stuff
@ -2857,12 +2802,8 @@ connection_bucket_write_limit(connection_t *conn, time_t now)
int
global_write_bucket_low(connection_t *conn, size_t attempt, int priority)
{
#ifdef USE_BUFFEREVENTS
ssize_t smaller_bucket = bufferevent_get_max_to_write(conn->bufev);
#else
int smaller_bucket = global_write_bucket < global_relayed_write_bucket ?
global_write_bucket : global_relayed_write_bucket;
#endif
if (authdir_mode(get_options()) && priority>1)
return 0; /* there's always room to answer v2 if we're an auth dir */
@ -2872,10 +2813,8 @@ global_write_bucket_low(connection_t *conn, size_t attempt, int priority)
if (smaller_bucket < (int)attempt)
return 1; /* not enough space no matter the priority */
#ifndef USE_BUFFEREVENTS
if (write_buckets_empty_last_second)
return 1; /* we're already hitting our limits, no more please */
#endif
if (priority == 1) { /* old-style v1 query */
/* Could we handle *two* of these requests within the next two seconds? */
@ -2923,29 +2862,6 @@ record_num_bytes_transferred_impl(connection_t *conn,
rep_hist_note_exit_bytes(conn->port, num_written, num_read);
}
#ifdef USE_BUFFEREVENTS
/** Wrapper around record_num_bytes_transferred_impl(): log and clamp
 * <b>num_read</b>/<b>num_written</b> values that do not fit in an int
 * before recording them. (The previous comment here, referring to the
 * socks_client fetch functions, was a copy-and-paste error.) */
static void
record_num_bytes_transferred(connection_t *conn,
                             time_t now, size_t num_read, size_t num_written)
{
  /* XXXX check if this is necessary */
  if (num_written >= INT_MAX || num_read >= INT_MAX) {
    log_err(LD_BUG, "Value out of range. num_read=%lu, num_written=%lu, "
            "connection type=%s, state=%s",
            (unsigned long)num_read, (unsigned long)num_written,
            conn_type_to_string(conn->type),
            conn_state_to_string(conn->type, conn->state));
    if (num_written >= INT_MAX) num_written = 1;
    if (num_read >= INT_MAX) num_read = 1;
    tor_fragile_assert();
  }
  record_num_bytes_transferred_impl(conn,now,num_read,num_written);
}
#endif
/** Helper: convert given <b>tvnow</b> time value to milliseconds since
* midnight. */
static uint32_t
@ -2990,7 +2906,6 @@ connection_buckets_note_empty_ts(uint32_t *timestamp_var,
*timestamp_var = msec_since_midnight(tvnow);
}
#ifndef USE_BUFFEREVENTS
/** Last time at which the global or relay buckets were emptied in msec
* since midnight. */
static uint32_t global_relayed_read_emptied = 0,
@ -3321,92 +3236,6 @@ connection_bucket_should_increase(int bucket, or_connection_t *conn)
return 1;
}
#else
/** Bufferevent variant: deliberately a no-op, since libevent's rate
 * limiting accounts for transferred bytes itself. */
static void
connection_buckets_decrement(connection_t *conn, time_t now,
                             size_t num_read, size_t num_written)
{
  (void) conn;
  (void) now;
  (void) num_read;
  (void) num_written;
  /* Libevent does this for us. */
}
/** Bufferevent variant: deliberately a no-op, since libevent refills its
 * own token buckets. */
void
connection_bucket_refill(int seconds_elapsed, time_t now)
{
  (void) seconds_elapsed;
  (void) now;
  /* Libevent does this for us. */
}
/** Create or reconfigure the global bufferevent rate-limit group from the
 * current bandwidth options. */
void
connection_bucket_init(void)
{
  const or_options_t *options = get_options();
  const struct timeval *tick = tor_libevent_get_one_tick_timeout();
  struct ev_token_bucket_cfg *bucket_cfg;
  uint64_t rate, burst;
  /* Relay-specific limits, when configured, take precedence over the
   * general ones. */
  if (options->RelayBandwidthRate) {
    rate = options->RelayBandwidthRate;
    burst = options->RelayBandwidthBurst;
  } else {
    rate = options->BandwidthRate;
    burst = options->BandwidthBurst;
  }
  /* This can't overflow, since TokenBucketRefillInterval <= 1000,
   * and rate started out less than INT32_MAX. */
  rate = (rate * options->TokenBucketRefillInterval) / 1000;
  bucket_cfg = ev_token_bucket_cfg_new((uint32_t)rate, (uint32_t)burst,
                                       (uint32_t)rate, (uint32_t)burst,
                                       tick);
  if (!global_rate_limit) {
    global_rate_limit =
      bufferevent_rate_limit_group_new(tor_libevent_get_base(), bucket_cfg);
  } else {
    bufferevent_rate_limit_group_set_cfg(global_rate_limit, bucket_cfg);
  }
  /* Freed here on the assumption that the group no longer needs our cfg
   * after configuration — NOTE(review): confirm against libevent docs. */
  ev_token_bucket_cfg_free(bucket_cfg);
}
/** Set *<b>read_out</b> and *<b>written_out</b> to the total bytes read and
 * written by the global rate-limit group, or to 0 if no group exists yet. */
void
connection_get_rate_limit_totals(uint64_t *read_out, uint64_t *written_out)
{
  if (global_rate_limit == NULL) {
    *read_out = *written_out = 0;
  } else {
    bufferevent_rate_limit_group_get_totals(
      global_rate_limit, read_out, written_out);
  }
}
/** Perform whatever operations are needed on <b>conn</b> to enable
 * rate-limiting. */
void
connection_enable_rate_limiting(connection_t *conn)
{
  if (conn->bufev) {
    /* Lazily create the global group on first use. */
    if (!global_rate_limit)
      connection_bucket_init();
    tor_add_bufferevent_to_rate_limit_group(conn->bufev, global_rate_limit);
  }
}
/** Bufferevent variant: no-op; empty-bucket bookkeeping is only needed by
 * the non-bufferevent implementation. */
static void
connection_consider_empty_write_buckets(connection_t *conn)
{
  (void) conn;
}
/** Bufferevent variant: no-op; empty-bucket bookkeeping is only needed by
 * the non-bufferevent implementation. */
static void
connection_consider_empty_read_buckets(connection_t *conn)
{
  (void) conn;
}
#endif
/** Read bytes from conn-\>s and process them.
*
@ -3737,161 +3566,6 @@ connection_read_to_buf(connection_t *conn, ssize_t *max_to_read,
return 0;
}
#ifdef USE_BUFFEREVENTS
/* XXXX These generic versions could be simplified by making them
type-specific */
/** Callback: Invoked whenever bytes are added to or drained from an input
 * evbuffer. Used to track the number of bytes read. */
static void
evbuffer_inbuf_callback(struct evbuffer *buf,
                        const struct evbuffer_cb_info *info, void *arg)
{
  connection_t *conn = arg;
  (void) buf;
  /* XXXX These need to get real counts on the non-nested TLS case. - NM */
  if (info->n_added) {
    /* Bytes arrived: update the read timestamp and accounting. */
    time_t now = approx_time();
    conn->timestamp_lastread = now;
    record_num_bytes_transferred(conn, now, info->n_added, 0);
    connection_consider_empty_read_buckets(conn);
    if (conn->type == CONN_TYPE_AP) {
      edge_connection_t *edge_conn = TO_EDGE_CONN(conn);
      /*XXXX++ check for overflow*/
      edge_conn->n_read += (int)info->n_added;
    }
  }
}
/** Callback: Invoked whenever bytes are added to or drained from an output
 * evbuffer. Used to track the number of bytes written. */
static void
evbuffer_outbuf_callback(struct evbuffer *buf,
                         const struct evbuffer_cb_info *info, void *arg)
{
  connection_t *conn = arg;
  (void)buf;
  if (info->n_deleted) {
    /* Bytes were flushed out: update the write timestamp and accounting. */
    time_t now = approx_time();
    conn->timestamp_lastwritten = now;
    record_num_bytes_transferred(conn, now, 0, info->n_deleted);
    connection_consider_empty_write_buckets(conn);
    if (conn->type == CONN_TYPE_AP) {
      edge_connection_t *edge_conn = TO_EDGE_CONN(conn);
      /*XXXX++ check for overflow*/
      edge_conn->n_written += (int)info->n_deleted;
    }
  }
}
/** Callback: invoked whenever a bufferevent has read data. */
void
connection_handle_read_cb(struct bufferevent *bufev, void *arg)
{
  connection_t *conn = arg;
  (void) bufev;
  if (!conn->marked_for_close) {
    /* Process the new input; mark the connection for close on error
     * (unless something else already did). */
    if (connection_process_inbuf(conn, 1)<0) /* XXXX Always 1? */
      if (!conn->marked_for_close)
        connection_mark_for_close(conn);
  }
}
/** Callback: invoked whenever a bufferevent has written data. */
void
connection_handle_write_cb(struct bufferevent *bufev, void *arg)
{
  connection_t *conn = arg;
  struct evbuffer *output;
  if (connection_flushed_some(conn)<0) {
    if (!conn->marked_for_close)
      connection_mark_for_close(conn);
    return;
  }
  output = bufferevent_get_output(bufev);
  if (!evbuffer_get_length(output)) {
    /* Output buffer fully drained. */
    connection_finished_flushing(conn);
    if (conn->marked_for_close && conn->hold_open_until_flushed) {
      /* We were keeping the connection alive only to finish flushing;
       * that is now done. */
      conn->hold_open_until_flushed = 0;
      if (conn->linked) {
        /* send eof */
        bufferevent_flush(conn->bufev, EV_WRITE, BEV_FINISHED);
      }
    }
  }
}
/** Callback: invoked whenever a bufferevent has had an event (like a
 * connection, or an eof, or an error) occur. */
void
connection_handle_event_cb(struct bufferevent *bufev, short event, void *arg)
{
  connection_t *conn = arg;
  (void) bufev;
  if (conn->marked_for_close)
    return;
  if (event & BEV_EVENT_CONNECTED) {
    /* Outbound connect completed. */
    tor_assert(connection_state_is_connecting(conn));
    if (connection_finished_connecting(conn)<0)
      return;
  }
  if (event & BEV_EVENT_EOF) {
    if (!conn->marked_for_close) {
      conn->inbuf_reached_eof = 1;
      if (connection_reached_eof(conn)<0)
        return;
    }
  }
  if (event & BEV_EVENT_ERROR) {
    int socket_error = evutil_socket_geterror(conn->s);
    if (conn->type == CONN_TYPE_OR &&
        conn->state == OR_CONN_STATE_CONNECTING) {
      /* OR connect failed: report the reason to the OR-connection layer. */
      connection_or_connect_failed(TO_OR_CONN(conn),
                                   errno_to_orconn_end_reason(socket_error),
                                   tor_socket_strerror(socket_error));
    } else if (CONN_IS_EDGE(conn)) {
      edge_connection_t *edge_conn = TO_EDGE_CONN(conn);
      if (!edge_conn->edge_has_sent_end)
        connection_edge_end_errno(edge_conn);
      if (conn->type == CONN_TYPE_AP && TO_ENTRY_CONN(conn)->socks_request) {
        /* broken, don't send a socks reply back */
        TO_ENTRY_CONN(conn)->socks_request->has_finished = 1;
      }
    }
    connection_close_immediate(conn); /* Connection is dead. */
    if (!conn->marked_for_close)
      connection_mark_for_close(conn);
  }
}
/** Set up the generic callbacks for the bufferevent on <b>conn</b>. */
void
connection_configure_bufferevent_callbacks(connection_t *conn)
{
  struct bufferevent *bufev;
  struct evbuffer *input, *output;
  tor_assert(conn->bufev);
  bufev = conn->bufev;
  /* Route read/write/event notifications to the generic handlers above,
   * passing the connection as the callback argument. */
  bufferevent_setcb(bufev,
                    connection_handle_read_cb,
                    connection_handle_write_cb,
                    connection_handle_event_cb,
                    conn);
  /* Set a fairly high write low-watermark so that we get the write callback
     called whenever data is written to bring us under 128K. Leave the
     high-watermark at 0.
  */
  bufferevent_setwatermark(bufev, EV_WRITE, 128*1024, 0);
  /* Also watch the raw evbuffers, so byte counts get recorded. */
  input = bufferevent_get_input(bufev);
  output = bufferevent_get_output(bufev);
  evbuffer_add_cb(input, evbuffer_inbuf_callback, conn);
  evbuffer_add_cb(output, evbuffer_outbuf_callback, conn);
}
#endif
/** A pass-through to fetch_from_buf. */
int
connection_fetch_from_buf(char *string, size_t len, connection_t *conn)
@ -4902,15 +4576,6 @@ assert_connection_ok(connection_t *conn, time_t now)
tor_assert(conn->type >= CONN_TYPE_MIN_);
tor_assert(conn->type <= CONN_TYPE_MAX_);
#ifdef USE_BUFFEREVENTS
if (conn->bufev) {
tor_assert(conn->read_event == NULL);
tor_assert(conn->write_event == NULL);
tor_assert(conn->inbuf == NULL);
tor_assert(conn->outbuf == NULL);
}
#endif
switch (conn->type) {
case CONN_TYPE_OR:
case CONN_TYPE_EXT_OR:
@ -5174,11 +4839,6 @@ connection_free_all(void)
tor_free(last_interface_ipv4);
tor_free(last_interface_ipv6);
#ifdef USE_BUFFEREVENTS
if (global_rate_limit)
bufferevent_rate_limit_group_free(global_rate_limit);
#endif
}
/** Log a warning, and possibly emit a control event, that <b>received</b> came

View File

@ -257,19 +257,7 @@ void clock_skew_warning(const connection_t *conn, long apparent_skew,
int trusted, log_domain_mask_t domain,
const char *received, const char *source);
#ifdef USE_BUFFEREVENTS
int connection_type_uses_bufferevent(connection_t *conn);
void connection_configure_bufferevent_callbacks(connection_t *conn);
void connection_handle_read_cb(struct bufferevent *bufev, void *arg);
void connection_handle_write_cb(struct bufferevent *bufev, void *arg);
void connection_handle_event_cb(struct bufferevent *bufev, short event,
void *arg);
void connection_get_rate_limit_totals(uint64_t *read_out,
uint64_t *written_out);
void connection_enable_rate_limiting(connection_t *conn);
#else
#define connection_type_uses_bufferevent(c) (0)
#endif
#ifdef CONNECTION_PRIVATE
STATIC void connection_free_(connection_t *conn);

View File

@ -42,10 +42,6 @@
#include "ext_orport.h"
#include "scheduler.h"
#ifdef USE_BUFFEREVENTS
#include <event2/bufferevent_ssl.h>
#endif
static int connection_tls_finish_handshake(or_connection_t *conn);
static int connection_or_launch_v3_or_handshake(or_connection_t *conn);
static int connection_or_process_cells_from_inbuf(or_connection_t *conn);
@ -66,12 +62,6 @@ static void connection_or_mark_bad_for_new_circs(or_connection_t *or_conn);
static void connection_or_change_state(or_connection_t *conn, uint8_t state);
#ifdef USE_BUFFEREVENTS
static void connection_or_handle_event_cb(struct bufferevent *bufev,
short event, void *arg);
#include <event2/buffer.h>/*XXXX REMOVE */
#endif
/**************************************************************/
/** Map from identity digest of connected OR or desired OR to a connection_t
@ -565,13 +555,6 @@ connection_or_process_inbuf(or_connection_t *conn)
return ret;
case OR_CONN_STATE_TLS_SERVER_RENEGOTIATING:
#ifdef USE_BUFFEREVENTS
if (tor_tls_server_got_renegotiate(conn->tls))
connection_or_tls_renegotiated_cb(conn->tls, conn);
if (conn->base_.marked_for_close)
return 0;
/* fall through. */
#endif
case OR_CONN_STATE_OPEN:
case OR_CONN_STATE_OR_HANDSHAKING_V2:
case OR_CONN_STATE_OR_HANDSHAKING_V3:
@ -807,27 +790,6 @@ connection_or_update_token_buckets_helper(or_connection_t *conn, int reset,
conn->bandwidthrate = rate;
conn->bandwidthburst = burst;
#ifdef USE_BUFFEREVENTS
{
const struct timeval *tick = tor_libevent_get_one_tick_timeout();
struct ev_token_bucket_cfg *cfg, *old_cfg;
int64_t rate64 = (((int64_t)rate) * options->TokenBucketRefillInterval)
/ 1000;
/* This can't overflow, since TokenBucketRefillInterval <= 1000,
* and rate started out less than INT_MAX. */
int rate_per_tick = (int) rate64;
cfg = ev_token_bucket_cfg_new(rate_per_tick, burst, rate_per_tick,
burst, tick);
old_cfg = conn->bucket_cfg;
if (conn->base_.bufev)
tor_set_bufferevent_rate_limit(conn->base_.bufev, cfg);
if (old_cfg)
ev_token_bucket_cfg_free(old_cfg);
conn->bucket_cfg = cfg;
(void) reset; /* No way to do this with libevent yet. */
}
#else
if (reset) { /* set up the token buckets to be full */
conn->read_bucket = conn->write_bucket = burst;
return;
@ -838,7 +800,6 @@ connection_or_update_token_buckets_helper(or_connection_t *conn, int reset,
conn->read_bucket = burst;
if (conn->write_bucket > burst)
conn->write_bucket = burst;
#endif
}
/** Either our set of relays or our per-conn rate limits have changed.
@ -1395,29 +1356,6 @@ connection_tls_start_handshake,(or_connection_t *conn, int receiving))
tor_tls_set_logged_address(conn->tls, // XXX client and relay?
escaped_safe_str(conn->base_.address));
#ifdef USE_BUFFEREVENTS
if (connection_type_uses_bufferevent(TO_CONN(conn))) {
const int filtering = get_options()->UseFilteringSSLBufferevents;
struct bufferevent *b =
tor_tls_init_bufferevent(conn->tls, conn->base_.bufev, conn->base_.s,
receiving, filtering);
if (!b) {
log_warn(LD_BUG,"tor_tls_init_bufferevent failed. Closing.");
return -1;
}
conn->base_.bufev = b;
if (conn->bucket_cfg)
tor_set_bufferevent_rate_limit(conn->base_.bufev, conn->bucket_cfg);
connection_enable_rate_limiting(TO_CONN(conn));
connection_configure_bufferevent_callbacks(TO_CONN(conn));
bufferevent_setcb(b,
connection_handle_read_cb,
connection_handle_write_cb,
connection_or_handle_event_cb,/* overriding this one*/
TO_CONN(conn));
}
#endif
connection_start_reading(TO_CONN(conn));
log_debug(LD_HANDSHAKE,"starting TLS handshake on fd "TOR_SOCKET_T_FORMAT,
conn->base_.s);
@ -1517,75 +1455,6 @@ connection_tls_continue_handshake(or_connection_t *conn)
return 0;
}
#ifdef USE_BUFFEREVENTS
static void
connection_or_handle_event_cb(struct bufferevent *bufev, short event,
void *arg)
{
struct or_connection_t *conn = TO_OR_CONN(arg);
/* XXXX cut-and-paste code; should become a function. */
if (event & BEV_EVENT_CONNECTED) {
if (conn->base_.state == OR_CONN_STATE_TLS_HANDSHAKING) {
if (tor_tls_finish_handshake(conn->tls) < 0) {
log_warn(LD_OR, "Problem finishing handshake");
connection_or_close_for_error(conn, 0);
return;
}
}
if (! tor_tls_used_v1_handshake(conn->tls)) {
if (!tor_tls_is_server(conn->tls)) {
if (conn->base_.state == OR_CONN_STATE_TLS_HANDSHAKING) {
if (connection_or_launch_v3_or_handshake(conn) < 0)
connection_or_close_for_error(conn, 0);
}
} else {
const int handshakes = tor_tls_get_num_server_handshakes(conn->tls);
if (handshakes == 1) {
/* v2 or v3 handshake, as a server. Only got one handshake, so
* wait for the next one. */
tor_tls_set_renegotiate_callback(conn->tls,
connection_or_tls_renegotiated_cb,
conn);
connection_or_change_state(conn,
OR_CONN_STATE_TLS_SERVER_RENEGOTIATING);
} else if (handshakes == 2) {
/* v2 handshake, as a server. Two handshakes happened already,
* so we treat renegotiation as done.
*/
connection_or_tls_renegotiated_cb(conn->tls, conn);
} else if (handshakes > 2) {
log_warn(LD_OR, "More than two handshakes done on connection. "
"Closing.");
connection_or_close_for_error(conn, 0);
} else {
log_warn(LD_BUG, "We were unexpectedly told that a connection "
"got %d handshakes. Closing.", handshakes);
connection_or_close_for_error(conn, 0);
}
return;
}
}
connection_watch_events(TO_CONN(conn), READ_EVENT|WRITE_EVENT);
if (connection_tls_finish_handshake(conn) < 0)
connection_or_close_for_error(conn, 0); /* ???? */
return;
}
if (event & BEV_EVENT_ERROR) {
unsigned long err;
while ((err = bufferevent_get_openssl_error(bufev))) {
tor_tls_log_one_error(conn->tls, err, LOG_WARN, LD_OR,
"handshaking (with bufferevent)");
}
}
connection_handle_event_cb(bufev, event, arg);
}
#endif
/** Return 1 if we initiated this connection, or 0 if it started
* out as an incoming connection.
*/

View File

@ -3641,16 +3641,8 @@ connection_dir_finished_flushing(dir_connection_t *conn)
return 0;
case DIR_CONN_STATE_SERVER_WRITING:
if (conn->dir_spool_src != DIR_SPOOL_NONE) {
#ifdef USE_BUFFEREVENTS
/* This can happen with paired bufferevents, since a paired connection
* can flush immediately when you write to it, making the subsequent
* check in connection_handle_write_cb() decide that the connection
* is flushed. */
log_debug(LD_DIRSERV, "Emptied a dirserv buffer, but still spooling.");
#else
log_warn(LD_BUG, "Emptied a dirserv buffer, but it's still spooling!");
connection_mark_for_close(TO_CONN(conn));
#endif
} else {
log_debug(LD_DIRSERV, "Finished writing server response. Closing.");
connection_mark_for_close(TO_CONN(conn));

View File

@ -71,10 +71,6 @@
#include <event2/event.h>
#ifdef USE_BUFFEREVENTS
#include <event2/bufferevent.h>
#endif
#ifdef HAVE_SYSTEMD
# if defined(__COVERITY__) && !defined(__INCLUDE_LEVEL__)
/* Systemd's use of gcc's __INCLUDE_LEVEL__ extension macro appears to confuse
@ -101,8 +97,6 @@ static int run_main_loop_until_done(void);
static void process_signal(int sig);
/********* START VARIABLES **********/
#ifndef USE_BUFFEREVENTS
int global_read_bucket; /**< Max number of bytes I can read this second. */
int global_write_bucket; /**< Max number of bytes I can write this second. */
@ -116,7 +110,6 @@ static int stats_prev_global_read_bucket;
/** What was the write bucket before the last second_elapsed_callback() call?
* (used to determine how many bytes we've written). */
static int stats_prev_global_write_bucket;
#endif
/* DOCDOC stats_prev_n_read */
static uint64_t stats_prev_n_read = 0;
@ -188,28 +181,6 @@ int quiet_level = 0;
*
****************************************************************************/
#if defined(_WIN32) && defined(USE_BUFFEREVENTS)
/** Remove the kernel-space send and receive buffers for <b>s</b>. For use
* with IOCP only. */
static int
set_buffer_lengths_to_zero(tor_socket_t s)
{
int zero = 0;
int r = 0;
if (setsockopt(s, SOL_SOCKET, SO_SNDBUF, (void*)&zero,
(socklen_t)sizeof(zero))) {
log_warn(LD_NET, "Unable to clear SO_SNDBUF");
r = -1;
}
if (setsockopt(s, SOL_SOCKET, SO_RCVBUF, (void*)&zero,
(socklen_t)sizeof(zero))) {
log_warn(LD_NET, "Unable to clear SO_RCVBUF");
r = -1;
}
return r;
}
#endif
/** Return 1 if we have successfully built a circuit, and nothing has changed
* to make us think that maybe we can't.
*/
@ -252,64 +223,7 @@ connection_add_impl(connection_t *conn, int is_connecting)
conn->conn_array_index = smartlist_len(connection_array);
smartlist_add(connection_array, conn);
#ifdef USE_BUFFEREVENTS
if (connection_type_uses_bufferevent(conn)) {
if (SOCKET_OK(conn->s) && !conn->linked) {
#ifdef _WIN32
if (tor_libevent_using_iocp_bufferevents() &&
get_options()->UserspaceIOCPBuffers) {
set_buffer_lengths_to_zero(conn->s);
}
#endif
conn->bufev = bufferevent_socket_new(
tor_libevent_get_base(),
conn->s,
BEV_OPT_DEFER_CALLBACKS);
if (!conn->bufev) {
log_warn(LD_BUG, "Unable to create socket bufferevent");
smartlist_del(connection_array, conn->conn_array_index);
conn->conn_array_index = -1;
return -1;
}
if (is_connecting) {
/* Put the bufferevent into a "connecting" state so that we'll get
* a "connected" event callback on successful write. */
bufferevent_socket_connect(conn->bufev, NULL, 0);
}
connection_configure_bufferevent_callbacks(conn);
} else if (conn->linked && conn->linked_conn &&
connection_type_uses_bufferevent(conn->linked_conn)) {
tor_assert(!(SOCKET_OK(conn->s)));
if (!conn->bufev) {
struct bufferevent *pair[2] = { NULL, NULL };
if (bufferevent_pair_new(tor_libevent_get_base(),
BEV_OPT_DEFER_CALLBACKS,
pair) < 0) {
log_warn(LD_BUG, "Unable to create bufferevent pair");
smartlist_del(connection_array, conn->conn_array_index);
conn->conn_array_index = -1;
return -1;
}
tor_assert(pair[0]);
conn->bufev = pair[0];
conn->linked_conn->bufev = pair[1];
} /* else the other side already was added, and got a bufferevent_pair */
connection_configure_bufferevent_callbacks(conn);
} else {
tor_assert(!conn->linked);
}
if (conn->bufev)
tor_assert(conn->inbuf == NULL);
if (conn->linked_conn && conn->linked_conn->bufev)
tor_assert(conn->linked_conn->inbuf == NULL);
}
#else
(void) is_connecting;
#endif
if (!HAS_BUFFEREVENT(conn) && (SOCKET_OK(conn->s) || conn->linked)) {
conn->read_event = tor_event_new(tor_libevent_get_base(),
@ -340,12 +254,6 @@ connection_unregister_events(connection_t *conn)
log_warn(LD_BUG, "Error removing write event for %d", (int)conn->s);
tor_free(conn->write_event);
}
#ifdef USE_BUFFEREVENTS
if (conn->bufev) {
bufferevent_free(conn->bufev);
conn->bufev = NULL;
}
#endif
if (conn->type == CONN_TYPE_AP_DNS_LISTENER) {
dnsserv_close_listener(conn);
}
@ -876,21 +784,6 @@ conn_close_if_marked(int i)
assert_connection_ok(conn, now);
/* assert_all_pending_dns_resolves_ok(); */
#ifdef USE_BUFFEREVENTS
if (conn->bufev) {
if (conn->hold_open_until_flushed &&
evbuffer_get_length(bufferevent_get_output(conn->bufev))) {
/* don't close yet. */
return 0;
}
if (conn->linked_conn && ! conn->linked_conn->marked_for_close) {
/* We need to do this explicitly so that the linked connection
* notices that there was an EOF. */
bufferevent_flush(conn->bufev, EV_WRITE, BEV_FINISHED);
}
}
#endif
log_debug(LD_NET,"Cleaning up connection (fd "TOR_SOCKET_T_FORMAT").",
conn->s);
@ -985,9 +878,6 @@ conn_close_if_marked(int i)
}
}
#ifdef USE_BUFFEREVENTS
unlink:
#endif
connection_unlink(conn); /* unlink, remove, free */
return 1;
}
@ -1133,11 +1023,7 @@ run_connection_housekeeping(int i, time_t now)
the connection or send a keepalive, depending. */
or_conn = TO_OR_CONN(conn);
#ifdef USE_BUFFEREVENTS
tor_assert(conn->bufev);
#else
tor_assert(conn->outbuf);
#endif
chan = TLS_CHAN_TO_BASE(or_conn->chan);
tor_assert(chan);
@ -2051,25 +1937,10 @@ second_elapsed_callback(periodic_timer_t *timer, void *arg)
/* the second has rolled over. check more stuff. */
seconds_elapsed = current_second ? (int)(now - current_second) : 0;
#ifdef USE_BUFFEREVENTS
{
uint64_t cur_read,cur_written;
connection_get_rate_limit_totals(&cur_read, &cur_written);
bytes_written = (size_t)(cur_written - stats_prev_n_written);
bytes_read = (size_t)(cur_read - stats_prev_n_read);
stats_n_bytes_read += bytes_read;
stats_n_bytes_written += bytes_written;
if (accounting_is_enabled(options) && seconds_elapsed >= 0)
accounting_add_bytes(bytes_read, bytes_written, seconds_elapsed);
stats_prev_n_written = cur_written;
stats_prev_n_read = cur_read;
}
#else
bytes_read = (size_t)(stats_n_bytes_read - stats_prev_n_read);
bytes_written = (size_t)(stats_n_bytes_written - stats_prev_n_written);
stats_prev_n_read = stats_n_bytes_read;
stats_prev_n_written = stats_n_bytes_written;
#endif
control_event_bandwidth_used((uint32_t)bytes_read,(uint32_t)bytes_written);
control_event_stream_bandwidth_used();
@ -2141,7 +2012,6 @@ systemd_watchdog_callback(periodic_timer_t *timer, void *arg)
}
#endif
#ifndef USE_BUFFEREVENTS
/** Timer: used to invoke refill_callback(). */
static periodic_timer_t *refill_timer = NULL;
@ -2190,7 +2060,6 @@ refill_callback(periodic_timer_t *timer, void *arg)
current_millisecond = now; /* remember what time it is, for next time */
}
#endif
#ifndef _WIN32
/** Called when a possibly ignorable libevent error occurs; ensures that we
@ -2366,13 +2235,6 @@ do_main_loop(void)
}
}
#ifdef USE_BUFFEREVENTS
log_warn(LD_GENERAL, "Tor was compiled with the --enable-bufferevents "
"option. This is still experimental, and might cause strange "
"bugs. If you want a more stable Tor, be sure to build without "
"--enable-bufferevents.");
#endif
handle_signals(1);
/* load the private keys, if we're supposed to have them, and set up the
@ -2386,10 +2248,8 @@ do_main_loop(void)
/* Set up our buckets */
connection_bucket_init();
#ifndef USE_BUFFEREVENTS
stats_prev_global_read_bucket = global_read_bucket;
stats_prev_global_write_bucket = global_write_bucket;
#endif
/* initialize the bootstrap status events to know we're starting up */
control_event_bootstrap(BOOTSTRAP_STATUS_STARTING, 0);
@ -2486,7 +2346,6 @@ do_main_loop(void)
}
#endif
#ifndef USE_BUFFEREVENTS
if (!refill_timer) {
struct timeval refill_interval;
int msecs = get_options()->TokenBucketRefillInterval;
@ -2500,7 +2359,6 @@ do_main_loop(void)
NULL);
tor_assert(refill_timer);
}
#endif
#ifdef HAVE_SYSTEMD
{
@ -2994,12 +2852,8 @@ tor_init(int argc, char *argv[])
{
const char *version = get_version();
const char *bev_str =
#ifdef USE_BUFFEREVENTS
"(with bufferevents) ";
#else
"";
#endif
const char *bev_str = "";
log_notice(LD_GENERAL, "Tor v%s %srunning on %s with Libevent %s, "
"OpenSSL %s and Zlib %s.", version, bev_str,
get_uname(),
@ -3177,9 +3031,7 @@ tor_free_all(int postfork)
smartlist_free(active_linked_connection_lst);
periodic_timer_free(second_timer);
teardown_periodic_events();
#ifndef USE_BUFFEREVENTS
periodic_timer_free(refill_timer);
#endif
if (!postfork) {
release_lockfile();

View File

@ -66,12 +66,6 @@
#include <windows.h>
#endif
#ifdef USE_BUFFEREVENTS
#include <event2/bufferevent.h>
#include <event2/buffer.h>
#include <event2/util.h>
#endif
#include "crypto.h"
#include "crypto_format.h"
#include "tortls.h"
@ -1137,11 +1131,8 @@ typedef struct {
typedef struct buf_t buf_t;
typedef struct socks_request_t socks_request_t;
#ifdef USE_BUFFEREVENTS
#define generic_buffer_t struct evbuffer
#else
#define generic_buffer_t buf_t
#endif
typedef struct entry_port_cfg_t {
/* Client port types (socks, dns, trans, natd) only: */
@ -1280,10 +1271,6 @@ typedef struct connection_t {
time_t timestamp_lastwritten; /**< When was the last time libevent said we
* could write? */
#ifdef USE_BUFFEREVENTS
struct bufferevent *bufev; /**< A Libevent buffered IO structure. */
#endif
time_t timestamp_created; /**< When was this connection_t created? */
int socket_family; /**< Address family of this connection's socket. Usually
@ -1523,17 +1510,10 @@ typedef struct or_connection_t {
/* bandwidth* and *_bucket only used by ORs in OPEN state: */
int bandwidthrate; /**< Bytes/s added to the bucket. (OPEN ORs only.) */
int bandwidthburst; /**< Max bucket size for this conn. (OPEN ORs only.) */
#ifndef USE_BUFFEREVENTS
int read_bucket; /**< When this hits 0, stop receiving. Every second we
* add 'bandwidthrate' to this, capping it at
* bandwidthburst. (OPEN ORs only) */
int write_bucket; /**< When this hits 0, stop writing. Like read_bucket. */
#else
/** A rate-limiting configuration object to determine how this connection
* set its read- and write- limits. */
/* XXXX we could share this among all connections. */
struct ev_token_bucket_cfg *bucket_cfg;
#endif
struct or_connection_t *next_with_same_id; /**< Next connection with same
* identity digest as this one. */
@ -1874,22 +1854,11 @@ static inline listener_connection_t *TO_LISTENER_CONN(connection_t *c)
and if we aren't using bufferevents, they expand more or less to:
{ do non-bufferevent stuff; }
*/
#ifdef USE_BUFFEREVENTS
#define HAS_BUFFEREVENT(c) (((c)->bufev) != NULL)
#define IF_HAS_BUFFEREVENT(c, stmt) \
if ((c)->bufev) do { \
stmt ; \
} while (0)
#define ELSE_IF_NO_BUFFEREVENT ; else
#define IF_HAS_NO_BUFFEREVENT(c) \
if (NULL == (c)->bufev)
#else
#define HAS_BUFFEREVENT(c) (0)
#define IF_HAS_BUFFEREVENT(c, stmt) (void)0
#define ELSE_IF_NO_BUFFEREVENT ;
#define IF_HAS_NO_BUFFEREVENT(c) \
if (1)
#endif
/** What action type does an address policy indicate: accept or reject? */
typedef enum {