Merge remote-tracking branch 'public/Fix_19450'

Nick Mathewson 2016-08-12 16:11:28 -04:00
commit 7f145b54af
25 changed files with 116 additions and 1723 deletions

changes/19450 (new file)

@ -0,0 +1,3 @@
o Removed code:
- We no longer include the (dead, deprecated) bufferevent code in
Tor. Closes ticket 19450. Based on a patch from U+039b.


@ -161,9 +161,6 @@ AC_ARG_ENABLE(tor2web-mode,
CFLAGS="$CFLAGS -D ENABLE_TOR2WEB_MODE=1" CFLAGS="$CFLAGS -D ENABLE_TOR2WEB_MODE=1"
fi]) fi])
AC_ARG_ENABLE(bufferevents,
AS_HELP_STRING(--enable-bufferevents, [use Libevent's buffered IO]))
AC_ARG_ENABLE(tool-name-check, AC_ARG_ENABLE(tool-name-check,
AS_HELP_STRING(--disable-tool-name-check, [check for sanely named toolchain when cross-compiling])) AS_HELP_STRING(--disable-tool-name-check, [check for sanely named toolchain when cross-compiling]))
@ -552,39 +549,6 @@ LIBS="$save_LIBS"
LDFLAGS="$save_LDFLAGS" LDFLAGS="$save_LDFLAGS"
CPPFLAGS="$save_CPPFLAGS" CPPFLAGS="$save_CPPFLAGS"
dnl bufferevents require version 2.0.13
if test "$enable_bufferevents" = "yes"; then
AC_CHECK_HEADERS(event2/bufferevent_ssl.h)
CPPFLAGS="$CPPFLAGS $TOR_CPPFLAGS_libevent"
AC_MSG_CHECKING([whether Libevent is new enough for bufferevents])
AC_COMPILE_IFELSE([AC_LANG_SOURCE([
#include <event2/event.h>
#if !defined(LIBEVENT_VERSION_NUMBER) || LIBEVENT_VERSION_NUMBER < 0x02000d00
#error
int x = y(zz);
#else
int x = 1;
#endif
])], [ AC_MSG_RESULT([yes]) ],
[ AC_MSG_RESULT([no])
AC_MSG_ERROR([Libevent does not seem new enough to support bufferevents. We require 2.0.13-stable or later]) ] )
fi
LIBS="$save_LIBS"
LDFLAGS="$save_LDFLAGS"
CPPFLAGS="$save_CPPFLAGS"
AM_CONDITIONAL(USE_BUFFEREVENTS, test "$enable_bufferevents" = "yes")
if test "$enable_bufferevents" = "yes"; then
AC_DEFINE(USE_BUFFEREVENTS, 1, [Defined if we're going to use Libevent's buffered IO API])
if test "$enable_static_libevent" = "yes"; then
TOR_LIBEVENT_LIBS="$TOR_LIBDIR_libevent/libevent_openssl.a $TOR_LIBEVENT_LIBS"
else
TOR_LIBEVENT_LIBS="-levent_openssl $TOR_LIBEVENT_LIBS"
fi
fi
AC_SUBST(TOR_LIBEVENT_LIBS) AC_SUBST(TOR_LIBEVENT_LIBS)
dnl ------------------------------------------------------ dnl ------------------------------------------------------
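For reference, the version gate that the removed probe compiled can be restated as a plain C translation unit; this sketch only repeats the preprocessor test above (0x02000d00 encodes Libevent 2.0.13), plus a trivial runtime check:

    /* check_libevent_version.c -- refuses to compile against Libevent < 2.0.13 */
    #include <event2/event.h>

    #if !defined(LIBEVENT_VERSION_NUMBER) || LIBEVENT_VERSION_NUMBER < 0x02000d00
    #error "Libevent 2.0.13-stable or later is required"
    #endif

    int main(void)
    {
      /* At runtime, event_get_version() reports the linked library's version. */
      return event_get_version() ? 0 : 1;
    }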


@ -705,26 +705,6 @@ GENERAL OPTIONS
networkstatus. This is an advanced option; you generally shouldn't have networkstatus. This is an advanced option; you generally shouldn't have
to mess with it. (Default: not set) to mess with it. (Default: not set)
[[DisableIOCP]] **DisableIOCP** **0**|**1**::
If Tor was built to use the Libevent's "bufferevents" networking code
and you're running on Windows, setting this option to 1 will tell Libevent
not to use the Windows IOCP networking API. (Default: 1)
[[UserspaceIOCPBuffers]] **UserspaceIOCPBuffers** **0**|**1**::
If IOCP is enabled (see DisableIOCP above), setting this option to 1
will tell Tor to disable kernel-space TCP buffers, in order to avoid
needless copy operations and try not to run out of non-paged RAM.
This feature is experimental; don't use it yet unless you're eager to
help tracking down bugs. (Default: 0)
[[UseFilteringSSLBufferevents]] **UseFilteringSSLBufferevents** **0**|**1**::
Tells Tor to do its SSL communication using a chain of
bufferevents: one for SSL and one for networking. This option has no
effect if bufferevents are disabled (in which case it can't turn on), or
if IOCP bufferevents are enabled (in which case it can't turn off). This
option is useful for debugging only; most users shouldn't touch it.
(Default: 0)
[[CountPrivateBandwidth]] **CountPrivateBandwidth** **0**|**1**:: [[CountPrivateBandwidth]] **CountPrivateBandwidth** **0**|**1**::
If this option is set, then Tor's rate-limiting applies not only to If this option is set, then Tor's rate-limiting applies not only to
remote connections, but also to connections to private addresses like remote connections, but also to connections to private addresses like


@ -18,9 +18,6 @@
#include <event2/event.h> #include <event2/event.h>
#include <event2/thread.h> #include <event2/thread.h>
#ifdef USE_BUFFEREVENTS
#include <event2/bufferevent.h>
#endif
/** A string which, if it appears in a libevent log, should be ignored. */ /** A string which, if it appears in a libevent log, should be ignored. */
static const char *suppress_msg = NULL; static const char *suppress_msg = NULL;
@ -94,17 +91,6 @@ static struct event_base *the_event_base = NULL;
#endif #endif
#endif #endif
#ifdef USE_BUFFEREVENTS
static int using_iocp_bufferevents = 0;
static void tor_libevent_set_tick_timeout(int msec_per_tick);
int
tor_libevent_using_iocp_bufferevents(void)
{
return using_iocp_bufferevents;
}
#endif
/** Initialize the Libevent library and set up the event base. */ /** Initialize the Libevent library and set up the event base. */
void void
tor_libevent_initialize(tor_libevent_cfg *torcfg) tor_libevent_initialize(tor_libevent_cfg *torcfg)
@ -115,34 +101,15 @@ tor_libevent_initialize(tor_libevent_cfg *torcfg)
{ {
int attempts = 0; int attempts = 0;
int using_threads;
struct event_config *cfg; struct event_config *cfg;
retry:
++attempts; ++attempts;
using_threads = 0;
cfg = event_config_new(); cfg = event_config_new();
tor_assert(cfg); tor_assert(cfg);
#if defined(_WIN32) && defined(USE_BUFFEREVENTS)
if (! torcfg->disable_iocp) {
evthread_use_windows_threads();
event_config_set_flag(cfg, EVENT_BASE_FLAG_STARTUP_IOCP);
using_iocp_bufferevents = 1;
using_threads = 1;
} else {
using_iocp_bufferevents = 0;
}
#elif defined(__COVERITY__)
/* Avoid a 'dead code' warning below. */
using_threads = ! torcfg->disable_iocp;
#endif
if (!using_threads) {
/* Telling Libevent not to try to turn locking on can avoid a needless /* Telling Libevent not to try to turn locking on can avoid a needless
* socketpair() attempt. */ * socketpair() attempt. */
event_config_set_flag(cfg, EVENT_BASE_FLAG_NOLOCK); event_config_set_flag(cfg, EVENT_BASE_FLAG_NOLOCK);
}
if (torcfg->num_cpus > 0) if (torcfg->num_cpus > 0)
event_config_set_num_cpus_hint(cfg, torcfg->num_cpus); event_config_set_num_cpus_hint(cfg, torcfg->num_cpus);
@ -154,24 +121,6 @@ tor_libevent_initialize(tor_libevent_cfg *torcfg)
the_event_base = event_base_new_with_config(cfg); the_event_base = event_base_new_with_config(cfg);
event_config_free(cfg); event_config_free(cfg);
if (using_threads && the_event_base == NULL && attempts < 2) {
/* This could be a socketpair() failure, which can happen sometimes on
* windows boxes with obnoxious firewall rules. Downgrade and try
* again. */
#if defined(_WIN32) && defined(USE_BUFFEREVENTS)
if (torcfg->disable_iocp == 0) {
log_warn(LD_GENERAL, "Unable to initialize Libevent. Trying again "
"with IOCP disabled.");
} else
#endif
{
log_warn(LD_GENERAL, "Unable to initialize Libevent. Trying again.");
}
torcfg->disable_iocp = 1;
goto retry;
}
} }
if (!the_event_base) { if (!the_event_base) {
@ -184,10 +133,6 @@ tor_libevent_initialize(tor_libevent_cfg *torcfg)
log_info(LD_GENERAL, log_info(LD_GENERAL,
"Initialized libevent version %s using method %s. Good.", "Initialized libevent version %s using method %s. Good.",
event_get_version(), tor_libevent_get_method()); event_get_version(), tor_libevent_get_method());
#ifdef USE_BUFFEREVENTS
tor_libevent_set_tick_timeout(torcfg->msec_per_tick);
#endif
} }
/** Return the current Libevent event base that we're set up to use. */ /** Return the current Libevent event base that we're set up to use. */
@ -276,58 +221,6 @@ periodic_timer_free(periodic_timer_t *timer)
tor_free(timer); tor_free(timer);
} }
#ifdef USE_BUFFEREVENTS
static const struct timeval *one_tick = NULL;
/**
* Return a special timeout to be passed whenever libevent's O(1) timeout
* implementation should be used. Only use this when the timer is supposed
* to fire after msec_per_tick ticks have elapsed.
*/
const struct timeval *
tor_libevent_get_one_tick_timeout(void)
{
tor_assert(one_tick);
return one_tick;
}
/** Initialize the common timeout that we'll use to refill the buckets every
* time a tick elapses. */
static void
tor_libevent_set_tick_timeout(int msec_per_tick)
{
struct event_base *base = tor_libevent_get_base();
struct timeval tv;
tor_assert(! one_tick);
tv.tv_sec = msec_per_tick / 1000;
tv.tv_usec = (msec_per_tick % 1000) * 1000;
one_tick = event_base_init_common_timeout(base, &tv);
}
static struct bufferevent *
tor_get_root_bufferevent(struct bufferevent *bev)
{
struct bufferevent *u;
while ((u = bufferevent_get_underlying(bev)) != NULL)
bev = u;
return bev;
}
int
tor_set_bufferevent_rate_limit(struct bufferevent *bev,
struct ev_token_bucket_cfg *cfg)
{
return bufferevent_set_rate_limit(tor_get_root_bufferevent(bev), cfg);
}
int
tor_add_bufferevent_to_rate_limit_group(struct bufferevent *bev,
struct bufferevent_rate_limit_group *g)
{
return bufferevent_add_to_rate_limit_group(tor_get_root_bufferevent(bev), g);
}
#endif
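The tick machinery removed here was built on Libevent's common-timeout feature: event_base_init_common_timeout() hands back a special timeval which, when passed to event_add(), puts the timer on an O(1) timeout queue. A minimal standalone sketch of that pattern outside Tor (tick_cb and the 100 ms interval are illustrative):

    #include <sys/time.h>
    #include <event2/event.h>

    static void tick_cb(evutil_socket_t fd, short what, void *arg)
    {
      (void)fd; (void)what; (void)arg;
      /* e.g. refill rate-limiting buckets here */
    }

    int main(void)
    {
      struct event_base *base = event_base_new();
      struct timeval tv = { 0, 100 * 1000 };            /* 100 msec per tick */
      const struct timeval *one_tick =
        event_base_init_common_timeout(base, &tv);      /* O(1) timeout handle */
      struct event *tick = event_new(base, -1, EV_PERSIST, tick_cb, NULL);
      event_add(tick, one_tick);                        /* fires once per tick */
      event_base_dispatch(base);
      event_free(tick);
      event_base_free(base);
      return 0;
    }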
int int
tor_init_libevent_rng(void) tor_init_libevent_rng(void)
{ {


@ -9,10 +9,6 @@
#include <event2/event.h> #include <event2/event.h>
#ifdef USE_BUFFEREVENTS
#include <event2/bufferevent.h>
#endif
void configure_libevent_logging(void); void configure_libevent_logging(void);
void suppress_libevent_log_msg(const char *msg); void suppress_libevent_log_msg(const char *msg);
@ -38,12 +34,10 @@ void periodic_timer_free(periodic_timer_t *);
/** Defines a configuration for using libevent with Tor: passed as an argument /** Defines a configuration for using libevent with Tor: passed as an argument
* to tor_libevent_initialize() to describe how we want to set up. */ * to tor_libevent_initialize() to describe how we want to set up. */
typedef struct tor_libevent_cfg { typedef struct tor_libevent_cfg {
/** Flag: if true, disable IOCP (assuming that it could be enabled). */ /** How many CPUs should we use (not currently useful). */
int disable_iocp;
/** How many CPUs should we use (relevant only with IOCP). */
int num_cpus; int num_cpus;
/** How many milliseconds should we allow between updating bandwidth limits? /** How many milliseconds should we allow between updating bandwidth limits?
* (relevant only with bufferevents). */ * (Not currently useful). */
int msec_per_tick; int msec_per_tick;
} tor_libevent_cfg; } tor_libevent_cfg;
@ -54,15 +48,6 @@ void tor_check_libevent_header_compatibility(void);
const char *tor_libevent_get_version_str(void); const char *tor_libevent_get_version_str(void);
const char *tor_libevent_get_header_version_str(void); const char *tor_libevent_get_header_version_str(void);
#ifdef USE_BUFFEREVENTS
const struct timeval *tor_libevent_get_one_tick_timeout(void);
int tor_libevent_using_iocp_bufferevents(void);
int tor_set_bufferevent_rate_limit(struct bufferevent *bev,
struct ev_token_bucket_cfg *cfg);
int tor_add_bufferevent_to_rate_limit_group(struct bufferevent *bev,
struct bufferevent_rate_limit_group *g);
#endif
int tor_init_libevent_rng(void); int tor_init_libevent_rng(void);
void tor_gettimeofday_cached(struct timeval *tv); void tor_gettimeofday_cached(struct timeval *tv);
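With the IOCP and bufferevent fields gone, tor_libevent_cfg carries only num_cpus and msec_per_tick, and init_libevent() in config.c (later in this commit) fills it exactly as condensed below; setup_libevent() is a hypothetical wrapper around that call site, not new API:

    #include <string.h>
    #include "compat_libevent.h"

    /* Hypothetical wrapper; mirrors what init_libevent() in config.c now does. */
    static void setup_libevent(int num_cpus, int refill_interval_msec)
    {
      tor_libevent_cfg cfg;
      memset(&cfg, 0, sizeof(cfg));
      cfg.num_cpus = num_cpus;                  /* "not currently useful" post-change */
      cfg.msec_per_tick = refill_interval_msec; /* retained, likewise unused now */
      tor_libevent_initialize(&cfg);
    }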


@ -48,13 +48,6 @@ DISABLE_GCC_WARNING(redundant-decls)
ENABLE_GCC_WARNING(redundant-decls) ENABLE_GCC_WARNING(redundant-decls)
#ifdef USE_BUFFEREVENTS
#include <event2/bufferevent_ssl.h>
#include <event2/buffer.h>
#include <event2/event.h>
#include "compat_libevent.h"
#endif
#define TORTLS_PRIVATE #define TORTLS_PRIVATE
#include "tortls.h" #include "tortls.h"
#include "util.h" #include "util.h"
@ -2486,78 +2479,6 @@ tor_tls_get_buffer_sizes(tor_tls_t *tls,
#endif #endif
} }
#ifdef USE_BUFFEREVENTS
/** Construct and return an TLS-encrypting bufferevent to send data over
* <b>socket</b>, which must match the socket of the underlying bufferevent
* <b>bufev_in</b>. The TLS object <b>tls</b> is used for encryption.
*
* This function will either create a filtering bufferevent that wraps around
* <b>bufev_in</b>, or it will free bufev_in and return a new bufferevent that
* uses the <b>tls</b> to talk to the network directly. Do not use
* <b>bufev_in</b> after calling this function.
*
* The connection will start out doing a server handshake if <b>receiving</b>
is true, and a client handshake otherwise.
*
* Returns NULL on failure.
*/
struct bufferevent *
tor_tls_init_bufferevent(tor_tls_t *tls, struct bufferevent *bufev_in,
evutil_socket_t socket, int receiving,
int filter)
{
struct bufferevent *out;
const enum bufferevent_ssl_state state = receiving ?
BUFFEREVENT_SSL_ACCEPTING : BUFFEREVENT_SSL_CONNECTING;
if (filter || tor_libevent_using_iocp_bufferevents()) {
/* Grab an extra reference to the SSL, since BEV_OPT_CLOSE_ON_FREE
means that the SSL will get freed too.
This increment makes our SSL usage not-threadsafe, BTW. We should
see if we're allowed to use CRYPTO_add from outside openssl. */
tls->ssl->references += 1;
out = bufferevent_openssl_filter_new(tor_libevent_get_base(),
bufev_in,
tls->ssl,
state,
BEV_OPT_DEFER_CALLBACKS|
BEV_OPT_CLOSE_ON_FREE);
/* Tell the underlying bufferevent when to accept more data from the SSL
filter (only when it's got less than 32K to write), and when to notify
the SSL filter that it could write more (when it drops under 24K). */
bufferevent_setwatermark(bufev_in, EV_WRITE, 24*1024, 32*1024);
} else {
if (bufev_in) {
evutil_socket_t s = bufferevent_getfd(bufev_in);
tor_assert(s == -1 || s == socket);
tor_assert(evbuffer_get_length(bufferevent_get_input(bufev_in)) == 0);
tor_assert(evbuffer_get_length(bufferevent_get_output(bufev_in)) == 0);
tor_assert(BIO_number_read(SSL_get_rbio(tls->ssl)) == 0);
tor_assert(BIO_number_written(SSL_get_rbio(tls->ssl)) == 0);
bufferevent_free(bufev_in);
}
/* Current versions (as of 2.0.x) of Libevent need to defer
* bufferevent_openssl callbacks, or else our callback functions will
* get called reentrantly, which is bad for us.
*/
out = bufferevent_openssl_socket_new(tor_libevent_get_base(),
socket,
tls->ssl,
state,
BEV_OPT_DEFER_CALLBACKS);
}
tls->state = TOR_TLS_ST_BUFFEREVENT;
/* Unblock _after_ creating the bufferevent, since accept/connect tend to
* clear flags. */
tor_tls_unblock_renegotiation(tls);
return out;
}
#endif
/** Check whether the ECC group requested is supported by the current OpenSSL /** Check whether the ECC group requested is supported by the current OpenSSL
* library instance. Return 1 if the group is supported, and 0 if not. * library instance. Return 1 if the group is supported, and 0 if not.
*/ */
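The removed tor_tls_init_bufferevent() chose between Libevent's two OpenSSL bufferevent constructors depending on whether a filtering layer was wanted. For context, the two call shapes it selected between, reduced to a hedged sketch with illustrative names:

    #include <openssl/ssl.h>
    #include <event2/bufferevent.h>
    #include <event2/bufferevent_ssl.h>

    /* Filtering mode: layer TLS on top of an existing bufferevent. */
    static struct bufferevent *
    make_tls_filter_bev(struct event_base *base, struct bufferevent *underlying,
                        SSL *ssl, int receiving)
    {
      enum bufferevent_ssl_state state =
        receiving ? BUFFEREVENT_SSL_ACCEPTING : BUFFEREVENT_SSL_CONNECTING;
      return bufferevent_openssl_filter_new(base, underlying, ssl, state,
                                            BEV_OPT_DEFER_CALLBACKS|
                                            BEV_OPT_CLOSE_ON_FREE);
    }

    /* Socket mode: let the TLS bufferevent talk to the fd directly. */
    static struct bufferevent *
    make_tls_socket_bev(struct event_base *base, evutil_socket_t fd,
                        SSL *ssl, int receiving)
    {
      enum bufferevent_ssl_state state =
        receiving ? BUFFEREVENT_SSL_ACCEPTING : BUFFEREVENT_SSL_CONNECTING;
      return bufferevent_openssl_socket_new(base, fd, ssl, state,
                                            BEV_OPT_DEFER_CALLBACKS);
    }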


@ -235,14 +235,6 @@ void check_no_tls_errors_(const char *fname, int line);
void tor_tls_log_one_error(tor_tls_t *tls, unsigned long err, void tor_tls_log_one_error(tor_tls_t *tls, unsigned long err,
int severity, int domain, const char *doing); int severity, int domain, const char *doing);
#ifdef USE_BUFFEREVENTS
int tor_tls_start_renegotiating(tor_tls_t *tls);
struct bufferevent *tor_tls_init_bufferevent(tor_tls_t *tls,
struct bufferevent *bufev_in,
evutil_socket_t socket, int receiving,
int filter);
#endif
void tor_x509_cert_free(tor_x509_cert_t *cert); void tor_x509_cert_free(tor_x509_cert_t *cert);
tor_x509_cert_t *tor_x509_cert_decode(const uint8_t *certificate, tor_x509_cert_t *tor_x509_cert_decode(const uint8_t *certificate,
size_t certificate_len); size_t certificate_len);


@ -913,97 +913,6 @@ fetch_var_cell_from_buf(buf_t *buf, var_cell_t **out, int linkproto)
return 1; return 1;
} }
#ifdef USE_BUFFEREVENTS
/** Try to read <b>n</b> bytes from <b>buf</b> at <b>pos</b> (which may be
* NULL for the start of the buffer), copying the data only if necessary. Set
* *<b>data_out</b> to a pointer to the desired bytes. Set <b>free_out</b>
* to 1 if we needed to malloc *<b>data</b> because the original bytes were
* noncontiguous; 0 otherwise. Return the number of bytes actually available
* at *<b>data_out</b>.
*/
static ssize_t
inspect_evbuffer(struct evbuffer *buf, char **data_out, size_t n,
int *free_out, struct evbuffer_ptr *pos)
{
int n_vecs, i;
if (evbuffer_get_length(buf) < n)
n = evbuffer_get_length(buf);
if (n == 0)
return 0;
n_vecs = evbuffer_peek(buf, n, pos, NULL, 0);
tor_assert(n_vecs > 0);
if (n_vecs == 1) {
struct evbuffer_iovec v;
i = evbuffer_peek(buf, n, pos, &v, 1);
tor_assert(i == 1);
*data_out = v.iov_base;
*free_out = 0;
return v.iov_len;
} else {
ev_ssize_t copied;
*data_out = tor_malloc(n);
*free_out = 1;
copied = evbuffer_copyout(buf, *data_out, n);
tor_assert(copied >= 0 && (size_t)copied == n);
return copied;
}
}
/** As fetch_var_cell_from_buf, but works on an evbuffer. */
int
fetch_var_cell_from_evbuffer(struct evbuffer *buf, var_cell_t **out,
int linkproto)
{
char *hdr = NULL;
int free_hdr = 0;
size_t n;
size_t buf_len;
uint8_t command;
uint16_t cell_length;
var_cell_t *cell;
int result = 0;
const int wide_circ_ids = linkproto >= MIN_LINK_PROTO_FOR_WIDE_CIRC_IDS;
const int circ_id_len = get_circ_id_size(wide_circ_ids);
const unsigned header_len = get_var_cell_header_size(wide_circ_ids);
*out = NULL;
buf_len = evbuffer_get_length(buf);
if (buf_len < header_len)
return 0;
n = inspect_evbuffer(buf, &hdr, header_len, &free_hdr, NULL);
tor_assert(n >= header_len);
command = get_uint8(hdr + circ_id_len);
if (!(cell_command_is_var_length(command, linkproto))) {
goto done;
}
cell_length = ntohs(get_uint16(hdr + circ_id_len + 1));
if (buf_len < (size_t)(header_len+cell_length)) {
result = 1; /* Not all here yet. */
goto done;
}
cell = var_cell_new(cell_length);
cell->command = command;
if (wide_circ_ids)
cell->circ_id = ntohl(get_uint32(hdr));
else
cell->circ_id = ntohs(get_uint16(hdr));
evbuffer_drain(buf, header_len);
evbuffer_remove(buf, cell->payload, cell_length);
*out = cell;
result = 1;
done:
if (free_hdr && hdr)
tor_free(hdr);
return result;
}
#endif
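The free_out contract of the removed inspect_evbuffer() is the subtle part: the caller owns the returned bytes only when *free_out was set, because a copy had to be made for noncontiguous data. A hypothetical caller, only to illustrate that contract (the function was static to buffers.c, so nothing outside this file could call it):

    /* Hypothetical caller inside buffers.c, showing the ownership rule only. */
    static void
    peek_first_bytes(struct evbuffer *buf)
    {
      char *data = NULL;
      int free_data = 0;
      ssize_t n = inspect_evbuffer(buf, &data, 16, &free_data, NULL);
      if (n > 0) {
        /* data[0..n-1] is contiguous here, whether or not it was copied */
      }
      if (free_data)        /* set only when the bytes had to be malloc'd */
        tor_free(data);
    }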
/** Move up to *<b>buf_flushlen</b> bytes from <b>buf_in</b> to /** Move up to *<b>buf_flushlen</b> bytes from <b>buf_in</b> to
* <b>buf_out</b>, and modify *<b>buf_flushlen</b> appropriately. * <b>buf_out</b>, and modify *<b>buf_flushlen</b> appropriately.
* Return the number of bytes actually copied. * Return the number of bytes actually copied.
@ -1246,94 +1155,6 @@ fetch_from_buf_http(buf_t *buf,
return 1; return 1;
} }
#ifdef USE_BUFFEREVENTS
/** As fetch_from_buf_http, but works on an evbuffer. */
int
fetch_from_evbuffer_http(struct evbuffer *buf,
char **headers_out, size_t max_headerlen,
char **body_out, size_t *body_used, size_t max_bodylen,
int force_complete)
{
struct evbuffer_ptr crlf, content_length;
size_t headerlen, bodylen, contentlen;
/* Find the first \r\n\r\n in the buffer */
crlf = evbuffer_search(buf, "\r\n\r\n", 4, NULL);
if (crlf.pos < 0) {
/* We didn't find one. */
if (evbuffer_get_length(buf) > max_headerlen)
return -1; /* Headers too long. */
return 0; /* Headers not here yet. */
} else if (crlf.pos > (int)max_headerlen) {
return -1; /* Headers too long. */
}
headerlen = crlf.pos + 4; /* Skip over the \r\n\r\n */
bodylen = evbuffer_get_length(buf) - headerlen;
if (bodylen > max_bodylen)
return -1; /* body too long */
/* Look for the first occurrence of CONTENT_LENGTH inside buf before the
* crlfcrlf */
content_length = evbuffer_search_range(buf, CONTENT_LENGTH,
strlen(CONTENT_LENGTH), NULL, &crlf);
if (content_length.pos >= 0) {
/* We found a content_length: parse it and figure out if the body is here
* yet. */
struct evbuffer_ptr eol;
char *data = NULL;
int free_data = 0;
int n, i;
n = evbuffer_ptr_set(buf, &content_length, strlen(CONTENT_LENGTH),
EVBUFFER_PTR_ADD);
tor_assert(n == 0);
eol = evbuffer_search_eol(buf, &content_length, NULL, EVBUFFER_EOL_CRLF);
tor_assert(eol.pos > content_length.pos);
tor_assert(eol.pos <= crlf.pos);
inspect_evbuffer(buf, &data, eol.pos - content_length.pos, &free_data,
&content_length);
i = atoi(data);
if (free_data)
tor_free(data);
if (i < 0) {
log_warn(LD_PROTOCOL, "Content-Length is less than zero; it looks like "
"someone is trying to crash us.");
return -1;
}
contentlen = i;
/* if content-length is malformed, then our body length is 0. fine. */
log_debug(LD_HTTP,"Got a contentlen of %d.",(int)contentlen);
if (bodylen < contentlen) {
if (!force_complete) {
log_debug(LD_HTTP,"body not all here yet.");
return 0; /* not all there yet */
}
}
if (bodylen > contentlen) {
bodylen = contentlen;
log_debug(LD_HTTP,"bodylen reduced to %d.",(int)bodylen);
}
}
if (headers_out) {
*headers_out = tor_malloc(headerlen+1);
evbuffer_remove(buf, *headers_out, headerlen);
(*headers_out)[headerlen] = '\0';
}
if (body_out) {
tor_assert(headers_out);
tor_assert(body_used);
*body_used = bodylen;
*body_out = tor_malloc(bodylen+1);
evbuffer_remove(buf, *body_out, bodylen);
(*body_out)[bodylen] = '\0';
}
return 1;
}
#endif
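Both fetch_from_buf_http() (which stays) and the evbuffer variant removed here split the message at the first \r\n\r\n and then consult Content-Length for the body. A self-contained illustration of that split on a flat string, assuming the whole message is already in memory:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
      const char *msg = "HTTP/1.0 200 OK\r\nContent-Length: 5\r\n\r\nhello";
      const char *crlf = strstr(msg, "\r\n\r\n");
      if (!crlf)
        return 1;                                  /* headers not all here yet */
      size_t headerlen = (size_t)(crlf - msg) + 4; /* skip over the \r\n\r\n */
      size_t bodylen = strlen(msg) - headerlen;
      printf("headers: %zu bytes, body: %zu bytes\n", headerlen, bodylen);
      return 0;   /* prints "headers: 38 bytes, body: 5 bytes" */
    }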
/** /**
* Wait this many seconds before warning the user about using SOCKS unsafely * Wait this many seconds before warning the user about using SOCKS unsafely
* again (requires that WarnUnsafeSocks is turned on). */ * again (requires that WarnUnsafeSocks is turned on). */
@ -1453,86 +1274,6 @@ fetch_from_buf_socks(buf_t *buf, socks_request_t *req,
return res; return res;
} }
#ifdef USE_BUFFEREVENTS
/* As fetch_from_buf_socks(), but targets an evbuffer instead. */
int
fetch_from_evbuffer_socks(struct evbuffer *buf, socks_request_t *req,
int log_sockstype, int safe_socks)
{
char *data;
ssize_t n_drain;
size_t datalen, buflen, want_length;
int res;
buflen = evbuffer_get_length(buf);
if (buflen < 2)
return 0;
{
/* See if we can find the socks request in the first chunk of the buffer.
*/
struct evbuffer_iovec v;
int i;
n_drain = 0;
i = evbuffer_peek(buf, -1, NULL, &v, 1);
tor_assert(i == 1);
data = v.iov_base;
datalen = v.iov_len;
want_length = 0;
res = parse_socks(data, datalen, req, log_sockstype,
safe_socks, &n_drain, &want_length);
if (n_drain < 0)
evbuffer_drain(buf, evbuffer_get_length(buf));
else if (n_drain > 0)
evbuffer_drain(buf, n_drain);
if (res)
return res;
}
/* Okay, the first chunk of the buffer didn't have a complete socks request.
* That means that either we don't have a whole socks request at all, or
* it's gotten split up. We're going to try passing parse_socks() bigger
* and bigger chunks until either it says "Okay, I got it", or it says it
* will need more data than we currently have. */
/* Loop while we have more data that we haven't given parse_socks() yet. */
do {
int free_data = 0;
const size_t last_wanted = want_length;
n_drain = 0;
data = NULL;
datalen = inspect_evbuffer(buf, &data, want_length, &free_data, NULL);
want_length = 0;
res = parse_socks(data, datalen, req, log_sockstype,
safe_socks, &n_drain, &want_length);
if (free_data)
tor_free(data);
if (n_drain < 0)
evbuffer_drain(buf, evbuffer_get_length(buf));
else if (n_drain > 0)
evbuffer_drain(buf, n_drain);
if (res == 0 && n_drain == 0 && want_length <= last_wanted) {
/* If we drained nothing, and we didn't ask for more than last time,
* then we probably wanted more data than the buffer actually had,
* and we're finding out that we're not satisfied with it. It's
* time to break until we have more data. */
break;
}
buflen = evbuffer_get_length(buf);
} while (res == 0 && want_length <= buflen && buflen >= 2);
return res;
}
#endif
/** The size of the header of an Extended ORPort message: 2 bytes for /** The size of the header of an Extended ORPort message: 2 bytes for
* COMMAND, 2 bytes for BODYLEN */ * COMMAND, 2 bytes for BODYLEN */
#define EXT_OR_CMD_HEADER_SIZE 4 #define EXT_OR_CMD_HEADER_SIZE 4
@ -1563,34 +1304,6 @@ fetch_ext_or_command_from_buf(buf_t *buf, ext_or_cmd_t **out)
return 1; return 1;
} }
#ifdef USE_BUFFEREVENTS
/** Read <b>buf</b>, which should contain an Extended ORPort message
* from a transport proxy. If well-formed, create and populate
* <b>out</b> with the Extended ORport message. Return 0 if the
* buffer was incomplete, 1 if it was well-formed and -1 if we
* encountered an error while parsing it. */
int
fetch_ext_or_command_from_evbuffer(struct evbuffer *buf, ext_or_cmd_t **out)
{
char hdr[EXT_OR_CMD_HEADER_SIZE];
uint16_t len;
size_t buf_len = evbuffer_get_length(buf);
if (buf_len < EXT_OR_CMD_HEADER_SIZE)
return 0;
evbuffer_copyout(buf, hdr, EXT_OR_CMD_HEADER_SIZE);
len = ntohs(get_uint16(hdr+2));
if (buf_len < (unsigned)len + EXT_OR_CMD_HEADER_SIZE)
return 0;
*out = ext_or_cmd_new(len);
(*out)->cmd = ntohs(get_uint16(hdr));
(*out)->len = len;
evbuffer_drain(buf, EXT_OR_CMD_HEADER_SIZE);
evbuffer_remove(buf, (*out)->body, len);
return 1;
}
#endif
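The framing that both parsers expect is just a 2-byte command and a 2-byte body length, each in network byte order, followed by the body. A tiny encoder sketch for that header (ext_or_cmd_encode_header is a hypothetical helper, not part of Tor):

    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>   /* htons */

    #define EXT_OR_CMD_HEADER_SIZE 4

    /* Hypothetical encoder: writes COMMAND and BODYLEN the way the parser reads them. */
    static void
    ext_or_cmd_encode_header(uint8_t *hdr, uint16_t cmd, uint16_t bodylen)
    {
      uint16_t cmd_n = htons(cmd), len_n = htons(bodylen);
      memcpy(hdr, &cmd_n, 2);
      memcpy(hdr + 2, &len_n, 2);
    }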
/** Create a SOCKS5 reply message with <b>reason</b> in its REP field and /** Create a SOCKS5 reply message with <b>reason</b> in its REP field and
* have Tor send it as error response to <b>req</b>. * have Tor send it as error response to <b>req</b>.
*/ */
@ -2035,34 +1748,6 @@ fetch_from_buf_socks_client(buf_t *buf, int state, char **reason)
return r; return r;
} }
#ifdef USE_BUFFEREVENTS
/** As fetch_from_buf_socks_client, but works on an evbuffer */
int
fetch_from_evbuffer_socks_client(struct evbuffer *buf, int state,
char **reason)
{
ssize_t drain = 0;
uint8_t *data;
size_t datalen;
int r;
/* Linearize the SOCKS response in the buffer, up to 128 bytes.
* (parse_socks_client shouldn't need to see anything beyond that.) */
datalen = evbuffer_get_length(buf);
if (datalen > MAX_SOCKS_MESSAGE_LEN)
datalen = MAX_SOCKS_MESSAGE_LEN;
data = evbuffer_pullup(buf, datalen);
r = parse_socks_client(data, datalen, state, reason, &drain);
if (drain > 0)
evbuffer_drain(buf, drain);
else if (drain < 0)
evbuffer_drain(buf, evbuffer_get_length(buf));
return r;
}
#endif
/** Implementation logic for fetch_from_*_socks_client. */ /** Implementation logic for fetch_from_*_socks_client. */
static int static int
parse_socks_client(const uint8_t *data, size_t datalen, parse_socks_client(const uint8_t *data, size_t datalen,
@ -2193,27 +1878,6 @@ peek_buf_has_control0_command(buf_t *buf)
return 0; return 0;
} }
#ifdef USE_BUFFEREVENTS
int
peek_evbuffer_has_control0_command(struct evbuffer *buf)
{
int result = 0;
if (evbuffer_get_length(buf) >= 4) {
int free_out = 0;
char *data = NULL;
size_t n = inspect_evbuffer(buf, &data, 4, &free_out, NULL);
uint16_t cmd;
tor_assert(n >= 4);
cmd = ntohs(get_uint16(data+2));
if (cmd <= 0x14)
result = 1;
if (free_out)
tor_free(data);
}
return result;
}
#endif
/** Return the index within <b>buf</b> at which <b>ch</b> first appears, /** Return the index within <b>buf</b> at which <b>ch</b> first appears,
* or -1 if <b>ch</b> does not appear on buf. */ * or -1 if <b>ch</b> does not appear on buf. */
static off_t static off_t
@ -2311,93 +1975,14 @@ write_to_buf_zlib(buf_t *buf, tor_zlib_state_t *state,
return 0; return 0;
} }
#ifdef USE_BUFFEREVENTS
int
write_to_evbuffer_zlib(struct evbuffer *buf, tor_zlib_state_t *state,
const char *data, size_t data_len,
int done)
{
char *next;
size_t old_avail, avail;
int over = 0, n;
struct evbuffer_iovec vec[1];
do {
{
size_t cap = data_len / 4;
if (cap < 128)
cap = 128;
/* XXXX NM this strategy is fragmentation-prone. We should really have
* two iovecs, and write first into the one, and then into the
* second if the first gets full. */
n = evbuffer_reserve_space(buf, cap, vec, 1);
tor_assert(n == 1);
}
next = vec[0].iov_base;
avail = old_avail = vec[0].iov_len;
switch (tor_zlib_process(state, &next, &avail, &data, &data_len, done)) {
case TOR_ZLIB_DONE:
over = 1;
break;
case TOR_ZLIB_ERR:
return -1;
case TOR_ZLIB_OK:
if (data_len == 0)
over = 1;
break;
case TOR_ZLIB_BUF_FULL:
if (avail) {
/* Zlib says we need more room (ZLIB_BUF_FULL). Start a new chunk
* automatically, whether we're going to or not. */
}
break;
}
/* XXXX possible infinite loop on BUF_FULL. */
vec[0].iov_len = old_avail - avail;
evbuffer_commit_space(buf, vec, 1);
} while (!over);
check();
return 0;
}
#endif
/** Set *<b>output</b> to contain a copy of the data in *<b>input</b> */ /** Set *<b>output</b> to contain a copy of the data in *<b>input</b> */
int int
generic_buffer_set_to_copy(generic_buffer_t **output, buf_set_to_copy(buf_t **output,
const generic_buffer_t *input) const buf_t *input)
{ {
#ifdef USE_BUFFEREVENTS
struct evbuffer_ptr ptr;
size_t remaining = evbuffer_get_length(input);
if (*output) {
evbuffer_drain(*output, evbuffer_get_length(*output));
} else {
if (!(*output = evbuffer_new()))
return -1;
}
evbuffer_ptr_set((struct evbuffer*)input, &ptr, 0, EVBUFFER_PTR_SET);
while (remaining) {
struct evbuffer_iovec v[4];
int n_used, i;
n_used = evbuffer_peek((struct evbuffer*)input, -1, &ptr, v, 4);
if (n_used < 0)
return -1;
for (i=0;i<n_used;++i) {
evbuffer_add(*output, v[i].iov_base, v[i].iov_len);
tor_assert(v[i].iov_len <= remaining);
remaining -= v[i].iov_len;
evbuffer_ptr_set((struct evbuffer*)input,
&ptr, v[i].iov_len, EVBUFFER_PTR_ADD);
}
}
#else
if (*output) if (*output)
buf_free(*output); buf_free(*output);
*output = buf_copy(input); *output = buf_copy(input);
#endif
return 0; return 0;
} }


@ -56,46 +56,8 @@ int peek_buf_has_control0_command(buf_t *buf);
int fetch_ext_or_command_from_buf(buf_t *buf, ext_or_cmd_t **out); int fetch_ext_or_command_from_buf(buf_t *buf, ext_or_cmd_t **out);
#ifdef USE_BUFFEREVENTS int buf_set_to_copy(buf_t **output,
int fetch_var_cell_from_evbuffer(struct evbuffer *buf, var_cell_t **out, const buf_t *input);
int linkproto);
int fetch_from_evbuffer_socks(struct evbuffer *buf, socks_request_t *req,
int log_sockstype, int safe_socks);
int fetch_from_evbuffer_socks_client(struct evbuffer *buf, int state,
char **reason);
int fetch_from_evbuffer_http(struct evbuffer *buf,
char **headers_out, size_t max_headerlen,
char **body_out, size_t *body_used, size_t max_bodylen,
int force_complete);
int peek_evbuffer_has_control0_command(struct evbuffer *buf);
int write_to_evbuffer_zlib(struct evbuffer *buf, tor_zlib_state_t *state,
const char *data, size_t data_len,
int done);
int fetch_ext_or_command_from_evbuffer(struct evbuffer *buf,
ext_or_cmd_t **out);
#endif
#ifdef USE_BUFFEREVENTS
#define generic_buffer_new() evbuffer_new()
#define generic_buffer_len(b) evbuffer_get_length((b))
#define generic_buffer_add(b,dat,len) evbuffer_add((b),(dat),(len))
#define generic_buffer_get(b,buf,buflen) evbuffer_remove((b),(buf),(buflen))
#define generic_buffer_clear(b) evbuffer_drain((b), evbuffer_get_length((b)))
#define generic_buffer_free(b) evbuffer_free((b))
#define generic_buffer_fetch_ext_or_cmd(b, out) \
fetch_ext_or_command_from_evbuffer((b), (out))
#else
#define generic_buffer_new() buf_new()
#define generic_buffer_len(b) buf_datalen((b))
#define generic_buffer_add(b,dat,len) write_to_buf((dat),(len),(b))
#define generic_buffer_get(b,buf,buflen) fetch_from_buf((buf),(buflen),(b))
#define generic_buffer_clear(b) buf_clear((b))
#define generic_buffer_free(b) buf_free((b))
#define generic_buffer_fetch_ext_or_cmd(b, out) \
fetch_ext_or_command_from_buf((b), (out))
#endif
int generic_buffer_set_to_copy(generic_buffer_t **output,
const generic_buffer_t *input);
void assert_buf_ok(buf_t *buf); void assert_buf_ok(buf_t *buf);


@ -1192,6 +1192,8 @@ channel_tls_handle_var_cell(var_cell_t *var_cell, or_connection_t *conn)
* notice "hey, data arrived!" before we notice "hey, the handshake * notice "hey, data arrived!" before we notice "hey, the handshake
* finished!" And we need to be accepting both at once to handle both * finished!" And we need to be accepting both at once to handle both
* the v2 and v3 handshakes. */ * the v2 and v3 handshakes. */
/* But that shouldn't be happening any longer, now that we've disabled bufferevents. */
tor_assert_nonfatal_unreached_once();
/* fall through */ /* fall through */
case OR_CONN_STATE_TLS_SERVER_RENEGOTIATING: case OR_CONN_STATE_TLS_SERVER_RENEGOTIATING:


@ -113,7 +113,6 @@ static config_abbrev_t option_abbrevs_[] = {
{ "BridgeAuthoritativeDirectory", "BridgeAuthoritativeDir", 0, 0}, { "BridgeAuthoritativeDirectory", "BridgeAuthoritativeDir", 0, 0},
{ "HashedControlPassword", "__HashedControlSessionPassword", 1, 0}, { "HashedControlPassword", "__HashedControlSessionPassword", 1, 0},
{ "VirtualAddrNetwork", "VirtualAddrNetworkIPv4", 0, 0}, { "VirtualAddrNetwork", "VirtualAddrNetworkIPv4", 0, 0},
{ "_UseFilteringSSLBufferevents", "UseFilteringSSLBufferevents", 0, 1},
{ NULL, NULL, 0, 0}, { NULL, NULL, 0, 0},
}; };
@ -225,7 +224,7 @@ static config_var_t option_vars_[] = {
V(DirAuthorityFallbackRate, DOUBLE, "1.0"), V(DirAuthorityFallbackRate, DOUBLE, "1.0"),
V(DisableAllSwap, BOOL, "0"), V(DisableAllSwap, BOOL, "0"),
V(DisableDebuggerAttachment, BOOL, "1"), V(DisableDebuggerAttachment, BOOL, "1"),
V(DisableIOCP, BOOL, "1"), OBSOLETE("DisableIOCP"),
OBSOLETE("DisableV2DirectoryInfo_"), OBSOLETE("DisableV2DirectoryInfo_"),
OBSOLETE("DynamicDHGroups"), OBSOLETE("DynamicDHGroups"),
VPORT(DNSPort, LINELIST, NULL), VPORT(DNSPort, LINELIST, NULL),
@ -440,7 +439,7 @@ static config_var_t option_vars_[] = {
V(UseMicrodescriptors, AUTOBOOL, "auto"), V(UseMicrodescriptors, AUTOBOOL, "auto"),
V(UseNTorHandshake, AUTOBOOL, "1"), V(UseNTorHandshake, AUTOBOOL, "1"),
V(User, STRING, NULL), V(User, STRING, NULL),
V(UserspaceIOCPBuffers, BOOL, "0"), OBSOLETE("UserspaceIOCPBuffers"),
V(AuthDirSharedRandomness, BOOL, "1"), V(AuthDirSharedRandomness, BOOL, "1"),
OBSOLETE("V1AuthoritativeDirectory"), OBSOLETE("V1AuthoritativeDirectory"),
OBSOLETE("V2AuthoritativeDirectory"), OBSOLETE("V2AuthoritativeDirectory"),
@ -461,7 +460,8 @@ static config_var_t option_vars_[] = {
V(VirtualAddrNetworkIPv4, STRING, "127.192.0.0/10"), V(VirtualAddrNetworkIPv4, STRING, "127.192.0.0/10"),
V(VirtualAddrNetworkIPv6, STRING, "[FE80::]/10"), V(VirtualAddrNetworkIPv6, STRING, "[FE80::]/10"),
V(WarnPlaintextPorts, CSV, "23,109,110,143"), V(WarnPlaintextPorts, CSV, "23,109,110,143"),
V(UseFilteringSSLBufferevents, BOOL, "0"), OBSOLETE("UseFilteringSSLBufferevents"),
OBSOLETE("__UseFilteringSSLBufferevents"),
VAR("__ReloadTorrcOnSIGHUP", BOOL, ReloadTorrcOnSIGHUP, "1"), VAR("__ReloadTorrcOnSIGHUP", BOOL, ReloadTorrcOnSIGHUP, "1"),
VAR("__AllDirActionsPrivate", BOOL, AllDirActionsPrivate, "0"), VAR("__AllDirActionsPrivate", BOOL, AllDirActionsPrivate, "0"),
VAR("__DisablePredictedCircuits",BOOL,DisablePredictedCircuits, "0"), VAR("__DisablePredictedCircuits",BOOL,DisablePredictedCircuits, "0"),
@ -1674,17 +1674,6 @@ options_act(const or_options_t *old_options)
if (accounting_is_enabled(options)) if (accounting_is_enabled(options))
configure_accounting(time(NULL)); configure_accounting(time(NULL));
#ifdef USE_BUFFEREVENTS
/* If we're using the bufferevents implementation and our rate limits
* changed, we need to tell the rate-limiting system about it. */
if (!old_options ||
old_options->BandwidthRate != options->BandwidthRate ||
old_options->BandwidthBurst != options->BandwidthBurst ||
old_options->RelayBandwidthRate != options->RelayBandwidthRate ||
old_options->RelayBandwidthBurst != options->RelayBandwidthBurst)
connection_bucket_init();
#endif
old_ewma_enabled = cell_ewma_enabled(); old_ewma_enabled = cell_ewma_enabled();
/* Change the cell EWMA settings */ /* Change the cell EWMA settings */
cell_ewma_set_scale_factor(options, networkstatus_get_latest_consensus()); cell_ewma_set_scale_factor(options, networkstatus_get_latest_consensus());
@ -4229,12 +4218,6 @@ options_transition_allowed(const or_options_t *old,
return -1; return -1;
} }
if (old->DisableIOCP != new_val->DisableIOCP) {
*msg = tor_strdup("While Tor is running, changing DisableIOCP "
"is not allowed.");
return -1;
}
if (old->DisableDebuggerAttachment && if (old->DisableDebuggerAttachment &&
!new_val->DisableDebuggerAttachment) { !new_val->DisableDebuggerAttachment) {
*msg = tor_strdup("While Tor is running, disabling " *msg = tor_strdup("While Tor is running, disabling "
@ -7202,7 +7185,6 @@ init_libevent(const or_options_t *options)
suppress_libevent_log_msg("Function not implemented"); suppress_libevent_log_msg("Function not implemented");
memset(&cfg, 0, sizeof(cfg)); memset(&cfg, 0, sizeof(cfg));
cfg.disable_iocp = options->DisableIOCP;
cfg.num_cpus = get_num_cpus(options); cfg.num_cpus = get_num_cpus(options);
cfg.msec_per_tick = options->TokenBucketRefillInterval; cfg.msec_per_tick = options->TokenBucketRefillInterval;


@ -52,10 +52,6 @@
#include "sandbox.h" #include "sandbox.h"
#include "transports.h" #include "transports.h"
#ifdef USE_BUFFEREVENTS
#include <event2/event.h>
#endif
#ifdef HAVE_PWD_H #ifdef HAVE_PWD_H
#include <pwd.h> #include <pwd.h>
#endif #endif
@ -75,10 +71,8 @@ static void connection_init(time_t now, connection_t *conn, int type,
static int connection_init_accepted_conn(connection_t *conn, static int connection_init_accepted_conn(connection_t *conn,
const listener_connection_t *listener); const listener_connection_t *listener);
static int connection_handle_listener_read(connection_t *conn, int new_type); static int connection_handle_listener_read(connection_t *conn, int new_type);
#ifndef USE_BUFFEREVENTS
static int connection_bucket_should_increase(int bucket, static int connection_bucket_should_increase(int bucket,
or_connection_t *conn); or_connection_t *conn);
#endif
static int connection_finished_flushing(connection_t *conn); static int connection_finished_flushing(connection_t *conn);
static int connection_flushed_some(connection_t *conn); static int connection_flushed_some(connection_t *conn);
static int connection_finished_connecting(connection_t *conn); static int connection_finished_connecting(connection_t *conn);
@ -236,26 +230,6 @@ conn_state_to_string(int type, int state)
return buf; return buf;
} }
#ifdef USE_BUFFEREVENTS
/** Return true iff the connection's type is one that can use a
bufferevent-based implementation. */
int
connection_type_uses_bufferevent(connection_t *conn)
{
switch (conn->type) {
case CONN_TYPE_AP:
case CONN_TYPE_EXIT:
case CONN_TYPE_DIR:
case CONN_TYPE_CONTROL:
case CONN_TYPE_OR:
case CONN_TYPE_EXT_OR:
return 1;
default:
return 0;
}
}
#endif
/** Allocate and return a new dir_connection_t, initialized as by /** Allocate and return a new dir_connection_t, initialized as by
* connection_init(). */ * connection_init(). */
dir_connection_t * dir_connection_t *
@ -427,13 +401,11 @@ connection_init(time_t now, connection_t *conn, int type, int socket_family)
conn->type = type; conn->type = type;
conn->socket_family = socket_family; conn->socket_family = socket_family;
#ifndef USE_BUFFEREVENTS
if (!connection_is_listener(conn)) { if (!connection_is_listener(conn)) {
/* listeners never use their buf */ /* listeners never use their buf */
conn->inbuf = buf_new(); conn->inbuf = buf_new();
conn->outbuf = buf_new(); conn->outbuf = buf_new();
} }
#endif
conn->timestamp_created = now; conn->timestamp_created = now;
conn->timestamp_lastread = now; conn->timestamp_lastread = now;
@ -577,10 +549,10 @@ connection_free_(connection_t *conn)
if (entry_conn->socks_request) if (entry_conn->socks_request)
socks_request_free(entry_conn->socks_request); socks_request_free(entry_conn->socks_request);
if (entry_conn->pending_optimistic_data) { if (entry_conn->pending_optimistic_data) {
generic_buffer_free(entry_conn->pending_optimistic_data); buf_free(entry_conn->pending_optimistic_data);
} }
if (entry_conn->sending_optimistic_data) { if (entry_conn->sending_optimistic_data) {
generic_buffer_free(entry_conn->sending_optimistic_data); buf_free(entry_conn->sending_optimistic_data);
} }
} }
if (CONN_IS_EDGE(conn)) { if (CONN_IS_EDGE(conn)) {
@ -603,15 +575,6 @@ connection_free_(connection_t *conn)
tor_event_free(conn->read_event); tor_event_free(conn->read_event);
tor_event_free(conn->write_event); tor_event_free(conn->write_event);
conn->read_event = conn->write_event = NULL; conn->read_event = conn->write_event = NULL;
IF_HAS_BUFFEREVENT(conn, {
/* This was a workaround to handle bugs in some old versions of libevent
* where callbacks can occur after calling bufferevent_free(). Setting
* the callbacks to NULL prevented this. It shouldn't be necessary any
* more, but let's not tempt fate for now. */
bufferevent_setcb(conn->bufev, NULL, NULL, NULL, NULL);
bufferevent_free(conn->bufev);
conn->bufev = NULL;
});
if (conn->type == CONN_TYPE_DIR) { if (conn->type == CONN_TYPE_DIR) {
dir_connection_t *dir_conn = TO_DIR_CONN(conn); dir_connection_t *dir_conn = TO_DIR_CONN(conn);
@ -645,13 +608,6 @@ connection_free_(connection_t *conn)
tor_free(TO_OR_CONN(conn)->ext_or_transport); tor_free(TO_OR_CONN(conn)->ext_or_transport);
} }
#ifdef USE_BUFFEREVENTS
if (conn->type == CONN_TYPE_OR && TO_OR_CONN(conn)->bucket_cfg) {
ev_token_bucket_cfg_free(TO_OR_CONN(conn)->bucket_cfg);
TO_OR_CONN(conn)->bucket_cfg = NULL;
}
#endif
memwipe(mem, 0xCC, memlen); /* poison memory */ memwipe(mem, 0xCC, memlen); /* poison memory */
tor_free(mem); tor_free(mem);
} }
@ -2249,19 +2205,14 @@ connection_send_socks5_connect(connection_t *conn)
conn->proxy_state = PROXY_SOCKS5_WANT_CONNECT_OK; conn->proxy_state = PROXY_SOCKS5_WANT_CONNECT_OK;
} }
/** Wrapper around fetch_from_(buf/evbuffer)_socks_client: see those functions /** Wrapper around fetch_from_buf_socks_client: see that functions
* for documentation of its behavior. */ * for documentation of its behavior. */
static int static int
connection_fetch_from_buf_socks_client(connection_t *conn, connection_fetch_from_buf_socks_client(connection_t *conn,
int state, char **reason) int state, char **reason)
{ {
IF_HAS_BUFFEREVENT(conn, {
struct evbuffer *input = bufferevent_get_input(conn->bufev);
return fetch_from_evbuffer_socks_client(input, state, reason);
}) ELSE_IF_NO_BUFFEREVENT {
return fetch_from_buf_socks_client(conn->inbuf, state, reason); return fetch_from_buf_socks_client(conn->inbuf, state, reason);
} }
}
/** Call this from connection_*_process_inbuf() to advance the proxy /** Call this from connection_*_process_inbuf() to advance the proxy
* handshake. * handshake.
@ -2694,21 +2645,15 @@ connection_is_rate_limited(connection_t *conn)
return 1; return 1;
} }
#ifdef USE_BUFFEREVENTS
static struct bufferevent_rate_limit_group *global_rate_limit = NULL;
#else
/** Did either global write bucket run dry last second? If so, /** Did either global write bucket run dry last second? If so,
* we are likely to run dry again this second, so be stingy with the * we are likely to run dry again this second, so be stingy with the
* tokens we just put in. */ * tokens we just put in. */
static int write_buckets_empty_last_second = 0; static int write_buckets_empty_last_second = 0;
#endif
/** How many seconds of no active local circuits will make the /** How many seconds of no active local circuits will make the
* connection revert to the "relayed" bandwidth class? */ * connection revert to the "relayed" bandwidth class? */
#define CLIENT_IDLE_TIME_FOR_PRIORITY 30 #define CLIENT_IDLE_TIME_FOR_PRIORITY 30
#ifndef USE_BUFFEREVENTS
/** Return 1 if <b>conn</b> should use tokens from the "relayed" /** Return 1 if <b>conn</b> should use tokens from the "relayed"
* bandwidth rates, else 0. Currently, only OR conns with bandwidth * bandwidth rates, else 0. Currently, only OR conns with bandwidth
* class 1, and directory conns that are serving data out, count. * class 1, and directory conns that are serving data out, count.
@ -2820,20 +2765,6 @@ connection_bucket_write_limit(connection_t *conn, time_t now)
return connection_bucket_round_robin(base, priority, return connection_bucket_round_robin(base, priority,
global_bucket, conn_bucket); global_bucket, conn_bucket);
} }
#else
static ssize_t
connection_bucket_read_limit(connection_t *conn, time_t now)
{
(void) now;
return bufferevent_get_max_to_read(conn->bufev);
}
ssize_t
connection_bucket_write_limit(connection_t *conn, time_t now)
{
(void) now;
return bufferevent_get_max_to_write(conn->bufev);
}
#endif
/** Return 1 if the global write buckets are low enough that we /** Return 1 if the global write buckets are low enough that we
* shouldn't send <b>attempt</b> bytes of low-priority directory stuff * shouldn't send <b>attempt</b> bytes of low-priority directory stuff
@ -2857,12 +2788,8 @@ connection_bucket_write_limit(connection_t *conn, time_t now)
int int
global_write_bucket_low(connection_t *conn, size_t attempt, int priority) global_write_bucket_low(connection_t *conn, size_t attempt, int priority)
{ {
#ifdef USE_BUFFEREVENTS
ssize_t smaller_bucket = bufferevent_get_max_to_write(conn->bufev);
#else
int smaller_bucket = global_write_bucket < global_relayed_write_bucket ? int smaller_bucket = global_write_bucket < global_relayed_write_bucket ?
global_write_bucket : global_relayed_write_bucket; global_write_bucket : global_relayed_write_bucket;
#endif
if (authdir_mode(get_options()) && priority>1) if (authdir_mode(get_options()) && priority>1)
return 0; /* there's always room to answer v2 if we're an auth dir */ return 0; /* there's always room to answer v2 if we're an auth dir */
@ -2872,10 +2799,8 @@ global_write_bucket_low(connection_t *conn, size_t attempt, int priority)
if (smaller_bucket < (int)attempt) if (smaller_bucket < (int)attempt)
return 1; /* not enough space no matter the priority */ return 1; /* not enough space no matter the priority */
#ifndef USE_BUFFEREVENTS
if (write_buckets_empty_last_second) if (write_buckets_empty_last_second)
return 1; /* we're already hitting our limits, no more please */ return 1; /* we're already hitting our limits, no more please */
#endif
if (priority == 1) { /* old-style v1 query */ if (priority == 1) { /* old-style v1 query */
/* Could we handle *two* of these requests within the next two seconds? */ /* Could we handle *two* of these requests within the next two seconds? */
@ -2923,29 +2848,6 @@ record_num_bytes_transferred_impl(connection_t *conn,
rep_hist_note_exit_bytes(conn->port, num_written, num_read); rep_hist_note_exit_bytes(conn->port, num_written, num_read);
} }
#ifdef USE_BUFFEREVENTS
/** Wrapper around fetch_from_(buf/evbuffer)_socks_client: see those functions
* for documentation of its behavior. */
static void
record_num_bytes_transferred(connection_t *conn,
time_t now, size_t num_read, size_t num_written)
{
/* XXXX check if this is necessary */
if (num_written >= INT_MAX || num_read >= INT_MAX) {
log_err(LD_BUG, "Value out of range. num_read=%lu, num_written=%lu, "
"connection type=%s, state=%s",
(unsigned long)num_read, (unsigned long)num_written,
conn_type_to_string(conn->type),
conn_state_to_string(conn->type, conn->state));
if (num_written >= INT_MAX) num_written = 1;
if (num_read >= INT_MAX) num_read = 1;
tor_fragile_assert();
}
record_num_bytes_transferred_impl(conn,now,num_read,num_written);
}
#endif
/** Helper: convert given <b>tvnow</b> time value to milliseconds since /** Helper: convert given <b>tvnow</b> time value to milliseconds since
* midnight. */ * midnight. */
static uint32_t static uint32_t
@ -2990,7 +2892,6 @@ connection_buckets_note_empty_ts(uint32_t *timestamp_var,
*timestamp_var = msec_since_midnight(tvnow); *timestamp_var = msec_since_midnight(tvnow);
} }
#ifndef USE_BUFFEREVENTS
/** Last time at which the global or relay buckets were emptied in msec /** Last time at which the global or relay buckets were emptied in msec
* since midnight. */ * since midnight. */
static uint32_t global_relayed_read_emptied = 0, static uint32_t global_relayed_read_emptied = 0,
@ -3321,92 +3222,6 @@ connection_bucket_should_increase(int bucket, or_connection_t *conn)
return 1; return 1;
} }
#else
static void
connection_buckets_decrement(connection_t *conn, time_t now,
size_t num_read, size_t num_written)
{
(void) conn;
(void) now;
(void) num_read;
(void) num_written;
/* Libevent does this for us. */
}
void
connection_bucket_refill(int seconds_elapsed, time_t now)
{
(void) seconds_elapsed;
(void) now;
/* Libevent does this for us. */
}
void
connection_bucket_init(void)
{
const or_options_t *options = get_options();
const struct timeval *tick = tor_libevent_get_one_tick_timeout();
struct ev_token_bucket_cfg *bucket_cfg;
uint64_t rate, burst;
if (options->RelayBandwidthRate) {
rate = options->RelayBandwidthRate;
burst = options->RelayBandwidthBurst;
} else {
rate = options->BandwidthRate;
burst = options->BandwidthBurst;
}
/* This can't overflow, since TokenBucketRefillInterval <= 1000,
* and rate started out less than INT32_MAX. */
rate = (rate * options->TokenBucketRefillInterval) / 1000;
bucket_cfg = ev_token_bucket_cfg_new((uint32_t)rate, (uint32_t)burst,
(uint32_t)rate, (uint32_t)burst,
tick);
if (!global_rate_limit) {
global_rate_limit =
bufferevent_rate_limit_group_new(tor_libevent_get_base(), bucket_cfg);
} else {
bufferevent_rate_limit_group_set_cfg(global_rate_limit, bucket_cfg);
}
ev_token_bucket_cfg_free(bucket_cfg);
}
void
connection_get_rate_limit_totals(uint64_t *read_out, uint64_t *written_out)
{
if (global_rate_limit == NULL) {
*read_out = *written_out = 0;
} else {
bufferevent_rate_limit_group_get_totals(
global_rate_limit, read_out, written_out);
}
}
/** Perform whatever operations are needed on <b>conn</b> to enable
* rate-limiting. */
void
connection_enable_rate_limiting(connection_t *conn)
{
if (conn->bufev) {
if (!global_rate_limit)
connection_bucket_init();
tor_add_bufferevent_to_rate_limit_group(conn->bufev, global_rate_limit);
}
}
static void
connection_consider_empty_write_buckets(connection_t *conn)
{
(void) conn;
}
static void
connection_consider_empty_read_buckets(connection_t *conn)
{
(void) conn;
}
#endif
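The removed connection_bucket_init() scaled the configured per-second rate down to a per-tick amount before handing it to ev_token_bucket_cfg_new(); the overflow comment above holds because the refill interval never exceeds 1000 ms. The arithmetic alone, with illustrative numbers:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
      uint64_t rate_per_second = 1048576;   /* e.g. BandwidthRate of 1 MiB/s */
      int refill_interval_msec = 100;       /* e.g. TokenBucketRefillInterval */
      /* Safe: the interval is capped at 1000 and the rate fits in int32. */
      uint64_t rate_per_tick = (rate_per_second * refill_interval_msec) / 1000;
      printf("%llu bytes per tick\n", (unsigned long long)rate_per_tick);
      return 0;   /* prints "104857 bytes per tick" */
    }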
/** Read bytes from conn-\>s and process them. /** Read bytes from conn-\>s and process them.
* *
@ -3737,216 +3552,32 @@ connection_read_to_buf(connection_t *conn, ssize_t *max_to_read,
return 0; return 0;
} }
#ifdef USE_BUFFEREVENTS
/* XXXX These generic versions could be simplified by making them
type-specific */
/** Callback: Invoked whenever bytes are added to or drained from an input
* evbuffer. Used to track the number of bytes read. */
static void
evbuffer_inbuf_callback(struct evbuffer *buf,
const struct evbuffer_cb_info *info, void *arg)
{
connection_t *conn = arg;
(void) buf;
/* XXXX These need to get real counts on the non-nested TLS case. - NM */
if (info->n_added) {
time_t now = approx_time();
conn->timestamp_lastread = now;
record_num_bytes_transferred(conn, now, info->n_added, 0);
connection_consider_empty_read_buckets(conn);
if (conn->type == CONN_TYPE_AP) {
edge_connection_t *edge_conn = TO_EDGE_CONN(conn);
/*XXXX++ check for overflow*/
edge_conn->n_read += (int)info->n_added;
}
}
}
/** Callback: Invoked whenever bytes are added to or drained from an output
* evbuffer. Used to track the number of bytes written. */
static void
evbuffer_outbuf_callback(struct evbuffer *buf,
const struct evbuffer_cb_info *info, void *arg)
{
connection_t *conn = arg;
(void)buf;
if (info->n_deleted) {
time_t now = approx_time();
conn->timestamp_lastwritten = now;
record_num_bytes_transferred(conn, now, 0, info->n_deleted);
connection_consider_empty_write_buckets(conn);
if (conn->type == CONN_TYPE_AP) {
edge_connection_t *edge_conn = TO_EDGE_CONN(conn);
/*XXXX++ check for overflow*/
edge_conn->n_written += (int)info->n_deleted;
}
}
}
/** Callback: invoked whenever a bufferevent has read data. */
void
connection_handle_read_cb(struct bufferevent *bufev, void *arg)
{
connection_t *conn = arg;
(void) bufev;
if (!conn->marked_for_close) {
if (connection_process_inbuf(conn, 1)<0) /* XXXX Always 1? */
if (!conn->marked_for_close)
connection_mark_for_close(conn);
}
}
/** Callback: invoked whenever a bufferevent has written data. */
void
connection_handle_write_cb(struct bufferevent *bufev, void *arg)
{
connection_t *conn = arg;
struct evbuffer *output;
if (connection_flushed_some(conn)<0) {
if (!conn->marked_for_close)
connection_mark_for_close(conn);
return;
}
output = bufferevent_get_output(bufev);
if (!evbuffer_get_length(output)) {
connection_finished_flushing(conn);
if (conn->marked_for_close && conn->hold_open_until_flushed) {
conn->hold_open_until_flushed = 0;
if (conn->linked) {
/* send eof */
bufferevent_flush(conn->bufev, EV_WRITE, BEV_FINISHED);
}
}
}
}
/** Callback: invoked whenever a bufferevent has had an event (like a
* connection, or an eof, or an error) occur. */
void
connection_handle_event_cb(struct bufferevent *bufev, short event, void *arg)
{
connection_t *conn = arg;
(void) bufev;
if (conn->marked_for_close)
return;
if (event & BEV_EVENT_CONNECTED) {
tor_assert(connection_state_is_connecting(conn));
if (connection_finished_connecting(conn)<0)
return;
}
if (event & BEV_EVENT_EOF) {
if (!conn->marked_for_close) {
conn->inbuf_reached_eof = 1;
if (connection_reached_eof(conn)<0)
return;
}
}
if (event & BEV_EVENT_ERROR) {
int socket_error = evutil_socket_geterror(conn->s);
if (conn->type == CONN_TYPE_OR &&
conn->state == OR_CONN_STATE_CONNECTING) {
connection_or_connect_failed(TO_OR_CONN(conn),
errno_to_orconn_end_reason(socket_error),
tor_socket_strerror(socket_error));
} else if (CONN_IS_EDGE(conn)) {
edge_connection_t *edge_conn = TO_EDGE_CONN(conn);
if (!edge_conn->edge_has_sent_end)
connection_edge_end_errno(edge_conn);
if (conn->type == CONN_TYPE_AP && TO_ENTRY_CONN(conn)->socks_request) {
/* broken, don't send a socks reply back */
TO_ENTRY_CONN(conn)->socks_request->has_finished = 1;
}
}
connection_close_immediate(conn); /* Connection is dead. */
if (!conn->marked_for_close)
connection_mark_for_close(conn);
}
}
/** Set up the generic callbacks for the bufferevent on <b>conn</b>. */
void
connection_configure_bufferevent_callbacks(connection_t *conn)
{
struct bufferevent *bufev;
struct evbuffer *input, *output;
tor_assert(conn->bufev);
bufev = conn->bufev;
bufferevent_setcb(bufev,
connection_handle_read_cb,
connection_handle_write_cb,
connection_handle_event_cb,
conn);
/* Set a fairly high write low-watermark so that we get the write callback
called whenever data is written to bring us under 128K. Leave the
high-watermark at 0.
*/
bufferevent_setwatermark(bufev, EV_WRITE, 128*1024, 0);
input = bufferevent_get_input(bufev);
output = bufferevent_get_output(bufev);
evbuffer_add_cb(input, evbuffer_inbuf_callback, conn);
evbuffer_add_cb(output, evbuffer_outbuf_callback, conn);
}
#endif
/** A pass-through to fetch_from_buf. */ /** A pass-through to fetch_from_buf. */
int int
connection_fetch_from_buf(char *string, size_t len, connection_t *conn) connection_fetch_from_buf(char *string, size_t len, connection_t *conn)
{ {
IF_HAS_BUFFEREVENT(conn, {
/* XXX overflow -seb */
return (int)bufferevent_read(conn->bufev, string, len);
}) ELSE_IF_NO_BUFFEREVENT {
return fetch_from_buf(string, len, conn->inbuf); return fetch_from_buf(string, len, conn->inbuf);
} }
}
/** As fetch_from_buf_line(), but read from a connection's input buffer. */ /** As fetch_from_buf_line(), but read from a connection's input buffer. */
int int
connection_fetch_from_buf_line(connection_t *conn, char *data, connection_fetch_from_buf_line(connection_t *conn, char *data,
size_t *data_len) size_t *data_len)
{ {
IF_HAS_BUFFEREVENT(conn, {
int r;
size_t eol_len=0;
struct evbuffer *input = bufferevent_get_input(conn->bufev);
struct evbuffer_ptr ptr =
evbuffer_search_eol(input, NULL, &eol_len, EVBUFFER_EOL_LF);
if (ptr.pos == -1)
return 0; /* No EOL found. */
if ((size_t)ptr.pos+eol_len >= *data_len) {
return -1; /* Too long */
}
*data_len = ptr.pos+eol_len;
r = evbuffer_remove(input, data, ptr.pos+eol_len);
tor_assert(r >= 0);
data[ptr.pos+eol_len] = '\0';
return 1;
}) ELSE_IF_NO_BUFFEREVENT {
return fetch_from_buf_line(conn->inbuf, data, data_len); return fetch_from_buf_line(conn->inbuf, data, data_len);
} }
}
/** As fetch_from_buf_http, but fetches from a connection's input buffer_t or /** As fetch_from_buf_http, but fetches from a connection's input buffer_t as
* its bufferevent as appropriate. */ * appropriate. */
int int
connection_fetch_from_buf_http(connection_t *conn, connection_fetch_from_buf_http(connection_t *conn,
char **headers_out, size_t max_headerlen, char **headers_out, size_t max_headerlen,
char **body_out, size_t *body_used, char **body_out, size_t *body_used,
size_t max_bodylen, int force_complete) size_t max_bodylen, int force_complete)
{ {
IF_HAS_BUFFEREVENT(conn, {
struct evbuffer *input = bufferevent_get_input(conn->bufev);
return fetch_from_evbuffer_http(input, headers_out, max_headerlen,
body_out, body_used, max_bodylen, force_complete);
}) ELSE_IF_NO_BUFFEREVENT {
return fetch_from_buf_http(conn->inbuf, headers_out, max_headerlen, return fetch_from_buf_http(conn->inbuf, headers_out, max_headerlen,
body_out, body_used, max_bodylen, force_complete); body_out, body_used, max_bodylen, force_complete);
} }
}
/** Return conn-\>outbuf_flushlen: how many bytes conn wants to flush /** Return conn-\>outbuf_flushlen: how many bytes conn wants to flush
* from its outbuf. */ * from its outbuf. */
@ -4249,7 +3880,7 @@ connection_handle_write(connection_t *conn, int force)
* Try to flush data that's waiting for a write on <b>conn</b>. Return * Try to flush data that's waiting for a write on <b>conn</b>. Return
* -1 on failure, 0 on success. * -1 on failure, 0 on success.
* *
* Don't use this function for regular writing; the buffers/bufferevents * Don't use this function for regular writing; the buffers
* system should be good enough at scheduling writes there. Instead, this * system should be good enough at scheduling writes there. Instead, this
* function is for cases when we're about to exit or something and we want * function is for cases when we're about to exit or something and we want
* to report it right away. * to report it right away.
@ -4257,10 +3888,6 @@ connection_handle_write(connection_t *conn, int force)
int int
connection_flush(connection_t *conn) connection_flush(connection_t *conn)
{ {
IF_HAS_BUFFEREVENT(conn, {
int r = bufferevent_flush(conn->bufev, EV_WRITE, BEV_FLUSH);
return (r < 0) ? -1 : 0;
});
return connection_handle_write(conn, 1); return connection_handle_write(conn, 1);
} }
@ -4289,22 +3916,6 @@ connection_write_to_buf_impl_,(const char *string, size_t len,
if (conn->marked_for_close && !conn->hold_open_until_flushed) if (conn->marked_for_close && !conn->hold_open_until_flushed)
return; return;
IF_HAS_BUFFEREVENT(conn, {
if (zlib) {
int done = zlib < 0;
r = write_to_evbuffer_zlib(bufferevent_get_output(conn->bufev),
TO_DIR_CONN(conn)->zlib_state,
string, len, done);
} else {
r = bufferevent_write(conn->bufev, string, len);
}
if (r < 0) {
/* XXXX mark for close? */
log_warn(LD_NET, "bufferevent_write failed! That shouldn't happen.");
}
return;
});
old_datalen = buf_datalen(conn->outbuf); old_datalen = buf_datalen(conn->outbuf);
if (zlib) { if (zlib) {
dir_connection_t *dir_conn = TO_DIR_CONN(conn); dir_connection_t *dir_conn = TO_DIR_CONN(conn);
@ -4750,7 +4361,7 @@ connection_flushed_some(connection_t *conn)
} }
/** We just finished flushing bytes to the appropriately low network layer, /** We just finished flushing bytes to the appropriately low network layer,
* and there are no more bytes remaining in conn-\>outbuf, conn-\>bev, or * and there are no more bytes remaining in conn-\>outbuf or
* conn-\>tls to be flushed. * conn-\>tls to be flushed.
* *
* This function just passes conn to the connection-specific * This function just passes conn to the connection-specific
@ -4767,7 +4378,6 @@ connection_finished_flushing(connection_t *conn)
// log_fn(LOG_DEBUG,"entered. Socket %u.", conn->s); // log_fn(LOG_DEBUG,"entered. Socket %u.", conn->s);
IF_HAS_NO_BUFFEREVENT(conn)
connection_stop_writing(conn); connection_stop_writing(conn);
switch (conn->type) { switch (conn->type) {
@ -4902,15 +4512,6 @@ assert_connection_ok(connection_t *conn, time_t now)
tor_assert(conn->type >= CONN_TYPE_MIN_); tor_assert(conn->type >= CONN_TYPE_MIN_);
tor_assert(conn->type <= CONN_TYPE_MAX_); tor_assert(conn->type <= CONN_TYPE_MAX_);
#ifdef USE_BUFFEREVENTS
if (conn->bufev) {
tor_assert(conn->read_event == NULL);
tor_assert(conn->write_event == NULL);
tor_assert(conn->inbuf == NULL);
tor_assert(conn->outbuf == NULL);
}
#endif
switch (conn->type) { switch (conn->type) {
case CONN_TYPE_OR: case CONN_TYPE_OR:
case CONN_TYPE_EXT_OR: case CONN_TYPE_EXT_OR:
@ -5174,11 +4775,6 @@ connection_free_all(void)
tor_free(last_interface_ipv4); tor_free(last_interface_ipv4);
tor_free(last_interface_ipv6); tor_free(last_interface_ipv6);
#ifdef USE_BUFFEREVENTS
if (global_rate_limit)
bufferevent_rate_limit_group_free(global_rate_limit);
#endif
} }
/** Log a warning, and possibly emit a control event, that <b>received</b> came /** Log a warning, and possibly emit a control event, that <b>received</b> came
@ -57,8 +57,6 @@ void connection_mark_for_close_internal_(connection_t *conn,
connection_t *tmp_conn__ = (c); \ connection_t *tmp_conn__ = (c); \
connection_mark_for_close_internal_(tmp_conn__, (line), (file)); \ connection_mark_for_close_internal_(tmp_conn__, (line), (file)); \
tmp_conn__->hold_open_until_flushed = 1; \ tmp_conn__->hold_open_until_flushed = 1; \
IF_HAS_BUFFEREVENT(tmp_conn__, \
connection_start_writing(tmp_conn__)); \
} while (0) } while (0)
#define connection_mark_and_flush_internal(c) \ #define connection_mark_and_flush_internal(c) \
@ -166,22 +164,14 @@ static size_t connection_get_outbuf_len(connection_t *conn);
static inline size_t static inline size_t
connection_get_inbuf_len(connection_t *conn) connection_get_inbuf_len(connection_t *conn)
{ {
IF_HAS_BUFFEREVENT(conn, {
return evbuffer_get_length(bufferevent_get_input(conn->bufev));
}) ELSE_IF_NO_BUFFEREVENT {
return conn->inbuf ? buf_datalen(conn->inbuf) : 0; return conn->inbuf ? buf_datalen(conn->inbuf) : 0;
} }
}
static inline size_t static inline size_t
connection_get_outbuf_len(connection_t *conn) connection_get_outbuf_len(connection_t *conn)
{ {
IF_HAS_BUFFEREVENT(conn, {
return evbuffer_get_length(bufferevent_get_output(conn->bufev));
}) ELSE_IF_NO_BUFFEREVENT {
return conn->outbuf ? buf_datalen(conn->outbuf) : 0; return conn->outbuf ? buf_datalen(conn->outbuf) : 0;
} }
}
connection_t *connection_get_by_global_id(uint64_t id); connection_t *connection_get_by_global_id(uint64_t id);
@ -257,20 +247,6 @@ void clock_skew_warning(const connection_t *conn, long apparent_skew,
int trusted, log_domain_mask_t domain, int trusted, log_domain_mask_t domain,
const char *received, const char *source); const char *received, const char *source);
#ifdef USE_BUFFEREVENTS
int connection_type_uses_bufferevent(connection_t *conn);
void connection_configure_bufferevent_callbacks(connection_t *conn);
void connection_handle_read_cb(struct bufferevent *bufev, void *arg);
void connection_handle_write_cb(struct bufferevent *bufev, void *arg);
void connection_handle_event_cb(struct bufferevent *bufev, short event,
void *arg);
void connection_get_rate_limit_totals(uint64_t *read_out,
uint64_t *written_out);
void connection_enable_rate_limiting(connection_t *conn);
#else
#define connection_type_uses_bufferevent(c) (0)
#endif
#ifdef CONNECTION_PRIVATE #ifdef CONNECTION_PRIVATE
STATIC void connection_free_(connection_t *conn); STATIC void connection_free_(connection_t *conn);
@ -478,7 +478,6 @@ connection_edge_finished_connecting(edge_connection_t *edge_conn)
rep_hist_note_exit_stream_opened(conn->port); rep_hist_note_exit_stream_opened(conn->port);
conn->state = EXIT_CONN_STATE_OPEN; conn->state = EXIT_CONN_STATE_OPEN;
IF_HAS_NO_BUFFEREVENT(conn)
connection_watch_events(conn, READ_EVENT); /* stop writing, keep reading */ connection_watch_events(conn, READ_EVENT); /* stop writing, keep reading */
if (connection_get_outbuf_len(conn)) /* in case there are any queued relay if (connection_get_outbuf_len(conn)) /* in case there are any queued relay
* cells */ * cells */
@ -1026,7 +1025,7 @@ connection_ap_detach_retriable(entry_connection_t *conn,
pathbias_mark_use_rollback(circ); pathbias_mark_use_rollback(circ);
if (conn->pending_optimistic_data) { if (conn->pending_optimistic_data) {
generic_buffer_set_to_copy(&conn->sending_optimistic_data, buf_set_to_copy(&conn->sending_optimistic_data,
conn->pending_optimistic_data); conn->pending_optimistic_data);
} }
@ -2008,14 +2007,8 @@ connection_ap_handshake_process_socks(entry_connection_t *conn)
log_debug(LD_APP,"entered."); log_debug(LD_APP,"entered.");
IF_HAS_BUFFEREVENT(base_conn, {
struct evbuffer *input = bufferevent_get_input(base_conn->bufev);
sockshere = fetch_from_evbuffer_socks(input, socks,
options->TestSocks, options->SafeSocks);
}) ELSE_IF_NO_BUFFEREVENT {
sockshere = fetch_from_buf_socks(base_conn->inbuf, socks, sockshere = fetch_from_buf_socks(base_conn->inbuf, socks,
options->TestSocks, options->SafeSocks); options->TestSocks, options->SafeSocks);
};
if (socks->replylen) { if (socks->replylen) {
had_reply = 1; had_reply = 1;
@ -2347,7 +2340,7 @@ connection_ap_handshake_send_begin(entry_connection_t *ap_conn)
log_info(LD_APP, "Sending up to %ld + %ld bytes of queued-up data", log_info(LD_APP, "Sending up to %ld + %ld bytes of queued-up data",
(long)connection_get_inbuf_len(base_conn), (long)connection_get_inbuf_len(base_conn),
ap_conn->sending_optimistic_data ? ap_conn->sending_optimistic_data ?
(long)generic_buffer_len(ap_conn->sending_optimistic_data) : 0); (long)buf_datalen(ap_conn->sending_optimistic_data) : 0);
if (connection_edge_package_raw_inbuf(edge_conn, 1, NULL) < 0) { if (connection_edge_package_raw_inbuf(edge_conn, 1, NULL) < 0) {
connection_mark_for_close(base_conn); connection_mark_for_close(base_conn);
} }
@ -3225,10 +3218,8 @@ connection_exit_connect(edge_connection_t *edge_conn)
conn->state = EXIT_CONN_STATE_OPEN; conn->state = EXIT_CONN_STATE_OPEN;
if (connection_get_outbuf_len(conn)) { if (connection_get_outbuf_len(conn)) {
/* in case there are any queued data cells, from e.g. optimistic data */ /* in case there are any queued data cells, from e.g. optimistic data */
IF_HAS_NO_BUFFEREVENT(conn)
connection_watch_events(conn, READ_EVENT|WRITE_EVENT); connection_watch_events(conn, READ_EVENT|WRITE_EVENT);
} else { } else {
IF_HAS_NO_BUFFEREVENT(conn)
connection_watch_events(conn, READ_EVENT); connection_watch_events(conn, READ_EVENT);
} }
@ -42,10 +42,6 @@
#include "ext_orport.h" #include "ext_orport.h"
#include "scheduler.h" #include "scheduler.h"
#ifdef USE_BUFFEREVENTS
#include <event2/bufferevent_ssl.h>
#endif
static int connection_tls_finish_handshake(or_connection_t *conn); static int connection_tls_finish_handshake(or_connection_t *conn);
static int connection_or_launch_v3_or_handshake(or_connection_t *conn); static int connection_or_launch_v3_or_handshake(or_connection_t *conn);
static int connection_or_process_cells_from_inbuf(or_connection_t *conn); static int connection_or_process_cells_from_inbuf(or_connection_t *conn);
@ -66,12 +62,6 @@ static void connection_or_mark_bad_for_new_circs(or_connection_t *or_conn);
static void connection_or_change_state(or_connection_t *conn, uint8_t state); static void connection_or_change_state(or_connection_t *conn, uint8_t state);
#ifdef USE_BUFFEREVENTS
static void connection_or_handle_event_cb(struct bufferevent *bufev,
short event, void *arg);
#include <event2/buffer.h>/*XXXX REMOVE */
#endif
/**************************************************************/ /**************************************************************/
/** Map from identity digest of connected OR or desired OR to a connection_t /** Map from identity digest of connected OR or desired OR to a connection_t
@ -565,13 +555,6 @@ connection_or_process_inbuf(or_connection_t *conn)
return ret; return ret;
case OR_CONN_STATE_TLS_SERVER_RENEGOTIATING: case OR_CONN_STATE_TLS_SERVER_RENEGOTIATING:
#ifdef USE_BUFFEREVENTS
if (tor_tls_server_got_renegotiate(conn->tls))
connection_or_tls_renegotiated_cb(conn->tls, conn);
if (conn->base_.marked_for_close)
return 0;
/* fall through. */
#endif
case OR_CONN_STATE_OPEN: case OR_CONN_STATE_OPEN:
case OR_CONN_STATE_OR_HANDSHAKING_V2: case OR_CONN_STATE_OR_HANDSHAKING_V2:
case OR_CONN_STATE_OR_HANDSHAKING_V3: case OR_CONN_STATE_OR_HANDSHAKING_V3:
@ -807,27 +790,6 @@ connection_or_update_token_buckets_helper(or_connection_t *conn, int reset,
conn->bandwidthrate = rate; conn->bandwidthrate = rate;
conn->bandwidthburst = burst; conn->bandwidthburst = burst;
#ifdef USE_BUFFEREVENTS
{
const struct timeval *tick = tor_libevent_get_one_tick_timeout();
struct ev_token_bucket_cfg *cfg, *old_cfg;
int64_t rate64 = (((int64_t)rate) * options->TokenBucketRefillInterval)
/ 1000;
/* This can't overflow, since TokenBucketRefillInterval <= 1000,
* and rate started out less than INT_MAX. */
int rate_per_tick = (int) rate64;
cfg = ev_token_bucket_cfg_new(rate_per_tick, burst, rate_per_tick,
burst, tick);
old_cfg = conn->bucket_cfg;
if (conn->base_.bufev)
tor_set_bufferevent_rate_limit(conn->base_.bufev, cfg);
if (old_cfg)
ev_token_bucket_cfg_free(old_cfg);
conn->bucket_cfg = cfg;
(void) reset; /* No way to do this with libevent yet. */
}
#else
if (reset) { /* set up the token buckets to be full */ if (reset) { /* set up the token buckets to be full */
conn->read_bucket = conn->write_bucket = burst; conn->read_bucket = conn->write_bucket = burst;
return; return;
@ -838,7 +800,6 @@ connection_or_update_token_buckets_helper(or_connection_t *conn, int reset,
conn->read_bucket = burst; conn->read_bucket = burst;
if (conn->write_bucket > burst) if (conn->write_bucket > burst)
conn->write_bucket = burst; conn->write_bucket = burst;
#endif
} }
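
With the Libevent rate-limiting configuration gone, the path that survives is the plain token-bucket one shown above: on reset the buckets are filled to the new burst, otherwise they are clamped down to it, and each refill adds the per-interval rate without ever exceeding burst. A small standalone sketch of that clamp-and-refill behaviour (bucket, bucket_set_limits and bucket_refill are made-up names, not Tor's):

    #include <stdio.h>

    /* Illustrative token bucket: 'level' never exceeds 'burst'. */
    struct bucket {
      int rate;    /* bytes added per refill interval */
      int burst;   /* maximum bucket size */
      int level;   /* bytes currently available */
    };

    /* Apply a new rate/burst, mirroring the clamp in the hunk above. */
    static void
    bucket_set_limits(struct bucket *b, int rate, int burst, int reset)
    {
      b->rate = rate;
      b->burst = burst;
      if (reset) {             /* set up the bucket to be full */
        b->level = burst;
        return;
      }
      if (b->level > burst)    /* never exceed the new burst */
        b->level = burst;
    }

    /* Periodic refill: add 'rate' bytes, capped at 'burst'. */
    static void
    bucket_refill(struct bucket *b)
    {
      b->level += b->rate;
      if (b->level > b->burst)
        b->level = b->burst;
    }

    int
    main(void)
    {
      struct bucket b = { 0, 0, 0 };
      bucket_set_limits(&b, 1000, 4000, 1);   /* full at 4000 */
      b.level -= 3500;                        /* pretend we wrote 3500 bytes */
      bucket_refill(&b);                      /* 500 + 1000 = 1500 */
      printf("level=%d\n", b.level);
      bucket_set_limits(&b, 1000, 1200, 0);   /* shrink burst: clamp to 1200 */
      printf("level=%d\n", b.level);
      return 0;
    }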
/** Either our set of relays or our per-conn rate limits have changed. /** Either our set of relays or our per-conn rate limits have changed.
@ -1395,40 +1356,14 @@ connection_tls_start_handshake,(or_connection_t *conn, int receiving))
tor_tls_set_logged_address(conn->tls, // XXX client and relay? tor_tls_set_logged_address(conn->tls, // XXX client and relay?
escaped_safe_str(conn->base_.address)); escaped_safe_str(conn->base_.address));
#ifdef USE_BUFFEREVENTS
if (connection_type_uses_bufferevent(TO_CONN(conn))) {
const int filtering = get_options()->UseFilteringSSLBufferevents;
struct bufferevent *b =
tor_tls_init_bufferevent(conn->tls, conn->base_.bufev, conn->base_.s,
receiving, filtering);
if (!b) {
log_warn(LD_BUG,"tor_tls_init_bufferevent failed. Closing.");
return -1;
}
conn->base_.bufev = b;
if (conn->bucket_cfg)
tor_set_bufferevent_rate_limit(conn->base_.bufev, conn->bucket_cfg);
connection_enable_rate_limiting(TO_CONN(conn));
connection_configure_bufferevent_callbacks(TO_CONN(conn));
bufferevent_setcb(b,
connection_handle_read_cb,
connection_handle_write_cb,
connection_or_handle_event_cb,/* overriding this one*/
TO_CONN(conn));
}
#endif
connection_start_reading(TO_CONN(conn)); connection_start_reading(TO_CONN(conn));
log_debug(LD_HANDSHAKE,"starting TLS handshake on fd "TOR_SOCKET_T_FORMAT, log_debug(LD_HANDSHAKE,"starting TLS handshake on fd "TOR_SOCKET_T_FORMAT,
conn->base_.s); conn->base_.s);
note_crypto_pk_op(receiving ? TLS_HANDSHAKE_S : TLS_HANDSHAKE_C); note_crypto_pk_op(receiving ? TLS_HANDSHAKE_S : TLS_HANDSHAKE_C);
IF_HAS_BUFFEREVENT(TO_CONN(conn), {
/* ???? */;
}) ELSE_IF_NO_BUFFEREVENT {
if (connection_tls_continue_handshake(conn) < 0) if (connection_tls_continue_handshake(conn) < 0)
return -1; return -1;
}
return 0; return 0;
} }
@ -1517,75 +1452,6 @@ connection_tls_continue_handshake(or_connection_t *conn)
return 0; return 0;
} }
#ifdef USE_BUFFEREVENTS
static void
connection_or_handle_event_cb(struct bufferevent *bufev, short event,
void *arg)
{
struct or_connection_t *conn = TO_OR_CONN(arg);
/* XXXX cut-and-paste code; should become a function. */
if (event & BEV_EVENT_CONNECTED) {
if (conn->base_.state == OR_CONN_STATE_TLS_HANDSHAKING) {
if (tor_tls_finish_handshake(conn->tls) < 0) {
log_warn(LD_OR, "Problem finishing handshake");
connection_or_close_for_error(conn, 0);
return;
}
}
if (! tor_tls_used_v1_handshake(conn->tls)) {
if (!tor_tls_is_server(conn->tls)) {
if (conn->base_.state == OR_CONN_STATE_TLS_HANDSHAKING) {
if (connection_or_launch_v3_or_handshake(conn) < 0)
connection_or_close_for_error(conn, 0);
}
} else {
const int handshakes = tor_tls_get_num_server_handshakes(conn->tls);
if (handshakes == 1) {
/* v2 or v3 handshake, as a server. Only got one handshake, so
* wait for the next one. */
tor_tls_set_renegotiate_callback(conn->tls,
connection_or_tls_renegotiated_cb,
conn);
connection_or_change_state(conn,
OR_CONN_STATE_TLS_SERVER_RENEGOTIATING);
} else if (handshakes == 2) {
/* v2 handshake, as a server. Two handshakes happened already,
* so we treat renegotiation as done.
*/
connection_or_tls_renegotiated_cb(conn->tls, conn);
} else if (handshakes > 2) {
log_warn(LD_OR, "More than two handshakes done on connection. "
"Closing.");
connection_or_close_for_error(conn, 0);
} else {
log_warn(LD_BUG, "We were unexpectedly told that a connection "
"got %d handshakes. Closing.", handshakes);
connection_or_close_for_error(conn, 0);
}
return;
}
}
connection_watch_events(TO_CONN(conn), READ_EVENT|WRITE_EVENT);
if (connection_tls_finish_handshake(conn) < 0)
connection_or_close_for_error(conn, 0); /* ???? */
return;
}
if (event & BEV_EVENT_ERROR) {
unsigned long err;
while ((err = bufferevent_get_openssl_error(bufev))) {
tor_tls_log_one_error(conn->tls, err, LOG_WARN, LD_OR,
"handshaking (with bufferevent)");
}
}
connection_handle_event_cb(bufev, event, arg);
}
#endif
/** Return 1 if we initiated this connection, or 0 if it started /** Return 1 if we initiated this connection, or 0 if it started
* out as an incoming connection. * out as an incoming connection.
*/ */
@ -2003,11 +1869,7 @@ connection_or_set_state_open(or_connection_t *conn)
or_handshake_state_free(conn->handshake_state); or_handshake_state_free(conn->handshake_state);
conn->handshake_state = NULL; conn->handshake_state = NULL;
IF_HAS_BUFFEREVENT(TO_CONN(conn), {
connection_watch_events(TO_CONN(conn), READ_EVENT|WRITE_EVENT);
}) ELSE_IF_NO_BUFFEREVENT {
connection_start_reading(TO_CONN(conn)); connection_start_reading(TO_CONN(conn));
}
return 0; return 0;
} }
@ -2067,13 +1929,8 @@ static int
connection_fetch_var_cell_from_buf(or_connection_t *or_conn, var_cell_t **out) connection_fetch_var_cell_from_buf(or_connection_t *or_conn, var_cell_t **out)
{ {
connection_t *conn = TO_CONN(or_conn); connection_t *conn = TO_CONN(or_conn);
IF_HAS_BUFFEREVENT(conn, {
struct evbuffer *input = bufferevent_get_input(conn->bufev);
return fetch_var_cell_from_evbuffer(input, out, or_conn->link_proto);
}) ELSE_IF_NO_BUFFEREVENT {
return fetch_var_cell_from_buf(conn->inbuf, out, or_conn->link_proto); return fetch_var_cell_from_buf(conn->inbuf, out, or_conn->link_proto);
} }
}
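
For orientation on what fetch_var_cell_from_buf pulls off the inbuf: a variable-length cell on link protocol 4 or later is framed as a 4-byte circuit ID, a 1-byte command, and a 2-byte big-endian payload length followed by that many payload bytes (older link protocols use 2-byte circuit IDs). A standalone sketch of parsing just that framing from a flat buffer, with none of the real code's link-protocol or allocation handling:

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Parsed header of a variable-length cell, link protocol >= 4.
     * Illustrative only. */
    struct var_cell_header {
      uint32_t circ_id;
      uint8_t command;
      uint16_t payload_len;
    };

    /* Returns 1 and fills *hdr if buf holds a complete var cell,
     * 0 if more bytes are still needed.  Wire format is big-endian. */
    static int
    parse_var_cell_header(const uint8_t *buf, size_t buflen,
                          struct var_cell_header *hdr)
    {
      const size_t header_len = 4 + 1 + 2;
      if (buflen < header_len)
        return 0;
      hdr->circ_id = ((uint32_t)buf[0] << 24) | ((uint32_t)buf[1] << 16) |
                     ((uint32_t)buf[2] << 8)  |  (uint32_t)buf[3];
      hdr->command = buf[4];
      hdr->payload_len = (uint16_t)((buf[5] << 8) | buf[6]);
      if (buflen < header_len + hdr->payload_len)
        return 0;                 /* payload not fully buffered yet */
      return 1;
    }

    int
    main(void)
    {
      /* Example bytes: circuit 0, command 129 (CERTS), 2 payload bytes. */
      const uint8_t wire[] = { 0,0,0,0, 129, 0,2, 0xAA,0xBB };
      struct var_cell_header h;
      if (parse_var_cell_header(wire, sizeof(wire), &h))
        printf("cmd=%u len=%u\n", (unsigned)h.command, (unsigned)h.payload_len);
      return 0;
    }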
/** Process cells from <b>conn</b>'s inbuf. /** Process cells from <b>conn</b>'s inbuf.
* *
@ -4794,20 +4794,15 @@ is_valid_initial_command(control_connection_t *conn, const char *cmd)
* interfaces is broken. */ * interfaces is broken. */
#define MAX_COMMAND_LINE_LENGTH (1024*1024) #define MAX_COMMAND_LINE_LENGTH (1024*1024)
/** Wrapper around peek_(evbuffer|buf)_has_control0 command: presents the same /** Wrapper around peek_buf_has_control0 command: presents the same
* interface as those underlying functions, but takes a connection_t intead of * interface as the underlying function, but takes a connection_t instead of
* an evbuffer or a buf_t. * a buf_t.
*/ */
static int static int
peek_connection_has_control0_command(connection_t *conn) peek_connection_has_control0_command(connection_t *conn)
{ {
IF_HAS_BUFFEREVENT(conn, {
struct evbuffer *input = bufferevent_get_input(conn->bufev);
return peek_evbuffer_has_control0_command(input);
}) ELSE_IF_NO_BUFFEREVENT {
return peek_buf_has_control0_command(conn->inbuf); return peek_buf_has_control0_command(conn->inbuf);
} }
}
/** Called when data has arrived on a v1 control connection: Try to fetch /** Called when data has arrived on a v1 control connection: Try to fetch
* commands from conn->inbuf, and execute them. * commands from conn->inbuf, and execute them.
@ -1272,10 +1272,6 @@ directory_initiate_command_rend(const tor_addr_port_t *or_addr_port,
if_modified_since); if_modified_since);
connection_watch_events(TO_CONN(conn), READ_EVENT|WRITE_EVENT); connection_watch_events(TO_CONN(conn), READ_EVENT|WRITE_EVENT);
IF_HAS_BUFFEREVENT(ENTRY_TO_CONN(linked_conn), {
connection_watch_events(ENTRY_TO_CONN(linked_conn),
READ_EVENT|WRITE_EVENT);
}) ELSE_IF_NO_BUFFEREVENT
connection_start_reading(ENTRY_TO_CONN(linked_conn)); connection_start_reading(ENTRY_TO_CONN(linked_conn));
} }
} }
@ -3641,16 +3637,8 @@ connection_dir_finished_flushing(dir_connection_t *conn)
return 0; return 0;
case DIR_CONN_STATE_SERVER_WRITING: case DIR_CONN_STATE_SERVER_WRITING:
if (conn->dir_spool_src != DIR_SPOOL_NONE) { if (conn->dir_spool_src != DIR_SPOOL_NONE) {
#ifdef USE_BUFFEREVENTS
/* This can happen with paired bufferevents, since a paired connection
* can flush immediately when you write to it, making the subsequent
* check in connection_handle_write_cb() decide that the connection
* is flushed. */
log_debug(LD_DIRSERV, "Emptied a dirserv buffer, but still spooling.");
#else
log_warn(LD_BUG, "Emptied a dirserv buffer, but it's still spooling!"); log_warn(LD_BUG, "Emptied a dirserv buffer, but it's still spooling!");
connection_mark_for_close(TO_CONN(conn)); connection_mark_for_close(TO_CONN(conn));
#endif
} else { } else {
log_debug(LD_DIRSERV, "Finished writing server response. Closing."); log_debug(LD_DIRSERV, "Finished writing server response. Closing.");
connection_mark_for_close(TO_CONN(conn)); connection_mark_for_close(TO_CONN(conn));
@ -41,13 +41,8 @@ ext_or_cmd_free(ext_or_cmd_t *cmd)
static int static int
connection_fetch_ext_or_cmd_from_buf(connection_t *conn, ext_or_cmd_t **out) connection_fetch_ext_or_cmd_from_buf(connection_t *conn, ext_or_cmd_t **out)
{ {
IF_HAS_BUFFEREVENT(conn, {
struct evbuffer *input = bufferevent_get_input(conn->bufev);
return fetch_ext_or_command_from_evbuffer(input, out);
}) ELSE_IF_NO_BUFFEREVENT {
return fetch_ext_or_command_from_buf(conn->inbuf, out); return fetch_ext_or_command_from_buf(conn->inbuf, out);
} }
}
/** Write an Extended ORPort message to <b>conn</b>. Use /** Write an Extended ORPort message to <b>conn</b>. Use
* <b>command</b> as the command type, <b>bodylen</b> as the body * <b>command</b> as the command type, <b>bodylen</b> as the body
@ -71,10 +71,6 @@
#include <event2/event.h> #include <event2/event.h>
#ifdef USE_BUFFEREVENTS
#include <event2/bufferevent.h>
#endif
#ifdef HAVE_SYSTEMD #ifdef HAVE_SYSTEMD
# if defined(__COVERITY__) && !defined(__INCLUDE_LEVEL__) # if defined(__COVERITY__) && !defined(__INCLUDE_LEVEL__)
/* Systemd's use of gcc's __INCLUDE_LEVEL__ extension macro appears to confuse /* Systemd's use of gcc's __INCLUDE_LEVEL__ extension macro appears to confuse
@ -101,8 +97,6 @@ static int run_main_loop_until_done(void);
static void process_signal(int sig); static void process_signal(int sig);
/********* START VARIABLES **********/ /********* START VARIABLES **********/
#ifndef USE_BUFFEREVENTS
int global_read_bucket; /**< Max number of bytes I can read this second. */ int global_read_bucket; /**< Max number of bytes I can read this second. */
int global_write_bucket; /**< Max number of bytes I can write this second. */ int global_write_bucket; /**< Max number of bytes I can write this second. */
@ -116,7 +110,6 @@ static int stats_prev_global_read_bucket;
/** What was the write bucket before the last second_elapsed_callback() call? /** What was the write bucket before the last second_elapsed_callback() call?
* (used to determine how many bytes we've written). */ * (used to determine how many bytes we've written). */
static int stats_prev_global_write_bucket; static int stats_prev_global_write_bucket;
#endif
/* DOCDOC stats_prev_n_read */ /* DOCDOC stats_prev_n_read */
static uint64_t stats_prev_n_read = 0; static uint64_t stats_prev_n_read = 0;
@ -185,28 +178,6 @@ int quiet_level = 0;
* *
****************************************************************************/ ****************************************************************************/
#if defined(_WIN32) && defined(USE_BUFFEREVENTS)
/** Remove the kernel-space send and receive buffers for <b>s</b>. For use
* with IOCP only. */
static int
set_buffer_lengths_to_zero(tor_socket_t s)
{
int zero = 0;
int r = 0;
if (setsockopt(s, SOL_SOCKET, SO_SNDBUF, (void*)&zero,
(socklen_t)sizeof(zero))) {
log_warn(LD_NET, "Unable to clear SO_SNDBUF");
r = -1;
}
if (setsockopt(s, SOL_SOCKET, SO_RCVBUF, (void*)&zero,
(socklen_t)sizeof(zero))) {
log_warn(LD_NET, "Unable to clear SO_RCVBUF");
r = -1;
}
return r;
}
#endif
/** Return 1 if we have successfully built a circuit, and nothing has changed /** Return 1 if we have successfully built a circuit, and nothing has changed
* to make us think that maybe we can't. * to make us think that maybe we can't.
*/ */
@ -249,66 +220,9 @@ connection_add_impl(connection_t *conn, int is_connecting)
conn->conn_array_index = smartlist_len(connection_array); conn->conn_array_index = smartlist_len(connection_array);
smartlist_add(connection_array, conn); smartlist_add(connection_array, conn);
#ifdef USE_BUFFEREVENTS
if (connection_type_uses_bufferevent(conn)) {
if (SOCKET_OK(conn->s) && !conn->linked) {
#ifdef _WIN32
if (tor_libevent_using_iocp_bufferevents() &&
get_options()->UserspaceIOCPBuffers) {
set_buffer_lengths_to_zero(conn->s);
}
#endif
conn->bufev = bufferevent_socket_new(
tor_libevent_get_base(),
conn->s,
BEV_OPT_DEFER_CALLBACKS);
if (!conn->bufev) {
log_warn(LD_BUG, "Unable to create socket bufferevent");
smartlist_del(connection_array, conn->conn_array_index);
conn->conn_array_index = -1;
return -1;
}
if (is_connecting) {
/* Put the bufferevent into a "connecting" state so that we'll get
* a "connected" event callback on successful write. */
bufferevent_socket_connect(conn->bufev, NULL, 0);
}
connection_configure_bufferevent_callbacks(conn);
} else if (conn->linked && conn->linked_conn &&
connection_type_uses_bufferevent(conn->linked_conn)) {
tor_assert(!(SOCKET_OK(conn->s)));
if (!conn->bufev) {
struct bufferevent *pair[2] = { NULL, NULL };
if (bufferevent_pair_new(tor_libevent_get_base(),
BEV_OPT_DEFER_CALLBACKS,
pair) < 0) {
log_warn(LD_BUG, "Unable to create bufferevent pair");
smartlist_del(connection_array, conn->conn_array_index);
conn->conn_array_index = -1;
return -1;
}
tor_assert(pair[0]);
conn->bufev = pair[0];
conn->linked_conn->bufev = pair[1];
} /* else the other side already was added, and got a bufferevent_pair */
connection_configure_bufferevent_callbacks(conn);
} else {
tor_assert(!conn->linked);
}
if (conn->bufev)
tor_assert(conn->inbuf == NULL);
if (conn->linked_conn && conn->linked_conn->bufev)
tor_assert(conn->linked_conn->inbuf == NULL);
}
#else
(void) is_connecting; (void) is_connecting;
#endif
if (!HAS_BUFFEREVENT(conn) && (SOCKET_OK(conn->s) || conn->linked)) { if (SOCKET_OK(conn->s) || conn->linked) {
conn->read_event = tor_event_new(tor_libevent_get_base(), conn->read_event = tor_event_new(tor_libevent_get_base(),
conn->s, EV_READ|EV_PERSIST, conn_read_callback, conn); conn->s, EV_READ|EV_PERSIST, conn_read_callback, conn);
conn->write_event = tor_event_new(tor_libevent_get_base(), conn->write_event = tor_event_new(tor_libevent_get_base(),
@ -337,12 +251,6 @@ connection_unregister_events(connection_t *conn)
log_warn(LD_BUG, "Error removing write event for %d", (int)conn->s); log_warn(LD_BUG, "Error removing write event for %d", (int)conn->s);
tor_free(conn->write_event); tor_free(conn->write_event);
} }
#ifdef USE_BUFFEREVENTS
if (conn->bufev) {
bufferevent_free(conn->bufev);
conn->bufev = NULL;
}
#endif
if (conn->type == CONN_TYPE_AP_DNS_LISTENER) { if (conn->type == CONN_TYPE_AP_DNS_LISTENER) {
dnsserv_close_listener(conn); dnsserv_close_listener(conn);
} }
@ -502,17 +410,6 @@ get_bytes_written,(void))
void void
connection_watch_events(connection_t *conn, watchable_events_t events) connection_watch_events(connection_t *conn, watchable_events_t events)
{ {
IF_HAS_BUFFEREVENT(conn, {
short ev = ((short)events) & (EV_READ|EV_WRITE);
short old_ev = bufferevent_get_enabled(conn->bufev);
if ((ev & ~old_ev) != 0) {
bufferevent_enable(conn->bufev, ev);
}
if ((old_ev & ~ev) != 0) {
bufferevent_disable(conn->bufev, old_ev & ~ev);
}
return;
});
if (events & READ_EVENT) if (events & READ_EVENT)
connection_start_reading(conn); connection_start_reading(conn);
else else
@ -530,9 +427,6 @@ connection_is_reading(connection_t *conn)
{ {
tor_assert(conn); tor_assert(conn);
IF_HAS_BUFFEREVENT(conn,
return (bufferevent_get_enabled(conn->bufev) & EV_READ) != 0;
);
return conn->reading_from_linked_conn || return conn->reading_from_linked_conn ||
(conn->read_event && event_pending(conn->read_event, EV_READ, NULL)); (conn->read_event && event_pending(conn->read_event, EV_READ, NULL));
} }
@ -583,11 +477,6 @@ connection_stop_reading,(connection_t *conn))
{ {
tor_assert(conn); tor_assert(conn);
IF_HAS_BUFFEREVENT(conn, {
bufferevent_disable(conn->bufev, EV_READ);
return;
});
if (connection_check_event(conn, conn->read_event) < 0) { if (connection_check_event(conn, conn->read_event) < 0) {
return; return;
} }
@ -610,11 +499,6 @@ connection_start_reading,(connection_t *conn))
{ {
tor_assert(conn); tor_assert(conn);
IF_HAS_BUFFEREVENT(conn, {
bufferevent_enable(conn->bufev, EV_READ);
return;
});
if (connection_check_event(conn, conn->read_event) < 0) { if (connection_check_event(conn, conn->read_event) < 0) {
return; return;
} }
@ -638,10 +522,6 @@ connection_is_writing(connection_t *conn)
{ {
tor_assert(conn); tor_assert(conn);
IF_HAS_BUFFEREVENT(conn,
return (bufferevent_get_enabled(conn->bufev) & EV_WRITE) != 0;
);
return conn->writing_to_linked_conn || return conn->writing_to_linked_conn ||
(conn->write_event && event_pending(conn->write_event, EV_WRITE, NULL)); (conn->write_event && event_pending(conn->write_event, EV_WRITE, NULL));
} }
@ -652,11 +532,6 @@ connection_stop_writing,(connection_t *conn))
{ {
tor_assert(conn); tor_assert(conn);
IF_HAS_BUFFEREVENT(conn, {
bufferevent_disable(conn->bufev, EV_WRITE);
return;
});
if (connection_check_event(conn, conn->write_event) < 0) { if (connection_check_event(conn, conn->write_event) < 0) {
return; return;
} }
@ -680,11 +555,6 @@ connection_start_writing,(connection_t *conn))
{ {
tor_assert(conn); tor_assert(conn);
IF_HAS_BUFFEREVENT(conn, {
bufferevent_enable(conn->bufev, EV_WRITE);
return;
});
if (connection_check_event(conn, conn->write_event) < 0) { if (connection_check_event(conn, conn->write_event) < 0) {
return; return;
} }
@ -873,21 +743,6 @@ conn_close_if_marked(int i)
assert_connection_ok(conn, now); assert_connection_ok(conn, now);
/* assert_all_pending_dns_resolves_ok(); */ /* assert_all_pending_dns_resolves_ok(); */
#ifdef USE_BUFFEREVENTS
if (conn->bufev) {
if (conn->hold_open_until_flushed &&
evbuffer_get_length(bufferevent_get_output(conn->bufev))) {
/* don't close yet. */
return 0;
}
if (conn->linked_conn && ! conn->linked_conn->marked_for_close) {
/* We need to do this explicitly so that the linked connection
* notices that there was an EOF. */
bufferevent_flush(conn->bufev, EV_WRITE, BEV_FINISHED);
}
}
#endif
log_debug(LD_NET,"Cleaning up connection (fd "TOR_SOCKET_T_FORMAT").", log_debug(LD_NET,"Cleaning up connection (fd "TOR_SOCKET_T_FORMAT").",
conn->s); conn->s);
@ -897,7 +752,6 @@ conn_close_if_marked(int i)
if (conn->proxy_state == PROXY_INFANT) if (conn->proxy_state == PROXY_INFANT)
log_failed_proxy_connection(conn); log_failed_proxy_connection(conn);
IF_HAS_BUFFEREVENT(conn, goto unlink);
if ((SOCKET_OK(conn->s) || conn->linked_conn) && if ((SOCKET_OK(conn->s) || conn->linked_conn) &&
connection_wants_to_flush(conn)) { connection_wants_to_flush(conn)) {
/* s == -1 means it's an incomplete edge connection, or that the socket /* s == -1 means it's an incomplete edge connection, or that the socket
@ -982,9 +836,6 @@ conn_close_if_marked(int i)
} }
} }
#ifdef USE_BUFFEREVENTS
unlink:
#endif
connection_unlink(conn); /* unlink, remove, free */ connection_unlink(conn); /* unlink, remove, free */
return 1; return 1;
} }
@ -1130,11 +981,7 @@ run_connection_housekeeping(int i, time_t now)
the connection or send a keepalive, depending. */ the connection or send a keepalive, depending. */
or_conn = TO_OR_CONN(conn); or_conn = TO_OR_CONN(conn);
#ifdef USE_BUFFEREVENTS
tor_assert(conn->bufev);
#else
tor_assert(conn->outbuf); tor_assert(conn->outbuf);
#endif
chan = TLS_CHAN_TO_BASE(or_conn->chan); chan = TLS_CHAN_TO_BASE(or_conn->chan);
tor_assert(chan); tor_assert(chan);
@ -2037,25 +1884,10 @@ second_elapsed_callback(periodic_timer_t *timer, void *arg)
/* the second has rolled over. check more stuff. */ /* the second has rolled over. check more stuff. */
seconds_elapsed = current_second ? (int)(now - current_second) : 0; seconds_elapsed = current_second ? (int)(now - current_second) : 0;
#ifdef USE_BUFFEREVENTS
{
uint64_t cur_read,cur_written;
connection_get_rate_limit_totals(&cur_read, &cur_written);
bytes_written = (size_t)(cur_written - stats_prev_n_written);
bytes_read = (size_t)(cur_read - stats_prev_n_read);
stats_n_bytes_read += bytes_read;
stats_n_bytes_written += bytes_written;
if (accounting_is_enabled(options) && seconds_elapsed >= 0)
accounting_add_bytes(bytes_read, bytes_written, seconds_elapsed);
stats_prev_n_written = cur_written;
stats_prev_n_read = cur_read;
}
#else
bytes_read = (size_t)(stats_n_bytes_read - stats_prev_n_read); bytes_read = (size_t)(stats_n_bytes_read - stats_prev_n_read);
bytes_written = (size_t)(stats_n_bytes_written - stats_prev_n_written); bytes_written = (size_t)(stats_n_bytes_written - stats_prev_n_written);
stats_prev_n_read = stats_n_bytes_read; stats_prev_n_read = stats_n_bytes_read;
stats_prev_n_written = stats_n_bytes_written; stats_prev_n_written = stats_n_bytes_written;
#endif
control_event_bandwidth_used((uint32_t)bytes_read,(uint32_t)bytes_written); control_event_bandwidth_used((uint32_t)bytes_read,(uint32_t)bytes_written);
control_event_stream_bandwidth_used(); control_event_stream_bandwidth_used();
@ -2127,12 +1959,11 @@ systemd_watchdog_callback(periodic_timer_t *timer, void *arg)
} }
#endif #endif
#ifndef USE_BUFFEREVENTS
/** Timer: used to invoke refill_callback(). */ /** Timer: used to invoke refill_callback(). */
static periodic_timer_t *refill_timer = NULL; static periodic_timer_t *refill_timer = NULL;
/** Libevent callback: invoked periodically to refill token buckets /** Libevent callback: invoked periodically to refill token buckets
* and count r/w bytes. It is only used when bufferevents are disabled. */ * and count r/w bytes. */
static void static void
refill_callback(periodic_timer_t *timer, void *arg) refill_callback(periodic_timer_t *timer, void *arg)
{ {
@ -2176,7 +2007,6 @@ refill_callback(periodic_timer_t *timer, void *arg)
current_millisecond = now; /* remember what time it is, for next time */ current_millisecond = now; /* remember what time it is, for next time */
} }
#endif
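
The refill machinery that stays behind is just a persistent Libevent timer that tops the buckets up every TokenBucketRefillInterval milliseconds and counts the bytes moved since the last tick. A minimal sketch of that pattern against stock Libevent (the single global bucket and the 100 ms interval here are placeholders, not Tor's configuration):

    #include <stdio.h>
    #include <event2/event.h>

    static int bucket = 0;

    /* Persistent timer callback: refill the (fake) bucket, capped at a burst. */
    static void
    refill_cb(evutil_socket_t fd, short what, void *arg)
    {
      struct event_base *base = arg;
      (void)fd; (void)what;
      bucket += 100;
      if (bucket > 500)
        bucket = 500;
      printf("bucket=%d\n", bucket);
      if (bucket == 500)
        event_base_loopexit(base, NULL);   /* stop the demo once full */
    }

    int
    main(void)
    {
      struct event_base *base = event_base_new();
      struct timeval interval = { 0, 100 * 1000 };   /* 100 msec */
      struct event *refill = event_new(base, -1, EV_PERSIST, refill_cb, base);
      event_add(refill, &interval);
      event_base_dispatch(base);
      event_free(refill);
      event_base_free(base);
      return 0;
    }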
#ifndef _WIN32 #ifndef _WIN32
/** Called when a possibly ignorable libevent error occurs; ensures that we /** Called when a possibly ignorable libevent error occurs; ensures that we
@ -2352,13 +2182,6 @@ do_main_loop(void)
} }
} }
#ifdef USE_BUFFEREVENTS
log_warn(LD_GENERAL, "Tor was compiled with the --enable-bufferevents "
"option. This is still experimental, and might cause strange "
"bugs. If you want a more stable Tor, be sure to build without "
"--enable-bufferevents.");
#endif
handle_signals(1); handle_signals(1);
/* load the private keys, if we're supposed to have them, and set up the /* load the private keys, if we're supposed to have them, and set up the
@ -2372,10 +2195,8 @@ do_main_loop(void)
/* Set up our buckets */ /* Set up our buckets */
connection_bucket_init(); connection_bucket_init();
#ifndef USE_BUFFEREVENTS
stats_prev_global_read_bucket = global_read_bucket; stats_prev_global_read_bucket = global_read_bucket;
stats_prev_global_write_bucket = global_write_bucket; stats_prev_global_write_bucket = global_write_bucket;
#endif
/* initialize the bootstrap status events to know we're starting up */ /* initialize the bootstrap status events to know we're starting up */
control_event_bootstrap(BOOTSTRAP_STATUS_STARTING, 0); control_event_bootstrap(BOOTSTRAP_STATUS_STARTING, 0);
@ -2472,7 +2293,6 @@ do_main_loop(void)
} }
#endif #endif
#ifndef USE_BUFFEREVENTS
if (!refill_timer) { if (!refill_timer) {
struct timeval refill_interval; struct timeval refill_interval;
int msecs = get_options()->TokenBucketRefillInterval; int msecs = get_options()->TokenBucketRefillInterval;
@ -2486,7 +2306,6 @@ do_main_loop(void)
NULL); NULL);
tor_assert(refill_timer); tor_assert(refill_timer);
} }
#endif
#ifdef HAVE_SYSTEMD #ifdef HAVE_SYSTEMD
{ {
@ -2980,14 +2799,9 @@ tor_init(int argc, char *argv[])
{ {
const char *version = get_version(); const char *version = get_version();
const char *bev_str =
#ifdef USE_BUFFEREVENTS
"(with bufferevents) ";
#else
"";
#endif
log_notice(LD_GENERAL, "Tor v%s %srunning on %s with Libevent %s, " log_notice(LD_GENERAL, "Tor v%s running on %s with Libevent %s, "
"OpenSSL %s and Zlib %s.", version, bev_str, "OpenSSL %s and Zlib %s.", version,
get_uname(), get_uname(),
tor_libevent_get_version_str(), tor_libevent_get_version_str(),
crypto_openssl_get_version_str(), crypto_openssl_get_version_str(),
@ -3163,9 +2977,7 @@ tor_free_all(int postfork)
smartlist_free(active_linked_connection_lst); smartlist_free(active_linked_connection_lst);
periodic_timer_free(second_timer); periodic_timer_free(second_timer);
teardown_periodic_events(); teardown_periodic_events();
#ifndef USE_BUFFEREVENTS
periodic_timer_free(refill_timer); periodic_timer_free(refill_timer);
#endif
if (!postfork) { if (!postfork) {
release_lockfile(); release_lockfile();
@ -66,12 +66,6 @@
#include <windows.h> #include <windows.h>
#endif #endif
#ifdef USE_BUFFEREVENTS
#include <event2/bufferevent.h>
#include <event2/buffer.h>
#include <event2/util.h>
#endif
#include "crypto.h" #include "crypto.h"
#include "crypto_format.h" #include "crypto_format.h"
#include "tortls.h" #include "tortls.h"
@ -1137,11 +1131,8 @@ typedef struct {
typedef struct buf_t buf_t; typedef struct buf_t buf_t;
typedef struct socks_request_t socks_request_t; typedef struct socks_request_t socks_request_t;
#ifdef USE_BUFFEREVENTS
#define generic_buffer_t struct evbuffer #define buf_t buf_t
#else
#define generic_buffer_t buf_t
#endif
typedef struct entry_port_cfg_t { typedef struct entry_port_cfg_t {
/* Client port types (socks, dns, trans, natd) only: */ /* Client port types (socks, dns, trans, natd) only: */
@ -1280,10 +1271,6 @@ typedef struct connection_t {
time_t timestamp_lastwritten; /**< When was the last time libevent said we time_t timestamp_lastwritten; /**< When was the last time libevent said we
* could write? */ * could write? */
#ifdef USE_BUFFEREVENTS
struct bufferevent *bufev; /**< A Libevent buffered IO structure. */
#endif
time_t timestamp_created; /**< When was this connection_t created? */ time_t timestamp_created; /**< When was this connection_t created? */
int socket_family; /**< Address family of this connection's socket. Usually int socket_family; /**< Address family of this connection's socket. Usually
@ -1523,17 +1510,10 @@ typedef struct or_connection_t {
/* bandwidth* and *_bucket only used by ORs in OPEN state: */ /* bandwidth* and *_bucket only used by ORs in OPEN state: */
int bandwidthrate; /**< Bytes/s added to the bucket. (OPEN ORs only.) */ int bandwidthrate; /**< Bytes/s added to the bucket. (OPEN ORs only.) */
int bandwidthburst; /**< Max bucket size for this conn. (OPEN ORs only.) */ int bandwidthburst; /**< Max bucket size for this conn. (OPEN ORs only.) */
#ifndef USE_BUFFEREVENTS
int read_bucket; /**< When this hits 0, stop receiving. Every second we int read_bucket; /**< When this hits 0, stop receiving. Every second we
* add 'bandwidthrate' to this, capping it at * add 'bandwidthrate' to this, capping it at
* bandwidthburst. (OPEN ORs only) */ * bandwidthburst. (OPEN ORs only) */
int write_bucket; /**< When this hits 0, stop writing. Like read_bucket. */ int write_bucket; /**< When this hits 0, stop writing. Like read_bucket. */
#else
/** A rate-limiting configuration object to determine how this connection
* set its read- and write- limits. */
/* XXXX we could share this among all connections. */
struct ev_token_bucket_cfg *bucket_cfg;
#endif
struct or_connection_t *next_with_same_id; /**< Next connection with same struct or_connection_t *next_with_same_id; /**< Next connection with same
* identity digest as this one. */ * identity digest as this one. */
@ -1639,11 +1619,11 @@ typedef struct entry_connection_t {
/** For AP connections only: buffer for data that we have sent /** For AP connections only: buffer for data that we have sent
* optimistically, which we might need to re-send if we have to * optimistically, which we might need to re-send if we have to
* retry this connection. */ * retry this connection. */
generic_buffer_t *pending_optimistic_data; buf_t *pending_optimistic_data;
/* For AP connections only: buffer for data that we previously sent /* For AP connections only: buffer for data that we previously sent
* optimistically which we are currently re-sending as we retry this * optimistically which we are currently re-sending as we retry this
* connection. */ * connection. */
generic_buffer_t *sending_optimistic_data; buf_t *sending_optimistic_data;
/** If this is a DNSPort connection, this field holds the pending DNS /** If this is a DNSPort connection, this field holds the pending DNS
* request that we're going to try to answer. */ * request that we're going to try to answer. */
@ -1846,51 +1826,6 @@ static inline listener_connection_t *TO_LISTENER_CONN(connection_t *c)
return DOWNCAST(listener_connection_t, c); return DOWNCAST(listener_connection_t, c);
} }
/* Conditional macros to help write code that works whether bufferevents are
disabled or not.
We can't just write:
if (conn->bufev) {
do bufferevent stuff;
} else {
do other stuff;
}
because the bufferevent stuff won't even compile unless we have a fairly
new version of Libevent. Instead, we say:
IF_HAS_BUFFEREVENT(conn, { do_bufferevent_stuff } );
or:
IF_HAS_BUFFEREVENT(conn, {
do bufferevent stuff;
}) ELSE_IF_NO_BUFFEREVENT {
do non-bufferevent stuff;
}
If we're compiling with bufferevent support, then the macros expand more or
less to:
if (conn->bufev) {
do_bufferevent_stuff;
} else {
do non-bufferevent stuff;
}
and if we aren't using bufferevents, they expand more or less to:
{ do non-bufferevent stuff; }
*/
#ifdef USE_BUFFEREVENTS
#define HAS_BUFFEREVENT(c) (((c)->bufev) != NULL)
#define IF_HAS_BUFFEREVENT(c, stmt) \
if ((c)->bufev) do { \
stmt ; \
} while (0)
#define ELSE_IF_NO_BUFFEREVENT ; else
#define IF_HAS_NO_BUFFEREVENT(c) \
if (NULL == (c)->bufev)
#else
#define HAS_BUFFEREVENT(c) (0)
#define IF_HAS_BUFFEREVENT(c, stmt) (void)0
#define ELSE_IF_NO_BUFFEREVENT ;
#define IF_HAS_NO_BUFFEREVENT(c) \
if (1)
#endif
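
The comment above already gives the rough expansion; for completeness, here is a self-contained program built around the same trick, with made-up names (USE_FAST_PATH, struct thing) standing in for the bufferevent-specific ones. Compiled with -DUSE_FAST_PATH the call site becomes a real if/else; compiled without it, only the fallback block ever reaches the compiler, which is why call sites could mention bufferevent-only code without breaking non-bufferevent builds:

    #include <stdio.h>

    /* Placeholder type for illustration; not a Tor type. */
    struct thing { int fast; };

    #ifdef USE_FAST_PATH
    #define IF_HAS_FAST_PATH(t, stmt)  \
      if ((t)->fast) do {              \
        stmt ;                         \
      } while (0)
    #define ELSE_IF_NO_FAST_PATH ; else
    #else
    #define IF_HAS_FAST_PATH(t, stmt) (void)0
    #define ELSE_IF_NO_FAST_PATH ;
    #endif

    /* With -DUSE_FAST_PATH this compiles to an if/else; without it the
     * first block is discarded by the preprocessor entirely. */
    static int
    describe(struct thing *t)
    {
      IF_HAS_FAST_PATH(t, {
        puts("fast path");
        return 1;
      }) ELSE_IF_NO_FAST_PATH {
        printf("fallback path for %p\n", (void *)t);
        return 0;
      }
    }

    int
    main(void)
    {
      struct thing t = { 1 };
      describe(&t);
      return 0;
    }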
/** What action type does an address policy indicate: accept or reject? */ /** What action type does an address policy indicate: accept or reject? */
typedef enum { typedef enum {
ADDR_POLICY_ACCEPT=1, ADDR_POLICY_ACCEPT=1,
@ -4357,12 +4292,6 @@ typedef struct {
*/ */
double CircuitPriorityHalflife; double CircuitPriorityHalflife;
/** If true, do not enable IOCP on windows with bufferevents, even if
* we think we could. */
int DisableIOCP;
/** For testing only: will go away eventually. */
int UseFilteringSSLBufferevents;
/** Set to true if the TestingTorNetwork configuration option is set. /** Set to true if the TestingTorNetwork configuration option is set.
* This is used so that options_validate() has a chance to realize that * This is used so that options_validate() has a chance to realize that
* the defaults have changed. */ * the defaults have changed. */
@ -4386,11 +4315,6 @@ typedef struct {
* never use it. If -1, we do what the consensus says. */ * never use it. If -1, we do what the consensus says. */
int OptimisticData; int OptimisticData;
/** If 1, and we are using IOCP, we set the kernel socket SNDBUF and RCVBUF
* to 0 to try to save kernel memory and avoid the dread "Out of buffers"
* issue. */
int UserspaceIOCPBuffers;
/** If 1, we accept and launch no external network connections, except on /** If 1, we accept and launch no external network connections, except on
* control ports. */ * control ports. */
int DisableNetwork; int DisableNetwork;
@ -1374,7 +1374,7 @@ connection_edge_process_relay_cell_not_open(
/* This is definitely a success, so forget about any pending data we /* This is definitely a success, so forget about any pending data we
* had sent. */ * had sent. */
if (entry_conn->pending_optimistic_data) { if (entry_conn->pending_optimistic_data) {
generic_buffer_free(entry_conn->pending_optimistic_data); buf_free(entry_conn->pending_optimistic_data);
entry_conn->pending_optimistic_data = NULL; entry_conn->pending_optimistic_data = NULL;
} }
@ -1876,7 +1876,7 @@ connection_edge_package_raw_inbuf(edge_connection_t *conn, int package_partial,
entry_conn->sending_optimistic_data != NULL; entry_conn->sending_optimistic_data != NULL;
if (PREDICT_UNLIKELY(sending_from_optimistic)) { if (PREDICT_UNLIKELY(sending_from_optimistic)) {
bytes_to_process = generic_buffer_len(entry_conn->sending_optimistic_data); bytes_to_process = buf_datalen(entry_conn->sending_optimistic_data);
if (PREDICT_UNLIKELY(!bytes_to_process)) { if (PREDICT_UNLIKELY(!bytes_to_process)) {
log_warn(LD_BUG, "sending_optimistic_data was non-NULL but empty"); log_warn(LD_BUG, "sending_optimistic_data was non-NULL but empty");
bytes_to_process = connection_get_inbuf_len(TO_CONN(conn)); bytes_to_process = connection_get_inbuf_len(TO_CONN(conn));
@ -1904,9 +1904,9 @@ connection_edge_package_raw_inbuf(edge_connection_t *conn, int package_partial,
/* XXXX We could be more efficient here by sometimes packing /* XXXX We could be more efficient here by sometimes packing
* previously-sent optimistic data in the same cell with data * previously-sent optimistic data in the same cell with data
* from the inbuf. */ * from the inbuf. */
generic_buffer_get(entry_conn->sending_optimistic_data, payload, length); fetch_from_buf(payload, length, entry_conn->sending_optimistic_data);
if (!generic_buffer_len(entry_conn->sending_optimistic_data)) { if (!buf_datalen(entry_conn->sending_optimistic_data)) {
generic_buffer_free(entry_conn->sending_optimistic_data); buf_free(entry_conn->sending_optimistic_data);
entry_conn->sending_optimistic_data = NULL; entry_conn->sending_optimistic_data = NULL;
} }
} else { } else {
@ -1921,8 +1921,8 @@ connection_edge_package_raw_inbuf(edge_connection_t *conn, int package_partial,
/* This is new optimistic data; remember it in case we need to detach and /* This is new optimistic data; remember it in case we need to detach and
retry */ retry */
if (!entry_conn->pending_optimistic_data) if (!entry_conn->pending_optimistic_data)
entry_conn->pending_optimistic_data = generic_buffer_new(); entry_conn->pending_optimistic_data = buf_new();
generic_buffer_add(entry_conn->pending_optimistic_data, payload, length); write_to_buf(payload, length, entry_conn->pending_optimistic_data);
} }
if (connection_edge_send_command(conn, RELAY_COMMAND_DATA, if (connection_edge_send_command(conn, RELAY_COMMAND_DATA,
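
These hunks keep the optimistic-data bookkeeping while dropping the generic_buffer_t spelling: payload sent before the stream is known to have connected is also appended to pending_optimistic_data, the copy is freed once the CONNECTED cell arrives (first relay.c hunk above), and on a detach/retry it becomes sending_optimistic_data and is drained before fresh inbuf data. A tiny standalone sketch of that send-and-keep-a-copy pattern (made-up names and a fixed-size array instead of Tor's buf_t):

    #include <stdio.h>
    #include <string.h>

    /* Illustrative only: a stream that remembers optimistically-sent bytes
     * until the other end confirms the connection. */
    struct opt_stream {
      char pending[4096];   /* copy of everything sent optimistically */
      size_t pending_len;
      int connected;
    };

    /* "Send" some payload; if the stream is not yet confirmed, keep a copy
     * so it could be replayed after a detach-and-retry. */
    static void
    opt_send(struct opt_stream *s, const char *payload, size_t len)
    {
      printf("sending %zu bytes\n", len);
      if (!s->connected && s->pending_len + len <= sizeof(s->pending)) {
        memcpy(s->pending + s->pending_len, payload, len);
        s->pending_len += len;
      }
    }

    /* Connection confirmed: the pending copy is no longer needed. */
    static void
    opt_connected(struct opt_stream *s)
    {
      s->connected = 1;
      s->pending_len = 0;
    }

    /* Retry on a new circuit: the pending copy would be drained first. */
    static void
    opt_retry(const struct opt_stream *s)
    {
      printf("would replay %zu optimistic bytes\n", s->pending_len);
    }

    int
    main(void)
    {
      struct opt_stream s = { {0}, 0, 0 };
      opt_send(&s, "GET / HTTP/1.0\r\n\r\n", 18);
      opt_retry(&s);       /* stream detached before CONNECTED: replay copy */
      opt_connected(&s);   /* now confirmed: copy gets dropped */
      return 0;
    }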
@ -2523,7 +2523,7 @@ set_streams_blocked_on_circ(circuit_t *circ, channel_t *chan,
edge->edge_blocked_on_circ = block; edge->edge_blocked_on_circ = block;
} }
if (!conn->read_event && !HAS_BUFFEREVENT(conn)) { if (!conn->read_event) {
/* This connection is a placeholder for something; probably a DNS /* This connection is a placeholder for something; probably a DNS
* request. It can't actually stop or start reading.*/ * request. It can't actually stop or start reading.*/
continue; continue;
@ -303,42 +303,42 @@ test_buffer_pullup(void *arg)
static void static void
test_buffer_copy(void *arg) test_buffer_copy(void *arg)
{ {
generic_buffer_t *buf=NULL, *buf2=NULL; buf_t *buf=NULL, *buf2=NULL;
const char *s; const char *s;
size_t len; size_t len;
char b[256]; char b[256];
int i; int i;
(void)arg; (void)arg;
buf = generic_buffer_new(); buf = buf_new();
tt_assert(buf); tt_assert(buf);
/* Copy an empty buffer. */ /* Copy an empty buffer. */
tt_int_op(0, OP_EQ, generic_buffer_set_to_copy(&buf2, buf)); tt_int_op(0, OP_EQ, buf_set_to_copy(&buf2, buf));
tt_assert(buf2); tt_assert(buf2);
tt_int_op(0, OP_EQ, generic_buffer_len(buf2)); tt_int_op(0, OP_EQ, buf_datalen(buf2));
/* Now try with a short buffer. */ /* Now try with a short buffer. */
s = "And now comes an act of enormous enormance!"; s = "And now comes an act of enormous enormance!";
len = strlen(s); len = strlen(s);
generic_buffer_add(buf, s, len); write_to_buf(s, len, buf);
tt_int_op(len, OP_EQ, generic_buffer_len(buf)); tt_int_op(len, OP_EQ, buf_datalen(buf));
/* Add junk to buf2 so we can test replacing.*/ /* Add junk to buf2 so we can test replacing.*/
generic_buffer_add(buf2, "BLARG", 5); write_to_buf("BLARG", 5, buf2);
tt_int_op(0, OP_EQ, generic_buffer_set_to_copy(&buf2, buf)); tt_int_op(0, OP_EQ, buf_set_to_copy(&buf2, buf));
tt_int_op(len, OP_EQ, generic_buffer_len(buf2)); tt_int_op(len, OP_EQ, buf_datalen(buf2));
generic_buffer_get(buf2, b, len); fetch_from_buf(b, len, buf2);
tt_mem_op(b, OP_EQ, s, len); tt_mem_op(b, OP_EQ, s, len);
/* Now free buf2 and retry so we can test allocating */ /* Now free buf2 and retry so we can test allocating */
generic_buffer_free(buf2); buf_free(buf2);
buf2 = NULL; buf2 = NULL;
tt_int_op(0, OP_EQ, generic_buffer_set_to_copy(&buf2, buf)); tt_int_op(0, OP_EQ, buf_set_to_copy(&buf2, buf));
tt_int_op(len, OP_EQ, generic_buffer_len(buf2)); tt_int_op(len, OP_EQ, buf_datalen(buf2));
generic_buffer_get(buf2, b, len); fetch_from_buf(b, len, buf2);
tt_mem_op(b, OP_EQ, s, len); tt_mem_op(b, OP_EQ, s, len);
/* Clear buf for next test */ /* Clear buf for next test */
generic_buffer_get(buf, b, len); fetch_from_buf(b, len, buf);
tt_int_op(generic_buffer_len(buf),OP_EQ,0); tt_int_op(buf_datalen(buf),OP_EQ,0);
/* Okay, now let's try a bigger buffer. */ /* Okay, now let's try a bigger buffer. */
s = "Quis autem vel eum iure reprehenderit qui in ea voluptate velit " s = "Quis autem vel eum iure reprehenderit qui in ea voluptate velit "
@ -347,95 +347,94 @@ test_buffer_copy(void *arg)
len = strlen(s); len = strlen(s);
for (i = 0; i < 256; ++i) { for (i = 0; i < 256; ++i) {
b[0]=i; b[0]=i;
generic_buffer_add(buf, b, 1); write_to_buf(b, 1, buf);
generic_buffer_add(buf, s, len); write_to_buf(s, len, buf);
} }
tt_int_op(0, OP_EQ, generic_buffer_set_to_copy(&buf2, buf)); tt_int_op(0, OP_EQ, buf_set_to_copy(&buf2, buf));
tt_int_op(generic_buffer_len(buf2), OP_EQ, generic_buffer_len(buf)); tt_int_op(buf_datalen(buf2), OP_EQ, buf_datalen(buf));
for (i = 0; i < 256; ++i) { for (i = 0; i < 256; ++i) {
generic_buffer_get(buf2, b, len+1); fetch_from_buf(b, len+1, buf2);
tt_int_op((unsigned char)b[0],OP_EQ,i); tt_int_op((unsigned char)b[0],OP_EQ,i);
tt_mem_op(b+1, OP_EQ, s, len); tt_mem_op(b+1, OP_EQ, s, len);
} }
done: done:
if (buf) if (buf)
generic_buffer_free(buf); buf_free(buf);
if (buf2) if (buf2)
generic_buffer_free(buf2); buf_free(buf2);
} }
static void static void
test_buffer_ext_or_cmd(void *arg) test_buffer_ext_or_cmd(void *arg)
{ {
ext_or_cmd_t *cmd = NULL; ext_or_cmd_t *cmd = NULL;
generic_buffer_t *buf = generic_buffer_new(); buf_t *buf = buf_new();
char *tmp = NULL; char *tmp = NULL;
(void) arg; (void) arg;
/* Empty -- should give "not there." */ /* Empty -- should give "not there." */
tt_int_op(0, OP_EQ, generic_buffer_fetch_ext_or_cmd(buf, &cmd)); tt_int_op(0, OP_EQ, fetch_ext_or_command_from_buf(buf, &cmd));
tt_ptr_op(NULL, OP_EQ, cmd); tt_ptr_op(NULL, OP_EQ, cmd);
/* Three bytes: shouldn't work. */ /* Three bytes: shouldn't work. */
generic_buffer_add(buf, "\x00\x20\x00", 3); write_to_buf("\x00\x20\x00", 3, buf);
tt_int_op(0, OP_EQ, generic_buffer_fetch_ext_or_cmd(buf, &cmd)); tt_int_op(0, OP_EQ, fetch_ext_or_command_from_buf(buf, &cmd));
tt_ptr_op(NULL, OP_EQ, cmd); tt_ptr_op(NULL, OP_EQ, cmd);
tt_int_op(3, OP_EQ, generic_buffer_len(buf)); tt_int_op(3, OP_EQ, buf_datalen(buf));
/* 0020 0000: That's a nil command. It should work. */ /* 0020 0000: That's a nil command. It should work. */
generic_buffer_add(buf, "\x00", 1); write_to_buf("\x00", 1, buf);
tt_int_op(1, OP_EQ, generic_buffer_fetch_ext_or_cmd(buf, &cmd)); tt_int_op(1, OP_EQ, fetch_ext_or_command_from_buf(buf, &cmd));
tt_ptr_op(NULL, OP_NE, cmd); tt_ptr_op(NULL, OP_NE, cmd);
tt_int_op(0x20, OP_EQ, cmd->cmd); tt_int_op(0x20, OP_EQ, cmd->cmd);
tt_int_op(0, OP_EQ, cmd->len); tt_int_op(0, OP_EQ, cmd->len);
tt_int_op(0, OP_EQ, generic_buffer_len(buf)); tt_int_op(0, OP_EQ, buf_datalen(buf));
ext_or_cmd_free(cmd); ext_or_cmd_free(cmd);
cmd = NULL; cmd = NULL;
/* Now try a length-6 command with one byte missing. */ /* Now try a length-6 command with one byte missing. */
generic_buffer_add(buf, "\x10\x21\x00\x06""abcde", 9); write_to_buf("\x10\x21\x00\x06""abcde", 9, buf);
tt_int_op(0, OP_EQ, generic_buffer_fetch_ext_or_cmd(buf, &cmd)); tt_int_op(0, OP_EQ, fetch_ext_or_command_from_buf(buf, &cmd));
tt_ptr_op(NULL, OP_EQ, cmd); tt_ptr_op(NULL, OP_EQ, cmd);
generic_buffer_add(buf, "f", 1); write_to_buf("f", 1, buf);
tt_int_op(1, OP_EQ, generic_buffer_fetch_ext_or_cmd(buf, &cmd)); tt_int_op(1, OP_EQ, fetch_ext_or_command_from_buf(buf, &cmd));
tt_ptr_op(NULL, OP_NE, cmd); tt_ptr_op(NULL, OP_NE, cmd);
tt_int_op(0x1021, OP_EQ, cmd->cmd); tt_int_op(0x1021, OP_EQ, cmd->cmd);
tt_int_op(6, OP_EQ, cmd->len); tt_int_op(6, OP_EQ, cmd->len);
tt_mem_op("abcdef", OP_EQ, cmd->body, 6); tt_mem_op("abcdef", OP_EQ, cmd->body, 6);
tt_int_op(0, OP_EQ, generic_buffer_len(buf)); tt_int_op(0, OP_EQ, buf_datalen(buf));
ext_or_cmd_free(cmd); ext_or_cmd_free(cmd);
cmd = NULL; cmd = NULL;
/* Now try a length-10 command with 4 extra bytes. */ /* Now try a length-10 command with 4 extra bytes. */
generic_buffer_add(buf, "\xff\xff\x00\x0a" write_to_buf("\xff\xff\x00\x0aloremipsum\x10\x00\xff\xff", 18, buf);
"loremipsum\x10\x00\xff\xff", 18); tt_int_op(1, OP_EQ, fetch_ext_or_command_from_buf(buf, &cmd));
tt_int_op(1, OP_EQ, generic_buffer_fetch_ext_or_cmd(buf, &cmd));
tt_ptr_op(NULL, OP_NE, cmd); tt_ptr_op(NULL, OP_NE, cmd);
tt_int_op(0xffff, OP_EQ, cmd->cmd); tt_int_op(0xffff, OP_EQ, cmd->cmd);
tt_int_op(10, OP_EQ, cmd->len); tt_int_op(10, OP_EQ, cmd->len);
tt_mem_op("loremipsum", OP_EQ, cmd->body, 10); tt_mem_op("loremipsum", OP_EQ, cmd->body, 10);
tt_int_op(4, OP_EQ, generic_buffer_len(buf)); tt_int_op(4, OP_EQ, buf_datalen(buf));
ext_or_cmd_free(cmd); ext_or_cmd_free(cmd);
cmd = NULL; cmd = NULL;
/* Finally, let's try a maximum-length command. We already have the header /* Finally, let's try a maximum-length command. We already have the header
* waiting. */ * waiting. */
tt_int_op(0, OP_EQ, generic_buffer_fetch_ext_or_cmd(buf, &cmd)); tt_int_op(0, OP_EQ, fetch_ext_or_command_from_buf(buf, &cmd));
tmp = tor_malloc_zero(65535); tmp = tor_malloc_zero(65535);
generic_buffer_add(buf, tmp, 65535); write_to_buf(tmp, 65535, buf);
tt_int_op(1, OP_EQ, generic_buffer_fetch_ext_or_cmd(buf, &cmd)); tt_int_op(1, OP_EQ, fetch_ext_or_command_from_buf(buf, &cmd));
tt_ptr_op(NULL, OP_NE, cmd); tt_ptr_op(NULL, OP_NE, cmd);
tt_int_op(0x1000, OP_EQ, cmd->cmd); tt_int_op(0x1000, OP_EQ, cmd->cmd);
tt_int_op(0xffff, OP_EQ, cmd->len); tt_int_op(0xffff, OP_EQ, cmd->len);
tt_mem_op(tmp, OP_EQ, cmd->body, 65535); tt_mem_op(tmp, OP_EQ, cmd->body, 65535);
tt_int_op(0, OP_EQ, generic_buffer_len(buf)); tt_int_op(0, OP_EQ, buf_datalen(buf));
ext_or_cmd_free(cmd); ext_or_cmd_free(cmd);
cmd = NULL; cmd = NULL;
done: done:
ext_or_cmd_free(cmd); ext_or_cmd_free(cmd);
generic_buffer_free(buf); buf_free(buf);
tor_free(tmp); tor_free(tmp);
} }
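
The byte strings in this test spell out the Extended ORPort framing the parser has to handle: a 2-byte big-endian command, a 2-byte big-endian body length, then exactly that many body bytes. A standalone sketch of a parser for that framing over a flat array (illustrative only; Tor's fetch_ext_or_command_from_buf works on its buf_t and allocates an ext_or_cmd_t):

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Returns 1 and fills the out-params when buf starts with a complete
     * command (as in the "\x10\x21\x00\x06" "abcdef" case above); returns 0
     * when more bytes are still needed.  *consumed is how many bytes the
     * command used, so callers can drain them and look for the next one. */
    static int
    parse_ext_or_command(const uint8_t *buf, size_t buflen,
                         uint16_t *cmd_out, uint16_t *len_out,
                         const uint8_t **body_out, size_t *consumed)
    {
      uint16_t cmd, len;
      if (buflen < 4)
        return 0;                    /* header not complete yet */
      cmd = (uint16_t)((buf[0] << 8) | buf[1]);
      len = (uint16_t)((buf[2] << 8) | buf[3]);
      if (buflen < (size_t)4 + len)
        return 0;                    /* body not complete yet */
      *cmd_out = cmd;
      *len_out = len;
      *body_out = buf + 4;
      *consumed = 4 + (size_t)len;
      return 1;
    }

    int
    main(void)
    {
      const uint8_t wire[] = "\x10\x21\x00\x06" "abcdef";
      uint16_t cmd, len;
      const uint8_t *body;
      size_t used;
      if (parse_ext_or_command(wire, sizeof(wire) - 1, &cmd, &len, &body, &used))
        printf("cmd=0x%04x len=%u body=%.*s used=%zu\n",
               (unsigned)cmd, (unsigned)len, (int)len,
               (const char *)body, used);
      return 0;
    }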
@ -124,8 +124,7 @@ test_channeltls_num_bytes_queued(void *arg)
* Next, we have to test ch->num_bytes_queued, which is * Next, we have to test ch->num_bytes_queued, which is
* channel_tls_num_bytes_queued_method. We can't mock * channel_tls_num_bytes_queued_method. We can't mock
* connection_get_outbuf_len() directly because it's static inline * connection_get_outbuf_len() directly because it's static inline
* in connection.h, but we can mock buf_datalen(). Note that * in connection.h, but we can mock buf_datalen().
* if bufferevents ever work, this will break with them enabled.
*/ */
tt_assert(ch->num_bytes_queued != NULL); tt_assert(ch->num_bytes_queued != NULL);
@ -11,9 +11,6 @@
#include <event2/event.h> #include <event2/event.h>
#include <event2/thread.h> #include <event2/thread.h>
#ifdef USE_BUFFEREVENTS
#include <event2/bufferevent.h>
#endif
#include "log_test_helpers.h" #include "log_test_helpers.h"
@ -77,14 +77,14 @@ dummy_origin_circuit_new(int n_cells)
} }
static void static void
add_bytes_to_buf(generic_buffer_t *buf, size_t n_bytes) add_bytes_to_buf(buf_t *buf, size_t n_bytes)
{ {
char b[3000]; char b[3000];
while (n_bytes) { while (n_bytes) {
size_t this_add = n_bytes > sizeof(b) ? sizeof(b) : n_bytes; size_t this_add = n_bytes > sizeof(b) ? sizeof(b) : n_bytes;
crypto_rand(b, this_add); crypto_rand(b, this_add);
generic_buffer_add(buf, b, this_add); write_to_buf(b, this_add, buf);
n_bytes -= this_add; n_bytes -= this_add;
} }
} }
@ -94,20 +94,15 @@ dummy_edge_conn_new(circuit_t *circ,
int type, size_t in_bytes, size_t out_bytes) int type, size_t in_bytes, size_t out_bytes)
{ {
edge_connection_t *conn; edge_connection_t *conn;
generic_buffer_t *inbuf, *outbuf; buf_t *inbuf, *outbuf;
if (type == CONN_TYPE_EXIT) if (type == CONN_TYPE_EXIT)
conn = edge_connection_new(type, AF_INET); conn = edge_connection_new(type, AF_INET);
else else
conn = ENTRY_TO_EDGE_CONN(entry_connection_new(type, AF_INET)); conn = ENTRY_TO_EDGE_CONN(entry_connection_new(type, AF_INET));
#ifdef USE_BUFFEREVENTS
inbuf = bufferevent_get_input(TO_CONN(conn)->bufev);
outbuf = bufferevent_get_output(TO_CONN(conn)->bufev);
#else
inbuf = TO_CONN(conn)->inbuf; inbuf = TO_CONN(conn)->inbuf;
outbuf = TO_CONN(conn)->outbuf; outbuf = TO_CONN(conn)->outbuf;
#endif
/* We add these bytes directly to the buffers, to avoid all the /* We add these bytes directly to the buffers, to avoid all the
* edge connection read/write machinery. */ * edge connection read/write machinery. */