2003-10-08 04:04:08 +02:00
|
|
|
/* Copyright 2001,2002,2003 Roger Dingledine, Matej Pfajfar. */
|
Implemented link padding and receiver token buckets
Each socket reads at most 'bandwidth' bytes per second sustained, but
can handle bursts of up to 10*bandwidth bytes.
Cells are now sent out at evenly-spaced intervals, with padding sent
out otherwise. Set Linkpadding=0 in the rc file to send cells as soon
as they're available (and to never send padding cells).
Added license/copyrights statements at the top of most files.
router->min and router->max have been merged into a single 'bandwidth'
value. We should make the routerinfo_t reflect this (want to do that,
Mat?)
As the bandwidth increases, and we want to stop sleeping more and more
frequently to send a single cell, cpu usage goes up. At 128kB/s we're
pretty much calling poll with a timeout of 1ms or even 0ms. The current
code takes a timeout of 0-9ms and makes it 10ms. prepare_for_poll()
handles everything that should have happened in the past, so as long as
our buffers don't get too full in that 10ms, we're ok.
Speaking of too full, if you run three servers at 100kB/s with -l debug,
it spends too much time printing debugging messages to be able to keep
up with the cells. The outbuf ultimately fills up and it kills that
connection. If you run with -l err, it works fine up through 500kB/s and
probably beyond. Down the road we'll want to teach it to recognize when
an outbuf is getting full, and back off.
svn:r50
2002-07-16 03:12:15 +02:00
|
|
|
/* See LICENSE for licensing information */
|
|
|
|
/* $Id$ */
|
2002-06-27 00:45:49 +02:00
|
|
|
|
|
|
|
#include "or.h"
|
|
|
|
|
|
|
|
/********* START VARIABLES **********/
|
|
|
|
|
Implemented link padding and receiver token buckets
Each socket reads at most 'bandwidth' bytes per second sustained, but
can handle bursts of up to 10*bandwidth bytes.
Cells are now sent out at evenly-spaced intervals, with padding sent
out otherwise. Set Linkpadding=0 in the rc file to send cells as soon
as they're available (and to never send padding cells).
Added license/copyrights statements at the top of most files.
router->min and router->max have been merged into a single 'bandwidth'
value. We should make the routerinfo_t reflect this (want to do that,
Mat?)
As the bandwidth increases, and we want to stop sleeping more and more
frequently to send a single cell, cpu usage goes up. At 128kB/s we're
pretty much calling poll with a timeout of 1ms or even 0ms. The current
code takes a timeout of 0-9ms and makes it 10ms. prepare_for_poll()
handles everything that should have happened in the past, so as long as
our buffers don't get too full in that 10ms, we're ok.
Speaking of too full, if you run three servers at 100kB/s with -l debug,
it spends too much time printing debugging messages to be able to keep
up with the cells. The outbuf ultimately fills up and it kills that
connection. If you run with -l err, it works fine up through 500kB/s and
probably beyond. Down the road we'll want to teach it to recognize when
an outbuf is getting full, and back off.
svn:r50
2002-07-16 03:12:15 +02:00
|
|
|
extern or_options_t options; /* command-line and config-file options */
|
|
|
|
|
2003-07-05 09:10:34 +02:00
|
|
|
extern int global_read_bucket;
|
|
|
|
|
2002-06-27 00:45:49 +02:00
|
|
|
/* Human-readable name for each connection type, indexed by its
 * CONN_TYPE_* value. Index 0 is unused; entries must stay in sync
 * with the CONN_TYPE_* constants. */
char *conn_type_to_string[] = {
  "",            /* 0 */
  "OP listener", /* 1 */
  "OP",          /* 2 */
  "OR listener", /* 3 */
  "OR",          /* 4 */
  "Exit",        /* 5 */
  "App listener",/* 6 */
  "App",         /* 7 */
  "Dir listener",/* 8 */
  "Dir",         /* 9 */
  "DNS worker",  /* 10 */
  "CPU worker",  /* 11 */
};
|
|
|
|
|
2003-09-28 08:48:20 +02:00
|
|
|
char *conn_state_to_string[][_CONN_TYPE_MAX+1] = {
|
2003-09-30 10:18:10 +02:00
|
|
|
{ NULL }, /* no type associated with 0 */
|
|
|
|
{ NULL }, /* op listener, obsolete */
|
|
|
|
{ NULL }, /* op, obsolete */
|
2002-06-27 00:45:49 +02:00
|
|
|
{ "ready" }, /* or listener, 0 */
|
2003-09-30 10:18:10 +02:00
|
|
|
{ "", /* OR, 0 */
|
|
|
|
"connect()ing", /* 1 */
|
|
|
|
"handshaking", /* 2 */
|
|
|
|
"open" }, /* 3 */
|
|
|
|
{ "", /* exit, 0 */
|
|
|
|
"waiting for dest info", /* 1 */
|
|
|
|
"connecting", /* 2 */
|
|
|
|
"open" }, /* 3 */
|
2002-09-22 00:41:48 +02:00
|
|
|
{ "ready" }, /* app listener, 0 */
|
2003-04-12 00:11:11 +02:00
|
|
|
{ "", /* 0 */
|
|
|
|
"", /* 1 */
|
|
|
|
"", /* 2 */
|
2003-09-30 10:18:10 +02:00
|
|
|
"", /* 3 */
|
|
|
|
"awaiting dest info", /* app, 4 */
|
|
|
|
"waiting for OR connection", /* 5 */
|
|
|
|
"open" }, /* 6 */
|
2002-09-26 14:09:10 +02:00
|
|
|
{ "ready" }, /* dir listener, 0 */
|
2003-09-30 10:18:10 +02:00
|
|
|
{ "", /* dir, 0 */
|
|
|
|
"connecting (fetch)", /* 1 */
|
|
|
|
"connecting (upload)", /* 2 */
|
|
|
|
"client sending fetch", /* 3 */
|
|
|
|
"client sending upload", /* 4 */
|
|
|
|
"client reading fetch", /* 5 */
|
|
|
|
"client reading upload", /* 6 */
|
|
|
|
"awaiting command", /* 7 */
|
|
|
|
"writing" }, /* 8 */
|
|
|
|
{ "", /* dns worker, 0 */
|
|
|
|
"idle", /* 1 */
|
|
|
|
"busy" }, /* 2 */
|
|
|
|
{ "", /* cpu worker, 0 */
|
|
|
|
"idle", /* 1 */
|
|
|
|
"busy with onion", /* 2 */
|
|
|
|
"busy with handshake" }, /* 3 */
|
2002-06-27 00:45:49 +02:00
|
|
|
};
|
|
|
|
|
|
|
|
/********* END VARIABLES ************/
|
|
|
|
|
2003-09-08 12:59:00 +02:00
|
|
|
static int connection_init_accepted_conn(connection_t *conn);
|
2003-10-09 20:45:14 +02:00
|
|
|
static int connection_handle_listener_read(connection_t *conn, int new_type);
|
Implemented link padding and receiver token buckets
Each socket reads at most 'bandwidth' bytes per second sustained, but
can handle bursts of up to 10*bandwidth bytes.
Cells are now sent out at evenly-spaced intervals, with padding sent
out otherwise. Set Linkpadding=0 in the rc file to send cells as soon
as they're available (and to never send padding cells).
Added license/copyrights statements at the top of most files.
router->min and router->max have been merged into a single 'bandwidth'
value. We should make the routerinfo_t reflect this (want to do that,
Mat?)
As the bandwidth increases, and we want to stop sleeping more and more
frequently to send a single cell, cpu usage goes up. At 128kB/s we're
pretty much calling poll with a timeout of 1ms or even 0ms. The current
code takes a timeout of 0-9ms and makes it 10ms. prepare_for_poll()
handles everything that should have happened in the past, so as long as
our buffers don't get too full in that 10ms, we're ok.
Speaking of too full, if you run three servers at 100kB/s with -l debug,
it spends too much time printing debugging messages to be able to keep
up with the cells. The outbuf ultimately fills up and it kills that
connection. If you run with -l err, it works fine up through 500kB/s and
probably beyond. Down the road we'll want to teach it to recognize when
an outbuf is getting full, and back off.
svn:r50
2002-07-16 03:12:15 +02:00
|
|
|
|
|
|
|
/**************************************************************/
|
|
|
|
|
2002-06-27 00:45:49 +02:00
|
|
|
connection_t *connection_new(int type) {
|
|
|
|
connection_t *conn;
|
2003-10-04 05:29:09 +02:00
|
|
|
time_t now = time(NULL);
|
2002-06-27 00:45:49 +02:00
|
|
|
|
2003-11-18 09:20:19 +01:00
|
|
|
conn = tor_malloc_zero(sizeof(connection_t));
|
2003-10-15 20:50:16 +02:00
|
|
|
conn->s = -1; /* give it a default of 'not used' */
|
2002-06-27 00:45:49 +02:00
|
|
|
|
|
|
|
conn->type = type;
|
2003-10-14 05:06:48 +02:00
|
|
|
if(!connection_is_listener(conn)) { /* listeners never use their buf */
|
|
|
|
conn->inbuf = buf_new();
|
|
|
|
conn->outbuf = buf_new();
|
|
|
|
}
|
2003-11-11 03:41:31 +01:00
|
|
|
if (type == CONN_TYPE_AP) {
|
2003-11-18 09:20:19 +01:00
|
|
|
conn->socks_request = tor_malloc_zero(sizeof(socks_request_t));
|
2003-11-11 03:41:31 +01:00
|
|
|
}
|
2002-06-27 00:45:49 +02:00
|
|
|
|
2003-10-04 05:29:09 +02:00
|
|
|
conn->timestamp_created = now;
|
|
|
|
conn->timestamp_lastread = now;
|
|
|
|
conn->timestamp_lastwritten = now;
|
2002-10-13 15:17:27 +02:00
|
|
|
|
2002-06-27 00:45:49 +02:00
|
|
|
return conn;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Release everything a connection owns, then the connection itself:
 * buffers (non-listeners only), address and nickname strings, any TLS
 * object and crypto keys, the socks_request, and the fd if open.
 * Cell-speaking connections also mark the directory dirty. */
void connection_free(connection_t *conn) {
  assert(conn);

  if(!connection_is_listener(conn)) {
    buf_free(conn->inbuf);
    buf_free(conn->outbuf);
  }
  tor_free(conn->address);

  if(connection_speaks_cells(conn)) {
    directory_set_dirty();
    if (conn->tls)
      tor_tls_free(conn->tls);
  }

  if (conn->onion_pkey)
    crypto_free_pk_env(conn->onion_pkey);
  if (conn->link_pkey)
    crypto_free_pk_env(conn->link_pkey);
  if (conn->identity_pkey)
    crypto_free_pk_env(conn->identity_pkey);
  tor_free(conn->nickname);
  tor_free(conn->socks_request);

  if(conn->s >= 0) {
    log_fn(LOG_INFO,"closing fd %d.",conn->s);
    close(conn->s);
  }
  free(conn);
}
|
|
|
|
|
2003-10-25 14:01:09 +02:00
|
|
|
int connection_create_listener(char *bindaddress, uint16_t bindport, int type) {
|
|
|
|
struct sockaddr_in bindaddr; /* where to bind */
|
|
|
|
struct hostent *rent;
|
2002-06-27 00:45:49 +02:00
|
|
|
connection_t *conn;
|
2003-10-25 14:01:09 +02:00
|
|
|
int s; /* the socket we're going to make */
|
2002-06-27 00:45:49 +02:00
|
|
|
int one=1;
|
|
|
|
|
2003-10-25 14:01:09 +02:00
|
|
|
memset(&bindaddr,0,sizeof(struct sockaddr_in));
|
|
|
|
bindaddr.sin_family = AF_INET;
|
|
|
|
bindaddr.sin_port = htons(bindport);
|
|
|
|
rent = gethostbyname(bindaddress);
|
|
|
|
if (!rent) {
|
|
|
|
log_fn(LOG_WARN,"Can't resolve BindAddress %s",bindaddress);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
if(rent->h_length != 4)
|
|
|
|
return -1; /* XXX complain */
|
|
|
|
memcpy(&(bindaddr.sin_addr.s_addr),rent->h_addr,rent->h_length);
|
|
|
|
|
2002-06-27 00:45:49 +02:00
|
|
|
s = socket(PF_INET,SOCK_STREAM,IPPROTO_TCP);
|
2003-09-16 03:58:46 +02:00
|
|
|
if (s < 0) {
|
2003-10-10 03:48:32 +02:00
|
|
|
log_fn(LOG_WARN,"Socket creation failed.");
|
2002-06-27 00:45:49 +02:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2003-10-25 14:01:09 +02:00
|
|
|
setsockopt(s, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
|
2002-06-27 00:45:49 +02:00
|
|
|
|
2003-10-25 14:01:09 +02:00
|
|
|
if(bind(s,(struct sockaddr *)&bindaddr,sizeof(bindaddr)) < 0) {
|
|
|
|
log_fn(LOG_WARN,"Could not bind to port %u: %s",bindport,strerror(errno));
|
2002-06-27 00:45:49 +02:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if(listen(s,SOMAXCONN) < 0) {
|
2003-10-25 14:01:09 +02:00
|
|
|
log_fn(LOG_WARN,"Could not listen on port %u: %s",bindport,strerror(errno));
|
2002-06-27 00:45:49 +02:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2003-08-12 05:08:41 +02:00
|
|
|
set_socket_nonblocking(s);
|
2002-06-27 00:45:49 +02:00
|
|
|
|
|
|
|
conn = connection_new(type);
|
|
|
|
conn->s = s;
|
|
|
|
|
|
|
|
if(connection_add(conn) < 0) { /* no space, forget it */
|
2003-10-10 03:48:32 +02:00
|
|
|
log_fn(LOG_WARN,"connection_add failed. Giving up.");
|
2002-06-27 00:45:49 +02:00
|
|
|
connection_free(conn);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2003-10-25 14:01:09 +02:00
|
|
|
log_fn(LOG_DEBUG,"%s listening on port %u.",conn_type_to_string[type], bindport);
|
2002-06-27 00:45:49 +02:00
|
|
|
|
|
|
|
conn->state = LISTENER_STATE_READY;
|
2002-07-18 08:37:58 +02:00
|
|
|
connection_start_reading(conn);
|
2002-06-27 00:45:49 +02:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2003-10-09 20:45:14 +02:00
|
|
|
/* Accept one pending connection on listener 'conn', wrap it in a new
 * connection_t of type 'new_type', record the peer's address, and
 * initialize it. Returns 0 on success or on per-connection failures
 * (the listener survives); returns -1 only when accept() fails with a
 * real error, which tells the caller to tear down the listener.
 *
 * Fix: 'remotelen' is now socklen_t — accept()'s third parameter is
 * socklen_t*, and passing int* is a type error on modern POSIX systems
 * (and a real bug on 64-bit platforms where the sizes differ). */
static int connection_handle_listener_read(connection_t *conn, int new_type) {
  int news; /* the new socket */
  connection_t *newconn;
  struct sockaddr_in remote; /* information about the remote peer when connecting to other routers */
  socklen_t remotelen = sizeof(struct sockaddr_in); /* length of the remote address */
#ifdef MS_WINDOWS
  int e;
#endif

  news = accept(conn->s,(struct sockaddr *)&remote,&remotelen);
  if (news == -1) { /* accept() error */
    if(ERRNO_EAGAIN(errno)) {
#ifdef MS_WINDOWS
      e = correct_socket_errno(conn->s);
      if (ERRNO_EAGAIN(e))
        return 0;
#else
      return 0; /* he hung up before we could accept(). that's fine. */
#endif
    }
    /* else there was a real error. */
    log_fn(LOG_WARN,"accept() failed. Closing listener.");
    return -1;
  }
  log(LOG_INFO,"Connection accepted on socket %d (child of fd %d).",news, conn->s);

  set_socket_nonblocking(news);

  newconn = connection_new(new_type);
  newconn->s = news;

  newconn->address = tor_strdup(inet_ntoa(remote.sin_addr)); /* remember the remote address */
  newconn->addr = ntohl(remote.sin_addr.s_addr);
  newconn->port = ntohs(remote.sin_port);

  if(connection_add(newconn) < 0) { /* no space, forget it */
    connection_free(newconn);
    return 0; /* no need to tear down the parent */
  }

  if(connection_init_accepted_conn(newconn) < 0) {
    newconn->marked_for_close = 1;
    return 0;
  }
  return 0;
}
|
|
|
|
|
|
|
|
/* Put a freshly accepted connection into its initial state and start
 * reading from it. OR connections begin the TLS handshake immediately
 * (as the server side); AP and Dir connections just get their
 * wait-for-first-data state. Returns the handshake's result for OR
 * connections, otherwise 0. */
static int connection_init_accepted_conn(connection_t *conn) {
  connection_start_reading(conn);

  switch(conn->type) {
    case CONN_TYPE_OR:
      return connection_tls_start_handshake(conn, 1);
    case CONN_TYPE_AP:
      conn->state = AP_CONN_STATE_SOCKS_WAIT;
      break;
    case CONN_TYPE_DIR:
      conn->state = DIR_CONN_STATE_SERVER_COMMAND_WAIT;
      break;
  }
  return 0;
}
|
|
|
|
|
2003-09-16 03:58:46 +02:00
|
|
|
/* take conn, make a nonblocking socket; try to connect to
|
|
|
|
* addr:port (they arrive in *host order*). If fail, return -1. Else
|
|
|
|
* assign s to conn->s: if connected return 1, if eagain return 0.
|
|
|
|
* address is used to make the logs useful.
|
|
|
|
*/
|
|
|
|
int connection_connect(connection_t *conn, char *address, uint32_t addr, uint16_t port) {
|
|
|
|
int s;
|
|
|
|
struct sockaddr_in dest_addr;
|
|
|
|
|
|
|
|
s=socket(PF_INET,SOCK_STREAM,IPPROTO_TCP);
|
|
|
|
if (s < 0) {
|
2003-10-10 03:48:32 +02:00
|
|
|
log_fn(LOG_WARN,"Error creating network socket.");
|
2003-09-16 03:58:46 +02:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
set_socket_nonblocking(s);
|
|
|
|
|
2003-11-18 09:20:19 +01:00
|
|
|
memset(&dest_addr,0,sizeof(dest_addr));
|
2003-09-16 03:58:46 +02:00
|
|
|
dest_addr.sin_family = AF_INET;
|
|
|
|
dest_addr.sin_port = htons(port);
|
|
|
|
dest_addr.sin_addr.s_addr = htonl(addr);
|
|
|
|
|
|
|
|
log_fn(LOG_DEBUG,"Connecting to %s:%u.",address,port);
|
|
|
|
|
|
|
|
if(connect(s,(struct sockaddr *)&dest_addr,sizeof(dest_addr)) < 0) {
|
|
|
|
if(!ERRNO_CONN_EINPROGRESS(errno)) {
|
|
|
|
/* yuck. kill it. */
|
|
|
|
perror("connect");
|
2003-09-26 12:03:50 +02:00
|
|
|
log_fn(LOG_INFO,"Connect() to %s:%u failed.",address,port);
|
2003-09-16 03:58:46 +02:00
|
|
|
return -1;
|
|
|
|
} else {
|
|
|
|
/* it's in progress. set state appropriately and return. */
|
|
|
|
conn->s = s;
|
|
|
|
log_fn(LOG_DEBUG,"connect in progress, socket %d.",s);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* it succeeded. we're connected. */
|
2003-09-26 12:03:50 +02:00
|
|
|
log_fn(LOG_INFO,"Connection to %s:%u established.",address,port);
|
2003-09-16 03:58:46 +02:00
|
|
|
conn->s = s;
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2003-09-07 12:24:40 +02:00
|
|
|
/* start all connections that should be up but aren't */
|
2003-10-25 14:01:09 +02:00
|
|
|
int retry_all_connections(void) {
|
Integrated onion proxy into or/
The 'or' process can now be told (by the global_role variable) what
roles this server should play -- connect to all ORs, listen for ORs,
listen for OPs, listen for APs, or any combination.
* everything in /src/op/ is now obsolete.
* connection_ap.c now handles all interactions with application proxies
* "port" is now or_port, op_port, ap_port. But routers are still always
referenced (say, in conn_get_by_addr_port()) by addr / or_port. We
should make routers.c actually read these new ports (currently I've
kludged it so op_port = or_port+10, ap_port=or_port+20)
* circuits currently know if they're at the beginning of the path because
circ->cpath is set. They use this instead for crypts (both ways),
if it's set.
* I still obey the "send a 0 back to the AP when you're ready" protocol,
but I think we should phase it out. I can simply not read from the AP
socket until I'm ready.
I need to do a lot of cleanup work here, but the code appears to work, so
now's a good time for a checkin.
svn:r22
2002-07-02 11:36:58 +02:00
|
|
|
|
2003-10-25 14:01:09 +02:00
|
|
|
if(options.ORPort) {
|
2002-10-03 00:54:20 +02:00
|
|
|
router_retry_connections();
|
2002-06-27 00:45:49 +02:00
|
|
|
}
|
|
|
|
|
2003-10-25 14:01:09 +02:00
|
|
|
if(options.ORPort && !connection_get_by_type(CONN_TYPE_OR_LISTENER)) {
|
|
|
|
if(connection_create_listener(options.ORBindAddress, options.ORPort,
|
|
|
|
CONN_TYPE_OR_LISTENER) < 0)
|
|
|
|
return -1;
|
2002-06-27 00:45:49 +02:00
|
|
|
}
|
2002-10-02 22:12:44 +02:00
|
|
|
|
2003-10-25 14:01:09 +02:00
|
|
|
if(options.DirPort && !connection_get_by_type(CONN_TYPE_DIR_LISTENER)) {
|
|
|
|
if(connection_create_listener(options.DirBindAddress, options.DirPort,
|
|
|
|
CONN_TYPE_DIR_LISTENER) < 0)
|
|
|
|
return -1;
|
2002-09-26 14:09:10 +02:00
|
|
|
}
|
2002-06-27 00:45:49 +02:00
|
|
|
|
2003-10-25 14:01:09 +02:00
|
|
|
if(options.SocksPort && !connection_get_by_type(CONN_TYPE_AP_LISTENER)) {
|
|
|
|
if(connection_create_listener(options.SocksBindAddress, options.SocksPort,
|
|
|
|
CONN_TYPE_AP_LISTENER) < 0)
|
|
|
|
return -1;
|
2002-10-02 03:03:00 +02:00
|
|
|
}
|
|
|
|
|
2002-06-27 00:45:49 +02:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2003-09-05 08:04:03 +02:00
|
|
|
/* Poll callback: there is data to read on 'conn'. Listeners accept a
 * new connection of the matching type; everything else reads into its
 * inbuf and processes it. Returns -1 to tell the caller to break this
 * connection, else 0. If a directory connection fails while still
 * connecting, the target router is marked down. */
int connection_handle_read(connection_t *conn) {

  conn->timestamp_lastread = time(NULL);

  switch(conn->type) {
    case CONN_TYPE_OR_LISTENER:
      return connection_handle_listener_read(conn, CONN_TYPE_OR);
    case CONN_TYPE_AP_LISTENER:
      return connection_handle_listener_read(conn, CONN_TYPE_AP);
    case CONN_TYPE_DIR_LISTENER:
      return connection_handle_listener_read(conn, CONN_TYPE_DIR);
  }

  if(connection_read_to_buf(conn) < 0) {
    if(conn->type == CONN_TYPE_DIR &&
       (conn->state == DIR_CONN_STATE_CONNECTING_FETCH ||
        conn->state == DIR_CONN_STATE_CONNECTING_UPLOAD)) {
      /* it's a directory server and connecting failed: forget about this router */
      /* XXX I suspect pollerr may make Windows not get to this point. :( */
      router_mark_as_down(conn->nickname);
    }
    return -1;
  }
  if(connection_process_inbuf(conn) < 0) {
    // log_fn(LOG_DEBUG,"connection_process_inbuf returned -1.");
    return -1;
  }
  return 0;
}
|
|
|
|
|
2003-09-07 12:24:40 +02:00
|
|
|
/* return -1 if we want to break conn, else return 0 */
|
2002-06-27 00:45:49 +02:00
|
|
|
int connection_read_to_buf(connection_t *conn) {
|
2003-09-07 12:24:40 +02:00
|
|
|
int result;
|
2003-09-05 13:25:24 +02:00
|
|
|
int at_most;
|
Implemented link padding and receiver token buckets
Each socket reads at most 'bandwidth' bytes per second sustained, but
can handle bursts of up to 10*bandwidth bytes.
Cells are now sent out at evenly-spaced intervals, with padding sent
out otherwise. Set Linkpadding=0 in the rc file to send cells as soon
as they're available (and to never send padding cells).
Added license/copyrights statements at the top of most files.
router->min and router->max have been merged into a single 'bandwidth'
value. We should make the routerinfo_t reflect this (want to do that,
Mat?)
As the bandwidth increases, and we want to stop sleeping more and more
frequently to send a single cell, cpu usage goes up. At 128kB/s we're
pretty much calling poll with a timeout of 1ms or even 0ms. The current
code takes a timeout of 0-9ms and makes it 10ms. prepare_for_poll()
handles everything that should have happened in the past, so as long as
our buffers don't get too full in that 10ms, we're ok.
Speaking of too full, if you run three servers at 100kB/s with -l debug,
it spends too much time printing debugging messages to be able to keep
up with the cells. The outbuf ultimately fills up and it kills that
connection. If you run with -l err, it works fine up through 500kB/s and
probably beyond. Down the road we'll want to teach it to recognize when
an outbuf is getting full, and back off.
svn:r50
2002-07-16 03:12:15 +02:00
|
|
|
|
2003-09-05 13:25:24 +02:00
|
|
|
if(options.LinkPadding) {
|
|
|
|
at_most = global_read_bucket;
|
2003-09-05 08:04:03 +02:00
|
|
|
} else {
|
2003-09-05 13:25:24 +02:00
|
|
|
/* do a rudimentary round-robin so one connection can't hog a thickpipe */
|
|
|
|
if(connection_speaks_cells(conn)) {
|
2003-10-21 10:37:07 +02:00
|
|
|
at_most = 30*(CELL_NETWORK_SIZE);
|
2003-09-05 13:25:24 +02:00
|
|
|
} else {
|
2003-10-21 10:37:07 +02:00
|
|
|
at_most = 30*(CELL_PAYLOAD_SIZE - RELAY_HEADER_SIZE);
|
2003-09-05 13:25:24 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
if(at_most > global_read_bucket)
|
|
|
|
at_most = global_read_bucket;
|
2002-07-18 08:37:58 +02:00
|
|
|
}
|
2002-10-02 01:37:31 +02:00
|
|
|
|
2003-09-07 12:24:40 +02:00
|
|
|
if(connection_speaks_cells(conn) && conn->state != OR_CONN_STATE_CONNECTING) {
|
2003-09-08 12:59:00 +02:00
|
|
|
if(conn->state == OR_CONN_STATE_HANDSHAKING)
|
|
|
|
return connection_tls_continue_handshake(conn);
|
|
|
|
|
|
|
|
/* else open, or closing */
|
2003-09-27 23:09:56 +02:00
|
|
|
if(at_most > conn->receiver_bucket)
|
|
|
|
at_most = conn->receiver_bucket;
|
2003-09-25 07:17:11 +02:00
|
|
|
result = read_to_buf_tls(conn->tls, at_most, conn->inbuf);
|
2003-09-07 12:24:40 +02:00
|
|
|
|
|
|
|
switch(result) {
|
|
|
|
case TOR_TLS_ERROR:
|
|
|
|
case TOR_TLS_CLOSE:
|
2003-09-26 12:03:50 +02:00
|
|
|
log_fn(LOG_INFO,"tls error. breaking.");
|
2003-09-07 12:24:40 +02:00
|
|
|
return -1; /* XXX deal with close better */
|
2003-09-08 12:59:00 +02:00
|
|
|
case TOR_TLS_WANTWRITE:
|
2003-09-07 12:24:40 +02:00
|
|
|
connection_start_writing(conn);
|
|
|
|
return 0;
|
2003-09-08 12:59:00 +02:00
|
|
|
case TOR_TLS_WANTREAD: /* we're already reading */
|
2003-09-07 12:24:40 +02:00
|
|
|
case TOR_TLS_DONE: /* no data read, so nothing to process */
|
|
|
|
return 0;
|
Implemented link padding and receiver token buckets
Each socket reads at most 'bandwidth' bytes per second sustained, but
can handle bursts of up to 10*bandwidth bytes.
Cells are now sent out at evenly-spaced intervals, with padding sent
out otherwise. Set Linkpadding=0 in the rc file to send cells as soon
as they're available (and to never send padding cells).
Added license/copyrights statements at the top of most files.
router->min and router->max have been merged into a single 'bandwidth'
value. We should make the routerinfo_t reflect this (want to do that,
Mat?)
As the bandwidth increases, and we want to stop sleeping more and more
frequently to send a single cell, cpu usage goes up. At 128kB/s we're
pretty much calling poll with a timeout of 1ms or even 0ms. The current
code takes a timeout of 0-9ms and makes it 10ms. prepare_for_poll()
handles everything that should have happened in the past, so as long as
our buffers don't get too full in that 10ms, we're ok.
Speaking of too full, if you run three servers at 100kB/s with -l debug,
it spends too much time printing debugging messages to be able to keep
up with the cells. The outbuf ultimately fills up and it kills that
connection. If you run with -l err, it works fine up through 500kB/s and
probably beyond. Down the road we'll want to teach it to recognize when
an outbuf is getting full, and back off.
svn:r50
2002-07-16 03:12:15 +02:00
|
|
|
}
|
2003-09-16 23:20:09 +02:00
|
|
|
} else {
|
2003-09-25 07:17:11 +02:00
|
|
|
result = read_to_buf(conn->s, at_most, conn->inbuf,
|
|
|
|
&conn->inbuf_reached_eof);
|
|
|
|
|
2003-09-07 12:24:40 +02:00
|
|
|
// log(LOG_DEBUG,"connection_read_to_buf(): read_to_buf returned %d.",read_result);
|
|
|
|
|
|
|
|
if(result < 0)
|
|
|
|
return -1;
|
Implemented link padding and receiver token buckets
Each socket reads at most 'bandwidth' bytes per second sustained, but
can handle bursts of up to 10*bandwidth bytes.
Cells are now sent out at evenly-spaced intervals, with padding sent
out otherwise. Set Linkpadding=0 in the rc file to send cells as soon
as they're available (and to never send padding cells).
Added license/copyrights statements at the top of most files.
router->min and router->max have been merged into a single 'bandwidth'
value. We should make the routerinfo_t reflect this (want to do that,
Mat?)
As the bandwidth increases, and we want to stop sleeping more and more
frequently to send a single cell, cpu usage goes up. At 128kB/s we're
pretty much calling poll with a timeout of 1ms or even 0ms. The current
code takes a timeout of 0-9ms and makes it 10ms. prepare_for_poll()
handles everything that should have happened in the past, so as long as
our buffers don't get too full in that 10ms, we're ok.
Speaking of too full, if you run three servers at 100kB/s with -l debug,
it spends too much time printing debugging messages to be able to keep
up with the cells. The outbuf ultimately fills up and it kills that
connection. If you run with -l err, it works fine up through 500kB/s and
probably beyond. Down the road we'll want to teach it to recognize when
an outbuf is getting full, and back off.
svn:r50
2002-07-16 03:12:15 +02:00
|
|
|
}
|
|
|
|
|
2003-09-07 12:24:40 +02:00
|
|
|
global_read_bucket -= result; assert(global_read_bucket >= 0);
|
2003-09-27 23:09:56 +02:00
|
|
|
if(global_read_bucket == 0) {
|
|
|
|
log_fn(LOG_DEBUG,"global bucket exhausted. Pausing.");
|
2003-09-07 12:24:40 +02:00
|
|
|
conn->wants_to_read = 1;
|
|
|
|
connection_stop_reading(conn);
|
cleanups, bugfixes, more verbose logs
Fixed up the assert_*_ok funcs some (more work remains)
Changed config so it reads either /etc/torrc or the -f arg, never both
Finally tracked down a nasty bug with our use of tls:
It turns out that if you ask SSL_read() for no more than n bytes, it
will read the entire record from the network (and maybe part of the next
record, I'm not sure), give you n bytes of it, and keep the remaining
bytes internally. This is fine, except our poll-for-read looks at the
network, and there are no bytes pending on the network, so we never know
to ask SSL_read() for more bytes. Currently I've hacked it so if we ask
for n bytes and it returns n bytes, then it reads again right then. This
will interact poorly with our rate limiting; we need a cleaner solution.
svn:r481
2003-09-24 23:24:52 +02:00
|
|
|
return 0;
|
2003-09-07 12:24:40 +02:00
|
|
|
}
|
2003-09-27 23:09:56 +02:00
|
|
|
if(connection_speaks_cells(conn) && conn->state == OR_CONN_STATE_OPEN) {
|
|
|
|
conn->receiver_bucket -= result; assert(conn->receiver_bucket >= 0);
|
|
|
|
if(conn->receiver_bucket == 0) {
|
|
|
|
log_fn(LOG_DEBUG,"receiver bucket exhausted. Pausing.");
|
|
|
|
conn->wants_to_read = 1;
|
|
|
|
connection_stop_reading(conn);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
2003-09-07 12:24:40 +02:00
|
|
|
return 0;
|
2002-06-27 00:45:49 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Pull 'len' bytes off conn's inbuf into 'string'; thin wrapper
 * around fetch_from_buf(), whose return value is passed through. */
int connection_fetch_from_buf(char *string, int len, connection_t *conn) {
  return fetch_from_buf(string, len, conn->inbuf);
}
|
|
|
|
|
2002-09-28 07:53:00 +02:00
|
|
|
/* Search conn's inbuf for the 'len'-byte sequence 'string'; thin
 * wrapper around find_on_inbuf(), whose return value is passed through. */
int connection_find_on_inbuf(char *string, int len, connection_t *conn) {
  return find_on_inbuf(string, len, conn->inbuf);
}
|
|
|
|
|
2002-07-18 08:37:58 +02:00
|
|
|
/* Nonzero iff conn has bytes queued for flushing (returns the count
 * of bytes in outbuf_flushlen, usable as a boolean). */
int connection_wants_to_flush(connection_t *conn) {
  return conn->outbuf_flushlen;
}
|
|
|
|
|
|
|
|
/* True iff conn's pending outbuf flush exceeds 10 cell payloads —
 * the threshold above which callers should back off writing. */
int connection_outbuf_too_full(connection_t *conn) {
  return (conn->outbuf_flushlen > 10*CELL_PAYLOAD_SIZE);
}
|
|
|
|
|
2003-09-07 12:24:40 +02:00
|
|
|
/* return -1 if you want to break the conn, else return 0 */
int connection_handle_write(connection_t *conn) {

  if(connection_is_listener(conn)) {
    /* Listener sockets never have an outbuf to flush; reaching here is a bug. */
    log_fn(LOG_WARN,"Got a listener socket. Can't happen!");
    return -1;
  }

  /* Record write activity (read e.g. by
   * connection_get_by_type_state_lastwritten). */
  conn->timestamp_lastwritten = time(NULL);

  if(connection_speaks_cells(conn) && conn->state != OR_CONN_STATE_CONNECTING) {
    if(conn->state == OR_CONN_STATE_HANDSHAKING) {
      /* Mid-TLS-handshake: stop flushing and let the handshake code
       * decide what happens next; its result is our result. */
      connection_stop_writing(conn);
      return connection_tls_continue_handshake(conn);
    }

    /* else open, or closing */
    switch(flush_buf_tls(conn->tls, conn->outbuf, &conn->outbuf_flushlen)) {
      case TOR_TLS_ERROR: /* fallthrough: error and close handled alike */
      case TOR_TLS_CLOSE:
        log_fn(LOG_INFO,"tls error. breaking.");
        return -1; /* XXX deal with close better */
      case TOR_TLS_WANTWRITE:
        log_fn(LOG_DEBUG,"wanted write.");
        /* we're already writing */
        return 0;
      case TOR_TLS_WANTREAD:
        /* Make sure to avoid a loop if the receive buckets are empty. */
        log_fn(LOG_DEBUG,"wanted read.");
        if(!connection_is_reading(conn)) {
          /* Reading is paused (rate limiting); remember that we still
           * want to write so it can be resumed later. */
          connection_stop_writing(conn);
          conn->wants_to_write = 1;
          /* we'll start reading again when the next second arrives,
           * and then also start writing again.
           */
        }
        /* else no problem, we're already reading */
        return 0;
      /* case TOR_TLS_DONE:
       * for TOR_TLS_DONE, fall through to check if the flushlen
       * is empty, so we can stop writing.
       */
    }
  } else {
    /* Non-TLS connection (or an OR conn still connecting): plain flush. */
    if(flush_buf(conn->s, conn->outbuf, &conn->outbuf_flushlen) < 0)
      return -1;
    /* conns in CONNECTING state will fall through... */
  }

  if(!connection_wants_to_flush(conn)) /* it's done flushing */
    if(connection_finished_flushing(conn) < 0) /* ...and get handled here. */
      return -1;

  return 0;
}
|
|
|
|
|
2003-10-04 04:38:18 +02:00
|
|
|
/* Append len bytes from string to conn's outbuf; on buffer failure,
 * mark the connection for close.  No-op when len is 0 or the connection
 * is already marked for close. */
void connection_write_to_buf(const char *string, int len, connection_t *conn) {

  if(!len || conn->marked_for_close)
    return;

  if( (!connection_speaks_cells(conn)) ||
      (!connection_state_is_open(conn)) ||
      (options.LinkPadding == 0) ) {
    /* connection types other than or, or or not in 'open' state, should flush immediately */
    /* also flush immediately if we're not doing LinkPadding, since otherwise it will never flush */
    connection_start_writing(conn);
    conn->outbuf_flushlen += len;
  }

  /* NOTE(review): outbuf_flushlen may have been bumped above before this
   * write can fail; on failure the count and the buffer disagree, though
   * the connection is marked for close anyway — confirm this is benign. */
  if(write_to_buf(string, len, conn->outbuf) < 0) {
    log_fn(LOG_WARN,"write_to_buf failed. Closing connection (fd %d).", conn->s);
    conn->marked_for_close = 1;
  }
}
|
|
|
|
|
2003-09-30 21:06:22 +02:00
|
|
|
connection_t *connection_exact_get_by_addr_port(uint32_t addr, uint16_t port) {
|
|
|
|
int i, n;
|
|
|
|
connection_t *conn;
|
|
|
|
connection_t **carray;
|
|
|
|
|
|
|
|
get_connection_array(&carray,&n);
|
|
|
|
for(i=0;i<n;i++) {
|
|
|
|
conn = carray[i];
|
|
|
|
if(conn->addr == addr && conn->port == port && !conn->marked_for_close)
|
|
|
|
return conn;
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
connection_t *connection_twin_get_by_addr_port(uint32_t addr, uint16_t port) {
|
|
|
|
/* Find a connection to the router described by addr and port,
|
|
|
|
* or alternately any router which knows its key.
|
|
|
|
* This connection *must* be in 'open' state.
|
|
|
|
* If not, return NULL.
|
|
|
|
*/
|
|
|
|
int i, n;
|
|
|
|
connection_t *conn;
|
|
|
|
routerinfo_t *router;
|
|
|
|
connection_t **carray;
|
|
|
|
|
|
|
|
/* first check if it's there exactly */
|
|
|
|
conn = connection_exact_get_by_addr_port(addr,port);
|
|
|
|
if(conn && connection_state_is_open(conn)) {
|
|
|
|
log(LOG_INFO,"connection_twin_get_by_addr_port(): Found exact match.");
|
|
|
|
return conn;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* now check if any of the other open connections are a twin for this one */
|
|
|
|
|
|
|
|
router = router_get_by_addr_port(addr,port);
|
|
|
|
if(!router)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
get_connection_array(&carray,&n);
|
|
|
|
for(i=0;i<n;i++) {
|
|
|
|
conn = carray[i];
|
|
|
|
assert(conn);
|
|
|
|
if(connection_state_is_open(conn) &&
|
|
|
|
!crypto_pk_cmp_keys(conn->onion_pkey, router->onion_pkey)) {
|
|
|
|
log(LOG_INFO,"connection_twin_get_by_addr_port(): Found twin (%s).",conn->address);
|
|
|
|
return conn;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
connection_t *connection_get_by_type(int type) {
|
|
|
|
int i, n;
|
|
|
|
connection_t *conn;
|
|
|
|
connection_t **carray;
|
|
|
|
|
|
|
|
get_connection_array(&carray,&n);
|
|
|
|
for(i=0;i<n;i++) {
|
|
|
|
conn = carray[i];
|
|
|
|
if(conn->type == type && !conn->marked_for_close)
|
|
|
|
return conn;
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
connection_t *connection_get_by_type_state(int type, int state) {
|
|
|
|
int i, n;
|
|
|
|
connection_t *conn;
|
|
|
|
connection_t **carray;
|
|
|
|
|
2003-09-30 21:25:16 +02:00
|
|
|
get_connection_array(&carray,&n);
|
2003-09-30 21:06:22 +02:00
|
|
|
for(i=0;i<n;i++) {
|
|
|
|
conn = carray[i];
|
|
|
|
if(conn->type == type && conn->state == state && !conn->marked_for_close)
|
|
|
|
return conn;
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
connection_t *connection_get_by_type_state_lastwritten(int type, int state) {
|
|
|
|
int i, n;
|
|
|
|
connection_t *conn, *best=NULL;
|
|
|
|
connection_t **carray;
|
|
|
|
|
2003-09-30 21:25:16 +02:00
|
|
|
get_connection_array(&carray,&n);
|
2003-09-30 21:06:22 +02:00
|
|
|
for(i=0;i<n;i++) {
|
|
|
|
conn = carray[i];
|
|
|
|
if(conn->type == type && conn->state == state && !conn->marked_for_close)
|
|
|
|
if(!best || conn->timestamp_lastwritten < best->timestamp_lastwritten)
|
|
|
|
best = conn;
|
|
|
|
}
|
|
|
|
return best;
|
|
|
|
}
|
|
|
|
|
Implemented link padding and receiver token buckets
Each socket reads at most 'bandwidth' bytes per second sustained, but
can handle bursts of up to 10*bandwidth bytes.
Cells are now sent out at evenly-spaced intervals, with padding sent
out otherwise. Set Linkpadding=0 in the rc file to send cells as soon
as they're available (and to never send padding cells).
Added license/copyrights statements at the top of most files.
router->min and router->max have been merged into a single 'bandwidth'
value. We should make the routerinfo_t reflect this (want to do that,
Mat?)
As the bandwidth increases, and we want to stop sleeping more and more
frequently to send a single cell, cpu usage goes up. At 128kB/s we're
pretty much calling poll with a timeout of 1ms or even 0ms. The current
code takes a timeout of 0-9ms and makes it 10ms. prepare_for_poll()
handles everything that should have happened in the past, so as long as
our buffers don't get too full in that 10ms, we're ok.
Speaking of too full, if you run three servers at 100kB/s with -l debug,
it spends too much time printing debugging messages to be able to keep
up with the cells. The outbuf ultimately fills up and it kills that
connection. If you run with -l err, it works fine up through 500kB/s and
probably beyond. Down the road we'll want to teach it to recognize when
an outbuf is getting full, and back off.
svn:r50
2002-07-16 03:12:15 +02:00
|
|
|
/* Decide whether conn's receiver token bucket should be topped up:
 * only open OR connections participate, and a bucket already holding
 * more than 9x the per-second bandwidth is left alone. */
int connection_receiver_bucket_should_increase(connection_t *conn) {
  assert(conn);

  if(!connection_speaks_cells(conn))
    return 0; /* edge connections don't use receiver_buckets */
  if(conn->state != OR_CONN_STATE_OPEN)
    return 0; /* only open connections play the rate limiting game */

  assert(conn->bandwidth > 0);
  return conn->receiver_bucket <= 9*conn->bandwidth;
}
|
|
|
|
|
2002-09-22 00:41:48 +02:00
|
|
|
/* Return 1 if conn is one of the three listener types (OR, AP, or
 * directory), else 0. */
int connection_is_listener(connection_t *conn) {
  switch(conn->type) {
    case CONN_TYPE_OR_LISTENER:
    case CONN_TYPE_AP_LISTENER:
    case CONN_TYPE_DIR_LISTENER:
      return 1;
    default:
      return 0;
  }
}
|
|
|
|
|
Implemented link padding and receiver token buckets
Each socket reads at most 'bandwidth' bytes per second sustained, but
can handle bursts of up to 10*bandwidth bytes.
Cells are now sent out at evenly-spaced intervals, with padding sent
out otherwise. Set Linkpadding=0 in the rc file to send cells as soon
as they're available (and to never send padding cells).
Added license/copyrights statements at the top of most files.
router->min and router->max have been merged into a single 'bandwidth'
value. We should make the routerinfo_t reflect this (want to do that,
Mat?)
As the bandwidth increases, and we want to stop sleeping more and more
frequently to send a single cell, cpu usage goes up. At 128kB/s we're
pretty much calling poll with a timeout of 1ms or even 0ms. The current
code takes a timeout of 0-9ms and makes it 10ms. prepare_for_poll()
handles everything that should have happened in the past, so as long as
our buffers don't get too full in that 10ms, we're ok.
Speaking of too full, if you run three servers at 100kB/s with -l debug,
it spends too much time printing debugging messages to be able to keep
up with the cells. The outbuf ultimately fills up and it kills that
connection. If you run with -l err, it works fine up through 500kB/s and
probably beyond. Down the road we'll want to teach it to recognize when
an outbuf is getting full, and back off.
svn:r50
2002-07-16 03:12:15 +02:00
|
|
|
/* Return 1 if conn is an OR, AP, or exit connection sitting in its
 * respective 'open' state and not marked for close; 0 otherwise. */
int connection_state_is_open(connection_t *conn) {
  assert(conn);

  if(conn->marked_for_close)
    return 0;

  switch(conn->type) {
    case CONN_TYPE_OR:
      return conn->state == OR_CONN_STATE_OPEN;
    case CONN_TYPE_AP:
      return conn->state == AP_CONN_STATE_OPEN;
    case CONN_TYPE_EXIT:
      return conn->state == EXIT_CONN_STATE_OPEN;
    default:
      return 0;
  }
}
|
|
|
|
|
2003-11-11 04:01:48 +01:00
|
|
|
/* Tear down circuit circ_id as seen from conn.  For edge connections
 * (which don't speak cells) this ends the stream; for OR connections it
 * queues a CELL_DESTROY cell.  Always returns 0. */
int connection_send_destroy(circ_id_t circ_id, connection_t *conn) {
  cell_t cell;

  assert(conn);

  if(!connection_speaks_cells(conn)) {
    /* Edge connection: no cell to send, just end the stream. */
    log_fn(LOG_INFO,"CircID %d: At an edge. Marking connection for close.",
           circ_id);
    connection_edge_end(conn, END_STREAM_REASON_DESTROY, conn->cpath_layer);
    /* if they already sent a destroy, they know. XXX can just close? */
    return 0;
  }

  /* OR connection: build and buffer a destroy cell for circ_id. */
  memset(&cell, 0, sizeof(cell_t));
  cell.circ_id = circ_id;
  cell.command = CELL_DESTROY;
  log_fn(LOG_INFO,"Sending destroy (circID %d).", circ_id);
  connection_or_write_cell_to_buf(&cell, conn);
  return 0;
}
|
|
|
|
|
2002-06-27 00:45:49 +02:00
|
|
|
/* Dispatch newly-arrived inbuf data to the handler for conn's type.
 * Returns the handler's result: -1 to break the connection, else 0. */
int connection_process_inbuf(connection_t *conn) {

  assert(conn);

  switch(conn->type) {
    case CONN_TYPE_OR:
      return connection_or_process_inbuf(conn);
    case CONN_TYPE_EXIT: /* fallthrough: exit and AP are both edge conns */
    case CONN_TYPE_AP:
      return connection_edge_process_inbuf(conn);
    case CONN_TYPE_DIR:
      return connection_dir_process_inbuf(conn);
    case CONN_TYPE_DNSWORKER:
      return connection_dns_process_inbuf(conn);
    case CONN_TYPE_CPUWORKER:
      return connection_cpu_process_inbuf(conn);
    default:
      log_fn(LOG_WARN,"got unexpected conn->type %d.", conn->type);
      return -1;
  }
}
|
|
|
|
|
|
|
|
/* Called when conn's outbuf has finished flushing; dispatch to the
 * per-type completion handler.  Returns -1 to break the connection,
 * else 0. */
int connection_finished_flushing(connection_t *conn) {

  assert(conn);

//  log_fn(LOG_DEBUG,"entered. Socket %u.", conn->s);

  switch(conn->type) {
    case CONN_TYPE_OR:
      return connection_or_finished_flushing(conn);
    case CONN_TYPE_AP: /* fallthrough: AP and exit share the edge handler */
    case CONN_TYPE_EXIT:
      return connection_edge_finished_flushing(conn);
    case CONN_TYPE_DIR:
      return connection_dir_finished_flushing(conn);
    case CONN_TYPE_DNSWORKER:
      return connection_dns_finished_flushing(conn);
    case CONN_TYPE_CPUWORKER:
      return connection_cpu_finished_flushing(conn);
    default:
      log_fn(LOG_WARN,"got unexpected conn->type %d.", conn->type);
      return -1;
  }
}
|
|
|
|
|
2003-09-16 21:36:19 +02:00
|
|
|
void assert_connection_ok(connection_t *conn, time_t now)
|
|
|
|
{
|
cleanups, bugfixes, more verbose logs
Fixed up the assert_*_ok funcs some (more work remains)
Changed config so it reads either /etc/torrc or the -f arg, never both
Finally tracked down a nasty bug with our use of tls:
It turns out that if you ask SSL_read() for no more than n bytes, it
will read the entire record from the network (and maybe part of the next
record, I'm not sure), give you n bytes of it, and keep the remaining
bytes internally. This is fine, except our poll-for-read looks at the
network, and there are no bytes pending on the network, so we never know
to ask SSL_read() for more bytes. Currently I've hacked it so if we ask
for n bytes and it returns n bytes, then it reads again right then. This
will interact poorly with our rate limiting; we need a cleaner solution.
svn:r481
2003-09-24 23:24:52 +02:00
|
|
|
return;
|
2003-09-16 21:36:19 +02:00
|
|
|
assert(conn);
|
|
|
|
assert(conn->type >= _CONN_TYPE_MIN);
|
|
|
|
assert(conn->type <= _CONN_TYPE_MAX);
|
|
|
|
|
|
|
|
/* XXX check: wants_to_read, wants_to_write, s, poll_index,
|
|
|
|
* marked_for_close. */
|
|
|
|
|
|
|
|
/* buffers */
|
2003-11-23 19:14:19 +01:00
|
|
|
if (!connection_is_listener(conn)) {
|
|
|
|
assert(conn->inbuf);
|
|
|
|
assert(conn->outbuf);
|
|
|
|
}
|
2003-09-16 21:36:19 +02:00
|
|
|
|
|
|
|
assert(!now || conn->timestamp_lastread <= now);
|
|
|
|
assert(!now || conn->timestamp_lastwritten <= now);
|
|
|
|
assert(conn->timestamp_created <= conn->timestamp_lastread);
|
|
|
|
assert(conn->timestamp_created <= conn->timestamp_lastwritten);
|
|
|
|
|
2003-09-25 07:17:11 +02:00
|
|
|
/* XXX Fix this; no longer so.*/
|
|
|
|
#if 0
|
cleanups, bugfixes, more verbose logs
Fixed up the assert_*_ok funcs some (more work remains)
Changed config so it reads either /etc/torrc or the -f arg, never both
Finally tracked down a nasty bug with our use of tls:
It turns out that if you ask SSL_read() for no more than n bytes, it
will read the entire record from the network (and maybe part of the next
record, I'm not sure), give you n bytes of it, and keep the remaining
bytes internally. This is fine, except our poll-for-read looks at the
network, and there are no bytes pending on the network, so we never know
to ask SSL_read() for more bytes. Currently I've hacked it so if we ask
for n bytes and it returns n bytes, then it reads again right then. This
will interact poorly with our rate limiting; we need a cleaner solution.
svn:r481
2003-09-24 23:24:52 +02:00
|
|
|
if(conn->type != CONN_TYPE_OR && conn->type != CONN_TYPE_DIR)
|
|
|
|
assert(!conn->pkey);
|
|
|
|
/* pkey is set if we're a dir client, or if we're an OR in state OPEN
|
|
|
|
* connected to another OR.
|
|
|
|
*/
|
2003-09-25 07:17:11 +02:00
|
|
|
#endif
|
cleanups, bugfixes, more verbose logs
Fixed up the assert_*_ok funcs some (more work remains)
Changed config so it reads either /etc/torrc or the -f arg, never both
Finally tracked down a nasty bug with our use of tls:
It turns out that if you ask SSL_read() for no more than n bytes, it
will read the entire record from the network (and maybe part of the next
record, I'm not sure), give you n bytes of it, and keep the remaining
bytes internally. This is fine, except our poll-for-read looks at the
network, and there are no bytes pending on the network, so we never know
to ask SSL_read() for more bytes. Currently I've hacked it so if we ask
for n bytes and it returns n bytes, then it reads again right then. This
will interact poorly with our rate limiting; we need a cleaner solution.
svn:r481
2003-09-24 23:24:52 +02:00
|
|
|
|
2003-09-16 21:36:19 +02:00
|
|
|
if (conn->type != CONN_TYPE_OR) {
|
|
|
|
assert(!conn->tls);
|
|
|
|
} else {
|
2003-09-27 23:09:56 +02:00
|
|
|
if(conn->state == OR_CONN_STATE_OPEN) {
|
|
|
|
assert(conn->bandwidth > 0);
|
|
|
|
assert(conn->receiver_bucket >= 0);
|
|
|
|
assert(conn->receiver_bucket <= 10*conn->bandwidth);
|
|
|
|
}
|
2003-09-16 21:36:19 +02:00
|
|
|
assert(conn->addr && conn->port);
|
|
|
|
assert(conn->address);
|
|
|
|
if (conn->state != OR_CONN_STATE_CONNECTING)
|
|
|
|
assert(conn->tls);
|
|
|
|
}
|
|
|
|
|
2003-09-16 21:51:09 +02:00
|
|
|
if (conn->type != CONN_TYPE_EXIT && conn->type != CONN_TYPE_AP) {
|
2003-09-16 21:36:19 +02:00
|
|
|
assert(!conn->stream_id[0]);
|
|
|
|
assert(!conn->next_stream);
|
|
|
|
assert(!conn->cpath_layer);
|
|
|
|
assert(!conn->package_window);
|
|
|
|
assert(!conn->deliver_window);
|
|
|
|
assert(!conn->done_sending);
|
|
|
|
assert(!conn->done_receiving);
|
|
|
|
} else {
|
|
|
|
assert(!conn->next_stream ||
|
2003-09-16 21:51:09 +02:00
|
|
|
conn->next_stream->type == CONN_TYPE_EXIT ||
|
|
|
|
conn->next_stream->type == CONN_TYPE_AP);
|
cleanups, bugfixes, more verbose logs
Fixed up the assert_*_ok funcs some (more work remains)
Changed config so it reads either /etc/torrc or the -f arg, never both
Finally tracked down a nasty bug with our use of tls:
It turns out that if you ask SSL_read() for no more than n bytes, it
will read the entire record from the network (and maybe part of the next
record, I'm not sure), give you n bytes of it, and keep the remaining
bytes internally. This is fine, except our poll-for-read looks at the
network, and there are no bytes pending on the network, so we never know
to ask SSL_read() for more bytes. Currently I've hacked it so if we ask
for n bytes and it returns n bytes, then it reads again right then. This
will interact poorly with our rate limiting; we need a cleaner solution.
svn:r481
2003-09-24 23:24:52 +02:00
|
|
|
if(conn->type == CONN_TYPE_AP && conn->state == AP_CONN_STATE_OPEN)
|
|
|
|
assert(conn->cpath_layer);
|
|
|
|
if(conn->cpath_layer)
|
|
|
|
assert_cpath_layer_ok(conn->cpath_layer);
|
2003-09-16 21:36:19 +02:00
|
|
|
/* XXX unchecked, package window, deliver window. */
|
|
|
|
}
|
2003-11-11 03:41:31 +01:00
|
|
|
if (conn->type != CONN_TYPE_AP) {
|
|
|
|
assert(!conn->socks_request);
|
|
|
|
}
|
2003-09-16 21:36:19 +02:00
|
|
|
|
|
|
|
switch(conn->type)
|
|
|
|
{
|
|
|
|
case CONN_TYPE_OR_LISTENER:
|
|
|
|
case CONN_TYPE_AP_LISTENER:
|
|
|
|
case CONN_TYPE_DIR_LISTENER:
|
|
|
|
assert(conn->state == LISTENER_STATE_READY);
|
|
|
|
break;
|
|
|
|
case CONN_TYPE_OR:
|
|
|
|
assert(conn->state >= _OR_CONN_STATE_MIN &&
|
|
|
|
conn->state <= _OR_CONN_STATE_MAX);
|
|
|
|
break;
|
|
|
|
case CONN_TYPE_EXIT:
|
|
|
|
assert(conn->state >= _EXIT_CONN_STATE_MIN &&
|
|
|
|
conn->state <= _EXIT_CONN_STATE_MAX);
|
|
|
|
break;
|
|
|
|
case CONN_TYPE_AP:
|
2003-09-16 22:57:09 +02:00
|
|
|
assert(conn->state >= _AP_CONN_STATE_MIN &&
|
2003-09-16 21:36:19 +02:00
|
|
|
conn->state <= _AP_CONN_STATE_MAX);
|
2003-11-23 19:14:19 +01:00
|
|
|
assert(conn->socks_request);
|
2003-09-16 21:36:19 +02:00
|
|
|
break;
|
|
|
|
case CONN_TYPE_DIR:
|
|
|
|
assert(conn->state >= _DIR_CONN_STATE_MIN &&
|
|
|
|
conn->state <= _DIR_CONN_STATE_MAX);
|
|
|
|
break;
|
|
|
|
case CONN_TYPE_DNSWORKER:
|
|
|
|
assert(conn->state == DNSWORKER_STATE_IDLE ||
|
|
|
|
conn->state == DNSWORKER_STATE_BUSY);
|
|
|
|
case CONN_TYPE_CPUWORKER:
|
|
|
|
assert(conn->state >= _CPUWORKER_STATE_MIN &&
|
|
|
|
conn->state <= _CPUWORKER_STATE_MAX);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
assert(0);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2003-04-07 04:12:02 +02:00
|
|
|
/*
|
|
|
|
Local Variables:
|
|
|
|
mode:c
|
|
|
|
indent-tabs-mode:nil
|
|
|
|
c-basic-offset:2
|
|
|
|
End:
|
|
|
|
*/
|
2003-09-13 00:45:31 +02:00
|
|
|
|