/* Copyright 2001,2002 Roger Dingledine, Matej Pfajfar. */
/* See LICENSE for licensing information */
/* $Id$ */

#include "or.h"

/********* START VARIABLES **********/

or_options_t options; /* command-line and config-file options */

static connection_t *connection_array[MAXCONNECTIONS] =
        { NULL };

static struct pollfd poll_array[MAXCONNECTIONS];

static int nfds=0; /* number of connections currently active */

static int please_dumpstats=0; /* whether we should dump stats during the loop */
static int please_fetch_directory=0; /* whether we should fetch a new directory */

/* private key */
static crypto_pk_env_t *privatekey;

routerinfo_t *my_routerinfo=NULL;

/********* END VARIABLES ************/

void setprivatekey(crypto_pk_env_t *k) {
  privatekey = k;
}

crypto_pk_env_t *getprivatekey(void) {
  assert(privatekey);
  return privatekey;
}

/*****************************************************************************
 *
 * This section contains accessors and other methods on the connection_array
 * and poll_array variables (which are global within this file and
 * unavailable outside it).
 *
 *****************************************************************************/

int connection_add(connection_t *conn) {

  if(nfds >= options.MaxConn-1) {
    log(LOG_INFO,"connection_add(): failing because nfds is too high.");
    return -1;
  }

  conn->poll_index = nfds;
  connection_set_poll_socket(conn);
  connection_array[nfds] = conn;

  /* zero these out here, because otherwise we'll inherit values from the previously freed one */
  poll_array[nfds].events = 0;
  poll_array[nfds].revents = 0;

  nfds++;

  log(LOG_INFO,"connection_add(): new conn type %d, socket %d, nfds %d.",conn->type, conn->s, nfds);

  return 0;
}
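
/* Invariant kept by connection_add(): for every live slot i,
 * connection_array[i]->poll_index == i and poll_array[i].fd ==
 * connection_array[i]->s, so the two arrays can be walked in parallel.
 */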

void connection_set_poll_socket(connection_t *conn) {
  poll_array[conn->poll_index].fd = conn->s;
}

int connection_remove(connection_t *conn) {
  int current_index;

  assert(conn);
  assert(nfds>0);

  log(LOG_INFO,"connection_remove(): removing socket %d, nfds now %d",conn->s, nfds-1);
  circuit_about_to_close_connection(conn); /* if it's an edge conn, remove it from the list
                                            * of conn's on this circuit. If it's not on an edge,
                                            * flush and send destroys for all circuits on this conn
                                            */

  current_index = conn->poll_index;
  if(current_index == nfds-1) { /* this is the end */
    nfds--;
    return 0;
  }

  /* we replace this one with the one at the end, then free it */
  nfds--;
  poll_array[current_index].fd = poll_array[nfds].fd;
  poll_array[current_index].events = poll_array[nfds].events;
  poll_array[current_index].revents = poll_array[nfds].revents;
  connection_array[current_index] = connection_array[nfds];
  connection_array[current_index]->poll_index = current_index;

  return 0;
}
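
/* Illustration of the swap-with-last removal above: with nfds==4 and
 * connections [A,B,C,D], removing B (poll_index 1) leaves [A,D,C] with
 * nfds==3; D's poll_index becomes 1 and its pending revents travel with
 * it, so no already-reported event is lost.
 */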

connection_t *connection_twin_get_by_addr_port(uint32_t addr, uint16_t port) {
  /* Find a connection to the router described by addr and port,
   * or alternately any router which knows its key.
   * This connection *must* be in 'open' state.
   * If not, return NULL.
   */
  int i;
  connection_t *conn;
  routerinfo_t *router;

  /* first check if it's there exactly */
  conn = connection_exact_get_by_addr_port(addr,port);
  if(conn && connection_state_is_open(conn)) {
    log(LOG_INFO,"connection_twin_get_by_addr_port(): Found exact match.");
    return conn;
  }

  /* now check if any of the other open connections are a twin for this one */
  router = router_get_by_addr_port(addr,port);
  if(!router)
    return NULL;

  for(i=0;i<nfds;i++) {
    conn = connection_array[i];
    assert(conn);
    if(connection_state_is_open(conn) && !crypto_pk_cmp_keys(conn->pkey, router->pkey)) {
      log(LOG_INFO,"connection_twin_get_by_addr_port(): Found twin (%s).",conn->address);
      return conn;
    }
  }

  /* guess not */
  return NULL;
}

connection_t *connection_exact_get_by_addr_port(uint32_t addr, uint16_t port) {
  int i;
  connection_t *conn;

  for(i=0;i<nfds;i++) {
    conn = connection_array[i];
    assert(conn);
    if(conn->addr == addr && conn->port == port)
      return conn;
  }
  return NULL;
}
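
/* Both lookups above are linear scans over connection_array, so their
 * cost grows with nfds (bounded by MAXCONNECTIONS); cheap at current
 * scales.
 */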

connection_t *connection_get_by_type(int type) {
  int i;
  connection_t *conn;

  for(i=0;i<nfds;i++) {
    conn = connection_array[i];
    if(conn->type == type)
      return conn;
  }
  return NULL;
}

void connection_watch_events(connection_t *conn, short events) {

  assert(conn && conn->poll_index < nfds);

  poll_array[conn->poll_index].events = events;
}
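
/* Note: connection_watch_events() overwrites the entire event mask in
 * one shot, while the helpers below toggle POLLIN/POLLOUT individually.
 */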

void connection_stop_reading(connection_t *conn) {

  assert(conn && conn->poll_index < nfds);

  log(LOG_DEBUG,"connection_stop_reading() called.");
  if(poll_array[conn->poll_index].events & POLLIN)
    poll_array[conn->poll_index].events -= POLLIN;
}

void connection_start_reading(connection_t *conn) {

  assert(conn && conn->poll_index < nfds);

  poll_array[conn->poll_index].events |= POLLIN;
}

void connection_stop_writing(connection_t *conn) {

  assert(conn && conn->poll_index < nfds);

  if(poll_array[conn->poll_index].events & POLLOUT)
    poll_array[conn->poll_index].events -= POLLOUT;
}

void connection_start_writing(connection_t *conn) {

  assert(conn && conn->poll_index < nfds);

  poll_array[conn->poll_index].events |= POLLOUT;
}
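
/* Typical flow (illustrative, not enforced here): a conn that queues
 * outgoing data calls connection_start_writing() so poll() will report
 * POLLOUT; once check_conn_write() below drains the outbuf, the
 * finished-flushing handler is the natural place to call
 * connection_stop_writing() again.
 */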

void check_conn_read(int i) {
  int retval;
  connection_t *conn;

  if(poll_array[i].revents & POLLIN) { /* something to read */

    conn = connection_array[i];
    assert(conn);
//    log(LOG_DEBUG,"check_conn_read(): socket %d has something to read.",conn->s);

    if (conn->type == CONN_TYPE_OP_LISTENER) {
      retval = connection_op_handle_listener_read(conn);
    } else if (conn->type == CONN_TYPE_OR_LISTENER) {
      retval = connection_or_handle_listener_read(conn);
    } else if (conn->type == CONN_TYPE_AP_LISTENER) {
      retval = connection_ap_handle_listener_read(conn);
    } else if (conn->type == CONN_TYPE_DIR_LISTENER) {
      retval = connection_dir_handle_listener_read(conn);
    } else {
      retval = connection_read_to_buf(conn);
      if (retval < 0 && conn->type == CONN_TYPE_DIR && conn->state == DIR_CONN_STATE_CONNECTING) {
        /* it's a directory server and connecting failed: forget about this router */
        router_forget_router(conn->addr,conn->port); /* FIXME i don't think this function works. */
      }
      if (retval >= 0) { /* all still well */
        retval = connection_process_inbuf(conn);
//        log(LOG_DEBUG,"check_conn_read(): connection_process_inbuf returned %d.",retval);
        if(retval >= 0 && !connection_state_is_open(conn) && conn->receiver_bucket == 0) {
          log(LOG_DEBUG,"check_conn_read(): receiver bucket reached 0 before handshake finished. Closing.");
          retval = -1;
        }
      }
    }

    if(retval < 0) { /* this connection is broken. remove it */
      log(LOG_INFO,"check_conn_read(): Connection broken, removing.");
      connection_remove(conn);
      connection_free(conn);
      if(i<nfds) { /* we just replaced the one at i with a new one. process it too. */
        check_conn_read(i);
      }
    }
  }
}
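
/* The recursive check_conn_read(i) re-check above is safe because
 * connection_remove() copies the last slot's revents into slot i, so the
 * swapped-in connection's pending events are re-examined rather than
 * dropped.
 */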

void check_conn_write(int i) {
  int retval;
  connection_t *conn;

  if(poll_array[i].revents & POLLOUT) { /* something to write */

    conn = connection_array[i];
//    log(LOG_DEBUG,"check_conn_write(): socket %d wants to write.",conn->s);

    if(connection_is_listener(conn)) {
      log(LOG_DEBUG,"check_conn_write(): Got a listener socket. Can't happen!");
      retval = -1;
    } else {
      /* else it's an OP, OR, or exit */
      retval = connection_flush_buf(conn); /* conns in CONNECTING state will fall through... */
      if(retval == 0) { /* it's done flushing */
        retval = connection_finished_flushing(conn); /* ...and get handled here. */
      }
    }

    if(retval < 0) { /* this connection is broken. remove it. */
      log(LOG_DEBUG,"check_conn_write(): Connection broken, removing.");
      connection_remove(conn);
      connection_free(conn);
      if(i<nfds) { /* we just replaced the one at i with a new one. process it too. */
        check_conn_write(i);
      }
    }
  }
}

void check_conn_marked(int i) {
  connection_t *conn;

  conn = connection_array[i];
  assert(conn);
  if(conn->marked_for_close) {
    log(LOG_DEBUG,"check_conn_marked(): Cleaning up connection.");
    if(conn->s >= 0) { /* might be an incomplete exit connection */
      /* FIXME there's got to be a better way to check for this -- and make other checks? */
      connection_flush_buf(conn); /* flush it first */
    }
    connection_remove(conn);
    connection_free(conn);
    if(i<nfds) { /* we just replaced the one at i with a new one. process it too. */
      check_conn_marked(i);
    }
  }
}
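
/* marked_for_close is set during prepare_for_poll() (keepalive expiry or
 * a failed padding-cell write) and honored here: the conn gets a
 * best-effort flush, then removal and free.
 */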

int prepare_for_poll(int *timeout) {
  int i;
//  connection_t *conn = NULL;
  connection_t *tmpconn;
  struct timeval now; //soonest;
  static long current_second = 0; /* from previous calls to gettimeofday */
  static long time_to_fetch_directory = 0;
  static long time_to_new_circuit = 0;
//  int ms_until_conn;
  cell_t cell;
  circuit_t *circ;

  my_gettimeofday(&now);

  if(now.tv_sec > current_second) { /* the second has rolled over. check more stuff. */

    if(!options.DirPort) {
      if(time_to_fetch_directory < now.tv_sec) {
        /* it's time to fetch a new directory */
        /* NOTE directory servers do not currently fetch directories.
         * Hope this doesn't bite us later.
         */
        directory_initiate_fetch(router_pick_directory_server());
        time_to_fetch_directory = now.tv_sec + options.DirFetchPeriod;
      }
    }

    if(options.APPort && time_to_new_circuit < now.tv_sec) {
      circuit_expire_unused_circuits();
      circuit_launch_new(-1); /* tell it to forget about previous failures */
      circ = circuit_get_newest_ap();
      if(!circ || circ->dirty) {
        log(LOG_INFO,"prepare_for_poll(): Youngest circuit %s; launching replacement.", circ ? "dirty" : "missing");
        circuit_launch_new(0); /* make an onion and lay the circuit */
      }
      time_to_new_circuit = now.tv_sec + options.NewCircuitPeriod;
    }

    /* do housekeeping for each connection */
    for(i=0;i<nfds;i++) {
      tmpconn = connection_array[i];
      connection_increment_receiver_bucket(tmpconn);

      /* check connections to see whether we should send a keepalive, expire, or wait */
      if(!connection_speaks_cells(tmpconn))
        continue; /* this conn type doesn't send cells */
      if(now.tv_sec >= tmpconn->timestamp_lastwritten + options.KeepalivePeriod) {
        if((!options.ORPort && !circuit_get_by_conn(tmpconn)) ||
           (!connection_state_is_open(tmpconn))) {
          /* we're an onion proxy, with no circuits; or our handshake has expired. kill it. */
          log(LOG_DEBUG,"prepare_for_poll(): Expiring connection to %d (%s:%d).",
              i,tmpconn->address, tmpconn->port);
          tmpconn->marked_for_close = 1;
        } else {
          /* either a full router, or we've got a circuit. send a padding cell. */
//          log(LOG_DEBUG,"prepare_for_poll(): Sending keepalive to (%s:%d)",
//              tmpconn->address, tmpconn->port);
          memset(&cell,0,sizeof(cell_t));
          cell.command = CELL_PADDING;
          if(connection_write_cell_to_buf(&cell, tmpconn) < 0)
            tmpconn->marked_for_close = 1;
        }
      }
    }

    /* blow away any connections that need to die. can't do this later
     * because we might open up a circuit and not realize we're about to cull it.
     */
    for(i=0;i<nfds;i++)
      check_conn_marked(i);

    current_second = now.tv_sec; /* remember which second it is, for next time */
  }

  if(onion_pending_check()) {
    /* there's an onion pending. check for new things to do, but don't wait any time */
    *timeout = 0;
  } else {
    *timeout = 1000 - (now.tv_usec / 1000); /* how many milliseconds til the next second? */
  }

  return 0;
}
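
/* Worked example of the timeout computation above: if prepare_for_poll()
 * runs with now.tv_usec == 250000, *timeout becomes 1000 - 250 = 750 ms,
 * so poll() wakes on the next one-second boundary, when receiver buckets
 * are refilled and keepalives are re-checked.
 */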

/* Link padding stuff left here for fun. Not used now. */
#if 0
  if(options.LinkPadding) {
    /* now check which conn wants to speak soonest */
    for(i=0;i<nfds;i++) {
      tmpconn = connection_array[i];
      if(!connection_speaks_cells(tmpconn))
        continue; /* this conn type doesn't send cells */
      if(!connection_state_is_open(tmpconn))
        continue; /* only conns in state 'open' have a valid send_timeval */
      while(tv_cmp(&tmpconn->send_timeval,&now) <= 0) { /* send_timeval has already passed, let it send a cell */
        connection_send_cell(tmpconn);
      }
      if(!conn || tv_cmp(&tmpconn->send_timeval, &soonest) < 0) { /* this is the best choice so far */
        conn = tmpconn;
        soonest.tv_sec = conn->send_timeval.tv_sec;
        soonest.tv_usec = conn->send_timeval.tv_usec;
      }
    }

    if(conn) { /* we might want to set *timeout sooner */
      ms_until_conn = (soonest.tv_sec - now.tv_sec)*1000 +
                      (soonest.tv_usec - now.tv_usec)/1000;
//      log(LOG_DEBUG,"prepare_for_poll(): conn %d times out in %d ms.",conn->s, ms_until_conn);
      if(ms_until_conn < *timeout) { /* use the new one */
//        log(LOG_DEBUG,"prepare_for_poll(): conn %d soonest, in %d ms.",conn->s,ms_until_conn);
        *timeout = ms_until_conn;
      }
    }
  }
#endif
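
/* Historical sketch of the disabled block above: each open cell-speaking
 * conn carried a send_timeval; every interval that had already elapsed
 * sent one cell (padding when nothing real was queued), and *timeout was
 * pulled in to the soonest send_timeval so poll() woke exactly when the
 * next evenly-spaced cell was due.
 */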

int do_main_loop(void) {
|
|
|
|
int i;
|
Implemented link padding and receiver token buckets
Each socket reads at most 'bandwidth' bytes per second sustained, but
can handle bursts of up to 10*bandwidth bytes.
Cells are now sent out at evenly-spaced intervals, with padding sent
out otherwise. Set Linkpadding=0 in the rc file to send cells as soon
as they're available (and to never send padding cells).
Added license/copyrights statements at the top of most files.
router->min and router->max have been merged into a single 'bandwidth'
value. We should make the routerinfo_t reflect this (want to do that,
Mat?)
As the bandwidth increases, and we want to stop sleeping more and more
frequently to send a single cell, cpu usage goes up. At 128kB/s we're
pretty much calling poll with a timeout of 1ms or even 0ms. The current
code takes a timeout of 0-9ms and makes it 10ms. prepare_for_poll()
handles everything that should have happened in the past, so as long as
our buffers don't get too full in that 10ms, we're ok.
Speaking of too full, if you run three servers at 100kB/s with -l debug,
it spends too much time printing debugging messages to be able to keep
up with the cells. The outbuf ultimately fills up and it kills that
connection. If you run with -l err, it works fine up through 500kB/s and
probably beyond. Down the road we'll want to teach it to recognize when
an outbuf is getting full, and back off.
svn:r50
2002-07-16 03:12:15 +02:00
|
|
|
int timeout;
|
|
|
|
int poll_result;
|
2002-09-28 02:52:59 +02:00
|
|
|
crypto_pk_env_t *prkey;
|
2002-06-27 00:45:49 +02:00
|
|
|
|
|
|
|
/* load the routers file */
|
2002-10-03 00:54:20 +02:00
|
|
|
if(router_get_list_from_file(options.RouterFile) < 0) {
|
2002-06-27 00:45:49 +02:00
|
|
|
log(LOG_ERR,"Error loading router list.");
|
Implemented link padding and receiver token buckets
Each socket reads at most 'bandwidth' bytes per second sustained, but
can handle bursts of up to 10*bandwidth bytes.
Cells are now sent out at evenly-spaced intervals, with padding sent
out otherwise. Set Linkpadding=0 in the rc file to send cells as soon
as they're available (and to never send padding cells).
Added license/copyrights statements at the top of most files.
router->min and router->max have been merged into a single 'bandwidth'
value. We should make the routerinfo_t reflect this (want to do that,
Mat?)
As the bandwidth increases, and we want to stop sleeping more and more
frequently to send a single cell, cpu usage goes up. At 128kB/s we're
pretty much calling poll with a timeout of 1ms or even 0ms. The current
code takes a timeout of 0-9ms and makes it 10ms. prepare_for_poll()
handles everything that should have happened in the past, so as long as
our buffers don't get too full in that 10ms, we're ok.
Speaking of too full, if you run three servers at 100kB/s with -l debug,
it spends too much time printing debugging messages to be able to keep
up with the cells. The outbuf ultimately fills up and it kills that
connection. If you run with -l err, it works fine up through 500kB/s and
probably beyond. Down the road we'll want to teach it to recognize when
an outbuf is getting full, and back off.
svn:r50
2002-07-16 03:12:15 +02:00
|
|
|
return -1;
|
2002-06-27 00:45:49 +02:00
|
|
|
}
|
|
|
|
|
2002-09-04 08:29:28 +02:00
|
|
|
/* load the private key, if we're supposed to have one */
|
2003-03-18 02:49:55 +01:00
|
|
|
if(options.ORPort) {
|
2002-09-04 08:29:28 +02:00
|
|
|
prkey = crypto_new_pk_env(CRYPTO_PK_RSA);
|
|
|
|
if (!prkey) {
|
|
|
|
log(LOG_ERR,"Error creating a crypto environment.");
|
|
|
|
return -1;
|
|
|
|
}
|
2002-09-24 12:43:57 +02:00
|
|
|
if (crypto_pk_read_private_key_from_filename(prkey, options.PrivateKeyFile))
|
2002-09-04 08:29:28 +02:00
|
|
|
{
|
|
|
|
log(LOG_ERR,"Error loading private key.");
|
|
|
|
return -1;
|
|
|
|
}
|
2002-09-28 02:52:59 +02:00
|
|
|
setprivatekey(prkey);
|
2002-06-27 00:45:49 +02:00
|
|
|
}
|
|
|
|
|
2003-03-18 02:49:55 +01:00
|
|
|
/* start up the necessary connections based on which ports are
|
|
|
|
* non-zero. This is where we try to connect to all the other ORs,
|
|
|
|
* and start the listeners
|
|
|
|
*/
|
|
|
|
retry_all_connections(options.ORPort,
|
2002-09-26 14:09:10 +02:00
|
|
|
options.OPPort, options.APPort, options.DirPort);
|
2002-06-27 00:45:49 +02:00
|
|
|
|
|
|
|
for(;;) {
|
2002-09-22 00:41:48 +02:00
|
|
|
if(please_dumpstats) {
|
|
|
|
dumpstats();
|
2002-09-28 03:40:11 +02:00
|
|
|
please_dumpstats = 0;
|
|
|
|
}
|
|
|
|
if(please_fetch_directory) {
|
2003-03-18 02:49:55 +01:00
|
|
|
if(options.DirPort) {
|
2002-10-03 00:54:20 +02:00
|
|
|
if(router_get_list_from_file(options.RouterFile) < 0) {
|
2002-09-28 03:40:11 +02:00
|
|
|
log(LOG_ERR,"Error reloading router list. Continuing with old list.");
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
directory_initiate_fetch(router_pick_directory_server());
|
|
|
|
}
|
|
|
|
please_fetch_directory = 0;
|
2002-09-22 00:41:48 +02:00
|
|
|
}
|
Implemented link padding and receiver token buckets
Each socket reads at most 'bandwidth' bytes per second sustained, but
can handle bursts of up to 10*bandwidth bytes.
Cells are now sent out at evenly-spaced intervals, with padding sent
out otherwise. Set Linkpadding=0 in the rc file to send cells as soon
as they're available (and to never send padding cells).
Added license/copyrights statements at the top of most files.
router->min and router->max have been merged into a single 'bandwidth'
value. We should make the routerinfo_t reflect this (want to do that,
Mat?)
As the bandwidth increases, and we want to stop sleeping more and more
frequently to send a single cell, cpu usage goes up. At 128kB/s we're
pretty much calling poll with a timeout of 1ms or even 0ms. The current
code takes a timeout of 0-9ms and makes it 10ms. prepare_for_poll()
handles everything that should have happened in the past, so as long as
our buffers don't get too full in that 10ms, we're ok.
Speaking of too full, if you run three servers at 100kB/s with -l debug,
it spends too much time printing debugging messages to be able to keep
up with the cells. The outbuf ultimately fills up and it kills that
connection. If you run with -l err, it works fine up through 500kB/s and
probably beyond. Down the road we'll want to teach it to recognize when
an outbuf is getting full, and back off.
svn:r50
2002-07-16 03:12:15 +02:00
|
|
|
if(prepare_for_poll(&timeout) < 0) {
|
|
|
|
log(LOG_DEBUG,"do_main_loop(): prepare_for_poll failed, exiting.");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
/* now timeout is the value we'll hand to poll. It's either -1, meaning
|
|
|
|
* don't time out, or the number of milliseconds until the soonest event (either the
|
|
|
|
* one-second rollover for refilling receiver buckets, or the soonest
|
|
|
|
* conn that needs to send a cell)
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* poll until we have an event, or it's time to do something */
|
|
|
|
poll_result = poll(poll_array, nfds, timeout);
|
|
|
|
|
2002-07-16 20:24:12 +02:00
|
|
|
#if 0 /* let catch() handle things like ^c, and otherwise don't worry about it */
|
2002-07-16 03:12:15 +02:00
|
|
|
if(poll_result < 0) {
|
|
|
|
log(LOG_ERR,"do_main_loop(): poll failed.");
|
|
|
|
if(errno != EINTR) /* let the program survive things like ^z */
|
|
|
|
return -1;
|
|
|
|
}
|
2002-07-16 20:24:12 +02:00
|
|
|
#endif
|
2002-06-27 00:45:49 +02:00
|
|
|
|
2002-11-27 05:08:20 +01:00
|
|
|
if(poll_result == 0) {
|
|
|
|
/* poll timed out without anything to do. process a pending onion, if any. */
|
|
|
|
onion_pending_process_one();
|
|
|
|
}
|
|
|
|
|
2002-07-16 03:12:15 +02:00
|
|
|
if(poll_result > 0) { /* we have at least one connection to deal with */
|
|
|
|
/* do all the reads first, so we can detect closed sockets */
|
|
|
|
for(i=0;i<nfds;i++)
|
|
|
|
check_conn_read(i); /* this also blows away broken connections */
|
2002-06-27 00:45:49 +02:00
|
|
|
|
2002-07-16 03:12:15 +02:00
|
|
|
/* then do the writes */
|
|
|
|
for(i=0;i<nfds;i++)
|
|
|
|
check_conn_write(i);
|
2002-06-27 00:45:49 +02:00
|
|
|
|
2002-07-16 03:12:15 +02:00
|
|
|
/* any of the conns need to be closed now? */
|
|
|
|
for(i=0;i<nfds;i++)
|
|
|
|
check_conn_marked(i);
|
|
|
|
}
|
|
|
|
/* refilling buckets and sending cells happens at the beginning of the
|
|
|
|
* next iteration of the loop, inside prepare_for_poll()
|
|
|
|
*/
|
2002-06-27 00:45:49 +02:00
|
|
|
}
|
|
|
|
}
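/* A rough sketch (not this file's actual prepare_for_poll(), which
 * lives elsewhere) of how the timeout handed to poll() above can be
 * computed: refill every receiver bucket on the one-second rollover,
 * then wake up again at the next second boundary. The fields
 * conn->receiver_bucket and conn->bandwidth are assumptions here,
 * not this file's real API. */
#if 0
static int prepare_for_poll_sketch(int *timeout) {
  static long current_second = 0;
  struct timeval now;
  int i;

  my_gettimeofday(&now);
  if(now.tv_sec > current_second) {
    /* the one-second rollover: refill each receiver bucket, capped
     * at 10x the sustained bandwidth to allow bursts */
    for(i=0;i<nfds;i++) {
      connection_t *conn = connection_array[i];
      conn->receiver_bucket += conn->bandwidth;
      if(conn->receiver_bucket > 10*conn->bandwidth)
        conn->receiver_bucket = 10*conn->bandwidth;
    }
    current_second = now.tv_sec;
  }
  /* wake up at the next one-second rollover; a fuller version would
   * also consider the soonest conn that needs to send a cell */
  *timeout = 1000 - (int)(now.tv_usec / 1000);
  return 0;
}
#endif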
|
|
|
|
|
2002-09-28 03:40:11 +02:00
|
|
|
static void catch(int the_signal) {
|
2002-06-27 00:45:49 +02:00
|
|
|
|
2002-09-28 03:40:11 +02:00
|
|
|
switch(the_signal) {
|
2002-11-24 09:45:54 +01:00
|
|
|
// case SIGABRT:
|
2002-09-28 03:40:11 +02:00
|
|
|
case SIGTERM:
|
|
|
|
case SIGINT:
|
|
|
|
log(LOG_NOTICE,"Catching signal %d, exiting cleanly.", the_signal);
|
|
|
|
exit(0);
|
|
|
|
case SIGHUP:
|
|
|
|
please_fetch_directory = 1;
|
|
|
|
break;
|
|
|
|
case SIGUSR1:
|
|
|
|
please_dumpstats = 1;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
log(LOG_ERR,"Caught signal that we can't handle??");
|
|
|
|
}
|
2002-09-22 00:41:48 +02:00
|
|
|
}
|
|
|
|
|
2003-04-16 08:18:31 +02:00
|
|
|
void dumpstats(void) { /* dump stats to stdout */
|
2002-09-22 00:41:48 +02:00
|
|
|
int i;
|
|
|
|
connection_t *conn;
|
2002-10-02 01:37:31 +02:00
|
|
|
struct timeval now;
|
2002-09-22 00:41:48 +02:00
|
|
|
extern char *conn_type_to_string[];
|
2002-09-26 14:09:10 +02:00
|
|
|
extern char *conn_state_to_string[][15];
|
2002-09-22 00:41:48 +02:00
|
|
|
|
|
|
|
printf("Dumping stats:\n");
|
2003-04-16 19:04:58 +02:00
|
|
|
my_gettimeofday(&now);
|
2002-09-22 00:41:48 +02:00
|
|
|
|
|
|
|
for(i=0;i<nfds;i++) {
|
|
|
|
conn = connection_array[i];
|
2002-10-02 01:37:31 +02:00
|
|
|
printf("Conn %d (socket %d) type %d (%s), state %d (%s), created %ld secs ago\n",
|
2002-09-22 00:41:48 +02:00
|
|
|
i, conn->s, conn->type, conn_type_to_string[conn->type],
|
2002-10-02 01:37:31 +02:00
|
|
|
conn->state, conn_state_to_string[conn->type][conn->state], now.tv_sec - conn->timestamp_created);
|
2002-09-22 00:41:48 +02:00
|
|
|
if(!connection_is_listener(conn)) {
|
|
|
|
printf("Conn %d is to '%s:%d'.\n",i,conn->address, conn->port);
|
2002-10-02 01:37:31 +02:00
|
|
|
printf("Conn %d: %d bytes waiting on inbuf (last read %ld secs ago)\n",i,conn->inbuf_datalen,
|
|
|
|
now.tv_sec - conn->timestamp_lastread);
|
|
|
|
printf("Conn %d: %d bytes waiting on outbuf (last written %ld secs ago)\n",i,conn->outbuf_datalen,
|
|
|
|
now.tv_sec - conn->timestamp_lastwritten);
|
2002-09-22 00:41:48 +02:00
|
|
|
}
|
|
|
|
circuit_dump_by_conn(conn); /* dump info about all the circuits using this conn */
|
|
|
|
printf("\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2002-09-26 15:17:14 +02:00
|
|
|
int dump_router_to_string(char *s, int maxlen, routerinfo_t *router) {
|
2002-09-26 14:09:10 +02:00
|
|
|
char *pkey;
|
2003-05-07 04:13:23 +02:00
|
|
|
char *signing_pkey, *signing_pkey_tag;
|
|
|
|
int pkeylen, signing_pkeylen;
|
2002-09-26 14:09:10 +02:00
|
|
|
int written;
|
2003-04-08 08:44:38 +02:00
|
|
|
int result=0;
|
|
|
|
struct exit_policy_t *tmpe;
|
2002-09-26 15:17:14 +02:00
|
|
|
|
|
|
|
if(crypto_pk_write_public_key_to_string(router->pkey,&pkey,&pkeylen)<0) {
|
2003-03-11 02:51:41 +01:00
|
|
|
log(LOG_ERR,"dump_router_to_string(): write pkey to string failed!");
|
2002-09-26 15:17:14 +02:00
|
|
|
return 0;
|
|
|
|
}
|
2003-04-08 08:44:38 +02:00
|
|
|
|
2003-05-07 04:13:23 +02:00
|
|
|
signing_pkey = "";
|
|
|
|
signing_pkey_tag = "";
|
|
|
|
if (router->signing_pkey) {
|
|
|
|
if(crypto_pk_write_public_key_to_string(router->signing_pkey,
|
|
|
|
&signing_pkey,&signing_pkeylen)<0) {
|
|
|
|
log(LOG_ERR,"dump_router_to_string(): write signing_pkey to string failed!");
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
signing_pkey_tag = "signing-key\n";
|
|
|
|
}
|
|
|
|
|
|
|
|
result = snprintf(s, maxlen, "router %s %d %d %d %d %d\n%s%s%s",
|
2002-09-26 15:17:14 +02:00
|
|
|
router->address,
|
|
|
|
router->or_port,
|
|
|
|
router->op_port,
|
|
|
|
router->ap_port,
|
|
|
|
router->dir_port,
|
|
|
|
router->bandwidth,
|
2003-05-07 04:13:23 +02:00
|
|
|
pkey,
|
|
|
|
signing_pkey_tag, signing_pkey);
|
2002-09-26 15:17:14 +02:00
|
|
|
|
|
|
|
free(pkey);
|
2003-05-07 04:13:23 +02:00
|
|
|
if (*signing_pkey)
|
|
|
|
free(signing_pkey);
|
2002-09-26 15:17:14 +02:00
|
|
|
|
2003-04-08 08:44:38 +02:00
|
|
|
if(result < 0 || result >= maxlen) {
|
|
|
|
/* older glibcs return -1 on snprintf truncation; C99 returns the would-be length, so check both */
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
written = result;
|
|
|
|
|
|
|
|
for(tmpe=router->exit_policy; tmpe; tmpe=tmpe->next) {
|
|
|
|
result = snprintf(s+written, maxlen-written, "%s %s:%s\n",
|
|
|
|
tmpe->policy_type == EXIT_POLICY_ACCEPT ? "accept" : "reject",
|
|
|
|
tmpe->address, tmpe->port);
|
|
|
|
if(result < 0 || result+written >= maxlen) {
|
|
|
|
/* older glibcs return -1 on snprintf truncation; C99 returns the would-be length, so check both */
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
written += result;
|
|
|
|
}
|
|
|
|
|
|
|
|
if(written > maxlen-2) {
|
|
|
|
return -1; /* not enough space for \n\0 */
|
|
|
|
}
|
|
|
|
/* XXX count fenceposts here. They're probably wrong. In general,
|
|
|
|
* we need a better way to handle overruns in building the directory
|
|
|
|
* string, and a better way to handle directory string size in general. */
|
|
|
|
|
|
|
|
/* include a last '\n' */
|
|
|
|
s[written] = '\n';
|
|
|
|
s[written+1] = 0;
|
|
|
|
return written+1;
|
|
|
|
|
2002-09-26 15:17:14 +02:00
|
|
|
}
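/* The XXX fencepost worry above recurs because every caller re-derives
 * the snprintf truncation rule by hand. A sketch (not code from this
 * file) of a checked-append helper that would centralize it: */
#if 0
#include <stdarg.h>
static int append_checked(char *s, int maxlen, int *written,
                          const char *format, ...) {
  va_list ap;
  int result;

  va_start(ap, format);
  result = vsnprintf(s + *written, maxlen - *written, format, ap);
  va_end(ap);
  /* older glibcs return -1 on truncation; C99 returns the would-be
   * length, so check both */
  if(result < 0 || result >= maxlen - *written)
    return -1;
  *written += result;
  return 0;
}
#endif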
|
|
|
|
|
2003-05-07 04:13:23 +02:00
|
|
|
void dump_directory_to_string(char *s, int maxlen)
|
|
|
|
{
|
|
|
|
directory_t dir;
|
2003-05-07 05:32:18 +02:00
|
|
|
routerinfo_t **routers = NULL;
|
2002-09-26 15:17:14 +02:00
|
|
|
connection_t *conn;
|
2002-09-26 14:09:10 +02:00
|
|
|
routerinfo_t *router;
|
2003-05-07 04:13:23 +02:00
|
|
|
int i, n = 0;
|
2002-09-26 14:09:10 +02:00
|
|
|
|
2003-05-07 04:13:23 +02:00
|
|
|
routers = (routerinfo_t**) malloc(sizeof(routerinfo_t*) * (nfds+1));
|
|
|
|
if (!routers) {
|
|
|
|
/* XXX out of memory; for now just give up on building the directory */
|
|
|
|
return;
|
2002-10-13 15:17:27 +02:00
|
|
|
}
|
2003-05-07 04:13:23 +02:00
|
|
|
if (my_routerinfo) {
|
|
|
|
routers[n++] = my_routerinfo;
|
|
|
|
}
|
|
|
|
for(i = 0; i<nfds; ++i) {
|
2002-09-26 14:09:10 +02:00
|
|
|
conn = connection_array[i];
|
|
|
|
|
|
|
|
if(conn->type != CONN_TYPE_OR)
|
|
|
|
continue; /* we only want to list ORs */
|
2002-10-13 15:17:27 +02:00
|
|
|
if(conn->state != OR_CONN_STATE_OPEN)
|
|
|
|
continue; /* we only want to list ones that successfully handshaked */
|
2002-09-26 14:09:10 +02:00
|
|
|
router = router_get_by_addr_port(conn->addr,conn->port);
|
|
|
|
if(!router) {
|
|
|
|
log(LOG_ERR,"dump_directory_to_string(): couldn't find router %d:%d!",conn->addr,conn->port);
|
2002-10-13 15:17:27 +02:00
|
|
|
continue;
|
2002-09-26 14:09:10 +02:00
|
|
|
}
|
2003-05-07 04:13:23 +02:00
|
|
|
routers[n++] = router;
|
|
|
|
}
|
|
|
|
dir.routers = routers;
|
|
|
|
dir.n_routers = n;
|
|
|
|
|
|
|
|
dump_directory_to_string_impl(s, maxlen, &dir);
free(routers); /* don't leak the temporary router array */
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
dump_signed_directory_to_string_impl(char *s, int maxlen, directory_t *dir,
|
|
|
|
crypto_pk_env_t *private_key)
|
|
|
|
{
|
|
|
|
char *cp;
|
|
|
|
char digest[20];
|
|
|
|
char signature[128];
|
|
|
|
int i;
|
|
|
|
strncpy(s,
|
|
|
|
"signed-directory\n"
|
|
|
|
"client-software x y z\n" /* XXX make this real */
|
|
|
|
"server-software a b c\n\n" /* XXX make this real */
|
|
|
|
, maxlen);
s[maxlen-1] = 0; /* strncpy doesn't NUL-terminate on truncation */
|
|
|
|
/* These multiple strlen calls are inefficient, but dwarfed by the RSA
|
|
|
|
signature.
|
|
|
|
*/
|
|
|
|
i = strlen(s);
|
|
|
|
|
|
|
|
dump_directory_to_string_impl(s+i, maxlen-i, dir);
|
|
|
|
i = strlen(s);
|
|
|
|
cp = s + i;
|
|
|
|
|
|
|
|
if (crypto_SHA_digest(s, i, digest))
|
|
|
|
return -1;
|
|
|
|
if (crypto_pk_private_sign(private_key, digest, 20, signature))
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
|
|
|
|
strncpy(cp,
|
|
|
|
"directory-signature\n-----BEGIN SIGNATURE-----\n", maxlen-i);
|
|
|
|
|
|
|
|
i = strlen(s);
|
|
|
|
cp = s+i;
|
|
|
|
if (base64_encode(cp, maxlen-i, signature, 128) < 0)
|
|
|
|
return -1;
|
2002-09-26 14:09:10 +02:00
|
|
|
|
2003-05-07 04:13:23 +02:00
|
|
|
i = strlen(s);
|
|
|
|
cp = s+i;
|
|
|
|
strcat(cp, "-----END SIGNATURE-----\n");
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
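/* A minimal usage sketch: build and sign the directory into a fixed
 * buffer. The 32768 size is an arbitrary assumption, and getprivatekey()
 * is assumed as the counterpart of the setprivatekey() call above. */
#if 0
{
  char buf[32768];
  directory_t dir;
  /* fill dir.routers / dir.n_routers the way dump_directory_to_string()
   * does above, then: */
  if(dump_signed_directory_to_string_impl(buf, sizeof(buf), &dir,
                                          getprivatekey()) < 0)
    log(LOG_ERR,"couldn't build the signed directory");
}
#endif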
|
|
|
|
|
|
|
|
void dump_directory_to_string_impl(char *s, int maxlen, directory_t *directory) {
|
|
|
|
int i;
|
|
|
|
routerinfo_t *router;
|
|
|
|
int written;
|
|
|
|
|
|
|
|
for (i = 0; i < directory->n_routers; ++i) {
|
|
|
|
router = directory->routers[i];
|
2002-09-26 15:17:14 +02:00
|
|
|
written = dump_router_to_string(s, maxlen, router);
|
2002-09-26 14:09:10 +02:00
|
|
|
|
2003-04-08 08:44:38 +02:00
|
|
|
if(written < 0) {
|
2002-09-26 14:09:10 +02:00
|
|
|
log(LOG_ERR,"dump_directory_to_string(): tried to exceed string length.");
|
|
|
|
s[maxlen-1] = 0; /* make sure it's null terminated */
|
|
|
|
return;
|
|
|
|
}
|
2003-04-08 08:44:38 +02:00
|
|
|
|
2002-09-26 14:09:10 +02:00
|
|
|
maxlen -= written;
|
|
|
|
s += written;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2003-04-16 08:18:31 +02:00
|
|
|
void daemonize(void) {
|
2003-03-17 03:41:36 +01:00
|
|
|
/* Fork; parent exits. */
|
|
|
|
if (fork())
|
|
|
|
exit(0);
|
|
|
|
|
|
|
|
/* Create new session; make sure we never get a terminal */
|
|
|
|
setsid();
|
|
|
|
if (fork())
|
|
|
|
exit(0);
|
|
|
|
|
|
|
|
chdir("/");
|
|
|
|
umask(000);
|
|
|
|
|
|
|
|
fclose(stdin);
|
|
|
|
fclose(stdout);
|
|
|
|
fclose(stderr);
|
|
|
|
}
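/* Note: fclose() on the standard streams frees fds 0-2, so the next
 * socket or file opened silently becomes "stdout". A common hardening
 * step (a sketch, not something this code does) is to reattach them
 * to /dev/null instead: */
#if 0
#include <fcntl.h>
#include <unistd.h>
static void detach_stdio(void) {
  int fd = open("/dev/null", O_RDWR);
  if(fd < 0)
    return;
  dup2(fd, 0); /* stdin */
  dup2(fd, 1); /* stdout */
  dup2(fd, 2); /* stderr */
  if(fd > 2)
    close(fd);
}
#endif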
|
|
|
|
|
2003-04-07 04:12:02 +02:00
|
|
|
int tor_main(int argc, char *argv[]) {
|
2002-06-27 00:45:49 +02:00
|
|
|
int retval = 0;
|
|
|
|
|
2002-11-23 07:49:01 +01:00
|
|
|
if(getconfig(argc,argv,&options))
|
|
|
|
exit(1);
|
2002-07-12 20:14:17 +02:00
|
|
|
log(options.loglevel,NULL); /* assign logging severity level from options */
|
2002-06-27 00:45:49 +02:00
|
|
|
|
2003-03-17 03:41:36 +01:00
|
|
|
if (options.Daemon)
|
|
|
|
daemonize();
|
|
|
|
|
2003-03-18 02:49:55 +01:00
|
|
|
if(options.ORPort) { /* only spawn dns handlers if we're a router */
|
major overhaul: dns slave subsystem, topics
on startup, it forks off a master dns handler, which forks off dns
slaves (like the apache model). slaves are spawned as load increases,
and then reused; excess slaves are currently never killed.
implemented topics. each topic has a receive window in each direction
at each edge of the circuit, and sends sendme's at the data level, as
per before. each circuit also has receive windows in each direction at
each hop; an edge sends a circuit-level sendme as soon as enough data
cells have arrived (regardless of whether the data cells were flushed
to the exit conns). removed the 'connected' cell type, since it's now
a topic command within data cells.
at the edge of the circuit, there can be multiple connections associated
with a single circuit. you find them via the linked list conn->next_topic.
currently each new ap connection starts its own circuit, so we ought
to see comparable performance to what we had before. but that's only
because i haven't written the code to reattach to old circuits. please
try to break it as-is, and then i'll make it reuse the same circuit and
we'll try to break that.
svn:r152
2003-01-26 10:02:24 +01:00
|
|
|
if(dns_master_start() < 0) {
|
|
|
|
log(LOG_ERR,"main(): We're running without a dns handler. Bad news.");
|
|
|
|
}
|
|
|
|
}
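/* A sketch of the apache-style model the commit message above
 * describes: the master forks a slave connected over a unix
 * socketpair, reuses idle slaves, and forks more as load increases.
 * dns_slave_loop() is hypothetical; socketpair()/fork() are real. */
#if 0
#include <sys/types.h>
#include <sys/socket.h>
#include <unistd.h>
static int spawn_dns_slave(int *master_fd) {
  int fds[2];
  pid_t pid;

  if(socketpair(AF_UNIX, SOCK_STREAM, 0, fds) < 0)
    return -1;
  pid = fork();
  if(pid < 0)
    return -1;
  if(pid == 0) { /* slave: resolve names from the socket forever */
    close(fds[0]);
    dns_slave_loop(fds[1]); /* hypothetical resolver loop */
    _exit(0);
  }
  close(fds[1]);
  *master_fd = fds[0]; /* master talks to this slave on master_fd */
  return 0;
}
#endif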
|
|
|
|
|
2003-03-04 05:36:37 +01:00
|
|
|
init_cache_tree(); /* initialize the dns resolve tree */
|
2003-01-26 10:02:24 +01:00
|
|
|
|
|
|
|
signal (SIGINT, catch); /* catch kills so we can exit cleanly */
|
|
|
|
signal (SIGTERM, catch);
|
|
|
|
signal (SIGUSR1, catch); /* to dump stats to stdout */
|
|
|
|
signal (SIGHUP, catch); /* to reload directory */
|
|
|
|
|
|
|
|
crypto_global_init();
|
2002-06-27 00:45:49 +02:00
|
|
|
retval = do_main_loop();
|
2002-08-22 09:30:03 +02:00
|
|
|
crypto_global_cleanup();
|
2002-06-27 00:45:49 +02:00
|
|
|
|
|
|
|
return retval;
|
|
|
|
}
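/* The entry point is tor_main() rather than main(), so the real main()
 * presumably lives in another file as a thin wrapper; a sketch: */
#if 0
int main(int argc, char *argv[]) {
  return tor_main(argc, argv);
}
#endif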
|
|
|
|
|
2003-04-07 04:12:02 +02:00
|
|
|
/*
|
|
|
|
Local Variables:
|
|
|
|
mode:c
|
|
|
|
indent-tabs-mode:nil
|
|
|
|
c-basic-offset:2
|
|
|
|
End:
|
|
|
|
*/
|