2003-10-08 04:04:08 +02:00
|
|
|
/* Copyright 2001,2002,2003 Roger Dingledine, Matej Pfajfar. */
|
Implemented link padding and receiver token buckets
Each socket reads at most 'bandwidth' bytes per second sustained, but
can handle bursts of up to 10*bandwidth bytes.
Cells are now sent out at evenly-spaced intervals, with padding sent
out otherwise. Set Linkpadding=0 in the rc file to send cells as soon
as they're available (and to never send padding cells).
Added license/copyrights statements at the top of most files.
router->min and router->max have been merged into a single 'bandwidth'
value. We should make the routerinfo_t reflect this (want to do that,
Mat?)
As the bandwidth increases, and we want to stop sleeping more and more
frequently to send a single cell, cpu usage goes up. At 128kB/s we're
pretty much calling poll with a timeout of 1ms or even 0ms. The current
code takes a timeout of 0-9ms and makes it 10ms. prepare_for_poll()
handles everything that should have happened in the past, so as long as
our buffers don't get too full in that 10ms, we're ok.
Speaking of too full, if you run three servers at 100kB/s with -l debug,
it spends too much time printing debugging messages to be able to keep
up with the cells. The outbuf ultimately fills up and it kills that
connection. If you run with -l err, it works fine up through 500kB/s and
probably beyond. Down the road we'll want to teach it to recognize when
an outbuf is getting full, and back off.
svn:r50
2002-07-16 03:12:15 +02:00
|
|
|
/* See LICENSE for licensing information */
|
|
|
|
/* $Id$ */
|
2002-06-27 00:45:49 +02:00
|
|
|
|
2004-05-09 18:47:25 +02:00
|
|
|
/**
|
|
|
|
* \file main.c
|
|
|
|
* \brief Tor main loop and startup functions.
|
|
|
|
**/
|
2004-05-05 23:32:43 +02:00
|
|
|
|
2002-06-27 00:45:49 +02:00
|
|
|
#include "or.h"
|
|
|
|
|
2004-02-29 02:31:33 +01:00
|
|
|
/********* PROTOTYPES **********/
|
2003-07-05 07:46:06 +02:00
|
|
|
|
2003-10-15 20:28:32 +02:00
|
|
|
static void dumpstats(int severity); /* log stats */
|
2003-11-13 07:49:25 +01:00
|
|
|
static int init_from_config(int argc, char **argv);
|
2003-07-05 07:46:06 +02:00
|
|
|
|
2002-06-27 00:45:49 +02:00
|
|
|
/********* START VARIABLES **********/
|
|
|
|
|
2004-05-10 06:34:48 +02:00
|
|
|
/* declared in connection.c */
|
2003-09-25 12:42:07 +02:00
|
|
|
extern char *conn_state_to_string[][_CONN_TYPE_MAX+1];
|
2003-08-11 22:22:48 +02:00
|
|
|
|
2004-05-10 12:27:54 +02:00
|
|
|
or_options_t options; /**< Command-line and config-file options. */
|
|
|
|
int global_read_bucket; /**< Max number of bytes I can read this second. */
|
2002-06-27 00:45:49 +02:00
|
|
|
|
2004-05-09 18:47:25 +02:00
|
|
|
/** What was the read bucket before the last call to prepare_for_poll?
|
2004-05-05 23:32:43 +02:00
|
|
|
* (used to determine how many bytes we've read). */
|
2003-10-02 22:00:38 +02:00
|
|
|
static int stats_prev_global_read_bucket;
|
2004-05-09 18:47:25 +02:00
|
|
|
/** How many bytes have we read since we started the process? */
|
2003-10-02 22:00:38 +02:00
|
|
|
static uint64_t stats_n_bytes_read = 0;
|
2004-05-09 18:47:25 +02:00
|
|
|
/** How many seconds have we been running? */
|
2003-10-02 22:00:38 +02:00
|
|
|
static long stats_n_seconds_reading = 0;
|
|
|
|
|
2004-05-09 18:47:25 +02:00
|
|
|
/** Array of all open connections; each element corresponds to the element of
|
2004-05-05 23:32:43 +02:00
|
|
|
* poll_array in the same position. The first nfds elements are valid. */
|
2002-07-05 08:27:23 +02:00
|
|
|
static connection_t *connection_array[MAXCONNECTIONS] =
|
2002-06-27 00:45:49 +02:00
|
|
|
{ NULL };
|
|
|
|
|
2004-05-09 18:47:25 +02:00
|
|
|
/** Array of pollfd objects for calls to poll(). */
|
2002-09-03 20:44:24 +02:00
|
|
|
static struct pollfd poll_array[MAXCONNECTIONS];
|
2002-06-27 00:45:49 +02:00
|
|
|
|
2004-05-10 12:27:54 +02:00
|
|
|
static int nfds=0; /**< Number of connections currently active. */
|
2002-06-27 00:45:49 +02:00
|
|
|
|
2003-08-12 10:04:31 +02:00
|
|
|
#ifndef MS_WINDOWS /* do signal stuff only on unix */
|
2004-05-10 12:27:54 +02:00
|
|
|
static int please_dumpstats=0; /**< Whether we should dump stats during the loop. */
|
|
|
|
static int please_reset=0; /**< Whether we just got a sighup. */
|
|
|
|
static int please_reap_children=0; /**< Whether we should waitpid for exited children. */
|
2003-08-12 10:04:31 +02:00
|
|
|
#endif /* signal stuff */
|
2002-09-22 00:41:48 +02:00
|
|
|
|
2004-05-09 18:47:25 +02:00
|
|
|
/** We set this to 1 when we've fetched a dir, to know whether to complain
|
2004-03-27 01:15:09 +01:00
|
|
|
* yet about unrecognized nicknames in entrynodes, exitnodes, etc.
|
|
|
|
* Also, we don't try building circuits unless this is 1. */
|
2004-05-05 23:32:43 +02:00
|
|
|
int has_fetched_directory=0;
|
2004-02-29 02:31:33 +01:00
|
|
|
|
2004-05-09 18:47:25 +02:00
|
|
|
/** We set this to 1 when we've opened a circuit, so we can print a log
|
2004-02-29 02:31:33 +01:00
|
|
|
* entry to inform the user that Tor is working. */
|
2004-05-05 23:32:43 +02:00
|
|
|
int has_completed_circuit=0;
|
2004-02-29 02:31:33 +01:00
|
|
|
|
2004-06-12 23:43:02 +02:00
|
|
|
#ifdef MS_WINDOWS
|
|
|
|
SERVICE_STATUS service_status;
|
|
|
|
SERVICE_STATUS_HANDLE hStatus;
|
|
|
|
#endif
|
|
|
|
|
2002-06-27 00:45:49 +02:00
|
|
|
/********* END VARIABLES ************/
|
|
|
|
|
2002-07-05 08:27:23 +02:00
|
|
|
/****************************************************************************
|
|
|
|
*
|
|
|
|
* This section contains accessors and other methods on the connection_array
|
|
|
|
* and poll_array variables (which are global within this file and unavailable
|
|
|
|
* outside it).
|
|
|
|
*
|
|
|
|
****************************************************************************/
|
|
|
|
|
2004-05-09 18:47:25 +02:00
|
|
|
/** Add <b>conn</b> to the array of connections that we can poll on. The
|
2004-05-05 23:32:43 +02:00
|
|
|
* connection's socket must be set; the connection starts out
|
|
|
|
* non-reading and non-writing.
|
|
|
|
*/
|
2002-06-27 00:45:49 +02:00
|
|
|
int connection_add(connection_t *conn) {
|
2004-04-25 22:37:37 +02:00
|
|
|
tor_assert(conn);
|
2004-05-05 03:26:57 +02:00
|
|
|
tor_assert(conn->s >= 0);
|
2002-06-27 00:45:49 +02:00
|
|
|
|
2002-09-03 20:36:40 +02:00
|
|
|
if(nfds >= options.MaxConn-1) {
|
2004-03-18 20:22:56 +01:00
|
|
|
log_fn(LOG_WARN,"failing because nfds is too high.");
|
2002-06-27 00:45:49 +02:00
|
|
|
return -1;
|
|
|
|
}
|
2003-12-05 10:51:49 +01:00
|
|
|
|
2004-05-06 13:08:04 +02:00
|
|
|
tor_assert(conn->poll_index == -1); /* can only connection_add once */
|
2002-06-27 00:45:49 +02:00
|
|
|
conn->poll_index = nfds;
|
|
|
|
connection_array[nfds] = conn;
|
|
|
|
|
2004-05-05 03:26:57 +02:00
|
|
|
poll_array[nfds].fd = conn->s;
|
2004-05-06 13:08:04 +02:00
|
|
|
|
|
|
|
/* zero these out here, because otherwise we'll inherit values from the previously freed one */
|
2002-06-27 00:45:49 +02:00
|
|
|
poll_array[nfds].events = 0;
|
|
|
|
poll_array[nfds].revents = 0;
|
|
|
|
|
|
|
|
nfds++;
|
|
|
|
|
2004-03-18 20:22:56 +01:00
|
|
|
log_fn(LOG_INFO,"new conn type %s, socket %d, nfds %d.",
|
2004-03-12 14:00:34 +01:00
|
|
|
CONN_TYPE_TO_STRING(conn->type), conn->s, nfds);
|
2002-06-27 00:45:49 +02:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2004-05-09 18:47:25 +02:00
|
|
|
/** Remove the connection from the global list, and remove the
|
2003-10-09 20:45:14 +02:00
|
|
|
* corresponding poll entry. Calling this function will shift the last
|
|
|
|
* connection (if any) into the position occupied by conn.
|
|
|
|
*/
|
2002-06-27 00:45:49 +02:00
|
|
|
int connection_remove(connection_t *conn) {
|
|
|
|
int current_index;
|
|
|
|
|
2004-04-25 22:37:37 +02:00
|
|
|
tor_assert(conn);
|
|
|
|
tor_assert(nfds>0);
|
2002-06-27 00:45:49 +02:00
|
|
|
|
2004-03-11 07:19:08 +01:00
|
|
|
log_fn(LOG_INFO,"removing socket %d (type %s), nfds now %d",
|
2004-03-12 14:00:34 +01:00
|
|
|
conn->s, CONN_TYPE_TO_STRING(conn->type), nfds-1);
|
2002-06-27 00:45:49 +02:00
|
|
|
|
2004-05-06 13:08:04 +02:00
|
|
|
tor_assert(conn->poll_index >= 0);
|
2002-06-27 00:45:49 +02:00
|
|
|
current_index = conn->poll_index;
|
|
|
|
if(current_index == nfds-1) { /* this is the end */
|
|
|
|
nfds--;
|
|
|
|
return 0;
|
2003-12-05 10:51:49 +01:00
|
|
|
}
|
2002-06-27 00:45:49 +02:00
|
|
|
|
2004-03-18 20:22:56 +01:00
|
|
|
/* replace this one with the one at the end */
|
2002-06-27 00:45:49 +02:00
|
|
|
nfds--;
|
2003-12-05 10:51:49 +01:00
|
|
|
poll_array[current_index].fd = poll_array[nfds].fd;
|
2002-06-27 00:45:49 +02:00
|
|
|
poll_array[current_index].events = poll_array[nfds].events;
|
|
|
|
poll_array[current_index].revents = poll_array[nfds].revents;
|
|
|
|
connection_array[current_index] = connection_array[nfds];
|
|
|
|
connection_array[current_index]->poll_index = current_index;
|
|
|
|
|
2003-12-05 10:51:49 +01:00
|
|
|
return 0;
|
2002-06-27 00:45:49 +02:00
|
|
|
}
|
|
|
|
|
2004-06-02 00:09:58 +02:00
|
|
|
/** Return true iff conn is in the current poll array. */
|
|
|
|
int connection_in_array(connection_t *conn) {
|
|
|
|
int i;
|
|
|
|
for (i=0; i<nfds; ++i) {
|
|
|
|
if (conn==connection_array[i])
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2004-05-09 18:47:25 +02:00
|
|
|
/** Set <b>*array</b> to an array of all connections, and <b>*n</b>
|
|
|
|
* to the length of the array. <b>*array</b> and <b>*n</b> must not
|
|
|
|
* be modified.
|
2004-05-05 23:32:43 +02:00
|
|
|
*/
|
2003-09-30 21:06:22 +02:00
|
|
|
void get_connection_array(connection_t ***array, int *n) {
|
|
|
|
*array = connection_array;
|
|
|
|
*n = nfds;
|
2003-06-25 09:19:30 +02:00
|
|
|
}
|
|
|
|
|
2004-05-09 18:47:25 +02:00
|
|
|
/** Set the event mask on <b>conn</b> to <b>events</b>. (The form of
|
|
|
|
* the event mask is as for poll().)
|
2004-05-05 23:32:43 +02:00
|
|
|
*/
|
2002-06-27 00:45:49 +02:00
|
|
|
void connection_watch_events(connection_t *conn, short events) {
|
|
|
|
|
2004-05-06 13:08:04 +02:00
|
|
|
tor_assert(conn && conn->poll_index >= 0 && conn->poll_index < nfds);
|
2002-06-27 00:45:49 +02:00
|
|
|
|
|
|
|
poll_array[conn->poll_index].events = events;
|
|
|
|
}
|
|
|
|
|
2004-05-09 18:47:25 +02:00
|
|
|
/** Return true iff <b>conn</b> is listening for read events. */
|
2003-09-07 12:24:40 +02:00
|
|
|
int connection_is_reading(connection_t *conn) {
|
2004-05-06 13:08:04 +02:00
|
|
|
tor_assert(conn && conn->poll_index >= 0);
|
2003-09-07 12:24:40 +02:00
|
|
|
return poll_array[conn->poll_index].events & POLLIN;
|
|
|
|
}
|
|
|
|
|
2004-05-09 18:47:25 +02:00
|
|
|
/** Tell the main loop to stop notifying <b>conn</b> of any read events. */
|
Implemented link padding and receiver token buckets
Each socket reads at most 'bandwidth' bytes per second sustained, but
can handle bursts of up to 10*bandwidth bytes.
Cells are now sent out at evenly-spaced intervals, with padding sent
out otherwise. Set Linkpadding=0 in the rc file to send cells as soon
as they're available (and to never send padding cells).
Added license/copyrights statements at the top of most files.
router->min and router->max have been merged into a single 'bandwidth'
value. We should make the routerinfo_t reflect this (want to do that,
Mat?)
As the bandwidth increases, and we want to stop sleeping more and more
frequently to send a single cell, cpu usage goes up. At 128kB/s we're
pretty much calling poll with a timeout of 1ms or even 0ms. The current
code takes a timeout of 0-9ms and makes it 10ms. prepare_for_poll()
handles everything that should have happened in the past, so as long as
our buffers don't get too full in that 10ms, we're ok.
Speaking of too full, if you run three servers at 100kB/s with -l debug,
it spends too much time printing debugging messages to be able to keep
up with the cells. The outbuf ultimately fills up and it kills that
connection. If you run with -l err, it works fine up through 500kB/s and
probably beyond. Down the road we'll want to teach it to recognize when
an outbuf is getting full, and back off.
svn:r50
2002-07-16 03:12:15 +02:00
|
|
|
void connection_stop_reading(connection_t *conn) {
|
2004-05-06 13:08:04 +02:00
|
|
|
tor_assert(conn && conn->poll_index >= 0 && conn->poll_index < nfds);
|
Implemented link padding and receiver token buckets
Each socket reads at most 'bandwidth' bytes per second sustained, but
can handle bursts of up to 10*bandwidth bytes.
Cells are now sent out at evenly-spaced intervals, with padding sent
out otherwise. Set Linkpadding=0 in the rc file to send cells as soon
as they're available (and to never send padding cells).
Added license/copyrights statements at the top of most files.
router->min and router->max have been merged into a single 'bandwidth'
value. We should make the routerinfo_t reflect this (want to do that,
Mat?)
As the bandwidth increases, and we want to stop sleeping more and more
frequently to send a single cell, cpu usage goes up. At 128kB/s we're
pretty much calling poll with a timeout of 1ms or even 0ms. The current
code takes a timeout of 0-9ms and makes it 10ms. prepare_for_poll()
handles everything that should have happened in the past, so as long as
our buffers don't get too full in that 10ms, we're ok.
Speaking of too full, if you run three servers at 100kB/s with -l debug,
it spends too much time printing debugging messages to be able to keep
up with the cells. The outbuf ultimately fills up and it kills that
connection. If you run with -l err, it works fine up through 500kB/s and
probably beyond. Down the road we'll want to teach it to recognize when
an outbuf is getting full, and back off.
svn:r50
2002-07-16 03:12:15 +02:00
|
|
|
|
2002-07-18 08:37:58 +02:00
|
|
|
log(LOG_DEBUG,"connection_stop_reading() called.");
|
Implemented link padding and receiver token buckets
Each socket reads at most 'bandwidth' bytes per second sustained, but
can handle bursts of up to 10*bandwidth bytes.
Cells are now sent out at evenly-spaced intervals, with padding sent
out otherwise. Set Linkpadding=0 in the rc file to send cells as soon
as they're available (and to never send padding cells).
Added license/copyrights statements at the top of most files.
router->min and router->max have been merged into a single 'bandwidth'
value. We should make the routerinfo_t reflect this (want to do that,
Mat?)
As the bandwidth increases, and we want to stop sleeping more and more
frequently to send a single cell, cpu usage goes up. At 128kB/s we're
pretty much calling poll with a timeout of 1ms or even 0ms. The current
code takes a timeout of 0-9ms and makes it 10ms. prepare_for_poll()
handles everything that should have happened in the past, so as long as
our buffers don't get too full in that 10ms, we're ok.
Speaking of too full, if you run three servers at 100kB/s with -l debug,
it spends too much time printing debugging messages to be able to keep
up with the cells. The outbuf ultimately fills up and it kills that
connection. If you run with -l err, it works fine up through 500kB/s and
probably beyond. Down the road we'll want to teach it to recognize when
an outbuf is getting full, and back off.
svn:r50
2002-07-16 03:12:15 +02:00
|
|
|
if(poll_array[conn->poll_index].events & POLLIN)
|
|
|
|
poll_array[conn->poll_index].events -= POLLIN;
|
|
|
|
}
|
|
|
|
|
2004-05-09 18:47:25 +02:00
|
|
|
/** Tell the main loop to start notifying <b>conn</b> of any read events. */
|
Implemented link padding and receiver token buckets
Each socket reads at most 'bandwidth' bytes per second sustained, but
can handle bursts of up to 10*bandwidth bytes.
Cells are now sent out at evenly-spaced intervals, with padding sent
out otherwise. Set Linkpadding=0 in the rc file to send cells as soon
as they're available (and to never send padding cells).
Added license/copyrights statements at the top of most files.
router->min and router->max have been merged into a single 'bandwidth'
value. We should make the routerinfo_t reflect this (want to do that,
Mat?)
As the bandwidth increases, and we want to stop sleeping more and more
frequently to send a single cell, cpu usage goes up. At 128kB/s we're
pretty much calling poll with a timeout of 1ms or even 0ms. The current
code takes a timeout of 0-9ms and makes it 10ms. prepare_for_poll()
handles everything that should have happened in the past, so as long as
our buffers don't get too full in that 10ms, we're ok.
Speaking of too full, if you run three servers at 100kB/s with -l debug,
it spends too much time printing debugging messages to be able to keep
up with the cells. The outbuf ultimately fills up and it kills that
connection. If you run with -l err, it works fine up through 500kB/s and
probably beyond. Down the road we'll want to teach it to recognize when
an outbuf is getting full, and back off.
svn:r50
2002-07-16 03:12:15 +02:00
|
|
|
void connection_start_reading(connection_t *conn) {
|
2004-05-06 13:08:04 +02:00
|
|
|
tor_assert(conn && conn->poll_index >= 0 && conn->poll_index < nfds);
|
Implemented link padding and receiver token buckets
Each socket reads at most 'bandwidth' bytes per second sustained, but
can handle bursts of up to 10*bandwidth bytes.
Cells are now sent out at evenly-spaced intervals, with padding sent
out otherwise. Set Linkpadding=0 in the rc file to send cells as soon
as they're available (and to never send padding cells).
Added license/copyrights statements at the top of most files.
router->min and router->max have been merged into a single 'bandwidth'
value. We should make the routerinfo_t reflect this (want to do that,
Mat?)
As the bandwidth increases, and we want to stop sleeping more and more
frequently to send a single cell, cpu usage goes up. At 128kB/s we're
pretty much calling poll with a timeout of 1ms or even 0ms. The current
code takes a timeout of 0-9ms and makes it 10ms. prepare_for_poll()
handles everything that should have happened in the past, so as long as
our buffers don't get too full in that 10ms, we're ok.
Speaking of too full, if you run three servers at 100kB/s with -l debug,
it spends too much time printing debugging messages to be able to keep
up with the cells. The outbuf ultimately fills up and it kills that
connection. If you run with -l err, it works fine up through 500kB/s and
probably beyond. Down the road we'll want to teach it to recognize when
an outbuf is getting full, and back off.
svn:r50
2002-07-16 03:12:15 +02:00
|
|
|
poll_array[conn->poll_index].events |= POLLIN;
|
|
|
|
}
|
|
|
|
|
2004-05-09 18:47:25 +02:00
|
|
|
/** Return true iff <b>conn</b> is listening for write events. */
|
2004-02-27 05:42:14 +01:00
|
|
|
int connection_is_writing(connection_t *conn) {
|
|
|
|
return poll_array[conn->poll_index].events & POLLOUT;
|
|
|
|
}
|
|
|
|
|
2004-05-09 18:47:25 +02:00
|
|
|
/** Tell the main loop to stop notifying <b>conn</b> of any write events. */
|
2002-07-18 08:37:58 +02:00
|
|
|
void connection_stop_writing(connection_t *conn) {
|
2004-05-06 13:08:04 +02:00
|
|
|
tor_assert(conn && conn->poll_index >= 0 && conn->poll_index < nfds);
|
2002-07-18 08:37:58 +02:00
|
|
|
if(poll_array[conn->poll_index].events & POLLOUT)
|
|
|
|
poll_array[conn->poll_index].events -= POLLOUT;
|
|
|
|
}
|
|
|
|
|
2004-05-09 18:47:25 +02:00
|
|
|
/** Tell the main loop to start notifying <b>conn</b> of any write events. */
|
2002-07-18 08:37:58 +02:00
|
|
|
void connection_start_writing(connection_t *conn) {
|
2004-05-06 13:08:04 +02:00
|
|
|
tor_assert(conn && conn->poll_index >= 0 && conn->poll_index < nfds);
|
2002-07-18 08:37:58 +02:00
|
|
|
poll_array[conn->poll_index].events |= POLLOUT;
|
|
|
|
}
|
|
|
|
|
2004-05-09 18:47:25 +02:00
|
|
|
/** Called when the connection at connection_array[i] has a read event,
 * or it has pending tls data waiting to be read: checks for validity,
 * catches numerous errors, and dispatches to connection_handle_read.
 */
static void conn_read(int i) {
  connection_t *conn = connection_array[i];

  /* A marked connection is already on its way out; don't read more. */
  if (conn->marked_for_close)
    return;

  /* see http://www.greenend.org.uk/rjk/2001/06/poll.html for
   * discussion of POLLIN vs POLLHUP */
  if(!(poll_array[i].revents & (POLLIN|POLLHUP|POLLERR)))
    /* No poll event fired. We may still need to read: the TLS layer can
     * hold buffered bytes that poll() cannot see (see r481 log above).
     * Only skip when the conn isn't reading OR has no pending TLS data. */
    if(!connection_is_reading(conn) ||
       !connection_has_pending_tls_data(conn))
      return; /* this conn should not read */

  log_fn(LOG_DEBUG,"socket %d wants to read.",conn->s);

  assert_connection_ok(conn, time(NULL));
  assert_all_pending_dns_resolves_ok();

  if(
    /* XXX does POLLHUP also mean it's definitely broken? */
#ifdef MS_WINDOWS
    /* On Windows, treat POLLERR itself as fatal before even trying the
     * read; the short-circuit || skips connection_handle_read then. */
    (poll_array[i].revents & POLLERR) ||
#endif
    connection_handle_read(conn) < 0) {
      if (!conn->marked_for_close) {
        /* this connection is broken. remove it */
        log_fn(LOG_WARN,"Unhandled error on read for %s connection (fd %d); removing",
               CONN_TYPE_TO_STRING(conn->type), conn->s);
        connection_mark_for_close(conn);
      }
  }
  /* Re-check invariants: the read may have changed conn's state. */
  assert_connection_ok(conn, time(NULL));
  assert_all_pending_dns_resolves_ok();
}
|
|
|
|
|
2004-05-09 18:47:25 +02:00
|
|
|
/** Called when the connection at connection_array[i] has a write event:
|
2004-05-05 23:32:43 +02:00
|
|
|
* checks for validity, catches numerous errors, and dispatches to
|
|
|
|
* connection_handle_write.
|
|
|
|
*/
|
2003-07-05 07:46:06 +02:00
|
|
|
static void conn_write(int i) {
|
2002-06-27 00:45:49 +02:00
|
|
|
connection_t *conn;
|
|
|
|
|
2003-09-05 08:04:03 +02:00
|
|
|
if(!(poll_array[i].revents & POLLOUT))
|
|
|
|
return; /* this conn doesn't want to write */
|
|
|
|
|
2003-07-05 07:46:06 +02:00
|
|
|
conn = connection_array[i];
|
2004-03-03 09:46:18 +01:00
|
|
|
log_fn(LOG_DEBUG,"socket %d wants to write.",conn->s);
|
2004-02-28 05:11:53 +01:00
|
|
|
if (conn->marked_for_close)
|
|
|
|
return;
|
2002-06-27 00:45:49 +02:00
|
|
|
|
2003-09-23 21:47:41 +02:00
|
|
|
assert_connection_ok(conn, time(NULL));
|
2004-04-09 11:39:42 +02:00
|
|
|
assert_all_pending_dns_resolves_ok();
|
2003-09-23 21:47:41 +02:00
|
|
|
|
2004-02-28 05:34:27 +01:00
|
|
|
if (connection_handle_write(conn) < 0) {
|
|
|
|
if (!conn->marked_for_close) {
|
|
|
|
/* this connection is broken. remove it. */
|
2004-02-28 05:44:48 +01:00
|
|
|
log_fn(LOG_WARN,"Unhandled error on read for %s connection (fd %d); removing",
|
2004-03-11 07:19:08 +01:00
|
|
|
CONN_TYPE_TO_STRING(conn->type), conn->s);
|
2004-03-03 05:54:16 +01:00
|
|
|
conn->has_sent_end = 1; /* otherwise we cry wolf about duplicate close */
|
2004-05-18 18:54:04 +02:00
|
|
|
/* XXX do we need a close-immediate here, so we don't try to flush? */
|
2004-05-12 23:12:33 +02:00
|
|
|
connection_mark_for_close(conn);
|
2004-02-28 05:34:27 +01:00
|
|
|
}
|
2004-02-28 05:11:53 +01:00
|
|
|
}
|
|
|
|
assert_connection_ok(conn, time(NULL));
|
2004-04-09 11:39:42 +02:00
|
|
|
assert_all_pending_dns_resolves_ok();
|
2002-06-27 00:45:49 +02:00
|
|
|
}
|
|
|
|
|
2004-05-09 18:47:25 +02:00
|
|
|
/** If the connection at connection_array[i] is marked for close, then:
 * - If it has data that it wants to flush, try to flush it.
 * - If it _still_ has data to flush, and conn->hold_open_until_flushed is
 *   true, then leave the connection open and return.
 * - Otherwise, remove the connection from connection_array and from
 *   all other lists, close it, and free it.
 * If we remove the connection, then call conn_closed_if_marked at the new
 * connection at position i.
 */
static void conn_close_if_marked(int i) {
  connection_t *conn;
  int retval;

  conn = connection_array[i];
  assert_connection_ok(conn, time(NULL));
  assert_all_pending_dns_resolves_ok();
  if(!conn->marked_for_close)
    return; /* nothing to see here, move along */

  log_fn(LOG_INFO,"Cleaning up connection (fd %d).",conn->s);
  /* s < 0 means it's an incomplete edge connection, or that the socket
   * has already been closed as unflushable — skip flushing in that case. */
  if(conn->s >= 0 && connection_wants_to_flush(conn)) {
    /* Unflushed data without hold_open_until_flushed set is a likely bug
     * at the mark site; identify it by file:line for debugging. */
    if(!conn->hold_open_until_flushed)
      log_fn(LOG_WARN,
        "Conn (fd %d, type %s, state %d) marked, but wants to flush %d bytes. "
        "(Marked at %s:%d)",
        conn->s, CONN_TYPE_TO_STRING(conn->type), conn->state,
        conn->outbuf_flushlen, conn->marked_for_close_file, conn->marked_for_close);
    if(connection_speaks_cells(conn)) {
      /* OR connections flush through TLS — but only when the handshake
       * has completed. */
      if(conn->state == OR_CONN_STATE_OPEN) {
        retval = flush_buf_tls(conn->tls, conn->outbuf, &conn->outbuf_flushlen);
      } else
        retval = -1; /* never flush non-open broken tls connections */
    } else {
      retval = flush_buf(conn->s, conn->outbuf, &conn->outbuf_flushlen);
    }
    /* If the flush succeeded so far but data remains and the conn asked
     * to be held open, postpone the close until a later pass. */
    if(retval >= 0 &&
       conn->hold_open_until_flushed && connection_wants_to_flush(conn)) {
      log_fn(LOG_INFO,"Holding conn (fd %d) open for more flushing.",conn->s);
      /* XXX should we reset timestamp_lastwritten here? */
      return;
    }
    /* Falling through: we are closing anyway; report any bytes we drop. */
    if(connection_wants_to_flush(conn)) {
      log_fn(LOG_WARN,"Conn (fd %d, type %s, state %d) still wants to flush. Losing %d bytes! (Marked at %s:%d)",
             conn->s, CONN_TYPE_TO_STRING(conn->type), conn->state,
             (int)buf_datalen(conn->outbuf), conn->marked_for_close_file,
             conn->marked_for_close);
    }
  }
  /* if it's an edge conn, remove it from the list
   * of conn's on this circuit. If it's not on an edge,
   * flush and send destroys for all circuits on this conn
   */
  circuit_about_to_close_connection(conn);
  connection_about_to_close_connection(conn);
  /* connection_remove shifts the last conn into slot i — see recursion
   * note below. */
  connection_remove(conn);
  if(conn->type == CONN_TYPE_EXIT) {
    assert_connection_edge_not_dns_pending(conn);
  }
  connection_free(conn);
  if(i<nfds) { /* we just replaced the one at i with a new one.
                  process it too. */
    conn_close_if_marked(i);
  }
}
|
|
|
|
|
2004-05-09 18:47:25 +02:00
|
|
|
/** This function is called whenever we successfully pull down a directory */
|
2004-04-01 00:02:13 +02:00
|
|
|
void directory_has_arrived(void) {
|
|
|
|
|
2004-04-09 21:30:38 +02:00
|
|
|
log_fn(LOG_INFO, "A directory has arrived.");
|
2004-04-01 00:02:13 +02:00
|
|
|
|
2004-04-09 21:30:38 +02:00
|
|
|
has_fetched_directory=1;
|
|
|
|
|
|
|
|
if(options.ORPort) { /* connect to them all */
|
|
|
|
router_retry_connections();
|
|
|
|
}
|
2004-04-01 00:02:13 +02:00
|
|
|
}
|
|
|
|
|
/** Perform regular maintenance tasks for a single connection. This
 * function gets run once per second per connection by run_housekeeping.
 *
 * Specifically:
 *  - expires directory connections that have been wedged (no write) for
 *    5 minutes, flushing pending data first when possible;
 *  - for OR (cell-speaking) connections that have been idle for longer
 *    than options.KeepalivePeriod, either expires them (client with no
 *    circuits, or handshake never completed) or sends a PADDING cell as
 *    a keepalive.
 *
 * \param i   index of the connection in connection_array.
 * \param now current time, in seconds since the epoch.
 */
static void run_connection_housekeeping(int i, time_t now) {
  cell_t cell;
  connection_t *conn = connection_array[i];

  /* Expire any directory connections that haven't sent anything for 5 min */
  if(conn->type == CONN_TYPE_DIR &&
     !conn->marked_for_close &&
     conn->timestamp_lastwritten + 5*60 < now) {
    log_fn(LOG_WARN,"Expiring wedged directory conn (fd %d, purpose %d)", conn->s, conn->purpose);
    if (connection_wants_to_flush(conn)) {
      /* Try one last synchronous flush before giving up on it. */
      if(flush_buf(conn->s, conn->outbuf, &conn->outbuf_flushlen) < 0) {
        log_fn(LOG_WARN,"flushing expired directory conn failed.");
        /* Flush failed: tear the socket down immediately, then mark. */
        connection_close_immediate(conn);
        connection_mark_for_close(conn);
        /* */
      } else {
        /* XXXX Does this next part make sense, really? */
        connection_mark_for_close(conn);
        conn->hold_open_until_flushed = 1; /* give it a last chance */
      }
    } else {
      /* Nothing buffered; just mark it for close. */
      connection_mark_for_close(conn);
    }
    /* A directory conn never needs keepalive handling below. */
    return;
  }

  /* check connections to see whether we should send a keepalive, expire, or wait */
  if(!connection_speaks_cells(conn))
    return;

  /* If we haven't written to an OR connection for a while, then either nuke
     the connection or send a keepalive, depending. */
  if(now >= conn->timestamp_lastwritten + options.KeepalivePeriod) {
    if((!options.ORPort && !circuit_get_by_conn(conn)) ||
       (!connection_state_is_open(conn))) {
      /* we're an onion proxy, with no circuits; or our handshake has expired. kill it. */
      log_fn(LOG_INFO,"Expiring connection to %d (%s:%d).",
             i,conn->address, conn->port);
      /* flush anything waiting, e.g. a destroy for a just-expired circ */
      connection_mark_for_close(conn);
      conn->hold_open_until_flushed = 1;
    } else {
      /* either a full router, or we've got a circuit. send a padding cell. */
      log_fn(LOG_DEBUG,"Sending keepalive to (%s:%d)",
             conn->address, conn->port);
      /* A zeroed cell with command CELL_PADDING is the keepalive. */
      memset(&cell,0,sizeof(cell_t));
      cell.command = CELL_PADDING;
      connection_or_write_cell_to_buf(&cell, conn);
    }
  }
}
|
|
|
|
|
/** Perform regular maintenance tasks. This function gets run once per
 * second by prepare_for_poll.
 *
 * All periodic state (next directory fetch time, last service upload,
 * last certificate rotation) is kept in function-local statics.
 *
 * \param now current time, in seconds since the epoch.
 */
static void run_scheduled_events(time_t now) {
  static long time_to_fetch_directory = 0;
  static time_t last_uploaded_services = 0;
  static time_t last_rotated_certificate = 0;
  int i;

  /** 1a. Every MIN_ONION_KEY_LIFETIME seconds, rotate the onion keys,
   * shut down and restart all cpuworkers, and update the directory if
   * necessary.
   */
  if (options.ORPort && get_onion_key_set_at()+MIN_ONION_KEY_LIFETIME < now) {
    log_fn(LOG_INFO,"Rotating onion key.");
    rotate_onion_key();
    /* cpuworkers hold a copy of the onion key, so restart them. */
    cpuworkers_rotate();
    if (router_rebuild_descriptor()<0) {
      log_fn(LOG_WARN, "Couldn't rebuild router descriptor");
    }
    router_upload_dir_desc_to_dirservers();
  }

  /** 1b. Every MAX_SSL_KEY_LIFETIME seconds, we change our TLS context. */
  if (!last_rotated_certificate)
    last_rotated_certificate = now;  /* first call: start the clock now */
  if (options.ORPort && last_rotated_certificate+MAX_SSL_KEY_LIFETIME < now) {
    log_fn(LOG_INFO,"Rotating tls context.");
    if (tor_tls_context_new(get_identity_key(), 1, options.Nickname,
                            MAX_SSL_KEY_LIFETIME) < 0) {
      log_fn(LOG_WARN, "Error reinitializing TLS context");
    }
    last_rotated_certificate = now;
    /* XXXX We should rotate TLS connections as well; this code doesn't change
     * XXXX them at all. */
  }

  /** 1c. Every DirFetchPostPeriod seconds, we get a new directory and upload
   * our descriptor (if any). */
  if(time_to_fetch_directory < now) {
    /* it's time to fetch a new directory and/or post our descriptor */
    if(options.ORPort) {
      router_rebuild_descriptor();
      router_upload_dir_desc_to_dirservers();
    }
    routerlist_remove_old_routers(); /* purge obsolete entries */
    if(options.AuthoritativeDir) {
      /* We're a directory; dump any old descriptors. */
      dirserv_remove_old_servers();
      /* dirservers try to reconnect too, in case connections have failed */
      router_retry_connections();
    }
    directory_get_from_dirserver(DIR_PURPOSE_FETCH_DIR, NULL, 0);
    /* Force an upload of our descriptors every DirFetchPostPeriod seconds. */
    rend_services_upload(1);
    last_uploaded_services = now;
    rend_cache_clean(); /* should this go elsewhere? */
    time_to_fetch_directory = now + options.DirFetchPostPeriod;
  }

  /** 2. Every second, we examine pending circuits and prune the
   * ones which have been pending for more than a few seconds.
   * We do this before step 3, so it can try building more if
   * it's not comfortable with the number of available circuits.
   */
  circuit_expire_building(now);

  /** 2b. Also look at pending streams and prune the ones that 'began'
   * a long time ago but haven't gotten a 'connected' yet.
   * Do this before step 3, so we can put them back into pending
   * state to be picked up by the new circuit.
   */
  connection_ap_expire_beginning();

  /** 2c. And expire connections that we've held open for too long.
   */
  connection_expire_held_open();

  /** 3. Every second, we try a new circuit if there are no valid
   * circuits. Every NewCircuitPeriod seconds, we expire circuits
   * that became dirty more than NewCircuitPeriod seconds ago,
   * and we make a new circ if there are no clean circuits.
   */
  if(has_fetched_directory)
    circuit_build_needed_circs(now);

  /** 4. We do housekeeping for each connection... */
  for(i=0;i<nfds;i++) {
    run_connection_housekeeping(i, now);
  }

  /** 5. And remove any marked circuits... */
  circuit_close_all_marked();

  /** 6. And upload service descriptors for any services whose intro points
   * have changed, if the last upload was more than 5 seconds ago. */
  if (last_uploaded_services < now-5) {
    rend_services_upload(0);
    last_uploaded_services = now;
  }

  /** 7. and blow away any connections that need to die. have to do this now,
   * because if we marked a conn for close and left its socket -1, then
   * we'll pass it to poll/select and bad things will happen.
   */
  for(i=0;i<nfds;i++)
    conn_close_if_marked(i);
}
|
2002-09-26 14:09:10 +02:00
|
|
|
|
/** Called every time we're about to call tor_poll. Increments statistics,
 * and adjusts token buckets. Returns the number of milliseconds to use for
 * the poll() timeout.
 *
 * Once per wall-clock second this also runs run_scheduled_events().
 * Returns 0 (poll must not block) when any connection has TLS data
 * already buffered and is still reading; otherwise returns the number
 * of milliseconds remaining until the next second boundary.
 */
static int prepare_for_poll(void) {
  static long current_second = 0; /* from previous calls to gettimeofday */
  connection_t *conn;
  struct timeval now;
  int i;

  tor_gettimeofday(&now);

  /* Check how much bandwidth we've consumed, and increment the token
   * buckets. */
  /* bytes read since last call == how far the global read bucket drained */
  stats_n_bytes_read += stats_prev_global_read_bucket-global_read_bucket;
  connection_bucket_refill(&now);
  stats_prev_global_read_bucket = global_read_bucket;

  if(now.tv_sec > current_second) { /* the second has rolled over. check more stuff. */

    ++stats_n_seconds_reading;
    /* sanity-check DNS worker state before and after the scheduled events */
    assert_all_pending_dns_resolves_ok();
    run_scheduled_events(now.tv_sec);
    assert_all_pending_dns_resolves_ok();

    current_second = now.tv_sec; /* remember which second it is, for next time */
  }

  /* If TLS has already decrypted bytes sitting in its buffer, poll() would
   * never report them readable; return 0 so we service them immediately. */
  for(i=0;i<nfds;i++) {
    conn = connection_array[i];
    if(connection_has_pending_tls_data(conn) &&
       connection_is_reading(conn)) {
      log_fn(LOG_DEBUG,"sock %d has pending bytes.",conn->s);
      return 0; /* has pending bytes to read; don't let poll wait. */
    }
  }

  return (1000 - (now.tv_usec / 1000)); /* how many milliseconds til the next second? */
}
|
|
|
|
|
/** Configure the Tor process from the command line arguments and from the
 * configuration file.
 *
 * Order matters here: we switch uid/gid before touching the data
 * directory, start daemonizing before opening logs (so log fds belong to
 * the daemon), and only write the pidfile at the end.
 *
 * \param argc,argv the command line; may be (0, NULL) on reload since
 *        getconfig() caches them internally (see do_hup()).
 * \return 0 on success, -1 on failure.
 */
static int init_from_config(int argc, char **argv) {
  /* read the configuration file. */
  if(getconfig(argc,argv,&options)) {
    log_fn(LOG_ERR,"Reading config failed. For usage, try -h.");
    return -1;
  }

  /* Setuid/setgid as appropriate */
  if(options.User || options.Group) {
    if(switch_id(options.User, options.Group) != 0) {
      /* switch_id() logs its own failure reason */
      return -1;
    }
  }

  /* Ensure data directory is private; create if possible. */
  if (check_private_dir(get_data_directory(&options), 1) != 0) {
    log_fn(LOG_ERR, "Couldn't access/create private data directory %s",
           get_data_directory(&options));
    return -1;
  }

  /* Start backgrounding the process, if requested. */
  if (options.RunAsDaemon) {
    start_daemon(get_data_directory(&options));
  }

  /* Configure the log(s) */
  if (config_init_logs(&options)<0)
    return -1;
  /* Close the temporary log we used while starting up, if it isn't already
   * gone. */
  close_temp_logs();

  /* Set up our buckets */
  connection_bucket_init();
  /* baseline for the bytes-read statistics in prepare_for_poll() */
  stats_prev_global_read_bucket = global_read_bucket;

  /* Finish backgrounding the process */
  if(options.RunAsDaemon) {
    /* XXXX Can we delay this any more? */
    finish_daemon();
  }

  /* Write our pid to the pid file. If we do not have write permissions we
   * will log a warning */
  if(options.PidFile)
    write_pidfile(options.PidFile);

  return 0;
}
|
|
|
|
|
2004-05-09 18:47:25 +02:00
|
|
|
/** Called when we get a SIGHUP: reload configuration files and keys,
|
2004-05-05 23:32:43 +02:00
|
|
|
* retry all connections, re-upload all descriptors, and so on. */
|
2004-02-26 23:30:44 +01:00
|
|
|
static int do_hup(void) {
|
|
|
|
char keydir[512];
|
|
|
|
|
2004-03-30 05:15:53 +02:00
|
|
|
log_fn(LOG_NOTICE,"Received sighup. Reloading config.");
|
2004-02-29 04:52:38 +01:00
|
|
|
has_completed_circuit=0;
|
2004-06-02 21:18:37 +02:00
|
|
|
mark_logs_temp(); /* Close current logs once new logs are open. */
|
2004-02-26 23:30:44 +01:00
|
|
|
/* first, reload config variables, in case they've changed */
|
|
|
|
/* no need to provide argc/v, they've been cached inside init_from_config */
|
|
|
|
if (init_from_config(0, NULL) < 0) {
|
|
|
|
exit(1);
|
|
|
|
}
|
2004-04-09 19:48:09 +02:00
|
|
|
/* reload keys as needed for rendezvous services. */
|
2004-04-13 19:16:47 +02:00
|
|
|
if (rend_service_load_keys()<0) {
|
2004-04-09 19:48:09 +02:00
|
|
|
log_fn(LOG_ERR,"Error reloading rendezvous service keys");
|
|
|
|
exit(1);
|
|
|
|
}
|
2004-02-26 23:30:44 +01:00
|
|
|
if(retry_all_connections() < 0) {
|
|
|
|
log_fn(LOG_ERR,"Failed to bind one of the listener ports.");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
if(options.DirPort) {
|
|
|
|
/* reload the approved-routers file */
|
2004-06-30 18:37:08 +02:00
|
|
|
sprintf(keydir,"%s/approved-routers", get_data_directory(&options));
|
2004-02-26 23:30:44 +01:00
|
|
|
log_fn(LOG_INFO,"Reloading approved fingerprints from %s...",keydir);
|
|
|
|
if(dirserv_parse_fingerprint_file(keydir) < 0) {
|
|
|
|
log_fn(LOG_WARN, "Error reloading fingerprints. Continuing with old list.");
|
|
|
|
}
|
2004-04-09 21:37:50 +02:00
|
|
|
/* Since we aren't fetching a directory, we won't retry rendezvous points
|
|
|
|
* when it gets in. Try again now. */
|
2004-04-13 19:16:47 +02:00
|
|
|
rend_services_introduce();
|
2004-02-26 23:30:44 +01:00
|
|
|
} else {
|
|
|
|
/* fetch a new directory */
|
2004-05-13 01:48:57 +02:00
|
|
|
directory_get_from_dirserver(DIR_PURPOSE_FETCH_DIR, NULL, 0);
|
2004-02-26 23:30:44 +01:00
|
|
|
}
|
|
|
|
if(options.ORPort) {
|
2004-06-06 05:38:31 +02:00
|
|
|
/* Restart cpuworker and dnsworker processes, so they get up-to-date
|
|
|
|
* configuration options. */
|
|
|
|
cpuworkers_rotate();
|
|
|
|
dnsworkers_rotate();
|
|
|
|
/* Rebuild fresh descriptor as needed. */
|
2004-02-26 23:30:44 +01:00
|
|
|
router_rebuild_descriptor();
|
2004-06-30 18:37:08 +02:00
|
|
|
sprintf(keydir,"%s/router.desc", get_data_directory(&options));
|
2004-02-26 23:30:44 +01:00
|
|
|
log_fn(LOG_INFO,"Dumping descriptor to %s...",keydir);
|
|
|
|
if (write_str_to_file(keydir, router_get_my_descriptor())) {
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2004-05-09 18:47:25 +02:00
|
|
|
/** Tor main loop. */
|
2003-09-25 07:17:11 +02:00
|
|
|
static int do_main_loop(void) {
|
|
|
|
int i;
|
|
|
|
int timeout;
|
|
|
|
int poll_result;
|
2003-12-05 10:51:49 +01:00
|
|
|
|
2004-03-20 05:59:29 +01:00
|
|
|
/* Initialize the history structures. */
|
|
|
|
rep_hist_init();
|
2004-03-31 05:42:56 +02:00
|
|
|
/* Intialize the service cache. */
|
2004-03-31 06:10:10 +02:00
|
|
|
rend_cache_init();
|
2004-03-20 05:59:29 +01:00
|
|
|
|
2003-09-25 07:17:11 +02:00
|
|
|
/* load the private keys, if we're supposed to have them, and set up the
|
|
|
|
* TLS context. */
|
2004-04-13 19:16:47 +02:00
|
|
|
if (init_keys() < 0 || rend_service_load_keys() < 0) {
|
2003-09-25 07:17:11 +02:00
|
|
|
log_fn(LOG_ERR,"Error initializing keys; exiting");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2004-04-07 23:44:46 +02:00
|
|
|
/* load the routers file */
|
2004-05-17 22:31:01 +02:00
|
|
|
if(options.RouterFile) {
|
|
|
|
routerlist_clear_trusted_directories();
|
|
|
|
if (router_load_routerlist_from_file(options.RouterFile, 1) < 0) {
|
|
|
|
log_fn(LOG_ERR,"Error loading router list.");
|
|
|
|
return -1;
|
|
|
|
}
|
2004-04-07 23:44:46 +02:00
|
|
|
}
|
|
|
|
|
2004-04-01 00:02:13 +02:00
|
|
|
if(options.DirPort) { /* the directory is already here, run startup things */
|
|
|
|
has_fetched_directory = 1;
|
|
|
|
directory_has_arrived();
|
|
|
|
}
|
|
|
|
|
2003-11-20 18:49:45 +01:00
|
|
|
if(options.ORPort) {
|
2003-09-25 12:42:07 +02:00
|
|
|
cpu_init(); /* launch cpuworkers. Need to do this *after* we've read the onion key. */
|
2003-09-08 08:26:38 +02:00
|
|
|
}
|
2003-09-07 12:24:40 +02:00
|
|
|
|
2003-03-18 02:49:55 +01:00
|
|
|
/* start up the necessary connections based on which ports are
|
|
|
|
* non-zero. This is where we try to connect to all the other ORs,
|
2003-09-05 08:04:03 +02:00
|
|
|
* and start the listeners.
|
2003-03-18 02:49:55 +01:00
|
|
|
*/
|
2003-10-25 14:01:09 +02:00
|
|
|
if(retry_all_connections() < 0) {
|
|
|
|
log_fn(LOG_ERR,"Failed to bind one of the listener ports.");
|
|
|
|
return -1;
|
|
|
|
}
|
2002-06-27 00:45:49 +02:00
|
|
|
|
|
|
|
for(;;) {
|
2004-06-12 23:43:02 +02:00
|
|
|
#ifdef MS_WINDOWS /* Do service stuff only on windows. */
|
|
|
|
if (service_status.dwCurrentState != SERVICE_RUNNING) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
#else /* do signal stuff only on unix */
|
2002-09-22 00:41:48 +02:00
|
|
|
if(please_dumpstats) {
|
2003-10-15 20:50:16 +02:00
|
|
|
/* prefer to log it at INFO, but make sure we always see it */
|
2004-05-19 22:07:08 +02:00
|
|
|
dumpstats(get_min_log_level()>LOG_INFO ? get_min_log_level() : LOG_INFO);
|
2002-09-28 03:40:11 +02:00
|
|
|
please_dumpstats = 0;
|
|
|
|
}
|
2003-09-21 08:15:43 +02:00
|
|
|
if(please_reset) {
|
2004-02-26 23:30:44 +01:00
|
|
|
do_hup();
|
2003-09-21 08:15:43 +02:00
|
|
|
please_reset = 0;
|
2002-09-22 00:41:48 +02:00
|
|
|
}
|
2003-08-12 08:41:53 +02:00
|
|
|
if(please_reap_children) {
|
|
|
|
while(waitpid(-1,NULL,WNOHANG)) ; /* keep reaping until no more zombies */
|
|
|
|
please_reap_children = 0;
|
|
|
|
}
|
2003-08-12 10:04:31 +02:00
|
|
|
#endif /* signal stuff */
|
Implemented link padding and receiver token buckets
Each socket reads at most 'bandwidth' bytes per second sustained, but
can handle bursts of up to 10*bandwidth bytes.
Cells are now sent out at evenly-spaced intervals, with padding sent
out otherwise. Set Linkpadding=0 in the rc file to send cells as soon
as they're available (and to never send padding cells).
Added license/copyrights statements at the top of most files.
router->min and router->max have been merged into a single 'bandwidth'
value. We should make the routerinfo_t reflect this (want to do that,
Mat?)
As the bandwidth increases, and we want to stop sleeping more and more
frequently to send a single cell, cpu usage goes up. At 128kB/s we're
pretty much calling poll with a timeout of 1ms or even 0ms. The current
code takes a timeout of 0-9ms and makes it 10ms. prepare_for_poll()
handles everything that should have happened in the past, so as long as
our buffers don't get too full in that 10ms, we're ok.
Speaking of too full, if you run three servers at 100kB/s with -l debug,
it spends too much time printing debugging messages to be able to keep
up with the cells. The outbuf ultimately fills up and it kills that
connection. If you run with -l err, it works fine up through 500kB/s and
probably beyond. Down the road we'll want to teach it to recognize when
an outbuf is getting full, and back off.
svn:r50
2002-07-16 03:12:15 +02:00
|
|
|
|
2003-09-05 08:04:03 +02:00
|
|
|
timeout = prepare_for_poll();
|
|
|
|
|
|
|
|
/* poll until we have an event, or the second ends */
|
2004-02-21 00:41:45 +01:00
|
|
|
poll_result = tor_poll(poll_array, nfds, timeout);
|
Implemented link padding and receiver token buckets
Each socket reads at most 'bandwidth' bytes per second sustained, but
can handle bursts of up to 10*bandwidth bytes.
Cells are now sent out at evenly-spaced intervals, with padding sent
out otherwise. Set Linkpadding=0 in the rc file to send cells as soon
as they're available (and to never send padding cells).
Added license/copyrights statements at the top of most files.
router->min and router->max have been merged into a single 'bandwidth'
value. We should make the routerinfo_t reflect this (want to do that,
Mat?)
As the bandwidth increases, and we want to stop sleeping more and more
frequently to send a single cell, cpu usage goes up. At 128kB/s we're
pretty much calling poll with a timeout of 1ms or even 0ms. The current
code takes a timeout of 0-9ms and makes it 10ms. prepare_for_poll()
handles everything that should have happened in the past, so as long as
our buffers don't get too full in that 10ms, we're ok.
Speaking of too full, if you run three servers at 100kB/s with -l debug,
it spends too much time printing debugging messages to be able to keep
up with the cells. The outbuf ultimately fills up and it kills that
connection. If you run with -l err, it works fine up through 500kB/s and
probably beyond. Down the road we'll want to teach it to recognize when
an outbuf is getting full, and back off.
svn:r50
2002-07-16 03:12:15 +02:00
|
|
|
|
2003-10-15 20:28:32 +02:00
|
|
|
/* let catch() handle things like ^c, and otherwise don't worry about it */
|
Implemented link padding and receiver token buckets
Each socket reads at most 'bandwidth' bytes per second sustained, but
can handle bursts of up to 10*bandwidth bytes.
Cells are now sent out at evenly-spaced intervals, with padding sent
out otherwise. Set Linkpadding=0 in the rc file to send cells as soon
as they're available (and to never send padding cells).
Added license/copyrights statements at the top of most files.
router->min and router->max have been merged into a single 'bandwidth'
value. We should make the routerinfo_t reflect this (want to do that,
Mat?)
As the bandwidth increases, and we want to stop sleeping more and more
frequently to send a single cell, cpu usage goes up. At 128kB/s we're
pretty much calling poll with a timeout of 1ms or even 0ms. The current
code takes a timeout of 0-9ms and makes it 10ms. prepare_for_poll()
handles everything that should have happened in the past, so as long as
our buffers don't get too full in that 10ms, we're ok.
Speaking of too full, if you run three servers at 100kB/s with -l debug,
it spends too much time printing debugging messages to be able to keep
up with the cells. The outbuf ultimately fills up and it kills that
connection. If you run with -l err, it works fine up through 500kB/s and
probably beyond. Down the road we'll want to teach it to recognize when
an outbuf is getting full, and back off.
svn:r50
2002-07-16 03:12:15 +02:00
|
|
|
if(poll_result < 0) {
|
2004-05-01 22:46:28 +02:00
|
|
|
/* let the program survive things like ^z */
|
|
|
|
if(tor_socket_errno(-1) != EINTR) {
|
|
|
|
log_fn(LOG_ERR,"poll failed: %s [%d]",
|
2004-05-02 22:18:21 +02:00
|
|
|
tor_socket_strerror(tor_socket_errno(-1)),
|
|
|
|
tor_socket_errno(-1));
|
Implemented link padding and receiver token buckets
Each socket reads at most 'bandwidth' bytes per second sustained, but
can handle bursts of up to 10*bandwidth bytes.
Cells are now sent out at evenly-spaced intervals, with padding sent
out otherwise. Set Linkpadding=0 in the rc file to send cells as soon
as they're available (and to never send padding cells).
Added license/copyrights statements at the top of most files.
router->min and router->max have been merged into a single 'bandwidth'
value. We should make the routerinfo_t reflect this (want to do that,
Mat?)
As the bandwidth increases, and we want to stop sleeping more and more
frequently to send a single cell, cpu usage goes up. At 128kB/s we're
pretty much calling poll with a timeout of 1ms or even 0ms. The current
code takes a timeout of 0-9ms and makes it 10ms. prepare_for_poll()
handles everything that should have happened in the past, so as long as
our buffers don't get too full in that 10ms, we're ok.
Speaking of too full, if you run three servers at 100kB/s with -l debug,
it spends too much time printing debugging messages to be able to keep
up with the cells. The outbuf ultimately fills up and it kills that
connection. If you run with -l err, it works fine up through 500kB/s and
probably beyond. Down the road we'll want to teach it to recognize when
an outbuf is getting full, and back off.
svn:r50
2002-07-16 03:12:15 +02:00
|
|
|
return -1;
|
2003-10-15 20:28:32 +02:00
|
|
|
} else {
|
2003-10-15 20:37:19 +02:00
|
|
|
log_fn(LOG_DEBUG,"poll interrupted.");
|
2003-10-15 20:28:32 +02:00
|
|
|
}
|
Implemented link padding and receiver token buckets
Each socket reads at most 'bandwidth' bytes per second sustained, but
can handle bursts of up to 10*bandwidth bytes.
Cells are now sent out at evenly-spaced intervals, with padding sent
out otherwise. Set Linkpadding=0 in the rc file to send cells as soon
as they're available (and to never send padding cells).
Added license/copyrights statements at the top of most files.
router->min and router->max have been merged into a single 'bandwidth'
value. We should make the routerinfo_t reflect this (want to do that,
Mat?)
As the bandwidth increases, and we want to stop sleeping more and more
frequently to send a single cell, cpu usage goes up. At 128kB/s we're
pretty much calling poll with a timeout of 1ms or even 0ms. The current
code takes a timeout of 0-9ms and makes it 10ms. prepare_for_poll()
handles everything that should have happened in the past, so as long as
our buffers don't get too full in that 10ms, we're ok.
Speaking of too full, if you run three servers at 100kB/s with -l debug,
it spends too much time printing debugging messages to be able to keep
up with the cells. The outbuf ultimately fills up and it kills that
connection. If you run with -l err, it works fine up through 500kB/s and
probably beyond. Down the road we'll want to teach it to recognize when
an outbuf is getting full, and back off.
svn:r50
2002-07-16 03:12:15 +02:00
|
|
|
}
|
2002-06-27 00:45:49 +02:00
|
|
|
|
2003-09-30 22:36:20 +02:00
|
|
|
/* do all the reads and errors first, so we can detect closed sockets */
|
|
|
|
for(i=0;i<nfds;i++)
|
2004-02-28 05:11:53 +01:00
|
|
|
conn_read(i); /* this also marks broken connections */
|
2002-06-27 00:45:49 +02:00
|
|
|
|
2003-09-30 22:36:20 +02:00
|
|
|
/* then do the writes */
|
|
|
|
for(i=0;i<nfds;i++)
|
|
|
|
conn_write(i);
|
|
|
|
|
|
|
|
/* any of the conns need to be closed now? */
|
|
|
|
for(i=0;i<nfds;i++)
|
2003-12-05 10:51:49 +01:00
|
|
|
conn_close_if_marked(i);
|
2002-06-27 00:45:49 +02:00
|
|
|
|
Implemented link padding and receiver token buckets
Each socket reads at most 'bandwidth' bytes per second sustained, but
can handle bursts of up to 10*bandwidth bytes.
Cells are now sent out at evenly-spaced intervals, with padding sent
out otherwise. Set Linkpadding=0 in the rc file to send cells as soon
as they're available (and to never send padding cells).
Added license/copyrights statements at the top of most files.
router->min and router->max have been merged into a single 'bandwidth'
value. We should make the routerinfo_t reflect this (want to do that,
Mat?)
As the bandwidth increases, and we want to stop sleeping more and more
frequently to send a single cell, cpu usage goes up. At 128kB/s we're
pretty much calling poll with a timeout of 1ms or even 0ms. The current
code takes a timeout of 0-9ms and makes it 10ms. prepare_for_poll()
handles everything that should have happened in the past, so as long as
our buffers don't get too full in that 10ms, we're ok.
Speaking of too full, if you run three servers at 100kB/s with -l debug,
it spends too much time printing debugging messages to be able to keep
up with the cells. The outbuf ultimately fills up and it kills that
connection. If you run with -l err, it works fine up through 500kB/s and
probably beyond. Down the road we'll want to teach it to recognize when
an outbuf is getting full, and back off.
svn:r50
2002-07-16 03:12:15 +02:00
|
|
|
/* refilling buckets and sending cells happens at the beginning of the
|
|
|
|
* next iteration of the loop, inside prepare_for_poll()
|
|
|
|
*/
|
2002-06-27 00:45:49 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
/** Unix signal handler.
 *
 * SIGTERM/SIGINT exit immediately (removing the pidfile first); SIGPIPE
 * is ignored; SIGHUP, SIGUSR1 and SIGCHLD just set flags that the main
 * loop services on its next iteration.
 *
 * NOTE(review): log(), exit() and unlink() are not async-signal-safe per
 * POSIX; calling them directly from a handler is technically unsafe —
 * confirm this is an accepted tradeoff here. The flag-setting cases are
 * the safe pattern.
 */
static void catch(int the_signal) {

#ifndef MS_WINDOWS /* do signal stuff only on unix */
  switch(the_signal) {
//    case SIGABRT:
    case SIGTERM:
    /* fallthrough: SIGTERM and SIGINT are handled identically */
    case SIGINT:
      log(LOG_ERR,"Catching signal %d, exiting cleanly.", the_signal);
      /* we don't care if there was an error when we unlink, nothing
         we could do about it anyways */
      if(options.PidFile)
        unlink(options.PidFile);
      exit(0);
    case SIGPIPE:
      log(LOG_INFO,"Caught sigpipe. Ignoring.");
      break;
    case SIGHUP:
      please_reset = 1;   /* main loop will call do_hup() */
      break;
    case SIGUSR1:
      please_dumpstats = 1;   /* main loop will call dumpstats() */
      break;
    case SIGCHLD:
      please_reap_children = 1;   /* main loop will waitpid() */
      break;
    default:
      log(LOG_WARN,"Caught signal %d that we can't handle??", the_signal);
  }
#endif /* signal stuff */
}
|
|
|
|
|
2004-05-09 18:47:25 +02:00
|
|
|
/** Write all statistics to the log, with log level 'severity'. Called
 * in response to a SIGUSR1.  Dumps one summary line per connection in
 * the global poll array (plus buffer detail for non-listeners), the
 * circuits attached to each connection, and the global cell counters
 * accumulated since startup. */
static void dumpstats(int severity) {
  int i;
  connection_t *conn;
  time_t now = time(NULL);

  log(severity, "Dumping stats:");

  for(i=0;i<nfds;i++) {
    conn = connection_array[i];
    log(severity, "Conn %d (socket %d) type %d (%s), state %d (%s), created %d secs ago",
      i, conn->s, conn->type, CONN_TYPE_TO_STRING(conn->type),
      conn->state, conn_state_to_string[conn->type][conn->state], (int)(now - conn->timestamp_created));
    /* Listeners have no peer address and no meaningful buffers. */
    if(!connection_is_listener(conn)) {
      log(severity,"Conn %d is to '%s:%d'.",i,conn->address, conn->port);
      log(severity,"Conn %d: %d bytes waiting on inbuf (last read %d secs ago)",i,
             (int)buf_datalen(conn->inbuf),
             (int)(now - conn->timestamp_lastread));
      log(severity,"Conn %d: %d bytes waiting on outbuf (last written %d secs ago)",i,
             (int)buf_datalen(conn->outbuf), (int)(now - conn->timestamp_lastwritten));
    }
    circuit_dump_by_conn(conn, severity); /* dump info about all the circuits using this conn */
  }
  /* Global per-cell-type counters, accumulated since process start. */
  log(severity,
      "Cells processed: %10lu padding\n"
      "                 %10lu create\n"
      "                 %10lu created\n"
      "                 %10lu relay\n"
      "                (%10lu relayed)\n"
      "                (%10lu delivered)\n"
      "                 %10lu destroy",
      stats_n_padding_cells_processed,
      stats_n_create_cells_processed,
      stats_n_created_cells_processed,
      stats_n_relay_cells_processed,
      stats_n_relay_cells_relayed,
      stats_n_relay_cells_delivered,
      stats_n_destroy_cells_processed);
  /* Average payload fullness of relay data cells, as a percentage.
   * Each branch is guarded so we never divide by zero. */
  if (stats_n_data_cells_packaged)
    log(severity,"Average packaged cell fullness: %2.3f%%",
        100*(((double)stats_n_data_bytes_packaged) /
             (stats_n_data_cells_packaged*RELAY_PAYLOAD_SIZE)) );
  if (stats_n_data_cells_received)
    log(severity,"Average delivered cell fullness: %2.3f%%",
        100*(((double)stats_n_data_bytes_received) /
             (stats_n_data_cells_received*RELAY_PAYLOAD_SIZE)) );

  if (stats_n_seconds_reading)
    log(severity,"Average bandwidth used: %d bytes/sec",
        (int) (stats_n_bytes_read/stats_n_seconds_reading));

  /* Hand off to the subsystems that keep their own statistics. */
  rep_hist_dump_stats(now,severity);
  rend_service_dump_stats(severity);
}
|
|
|
|
|
2004-05-09 18:47:25 +02:00
|
|
|
/** Called before we make any calls to network-related functions.
 * (Some operating systems require their network libraries to be
 * initialized.)  Returns 0 on success, -1 on failure. */
int network_init(void)
{
#ifdef MS_WINDOWS
  /* This silly exercise is necessary before windows will allow
   * gethostbyname to work. */
  WSADATA WSAData;
  int r;
  /* Request winsock 1.1; MAKEWORD(1,1) is the idiomatic spelling of
   * the old magic constant 0x101. */
  r = WSAStartup(MAKEWORD(1,1),&WSAData);
  if (r) {
    log_fn(LOG_WARN,"Error initializing windows network layer: code was %d",r);
    return -1;
  }
  /* The matching WSACleanup() happens in exit_function(), which
   * tor_init() registers via atexit(). */
#endif
  return 0;
}
|
|
|
|
|
2004-05-09 18:47:25 +02:00
|
|
|
/** Called by exit() as we shut down the process.
 * On win32 this releases the winsock state acquired in network_init();
 * elsewhere it is a no-op. */
void exit_function(void)
{
#ifdef MS_WINDOWS
  WSACleanup();
#endif
}
|
|
|
|
|
2004-05-09 18:47:25 +02:00
|
|
|
/** Initialize the Tor process: temporary logging, the platform network
 * layer, configuration, DNS machinery, unix signal handlers, and the
 * crypto subsystem, in that order.  Returns 0 on success, -1 on
 * failure (the caller should then exit). */
int tor_init(int argc, char *argv[]) {

  /* give it somewhere to log to initially */
  add_temp_log();
  log_fn(LOG_NOTICE,"Tor v%s. This is experimental software. Do not use it if you need anonymity.",VERSION);

  if (network_init()<0) {
    log_fn(LOG_ERR,"Error initializing network; exiting.");
    return -1;
  }
  /* Make sure platform network state is torn down when we exit. */
  atexit(exit_function);

  /* Parse config file and command line; bail out on any error. */
  if (init_from_config(argc,argv) < 0)
    return -1;

#ifndef MS_WINDOWS
  if(geteuid()==0)
    log_fn(LOG_WARN,"You are running Tor as root. You don't need to, and you probably shouldn't.");
#endif

  if(options.ORPort) { /* only spawn dns handlers if we're a router */
    dns_init(); /* initialize the dns resolve tree, and spawn workers */
  }
  if(options.SocksPort) {
    client_dns_init(); /* init the client dns cache */
  }

#ifndef MS_WINDOWS /* do signal stuff only on unix */
  {
    struct sigaction action;
    action.sa_flags = 0;
    sigemptyset(&action.sa_mask);

    /* All of these signals share the single handler catch(). */
    action.sa_handler = catch;
    sigaction(SIGINT, &action, NULL);
    sigaction(SIGTERM, &action, NULL);
    sigaction(SIGPIPE, &action, NULL);
    sigaction(SIGUSR1, &action, NULL);
    sigaction(SIGHUP, &action, NULL); /* to reload config, retry conns, etc */
    sigaction(SIGCHLD, &action, NULL); /* handle dns/cpu workers that exit */
  }
#endif /* signal stuff */

  crypto_global_init();
  crypto_seed_rng();
  return 0;
}
|
|
|
|
|
|
|
|
/** Release process-wide resources (currently just the crypto
 * subsystem) as Tor shuts down. */
void tor_cleanup(void) {
  crypto_global_cleanup();
}
|
|
|
|
|
2004-06-12 23:43:02 +02:00
|
|
|
#ifdef MS_WINDOWS
|
|
|
|
/** NT service control callback: handles stop/shutdown requests
 * dispatched to us by the Windows service control manager (SCM). */
void nt_service_control(DWORD request)
{
  switch (request) {
    case SERVICE_CONTROL_STOP:
    case SERVICE_CONTROL_SHUTDOWN:
      log(LOG_ERR, "Got stop/shutdown request; shutting down cleanly.");
      service_status.dwWin32ExitCode = 0;
      service_status.dwCurrentState = SERVICE_STOPPED;
      /* BUGFIX: report the STOPPED state to the SCM before returning.
       * The old code returned without calling SetServiceStatus, so the
       * SCM was never told the service had stopped. */
      SetServiceStatus(hStatus, &service_status);
      return;
  }
  /* For any other request, just re-report our current status. */
  SetServiceStatus(hStatus, &service_status);
}
|
|
|
|
|
|
|
|
void nt_service_body(int argc, char **argv)
|
|
|
|
{
|
|
|
|
int err;
|
|
|
|
FILE *f;
|
|
|
|
f = fopen("d:\\foo.txt", "w");
|
|
|
|
fprintf(f, "POINT 1\n");
|
|
|
|
fclose(f);
|
|
|
|
service_status.dwServiceType = SERVICE_WIN32;
|
|
|
|
service_status.dwCurrentState = SERVICE_START_PENDING;
|
|
|
|
service_status.dwControlsAccepted =
|
|
|
|
SERVICE_ACCEPT_STOP |
|
|
|
|
SERVICE_ACCEPT_SHUTDOWN;
|
|
|
|
service_status.dwWin32ExitCode = 0;
|
|
|
|
service_status.dwServiceSpecificExitCode = 0;
|
|
|
|
service_status.dwCheckPoint = 0;
|
|
|
|
service_status.dwWaitHint = 0;
|
|
|
|
hStatus = RegisterServiceCtrlHandler("Tor", (LPHANDLER_FUNCTION) nt_service_control);
|
|
|
|
if (hStatus == 0) {
|
|
|
|
// failed;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
err = tor_init(argc, argv); // refactor this part out of tor_main and do_main_loop
|
|
|
|
if (err) {
|
|
|
|
// failed.
|
|
|
|
service_status.dwCurrentState = SERVICE_STOPPED;
|
|
|
|
service_status.dwWin32ExitCode = -1;
|
|
|
|
SetServiceStatus(hStatus, &service_status);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
service_status.dwCurrentState = SERVICE_RUNNING;
|
|
|
|
SetServiceStatus(hStatus, &service_status);
|
|
|
|
do_main_loop();
|
|
|
|
tor_cleanup();
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
void nt_service_main(void)
|
|
|
|
{
|
|
|
|
SERVICE_TABLE_ENTRY table[2];
|
|
|
|
table[0].lpServiceName = "Tor";
|
|
|
|
table[0].lpServiceProc = (LPSERVICE_MAIN_FUNCTION)nt_service_body;
|
|
|
|
table[1].lpServiceName = NULL;
|
|
|
|
table[1].lpServiceProc = NULL;
|
|
|
|
if (!StartServiceCtrlDispatcher(table))
|
|
|
|
printf("Error was %d\n",GetLastError());
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2004-06-12 21:45:46 +02:00
|
|
|
/** Main entry point for the Tor process.  When built as an NT service
 * this delegates to the service dispatcher; otherwise it initializes,
 * runs the main loop, and cleans up.
 *
 * NOTE(review): the non-service path returns -1 even after a normal
 * do_main_loop() return -- presumably because reaching this point at
 * all means the loop ended abnormally; confirm that intent. */
int tor_main(int argc, char *argv[]) {
#ifdef MS_WINDOWS_SERVICE
  nt_service_main();
  return 0;
#else
  if (tor_init(argc, argv)<0)
    return -1;
  do_main_loop();
  tor_cleanup();
  return -1;
#endif
}
|
|
|
|
|
2003-04-07 04:12:02 +02:00
|
|
|
/*
|
|
|
|
Local Variables:
|
|
|
|
mode:c
|
|
|
|
indent-tabs-mode:nil
|
|
|
|
c-basic-offset:2
|
|
|
|
End:
|
|
|
|
*/
|