Implemented link padding and receiver token buckets
Each socket reads at most 'bandwidth' bytes per second sustained, but
can handle bursts of up to 10*bandwidth bytes.
Cells are now sent out at evenly-spaced intervals, with padding sent
out otherwise. Set Linkpadding=0 in the rc file to send cells as soon
as they're available (and to never send padding cells).
Added license/copyrights statements at the top of most files.
router->min and router->max have been merged into a single 'bandwidth'
value. We should make the routerinfo_t reflect this (want to do that,
Mat?)
As the bandwidth increases, and we want to stop sleeping more and more
frequently to send a single cell, cpu usage goes up. At 128kB/s we're
pretty much calling poll with a timeout of 1ms or even 0ms. The current
code takes a timeout of 0-9ms and makes it 10ms. prepare_for_poll()
handles everything that should have happened in the past, so as long as
our buffers don't get too full in that 10ms, we're ok.
Speaking of too full, if you run three servers at 100kB/s with -l debug,
it spends too much time printing debugging messages to be able to keep
up with the cells. The outbuf ultimately fills up and it kills that
connection. If you run with -l err, it works fine up through 500kB/s and
probably beyond. Down the road we'll want to teach it to recognize when
an outbuf is getting full, and back off.
svn:r50
2002-07-16 03:12:15 +02:00
|
|
|
/* Copyright 2001,2002 Roger Dingledine, Matej Pfajfar. */
|
|
|
|
/* See LICENSE for licensing information */
|
|
|
|
/* $Id$ */
|
2002-06-27 00:45:49 +02:00
|
|
|
|
|
|
|
#include "or.h"
|
|
|
|
|
2003-04-16 08:18:31 +02:00
|
|
|
extern or_options_t options; /* command-line and config-file options */
|
|
|
|
|
2003-09-16 07:41:49 +02:00
|
|
|
static void circuit_free_cpath(crypt_path_t *cpath);
|
|
|
|
static void circuit_free_cpath_node(crypt_path_t *victim);
|
|
|
|
static aci_t get_unique_aci_by_addr_port(uint32_t addr, uint16_t port, int aci_type);
|
|
|
|
|
2002-06-27 00:45:49 +02:00
|
|
|
/********* START VARIABLES **********/
|
|
|
|
|
2002-07-05 08:27:23 +02:00
|
|
|
static circuit_t *global_circuitlist=NULL;
|
2002-06-27 00:45:49 +02:00
|
|
|
|
2002-09-22 00:41:48 +02:00
|
|
|
/* Human-readable names for circuit states, indexed by the circuit's
 * state value (presumably the CIRCUIT_STATE_* constants from or.h --
 * TODO confirm the index/constant correspondence). */
char *circuit_state_to_string[] = {
  "receiving the onion",          /* 0 */
  "waiting to process create",    /* 1 */
  "connecting to firsthop",       /* 2 */
  "open"                          /* 3 */
};
|
|
|
|
|
2002-06-27 00:45:49 +02:00
|
|
|
/********* END VARIABLES ************/
|
|
|
|
|
|
|
|
/* Insert circ at the head of the global circuit list.
 * Works whether or not the list is currently empty: prepending to an
 * empty list leaves circ->next == NULL, exactly as before. */
void circuit_add(circuit_t *circ) {
  circ->next = global_circuitlist;
  global_circuitlist = circ;
}
|
|
|
|
|
|
|
|
/* Unlink circ from the global circuit list. Does not free circ.
 * If circ is not on the list, this is a no-op (same as before). */
void circuit_remove(circuit_t *circ) {
  circuit_t **nextp;

  assert(circ && global_circuitlist);

  /* walk the chain of 'next' pointers so the head case needs no
   * special handling */
  for(nextp = &global_circuitlist; *nextp; nextp = &(*nextp)->next) {
    if(*nextp == circ) {
      *nextp = circ->next;
      return;
    }
  }
}
|
|
|
|
|
|
|
|
/* Allocate, initialize, and return a new circuit, prepended to the
 * global circuit list.
 *
 * p_aci:  ACI for the previous (backward) hop.
 * p_conn: connection to that previous hop.
 *
 * The circuit starts in CIRCUIT_STATE_ONIONSKIN_PENDING with full
 * flow-control windows. circ->n_aci remains 0 because we haven't
 * identified the next hop yet.
 */
circuit_t *circuit_new(aci_t p_aci, connection_t *p_conn) {
  circuit_t *circ;
  struct timeval now;

  my_gettimeofday(&now);

  circ = (circuit_t *)tor_malloc(sizeof(circuit_t));
  memset(circ,0,sizeof(circuit_t)); /* zero it out */

  circ->timestamp_created = now.tv_sec;

  /* ACIs: the original assigned p_aci twice; the duplicate is removed. */
  circ->p_aci = p_aci;
  circ->p_conn = p_conn;
  /* circ->n_aci remains 0 because we haven't identified the next hop yet */

  circ->state = CIRCUIT_STATE_ONIONSKIN_PENDING;

  /* flow-control windows start full in both directions */
  circ->package_window = CIRCWINDOW_START;
  circ->deliver_window = CIRCWINDOW_START;

  circuit_add(circ);

  return circ;
}
|
|
|
|
|
|
|
|
/* Release all resources owned by circ: its forward/backward cipher
 * environments, its cpath (if any), and the struct itself.
 * Caller is responsible for having removed circ from the global list. */
void circuit_free(circuit_t *circ) {
  if(circ->p_crypto)
    crypto_free_cipher_env(circ->p_crypto);
  if(circ->n_crypto)
    crypto_free_cipher_env(circ->n_crypto);

  circuit_free_cpath(circ->cpath); /* handles a NULL cpath itself */
  free(circ);
}
|
|
|
|
|
2003-09-16 07:41:49 +02:00
|
|
|
/* Free every node of a cpath. The cpath is a circular doubly linked
 * list, so we remember the head and stop once the next pointer wraps
 * back around to it (or is NULL). A NULL cpath is a no-op. */
static void circuit_free_cpath(crypt_path_t *cpath) {
  crypt_path_t *head = cpath;
  crypt_path_t *next;

  if(!head)
    return;

  while(cpath->next && cpath->next != head) {
    next = cpath->next;
    circuit_free_cpath_node(cpath);
    cpath = next;
  }

  /* free the final (or only) node */
  circuit_free_cpath_node(cpath);
}
|
2002-06-27 00:45:49 +02:00
|
|
|
|
2003-09-16 07:41:49 +02:00
|
|
|
/* Release the crypto state held by one cpath hop, then the hop itself.
 * The three frees are independent of one another. */
static void circuit_free_cpath_node(crypt_path_t *victim) {
  if(victim->handshake_state)
    crypto_dh_free(victim->handshake_state);
  if(victim->f_crypto)
    crypto_free_cipher_env(victim->f_crypto);
  if(victim->b_crypto)
    crypto_free_cipher_env(victim->b_crypto);

  free(victim);
}
|
|
|
|
|
2002-12-30 09:51:41 +01:00
|
|
|
/* return 0 if can't get a unique aci. */
|
2003-09-16 07:41:49 +02:00
|
|
|
static aci_t get_unique_aci_by_addr_port(uint32_t addr, uint16_t port, int aci_type) {
|
2002-06-27 00:45:49 +02:00
|
|
|
aci_t test_aci;
|
|
|
|
connection_t *conn;
|
2003-09-16 22:13:43 +02:00
|
|
|
uint16_t high_bit;
|
2003-09-26 12:03:50 +02:00
|
|
|
|
2003-09-16 22:13:43 +02:00
|
|
|
high_bit = (aci_type == ACI_TYPE_HIGHER) ? 1<<15 : 0;
|
2003-09-16 19:17:39 +02:00
|
|
|
conn = connection_exact_get_by_addr_port(addr,port);
|
|
|
|
if (!conn)
|
2003-09-16 22:57:09 +02:00
|
|
|
return (1|high_bit); /* No connection exists; conflict is impossible. */
|
2003-09-16 22:13:43 +02:00
|
|
|
|
2003-09-16 19:17:39 +02:00
|
|
|
do {
|
2003-09-16 22:13:43 +02:00
|
|
|
/* Sequentially iterate over test_aci=1...1<<15-1 until we find an
|
|
|
|
* aci such that (high_bit|test_aci) is not already used. */
|
2003-09-16 22:57:09 +02:00
|
|
|
/* XXX Will loop forever if all aci's in our range are used.
|
|
|
|
* This matters because it's an external DoS vulnerability. */
|
2003-09-16 22:13:43 +02:00
|
|
|
test_aci = conn->next_aci++;
|
|
|
|
if (test_aci == 0 || test_aci >= 1<<15) {
|
|
|
|
test_aci = 1;
|
|
|
|
conn->next_aci = 2;
|
|
|
|
}
|
|
|
|
test_aci |= high_bit;
|
2003-09-16 19:17:39 +02:00
|
|
|
} while(circuit_get_by_aci_conn(test_aci, conn));
|
2002-06-27 00:45:49 +02:00
|
|
|
return test_aci;
|
|
|
|
}
|
|
|
|
|
2002-09-24 12:43:57 +02:00
|
|
|
/* Enumerate circuits whose next hop is (naddr,nport). Pass NULL to
 * start at the head of the global list, or the previous result to
 * resume after it. Returns the next match, or NULL when exhausted. */
circuit_t *circuit_enumerate_by_naddr_nport(circuit_t *circ, uint32_t naddr, uint16_t nport) {
  circ = circ ? circ->next : global_circuitlist;

  while(circ) {
    if(circ->n_addr == naddr && circ->n_port == nport)
      break;
    circ = circ->next;
  }
  return circ; /* NULL if we fell off the end */
}
|
|
|
|
|
2002-06-27 00:45:49 +02:00
|
|
|
/* Find the circuit that carries the given aci on the given connection.
 * For each circuit we check the backward (p_) side first, then the
 * forward (n_) side; on each side, the circuit-level connection is
 * checked before the edge-stream list. Returns NULL if no match. */
circuit_t *circuit_get_by_aci_conn(aci_t aci, connection_t *conn) {
  circuit_t *circ;
  connection_t *stream;

  for(circ = global_circuitlist; circ; circ = circ->next) {
    if(circ->p_aci == aci) {
      if(circ->p_conn == conn)
        return circ;
      for(stream = circ->p_streams; stream; stream = stream->next_stream)
        if(stream == conn)
          return circ;
    }

    if(circ->n_aci == aci) {
      if(circ->n_conn == conn)
        return circ;
      for(stream = circ->n_streams; stream; stream = stream->next_stream)
        if(stream == conn)
          return circ;
    }
  }

  return NULL;
}
|
|
|
|
|
|
|
|
/* Find the first circuit that conn participates in, either as the
 * circuit-level connection on one side or as an edge stream on either
 * side. Returns NULL if conn belongs to no circuit. */
circuit_t *circuit_get_by_conn(connection_t *conn) {
  circuit_t *circ;
  connection_t *stream;

  for(circ = global_circuitlist; circ; circ = circ->next) {
    if(conn == circ->p_conn || conn == circ->n_conn)
      return circ;

    for(stream = circ->p_streams; stream; stream = stream->next_stream)
      if(stream == conn)
        return circ;

    for(stream = circ->n_streams; stream; stream = stream->next_stream)
      if(stream == conn)
        return circ;
  }

  return NULL;
}
|
|
|
|
|
2003-09-16 22:57:09 +02:00
|
|
|
circuit_t *circuit_get_newest_open(void) {
|
2003-04-16 08:18:31 +02:00
|
|
|
circuit_t *circ, *bestcirc=NULL;
|
2003-02-06 09:00:49 +01:00
|
|
|
|
|
|
|
for(circ=global_circuitlist;circ;circ = circ->next) {
|
2003-09-16 22:57:09 +02:00
|
|
|
if(circ->cpath && circ->state == CIRCUIT_STATE_OPEN && circ->n_conn && (!bestcirc ||
|
2003-05-06 01:24:46 +02:00
|
|
|
bestcirc->timestamp_created < circ->timestamp_created)) {
|
2003-09-16 22:57:09 +02:00
|
|
|
log_fn(LOG_DEBUG,"Choosing circuit %s:%d:%d.", circ->n_conn->address, circ->n_port, circ->n_aci);
|
2003-05-06 01:24:46 +02:00
|
|
|
assert(circ->n_aci);
|
|
|
|
bestcirc = circ;
|
2003-02-06 09:00:49 +01:00
|
|
|
}
|
|
|
|
}
|
2003-04-16 08:18:31 +02:00
|
|
|
return bestcirc;
|
2003-02-06 09:00:49 +01:00
|
|
|
}
|
|
|
|
|
2003-05-02 23:29:25 +02:00
|
|
|
/* Crypt a relay cell traveling through circ in cell_direction and
 * deliver it: to our local edge if relay_crypt recognizes it, else
 * onward to the next/previous hop's connection.
 *
 * Returns -1 if relay_crypt fails (caller should tear down), 0 if the
 * cell is dropped because the circuit ends here, otherwise the result
 * of handing the cell to the edge or writing it to the outgoing buffer.
 */
int circuit_deliver_relay_cell(cell_t *cell, circuit_t *circ,
                               int cell_direction, crypt_path_t *layer_hint) {
  connection_t *conn = NULL;
  char recognized = 0;
  char buf[256];

  assert(cell && circ);
  assert(cell_direction == CELL_DIRECTION_OUT || cell_direction == CELL_DIRECTION_IN);

  /* stage length byte + payload contiguously so relay_crypt can work
   * on them in place */
  buf[0] = cell->length;
  memcpy(buf+1, cell->payload, CELL_PAYLOAD_SIZE);

  log_fn(LOG_DEBUG,"direction %d, streamid %d before crypt.", cell_direction, *(int*)(cell->payload+1));

  if(relay_crypt(circ, buf, 1+CELL_PAYLOAD_SIZE, cell_direction, &layer_hint, &recognized, &conn) < 0) {
    log_fn(LOG_WARNING,"relay crypt failed. Dropping connection.");
    return -1;
  }

  /* copy the crypted bytes back into the cell */
  cell->length = buf[0];
  memcpy(cell->payload, buf+1, CELL_PAYLOAD_SIZE);

  if(recognized) {
    /* the cell is addressed to us; hand it to the appropriate edge */
    if(cell_direction == CELL_DIRECTION_OUT) {
      log_fn(LOG_DEBUG,"Sending to exit.");
      return connection_edge_process_relay_cell(cell, circ, conn, EDGE_EXIT, NULL);
    } else if(cell_direction == CELL_DIRECTION_IN) {
      log_fn(LOG_DEBUG,"Sending to AP.");
      return connection_edge_process_relay_cell(cell, circ, conn, EDGE_AP, layer_hint);
    }
  }

  /* not recognized. pass it on. */
  conn = (cell_direction == CELL_DIRECTION_OUT) ? circ->n_conn : circ->p_conn;

  if(!conn) {
    log_fn(LOG_INFO,"Didn't recognize cell (%d), but circ stops here! Dropping.", *(int *)(cell->payload+1));
    return 0;
  }

  log_fn(LOG_DEBUG,"Passing on unrecognized cell.");
  return connection_write_cell_to_buf(cell, conn);
}
|
|
|
|
|
2003-05-02 23:29:25 +02:00
|
|
|
/* relay_crypt(): apply the appropriate onion crypt operation(s), in place,
 * to the relay payload 'in' of length 'inlen' (must be < 256).
 *
 * cell_direction selects the crypt:
 *   CELL_DIRECTION_IN  -- cell moving toward the origin of the circuit.
 *   CELL_DIRECTION_OUT -- cell moving away from the origin.
 *
 * If circ->cpath is set we are at the origin (OP) and perform one crypt
 * per hop (layered); otherwise we are an intermediate hop and perform a
 * single crypt with the circuit-wide key.
 *
 * Outputs:
 *   *recognized -- set nonzero when the (possibly partially decrypted)
 *                  payload matches a stream on this circuit.
 *   *conn       -- on recognition of a real stream, the matching edge
 *                  connection (set by relay_check_recognized).
 *   *layer_hint -- inbound at the OP: the cpath layer whose decryption
 *                  made the cell recognized.  Outbound at the OP it is
 *                  read, not written: it names the layer to start
 *                  encrypting from.
 *
 * Returns 0 on success (including "not recognized; caller drops"),
 * -1 on crypto failure or a cell arriving before the circuit is open.
 */
int relay_crypt(circuit_t *circ, char *in, int inlen, char cell_direction,
                crypt_path_t **layer_hint, char *recognized, connection_t **conn) {
  crypt_path_t *thishop;
  char out[256]; /* scratch output buffer; inlen < 256 is asserted below */

  assert(circ && in && recognized && conn);
  assert(inlen < 256);

  if(cell_direction == CELL_DIRECTION_IN) {
    if(circ->cpath) { /* we're at the beginning of the circuit. We'll want to do layered crypts. */
      thishop = circ->cpath;
      if(thishop->state != CPATH_STATE_OPEN) {
        log_fn(LOG_WARNING,"Relay cell before first created cell?");
        return -1;
      }
      do { /* Remember: cpath is in forward order, that is, first hop first. */
        assert(thishop);

        log_fn(LOG_DEBUG,"before decrypt: %d",*(int*)(in+2));
        /* decrypt */
        if(crypto_cipher_decrypt(thishop->b_crypto, in, inlen, out)) {
          log_fn(LOG_WARNING,"Error performing onion decryption: %s", crypto_perror());
          return -1;
        }
        memcpy(in,out,inlen);
        log_fn(LOG_DEBUG,"after decrypt: %d",*(int*)(in+2));

        /* in+2 looks like the stream-id field (cf. relay_check_recognized)
         * -- NOTE(review): confirm against the cell layout macros. */
        if( (*recognized = relay_check_recognized(circ, cell_direction, in+2, conn))) {
          *layer_hint = thishop;
          return 0;
        }
        thishop = thishop->next;
      } while(thishop != circ->cpath && thishop->state == CPATH_STATE_OPEN);
      /* No open layer recognized the cell: it isn't for us. */
      log_fn(LOG_INFO,"in-cell at OP not recognized. Dropping.");
      return 0;
    } else { /* we're in the middle. Just one crypt. */
      log_fn(LOG_DEBUG,"before encrypt: %d",*(int*)(in+2));
      if(crypto_cipher_encrypt(circ->p_crypto, in, inlen, out)) {
        log_fn(LOG_WARNING,"Onion encryption failed for ACI %u: %s",
               circ->p_aci, crypto_perror());
        return -1;
      }
      memcpy(in,out,inlen);
      log_fn(LOG_DEBUG,"after encrypt: %d",*(int*)(in+2));
      log_fn(LOG_DEBUG,"Skipping recognized check, because we're not the OP.");
      /* don't check for recognized. only the OP can recognize a stream on the way back. */
    }
  } else if(cell_direction == CELL_DIRECTION_OUT) {
    if(circ->cpath) { /* we're at the beginning of the circuit. We'll want to do layered crypts. */
      thishop = *layer_hint; /* we already know which layer, from when we package_raw_inbuf'ed */
      /* moving from last to first hop */
      do {
        assert(thishop);

        log_fn(LOG_DEBUG,"before encrypt: %d",*(int*)(in+2));
        if(crypto_cipher_encrypt(thishop->f_crypto, in, inlen, out)) {
          log_fn(LOG_WARNING,"Error performing encryption: %s", crypto_perror());
          return -1;
        }
        memcpy(in,out,inlen);
        log_fn(LOG_DEBUG,"after encrypt: %d",*(int*)(in+2));

        thishop = thishop->prev;
      } while(thishop != circ->cpath->prev); /* stop once we've encrypted with the first hop */
    } else { /* we're in the middle. Just one crypt. */
      if(crypto_cipher_decrypt(circ->n_crypto,in, inlen, out)) {
        log_fn(LOG_WARNING,"Decryption failed for ACI %u: %s",
               circ->n_aci, crypto_perror());
        return -1;
      }
      memcpy(in,out,inlen);

      if( (*recognized = relay_check_recognized(circ, cell_direction, in+2, conn)))
        return 0;
    }
  } else {
    /* caller passed a direction we don't know about; this is a bug */
    log_fn(LOG_ERR,"unknown cell direction %d.", cell_direction);
    assert(0);
  }
  return 0;
}
|
|
|
|
|
2003-05-02 23:29:25 +02:00
|
|
|
/* Decide whether the payload field at 'stream' names a stream that
 * belongs to this circuit.
 *
 * A cell is recognized when its stream id is the all-zero stream, or
 * when it matches the stream id of one of the circuit's edge
 * connections on the relevant side (n_streams for outbound cells,
 * p_streams for inbound ones).  When a real stream matches, *conn is
 * set to that connection; for the zero stream *conn is left alone.
 *
 * Returns 1 if recognized, 0 otherwise.
 */
int relay_check_recognized(circuit_t *circ, int cell_direction, char *stream, connection_t **conn) {
  /* FIXME can optimize by passing thishop in */
  connection_t *cursor;

  if(!memcmp(stream,ZERO_STREAM,STREAM_ID_SIZE)) {
    log_fn(LOG_DEBUG,"It's the zero stream. Recognized.");
    return 1; /* the zero stream is always recognized */
  }
  log_fn(LOG_DEBUG,"not the zero stream.");

  /* Pick the stream list matching the cell's direction of travel. */
  cursor = (cell_direction == CELL_DIRECTION_OUT) ? circ->n_streams
                                                  : circ->p_streams;

  if(!cursor) {
    log_fn(LOG_DEBUG,"No conns. Not recognized.");
    return 0;
  }

  while(cursor) {
    if(!memcmp(stream,cursor->stream_id, STREAM_ID_SIZE)) {
      log_fn(LOG_DEBUG,"recognized stream %d.", *(int*)stream);
      *conn = cursor;
      return 1;
    }
    log_fn(LOG_DEBUG,"considered stream %d, not it.",*(int*)cursor->stream_id);
    cursor = cursor->next_stream;
  }

  log_fn(LOG_DEBUG,"Didn't recognize on this iteration of decryption.");
  return 0;
}
|
|
|
|
|
2003-05-20 08:41:23 +02:00
|
|
|
/* Restart reading on the edge connections of 'circ' whose package
 * window still has room, and package whatever data is already sitting
 * on their inbufs.  For EDGE_AP only streams on 'layer_hint' are
 * considered.  Bails out early if packaging fills the circuit-level
 * window (circuit_consider_stop_edge_reading reports it full). */
void circuit_resume_edge_reading(circuit_t *circ, int edge_type, crypt_path_t *layer_hint) {
  connection_t *stream;
  int eligible;

  assert(edge_type == EDGE_EXIT || edge_type == EDGE_AP);

  log_fn(LOG_DEBUG,"resuming");

  /* Exit-side streams hang off n_streams; AP-side off p_streams. */
  stream = (edge_type == EDGE_EXIT) ? circ->n_streams : circ->p_streams;

  while(stream) {
    if(edge_type == EDGE_EXIT)
      eligible = stream->package_window > 0;
    else
      eligible = stream->package_window > 0 && stream->cpath_layer == layer_hint;

    if(eligible) {
      connection_start_reading(stream);
      connection_package_raw_inbuf(stream); /* handle whatever might still be on the inbuf */

      /* If the circuit won't accept any more data, return without looking
       * at any more of the streams. Any connections that should be stopped
       * have already been stopped by connection_package_raw_inbuf. */
      if(circuit_consider_stop_edge_reading(circ, edge_type, layer_hint))
        return;
    }
    stream = stream->next_stream;
  }
}
|
|
|
|
|
|
|
|
/* returns 1 if the window is empty, else 0. If it's empty, tell edge conns to stop reading. */
|
2003-05-20 08:41:23 +02:00
|
|
|
int circuit_consider_stop_edge_reading(circuit_t *circ, int edge_type, crypt_path_t *layer_hint) {
|
major overhaul: dns slave subsystem, topics
on startup, it forks off a master dns handler, which forks off dns
slaves (like the apache model). slaves as spawned as load increases,
and then reused. excess slaves are not ever killed, currently.
implemented topics. each topic has a receive window in each direction
at each edge of the circuit, and sends sendme's at the data level, as
per before. each circuit also has receive windows in each direction at
each hop; an edge sends a circuit-level sendme as soon as enough data
cells have arrived (regardless of whether the data cells were flushed
to the exit conns). removed the 'connected' cell type, since it's now
a topic command within data cells.
at the edge of the circuit, there can be multiple connections associated
with a single circuit. you find them via the linked list conn->next_topic.
currently each new ap connection starts its own circuit, so we ought
to see comparable performance to what we had before. but that's only
because i haven't written the code to reattach to old circuits. please
try to break it as-is, and then i'll make it reuse the same circuit and
we'll try to break that.
svn:r152
2003-01-26 10:02:24 +01:00
|
|
|
connection_t *conn = NULL;
|
|
|
|
|
|
|
|
assert(edge_type == EDGE_EXIT || edge_type == EDGE_AP);
|
2003-05-20 08:41:23 +02:00
|
|
|
assert(edge_type == EDGE_EXIT || layer_hint);
|
major overhaul: dns slave subsystem, topics
on startup, it forks off a master dns handler, which forks off dns
slaves (like the apache model). slaves as spawned as load increases,
and then reused. excess slaves are not ever killed, currently.
implemented topics. each topic has a receive window in each direction
at each edge of the circuit, and sends sendme's at the data level, as
per before. each circuit also has receive windows in each direction at
each hop; an edge sends a circuit-level sendme as soon as enough data
cells have arrived (regardless of whether the data cells were flushed
to the exit conns). removed the 'connected' cell type, since it's now
a topic command within data cells.
at the edge of the circuit, there can be multiple connections associated
with a single circuit. you find them via the linked list conn->next_topic.
currently each new ap connection starts its own circuit, so we ought
to see comparable performance to what we had before. but that's only
because i haven't written the code to reattach to old circuits. please
try to break it as-is, and then i'll make it reuse the same circuit and
we'll try to break that.
svn:r152
2003-01-26 10:02:24 +01:00
|
|
|
|
2003-06-18 00:18:26 +02:00
|
|
|
log_fn(LOG_DEBUG,"considering");
|
2003-05-20 08:41:23 +02:00
|
|
|
if(edge_type == EDGE_EXIT && circ->package_window <= 0)
|
2003-05-28 01:39:04 +02:00
|
|
|
conn = circ->n_streams;
|
2003-05-20 08:41:23 +02:00
|
|
|
else if(edge_type == EDGE_AP && layer_hint->package_window <= 0)
|
2003-05-28 01:39:04 +02:00
|
|
|
conn = circ->p_streams;
|
major overhaul: dns slave subsystem, topics
on startup, it forks off a master dns handler, which forks off dns
slaves (like the apache model). slaves as spawned as load increases,
and then reused. excess slaves are not ever killed, currently.
implemented topics. each topic has a receive window in each direction
at each edge of the circuit, and sends sendme's at the data level, as
per before. each circuit also has receive windows in each direction at
each hop; an edge sends a circuit-level sendme as soon as enough data
cells have arrived (regardless of whether the data cells were flushed
to the exit conns). removed the 'connected' cell type, since it's now
a topic command within data cells.
at the edge of the circuit, there can be multiple connections associated
with a single circuit. you find them via the linked list conn->next_topic.
currently each new ap connection starts its own circuit, so we ought
to see comparable performance to what we had before. but that's only
because i haven't written the code to reattach to old circuits. please
try to break it as-is, and then i'll make it reuse the same circuit and
we'll try to break that.
svn:r152
2003-01-26 10:02:24 +01:00
|
|
|
else
|
|
|
|
return 0;
|
|
|
|
|
2003-05-01 08:42:29 +02:00
|
|
|
for( ; conn; conn=conn->next_stream)
|
2003-05-20 08:41:23 +02:00
|
|
|
if(!layer_hint || conn->cpath_layer == layer_hint)
|
|
|
|
connection_stop_reading(conn);
|
major overhaul: dns slave subsystem, topics
on startup, it forks off a master dns handler, which forks off dns
slaves (like the apache model). slaves as spawned as load increases,
and then reused. excess slaves are not ever killed, currently.
implemented topics. each topic has a receive window in each direction
at each edge of the circuit, and sends sendme's at the data level, as
per before. each circuit also has receive windows in each direction at
each hop; an edge sends a circuit-level sendme as soon as enough data
cells have arrived (regardless of whether the data cells were flushed
to the exit conns). removed the 'connected' cell type, since it's now
a topic command within data cells.
at the edge of the circuit, there can be multiple connections associated
with a single circuit. you find them via the linked list conn->next_topic.
currently each new ap connection starts its own circuit, so we ought
to see comparable performance to what we had before. but that's only
because i haven't written the code to reattach to old circuits. please
try to break it as-is, and then i'll make it reuse the same circuit and
we'll try to break that.
svn:r152
2003-01-26 10:02:24 +01:00
|
|
|
|
2003-06-18 00:18:26 +02:00
|
|
|
log_fn(LOG_DEBUG,"yes. stopped.");
|
major overhaul: dns slave subsystem, topics
on startup, it forks off a master dns handler, which forks off dns
slaves (like the apache model). slaves as spawned as load increases,
and then reused. excess slaves are not ever killed, currently.
implemented topics. each topic has a receive window in each direction
at each edge of the circuit, and sends sendme's at the data level, as
per before. each circuit also has receive windows in each direction at
each hop; an edge sends a circuit-level sendme as soon as enough data
cells have arrived (regardless of whether the data cells were flushed
to the exit conns). removed the 'connected' cell type, since it's now
a topic command within data cells.
at the edge of the circuit, there can be multiple connections associated
with a single circuit. you find them via the linked list conn->next_topic.
currently each new ap connection starts its own circuit, so we ought
to see comparable performance to what we had before. but that's only
because i haven't written the code to reattach to old circuits. please
try to break it as-is, and then i'll make it reuse the same circuit and
we'll try to break that.
svn:r152
2003-01-26 10:02:24 +01:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2003-05-20 08:41:23 +02:00
|
|
|
/* If the deliver window at this edge has fallen far enough below its
 * starting value, queue circuit-level sendme cells (one per
 * CIRCWINDOW_INCREMENT of recovered window) toward the far end of circ.
 * edge_type says whether we're the AP or the exit; layer_hint names the
 * cpath layer when we're the AP.
 * Returns 0 on success, -1 if delivering a sendme cell failed.
 */
int circuit_consider_sending_sendme(circuit_t *circ, int edge_type, crypt_path_t *layer_hint) {
  cell_t cell;
  int *window;     /* which deliver window we're refilling */
  int direction;   /* which way the sendme travels */
  const char *fmt; /* debug message matching the direction */

  assert(circ);

  memset(&cell, 0, sizeof(cell_t));
  cell.command = CELL_RELAY;
  SET_CELL_RELAY_COMMAND(cell, RELAY_COMMAND_SENDME);
  SET_CELL_STREAM_ID(cell, ZERO_STREAM);
  cell.length = RELAY_HEADER_SIZE;

  if(edge_type == EDGE_AP) { /* i'm the AP */
    cell.aci = circ->n_aci;
    window = &layer_hint->deliver_window;
    direction = CELL_DIRECTION_OUT;
    fmt = "deliver_window %d, Queueing sendme forward.";
  } else if(edge_type == EDGE_EXIT) { /* i'm the exit */
    cell.aci = circ->p_aci;
    window = &circ->deliver_window;
    direction = CELL_DIRECTION_IN;
    fmt = "deliver_window %d, Queueing sendme back.";
  } else {
    return 0; /* not an edge we handle; nothing to do (as before) */
  }

  while(*window < CIRCWINDOW_START-CIRCWINDOW_INCREMENT) {
    log_fn(LOG_DEBUG, fmt, *window);
    *window += CIRCWINDOW_INCREMENT;
    if(circuit_deliver_relay_cell(&cell, circ, direction, layer_hint) < 0)
      return -1;
  }
  return 0;
}
|
|
|
|
|
|
|
|
/* Close circuit circ: remove it from the global list, send destroy
 * cells toward both the next and previous hops (and each attached edge
 * stream), and free it.  If we're an application proxy and circ was the
 * youngest open circuit, launch a replacement first.
 */
void circuit_close(circuit_t *circ) {
  connection_t *conn;
  circuit_t *youngest=NULL;

  assert(circ);
  if(options.APPort) {
    youngest = circuit_get_newest_open();
    /* log pointers with %p/(void*): casting a pointer to int truncates
     * on 64-bit platforms and is undefined-format for printf-style %d */
    log_fn(LOG_DEBUG,"youngest %p, circ %p.",(void *)youngest, (void *)circ);
  }
  circuit_remove(circ);
  if(circ->n_conn)
    connection_send_destroy(circ->n_aci, circ->n_conn);
  for(conn=circ->n_streams; conn; conn=conn->next_stream) {
    connection_send_destroy(circ->n_aci, conn);
  }
  if(circ->p_conn)
    /* BUGFIX: use p_aci (the circuit id on the previous-hop connection),
     * not n_aci, so the destroy names the right circuit on that conn --
     * matching the p_streams loop below. */
    connection_send_destroy(circ->p_aci, circ->p_conn);
  for(conn=circ->p_streams; conn; conn=conn->next_stream) {
    connection_send_destroy(circ->p_aci, conn);
  }
  if(options.APPort && youngest == circ) { /* check this after we've sent the destroys, to reduce races */
    /* our current circuit just died. Launch another one pronto. */
    log_fn(LOG_INFO,"Youngest circuit dying. Launching a replacement.");
    circuit_launch_new(1);
  }
  circuit_free(circ);
}
|
|
|
|
|
|
|
|
/* conn is about to close; send the appropriate destroys/ends for the
 * circuits that use it.
 * For an edge connection: unlink it from its circuit's stream list and
 * send an 'end' relay command, but leave the circuit alive.
 * For a connection that speaks cells: close every circuit on it.
 */
void circuit_about_to_close_connection(connection_t *conn) {
  /* currently, we assume it's too late to flush conn's buf here.
   * down the road, maybe we'll consider that eof doesn't mean can't-write
   */
  circuit_t *circ;
  connection_t *prev;

  if(!connection_speaks_cells(conn)) {
    /* it's an edge conn. need to remove it from the linked list of
     * conn's for this circuit. Send an 'end' relay command.
     * But don't kill the circuit.
     */
    circ = circuit_get_by_conn(conn);
    if(!circ)
      return;

    /* easy cases first: conn heads one of the stream lists */
    if(conn == circ->p_streams) {
      circ->p_streams = conn->next_stream;
      goto send_end;
    }
    if(conn == circ->n_streams) {
      circ->n_streams = conn->next_stream;
      goto send_end;
    }

    /* otherwise walk each list looking for conn's predecessor */
    prev = circ->p_streams;
    while(prev && prev->next_stream && prev->next_stream != conn)
      prev = prev->next_stream;
    if(prev && prev->next_stream) {
      prev->next_stream = conn->next_stream;
      goto send_end;
    }

    prev = circ->n_streams;
    while(prev && prev->next_stream && prev->next_stream != conn)
      prev = prev->next_stream;
    if(prev && prev->next_stream) {
      prev->next_stream = conn->next_stream;
      goto send_end;
    }

    log_fn(LOG_ERR,"edge conn not in circuit's list?");
    assert(0); /* should never get here */

send_end:
    if(connection_edge_send_command(conn, circ, RELAY_COMMAND_END) < 0) {
      log_fn(LOG_DEBUG,"sending end failed. Closing.");
      circuit_close(circ);
    }
    return;
  }

  /* this connection speaks cells. We must close all the circuits on it. */
  while((circ = circuit_get_by_conn(conn))) {
    if(circ->n_conn == conn) /* it's closing in front of us */
      circ->n_conn = NULL;
    if(circ->p_conn == conn) /* it's closing behind us */
      circ->p_conn = NULL;
    circuit_close(circ);
  }
}
|
|
|
|
|
major overhaul: dns slave subsystem, topics
on startup, it forks off a master dns handler, which forks off dns
slaves (like the apache model). slaves as spawned as load increases,
and then reused. excess slaves are not ever killed, currently.
implemented topics. each topic has a receive window in each direction
at each edge of the circuit, and sends sendme's at the data level, as
per before. each circuit also has receive windows in each direction at
each hop; an edge sends a circuit-level sendme as soon as enough data
cells have arrived (regardless of whether the data cells were flushed
to the exit conns). removed the 'connected' cell type, since it's now
a topic command within data cells.
at the edge of the circuit, there can be multiple connections associated
with a single circuit. you find them via the linked list conn->next_topic.
currently each new ap connection starts its own circuit, so we ought
to see comparable performance to what we had before. but that's only
because i haven't written the code to reattach to old circuits. please
try to break it as-is, and then i'll make it reuse the same circuit and
we'll try to break that.
svn:r152
2003-01-26 10:02:24 +01:00
|
|
|
/* FIXME this now leaves some out */
|
2002-09-22 00:41:48 +02:00
|
|
|
/* Print (to stdout) every circuit that conn participates in, whether
 * conn is the circuit's previous/next OR connection or one of its edge
 * streams, labeled App-ward or Exit-ward accordingly.
 */
void circuit_dump_by_conn(connection_t *conn) {
  circuit_t *circ;
  connection_t *tmpconn;

  for(circ = global_circuitlist; circ; circ = circ->next) {
    if(circ->p_conn == conn) {
      printf("Conn %d has App-ward circuit: aci %d (other side %d), state %d (%s)\n",
             conn->poll_index, circ->p_aci, circ->n_aci, circ->state, circuit_state_to_string[circ->state]);
    }
    for(tmpconn = circ->p_streams; tmpconn; tmpconn = tmpconn->next_stream) {
      if(tmpconn == conn) {
        printf("Conn %d has App-ward circuit: aci %d (other side %d), state %d (%s)\n",
               conn->poll_index, circ->p_aci, circ->n_aci, circ->state, circuit_state_to_string[circ->state]);
      }
    }
    if(circ->n_conn == conn) {
      printf("Conn %d has Exit-ward circuit: aci %d (other side %d), state %d (%s)\n",
             conn->poll_index, circ->n_aci, circ->p_aci, circ->state, circuit_state_to_string[circ->state]);
    }
    for(tmpconn = circ->n_streams; tmpconn; tmpconn = tmpconn->next_stream) {
      if(tmpconn == conn) {
        printf("Conn %d has Exit-ward circuit: aci %d (other side %d), state %d (%s)\n",
               conn->poll_index, circ->n_aci, circ->p_aci, circ->state, circuit_state_to_string[circ->state]);
      }
    }
  }
}
|
|
|
|
|
2003-04-16 08:18:31 +02:00
|
|
|
void circuit_expire_unused_circuits(void) {
|
|
|
|
circuit_t *circ, *tmpcirc;
|
|
|
|
circuit_t *youngest;
|
|
|
|
|
2003-09-16 22:57:09 +02:00
|
|
|
youngest = circuit_get_newest_open();
|
2003-04-16 08:18:31 +02:00
|
|
|
|
|
|
|
circ = global_circuitlist;
|
|
|
|
while(circ) {
|
|
|
|
tmpcirc = circ;
|
|
|
|
circ = circ->next;
|
2003-06-01 04:09:36 +02:00
|
|
|
if(tmpcirc != youngest && !tmpcirc->p_conn && !tmpcirc->p_streams) {
|
2003-06-18 00:18:26 +02:00
|
|
|
log_fn(LOG_DEBUG,"Closing n_aci %d",tmpcirc->n_aci);
|
2003-04-16 08:18:31 +02:00
|
|
|
circuit_close(tmpcirc);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* failure_status code: negative means reset failures to 0. Other values mean
|
|
|
|
* add that value to the current number of failures, then if we don't have too
|
|
|
|
* many failures on record, try to make a new circuit.
|
|
|
|
*/
|
|
|
|
void circuit_launch_new(int failure_status) {
|
|
|
|
static int failures=0;
|
|
|
|
|
2003-04-20 21:47:33 +02:00
|
|
|
if(!options.APPort) /* we're not an application proxy. no need for circuits. */
|
|
|
|
return;
|
|
|
|
|
2003-04-16 08:18:31 +02:00
|
|
|
if(failure_status == -1) { /* I was called because a circuit succeeded */
|
|
|
|
failures = 0;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
failures += failure_status;
|
|
|
|
|
|
|
|
retry_circuit:
|
|
|
|
|
|
|
|
if(failures > 5) {
|
2003-09-26 12:03:50 +02:00
|
|
|
log_fn(LOG_INFO,"Giving up for now, %d failures.", failures);
|
2003-04-16 08:18:31 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2003-05-06 01:24:46 +02:00
|
|
|
if(circuit_establish_circuit() < 0) {
|
2003-04-16 08:18:31 +02:00
|
|
|
failures++;
|
|
|
|
goto retry_circuit;
|
|
|
|
}
|
|
|
|
|
|
|
|
failures = 0;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2003-05-06 01:24:46 +02:00
|
|
|
int circuit_establish_circuit(void) {
|
2003-04-16 08:18:31 +02:00
|
|
|
routerinfo_t *firsthop;
|
|
|
|
connection_t *n_conn;
|
|
|
|
circuit_t *circ;
|
|
|
|
|
|
|
|
circ = circuit_new(0, NULL); /* sets circ->p_aci and circ->p_conn */
|
|
|
|
circ->state = CIRCUIT_STATE_OR_WAIT;
|
2003-05-06 01:24:46 +02:00
|
|
|
circ->cpath = onion_generate_cpath(&firsthop);
|
|
|
|
if(!circ->cpath) {
|
2003-09-26 12:03:50 +02:00
|
|
|
log_fn(LOG_INFO,"Generating cpath failed.");
|
2003-05-06 01:24:46 +02:00
|
|
|
circuit_close(circ);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* now see if we're already connected to the first OR in 'route' */
|
2003-04-16 08:18:31 +02:00
|
|
|
|
2003-06-18 00:18:26 +02:00
|
|
|
log_fn(LOG_DEBUG,"Looking for firsthop '%s:%u'",
|
2003-04-16 08:18:31 +02:00
|
|
|
firsthop->address,firsthop->or_port);
|
|
|
|
n_conn = connection_twin_get_by_addr_port(firsthop->addr,firsthop->or_port);
|
|
|
|
if(!n_conn || n_conn->state != OR_CONN_STATE_OPEN) { /* not currently connected */
|
|
|
|
circ->n_addr = firsthop->addr;
|
|
|
|
circ->n_port = firsthop->or_port;
|
2003-05-28 04:03:25 +02:00
|
|
|
if(options.OnionRouter) { /* we would be connected if he were up. but he's not. */
|
2003-09-26 12:03:50 +02:00
|
|
|
log_fn(LOG_INFO,"Route's firsthop isn't connected.");
|
2003-04-16 08:18:31 +02:00
|
|
|
circuit_close(circ);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if(!n_conn) { /* launch the connection */
|
2003-05-28 04:03:25 +02:00
|
|
|
n_conn = connection_or_connect(firsthop);
|
2003-04-16 08:18:31 +02:00
|
|
|
if(!n_conn) { /* connect failed, forget the whole thing */
|
2003-09-26 12:03:50 +02:00
|
|
|
log_fn(LOG_INFO,"connect to firsthop failed. Closing.");
|
2003-04-16 08:18:31 +02:00
|
|
|
circuit_close(circ);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2003-06-18 00:18:26 +02:00
|
|
|
log_fn(LOG_DEBUG,"connecting in progress (or finished). Good.");
|
2003-04-16 08:18:31 +02:00
|
|
|
return 0; /* return success. The onion/circuit/etc will be taken care of automatically
|
|
|
|
* (may already have been) whenever n_conn reaches OR_CONN_STATE_OPEN.
|
|
|
|
*/
|
|
|
|
} else { /* it (or a twin) is already open. use it. */
|
|
|
|
circ->n_addr = n_conn->addr;
|
|
|
|
circ->n_port = n_conn->port;
|
2003-05-06 01:24:46 +02:00
|
|
|
circ->n_conn = n_conn;
|
2003-06-18 00:18:26 +02:00
|
|
|
log_fn(LOG_DEBUG,"Conn open. Delivering first onion skin.");
|
2003-05-06 01:24:46 +02:00
|
|
|
if(circuit_send_next_onion_skin(circ) < 0) {
|
2003-09-26 12:03:50 +02:00
|
|
|
log_fn(LOG_INFO,"circuit_send_next_onion_skin failed.");
|
2003-05-06 01:24:46 +02:00
|
|
|
circuit_close(circ);
|
|
|
|
return -1;
|
|
|
|
}
|
2003-04-16 08:18:31 +02:00
|
|
|
}
|
2003-05-06 01:24:46 +02:00
|
|
|
return 0;
|
2003-04-16 08:18:31 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* find circuits that are waiting on me, if any, and get them to send the onion */
|
|
|
|
void circuit_n_conn_open(connection_t *or_conn) {
|
|
|
|
circuit_t *circ;
|
|
|
|
|
2003-06-18 00:18:26 +02:00
|
|
|
log_fn(LOG_DEBUG,"Starting.");
|
2003-04-16 08:18:31 +02:00
|
|
|
circ = circuit_enumerate_by_naddr_nport(NULL, or_conn->addr, or_conn->port);
|
|
|
|
for(;;) {
|
|
|
|
if(!circ)
|
|
|
|
return;
|
|
|
|
|
2003-06-18 00:18:26 +02:00
|
|
|
log_fn(LOG_DEBUG,"Found circ, sending onion skin.");
|
2003-05-06 01:24:46 +02:00
|
|
|
circ->n_conn = or_conn;
|
|
|
|
if(circuit_send_next_onion_skin(circ) < 0) {
|
2003-09-26 12:03:50 +02:00
|
|
|
log_fn(LOG_INFO,"send_next_onion_skin failed; circuit marked for closing.");
|
2003-04-16 08:18:31 +02:00
|
|
|
circuit_close(circ);
|
|
|
|
return; /* FIXME will want to try the other circuits too? */
|
|
|
|
}
|
|
|
|
circ = circuit_enumerate_by_naddr_nport(circ, or_conn->addr, or_conn->port);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2003-05-06 01:24:46 +02:00
|
|
|
/* Advance a circuit under construction by one hop.
 * If no hop has been built yet (first cpath node is CLOSED), send a
 * create cell directly to the first hop. Otherwise find the first
 * not-yet-open hop; if there is none, the circuit is finished, else
 * wrap a fresh onion skin in an EXTEND relay cell addressed to the
 * last open hop.
 * Returns 0 on success (or completion), -1 on error; on error the
 * caller is expected to close the circuit.
 */
int circuit_send_next_onion_skin(circuit_t *circ) {
  cell_t cell;
  crypt_path_t *hop;
  routerinfo_t *router;

  assert(circ && circ->cpath);

  if(circ->cpath->state == CPATH_STATE_CLOSED) {
    log_fn(LOG_DEBUG,"First skin; sending create cell.");
    circ->n_aci = get_unique_aci_by_addr_port(circ->n_addr, circ->n_port, ACI_TYPE_BOTH);

    memset(&cell, 0, sizeof(cell_t));
    cell.command = CELL_CREATE;
    cell.aci = circ->n_aci;
    cell.length = DH_ONIONSKIN_LEN;

    if(onion_skin_create(circ->n_conn->onion_pkey, &(circ->cpath->handshake_state), cell.payload) < 0) {
      log_fn(LOG_WARNING,"onion_skin_create (first hop) failed.");
      return -1;
    }

    if(connection_write_cell_to_buf(&cell, circ->n_conn) < 0) {
      return -1;
    }

    circ->cpath->state = CPATH_STATE_AWAITING_KEYS;
    circ->state = CIRCUIT_STATE_BUILDING;
    log_fn(LOG_DEBUG,"first skin; finished sending create cell.");
  } else {
    assert(circ->cpath->state == CPATH_STATE_OPEN);
    assert(circ->state == CIRCUIT_STATE_BUILDING);
    log_fn(LOG_DEBUG,"starting to send subsequent skin.");
    /* skip past the hops that are already open */
    for(hop=circ->cpath->next;
        hop != circ->cpath && hop->state == CPATH_STATE_OPEN;
        hop=hop->next) ;
    if(hop == circ->cpath) { /* done building the circuit. whew. */
      circ->state = CIRCUIT_STATE_OPEN;
      log_fn(LOG_INFO,"circuit built!");
      return 0;
    }

    router = router_get_by_addr_port(hop->addr,hop->port);
    if(!router) {
      /* was "%d:%d": hop->addr is an unsigned 32-bit value, so %d is a
       * mismatched specifier (UB) and prints high addresses as negative */
      log_fn(LOG_WARNING,"couldn't lookup router %u:%u",hop->addr,hop->port);
      return -1;
    }

    memset(&cell, 0, sizeof(cell_t));
    cell.command = CELL_RELAY;
    cell.aci = circ->n_aci;
    SET_CELL_RELAY_COMMAND(cell, RELAY_COMMAND_EXTEND);
    SET_CELL_STREAM_ID(cell, ZERO_STREAM);

    /* payload: 4-byte addr, 2-byte port, then the onion skin */
    cell.length = RELAY_HEADER_SIZE + 6 + DH_ONIONSKIN_LEN;
    *(uint32_t*)(cell.payload+RELAY_HEADER_SIZE) = htonl(hop->addr);
    *(uint16_t*)(cell.payload+RELAY_HEADER_SIZE+4) = htons(hop->port);
    if(onion_skin_create(router->onion_pkey, &(hop->handshake_state), cell.payload+RELAY_HEADER_SIZE+6) < 0) {
      log_fn(LOG_WARNING,"onion_skin_create failed.");
      return -1;
    }

    log_fn(LOG_DEBUG,"Sending extend relay cell.");
    /* send it to hop->prev, because it will transfer it to a create cell and then send to hop */
    if(circuit_deliver_relay_cell(&cell, circ, CELL_DIRECTION_OUT, hop->prev) < 0) {
      log_fn(LOG_WARNING,"failed to deliver extend cell. Closing.");
      return -1;
    }
    hop->state = CPATH_STATE_AWAITING_KEYS;
  }
  return 0;
}
|
|
|
|
|
2003-09-18 10:11:31 +02:00
|
|
|
/* take the 'extend' cell, pull out addr/port plus the onion skin. Make
|
|
|
|
* sure we're connected to the next hop, and pass it the onion skin in
|
|
|
|
* a create cell.
|
2003-05-06 01:24:46 +02:00
|
|
|
*/
|
|
|
|
int circuit_extend(cell_t *cell, circuit_t *circ) {
|
|
|
|
connection_t *n_conn;
|
|
|
|
aci_t aci_type;
|
|
|
|
struct sockaddr_in me; /* my router identity */
|
|
|
|
cell_t newcell;
|
|
|
|
|
2003-05-28 01:39:04 +02:00
|
|
|
if(circ->n_conn) {
|
2003-06-18 00:18:26 +02:00
|
|
|
log_fn(LOG_WARNING,"n_conn already set. Bug/attack. Closing.");
|
2003-05-28 01:39:04 +02:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2003-05-06 01:24:46 +02:00
|
|
|
circ->n_addr = ntohl(*(uint32_t*)(cell->payload+RELAY_HEADER_SIZE));
|
|
|
|
circ->n_port = ntohs(*(uint16_t*)(cell->payload+RELAY_HEADER_SIZE+4));
|
|
|
|
|
|
|
|
if(learn_my_address(&me) < 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
n_conn = connection_twin_get_by_addr_port(circ->n_addr,circ->n_port);
|
|
|
|
if(!n_conn || n_conn->type != CONN_TYPE_OR) {
|
|
|
|
/* i've disabled making connections through OPs, but it's definitely
|
|
|
|
* possible here. I'm not sure if it would be a bug or a feature. -RD
|
|
|
|
*/
|
|
|
|
/* note also that this will close circuits where the onion has the same
|
|
|
|
* router twice in a row in the path. i think that's ok. -RD
|
|
|
|
*/
|
2003-08-11 22:22:48 +02:00
|
|
|
struct in_addr in;
|
|
|
|
in.s_addr = htonl(circ->n_addr);
|
|
|
|
log_fn(LOG_DEBUG,"Next router (%s:%d) not connected. Closing.", inet_ntoa(in), circ->n_port);
|
2003-05-06 01:24:46 +02:00
|
|
|
/* XXX later we should fail more gracefully here, like with a 'truncated' */
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
circ->n_addr = n_conn->addr; /* these are different if we found a twin instead */
|
|
|
|
circ->n_port = n_conn->port;
|
|
|
|
|
|
|
|
circ->n_conn = n_conn;
|
2003-06-18 00:18:26 +02:00
|
|
|
log_fn(LOG_DEBUG,"n_conn is %s:%u",n_conn->address,n_conn->port);
|
2003-05-06 01:24:46 +02:00
|
|
|
|
|
|
|
aci_type = decide_aci_type(ntohl(me.sin_addr.s_addr), ntohs(me.sin_port),
|
|
|
|
circ->n_addr, circ->n_port);
|
|
|
|
|
2003-06-18 00:18:26 +02:00
|
|
|
log_fn(LOG_DEBUG,"aci_type = %u.",aci_type);
|
2003-05-06 01:24:46 +02:00
|
|
|
circ->n_aci = get_unique_aci_by_addr_port(circ->n_addr, circ->n_port, aci_type);
|
|
|
|
if(!circ->n_aci) {
|
2003-09-26 12:03:50 +02:00
|
|
|
log_fn(LOG_WARNING,"failed to get unique aci.");
|
2003-05-06 01:24:46 +02:00
|
|
|
return -1;
|
|
|
|
}
|
2003-06-18 00:18:26 +02:00
|
|
|
log_fn(LOG_DEBUG,"Chosen ACI %u.",circ->n_aci);
|
2003-05-06 01:24:46 +02:00
|
|
|
|
|
|
|
memset(&newcell, 0, sizeof(cell_t));
|
|
|
|
newcell.command = CELL_CREATE;
|
|
|
|
newcell.aci = circ->n_aci;
|
2003-05-06 07:54:42 +02:00
|
|
|
newcell.length = DH_ONIONSKIN_LEN;
|
2003-05-06 01:24:46 +02:00
|
|
|
|
2003-05-06 07:54:42 +02:00
|
|
|
memcpy(newcell.payload, cell->payload+RELAY_HEADER_SIZE+6, DH_ONIONSKIN_LEN);
|
2003-05-06 01:24:46 +02:00
|
|
|
|
|
|
|
if(connection_write_cell_to_buf(&newcell, circ->n_conn) < 0) {
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Process the DH handshake reply for the first cpath hop still awaiting
 * keys: derive the 32 bytes of key material, free the handshake state,
 * and initialize the forward and backward ciphers for that hop.
 * Returns 0 on success, -1 on failure (caller closes the circuit).
 */
int circuit_finish_handshake(circuit_t *circ, char *reply) {
  unsigned char iv[16];
  unsigned char keys[32]; /* 16 bytes forward key + 16 bytes backward key */
  crypt_path_t *hop;

  memset(iv, 0, 16);

  assert(circ->cpath);
  if(circ->cpath->state == CPATH_STATE_AWAITING_KEYS)
    hop = circ->cpath;
  else {
    /* find the first hop that isn't open yet */
    for(hop=circ->cpath->next;
        hop != circ->cpath && hop->state == CPATH_STATE_OPEN;
        hop=hop->next) ;
    if(hop == circ->cpath) { /* got an extended when we're all done? */
      log_fn(LOG_WARNING,"got extended when circ already built? Closing.");
      return -1;
    }
  }
  assert(hop->state == CPATH_STATE_AWAITING_KEYS);

  if(onion_skin_client_handshake(hop->handshake_state, reply, keys, 32) < 0) {
    log_fn(LOG_WARNING,"onion_skin_client_handshake failed.");
    return -1;
  }

  crypto_dh_free(hop->handshake_state); /* don't need it anymore */
  hop->handshake_state = NULL;

  /* Was: log of (uint32_t)hop plus *(uint32_t*)keys words. That cast
   * truncates the pointer on 64-bit platforms, and worse, it wrote
   * negotiated key material into the debug log. Log only the hop. */
  log_fn(LOG_DEBUG,"hop %p: initializing forward and backward ciphers.",(void*)hop);
  if (!(hop->f_crypto =
        crypto_create_init_cipher(CIRCUIT_CIPHER,keys,iv,1))) {
    log(LOG_WARNING,"forward cipher initialization failed.");
    memset(keys, 0, sizeof(keys));
    return -1;
  }

  if (!(hop->b_crypto =
        crypto_create_init_cipher(CIRCUIT_CIPHER,keys+16,iv,0))) {
    log(LOG_WARNING,"backward cipher initialization failed.");
    memset(keys, 0, sizeof(keys));
    return -1;
  }

  /* wipe key material from the stack; NOTE(review): plain memset may be
   * optimized away — a secure-wipe primitive would be better if available */
  memset(keys, 0, sizeof(keys));

  hop->state = CPATH_STATE_OPEN;
  log_fn(LOG_INFO,"finished");
  return 0;
}
|
|
|
|
|
2003-06-12 12:16:33 +02:00
|
|
|
/* A 'truncated' arrived for circ: remove every cpath layer after 'layer',
 * marking for close any stream that was built through a removed layer.
 * Returns 0.
 */
int circuit_truncated(circuit_t *circ, crypt_path_t *layer) {
  crypt_path_t *doomed;
  connection_t *stream;

  assert(circ);
  assert(layer);

  /* peel off layers one at a time until 'layer' is the last one */
  while(layer->next != circ->cpath) {
    doomed = layer->next;
    log_fn(LOG_DEBUG, "Killing a layer of the cpath.");

    /* streams attached through the doomed layer can no longer function */
    for(stream = circ->p_streams; stream; stream=stream->next_stream) {
      if(stream->cpath_layer == doomed) {
        log_fn(LOG_INFO, "Marking stream %d for close.", *(int*)stream->stream_id);
        stream->marked_for_close = 1;
      }
    }

    layer->next = doomed->next;
    circuit_free_cpath_node(doomed);
  }

  log_fn(LOG_INFO, "finished");
  return 0;
}
|
|
|
|
|
2003-09-16 21:36:19 +02:00
|
|
|
|
cleanups, bugfixes, more verbose logs
Fixed up the assert_*_ok funcs some (more work remains)
Changed config so it reads either /etc/torrc or the -f arg, never both
Finally tracked down a nasty bug with our use of tls:
It turns out that if you ask SSL_read() for no more than n bytes, it
will read the entire record from the network (and maybe part of the next
record, I'm not sure), give you n bytes of it, and keep the remaining
bytes internally. This is fine, except our poll-for-read looks at the
network, and there are no bytes pending on the network, so we never know
to ask SSL_read() for more bytes. Currently I've hacked it so if we ask
for n bytes and it returns n bytes, then it reads again right then. This
will interact poorly with our rate limiting; we need a cleaner solution.
svn:r481
2003-09-24 23:24:52 +02:00
|
|
|
void assert_cpath_layer_ok(const crypt_path_t *cp)
|
2003-09-16 21:36:19 +02:00
|
|
|
{
|
|
|
|
assert(cp->f_crypto);
|
|
|
|
assert(cp->b_crypto);
|
|
|
|
assert(cp->addr);
|
|
|
|
assert(cp->port);
|
|
|
|
switch(cp->state)
|
|
|
|
{
|
|
|
|
case CPATH_STATE_CLOSED:
|
|
|
|
case CPATH_STATE_OPEN:
|
|
|
|
assert(!cp->handshake_state);
|
cleanups, bugfixes, more verbose logs
Fixed up the assert_*_ok funcs some (more work remains)
Changed config so it reads either /etc/torrc or the -f arg, never both
Finally tracked down a nasty bug with our use of tls:
It turns out that if you ask SSL_read() for no more than n bytes, it
will read the entire record from the network (and maybe part of the next
record, I'm not sure), give you n bytes of it, and keep the remaining
bytes internally. This is fine, except our poll-for-read looks at the
network, and there are no bytes pending on the network, so we never know
to ask SSL_read() for more bytes. Currently I've hacked it so if we ask
for n bytes and it returns n bytes, then it reads again right then. This
will interact poorly with our rate limiting; we need a cleaner solution.
svn:r481
2003-09-24 23:24:52 +02:00
|
|
|
break;
|
2003-09-16 21:36:19 +02:00
|
|
|
case CPATH_STATE_AWAITING_KEYS:
|
|
|
|
assert(cp->handshake_state);
|
cleanups, bugfixes, more verbose logs
Fixed up the assert_*_ok funcs some (more work remains)
Changed config so it reads either /etc/torrc or the -f arg, never both
Finally tracked down a nasty bug with our use of tls:
It turns out that if you ask SSL_read() for no more than n bytes, it
will read the entire record from the network (and maybe part of the next
record, I'm not sure), give you n bytes of it, and keep the remaining
bytes internally. This is fine, except our poll-for-read looks at the
network, and there are no bytes pending on the network, so we never know
to ask SSL_read() for more bytes. Currently I've hacked it so if we ask
for n bytes and it returns n bytes, then it reads again right then. This
will interact poorly with our rate limiting; we need a cleaner solution.
svn:r481
2003-09-24 23:24:52 +02:00
|
|
|
break;
|
2003-09-16 21:36:19 +02:00
|
|
|
default:
|
|
|
|
assert(0);
|
|
|
|
}
|
|
|
|
assert(cp->package_window >= 0);
|
|
|
|
assert(cp->deliver_window >= 0);
|
|
|
|
}
|
|
|
|
|
cleanups, bugfixes, more verbose logs
Fixed up the assert_*_ok funcs some (more work remains)
Changed config so it reads either /etc/torrc or the -f arg, never both
Finally tracked down a nasty bug with our use of tls:
It turns out that if you ask SSL_read() for no more than n bytes, it
will read the entire record from the network (and maybe part of the next
record, I'm not sure), give you n bytes of it, and keep the remaining
bytes internally. This is fine, except our poll-for-read looks at the
network, and there are no bytes pending on the network, so we never know
to ask SSL_read() for more bytes. Currently I've hacked it so if we ask
for n bytes and it returns n bytes, then it reads again right then. This
will interact poorly with our rate limiting; we need a cleaner solution.
svn:r481
2003-09-24 23:24:52 +02:00
|
|
|
void assert_cpath_ok(const crypt_path_t *cp)
|
2003-09-16 21:36:19 +02:00
|
|
|
{
|
|
|
|
while(cp->prev)
|
|
|
|
cp = cp->prev;
|
|
|
|
|
|
|
|
while(cp->next) {
|
|
|
|
assert_cpath_layer_ok(cp);
|
|
|
|
/* layers must be in sequence of: "open* awaiting? closed*" */
|
|
|
|
if (cp->prev) {
|
|
|
|
if (cp->prev->state == CPATH_STATE_OPEN) {
|
|
|
|
assert(cp->state == CPATH_STATE_CLOSED ||
|
|
|
|
cp->state == CPATH_STATE_AWAITING_KEYS);
|
|
|
|
} else {
|
|
|
|
assert(cp->state == CPATH_STATE_CLOSED);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
cp = cp->next;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
cleanups, bugfixes, more verbose logs
Fixed up the assert_*_ok funcs some (more work remains)
Changed config so it reads either /etc/torrc or the -f arg, never both
Finally tracked down a nasty bug with our use of tls:
It turns out that if you ask SSL_read() for no more than n bytes, it
will read the entire record from the network (and maybe part of the next
record, I'm not sure), give you n bytes of it, and keep the remaining
bytes internally. This is fine, except our poll-for-read looks at the
network, and there are no bytes pending on the network, so we never know
to ask SSL_read() for more bytes. Currently I've hacked it so if we ask
for n bytes and it returns n bytes, then it reads again right then. This
will interact poorly with our rate limiting; we need a cleaner solution.
svn:r481
2003-09-24 23:24:52 +02:00
|
|
|
void assert_circuit_ok(const circuit_t *c)
|
2003-09-16 21:36:19 +02:00
|
|
|
{
|
|
|
|
connection_t *conn;
|
|
|
|
|
|
|
|
assert(c->n_addr);
|
|
|
|
assert(c->n_port);
|
|
|
|
assert(c->n_conn);
|
|
|
|
assert(c->n_conn->type == CONN_TYPE_OR);
|
|
|
|
if (c->p_conn)
|
|
|
|
assert(c->p_conn->type == CONN_TYPE_OR);
|
|
|
|
for (conn = c->p_streams; conn; conn = conn->next_stream)
|
|
|
|
assert(c->p_conn->type == CONN_TYPE_EXIT);
|
|
|
|
for (conn = c->n_streams; conn; conn = conn->next_stream)
|
|
|
|
assert(conn->type == CONN_TYPE_EXIT);
|
|
|
|
|
|
|
|
assert(c->deliver_window >= 0);
|
|
|
|
assert(c->package_window >= 0);
|
|
|
|
if (c->state == CIRCUIT_STATE_OPEN) {
|
|
|
|
if (c->cpath) {
|
|
|
|
assert(!c->n_crypto);
|
|
|
|
assert(!c->p_crypto);
|
|
|
|
} else {
|
|
|
|
assert(c->n_crypto);
|
|
|
|
assert(c->p_crypto);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (c->cpath) {
|
|
|
|
assert_cpath_ok(c->cpath);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2003-04-07 04:12:02 +02:00
|
|
|
/*
|
|
|
|
Local Variables:
|
|
|
|
mode:c
|
|
|
|
indent-tabs-mode:nil
|
|
|
|
c-basic-offset:2
|
|
|
|
End:
|
|
|
|
*/
|