Implemented link padding and receiver token buckets
Each socket reads at most 'bandwidth' bytes per second sustained, but
can handle bursts of up to 10*bandwidth bytes (a rough sketch of this
bucket logic follows this entry).
Cells are now sent out at evenly-spaced intervals, with padding sent
out otherwise. Set Linkpadding=0 in the rc file to send cells as soon
as they're available (and to never send padding cells).
Added license/copyrights statements at the top of most files.
router->min and router->max have been merged into a single 'bandwidth'
value. We should make the routerinfo_t reflect this (want to do that,
Mat?)
As the bandwidth increases, we have to wake up more and more often to
send a single cell, so CPU usage goes up. At 128kB/s we're
pretty much calling poll with a timeout of 1ms or even 0ms. The current
code takes a timeout of 0-9ms and makes it 10ms. prepare_for_poll()
handles everything that should have happened in the past, so as long as
our buffers don't get too full in that 10ms, we're ok.
Speaking of too full: if you run three servers at 100kB/s with -l debug,
they spend so much time printing debug messages that they can't keep up
with the cells; the outbuf eventually fills up and that connection gets
killed. If you run with -l err, it works fine up through 500kB/s and
probably beyond. Down the road we'll want to teach it to recognize when
an outbuf is getting full, and back off.
svn:r50
2002-07-16 03:12:15 +02:00
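
To make the bucket behavior concrete, here is a minimal sketch of the
refill-and-clamp logic described above. All names (sketch_conn,
sketch_refill_bucket, SKETCH_BURST_FACTOR) are illustrative assumptions,
not the actual fields or helpers in or.h; the real code also paces cell
sends evenly and substitutes padding cells when link padding is on, which
is omitted here.

/* Sketch only: the struct fields and helper names are assumptions, not or.h's. */
#define SKETCH_BURST_FACTOR 10

struct sketch_conn {
  int bandwidth;        /* sustained bytes per second this socket may read */
  int receiver_bucket;  /* bytes we are still willing to read right now */
};

/* Called about once per second: refill the bucket, capped at 10*bandwidth. */
static void sketch_refill_bucket(struct sketch_conn *c) {
  c->receiver_bucket += c->bandwidth;
  if (c->receiver_bucket > SKETCH_BURST_FACTOR * c->bandwidth)
    c->receiver_bucket = SKETCH_BURST_FACTOR * c->bandwidth;
}

/* Before read(): clamp the request so the connection never exceeds its bucket;
 * the caller subtracts whatever it actually read from receiver_bucket. */
static int sketch_max_to_read(const struct sketch_conn *c, int wanted) {
  return wanted < c->receiver_bucket ? wanted : c->receiver_bucket;
}
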
Major overhaul: dns slave subsystem, topics
On startup, it forks off a master dns handler, which forks off dns slaves
(like the apache model). Slaves are spawned as load increases, and then
reused; excess slaves are never killed, currently.
Implemented topics. Each topic has a receive window in each direction at
each edge of the circuit, and sends sendme's at the data level, as per
before. Each circuit also has receive windows in each direction at each
hop; an edge sends a circuit-level sendme as soon as enough data cells
have arrived (regardless of whether the data cells were flushed to the
exit conns). Removed the 'connected' cell type, since it's now a topic
command within data cells. (A sketch of the per-topic window bookkeeping
follows this entry.)
At the edge of the circuit, there can be multiple connections associated
with a single circuit; you find them via the linked list conn->next_topic.
Currently each new AP connection starts its own circuit, so we ought to
see comparable performance to what we had before, but that's only because
I haven't written the code to reattach to old circuits. Please try to
break it as-is, and then I'll make it reuse the same circuit and we'll
try to break that.
svn:r152
2003-01-26 10:02:24 +01:00
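
As a rough illustration of the per-topic windows described above (the same
bookkeeping connection_exit_process_data_cell() performs further down), here
is a sketch. The constant values and names are assumptions for illustration;
the real TOPICWINDOW_START and TOPICWINDOW_INCREMENT live in or.h, and the
real code keeps separate n_receive_topicwindow and p_receive_topicwindow
counters, one per direction.

/* Sketch only: constant values and names below are assumptions, not or.h's. */
#define SKETCH_TOPICWINDOW_START     500
#define SKETCH_TOPICWINDOW_INCREMENT 50

struct sketch_topic {
  int receive_window;  /* how many more data cells we will accept on this topic */
};

static void sketch_topic_init(struct sketch_topic *t) {
  t->receive_window = SKETCH_TOPICWINDOW_START;
}

/* A data cell arrived for this topic: spend one unit of window.  Going
 * negative means the peer ignored flow control; the caller kills the circuit. */
static int sketch_topic_data_arrived(struct sketch_topic *t) {
  return (--t->receive_window < 0) ? -1 : 0;
}

/* A topic-level sendme arrived: the peer has drained some cells, so widen the
 * window and resume reading/packaging from the local socket. */
static void sketch_topic_sendme_arrived(struct sketch_topic *t) {
  t->receive_window += SKETCH_TOPICWINDOW_INCREMENT;
}
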
/* Copyright 2001,2002 Roger Dingledine, Matej Pfajfar. */
/* See LICENSE for licensing information */
/* $Id$ */
#include "or.h"
int connection_exit_process_inbuf(connection_t *conn) {
  circuit_t *circ;
  cell_t cell;

  assert(conn && conn->type == CONN_TYPE_EXIT);

  if(conn->inbuf_reached_eof) {
#ifdef HALF_OPEN
    /* XXX!!! If this is right, duplicate it in connection_ap.c */
    /* eof reached; we're done reading, but we might want to write more. */
    conn->done_receiving = 1;
    shutdown(conn->s, 0); /* XXX check return, refactor NM */
    if (conn->done_sending)
      conn->marked_for_close = 1;

    /* XXX Factor out common logic here and in circuit_about_to_close NM */
    circ = circuit_get_by_conn(conn);
    if (!circ)
      return -1;

    memset(&cell, 0, sizeof(cell_t));
    cell.command = CELL_DATA;
    cell.length = TOPIC_HEADER_SIZE;
    *(uint16_t *)(cell.payload+2) = htons(conn->topic_id);
    *cell.payload = TOPIC_COMMAND_END;
    cell.aci = circ->p_aci;
    if (circuit_deliver_data_cell_from_edge(&cell, circ, EDGE_EXIT) < 0) {
      log(LOG_DEBUG,"connection_exit_process_inbuf: circuit_deliver_data_cell_from_edge failed. Closing");
      circuit_close(circ);
    }
    return 0;
#else
    /* eof reached, kill it. */
    log(LOG_DEBUG,"connection_exit_process_inbuf(): conn reached eof. Closing.");
    return -1;
#endif
  }

  log(LOG_DEBUG,"connection_exit_process_inbuf(): state %d.",conn->state);

  switch(conn->state) {
    case EXIT_CONN_STATE_CONNECTING:
      log(LOG_DEBUG,"connection_exit_process_inbuf(): text from server while in 'connecting' state. Leaving it on buffer.");
      return 0;
    case EXIT_CONN_STATE_OPEN:
      if(connection_package_raw_inbuf(conn) < 0)
        return -1;
      circuit_consider_stop_edge_reading(circuit_get_by_conn(conn), EDGE_EXIT);
      return 0;
  }
  return 0;
}

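/* Sketch: the end cell built above and the connected cell built in
 * connection_exit_send_connected() below hand-pack the same topic header --
 * the topic command in payload[0] and the topic id as a network-order
 * uint16_t at payload+2.  A helper like the one below could factor that out.
 * It assumes only the cell_t fields already used in this file, that the aci
 * fits in a uint16_t, and the sketch_ name is hypothetical. */
static void sketch_make_topic_cell(cell_t *cell, uint16_t p_aci,
                                   unsigned char topic_command,
                                   uint16_t topic_id) {
  memset(cell, 0, sizeof(cell_t));
  cell->command = CELL_DATA;
  cell->length = TOPIC_HEADER_SIZE;
  cell->aci = p_aci;
  *cell->payload = topic_command;
  *(uint16_t *)(cell->payload+2) = htons(topic_id);
}
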
int connection_exit_finished_flushing(connection_t *conn) {
  int e, len=sizeof(e);

  assert(conn && conn->type == CONN_TYPE_EXIT);

  switch(conn->state) {
    case EXIT_CONN_STATE_CONNECTING:
      if (getsockopt(conn->s, SOL_SOCKET, SO_ERROR, &e, &len) < 0) { /* not yet */
        if(errno != EINPROGRESS){
          /* yuck. kill it. */
          log(LOG_DEBUG,"connection_exit_finished_flushing(): in-progress connect failed. Removing.");
          return -1;
        } else {
          log(LOG_DEBUG,"connection_exit_finished_flushing(): in-progress connect still waiting.");
          return 0; /* no change, see if next time is better */
        }
      }
      /* the connect has finished. */

      log(LOG_DEBUG,"connection_exit_finished_flushing(): Connection to %s:%u established.",
          conn->address,conn->port);

      conn->state = EXIT_CONN_STATE_OPEN;
      connection_watch_events(conn, POLLIN); /* stop writing, continue reading */
      if(connection_wants_to_flush(conn)) /* in case there are any queued data cells */
        connection_start_writing(conn);
      return
        connection_exit_send_connected(conn) || /* deliver a 'connected' data cell back through the circuit. */
        connection_process_inbuf(conn); /* in case the server has written anything */
    case EXIT_CONN_STATE_OPEN:
      /* FIXME down the road, we'll clear out circuits that are pending to close */
      log(LOG_DEBUG,"connection_exit_finished_flushing(): finished flushing.");
      connection_stop_writing(conn);
#ifdef USE_ZLIB
      if (connection_decompress_to_buf(NULL, 0, conn, Z_SYNC_FLUSH) < 0)
        return -1;
#endif
      connection_consider_sending_sendme(conn, EDGE_EXIT);
      return 0;
    default:
      log(LOG_DEBUG,"Bug: connection_exit_finished_flushing() called in unexpected state.");
      return 0;
  }

  return 0;
}

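/* Sketch, for reference only: the conventional way to check a non-blocking
 * connect() once poll() reports the socket writable is to read the SO_ERROR
 * value itself, not just getsockopt()'s return code.  This stands alone and
 * is not part of the code above; the sketch_ name is hypothetical. */
static int sketch_nonblocking_connect_done(int fd) {
  int err = 0;
  socklen_t len = sizeof(err);
  if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len) < 0)
    return -1;        /* couldn't query the socket at all */
  if (err != 0) {
    errno = err;      /* the deferred connect() failed; err says why */
    return -1;
  }
  return 0;           /* the deferred connect() succeeded */
}
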
int connection_exit_send_connected(connection_t *conn) {
  circuit_t *circ;
  cell_t cell;

  assert(conn);

  circ = circuit_get_by_conn(conn);

  if(!circ) {
    log(LOG_DEBUG,"connection_exit_send_connected(): client-side sent destroy just as we completed server connection. Closing.");
    return -1;
  }

  memset(&cell, 0, sizeof(cell_t));
  cell.aci = circ->p_aci;
  cell.command = CELL_DATA;
  *(uint16_t *)(cell.payload+2) = htons(conn->topic_id);
  *cell.payload = TOPIC_COMMAND_CONNECTED;
  cell.length = TOPIC_HEADER_SIZE;
  log(LOG_INFO,"connection_exit_send_connected(): passing back cell (aci %d).",circ->p_aci);

  if(circuit_deliver_data_cell_from_edge(&cell, circ, EDGE_EXIT) < 0) {
    log(LOG_DEBUG,"connection_exit_send_connected(): circuit_deliver_data_cell (backward) failed. Closing.");
    circuit_close(circ);
    return 0;
  }
  return 0;
}

int connection_exit_begin_conn(cell_t *cell, circuit_t *circ) {
  connection_t *n_conn;
  char *comma;

  if(!memchr(cell->payload + TOPIC_HEADER_SIZE,0,cell->length - TOPIC_HEADER_SIZE)) {
    log(LOG_WARNING,"connection_exit_begin_conn(): topic begin cell has no \\0. Dropping.");
    return 0;
  }
  comma = strchr(cell->payload + TOPIC_HEADER_SIZE, ',');
  if(!comma) {
    log(LOG_WARNING,"connection_exit_begin_conn(): topic begin cell has no comma. Dropping.");
    return 0;
  }
  *comma = 0;

  if(!atoi(comma+1)) { /* bad port */
    log(LOG_DEBUG,"connection_exit_begin_conn(): topic begin cell has invalid port. Dropping.");
    return 0;
  }

  log(LOG_DEBUG,"connection_exit_begin_conn(): Creating new exit connection.");
  n_conn = connection_new(CONN_TYPE_EXIT);
  if(!n_conn) {
    log(LOG_DEBUG,"connection_exit_begin_conn(): connection_new failed. Dropping.");
    return 0;
  }

  cell->payload[0] = 0;
  n_conn->topic_id = ntohs(*(uint16_t *)(cell->payload+2));

  n_conn->address = strdup(cell->payload + TOPIC_HEADER_SIZE);
  n_conn->port = atoi(comma+1);
  n_conn->state = EXIT_CONN_STATE_RESOLVING;
  n_conn->receiver_bucket = -1; /* edge connections don't do receiver buckets */
  n_conn->bandwidth = -1;
  n_conn->s = -1; /* not yet valid */
  n_conn->n_receive_topicwindow = TOPICWINDOW_START;
  n_conn->p_receive_topicwindow = TOPICWINDOW_START;
  if(connection_add(n_conn) < 0) { /* no space, forget it */
    log(LOG_DEBUG,"connection_exit_begin_conn(): connection_add failed. Dropping.");
    connection_free(n_conn);
    return 0;
  }

  /* add it into the linked list of topics on this circuit */
  n_conn->next_topic = circ->n_conn;
  circ->n_conn = n_conn;

  /* send it off to the gethostbyname farm */
  if(dns_resolve(n_conn) < 0) {
    log(LOG_DEBUG,"connection_exit_begin_conn(): Couldn't queue resolve request.");
    connection_remove(n_conn);
    connection_free(n_conn);
    return 0;
  }

  return 0;
}

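/* Sketch of the begin-cell payload that connection_exit_begin_conn() above
 * expects: after the topic header comes "address,port" terminated by a NUL.
 * The real client side lives elsewhere (connection_ap.c); this only
 * illustrates the layout the parser above assumes.  CELL_PAYLOAD_SIZE is
 * assumed here as the payload capacity constant, and the sketch_ name is
 * hypothetical. */
static int sketch_fill_begin_payload(cell_t *cell, uint16_t topic_id,
                                     const char *address, uint16_t port) {
  int room = CELL_PAYLOAD_SIZE - TOPIC_HEADER_SIZE;
  int n = snprintf((char *)cell->payload + TOPIC_HEADER_SIZE, room,
                   "%s,%u", address, (unsigned)port);
  if (n < 0 || n >= room)
    return -1;                              /* address too long for one cell */
  *cell->payload = TOPIC_COMMAND_BEGIN;
  *(uint16_t *)(cell->payload+2) = htons(topic_id);
  cell->length = TOPIC_HEADER_SIZE + n + 1; /* include the terminating NUL */
  return 0;
}
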
int connection_exit_process_data_cell(cell_t *cell, circuit_t *circ) {
  connection_t *conn;
  int topic_command;
  int topic_id;
  static int num_seen=0;

  /* an outgoing data cell has arrived */

  assert(cell && circ);

  topic_command = *cell->payload;
  topic_id = ntohs(*(uint16_t *)(cell->payload+2));
  log(LOG_DEBUG,"connection_exit_process_data_cell(): command %d topic %d", topic_command, topic_id);
  num_seen++;
  log(LOG_DEBUG,"connection_exit_process_data_cell(): Now seen %d data cells here.", num_seen);

  circuit_consider_sending_sendme(circ, EDGE_EXIT);

  for(conn = circ->n_conn; conn && conn->topic_id != topic_id; conn = conn->next_topic) ;

  /* now conn is either NULL, in which case we don't recognize the topic_id, or
   * it is set, in which case cell is talking about this conn.
   */

  if(conn && conn->state != EXIT_CONN_STATE_OPEN) {
    if(topic_command == TOPIC_COMMAND_END) {
      log(LOG_ERR,"connection_exit_process_data_cell(): Got an end before we're connected. Marking for close.");
      conn->marked_for_close = 1;
      return 0;
    } else {
      log(LOG_INFO,"connection_exit_process_data_cell(): Got a non-end data cell when not in 'open' state. Dropping.");
      return 0;
    }
  }

  switch(topic_command) {
    case TOPIC_COMMAND_BEGIN:
      if(conn) {
        log(LOG_INFO,"connection_exit_process_data_cell(): begin cell for known topic. Dropping.");
        return 0;
      }
      return connection_exit_begin_conn(cell, circ);
    case TOPIC_COMMAND_DATA:
      if(!conn) {
        log(LOG_INFO,"connection_exit_process_data_cell(): data cell for unknown topic. Dropping.");
        return 0;
      }
      if(--conn->p_receive_topicwindow < 0) { /* is it below 0 after decrement? */
        log(LOG_DEBUG,"connection_exit_process_data_cell(): receive_topicwindow at exit below 0. Killing.");
        return -1; /* AP breaking protocol. kill the whole circuit. */
      }
      log(LOG_DEBUG,"connection_exit_process_data_cell(): willing to receive %d more cells from circ",conn->p_receive_topicwindow);

      if(conn->state != EXIT_CONN_STATE_OPEN) {
        log(LOG_DEBUG,"connection_exit_process_data_cell(): data received while resolving/connecting. Queueing.");
      }
      log(LOG_DEBUG,"connection_exit_process_data_cell(): put %d bytes on outbuf.",cell->length - TOPIC_HEADER_SIZE);
#ifdef USE_ZLIB
      if(connection_decompress_to_buf(cell->payload + TOPIC_HEADER_SIZE,
                                      cell->length - TOPIC_HEADER_SIZE,
                                      conn, Z_SYNC_FLUSH) < 0) {
        log(LOG_INFO,"connection_exit_process_data_cell(): write to buf failed. Marking for close.");
        conn->marked_for_close = 1;
        return 0;
      }
#else
      if(connection_write_to_buf(cell->payload + TOPIC_HEADER_SIZE,
                                 cell->length - TOPIC_HEADER_SIZE, conn) < 0) {
        log(LOG_INFO,"connection_exit_process_data_cell(): write to buf failed. Marking for close.");
        conn->marked_for_close = 1;
        return 0;
      }
#endif
      if(connection_consider_sending_sendme(conn, EDGE_EXIT) < 0)
        conn->marked_for_close = 1;
      return 0;
    case TOPIC_COMMAND_END:
      if(!conn) {
        log(LOG_DEBUG,"connection_exit_process_data_cell(): end cell dropped, unknown topic %d.",topic_id);
        return 0;
      }
      log(LOG_DEBUG,"connection_exit_process_data_cell(): end cell for topic %d. Removing topic.",topic_id);

#if 0
      /* go through and identify who points to conn. remove conn from the list. */
      if(conn == circ->n_conn) {
        circ->n_conn = conn->next_topic;
      }
      for(prevconn = circ->n_conn; prevconn->next_topic != conn; prevconn = prevconn->next_topic) ;
      prevconn->next_topic = conn->next_topic;
#endif
#ifdef HALF_OPEN
      conn->done_sending = 1;
      shutdown(conn->s, 1); /* XXX check return; refactor NM */
      if (conn->done_receiving)
        conn->marked_for_close = 1;
#endif

      conn->marked_for_close = 1;
      break;
    case TOPIC_COMMAND_CONNECTED:
      log(LOG_INFO,"connection_exit_process_data_cell(): topic connected request unsupported. Dropping.");
      break;
    case TOPIC_COMMAND_SENDME:
      if(!conn) {
        log(LOG_DEBUG,"connection_exit_process_data_cell(): sendme cell dropped, unknown topic %d.",topic_id);
        return 0;
      }
      conn->n_receive_topicwindow += TOPICWINDOW_INCREMENT;
      connection_start_reading(conn);
      connection_package_raw_inbuf(conn); /* handle whatever might still be on the inbuf */
      circuit_consider_stop_edge_reading(circ, EDGE_EXIT);
      break;
    default:
      log(LOG_DEBUG,"connection_exit_process_data_cell(): unknown topic command %d.",topic_command);
  }
  return 0;
}

#if 0
static uint32_t address_to_addr(char *address) {
  struct hostent *rent;
  uint32_t addr;
  char *caddr;

  rent = gethostbyname(address);
  if (!rent) {
    log(LOG_ERR,"address_to_addr(): Could not resolve dest addr %s.",address);
    return 0;
  }
  memcpy(&addr, rent->h_addr,rent->h_length);
  addr = ntohl(addr); /* get it back to host order */
  caddr = (char *)&addr;
  log(LOG_DEBUG,"address_to_addr(): addr is %d %d %d %d",
      caddr[0], caddr[1], caddr[2], caddr[3]);
  return addr;
}
#endif
int connection_exit_connect(connection_t *conn) {
  int s; /* for the new socket */
  struct sockaddr_in dest_addr;

  /* all the necessary info is here. Start the connect() */
  s=socket(PF_INET,SOCK_STREAM,IPPROTO_TCP);
  if (s < 0) {
    log(LOG_ERR,"connection_exit_connect(): Error creating network socket.");
    return -1;
  }
  fcntl(s, F_SETFL, O_NONBLOCK); /* set s to non-blocking */

  memset((void *)&dest_addr,0,sizeof(dest_addr));
  dest_addr.sin_family = AF_INET;
  dest_addr.sin_port = htons(conn->port);
  dest_addr.sin_addr.s_addr = htonl(conn->addr);

  log(LOG_DEBUG,"connection_exit_connect(): Connecting to %s:%u.",conn->address,conn->port);

  if(connect(s,(struct sockaddr *)&dest_addr,sizeof(dest_addr)) < 0) {
    if(errno != EINPROGRESS){
      /* yuck. kill it. */
      perror("connect");
      log(LOG_DEBUG,"connection_exit_connect(): Connect failed.");
      return -1;
    } else {
      /* it's in progress. set state appropriately and return. */
      conn->s = s;
      connection_set_poll_socket(conn);
      conn->state = EXIT_CONN_STATE_CONNECTING;

      log(LOG_DEBUG,"connection_exit_connect(): connect in progress, socket %d.",s);
      connection_watch_events(conn, POLLOUT | POLLIN);
      return 0;
    }
  }

  /* it succeeded. we're connected. */
  log(LOG_DEBUG,"connection_exit_connect(): Connection to %s:%u established.",conn->address,conn->port);

  conn->s = s;
  connection_set_poll_socket(conn);
  conn->state = EXIT_CONN_STATE_OPEN;
  if(connection_wants_to_flush(conn)) { /* in case there are any queued data cells */
    log(LOG_ERR,"connection_exit_connect(): tell roger: newly connected conn had data waiting!");
    // connection_start_writing(conn);
  }
  // connection_process_inbuf(conn);
  connection_watch_events(conn, POLLIN);

  /* also, deliver a 'connected' cell back through the circuit. */
  return connection_exit_send_connected(conn);
}