2007-12-12 22:09:01 +01:00
|
|
|
/* Copyright (c) 2003-2004, Roger Dingledine.
|
|
|
|
* Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
|
2012-06-05 02:58:17 +02:00
|
|
|
* Copyright (c) 2007-2012, The Tor Project, Inc. */
|
2003-08-21 01:05:22 +02:00
|
|
|
/* See LICENSE for licensing information */
|
|
|
|
|
2004-05-09 18:47:25 +02:00
|
|
|
/**
|
|
|
|
* \file cpuworker.c
|
2005-06-11 07:31:17 +02:00
|
|
|
* \brief Implements a farm of 'CPU worker' processes to perform
|
|
|
|
* CPU-intensive tasks in another thread or process, to not
|
|
|
|
* interrupt the main thread.
|
2004-05-05 23:32:43 +02:00
|
|
|
*
|
|
|
|
* Right now, we only use this for processing onionskins.
|
2004-05-09 18:47:25 +02:00
|
|
|
**/
|
2004-05-05 23:32:43 +02:00
|
|
|
|
2003-08-21 01:05:22 +02:00
|
|
|
#include "or.h"
|
2010-07-22 00:46:18 +02:00
|
|
|
#include "buffers.h"
|
2012-08-31 00:47:05 +02:00
|
|
|
#include "channel.h"
|
|
|
|
#include "channeltls.h"
|
2010-07-22 01:21:00 +02:00
|
|
|
#include "circuitbuild.h"
|
2010-07-22 09:46:23 +02:00
|
|
|
#include "circuitlist.h"
|
2010-07-22 10:22:51 +02:00
|
|
|
#include "config.h"
|
2010-07-22 10:32:52 +02:00
|
|
|
#include "connection.h"
|
2010-07-22 11:35:09 +02:00
|
|
|
#include "cpuworker.h"
|
2010-07-23 19:58:06 +02:00
|
|
|
#include "main.h"
|
2010-07-23 20:38:25 +02:00
|
|
|
#include "onion.h"
|
2010-07-21 16:17:10 +02:00
|
|
|
#include "router.h"
|
2003-08-21 01:05:22 +02:00
|
|
|
|
2004-05-10 12:27:54 +02:00
|
|
|
/** The maximum number of cpuworker processes we will keep around. */
|
2003-11-16 18:00:02 +01:00
|
|
|
#define MAX_CPUWORKERS 16
|
2004-05-10 12:27:54 +02:00
|
|
|
/** The minimum number of cpuworker processes we will keep around. */
|
2003-09-14 04:58:50 +02:00
|
|
|
#define MIN_CPUWORKERS 1
|
2003-08-21 01:05:22 +02:00
|
|
|
|
2004-05-10 12:27:54 +02:00
|
|
|
/** The tag specifies which circuit this onionskin was from. */
|
2008-08-15 15:55:01 +02:00
|
|
|
#define TAG_LEN 10
|
2003-08-21 01:05:22 +02:00
|
|
|
|
2004-05-10 12:27:54 +02:00
|
|
|
/** How many cpuworkers we have running right now. */
|
2004-04-25 00:17:50 +02:00
|
|
|
static int num_cpuworkers=0;
|
2004-05-10 12:27:54 +02:00
|
|
|
/** How many of the running cpuworkers have an assigned task right now. */
|
2004-04-25 00:17:50 +02:00
|
|
|
static int num_cpuworkers_busy=0;
|
2004-05-09 18:47:25 +02:00
|
|
|
/** We need to spawn new cpuworkers whenever we rotate the onion keys
|
2004-05-05 23:32:43 +02:00
|
|
|
* on platforms where execution contexts==processes. This variable stores
|
2004-05-10 12:27:54 +02:00
|
|
|
* the last time we got a key rotation event. */
|
2004-04-25 00:17:50 +02:00
|
|
|
static time_t last_rotation_time=0;
|
2003-08-21 01:05:22 +02:00
|
|
|
|
2007-05-15 23:17:48 +02:00
|
|
|
static void cpuworker_main(void *data) ATTR_NORETURN;
|
2003-08-21 01:05:22 +02:00
|
|
|
static int spawn_cpuworker(void);
|
|
|
|
static void spawn_enough_cpuworkers(void);
|
2003-09-14 04:58:50 +02:00
|
|
|
static void process_pending_task(connection_t *cpuworker);
|
2003-08-21 01:05:22 +02:00
|
|
|
|
2004-05-09 18:47:25 +02:00
|
|
|
/** Initialize the cpuworker subsystem.
 */
void
cpu_init(void)
{
  /* "Rotating" with no workers running just spawns the initial pool:
   * cpuworkers_rotate() calls spawn_enough_cpuworkers() in server mode. */
  cpuworkers_rotate();
}
|
|
|
|
|
2004-05-09 18:47:25 +02:00
|
|
|
/** Called when we're done sending a request to a cpuworker. */
int
connection_cpu_finished_flushing(connection_t *conn)
{
  tor_assert(conn);
  tor_assert(conn->type == CONN_TYPE_CPUWORKER);
  /* Nothing else to do here: we simply wait for the worker's reply to
   * arrive on the socketpair; see connection_cpu_process_inbuf(). */
  return 0;
}
|
|
|
|
|
2008-08-15 15:55:01 +02:00
|
|
|
/** Pack global_id and circ_id; set *tag to the result. (See note on
|
2004-05-05 23:32:43 +02:00
|
|
|
* cpuworker_main for wire format.) */
|
2005-06-11 20:52:12 +02:00
|
|
|
static void
|
2012-12-06 04:34:49 +01:00
|
|
|
tag_pack(uint8_t *tag, uint64_t chan_id, circid_t circ_id)
|
2005-06-11 20:52:12 +02:00
|
|
|
{
|
2008-08-05 22:08:19 +02:00
|
|
|
/*XXXX RETHINK THIS WHOLE MESS !!!! !NM NM NM NM*/
|
2012-08-31 00:47:05 +02:00
|
|
|
/*XXXX DOUBLEPLUSTHIS!!!! AS AS AS AS*/
|
|
|
|
set_uint64(tag, chan_id);
|
2009-03-02 20:15:05 +01:00
|
|
|
set_uint16(tag+8, circ_id);
|
2003-09-14 04:58:50 +02:00
|
|
|
}
|
|
|
|
|
2004-05-10 06:34:48 +02:00
|
|
|
/** Unpack <b>tag</b> into addr, port, and circ_id.
|
2004-05-05 23:32:43 +02:00
|
|
|
*/
|
2005-06-11 20:52:12 +02:00
|
|
|
static void
|
2012-12-06 04:34:49 +01:00
|
|
|
tag_unpack(const uint8_t *tag, uint64_t *chan_id, circid_t *circ_id)
|
2005-06-11 20:52:12 +02:00
|
|
|
{
|
2012-08-31 00:47:05 +02:00
|
|
|
*chan_id = get_uint64(tag);
|
2009-03-02 20:15:05 +01:00
|
|
|
*circ_id = get_uint16(tag+8);
|
2003-09-14 04:58:50 +02:00
|
|
|
}
|
|
|
|
|
2012-12-26 04:38:20 +01:00
|
|
|
/** Magic numbers to make sure our cpuworker_requests don't grow any
 * mis-framing bugs. */
#define CPUWORKER_REQUEST_MAGIC 0xda4afeed
#define CPUWORKER_REPLY_MAGIC 0x5eedf00d

/** A request sent to a cpuworker.
 *
 * NOTE: this struct is copied raw over a socketpair between the main
 * process/thread and the worker, so its layout is part of the internal
 * wire format; do not reorder or resize fields casually. */
typedef struct cpuworker_request_t {
  /** Magic number; must be CPUWORKER_REQUEST_MAGIC. */
  uint32_t magic;
  /** Opaque tag to identify the job */
  uint8_t tag[TAG_LEN];
  /** Task code. Must be one of CPUWORKER_TASK_* */
  uint8_t task;

  /** Flag: Are we timing this request? */
  unsigned timed : 1;
  /** If we're timing this request, when was it sent to the cpuworker? */
  struct timeval started_at;

  /** A create cell for the cpuworker to process. */
  create_cell_t create_cell;

  /* Turn the above into a tagged union if needed. */
} cpuworker_request_t;
|
|
|
|
|
2012-12-26 04:38:20 +01:00
|
|
|
/** A reply sent by a cpuworker.
 *
 * NOTE: like cpuworker_request_t, this struct is copied raw over a
 * socketpair, so its layout is part of the internal wire format. */
typedef struct cpuworker_reply_t {
  /** Magic number; must be CPUWORKER_REPLY_MAGIC. */
  uint32_t magic;
  /** Opaque tag to identify the job; matches the request's tag.*/
  uint8_t tag[TAG_LEN];
  /** True iff we got a successful request. */
  uint8_t success;

  /** Are we timing this request? */
  unsigned int timed : 1;
  /** What handshake type was the request? (Used for timing) */
  uint16_t handshake_type;
  /** When did we send the request to the cpuworker? */
  struct timeval started_at;
  /** Once the cpuworker received the request, how many microseconds did it
   * take? (This shouldn't overflow; 4 billion micoseconds is over an hour,
   * and we'll never have an onion handshake that takes so long.) */
  uint32_t n_usec;

  /** Output of processing a create cell
   *
   * @{
   */
  /** The created cell to send back. */
  created_cell_t created_cell;
  /** The keys to use on this circuit. */
  uint8_t keys[CPATH_KEY_MATERIAL_LEN];
  /** Input to use for authenticating introduce1 cells. */
  uint8_t rend_auth_material[DIGEST_LEN];
} cpuworker_reply_t;
|
|
|
|
|
2004-05-09 18:47:25 +02:00
|
|
|
/** Called when the onion key has changed and we need to spawn new
|
2004-05-05 23:32:43 +02:00
|
|
|
* cpuworkers. Close all currently idle cpuworkers, and mark the last
|
|
|
|
* rotation time as now.
|
|
|
|
*/
|
2005-06-11 20:52:12 +02:00
|
|
|
void
|
|
|
|
cpuworkers_rotate(void)
|
2004-04-25 00:17:50 +02:00
|
|
|
{
|
|
|
|
connection_t *cpuworker;
|
|
|
|
while ((cpuworker = connection_get_by_type_state(CONN_TYPE_CPUWORKER,
|
|
|
|
CPUWORKER_STATE_IDLE))) {
|
2004-05-12 23:12:33 +02:00
|
|
|
connection_mark_for_close(cpuworker);
|
2004-04-25 00:17:50 +02:00
|
|
|
--num_cpuworkers;
|
|
|
|
}
|
|
|
|
last_rotation_time = time(NULL);
|
2005-08-16 01:46:18 +02:00
|
|
|
if (server_mode(get_options()))
|
|
|
|
spawn_enough_cpuworkers();
|
2004-04-25 00:17:50 +02:00
|
|
|
}
|
|
|
|
|
2004-11-21 11:14:57 +01:00
|
|
|
/** If the cpuworker closes the connection,
|
|
|
|
* mark it as closed and spawn a new one as needed. */
|
2005-06-11 20:52:12 +02:00
|
|
|
int
|
|
|
|
connection_cpu_reached_eof(connection_t *conn)
|
|
|
|
{
|
2006-09-30 00:33:40 +02:00
|
|
|
log_warn(LD_GENERAL,"Read eof. CPU worker died unexpectedly.");
|
2004-11-28 10:05:49 +01:00
|
|
|
if (conn->state != CPUWORKER_STATE_IDLE) {
|
2004-11-21 11:14:57 +01:00
|
|
|
/* the circ associated with this cpuworker will have to wait until
|
|
|
|
* it gets culled in run_connection_housekeeping(), since we have
|
|
|
|
* no way to find out which circ it was. */
|
2006-02-13 10:02:35 +01:00
|
|
|
log_warn(LD_GENERAL,"...and it left a circuit queued; abandoning circ.");
|
2004-11-21 11:14:57 +01:00
|
|
|
num_cpuworkers_busy--;
|
|
|
|
}
|
|
|
|
num_cpuworkers--;
|
2005-12-14 21:40:40 +01:00
|
|
|
spawn_enough_cpuworkers(); /* try to regrow. hope we don't end up
|
|
|
|
spinning. */
|
2004-11-21 11:14:57 +01:00
|
|
|
connection_mark_for_close(conn);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2012-12-27 00:08:01 +01:00
|
|
|
/** Indexed by handshake type: how many onionskins have we processed and
 * counted of that type?
 * (All three counters are halved together once a type reaches 500000
 * samples; see connection_cpu_process_inbuf().) */
static uint64_t onionskins_n_processed[MAX_ONION_HANDSHAKE_TYPE+1];
/** Indexed by handshake type, corresponding to the onionskins counted in
 * onionskins_n_processed: how many microseconds have we spent in cpuworkers
 * processing that kind of onionskin? */
static uint64_t onionskins_usec_internal[MAX_ONION_HANDSHAKE_TYPE+1];
/** Indexed by handshake type, corresponding to onionskins counted in
 * onionskins_n_processed: how many microseconds have we spent waiting for
 * cpuworkers to give us answers for that kind of onionskin?
 */
static uint64_t onionskins_usec_roundtrip[MAX_ONION_HANDSHAKE_TYPE+1];

/** If any onionskin takes longer than this, we clip them to this
 * time. (microseconds) */
#define MAX_BELIEVABLE_ONIONSKIN_DELAY (2*1000*1000)
|
|
|
|
|
|
|
|
/** Return true iff we'd like to measure a handshake of type
|
|
|
|
* <b>onionskin_type</b>. */
|
|
|
|
static int
|
|
|
|
should_time_request(uint16_t onionskin_type)
|
|
|
|
{
|
|
|
|
/* If we've never heard of this type, we shouldn't even be here. */
|
|
|
|
if (onionskin_type > MAX_ONION_HANDSHAKE_TYPE)
|
|
|
|
return 0;
|
|
|
|
/* Measure the first N handshakes of each type, to ensure we have a
|
|
|
|
* sample */
|
|
|
|
if (onionskins_n_processed[onionskin_type] < 4096)
|
|
|
|
return 1;
|
|
|
|
/** Otherwise, measure with P=1/128. We avoid doing this for every
|
|
|
|
* handshake, since the measurement itself can take a little time. */
|
|
|
|
return tor_weak_random() < (TOR_RAND_MAX/128);
|
|
|
|
}
|
|
|
|
|
|
|
|
/** Return an estimate of how many microseconds we will need for a single
|
|
|
|
* cpuworker to to process <b>n_requests</b> onionskins of type
|
|
|
|
* <b>onionskin_type</b>. */
|
|
|
|
uint64_t
|
|
|
|
estimated_usec_for_onionskins(uint32_t n_requests, uint16_t onionskin_type)
|
|
|
|
{
|
|
|
|
if (onionskin_type > MAX_ONION_HANDSHAKE_TYPE) /* should be impossible */
|
|
|
|
return 1000 * n_requests;
|
|
|
|
if (PREDICT_UNLIKELY(onionskins_n_processed[onionskin_type] < 100)) {
|
|
|
|
/* Until we have 100 data points, just asssume everything takes 1 msec. */
|
|
|
|
return 1000 * n_requests;
|
|
|
|
} else {
|
|
|
|
/* This can't overflow: we'll never have more than 500000 onionskins
|
|
|
|
* measured in onionskin_usec_internal, and they won't take anything near
|
|
|
|
* 1 sec each, and we won't have anything like 1 million queued
|
|
|
|
* onionskins. But that's 5e5 * 1e6 * 1e6, which is still less than
|
|
|
|
* UINT64_MAX. */
|
|
|
|
return (onionskins_usec_internal[onionskin_type] * n_requests) /
|
|
|
|
onionskins_n_processed[onionskin_type];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-01-03 19:20:20 +01:00
|
|
|
/** Compute the absolute and relative overhead of using the cpuworker
|
|
|
|
* framework for onionskins of type <b>onionskin_type</b>.*/
|
|
|
|
static int
|
|
|
|
get_overhead_for_onionskins(uint32_t *usec_out, double *frac_out,
|
|
|
|
uint16_t onionskin_type)
|
|
|
|
{
|
|
|
|
uint64_t overhead;
|
|
|
|
|
|
|
|
*usec_out = 0;
|
|
|
|
*frac_out = 0.0;
|
|
|
|
|
|
|
|
if (onionskin_type > MAX_ONION_HANDSHAKE_TYPE) /* should be impossible */
|
|
|
|
return -1;
|
|
|
|
if (onionskins_n_processed[onionskin_type] == 0 ||
|
|
|
|
onionskins_usec_internal[onionskin_type] == 0 ||
|
|
|
|
onionskins_usec_roundtrip[onionskin_type] == 0)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
overhead = onionskins_usec_roundtrip[onionskin_type] -
|
|
|
|
onionskins_usec_internal[onionskin_type];
|
|
|
|
|
|
|
|
*usec_out = (uint32_t)(overhead / onionskins_n_processed[onionskin_type]);
|
|
|
|
*frac_out = U64_TO_DBL(overhead) / onionskins_usec_internal[onionskin_type];
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/** If we've measured overhead for onionskins of type <b>onionskin_type</b>,
|
|
|
|
* log it. */
|
|
|
|
void
|
|
|
|
cpuworker_log_onionskin_overhead(int severity, int onionskin_type,
|
|
|
|
const char *onionskin_type_name)
|
|
|
|
{
|
|
|
|
uint32_t overhead;
|
|
|
|
double relative_overhead;
|
|
|
|
int r;
|
|
|
|
|
|
|
|
r = get_overhead_for_onionskins(&overhead, &relative_overhead,
|
|
|
|
onionskin_type);
|
|
|
|
if (!overhead || r<0)
|
|
|
|
return;
|
|
|
|
|
|
|
|
log_fn(severity, LD_OR,
|
|
|
|
"%s onionskins have averaged %u usec overhead (%.2f%%) in "
|
|
|
|
"cpuworker code ",
|
|
|
|
onionskin_type_name, (unsigned)overhead, relative_overhead*100);
|
|
|
|
}
|
|
|
|
|
2004-05-09 18:47:25 +02:00
|
|
|
/** Called when we get data from a cpuworker. If the answer is not complete,
 * wait for a complete answer. If the answer is complete,
 * process it as appropriate.
 *
 * Always returns 0; on any reply (success or failure) the worker is moved
 * back to the idle state, retired if the onion keys have rotated since it
 * was spawned, and otherwise handed the next pending task.
 */
int
connection_cpu_process_inbuf(connection_t *conn)
{
  uint64_t chan_id;
  circid_t circ_id;
  channel_t *p_chan = NULL;
  circuit_t *circ;

  tor_assert(conn);
  tor_assert(conn->type == CONN_TYPE_CPUWORKER);

  if (!connection_get_inbuf_len(conn))
    return 0;

  if (conn->state == CPUWORKER_STATE_BUSY_ONION) {
    cpuworker_reply_t rpl;
    /* Replies are fixed-size; wait until a whole one has arrived. */
    if (connection_get_inbuf_len(conn) < sizeof(cpuworker_reply_t))
      return 0; /* not yet */
    /* Each worker handles one job at a time, so there can never be more
     * than exactly one reply buffered. */
    tor_assert(connection_get_inbuf_len(conn) == sizeof(cpuworker_reply_t));

    connection_fetch_from_buf((void*)&rpl,sizeof(cpuworker_reply_t),conn);

    tor_assert(rpl.magic == CPUWORKER_REPLY_MAGIC);

    if (rpl.timed && rpl.success &&
        rpl.handshake_type <= MAX_ONION_HANDSHAKE_TYPE) {
      /* Time how long this request took. The handshake_type check should be
         needless, but let's leave it in to be safe. */
      struct timeval tv_end, tv_diff;
      int64_t usec_roundtrip;
      tor_gettimeofday(&tv_end);
      timersub(&tv_end, &rpl.started_at, &tv_diff);
      usec_roundtrip = ((int64_t)tv_diff.tv_sec)*1000000 + tv_diff.tv_usec;
      /* Clip nonsensical or wildly large round-trips (e.g. clock jumps,
       * heavy swapping) so they can't distort the averages. */
      if (usec_roundtrip < 0 ||
          usec_roundtrip > MAX_BELIEVABLE_ONIONSKIN_DELAY) {
        usec_roundtrip = MAX_BELIEVABLE_ONIONSKIN_DELAY;
      }
      ++onionskins_n_processed[rpl.handshake_type];
      onionskins_usec_internal[rpl.handshake_type] += rpl.n_usec;
      onionskins_usec_roundtrip[rpl.handshake_type] += usec_roundtrip;
      if (onionskins_n_processed[rpl.handshake_type] >= 500000) {
        /* Scale down every 500000 handshakes. On a busy server, that's
         * less impressive than it sounds. */
        onionskins_n_processed[rpl.handshake_type] /= 2;
        onionskins_usec_internal[rpl.handshake_type] /= 2;
        onionskins_usec_roundtrip[rpl.handshake_type] /= 2;
      }
    }
    /* parse out the circ it was talking about */
    tag_unpack(rpl.tag, &chan_id, &circ_id);
    circ = NULL;
    log_debug(LD_OR,
              "Unpacking cpuworker reply, chan_id is " U64_FORMAT
              ", circ_id is %d",
              U64_PRINTF_ARG(chan_id), circ_id);
    p_chan = channel_find_by_global_id(chan_id);

    if (p_chan)
      circ = circuit_get_by_circid_channel(circ_id, p_chan);

    if (rpl.success == 0) {
      log_debug(LD_OR,
                "decoding onionskin failed. "
                "(Old key or bad software.) Closing.");
      if (circ)
        circuit_mark_for_close(circ, END_CIRC_REASON_TORPROTOCOL);
      goto done_processing;
    }
    if (!circ) {
      /* This happens because somebody sends us a destroy cell and the
       * circuit goes away, while the cpuworker is working. This is also
       * why our tag doesn't include a pointer to the circ, because we'd
       * never know if it's still valid.
       */
      log_debug(LD_OR,"processed onion for a circ that's gone. Dropping.");
      goto done_processing;
    }
    tor_assert(! CIRCUIT_IS_ORIGIN(circ));
    if (onionskin_answer(TO_OR_CIRCUIT(circ),
                         &rpl.created_cell,
                         (const char*)rpl.keys,
                         rpl.rend_auth_material) < 0) {
      log_warn(LD_OR,"onionskin_answer failed. Closing.");
      circuit_mark_for_close(circ, END_CIRC_REASON_INTERNAL);
      goto done_processing;
    }
    log_debug(LD_OR,"onionskin_answer succeeded. Yay.");
  } else {
    tor_assert(0); /* don't ask me to do handshakes yet */
  }

 done_processing:
  /* The worker is free again: either retire it (if the onion keys rotated
   * after it was spawned) or give it the next queued onionskin. */
  conn->state = CPUWORKER_STATE_IDLE;
  num_cpuworkers_busy--;
  if (conn->timestamp_created < last_rotation_time) {
    connection_mark_for_close(conn);
    num_cpuworkers--;
    spawn_enough_cpuworkers();
  } else {
    process_pending_task(conn);
  }
  return 0;
}
|
|
|
|
|
2004-05-09 18:47:25 +02:00
|
|
|
/** Implement a cpuworker. 'data' is an fdarray as returned by socketpair.
|
2004-05-05 23:32:43 +02:00
|
|
|
* Read and writes from fdarray[1]. Reads requests, writes answers.
|
|
|
|
*
|
|
|
|
* Request format:
|
2012-12-06 04:34:49 +01:00
|
|
|
* cpuworker_request_t.
|
2004-05-05 23:32:43 +02:00
|
|
|
* Response format:
|
2012-12-06 04:34:49 +01:00
|
|
|
* cpuworker_reply_t
|
2004-05-05 23:32:43 +02:00
|
|
|
*/
|
2006-09-06 10:42:20 +02:00
|
|
|
static void
|
2005-06-11 20:52:12 +02:00
|
|
|
cpuworker_main(void *data)
|
|
|
|
{
|
2012-12-06 04:34:49 +01:00
|
|
|
/* For talking to the parent thread/process */
|
2011-05-23 06:17:48 +02:00
|
|
|
tor_socket_t *fdarray = data;
|
|
|
|
tor_socket_t fd;
|
2003-08-21 01:05:22 +02:00
|
|
|
|
|
|
|
/* variables for onion processing */
|
2012-12-05 03:27:07 +01:00
|
|
|
server_onion_keys_t onion_keys;
|
2012-12-06 04:34:49 +01:00
|
|
|
cpuworker_request_t req;
|
|
|
|
cpuworker_reply_t rpl;
|
2003-08-21 01:05:22 +02:00
|
|
|
|
|
|
|
fd = fdarray[1]; /* this side is ours */
|
2005-01-03 19:06:51 +01:00
|
|
|
#ifndef TOR_IS_MULTITHREADED
|
2005-12-14 21:40:40 +01:00
|
|
|
tor_close_socket(fdarray[0]); /* this is the side of the socketpair the
|
|
|
|
* parent uses */
|
2005-05-03 05:51:20 +02:00
|
|
|
tor_free_all(1); /* so the child doesn't hold the parent's fd's open */
|
2004-08-08 09:25:45 +02:00
|
|
|
handle_signals(0); /* ignore interrupts from the keyboard, etc */
|
2005-01-03 20:07:25 +01:00
|
|
|
#endif
|
|
|
|
tor_free(data);
|
2003-08-21 01:05:22 +02:00
|
|
|
|
2012-12-05 03:27:07 +01:00
|
|
|
setup_server_onion_keys(&onion_keys);
|
2004-04-25 00:17:50 +02:00
|
|
|
|
2004-11-28 10:05:49 +01:00
|
|
|
for (;;) {
|
2012-12-06 04:34:49 +01:00
|
|
|
if (read_all(fd, (void *)&req, sizeof(req), 1) != sizeof(req)) {
|
|
|
|
log_info(LD_OR, "read request failed. Exiting.");
|
2004-04-25 00:17:50 +02:00
|
|
|
goto end;
|
2003-08-21 01:05:22 +02:00
|
|
|
}
|
2012-12-06 04:34:49 +01:00
|
|
|
tor_assert(req.magic == CPUWORKER_REQUEST_MAGIC);
|
|
|
|
|
|
|
|
memset(&rpl, 0, sizeof(rpl));
|
|
|
|
|
|
|
|
if (req.task == CPUWORKER_TASK_ONION) {
|
|
|
|
const create_cell_t *cc = &req.create_cell;
|
|
|
|
created_cell_t *cell_out = &rpl.created_cell;
|
2012-12-27 00:08:01 +01:00
|
|
|
struct timeval tv_start, tv_end;
|
2012-12-06 04:34:49 +01:00
|
|
|
int n;
|
2012-12-27 00:08:01 +01:00
|
|
|
rpl.timed = req.timed;
|
|
|
|
rpl.started_at = req.started_at;
|
|
|
|
rpl.handshake_type = cc->handshake_type;
|
|
|
|
if (req.timed)
|
|
|
|
tor_gettimeofday(&tv_start);
|
2012-12-06 04:34:49 +01:00
|
|
|
n = onion_skin_server_handshake(cc->handshake_type,
|
|
|
|
cc->onionskin, cc->handshake_len,
|
|
|
|
&onion_keys,
|
|
|
|
cell_out->reply,
|
|
|
|
rpl.keys, CPATH_KEY_MATERIAL_LEN,
|
|
|
|
rpl.rend_auth_material);
|
|
|
|
if (n < 0) {
|
2003-08-21 01:05:22 +02:00
|
|
|
/* failure */
|
2006-02-13 10:02:35 +01:00
|
|
|
log_debug(LD_OR,"onion_skin_server_handshake failed.");
|
2012-12-06 04:34:49 +01:00
|
|
|
memset(&rpl, 0, sizeof(rpl));
|
|
|
|
memcpy(rpl.tag, req.tag, TAG_LEN);
|
|
|
|
rpl.success = 0;
|
2003-08-21 01:05:22 +02:00
|
|
|
} else {
|
|
|
|
/* success */
|
2006-02-13 10:02:35 +01:00
|
|
|
log_debug(LD_OR,"onion_skin_server_handshake succeeded.");
|
2012-12-06 04:34:49 +01:00
|
|
|
memcpy(rpl.tag, req.tag, TAG_LEN);
|
|
|
|
cell_out->handshake_len = n;
|
|
|
|
switch (cc->cell_type) {
|
|
|
|
case CELL_CREATE:
|
|
|
|
cell_out->cell_type = CELL_CREATED; break;
|
|
|
|
case CELL_CREATE2:
|
|
|
|
cell_out->cell_type = CELL_CREATED2; break;
|
|
|
|
case CELL_CREATE_FAST:
|
|
|
|
cell_out->cell_type = CELL_CREATED_FAST; break;
|
|
|
|
default:
|
|
|
|
tor_assert(0);
|
|
|
|
goto end;
|
|
|
|
}
|
|
|
|
rpl.success = 1;
|
2003-08-21 01:05:22 +02:00
|
|
|
}
|
2012-12-06 04:34:49 +01:00
|
|
|
rpl.magic = CPUWORKER_REPLY_MAGIC;
|
2012-12-27 00:08:01 +01:00
|
|
|
if (req.timed) {
|
|
|
|
struct timeval tv_diff;
|
|
|
|
int64_t usec;
|
|
|
|
tor_gettimeofday(&tv_end);
|
|
|
|
timersub(&tv_end, &tv_start, &tv_diff);
|
|
|
|
usec = ((int64_t)tv_diff.tv_sec)*1000000 + tv_diff.tv_usec;
|
|
|
|
if (usec < 0 || usec > MAX_BELIEVABLE_ONIONSKIN_DELAY)
|
|
|
|
rpl.n_usec = MAX_BELIEVABLE_ONIONSKIN_DELAY;
|
|
|
|
else
|
|
|
|
rpl.n_usec = (uint32_t) usec;
|
|
|
|
}
|
2012-12-06 04:34:49 +01:00
|
|
|
if (write_all(fd, (void*)&rpl, sizeof(rpl), 1) != sizeof(rpl)) {
|
2006-02-13 10:02:35 +01:00
|
|
|
log_err(LD_BUG,"writing response buf failed. Exiting.");
|
2005-04-04 23:46:08 +02:00
|
|
|
goto end;
|
2003-08-21 01:05:22 +02:00
|
|
|
}
|
2006-02-13 10:02:35 +01:00
|
|
|
log_debug(LD_OR,"finished writing response.");
|
2012-12-06 04:34:49 +01:00
|
|
|
} else if (req.task == CPUWORKER_TASK_SHUTDOWN) {
|
|
|
|
log_info(LD_OR,"Clean shutdown: exiting");
|
|
|
|
goto end;
|
2003-08-21 01:05:22 +02:00
|
|
|
}
|
2012-12-06 04:34:49 +01:00
|
|
|
memwipe(&req, 0, sizeof(req));
|
|
|
|
memwipe(&rpl, 0, sizeof(req));
|
2003-08-21 01:05:22 +02:00
|
|
|
}
|
2004-04-25 00:17:50 +02:00
|
|
|
end:
|
2012-12-06 04:34:49 +01:00
|
|
|
memwipe(&req, 0, sizeof(req));
|
|
|
|
memwipe(&rpl, 0, sizeof(req));
|
2012-12-05 03:27:07 +01:00
|
|
|
release_server_onion_keys(&onion_keys);
|
2005-04-04 23:53:26 +02:00
|
|
|
tor_close_socket(fd);
|
2005-10-25 21:01:48 +02:00
|
|
|
crypto_thread_cleanup();
|
2004-04-25 00:17:50 +02:00
|
|
|
spawn_exit();
|
2003-08-21 01:05:22 +02:00
|
|
|
}
|
|
|
|
|
2005-09-23 02:04:44 +02:00
|
|
|
/** Launch a new cpuworker. Return 0 if we're happy, -1 if we failed.
|
2004-05-05 23:32:43 +02:00
|
|
|
*/
|
2005-06-11 20:52:12 +02:00
|
|
|
static int
|
|
|
|
spawn_cpuworker(void)
|
|
|
|
{
|
2011-05-23 06:17:48 +02:00
|
|
|
tor_socket_t *fdarray;
|
|
|
|
tor_socket_t fd;
|
2003-08-21 01:05:22 +02:00
|
|
|
connection_t *conn;
|
2005-06-30 09:17:38 +02:00
|
|
|
int err;
|
2003-08-21 01:05:22 +02:00
|
|
|
|
2011-05-23 06:17:48 +02:00
|
|
|
fdarray = tor_malloc(sizeof(tor_socket_t)*2);
|
2005-06-30 09:17:38 +02:00
|
|
|
if ((err = tor_socketpair(AF_UNIX, SOCK_STREAM, 0, fdarray)) < 0) {
|
2006-09-30 00:33:40 +02:00
|
|
|
log_warn(LD_NET, "Couldn't construct socketpair for cpuworker: %s",
|
2006-02-13 10:02:35 +01:00
|
|
|
tor_socket_strerror(-err));
|
2005-01-03 20:07:25 +01:00
|
|
|
tor_free(fdarray);
|
2005-09-23 02:04:44 +02:00
|
|
|
return -1;
|
2003-08-21 01:05:22 +02:00
|
|
|
}
|
|
|
|
|
2012-01-17 22:35:07 +01:00
|
|
|
tor_assert(SOCKET_OK(fdarray[0]));
|
|
|
|
tor_assert(SOCKET_OK(fdarray[1]));
|
2006-06-06 02:06:52 +02:00
|
|
|
|
2005-01-03 20:07:25 +01:00
|
|
|
fd = fdarray[0];
|
2006-09-06 10:42:20 +02:00
|
|
|
spawn_func(cpuworker_main, (void*)fdarray);
|
2006-02-13 10:02:35 +01:00
|
|
|
log_debug(LD_OR,"just spawned a cpu worker.");
|
2005-01-03 19:06:51 +01:00
|
|
|
#ifndef TOR_IS_MULTITHREADED
|
2005-12-14 21:40:40 +01:00
|
|
|
tor_close_socket(fdarray[1]); /* don't need the worker's side of the pipe */
|
2005-01-03 20:07:25 +01:00
|
|
|
tor_free(fdarray);
|
2005-01-03 19:06:51 +01:00
|
|
|
#endif
|
2003-08-21 01:05:22 +02:00
|
|
|
|
2007-06-05 22:54:49 +02:00
|
|
|
conn = connection_new(CONN_TYPE_CPUWORKER, AF_UNIX);
|
2003-08-21 01:05:22 +02:00
|
|
|
|
2005-01-03 20:07:25 +01:00
|
|
|
set_socket_nonblocking(fd);
|
2003-08-21 01:05:22 +02:00
|
|
|
|
|
|
|
/* set up conn so it's got all the data we need to remember */
|
2005-01-03 20:07:25 +01:00
|
|
|
conn->s = fd;
|
2003-10-04 05:29:09 +02:00
|
|
|
conn->address = tor_strdup("localhost");
|
2011-12-02 22:21:50 +01:00
|
|
|
tor_addr_make_unspec(&conn->addr);
|
2003-08-21 01:05:22 +02:00
|
|
|
|
2004-11-28 10:05:49 +01:00
|
|
|
if (connection_add(conn) < 0) { /* no space, forget it */
|
2006-09-30 00:33:34 +02:00
|
|
|
log_warn(LD_NET,"connection_add for cpuworker failed. Giving up.");
|
2005-01-03 20:07:25 +01:00
|
|
|
connection_free(conn); /* this closes fd */
|
2003-08-21 01:05:22 +02:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
conn->state = CPUWORKER_STATE_IDLE;
|
|
|
|
connection_start_reading(conn);
|
|
|
|
|
|
|
|
return 0; /* success */
|
|
|
|
}
|
|
|
|
|
2004-05-09 18:47:25 +02:00
|
|
|
/** If we have too few or too many active cpuworkers, try to spawn new ones
|
2004-05-05 23:32:43 +02:00
|
|
|
* or kill idle ones.
|
|
|
|
*/
|
2005-06-11 20:52:12 +02:00
|
|
|
static void
|
|
|
|
spawn_enough_cpuworkers(void)
|
|
|
|
{
|
2010-09-28 20:36:28 +02:00
|
|
|
int num_cpuworkers_needed = get_num_cpus(get_options());
|
2003-08-21 01:05:22 +02:00
|
|
|
|
2004-11-28 10:05:49 +01:00
|
|
|
if (num_cpuworkers_needed < MIN_CPUWORKERS)
|
2003-08-21 01:05:22 +02:00
|
|
|
num_cpuworkers_needed = MIN_CPUWORKERS;
|
2004-11-28 10:05:49 +01:00
|
|
|
if (num_cpuworkers_needed > MAX_CPUWORKERS)
|
2003-08-21 01:05:22 +02:00
|
|
|
num_cpuworkers_needed = MAX_CPUWORKERS;
|
|
|
|
|
2004-11-28 10:05:49 +01:00
|
|
|
while (num_cpuworkers < num_cpuworkers_needed) {
|
|
|
|
if (spawn_cpuworker() < 0) {
|
2006-09-30 00:33:34 +02:00
|
|
|
log_warn(LD_GENERAL,"Cpuworker spawn failed. Will try again later.");
|
2003-08-21 01:05:22 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
num_cpuworkers++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2004-05-10 12:27:54 +02:00
|
|
|
/** Take a pending task from the queue and assign it to 'cpuworker'. */
|
2005-06-11 20:52:12 +02:00
|
|
|
static void
|
|
|
|
process_pending_task(connection_t *cpuworker)
|
|
|
|
{
|
2006-07-23 09:37:35 +02:00
|
|
|
or_circuit_t *circ;
|
2012-12-06 04:34:49 +01:00
|
|
|
create_cell_t *onionskin = NULL;
|
2003-08-21 01:05:22 +02:00
|
|
|
|
2004-04-25 22:37:37 +02:00
|
|
|
tor_assert(cpuworker);
|
2003-08-21 01:05:22 +02:00
|
|
|
|
|
|
|
/* for now only process onion tasks */
|
|
|
|
|
2008-02-06 00:20:49 +01:00
|
|
|
circ = onion_next_task(&onionskin);
|
2004-11-28 10:05:49 +01:00
|
|
|
if (!circ)
|
2003-09-14 04:58:50 +02:00
|
|
|
return;
|
2008-02-06 00:20:49 +01:00
|
|
|
if (assign_onionskin_to_cpuworker(cpuworker, circ, onionskin))
|
2006-02-13 10:02:35 +01:00
|
|
|
log_warn(LD_OR,"assign_to_cpuworker failed. Ignoring.");
|
2003-08-21 01:05:22 +02:00
|
|
|
}
|
|
|
|
|
2006-04-18 21:48:06 +02:00
|
|
|
/** How long should we let a cpuworker stay busy before we give
|
|
|
|
* up on it and decide that we have a bug or infinite loop?
|
|
|
|
* This value is high because some servers with low memory/cpu
|
|
|
|
* sometimes spend an hour or more swapping, and Tor starves. */
|
|
|
|
#define CPUWORKER_BUSY_TIMEOUT (60*60*12)
|
2005-04-07 22:02:00 +02:00
|
|
|
|
2006-03-12 23:48:18 +01:00
|
|
|
/** We have a bug that I can't find. Sometimes, very rarely, cpuworkers get
|
|
|
|
* stuck in the 'busy' state, even though the cpuworker process thinks of
|
|
|
|
* itself as idle. I don't know why. But here's a workaround to kill any
|
|
|
|
* cpuworker that's been busy for more than CPUWORKER_BUSY_TIMEOUT.
|
|
|
|
*/
|
2005-04-07 22:02:00 +02:00
|
|
|
static void
|
2005-06-11 20:52:12 +02:00
|
|
|
cull_wedged_cpuworkers(void)
|
|
|
|
{
|
2005-04-07 22:02:00 +02:00
|
|
|
time_t now = time(NULL);
|
2007-05-22 17:49:14 +02:00
|
|
|
smartlist_t *conns = get_connection_array();
|
2012-07-17 15:33:38 +02:00
|
|
|
SMARTLIST_FOREACH_BEGIN(conns, connection_t *, conn) {
|
2005-04-07 22:02:00 +02:00
|
|
|
if (!conn->marked_for_close &&
|
|
|
|
conn->type == CONN_TYPE_CPUWORKER &&
|
|
|
|
conn->state == CPUWORKER_STATE_BUSY_ONION &&
|
2005-04-08 06:59:34 +02:00
|
|
|
conn->timestamp_lastwritten + CPUWORKER_BUSY_TIMEOUT < now) {
|
2006-02-13 10:02:35 +01:00
|
|
|
log_notice(LD_BUG,
|
2007-03-04 21:11:46 +01:00
|
|
|
"closing wedged cpuworker. Can somebody find the bug?");
|
2005-04-07 22:02:00 +02:00
|
|
|
num_cpuworkers_busy--;
|
|
|
|
num_cpuworkers--;
|
|
|
|
connection_mark_for_close(conn);
|
|
|
|
}
|
2012-07-17 15:33:38 +02:00
|
|
|
} SMARTLIST_FOREACH_END(conn);
|
2005-04-07 22:02:00 +02:00
|
|
|
}
|
|
|
|
|
2008-02-06 17:58:05 +01:00
|
|
|
/** Try to tell a cpuworker to perform the public key operations necessary to
|
|
|
|
* respond to <b>onionskin</b> for the circuit <b>circ</b>.
|
|
|
|
*
|
|
|
|
* If <b>cpuworker</b> is defined, assert that he's idle, and use him. Else,
|
|
|
|
* look for an idle cpuworker and use him. If none idle, queue task onto the
|
|
|
|
* pending onion list and return. Return 0 if we successfully assign the
|
|
|
|
* task, or -1 on failure.
|
2003-08-21 01:05:22 +02:00
|
|
|
*/
|
2005-06-11 20:52:12 +02:00
|
|
|
int
|
2008-02-06 00:20:49 +01:00
|
|
|
assign_onionskin_to_cpuworker(connection_t *cpuworker,
|
2012-12-06 04:34:49 +01:00
|
|
|
or_circuit_t *circ,
|
|
|
|
create_cell_t *onionskin)
|
2005-06-11 20:52:12 +02:00
|
|
|
{
|
2012-12-06 04:34:49 +01:00
|
|
|
cpuworker_request_t req;
|
2011-11-20 00:29:42 +01:00
|
|
|
time_t now = approx_time();
|
|
|
|
static time_t last_culled_cpuworkers = 0;
|
2012-12-27 00:08:01 +01:00
|
|
|
int should_time;
|
2003-09-14 04:58:50 +02:00
|
|
|
|
2011-11-20 00:29:42 +01:00
|
|
|
/* Checking for wedged cpuworkers requires a linear search over all
|
|
|
|
* connections, so let's do it only once a minute.
|
|
|
|
*/
|
|
|
|
#define CULL_CPUWORKERS_INTERVAL 60
|
|
|
|
|
|
|
|
if (last_culled_cpuworkers + CULL_CPUWORKERS_INTERVAL <= now) {
|
|
|
|
cull_wedged_cpuworkers();
|
|
|
|
spawn_enough_cpuworkers();
|
|
|
|
last_culled_cpuworkers = now;
|
|
|
|
}
|
2005-04-07 22:02:00 +02:00
|
|
|
|
2008-02-06 00:20:49 +01:00
|
|
|
if (1) {
|
2004-11-28 10:05:49 +01:00
|
|
|
if (num_cpuworkers_busy == num_cpuworkers) {
|
2006-02-13 10:02:35 +01:00
|
|
|
log_debug(LD_OR,"No idle cpuworkers. Queuing.");
|
2009-07-28 04:38:09 +02:00
|
|
|
if (onion_pending_add(circ, onionskin) < 0) {
|
|
|
|
tor_free(onionskin);
|
2003-08-21 01:05:22 +02:00
|
|
|
return -1;
|
2009-07-28 04:38:09 +02:00
|
|
|
}
|
2003-08-21 01:05:22 +02:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2004-04-25 00:17:50 +02:00
|
|
|
if (!cpuworker)
|
2005-12-14 21:40:40 +01:00
|
|
|
cpuworker = connection_get_by_type_state(CONN_TYPE_CPUWORKER,
|
|
|
|
CPUWORKER_STATE_IDLE);
|
2003-08-21 01:05:22 +02:00
|
|
|
|
2004-04-25 22:37:37 +02:00
|
|
|
tor_assert(cpuworker);
|
2003-08-21 01:05:22 +02:00
|
|
|
|
2012-08-31 00:47:05 +02:00
|
|
|
if (!circ->p_chan) {
|
|
|
|
log_info(LD_OR,"circ->p_chan gone. Failing circ.");
|
2008-02-06 00:20:49 +01:00
|
|
|
tor_free(onionskin);
|
2003-09-14 04:58:50 +02:00
|
|
|
return -1;
|
|
|
|
}
|
2012-12-06 04:34:49 +01:00
|
|
|
|
2012-12-27 00:08:01 +01:00
|
|
|
should_time = should_time_request(onionskin->handshake_type);
|
2012-12-06 04:34:49 +01:00
|
|
|
memset(&req, 0, sizeof(req));
|
|
|
|
req.magic = CPUWORKER_REQUEST_MAGIC;
|
|
|
|
tag_pack(req.tag, circ->p_chan->global_identifier,
|
2006-07-26 21:07:26 +02:00
|
|
|
circ->p_circ_id);
|
2012-12-27 00:08:01 +01:00
|
|
|
req.timed = should_time;
|
2003-09-14 04:58:50 +02:00
|
|
|
|
2003-08-21 01:05:22 +02:00
|
|
|
cpuworker->state = CPUWORKER_STATE_BUSY_ONION;
|
2006-06-12 13:59:19 +02:00
|
|
|
/* touch the lastwritten timestamp, since that's how we check to
|
|
|
|
* see how long it's been since we asked the question, and sometimes
|
|
|
|
* we check before the first call to connection_handle_write(). */
|
2012-12-27 00:08:01 +01:00
|
|
|
cpuworker->timestamp_lastwritten = now;
|
2003-08-21 01:05:22 +02:00
|
|
|
num_cpuworkers_busy++;
|
|
|
|
|
2012-12-06 04:34:49 +01:00
|
|
|
req.task = CPUWORKER_TASK_ONION;
|
|
|
|
memcpy(&req.create_cell, onionskin, sizeof(create_cell_t));
|
|
|
|
|
2008-02-06 00:20:49 +01:00
|
|
|
tor_free(onionskin);
|
2012-12-06 04:34:49 +01:00
|
|
|
|
2012-12-27 00:08:01 +01:00
|
|
|
if (should_time)
|
|
|
|
tor_gettimeofday(&req.started_at);
|
|
|
|
|
2012-12-06 04:34:49 +01:00
|
|
|
connection_write_to_buf((void*)&req, sizeof(req), cpuworker);
|
|
|
|
memwipe(&req, 0, sizeof(req));
|
2003-08-21 01:05:22 +02:00
|
|
|
}
|
2003-11-16 18:00:02 +01:00
|
|
|
return 0;
|
2003-08-21 01:05:22 +02:00
|
|
|
}
|
2005-06-09 21:03:31 +02:00
|
|
|
|