/* Copyright (c) 2003-2004, Roger Dingledine.
 * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
 * Copyright (c) 2007-2016, The Tor Project, Inc. */
/* See LICENSE for licensing information */

/**
 * \file cpuworker.c
 * \brief Uses the workqueue/threadpool code to farm CPU-intensive activities
 * out to worker threads.
 *
 * Right now, we only use this for processing onionskins.
 **/
#include "or.h"
#include "channel.h"
#include "circuitbuild.h"
#include "circuitlist.h"
#include "connection_or.h"
#include "config.h"
#include "cpuworker.h"
#include "main.h"
#include "onion.h"
#include "rephist.h"
#include "router.h"
#include "workqueue.h"

#include <event2/event.h>

static void queue_pending_tasks(void);
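
/** State kept by each worker thread: a private copy of this relay's onion
 * keys, plus a generation counter that update_state_threadfn() increments
 * every time the keys are replaced. */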
typedef struct worker_state_s {
  int generation;
  server_onion_keys_t *onion_keys;
} worker_state_t;
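
/** Constructor callback for the threadpool: allocate a fresh worker_state_t
 * holding its own copy of this server's onion keys. */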
static void *
worker_state_new(void *arg)
{
  worker_state_t *ws;
  (void)arg;
  ws = tor_malloc_zero(sizeof(worker_state_t));
  ws->onion_keys = server_onion_keys_new();
  return ws;
}
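
/** Destructor callback for the threadpool: release the onion keys held in
 * <b>arg</b>, along with the worker_state_t itself. */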
static void
worker_state_free(void *arg)
{
  worker_state_t *ws = arg;
  server_onion_keys_free(ws->onion_keys);
  tor_free(ws);
}
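
/* All workers share one reply queue and one thread pool; reply_event wakes
 * the main event loop whenever a worker has finished a job.
 * request_sample_rng is a non-cryptographic RNG used only to pick which
 * handshakes to time, and the pending-task counters bound how much work we
 * hand to the pool at once. */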
static replyqueue_t *replyqueue = NULL;
static threadpool_t *threadpool = NULL;
static struct event *reply_event = NULL;

static tor_weak_rng_t request_sample_rng = TOR_WEAK_RNG_INIT;

static int total_pending_tasks = 0;
static int max_pending_tasks = 128;
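
/** Libevent callback: the reply queue's alert socket is readable, so drain
 * the queue, running each finished job's reply function on the main
 * thread. */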
static void
replyqueue_process_cb(evutil_socket_t sock, short events, void *arg)
{
  replyqueue_t *rq = arg;
  (void) sock;
  (void) events;
  replyqueue_process(rq);
}

/** Initialize the cpuworker subsystem. It is OK to call this more than once
 * during Tor's lifetime.
 */
void
cpu_init(void)
{
  if (!replyqueue) {
    replyqueue = replyqueue_new(0);
  }
  if (!reply_event) {
    reply_event = tor_event_new(tor_libevent_get_base(),
                                replyqueue_get_socket(replyqueue),
                                EV_READ|EV_PERSIST,
                                replyqueue_process_cb,
                                replyqueue);
    event_add(reply_event, NULL);
  }
  if (!threadpool) {
    threadpool = threadpool_new(get_num_cpus(get_options()),
                                replyqueue,
                                worker_state_new,
                                worker_state_free,
                                NULL);
  }
  /* Total voodoo. Can we make this more sensible? */
  max_pending_tasks = get_num_cpus(get_options()) * 64;
  crypto_seed_weak_rng(&request_sample_rng);
}
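
/* Big picture: cpu_init() sets up the shared pool; incoming create cells
 * reach assign_onionskin_to_cpuworker(), which packages each one as a job
 * for cpuworker_onion_handshake_threadfn() on a worker thread; the answer
 * is then delivered back to the main thread by
 * cpuworker_onion_handshake_replyfn(). */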

/** Magic numbers to make sure our cpuworker_requests don't grow any
 * mis-framing bugs. */
#define CPUWORKER_REQUEST_MAGIC 0xda4afeed
#define CPUWORKER_REPLY_MAGIC 0x5eedf00d

/** A request sent to a cpuworker. */
typedef struct cpuworker_request_t {
  /** Magic number; must be CPUWORKER_REQUEST_MAGIC. */
  uint32_t magic;

  /** Flag: Are we timing this request? */
  unsigned timed : 1;
  /** If we're timing this request, when was it sent to the cpuworker? */
  struct timeval started_at;

  /** A create cell for the cpuworker to process. */
  create_cell_t create_cell;

  /* Turn the above into a tagged union if needed. */
} cpuworker_request_t;

/** A reply sent by a cpuworker. */
typedef struct cpuworker_reply_t {
  /** Magic number; must be CPUWORKER_REPLY_MAGIC. */
  uint32_t magic;

  /** True iff the request was processed successfully. */
  uint8_t success;

  /** Are we timing this request? */
  unsigned int timed : 1;
  /** What handshake type was the request? (Used for timing) */
  uint16_t handshake_type;
  /** When did we send the request to the cpuworker? */
  struct timeval started_at;
  /** Once the cpuworker received the request, how many microseconds did it
   * take? (This shouldn't overflow; 4 billion microseconds is over an hour,
   * and we'll never have an onion handshake that takes so long.) */
  uint32_t n_usec;

  /** Output of processing a create cell
   *
   * @{
   */
  /** The created cell to send back. */
  created_cell_t created_cell;
  /** The keys to use on this circuit. */
  uint8_t keys[CPATH_KEY_MATERIAL_LEN];
  /** Input to use for authenticating introduce1 cells. */
  uint8_t rend_auth_material[DIGEST_LEN];
  /**@}*/
} cpuworker_reply_t;
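
/** The unit of work we hand to the threadpool for each onionskin: the
 * circuit the handshake belongs to, plus a union holding the request on the
 * way out and the reply on the way back. */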
typedef struct cpuworker_job_u {
  or_circuit_t *circ;
  union {
    cpuworker_request_t request;
    cpuworker_reply_t reply;
  } u;
} cpuworker_job_t;
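
/** Worker-thread function for onion key rotation: <b>work_</b> is a fresh
 * worker_state_t built by worker_state_new(); steal its keys into this
 * thread's own state, free the old keys, and bump the generation count. */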
static workqueue_reply_t
update_state_threadfn(void *state_, void *work_)
{
  worker_state_t *state = state_;
  worker_state_t *update = work_;
  server_onion_keys_free(state->onion_keys);
  state->onion_keys = update->onion_keys;
  update->onion_keys = NULL;
  ++state->generation;
  return WQ_RPL_REPLY;
}

/** Called when the onion key has changed: queue an update for every worker
 * thread, so that each one regenerates its state with the new keys.
 */
void
cpuworkers_rotate_keyinfo(void)
{
  if (!threadpool) {
    /* If we're a client, then we won't have cpuworkers, and we won't need
     * to tell them to rotate their state.
     */
    return;
  }
  if (threadpool_queue_update(threadpool,
                              worker_state_new,
                              update_state_threadfn,
                              worker_state_free,
                              NULL)) {
    log_warn(LD_OR, "Failed to queue key update for worker threads.");
  }
}

/** Indexed by handshake type: how many onionskins have we processed and
 * counted of that type? */
static uint64_t onionskins_n_processed[MAX_ONION_HANDSHAKE_TYPE+1];
/** Indexed by handshake type, corresponding to the onionskins counted in
 * onionskins_n_processed: how many microseconds have we spent in cpuworkers
 * processing that kind of onionskin? */
static uint64_t onionskins_usec_internal[MAX_ONION_HANDSHAKE_TYPE+1];
/** Indexed by handshake type, corresponding to onionskins counted in
 * onionskins_n_processed: how many microseconds have we spent waiting for
 * cpuworkers to give us answers for that kind of onionskin?
 */
static uint64_t onionskins_usec_roundtrip[MAX_ONION_HANDSHAKE_TYPE+1];

/** If any onionskin takes longer than this, we clip them to this
 * time. (microseconds) */
#define MAX_BELIEVABLE_ONIONSKIN_DELAY (2*1000*1000)
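
/* These running totals feed estimated_usec_for_onionskins() and the
 * overhead logging below; cpuworker_onion_handshake_replyfn() halves them
 * periodically so that recent handshakes dominate the averages. */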

/** Return true iff we'd like to measure a handshake of type
 * <b>onionskin_type</b>. Call only from the main thread. */
static int
should_time_request(uint16_t onionskin_type)
{
  /* If we've never heard of this type, we shouldn't even be here. */
  if (onionskin_type > MAX_ONION_HANDSHAKE_TYPE)
    return 0;
  /* Measure the first N handshakes of each type, to ensure we have a
   * sample */
  if (onionskins_n_processed[onionskin_type] < 4096)
    return 1;
  /* Otherwise, measure with P=1/128. We avoid doing this for every
   * handshake, since the measurement itself can take a little time. */
  return tor_weak_random_one_in_n(&request_sample_rng, 128);
}

/** Return an estimate of how many microseconds we will need for a single
 * cpuworker to process <b>n_requests</b> onionskins of type
 * <b>onionskin_type</b>. */
uint64_t
estimated_usec_for_onionskins(uint32_t n_requests, uint16_t onionskin_type)
{
  if (onionskin_type > MAX_ONION_HANDSHAKE_TYPE) /* should be impossible */
    return 1000 * (uint64_t)n_requests;
  if (PREDICT_UNLIKELY(onionskins_n_processed[onionskin_type] < 100)) {
    /* Until we have 100 data points, just assume everything takes 1 msec. */
    return 1000 * (uint64_t)n_requests;
  } else {
    /* This can't overflow: we'll never have more than 500000 onionskins
     * measured in onionskin_usec_internal, and they won't take anything near
     * 1 sec each, and we won't have anything like 1 million queued
     * onionskins. But that's 5e5 * 1e6 * 1e6, which is still less than
     * UINT64_MAX. */
    return (onionskins_usec_internal[onionskin_type] * n_requests) /
      onionskins_n_processed[onionskin_type];
  }
}
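
/* Worked example: with 200 onionskins of some type measured at 400000 usec
 * total, estimated_usec_for_onionskins(50, type) returns
 * (400000 * 50) / 200 = 100000 usec, i.e. 2 msec per onionskin. */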

/** Compute the absolute and relative overhead of using the cpuworker
 * framework for onionskins of type <b>onionskin_type</b>. */
static int
get_overhead_for_onionskins(uint32_t *usec_out, double *frac_out,
                            uint16_t onionskin_type)
{
  uint64_t overhead;

  *usec_out = 0;
  *frac_out = 0.0;

  if (onionskin_type > MAX_ONION_HANDSHAKE_TYPE) /* should be impossible */
    return -1;
  if (onionskins_n_processed[onionskin_type] == 0 ||
      onionskins_usec_internal[onionskin_type] == 0 ||
      onionskins_usec_roundtrip[onionskin_type] == 0)
    return -1;

  overhead = onionskins_usec_roundtrip[onionskin_type] -
    onionskins_usec_internal[onionskin_type];

  *usec_out = (uint32_t)(overhead / onionskins_n_processed[onionskin_type]);
  *frac_out = U64_TO_DBL(overhead) / onionskins_usec_internal[onionskin_type];

  return 0;
}

/** If we've measured overhead for onionskins of type <b>onionskin_type</b>,
 * log it. */
void
cpuworker_log_onionskin_overhead(int severity, int onionskin_type,
                                 const char *onionskin_type_name)
{
  uint32_t overhead;
  double relative_overhead;
  int r;

  r = get_overhead_for_onionskins(&overhead, &relative_overhead,
                                  onionskin_type);
  if (!overhead || r < 0)
    return;

  log_fn(severity, LD_OR,
         "%s onionskins have averaged %u usec overhead (%.2f%%) in "
         "cpuworker code.",
         onionskin_type_name, (unsigned)overhead, relative_overhead*100);
}

/** Handle a reply from the worker threads. */
static void
cpuworker_onion_handshake_replyfn(void *work_)
{
  cpuworker_job_t *job = work_;
  cpuworker_reply_t rpl;
  or_circuit_t *circ = NULL;

  tor_assert(total_pending_tasks > 0);
  --total_pending_tasks;

  /* Could avoid this, but doesn't matter. */
  memcpy(&rpl, &job->u.reply, sizeof(rpl));

  tor_assert(rpl.magic == CPUWORKER_REPLY_MAGIC);

  if (rpl.timed && rpl.success &&
      rpl.handshake_type <= MAX_ONION_HANDSHAKE_TYPE) {
    /* Time how long this request took. The handshake_type check should be
       needless, but let's leave it in to be safe. */
    struct timeval tv_end, tv_diff;
    int64_t usec_roundtrip;
    tor_gettimeofday(&tv_end);
    timersub(&tv_end, &rpl.started_at, &tv_diff);
    usec_roundtrip = ((int64_t)tv_diff.tv_sec)*1000000 + tv_diff.tv_usec;
    if (usec_roundtrip >= 0 &&
        usec_roundtrip < MAX_BELIEVABLE_ONIONSKIN_DELAY) {
      ++onionskins_n_processed[rpl.handshake_type];
      onionskins_usec_internal[rpl.handshake_type] += rpl.n_usec;
      onionskins_usec_roundtrip[rpl.handshake_type] += usec_roundtrip;
      if (onionskins_n_processed[rpl.handshake_type] >= 500000) {
        /* Scale down every 500000 handshakes. On a busy server, that's
         * less impressive than it sounds. */
        onionskins_n_processed[rpl.handshake_type] /= 2;
        onionskins_usec_internal[rpl.handshake_type] /= 2;
        onionskins_usec_roundtrip[rpl.handshake_type] /= 2;
      }
    }
  }

  circ = job->circ;

  log_debug(LD_OR,
            "Unpacking cpuworker reply %p, circ=%p, success=%d",
            job, circ, rpl.success);

  if (circ->base_.magic == DEAD_CIRCUIT_MAGIC) {
    /* The circuit was supposed to get freed while the reply was
     * pending. Instead, it got left for us to free so that we wouldn't freak
     * out when the job->circ field wound up pointing to nothing. */
    log_debug(LD_OR, "Circuit died while reply was pending. Freeing memory.");
    circ->base_.magic = 0;
    tor_free(circ);
    goto done_processing;
  }

  circ->workqueue_entry = NULL;

  if (TO_CIRCUIT(circ)->marked_for_close) {
    /* We already marked this circuit; we can't call it open. */
    log_debug(LD_OR,"circuit is already marked.");
    goto done_processing;
  }

  if (rpl.success == 0) {
    log_debug(LD_OR,
              "decoding onionskin failed. "
              "(Old key or bad software.) Closing.");
    circuit_mark_for_close(TO_CIRCUIT(circ), END_CIRC_REASON_TORPROTOCOL);
    goto done_processing;
  }

  if (onionskin_answer(circ,
                       &rpl.created_cell,
                       (const char*)rpl.keys,
                       rpl.rend_auth_material) < 0) {
    log_warn(LD_OR,"onionskin_answer failed. Closing.");
    circuit_mark_for_close(TO_CIRCUIT(circ), END_CIRC_REASON_INTERNAL);
    goto done_processing;
  }
  log_debug(LD_OR,"onionskin_answer succeeded. Yay.");

 done_processing:
  memwipe(&rpl, 0, sizeof(rpl));
  memwipe(job, 0, sizeof(*job));
  tor_free(job);
  queue_pending_tasks();
}

/** Implementation function for onion handshake requests. */
static workqueue_reply_t
cpuworker_onion_handshake_threadfn(void *state_, void *work_)
{
  worker_state_t *state = state_;
  cpuworker_job_t *job = work_;

  /* variables for onion processing */
  server_onion_keys_t *onion_keys = state->onion_keys;
  cpuworker_request_t req;
  cpuworker_reply_t rpl;

  memcpy(&req, &job->u.request, sizeof(req));

  tor_assert(req.magic == CPUWORKER_REQUEST_MAGIC);
  memset(&rpl, 0, sizeof(rpl));

  const create_cell_t *cc = &req.create_cell;
  created_cell_t *cell_out = &rpl.created_cell;
  struct timeval tv_start = {0,0}, tv_end;
  int n;
  rpl.timed = req.timed;
  rpl.started_at = req.started_at;
  rpl.handshake_type = cc->handshake_type;
  if (req.timed)
    tor_gettimeofday(&tv_start);
  n = onion_skin_server_handshake(cc->handshake_type,
                                  cc->onionskin, cc->handshake_len,
                                  onion_keys,
                                  cell_out->reply,
                                  rpl.keys, CPATH_KEY_MATERIAL_LEN,
                                  rpl.rend_auth_material);
  if (n < 0) {
    /* failure */
    log_debug(LD_OR,"onion_skin_server_handshake failed.");
    memset(&rpl, 0, sizeof(rpl));
    rpl.success = 0;
  } else {
    /* success */
    log_debug(LD_OR,"onion_skin_server_handshake succeeded.");
    cell_out->handshake_len = n;
    switch (cc->cell_type) {
    case CELL_CREATE:
      cell_out->cell_type = CELL_CREATED; break;
    case CELL_CREATE2:
      cell_out->cell_type = CELL_CREATED2; break;
    case CELL_CREATE_FAST:
      cell_out->cell_type = CELL_CREATED_FAST; break;
    default:
      tor_assert(0);
      return WQ_RPL_SHUTDOWN;
    }
    rpl.success = 1;
  }
  rpl.magic = CPUWORKER_REPLY_MAGIC;
  if (req.timed) {
    struct timeval tv_diff;
    int64_t usec;
    tor_gettimeofday(&tv_end);
    timersub(&tv_end, &tv_start, &tv_diff);
    usec = ((int64_t)tv_diff.tv_sec)*1000000 + tv_diff.tv_usec;
    if (usec < 0 || usec > MAX_BELIEVABLE_ONIONSKIN_DELAY)
      rpl.n_usec = MAX_BELIEVABLE_ONIONSKIN_DELAY;
    else
      rpl.n_usec = (uint32_t) usec;
  }

  memcpy(&job->u.reply, &rpl, sizeof(rpl));

  memwipe(&req, 0, sizeof(req));
  memwipe(&rpl, 0, sizeof(rpl));
  return WQ_RPL_REPLY;
}

/** Take pending tasks from the queue and assign them to cpuworkers. */
static void
queue_pending_tasks(void)
{
  or_circuit_t *circ;
  create_cell_t *onionskin = NULL;

  while (total_pending_tasks < max_pending_tasks) {
    circ = onion_next_task(&onionskin);

    if (!circ)
      return;

    if (assign_onionskin_to_cpuworker(circ, onionskin))
      log_warn(LD_OR,"assign_to_cpuworker failed. Ignoring.");
  }
}

/** Try to tell a cpuworker to perform the public key operations necessary to
 * respond to <b>onionskin</b> for the circuit <b>circ</b>.
 *
 * Return 0 if we successfully assign the task, or -1 on failure.
 */
int
assign_onionskin_to_cpuworker(or_circuit_t *circ,
                              create_cell_t *onionskin)
{
  workqueue_entry_t *queue_entry;
  cpuworker_job_t *job;
  cpuworker_request_t req;
  int should_time;

  tor_assert(threadpool);

  if (!circ->p_chan) {
    log_info(LD_OR,"circ->p_chan gone. Failing circ.");
    tor_free(onionskin);
    return -1;
  }

  if (total_pending_tasks >= max_pending_tasks) {
    log_debug(LD_OR,"No idle cpuworkers. Queuing.");
    if (onion_pending_add(circ, onionskin) < 0) {
      tor_free(onionskin);
      return -1;
    }
    return 0;
  }

  if (connection_or_digest_is_known_relay(circ->p_chan->identity_digest))
    rep_hist_note_circuit_handshake_assigned(onionskin->handshake_type);

  should_time = should_time_request(onionskin->handshake_type);
  memset(&req, 0, sizeof(req));
  req.magic = CPUWORKER_REQUEST_MAGIC;
  req.timed = should_time;

  memcpy(&req.create_cell, onionskin, sizeof(create_cell_t));

  tor_free(onionskin);

  if (should_time)
    tor_gettimeofday(&req.started_at);

  job = tor_malloc_zero(sizeof(cpuworker_job_t));
  job->circ = circ;
  memcpy(&job->u.request, &req, sizeof(req));
  memwipe(&req, 0, sizeof(req));

  ++total_pending_tasks;
  queue_entry = threadpool_queue_work(threadpool,
                                      cpuworker_onion_handshake_threadfn,
                                      cpuworker_onion_handshake_replyfn,
                                      job);
  if (!queue_entry) {
    log_warn(LD_BUG, "Couldn't queue work on threadpool");
    /* This task will never run, so stop counting it as pending. */
    --total_pending_tasks;
    tor_free(job);
    return -1;
  }

  log_debug(LD_OR, "Queued task %p (qe=%p, circ=%p)",
            job, queue_entry, job->circ);

  circ->workqueue_entry = queue_entry;

  return 0;
}

/** If <b>circ</b> has a pending handshake that hasn't been processed yet,
 * remove it from the worker queue. */
void
cpuworker_cancel_circ_handshake(or_circuit_t *circ)
{
  cpuworker_job_t *job;
  if (circ->workqueue_entry == NULL)
    return;

  job = workqueue_entry_cancel(circ->workqueue_entry);
  if (job) {
    /* It successfully cancelled. */
    memwipe(job, 0xe0, sizeof(*job));
    tor_free(job);
    tor_assert(total_pending_tasks > 0);
    --total_pending_tasks;
    /* if (!job), this is done in cpuworker_onion_handshake_replyfn. */
    circ->workqueue_entry = NULL;
  }
}