/* Copyright (c) 2013-2015, The Tor Project, Inc. */
/* See LICENSE for licensing information */

#include "orconfig.h"
#include "compat.h"
#include "compat_threads.h"
#include "util.h"
#include "workqueue.h"
#include "tor_queue.h"
#include "torlog.h"

struct threadpool_s {
  /** An array of pointers to workerthread_t: one for each running worker
   * thread. */
  struct workerthread_s **threads;

  /** Condition variable that we wait on when we have no work, and which
   * gets signaled when our queue becomes nonempty. */
  tor_cond_t condition;
  /** Queue of pending work that we have to do. */
  TOR_TAILQ_HEAD(, workqueue_entry_s) work;

  /** The current 'update generation' of the threadpool. Any thread that is
   * at an earlier generation needs to run the update function. */
  unsigned generation;

  /** Function that should be run for updates on each thread. */
  int (*update_fn)(void *, void *);
  /** Function to free update arguments if they can't be run. */
  void (*free_update_arg_fn)(void *);
  /** Array of n_threads update arguments. */
  void **update_args;

  /** Number of elements in threads. */
  int n_threads;
  /** Mutex to protect all the above fields. */
  tor_mutex_t lock;

  /** A reply queue to use when constructing new threads. */
  replyqueue_t *reply_queue;

  /** Functions used to allocate and free thread state. */
  void *(*new_thread_state_fn)(void*);
  void (*free_thread_state_fn)(void*);
  void *new_thread_state_arg;
};

struct workqueue_entry_s {
  /** The next workqueue_entry_t that's pending on the same pool's work
   * queue or reply queue. */
  TOR_TAILQ_ENTRY(workqueue_entry_s) next_work;
  /** The threadpool to which this workqueue_entry_t was assigned. This field
   * is set when the workqueue_entry_t is created, and won't be cleared until
   * after it's handled in the main thread. */
  struct threadpool_s *on_pool;
  /** True iff this entry is waiting for a worker to start processing it. */
  uint8_t pending;
  /** Function to run in the worker thread. */
  int (*fn)(void *state, void *arg);
  /** Function to run while processing the reply queue. */
  void (*reply_fn)(void *arg);
  /** Argument for the above functions. */
  void *arg;
};

struct replyqueue_s {
  /** Mutex to protect the answers field. */
  tor_mutex_t lock;
  /** Doubly-linked list of answers that the reply queue needs to handle. */
  TOR_TAILQ_HEAD(, workqueue_entry_s) answers;

  /** Mechanism to wake up the main thread when it is receiving answers. */
  alert_sockets_t alert;
};

/** A worker thread represents a single thread in a thread pool. Worker
 * threads pull items from the pool's shared work queue; because several
 * threads run concurrently, queued work is not guaranteed to complete
 * strictly in order. */
typedef struct workerthread_s {
  /** Which thread is this? In range 0..in_pool->n_threads-1 */
  int index;
  /** The pool this thread is a part of. */
  struct threadpool_s *in_pool;
  /** True iff this thread is currently in its loop. (Not currently used.) */
  unsigned is_running;
  /** True iff this thread has crashed or is shut down for some reason. (Not
   * currently used.) */
  unsigned is_shut_down;
  /** True if we're waiting for more elements to get added to the queue. */
  unsigned waiting;
  /** User-supplied state field that we pass to the worker functions of each
   * work item. */
  void *state;
  /** Reply queue to which we pass our results. */
  replyqueue_t *reply_queue;
  /** The current update generation of this thread. */
  unsigned generation;
} workerthread_t;

static void queue_reply(replyqueue_t *queue, workqueue_entry_t *work);

/** Allocate and return a new workqueue_entry_t, set up to run the function
 * <b>fn</b> in the worker thread, and <b>reply_fn</b> in the main
 * thread. See threadpool_queue_work() for full documentation. */
static workqueue_entry_t *
workqueue_entry_new(int (*fn)(void*, void*),
                    void (*reply_fn)(void*),
                    void *arg)
{
  workqueue_entry_t *ent = tor_malloc_zero(sizeof(workqueue_entry_t));
  ent->fn = fn;
  ent->reply_fn = reply_fn;
  ent->arg = arg;
  return ent;
}

/**
 * Release all storage held in <b>ent</b>. Call only when <b>ent</b> is not on
 * any queue.
 */
static void
workqueue_entry_free(workqueue_entry_t *ent)
{
  if (!ent)
    return;
  memset(ent, 0xf0, sizeof(*ent));
  tor_free(ent);
}

/**
 * Cancel a workqueue_entry_t that has been returned from
 * threadpool_queue_work.
 *
 * You must not call this function on any work whose reply function has been
 * executed in the main thread; that will cause undefined behavior (probably,
 * a crash).
 *
 * If the work is cancelled, this function returns the argument passed to the
 * work function. It is the caller's responsibility to free this storage.
 *
 * This function will have no effect if the worker thread has already executed
 * or begun to execute the work item. In that case, it will return NULL.
 */
void *
workqueue_entry_cancel(workqueue_entry_t *ent)
{
  int cancelled = 0;
  void *result = NULL;
  tor_mutex_acquire(&ent->on_pool->lock);
  if (ent->pending) {
    TOR_TAILQ_REMOVE(&ent->on_pool->work, ent, next_work);
    cancelled = 1;
    result = ent->arg;
  }
  tor_mutex_release(&ent->on_pool->lock);

  if (cancelled) {
    tor_free(ent);
  }
  return result;
}
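
/* Illustrative sketch (not part of this file): how a caller might cancel
 * queued work and reclaim its argument. The names "job", "my_work_fn", and
 * "my_reply_fn" are hypothetical; the callbacks are assumed to have been
 * passed to threadpool_queue_work() as documented below.
 *
 *   workqueue_entry_t *ent = threadpool_queue_work(pool, my_work_fn,
 *                                                  my_reply_fn, job);
 *   void *arg = workqueue_entry_cancel(ent);
 *   if (arg) {
 *     // The work never ran; we own "job" again and must free it ourselves.
 *     tor_free(arg);
 *   } else {
 *     // Too late to cancel: my_reply_fn() will still run from
 *     // replyqueue_process(), and must free "job" there.
 *   }
 */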

/** Return true iff <b>thread</b> has work to do: either the pool's work
 * queue is nonempty, or this thread has not yet run the latest update
 * function. The caller must hold the pool's lock. */
static int
worker_thread_has_work(workerthread_t *thread)
{
  return !TOR_TAILQ_EMPTY(&thread->in_pool->work) ||
      thread->generation != thread->in_pool->generation;
}

/**
 * Main function for the worker thread.
 */
static void
worker_thread_main(void *thread_)
{
  workerthread_t *thread = thread_;
  threadpool_t *pool = thread->in_pool;
  workqueue_entry_t *work;
  int result;

  thread->is_running = 1;

  tor_mutex_acquire(&pool->lock);
  while (1) {
    /* lock must be held at this point. */
    while (worker_thread_has_work(thread)) {
      /* lock must be held at this point. */
      if (thread->in_pool->generation != thread->generation) {
        void *arg = thread->in_pool->update_args[thread->index];
        thread->in_pool->update_args[thread->index] = NULL;
        int (*update_fn)(void*,void*) = thread->in_pool->update_fn;
        thread->generation = thread->in_pool->generation;
        tor_mutex_release(&pool->lock);

        int r = update_fn(thread->state, arg);

        if (r < 0) {
          thread->is_running = 0;
          thread->is_shut_down = 1;
          return;
        }

        tor_mutex_acquire(&pool->lock);
        continue;
      }
      work = TOR_TAILQ_FIRST(&pool->work);
      TOR_TAILQ_REMOVE(&pool->work, work, next_work);
      work->pending = 0;
      tor_mutex_release(&pool->lock);

      /* We run the work function without holding the thread lock. This
       * is the main thread's first opportunity to give us more work. */
      result = work->fn(thread->state, work->arg);

      /* Queue the reply for the main thread. */
      queue_reply(thread->reply_queue, work);

      /* We may need to exit the thread. */
      if (result >= WQ_RPL_ERROR) {
        thread->is_running = 0;
        thread->is_shut_down = 1;
        return;
      }
      tor_mutex_acquire(&pool->lock);
    }
    /* At this point the lock is held, and there is no work in the pool's
     * queue. */

    /* TODO: support an idle-function */

    /* Okay. Now, wait till somebody has work for us. */
    /* XXXX we could just omit waiting and instead */
    thread->waiting = 1;
    if (tor_cond_wait(&pool->condition, &pool->lock, NULL) < 0) {
      /* XXXX ERROR */
    }
    thread->waiting = 0;
  }
}

/** Put a reply on the reply queue. The reply must not currently be on
 * the pool's work queue. */
static void
queue_reply(replyqueue_t *queue, workqueue_entry_t *work)
{
  int was_empty;
  tor_mutex_acquire(&queue->lock);
  was_empty = TOR_TAILQ_EMPTY(&queue->answers);
  TOR_TAILQ_INSERT_TAIL(&queue->answers, work, next_work);
  tor_mutex_release(&queue->lock);

  if (was_empty) {
    if (queue->alert.alert_fn(queue->alert.write_fd) < 0) {
      /* XXXX complain! */
    }
  }
}

/** Allocate and start a new worker thread to use state object <b>state</b>,
 * and send responses to <b>replyqueue</b>. */
static workerthread_t *
workerthread_new(void *state, threadpool_t *pool, replyqueue_t *replyqueue)
{
  workerthread_t *thr = tor_malloc_zero(sizeof(workerthread_t));
  thr->state = state;
  thr->reply_queue = replyqueue;
  thr->in_pool = pool;

  if (spawn_func(worker_thread_main, thr) < 0) {
    log_err(LD_GENERAL, "Can't launch worker thread.");
    tor_free(thr);
    return NULL;
  }

  return thr;
}

/**
 * Queue an item of work for a thread in a thread pool. The function
 * <b>fn</b> will be run in a worker thread, and will receive as arguments the
 * thread's state object, and the provided object <b>arg</b>. It must return
 * one of WQ_RPL_REPLY, WQ_RPL_ERROR, or WQ_RPL_SHUTDOWN.
 *
 * Regardless of its return value, the function <b>reply_fn</b> will later be
 * run in the main thread when it invokes replyqueue_process(), and will
 * receive as its argument the same <b>arg</b> object. It's the reply
 * function's responsibility to free the work object.
 *
 * On success, return a workqueue_entry_t object that can be passed to
 * workqueue_entry_cancel(). On failure, return NULL.
 *
 * Note that because several worker threads pull from the shared work queue
 * concurrently, work items are not guaranteed to complete strictly in order.
 */
workqueue_entry_t *
threadpool_queue_work(threadpool_t *pool,
                      int (*fn)(void *, void *),
                      void (*reply_fn)(void *),
                      void *arg)
{
  workqueue_entry_t *ent = workqueue_entry_new(fn, reply_fn, arg);
  ent->on_pool = pool;
  ent->pending = 1;

  tor_mutex_acquire(&pool->lock);

  TOR_TAILQ_INSERT_TAIL(&pool->work, ent, next_work);

  tor_mutex_release(&pool->lock);

  tor_cond_signal_one(&pool->condition);

  return ent;
}
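
/* Illustrative sketch (not part of this file): a minimal pair of callbacks
 * for threadpool_queue_work(). "squaring_job_t", "square_in_worker", and
 * "handle_reply_in_main" are hypothetical names invented for this example;
 * "pool" is assumed to come from threadpool_new().
 *
 *   typedef struct squaring_job_t {
 *     int input;
 *     int output;
 *   } squaring_job_t;
 *
 *   static int
 *   square_in_worker(void *state, void *arg)   // runs in a worker thread
 *   {
 *     squaring_job_t *job = arg;
 *     (void) state;                            // per-thread state, unused here
 *     job->output = job->input * job->input;
 *     return WQ_RPL_REPLY;
 *   }
 *
 *   static void
 *   handle_reply_in_main(void *arg)            // runs from replyqueue_process()
 *   {
 *     squaring_job_t *job = arg;
 *     log_info(LD_GENERAL, "%d squared is %d", job->input, job->output);
 *     tor_free(job);                           // the reply fn owns the job
 *   }
 *
 *   // Somewhere on the main thread:
 *   squaring_job_t *job = tor_malloc_zero(sizeof(*job));
 *   job->input = 7;
 *   threadpool_queue_work(pool, square_in_worker, handle_reply_in_main, job);
 */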

/**
 * Queue a copy of a work item for every thread in a pool. This can be used,
 * for example, to tell the threads to update some parameter in their states.
 *
 * Arguments are as for <b>threadpool_queue_work</b>, except that the
 * <b>arg</b> value is passed to <b>dup_fn</b> once per thread to make a copy
 * of it.
 *
 * UPDATE FUNCTIONS MUST BE IDEMPOTENT. We do not guarantee that every update
 * will be run. If a new update is scheduled before the old update finishes
 * running, then the new will replace the old in any threads that haven't run
 * it yet.
 *
 * Return 0 on success, -1 on failure.
 */
int
threadpool_queue_update(threadpool_t *pool,
                        void *(*dup_fn)(void *),
                        int (*fn)(void *, void *),
                        void (*free_fn)(void *),
                        void *arg)
{
  int i, n_threads;
  void (*old_args_free_fn)(void *arg);
  void **old_args;
  void **new_args;

  tor_mutex_acquire(&pool->lock);
  n_threads = pool->n_threads;
  old_args = pool->update_args;
  old_args_free_fn = pool->free_update_arg_fn;

  new_args = tor_calloc(n_threads, sizeof(void*));
  for (i = 0; i < n_threads; ++i) {
    if (dup_fn)
      new_args[i] = dup_fn(arg);
    else
      new_args[i] = arg;
  }

  pool->update_args = new_args;
  pool->free_update_arg_fn = free_fn;
  pool->update_fn = fn;
  ++pool->generation;

  tor_mutex_release(&pool->lock);

  tor_cond_signal_all(&pool->condition);

  if (old_args) {
    for (i = 0; i < n_threads; ++i) {
      if (old_args[i] && old_args_free_fn)
        old_args_free_fn(old_args[i]);
    }
    tor_free(old_args);
  }

  return 0;
}
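
/* Illustrative sketch (not part of this file): broadcasting a parameter
 * change to every worker with threadpool_queue_update(). The names
 * "worker_state_t", "new_limit_dup", "apply_new_limit", and
 * "free_limit_copy" are hypothetical. Because updates may be coalesced,
 * apply_new_limit() only overwrites state; it never does anything that must
 * happen exactly once.
 *
 *   static void *
 *   new_limit_dup(void *arg)                 // copy the argument per thread
 *   {
 *     return tor_memdup(arg, sizeof(int));
 *   }
 *
 *   static int
 *   apply_new_limit(void *state, void *arg)  // runs once in each worker
 *   {
 *     worker_state_t *st = state;
 *     st->limit = *(int*)arg;
 *     tor_free(arg);                         // the update fn owns its copy
 *     return 0;
 *   }
 *
 *   static void
 *   free_limit_copy(void *arg)               // frees copies that never ran
 *   {
 *     tor_free(arg);
 *   }
 *
 *   int limit = 100;
 *   threadpool_queue_update(pool, new_limit_dup, apply_new_limit,
 *                           free_limit_copy, &limit);
 */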

/** Launch threads until we have <b>n</b>. */
static int
threadpool_start_threads(threadpool_t *pool, int n)
{
  tor_mutex_acquire(&pool->lock);

  if (pool->n_threads < n)
    pool->threads = tor_realloc(pool->threads, sizeof(workerthread_t*)*n);

  while (pool->n_threads < n) {
    void *state = pool->new_thread_state_fn(pool->new_thread_state_arg);
    workerthread_t *thr = workerthread_new(state, pool, pool->reply_queue);

    if (!thr) {
      tor_mutex_release(&pool->lock);
      return -1;
    }
    thr->index = pool->n_threads;
    pool->threads[pool->n_threads++] = thr;
  }
  tor_mutex_release(&pool->lock);

  return 0;
}

/**
 * Construct a new thread pool with <b>n_threads</b> worker threads,
 * configured to send their output to <b>replyqueue</b>. The threads' states
 * will be constructed with the <b>new_thread_state_fn</b> call, receiving
 * <b>arg</b> as its argument. When the threads close, they will call
 * <b>free_thread_state_fn</b> on their states.
 */
threadpool_t *
threadpool_new(int n_threads,
               replyqueue_t *replyqueue,
               void *(*new_thread_state_fn)(void*),
               void (*free_thread_state_fn)(void*),
               void *arg)
{
  threadpool_t *pool;
  pool = tor_malloc_zero(sizeof(threadpool_t));
  tor_mutex_init_nonrecursive(&pool->lock);
  tor_cond_init(&pool->condition);
  TOR_TAILQ_INIT(&pool->work);

  pool->new_thread_state_fn = new_thread_state_fn;
  pool->new_thread_state_arg = arg;
  pool->free_thread_state_fn = free_thread_state_fn;
  pool->reply_queue = replyqueue;

  if (threadpool_start_threads(pool, n_threads) < 0) {
    tor_mutex_uninit(&pool->lock);
    tor_free(pool);
    return NULL;
  }

  return pool;
}
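
/* Illustrative sketch (not part of this file): typical construction order.
 * The reply queue is created before the pool, since worker threads send
 * their results to it. "worker_state_t", "worker_state_new", and
 * "worker_state_free" are hypothetical per-thread state helpers, and the 0
 * passed to replyqueue_new() assumes no special alert-socket flags are
 * wanted.
 *
 *   static void *
 *   worker_state_new(void *arg)
 *   {
 *     (void) arg;
 *     return tor_malloc_zero(sizeof(worker_state_t));
 *   }
 *
 *   static void
 *   worker_state_free(void *state)
 *   {
 *     tor_free(state);
 *   }
 *
 *   replyqueue_t *rq = replyqueue_new(0);
 *   threadpool_t *pool = NULL;
 *   if (rq)
 *     pool = threadpool_new(4, rq, worker_state_new, worker_state_free, NULL);
 *   if (!pool) {
 *     // handle failure: the pool could not be created
 *   }
 */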

/** Return the reply queue associated with a given thread pool. */
replyqueue_t *
threadpool_get_replyqueue(threadpool_t *tp)
{
  return tp->reply_queue;
}

/** Allocate a new reply queue. Reply queues are used to pass results from
 * worker threads to the main thread. Since the main thread is running an
 * IO-centric event loop, it needs to get woken up with means other than a
 * condition variable. */
replyqueue_t *
replyqueue_new(uint32_t alertsocks_flags)
{
  replyqueue_t *rq;

  rq = tor_malloc_zero(sizeof(replyqueue_t));
  if (alert_sockets_create(&rq->alert, alertsocks_flags) < 0) {
    tor_free(rq);
    return NULL;
  }

  tor_mutex_init(&rq->lock);
  TOR_TAILQ_INIT(&rq->answers);

  return rq;
}

/**
 * Return the "read socket" for a given reply queue. The main thread should
 * listen for read events on this socket, and call replyqueue_process() every
 * time it triggers.
 */
tor_socket_t
replyqueue_get_socket(replyqueue_t *rq)
{
  return rq->alert.read_fd;
}

/**
 * Process all pending replies on a reply queue. The main thread should call
 * this function every time the socket returned by replyqueue_get_socket() is
 * readable.
 */
void
replyqueue_process(replyqueue_t *queue)
{
  if (queue->alert.drain_fn(queue->alert.read_fd) < 0) {
    /* XXXX complain! */
  }

  tor_mutex_acquire(&queue->lock);
  while (!TOR_TAILQ_EMPTY(&queue->answers)) {
    /* lock must be held at this point. */
    workqueue_entry_t *work = TOR_TAILQ_FIRST(&queue->answers);
    TOR_TAILQ_REMOVE(&queue->answers, work, next_work);
    tor_mutex_release(&queue->lock);
    work->on_pool = NULL;

    work->reply_fn(work->arg);
    workqueue_entry_free(work);

    tor_mutex_acquire(&queue->lock);
  }

  tor_mutex_release(&queue->lock);
}
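
/* Illustrative sketch (not part of this file): hooking the reply queue into
 * a libevent-based main loop, which is how an IO-driven caller would learn
 * that answers are waiting. "reply_read_cb", "the_base", and "rq" are
 * hypothetical; the event_new()/event_add() calls are the standard libevent
 * 2 API.
 *
 *   static void
 *   reply_read_cb(evutil_socket_t fd, short what, void *arg)
 *   {
 *     (void) fd;
 *     (void) what;
 *     replyqueue_process(arg);       // runs every queued reply_fn
 *   }
 *
 *   struct event *reply_event =
 *     event_new(the_base, replyqueue_get_socket(rq), EV_READ|EV_PERSIST,
 *               reply_read_cb, rq);
 *   event_add(reply_event, NULL);
 */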