Merge branch 'bsd_queue' of ssh://git-rw.torproject.org/nickm/tor

Andrea Shepard 2012-10-30 14:39:14 -07:00
commit be37125030
7 changed files with 754 additions and 221 deletions

LICENSE

@ -70,6 +70,35 @@ under the following license:
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
===============================================================================
src/ext/tor_queue.h is licensed under the following license:
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
===============================================================================
src/config/geoip is licensed under the following license:

changes/bsd_queue Normal file

@ -0,0 +1,7 @@
o Code simplification and refactoring:
- Start using OpenBSD's implementation of queue.h, so that we don't
need to hand-roll our own pointer and list structures whenever we
need them. (We can't rely on a sys/queue.h, since some operating
systems don't have one, and the ones that do don't all present
the same extensions.)

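For readers unfamiliar with the queue.h style, here is a minimal sketch of how an intrusive singly-linked list is declared and used with these macros instead of a hand-rolled next pointer. This is not code from the commit; the struct and function names are invented, and it assumes src/ext/tor_queue.h is on the include path.

/* Illustrative only: a toy "job" stack, not code from this commit. */
#include <stdio.h>
#include <stdlib.h>
#include "tor_queue.h"

struct job {
  int id;
  SLIST_ENTRY(job) entries;        /* the link field lives inside the element */
};
SLIST_HEAD(job_stack, job);        /* struct job_stack { struct job *slh_first; } */

int
main(void)
{
  struct job_stack stack = SLIST_HEAD_INITIALIZER(stack);
  struct job *j, *tmp;
  int i;

  for (i = 0; i < 3; ++i) {        /* LIFO: each insert goes at the head */
    j = calloc(1, sizeof(*j));
    j->id = i;
    SLIST_INSERT_HEAD(&stack, j, entries);
  }

  /* The _SAFE variant caches the next pointer, so the current element
   * may be freed while iterating. */
  SLIST_FOREACH_SAFE(j, &stack, entries, tmp) {
    printf("job %d\n", j->id);
    free(j);
  }
  SLIST_INIT(&stack);
  return 0;
}

Because the link is embedded in the element itself, there are no separate node allocations and no per-call-site pointer bookkeeping, which is the duplication the entry above refers to.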

@ -29,3 +29,11 @@ tinytest_macros.h
A unit testing framework. https://github.com/nmathewson/tinytest
tor_queue.h
A copy of sys/queue.h from OpenBSD. We keep our own copy rather
than using sys/queue.h, since some platforms don't have a
sys/queue.h, and the ones that do have diverged in incompatible
ways. (CIRCLEQ or no CIRCLEQ? SIMPLEQ or STAILQ?)


@ -9,7 +9,8 @@ EXTHEADERS = \
src/ext/tinytest.h \
src/ext/strlcat.c \
src/ext/strlcpy.c \
src/ext/tinytest_macros.h
src/ext/tinytest_macros.h \
src/ext/tor_queue.h
noinst_HEADERS+= $(EXTHEADERS)

src/ext/tor_queue.h Normal file

@ -0,0 +1,568 @@
/* $OpenBSD: queue.h,v 1.36 2012/04/11 13:29:14 naddy Exp $ */
/* $NetBSD: queue.h,v 1.11 1996/05/16 05:17:14 mycroft Exp $ */
/*
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)queue.h 8.5 (Berkeley) 8/20/94
*/
#ifndef _SYS_QUEUE_H_
#define _SYS_QUEUE_H_
/*
* This file defines five types of data structures: singly-linked lists,
* lists, simple queues, tail queues, and circular queues.
*
*
* A singly-linked list is headed by a single forward pointer. The elements
* are singly linked for minimum space and pointer manipulation overhead at
* the expense of O(n) removal for arbitrary elements. New elements can be
* added to the list after an existing element or at the head of the list.
* Elements being removed from the head of the list should use the explicit
* macro for this purpose for optimum efficiency. A singly-linked list may
* only be traversed in the forward direction. Singly-linked lists are ideal
* for applications with large datasets and few or no removals or for
* implementing a LIFO queue.
*
* A list is headed by a single forward pointer (or an array of forward
* pointers for a hash table header). The elements are doubly linked
* so that an arbitrary element can be removed without a need to
* traverse the list. New elements can be added to the list before
* or after an existing element or at the head of the list. A list
* may only be traversed in the forward direction.
*
* A simple queue is headed by a pair of pointers, one the head of the
* list and the other to the tail of the list. The elements are singly
* linked to save space, so elements can only be removed from the
* head of the list. New elements can be added to the list before or after
* an existing element, at the head of the list, or at the end of the
* list. A simple queue may only be traversed in the forward direction.
*
* A tail queue is headed by a pair of pointers, one to the head of the
* list and the other to the tail of the list. The elements are doubly
* linked so that an arbitrary element can be removed without a need to
* traverse the list. New elements can be added to the list before or
* after an existing element, at the head of the list, or at the end of
* the list. A tail queue may be traversed in either direction.
*
* A circle queue is headed by a pair of pointers, one to the head of the
* list and the other to the tail of the list. The elements are doubly
* linked so that an arbitrary element can be removed without a need to
* traverse the list. New elements can be added to the list before or after
* an existing element, at the head of the list, or at the end of the list.
* A circle queue may be traversed in either direction, but has a more
* complex end of list detection.
*
* For details on the use of these macros, see the queue(3) manual page.
*/
#if defined(QUEUE_MACRO_DEBUG) || (defined(_KERNEL) && defined(DIAGNOSTIC))
#define _Q_INVALIDATE(a) (a) = ((void *)-1)
#else
#define _Q_INVALIDATE(a)
#endif
/*
* Singly-linked List definitions.
*/
#define SLIST_HEAD(name, type) \
struct name { \
struct type *slh_first; /* first element */ \
}
#define SLIST_HEAD_INITIALIZER(head) \
{ NULL }
#define SLIST_ENTRY(type) \
struct { \
struct type *sle_next; /* next element */ \
}
/*
* Singly-linked List access methods.
*/
#define SLIST_FIRST(head) ((head)->slh_first)
#define SLIST_END(head) NULL
#define SLIST_EMPTY(head) (SLIST_FIRST(head) == SLIST_END(head))
#define SLIST_NEXT(elm, field) ((elm)->field.sle_next)
#define SLIST_FOREACH(var, head, field) \
for((var) = SLIST_FIRST(head); \
(var) != SLIST_END(head); \
(var) = SLIST_NEXT(var, field))
#define SLIST_FOREACH_SAFE(var, head, field, tvar) \
for ((var) = SLIST_FIRST(head); \
(var) && ((tvar) = SLIST_NEXT(var, field), 1); \
(var) = (tvar))
/*
* Singly-linked List functions.
*/
#define SLIST_INIT(head) { \
SLIST_FIRST(head) = SLIST_END(head); \
}
#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \
(elm)->field.sle_next = (slistelm)->field.sle_next; \
(slistelm)->field.sle_next = (elm); \
} while (0)
#define SLIST_INSERT_HEAD(head, elm, field) do { \
(elm)->field.sle_next = (head)->slh_first; \
(head)->slh_first = (elm); \
} while (0)
#define SLIST_REMOVE_AFTER(elm, field) do { \
(elm)->field.sle_next = (elm)->field.sle_next->field.sle_next; \
} while (0)
#define SLIST_REMOVE_HEAD(head, field) do { \
(head)->slh_first = (head)->slh_first->field.sle_next; \
} while (0)
#define SLIST_REMOVE(head, elm, type, field) do { \
if ((head)->slh_first == (elm)) { \
SLIST_REMOVE_HEAD((head), field); \
} else { \
struct type *curelm = (head)->slh_first; \
\
while (curelm->field.sle_next != (elm)) \
curelm = curelm->field.sle_next; \
curelm->field.sle_next = \
curelm->field.sle_next->field.sle_next; \
_Q_INVALIDATE((elm)->field.sle_next); \
} \
} while (0)
/*
* List definitions.
*/
#define LIST_HEAD(name, type) \
struct name { \
struct type *lh_first; /* first element */ \
}
#define LIST_HEAD_INITIALIZER(head) \
{ NULL }
#define LIST_ENTRY(type) \
struct { \
struct type *le_next; /* next element */ \
struct type **le_prev; /* address of previous next element */ \
}
/*
* List access methods
*/
#define LIST_FIRST(head) ((head)->lh_first)
#define LIST_END(head) NULL
#define LIST_EMPTY(head) (LIST_FIRST(head) == LIST_END(head))
#define LIST_NEXT(elm, field) ((elm)->field.le_next)
#define LIST_FOREACH(var, head, field) \
for((var) = LIST_FIRST(head); \
(var)!= LIST_END(head); \
(var) = LIST_NEXT(var, field))
#define LIST_FOREACH_SAFE(var, head, field, tvar) \
for ((var) = LIST_FIRST(head); \
(var) && ((tvar) = LIST_NEXT(var, field), 1); \
(var) = (tvar))
/*
* List functions.
*/
#define LIST_INIT(head) do { \
LIST_FIRST(head) = LIST_END(head); \
} while (0)
#define LIST_INSERT_AFTER(listelm, elm, field) do { \
if (((elm)->field.le_next = (listelm)->field.le_next) != NULL) \
(listelm)->field.le_next->field.le_prev = \
&(elm)->field.le_next; \
(listelm)->field.le_next = (elm); \
(elm)->field.le_prev = &(listelm)->field.le_next; \
} while (0)
#define LIST_INSERT_BEFORE(listelm, elm, field) do { \
(elm)->field.le_prev = (listelm)->field.le_prev; \
(elm)->field.le_next = (listelm); \
*(listelm)->field.le_prev = (elm); \
(listelm)->field.le_prev = &(elm)->field.le_next; \
} while (0)
#define LIST_INSERT_HEAD(head, elm, field) do { \
if (((elm)->field.le_next = (head)->lh_first) != NULL) \
(head)->lh_first->field.le_prev = &(elm)->field.le_next;\
(head)->lh_first = (elm); \
(elm)->field.le_prev = &(head)->lh_first; \
} while (0)
#define LIST_REMOVE(elm, field) do { \
if ((elm)->field.le_next != NULL) \
(elm)->field.le_next->field.le_prev = \
(elm)->field.le_prev; \
*(elm)->field.le_prev = (elm)->field.le_next; \
_Q_INVALIDATE((elm)->field.le_prev); \
_Q_INVALIDATE((elm)->field.le_next); \
} while (0)
#define LIST_REPLACE(elm, elm2, field) do { \
if (((elm2)->field.le_next = (elm)->field.le_next) != NULL) \
(elm2)->field.le_next->field.le_prev = \
&(elm2)->field.le_next; \
(elm2)->field.le_prev = (elm)->field.le_prev; \
*(elm2)->field.le_prev = (elm2); \
_Q_INVALIDATE((elm)->field.le_prev); \
_Q_INVALIDATE((elm)->field.le_next); \
} while (0)
/*
* Simple queue definitions.
*/
#define SIMPLEQ_HEAD(name, type) \
struct name { \
struct type *sqh_first; /* first element */ \
struct type **sqh_last; /* addr of last next element */ \
}
#define SIMPLEQ_HEAD_INITIALIZER(head) \
{ NULL, &(head).sqh_first }
#define SIMPLEQ_ENTRY(type) \
struct { \
struct type *sqe_next; /* next element */ \
}
/*
* Simple queue access methods.
*/
#define SIMPLEQ_FIRST(head) ((head)->sqh_first)
#define SIMPLEQ_END(head) NULL
#define SIMPLEQ_EMPTY(head) (SIMPLEQ_FIRST(head) == SIMPLEQ_END(head))
#define SIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next)
#define SIMPLEQ_FOREACH(var, head, field) \
for((var) = SIMPLEQ_FIRST(head); \
(var) != SIMPLEQ_END(head); \
(var) = SIMPLEQ_NEXT(var, field))
#define SIMPLEQ_FOREACH_SAFE(var, head, field, tvar) \
for ((var) = SIMPLEQ_FIRST(head); \
(var) && ((tvar) = SIMPLEQ_NEXT(var, field), 1); \
(var) = (tvar))
/*
* Simple queue functions.
*/
#define SIMPLEQ_INIT(head) do { \
(head)->sqh_first = NULL; \
(head)->sqh_last = &(head)->sqh_first; \
} while (0)
#define SIMPLEQ_INSERT_HEAD(head, elm, field) do { \
if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \
(head)->sqh_last = &(elm)->field.sqe_next; \
(head)->sqh_first = (elm); \
} while (0)
#define SIMPLEQ_INSERT_TAIL(head, elm, field) do { \
(elm)->field.sqe_next = NULL; \
*(head)->sqh_last = (elm); \
(head)->sqh_last = &(elm)->field.sqe_next; \
} while (0)
#define SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\
(head)->sqh_last = &(elm)->field.sqe_next; \
(listelm)->field.sqe_next = (elm); \
} while (0)
#define SIMPLEQ_REMOVE_HEAD(head, field) do { \
if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \
(head)->sqh_last = &(head)->sqh_first; \
} while (0)
#define SIMPLEQ_REMOVE_AFTER(head, elm, field) do { \
if (((elm)->field.sqe_next = (elm)->field.sqe_next->field.sqe_next) \
== NULL) \
(head)->sqh_last = &(elm)->field.sqe_next; \
} while (0)
/*
* Tail queue definitions.
*/
#define TAILQ_HEAD(name, type) \
struct name { \
struct type *tqh_first; /* first element */ \
struct type **tqh_last; /* addr of last next element */ \
}
#define TAILQ_HEAD_INITIALIZER(head) \
{ NULL, &(head).tqh_first }
#define TAILQ_ENTRY(type) \
struct { \
struct type *tqe_next; /* next element */ \
struct type **tqe_prev; /* address of previous next element */ \
}
/*
* tail queue access methods
*/
#define TAILQ_FIRST(head) ((head)->tqh_first)
#define TAILQ_END(head) NULL
#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
#define TAILQ_LAST(head, headname) \
(*(((struct headname *)((head)->tqh_last))->tqh_last))
/* XXX */
#define TAILQ_PREV(elm, headname, field) \
(*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
#define TAILQ_EMPTY(head) \
(TAILQ_FIRST(head) == TAILQ_END(head))
#define TAILQ_FOREACH(var, head, field) \
for((var) = TAILQ_FIRST(head); \
(var) != TAILQ_END(head); \
(var) = TAILQ_NEXT(var, field))
#define TAILQ_FOREACH_SAFE(var, head, field, tvar) \
for ((var) = TAILQ_FIRST(head); \
(var) != TAILQ_END(head) && \
((tvar) = TAILQ_NEXT(var, field), 1); \
(var) = (tvar))
#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \
for((var) = TAILQ_LAST(head, headname); \
(var) != TAILQ_END(head); \
(var) = TAILQ_PREV(var, headname, field))
#define TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, tvar) \
for ((var) = TAILQ_LAST(head, headname); \
(var) != TAILQ_END(head) && \
((tvar) = TAILQ_PREV(var, headname, field), 1); \
(var) = (tvar))
/*
* Tail queue functions.
*/
#define TAILQ_INIT(head) do { \
(head)->tqh_first = NULL; \
(head)->tqh_last = &(head)->tqh_first; \
} while (0)
#define TAILQ_INSERT_HEAD(head, elm, field) do { \
if (((elm)->field.tqe_next = (head)->tqh_first) != NULL) \
(head)->tqh_first->field.tqe_prev = \
&(elm)->field.tqe_next; \
else \
(head)->tqh_last = &(elm)->field.tqe_next; \
(head)->tqh_first = (elm); \
(elm)->field.tqe_prev = &(head)->tqh_first; \
} while (0)
#define TAILQ_INSERT_TAIL(head, elm, field) do { \
(elm)->field.tqe_next = NULL; \
(elm)->field.tqe_prev = (head)->tqh_last; \
*(head)->tqh_last = (elm); \
(head)->tqh_last = &(elm)->field.tqe_next; \
} while (0)
#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\
(elm)->field.tqe_next->field.tqe_prev = \
&(elm)->field.tqe_next; \
else \
(head)->tqh_last = &(elm)->field.tqe_next; \
(listelm)->field.tqe_next = (elm); \
(elm)->field.tqe_prev = &(listelm)->field.tqe_next; \
} while (0)
#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
(elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
(elm)->field.tqe_next = (listelm); \
*(listelm)->field.tqe_prev = (elm); \
(listelm)->field.tqe_prev = &(elm)->field.tqe_next; \
} while (0)
#define TAILQ_REMOVE(head, elm, field) do { \
if (((elm)->field.tqe_next) != NULL) \
(elm)->field.tqe_next->field.tqe_prev = \
(elm)->field.tqe_prev; \
else \
(head)->tqh_last = (elm)->field.tqe_prev; \
*(elm)->field.tqe_prev = (elm)->field.tqe_next; \
_Q_INVALIDATE((elm)->field.tqe_prev); \
_Q_INVALIDATE((elm)->field.tqe_next); \
} while (0)
#define TAILQ_REPLACE(head, elm, elm2, field) do { \
if (((elm2)->field.tqe_next = (elm)->field.tqe_next) != NULL) \
(elm2)->field.tqe_next->field.tqe_prev = \
&(elm2)->field.tqe_next; \
else \
(head)->tqh_last = &(elm2)->field.tqe_next; \
(elm2)->field.tqe_prev = (elm)->field.tqe_prev; \
*(elm2)->field.tqe_prev = (elm2); \
_Q_INVALIDATE((elm)->field.tqe_prev); \
_Q_INVALIDATE((elm)->field.tqe_next); \
} while (0)
/*
* Circular queue definitions.
*/
#define CIRCLEQ_HEAD(name, type) \
struct name { \
struct type *cqh_first; /* first element */ \
struct type *cqh_last; /* last element */ \
}
#define CIRCLEQ_HEAD_INITIALIZER(head) \
{ CIRCLEQ_END(&head), CIRCLEQ_END(&head) }
#define CIRCLEQ_ENTRY(type) \
struct { \
struct type *cqe_next; /* next element */ \
struct type *cqe_prev; /* previous element */ \
}
/*
* Circular queue access methods
*/
#define CIRCLEQ_FIRST(head) ((head)->cqh_first)
#define CIRCLEQ_LAST(head) ((head)->cqh_last)
#define CIRCLEQ_END(head) ((void *)(head))
#define CIRCLEQ_NEXT(elm, field) ((elm)->field.cqe_next)
#define CIRCLEQ_PREV(elm, field) ((elm)->field.cqe_prev)
#define CIRCLEQ_EMPTY(head) \
(CIRCLEQ_FIRST(head) == CIRCLEQ_END(head))
#define CIRCLEQ_FOREACH(var, head, field) \
for((var) = CIRCLEQ_FIRST(head); \
(var) != CIRCLEQ_END(head); \
(var) = CIRCLEQ_NEXT(var, field))
#define CIRCLEQ_FOREACH_SAFE(var, head, field, tvar) \
for ((var) = CIRCLEQ_FIRST(head); \
(var) != CIRCLEQ_END(head) && \
((tvar) = CIRCLEQ_NEXT(var, field), 1); \
(var) = (tvar))
#define CIRCLEQ_FOREACH_REVERSE(var, head, field) \
for((var) = CIRCLEQ_LAST(head); \
(var) != CIRCLEQ_END(head); \
(var) = CIRCLEQ_PREV(var, field))
#define CIRCLEQ_FOREACH_REVERSE_SAFE(var, head, headname, field, tvar) \
for ((var) = CIRCLEQ_LAST(head, headname); \
(var) != CIRCLEQ_END(head) && \
((tvar) = CIRCLEQ_PREV(var, headname, field), 1); \
(var) = (tvar))
/*
* Circular queue functions.
*/
#define CIRCLEQ_INIT(head) do { \
(head)->cqh_first = CIRCLEQ_END(head); \
(head)->cqh_last = CIRCLEQ_END(head); \
} while (0)
#define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
(elm)->field.cqe_next = (listelm)->field.cqe_next; \
(elm)->field.cqe_prev = (listelm); \
if ((listelm)->field.cqe_next == CIRCLEQ_END(head)) \
(head)->cqh_last = (elm); \
else \
(listelm)->field.cqe_next->field.cqe_prev = (elm); \
(listelm)->field.cqe_next = (elm); \
} while (0)
#define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \
(elm)->field.cqe_next = (listelm); \
(elm)->field.cqe_prev = (listelm)->field.cqe_prev; \
if ((listelm)->field.cqe_prev == CIRCLEQ_END(head)) \
(head)->cqh_first = (elm); \
else \
(listelm)->field.cqe_prev->field.cqe_next = (elm); \
(listelm)->field.cqe_prev = (elm); \
} while (0)
#define CIRCLEQ_INSERT_HEAD(head, elm, field) do { \
(elm)->field.cqe_next = (head)->cqh_first; \
(elm)->field.cqe_prev = CIRCLEQ_END(head); \
if ((head)->cqh_last == CIRCLEQ_END(head)) \
(head)->cqh_last = (elm); \
else \
(head)->cqh_first->field.cqe_prev = (elm); \
(head)->cqh_first = (elm); \
} while (0)
#define CIRCLEQ_INSERT_TAIL(head, elm, field) do { \
(elm)->field.cqe_next = CIRCLEQ_END(head); \
(elm)->field.cqe_prev = (head)->cqh_last; \
if ((head)->cqh_first == CIRCLEQ_END(head)) \
(head)->cqh_first = (elm); \
else \
(head)->cqh_last->field.cqe_next = (elm); \
(head)->cqh_last = (elm); \
} while (0)
#define CIRCLEQ_REMOVE(head, elm, field) do { \
if ((elm)->field.cqe_next == CIRCLEQ_END(head)) \
(head)->cqh_last = (elm)->field.cqe_prev; \
else \
(elm)->field.cqe_next->field.cqe_prev = \
(elm)->field.cqe_prev; \
if ((elm)->field.cqe_prev == CIRCLEQ_END(head)) \
(head)->cqh_first = (elm)->field.cqe_next; \
else \
(elm)->field.cqe_prev->field.cqe_next = \
(elm)->field.cqe_next; \
_Q_INVALIDATE((elm)->field.cqe_prev); \
_Q_INVALIDATE((elm)->field.cqe_next); \
} while (0)
#define CIRCLEQ_REPLACE(head, elm, elm2, field) do { \
if (((elm2)->field.cqe_next = (elm)->field.cqe_next) == \
CIRCLEQ_END(head)) \
(head).cqh_last = (elm2); \
else \
(elm2)->field.cqe_next->field.cqe_prev = (elm2); \
if (((elm2)->field.cqe_prev = (elm)->field.cqe_prev) == \
CIRCLEQ_END(head)) \
(head).cqh_first = (elm2); \
else \
(elm2)->field.cqe_prev->field.cqe_next = (elm2); \
_Q_INVALIDATE((elm)->field.cqe_prev); \
_Q_INVALIDATE((elm)->field.cqe_next); \
} while (0)
#endif /* !_SYS_QUEUE_H_ */
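As a quick illustration of the tail-queue family documented in the header comment above (again a hedged sketch with invented names, not code from this commit), a TAILQ can be walked in either direction because each entry stores both a next pointer and the address of the previous next pointer:

/* Illustrative only; assumes tor_queue.h is on the include path. */
#include <stdio.h>
#include "tor_queue.h"

struct item {
  const char *name;
  TAILQ_ENTRY(item) link;          /* tqe_next plus tqe_prev */
};
TAILQ_HEAD(item_list, item);

int
main(void)
{
  struct item_list list = TAILQ_HEAD_INITIALIZER(list);
  struct item a = { "a", { NULL, NULL } };
  struct item b = { "b", { NULL, NULL } };
  struct item *it;

  TAILQ_INSERT_TAIL(&list, &a, link);
  TAILQ_INSERT_TAIL(&list, &b, link);

  TAILQ_FOREACH(it, &list, link)                     /* head to tail: a, b */
    printf("fwd %s\n", it->name);
  TAILQ_FOREACH_REVERSE(it, &list, item_list, link)  /* tail to head: b, a */
    printf("rev %s\n", it->name);
  return 0;
}

Note that the reverse-traversal macros need the head struct's tag (item_list here), since they recover the previous element by reinterpreting the stored pointer-to-next-field as a head.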


@ -33,6 +33,7 @@
typedef struct cell_queue_entry_s cell_queue_entry_t;
struct cell_queue_entry_s {
SIMPLEQ_ENTRY(cell_queue_entry_s) next;
enum {
CELL_QUEUE_FIXED,
CELL_QUEUE_VAR,
@ -82,7 +83,37 @@ static uint64_t n_channels_allocated = 0;
* If more than one channel exists, follow the next_with_same_id pointer
* as a linked list.
*/
static digestmap_t *channel_identity_map = NULL;
HT_HEAD(channel_idmap, channel_idmap_entry_s) channel_identity_map =
HT_INITIALIZER();
typedef struct channel_idmap_entry_s {
HT_ENTRY(channel_idmap_entry_s) node;
uint8_t digest[DIGEST_LEN];
LIST_HEAD(channel_list_s, channel_s) channel_list;
} channel_idmap_entry_t;
static INLINE unsigned
channel_idmap_hash(const channel_idmap_entry_t *ent)
{
const unsigned *a = (const unsigned *)ent->digest;
#if SIZEOF_INT == 4
return a[0] ^ a[1] ^ a[2] ^ a[3] ^ a[4];
#elif SIZEOF_INT == 8
return a[0] ^ a[1];
#endif
}
static INLINE int
channel_idmap_eq(const channel_idmap_entry_t *a,
const channel_idmap_entry_t *b)
{
return tor_memeq(a->digest, b->digest, DIGEST_LEN);
}
HT_PROTOTYPE(channel_idmap, channel_idmap_entry_s, node, channel_idmap_hash,
channel_idmap_eq);
HT_GENERATE(channel_idmap, channel_idmap_entry_s, node, channel_idmap_hash,
channel_idmap_eq, 0.5, tor_malloc, tor_realloc, tor_free_);
static cell_queue_entry_t * cell_queue_entry_dup(cell_queue_entry_t *q);
static void cell_queue_entry_free(cell_queue_entry_t *q, int handed_off);
@ -506,7 +537,7 @@ channel_listener_unregister(channel_listener_t *chan_l)
static void
channel_add_to_digest_map(channel_t *chan)
{
channel_t *tmp;
channel_idmap_entry_t *ent, search;
tor_assert(chan);
@ -518,23 +549,15 @@ channel_add_to_digest_map(channel_t *chan)
/* Assert that there is a digest */
tor_assert(!tor_digest_is_zero(chan->identity_digest));
/* Allocate the identity map if we have to */
if (!channel_identity_map) channel_identity_map = digestmap_new();
/* Insert it */
tmp = digestmap_set(channel_identity_map,
chan->identity_digest,
chan);
if (tmp) {
/* There already was one, this goes at the head of the list */
chan->next_with_same_id = tmp;
chan->prev_with_same_id = NULL;
tmp->prev_with_same_id = chan;
} else {
/* First with this digest */
chan->next_with_same_id = NULL;
chan->prev_with_same_id = NULL;
memcpy(search.digest, chan->identity_digest, DIGEST_LEN);
ent = HT_FIND(channel_idmap, &channel_identity_map, &search);
if (! ent) {
ent = tor_malloc(sizeof(channel_idmap_entry_t));
memcpy(ent->digest, chan->identity_digest, DIGEST_LEN);
LIST_INIT(&ent->channel_list);
HT_INSERT(channel_idmap, &channel_identity_map, ent);
}
LIST_INSERT_HEAD(&ent->channel_list, chan, next_with_same_id);
log_debug(LD_CHANNEL,
"Added channel %p (global ID " U64_FORMAT ") "
@ -554,13 +577,14 @@ channel_add_to_digest_map(channel_t *chan)
static void
channel_remove_from_digest_map(channel_t *chan)
{
channel_t *tmp;
channel_idmap_entry_t *ent, search;
tor_assert(chan);
/* Assert that there is a digest */
tor_assert(!tor_digest_is_zero(chan->identity_digest));
#if 0
/* Make sure we have a map */
if (!channel_identity_map) {
/*
@ -585,66 +609,29 @@ channel_remove_from_digest_map(channel_t *chan)
return;
}
#endif
/* Pull it out of its list, wherever that list is */
LIST_REMOVE(chan, next_with_same_id);
memcpy(search.digest, chan->identity_digest, DIGEST_LEN);
ent = HT_FIND(channel_idmap, &channel_identity_map, &search);
/* Look for it in the map */
tmp = digestmap_get(channel_identity_map, chan->identity_digest);
if (tmp) {
if (ent) {
/* Okay, it's here */
/* Look for this channel */
while (tmp && tmp != chan) {
tmp = tmp->next_with_same_id;
if (LIST_EMPTY(&ent->channel_list)) {
HT_REMOVE(channel_idmap, &channel_identity_map, ent);
tor_free(ent);
}
if (tmp == chan) {
/* Found it, good */
if (chan->next_with_same_id) {
chan->next_with_same_id->prev_with_same_id = chan->prev_with_same_id;
}
/* else we're the tail of the list */
if (chan->prev_with_same_id) {
/* We're not the head of the list, so we can *just* unlink */
chan->prev_with_same_id->next_with_same_id = chan->next_with_same_id;
} else {
/* We're the head, so we have to point the digest map entry at our
* next if we have one, or remove it if we're also the tail */
if (chan->next_with_same_id) {
digestmap_set(channel_identity_map,
chan->identity_digest,
chan->next_with_same_id);
} else {
digestmap_remove(channel_identity_map,
chan->identity_digest);
}
}
/* NULL out its next/prev pointers, and we're finished */
chan->next_with_same_id = NULL;
chan->prev_with_same_id = NULL;
log_debug(LD_CHANNEL,
"Removed channel %p (global ID " U64_FORMAT ") from "
"identity map in state %s (%d) with digest %s",
chan, U64_PRINTF_ARG(chan->global_identifier),
channel_state_to_string(chan->state), chan->state,
hex_str(chan->identity_digest, DIGEST_LEN));
} else {
/* This is not good */
log_warn(LD_BUG,
"Trying to remove channel %p (global ID " U64_FORMAT ") "
"with digest %s from identity map, but couldn't find it in "
"the list for that digest",
chan, U64_PRINTF_ARG(chan->global_identifier),
hex_str(chan->identity_digest, DIGEST_LEN));
/* Unlink it and hope for the best */
if (chan->next_with_same_id) {
chan->next_with_same_id->prev_with_same_id = chan->prev_with_same_id;
}
if (chan->prev_with_same_id) {
chan->prev_with_same_id->next_with_same_id = chan->next_with_same_id;
}
chan->next_with_same_id = NULL;
chan->prev_with_same_id = NULL;
}
log_debug(LD_CHANNEL,
"Removed channel %p (global ID " U64_FORMAT ") from "
"identity map in state %s (%d) with digest %s",
chan, U64_PRINTF_ARG(chan->global_identifier),
channel_state_to_string(chan->state), chan->state,
hex_str(chan->identity_digest, DIGEST_LEN));
} else {
/* Shouldn't happen */
log_warn(LD_BUG,
@ -653,15 +640,6 @@ channel_remove_from_digest_map(channel_t *chan)
"that digest",
chan, U64_PRINTF_ARG(chan->global_identifier),
hex_str(chan->identity_digest, DIGEST_LEN));
/* Clear out its next/prev pointers */
if (chan->next_with_same_id) {
chan->next_with_same_id->prev_with_same_id = chan->prev_with_same_id;
}
if (chan->prev_with_same_id) {
chan->prev_with_same_id->next_with_same_id = chan->next_with_same_id;
}
chan->next_with_same_id = NULL;
chan->prev_with_same_id = NULL;
}
}
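The hand-maintained next_with_same_id/prev_with_same_id unlinking deleted above is exactly what LIST_REMOVE() does internally: each LIST_ENTRY records the address of the previous element's next pointer, so an element can be unlinked in O(1) without knowing the list head or walking the list. A small sketch under that assumption (names invented, not from the Tor tree):

/* Illustrative only; assumes tor_queue.h is on the include path. */
#include <assert.h>
#include "tor_queue.h"

struct node {
  int value;
  LIST_ENTRY(node) chain;          /* le_next plus le_prev */
};
LIST_HEAD(node_list, node);

int
main(void)
{
  struct node_list head = LIST_HEAD_INITIALIZER(head);
  struct node a = { 1, { NULL, NULL } };
  struct node b = { 2, { NULL, NULL } };
  struct node c = { 3, { NULL, NULL } };

  LIST_INSERT_HEAD(&head, &c, chain);
  LIST_INSERT_HEAD(&head, &b, chain);
  LIST_INSERT_HEAD(&head, &a, chain);   /* list is now a, b, c */

  LIST_REMOVE(&b, chain);               /* no head argument, no traversal */

  assert(LIST_FIRST(&head) == &a);
  assert(LIST_NEXT(&a, chain) == &c);
  return 0;
}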
@ -699,20 +677,21 @@ channel_find_by_global_id(uint64_t global_identifier)
*
* This function looks up a channel by the digest of its remote endpoint in
* the channel digest map. It's possible that more than one channel to a
* given endpoint exists. Use channel_next_with_digest() and
* channel_prev_with_digest() to walk the list.
* given endpoint exists. Use channel_next_with_digest() to walk the list.
*/
channel_t *
channel_find_by_remote_digest(const char *identity_digest)
{
channel_t *rv = NULL;
channel_idmap_entry_t *ent, search;
tor_assert(identity_digest);
/* Search for it in the identity map */
if (channel_identity_map) {
rv = digestmap_get(channel_identity_map, identity_digest);
memcpy(search.digest, identity_digest, DIGEST_LEN);
ent = HT_FIND(channel_idmap, &channel_identity_map, &search);
if (ent) {
rv = LIST_FIRST(&ent->channel_list);
}
return rv;
@ -730,22 +709,7 @@ channel_next_with_digest(channel_t *chan)
{
tor_assert(chan);
return chan->next_with_same_id;
}
/**
* Get previous channel with digest
*
* This function takes a channel and finds the previous channel in the list
* with the same digest.
*/
channel_t *
channel_prev_with_digest(channel_t *chan)
{
tor_assert(chan);
return chan->prev_with_same_id;
return LIST_NEXT(chan, next_with_same_id);
}
/**
@ -770,6 +734,13 @@ channel_init(channel_t *chan)
/* Init next_circ_id */
chan->next_circ_id = crypto_rand_int(1 << 15);
/* Initialize queues. */
SIMPLEQ_INIT(&chan->incoming_queue);
SIMPLEQ_INIT(&chan->outgoing_queue);
/* Initialize list entries. */
memset(&chan->next_with_same_id, 0, sizeof(chan->next_with_same_id));
/* Timestamp it */
channel_timestamp_created(chan);
}
@ -881,6 +852,7 @@ channel_listener_free(channel_listener_t *chan_l)
static void
channel_force_free(channel_t *chan)
{
cell_queue_entry_t *cell, *cell_tmp;
tor_assert(chan);
log_debug(LD_CHANNEL,
@ -907,26 +879,16 @@ channel_force_free(channel_t *chan)
}
/* We might still have a cell queue; kill it */
if (chan->incoming_queue) {
SMARTLIST_FOREACH_BEGIN(chan->incoming_queue,
cell_queue_entry_t *, q) {
cell_queue_entry_free(q, 0);
} SMARTLIST_FOREACH_END(q);
smartlist_free(chan->incoming_queue);
chan->incoming_queue = NULL;
SIMPLEQ_FOREACH_SAFE(cell, &chan->incoming_queue, next, cell_tmp) {
cell_queue_entry_free(cell, 0);
}
SIMPLEQ_INIT(&chan->incoming_queue);
/* Outgoing cell queue is similar, but we can have to free packed cells */
if (chan->outgoing_queue) {
SMARTLIST_FOREACH_BEGIN(chan->outgoing_queue,
cell_queue_entry_t *, q) {
cell_queue_entry_free(q, 0);
} SMARTLIST_FOREACH_END(q);
smartlist_free(chan->outgoing_queue);
chan->outgoing_queue = NULL;
SIMPLEQ_FOREACH_SAFE(cell, &chan->outgoing_queue, next, cell_tmp) {
cell_queue_entry_free(cell, 0);
}
SIMPLEQ_INIT(&chan->outgoing_queue);
tor_free(chan);
}
@ -1089,8 +1051,7 @@ channel_set_cell_handlers(channel_t *chan,
chan->var_cell_handler = var_cell_handler;
/* Re-run the queue if we have one and there's any reason to */
if (chan->incoming_queue &&
(smartlist_len(chan->incoming_queue) > 0) &&
if (! SIMPLEQ_EMPTY(&chan->incoming_queue) &&
try_again &&
(chan->cell_handler ||
chan->var_cell_handler)) channel_process_cells(chan);
@ -1712,9 +1673,8 @@ channel_write_cell_queue_entry(channel_t *chan, cell_queue_entry_t *q)
}
/* Can we send it right out? If so, try */
if (!(chan->outgoing_queue &&
(smartlist_len(chan->outgoing_queue) > 0)) &&
chan->state == CHANNEL_STATE_OPEN) {
if (SIMPLEQ_EMPTY(&chan->outgoing_queue) &&
chan->state == CHANNEL_STATE_OPEN) {
/* Pick the right write function for this cell type and save the result */
switch (q->type) {
case CELL_QUEUE_FIXED:
@ -1750,14 +1710,12 @@ channel_write_cell_queue_entry(channel_t *chan, cell_queue_entry_t *q)
if (!sent) {
/* Not sent, queue it */
if (!(chan->outgoing_queue))
chan->outgoing_queue = smartlist_new();
/*
* We have to copy the queue entry passed in, since the caller probably
* used the stack.
*/
tmp = cell_queue_entry_dup(q);
smartlist_add(chan->outgoing_queue, tmp);
SIMPLEQ_INSERT_TAIL(&chan->outgoing_queue, tmp, next);
/* Try to process the queue? */
if (chan->state == CHANNEL_STATE_OPEN) channel_flush_cells(chan);
}
@ -1943,19 +1901,15 @@ channel_change_state(channel_t *chan, channel_state_t to_state)
channel_do_open_actions(chan);
/* Check for queued cells to process */
if (chan->incoming_queue &&
smartlist_len(chan->incoming_queue) > 0)
if (! SIMPLEQ_EMPTY(&chan->incoming_queue))
channel_process_cells(chan);
if (chan->outgoing_queue &&
smartlist_len(chan->outgoing_queue) > 0)
if (! SIMPLEQ_EMPTY(&chan->outgoing_queue))
channel_flush_cells(chan);
} else if (to_state == CHANNEL_STATE_CLOSED ||
to_state == CHANNEL_STATE_ERROR) {
/* Assert that all queues are empty */
tor_assert(!(chan->incoming_queue) ||
smartlist_len(chan->incoming_queue) == 0);
tor_assert(!(chan->outgoing_queue) ||
smartlist_len(chan->outgoing_queue) == 0);
tor_assert(SIMPLEQ_EMPTY(&chan->incoming_queue));
tor_assert(SIMPLEQ_EMPTY(&chan->outgoing_queue));
}
}
@ -2129,16 +2083,9 @@ channel_flush_some_cells_from_outgoing_queue(channel_t *chan,
/* If we aren't in CHANNEL_STATE_OPEN, nothing goes through */
if (chan->state == CHANNEL_STATE_OPEN) {
while ((unlimited || num_cells > flushed) &&
(chan->outgoing_queue &&
(smartlist_len(chan->outgoing_queue) > 0))) {
/*
* Ewww, smartlist_del_keeporder() is O(n) in list length; maybe a
* a linked list would make more sense for the queue.
*/
NULL != (q = SIMPLEQ_FIRST(&chan->outgoing_queue))) {
/* Get the head of the queue */
q = smartlist_get(chan->outgoing_queue, 0);
if (q) {
if (1) {
/*
* Okay, we have a good queue entry, try to give it to the lower
* layer.
@ -2223,26 +2170,17 @@ channel_flush_some_cells_from_outgoing_queue(channel_t *chan,
cell_queue_entry_free(q, 0);
q = NULL;
}
} else {
/* This shouldn't happen; log and throw it away */
log_info(LD_CHANNEL,
"Saw a NULL entry in the outgoing cell queue on channel %p "
"(global ID " U64_FORMAT "); this is definitely a bug.",
chan, U64_PRINTF_ARG(chan->global_identifier));
/* q is already NULL, so we know to delete that queue entry */
}
/* if q got NULLed out, we used it and should remove the queue entry */
if (!q) smartlist_del_keeporder(chan->outgoing_queue, 0);
/* No cell removed from list, so we can't go on any further */
else break;
/* if q got NULLed out, we used it and should remove the queue entry */
if (!q) SIMPLEQ_REMOVE_HEAD(&chan->outgoing_queue, next);
/* No cell removed from list, so we can't go on any further */
else break;
}
}
}
/* Did we drain the queue? */
if (!(chan->outgoing_queue) ||
smartlist_len(chan->outgoing_queue) == 0) {
/* Timestamp it */
if (SIMPLEQ_EMPTY(&chan->outgoing_queue)) {
channel_timestamp_drained(chan);
}
@ -2276,8 +2214,8 @@ channel_more_to_flush(channel_t *chan)
tor_assert(chan);
/* Check if we have any queued */
if (chan->incoming_queue &&
smartlist_len(chan->incoming_queue) > 0) return 1;
if (! SIMPLEQ_EMPTY(&chan->incoming_queue))
return 1;
/* Check if any circuits would like to queue some */
if (circuitmux_num_cells(chan->cmux) > 0) return 1;
@ -2470,8 +2408,7 @@ channel_listener_queue_incoming(channel_listener_t *listener,
void
channel_process_cells(channel_t *chan)
{
smartlist_t *queue;
int putback = 0;
cell_queue_entry_t *q;
tor_assert(chan);
tor_assert(chan->state == CHANNEL_STATE_CLOSING ||
chan->state == CHANNEL_STATE_MAINT ||
@ -2485,24 +2422,21 @@ channel_process_cells(channel_t *chan)
if (!(chan->cell_handler ||
chan->var_cell_handler)) return;
/* Nothing we can do if we have no cells */
if (!(chan->incoming_queue)) return;
if (SIMPLEQ_EMPTY(&chan->incoming_queue)) return;
queue = chan->incoming_queue;
chan->incoming_queue = NULL;
/*
* Process cells until we're done or find one we have no current handler
* for.
*/
SMARTLIST_FOREACH_BEGIN(queue, cell_queue_entry_t *, q) {
while (NULL != (q = SIMPLEQ_FIRST(&chan->incoming_queue))) {
tor_assert(q);
tor_assert(q->type == CELL_QUEUE_FIXED ||
q->type == CELL_QUEUE_VAR);
if (putback) {
smartlist_add(chan->incoming_queue, q);
} else if (q->type == CELL_QUEUE_FIXED &&
if (q->type == CELL_QUEUE_FIXED &&
chan->cell_handler) {
/* Handle a fixed-length cell */
SIMPLEQ_REMOVE_HEAD(&chan->incoming_queue, next);
tor_assert(q->u.fixed.cell);
log_debug(LD_CHANNEL,
"Processing incoming cell_t %p for channel %p (global ID "
@ -2514,6 +2448,7 @@ channel_process_cells(channel_t *chan)
} else if (q->type == CELL_QUEUE_VAR &&
chan->var_cell_handler) {
/* Handle a variable-length cell */
SIMPLEQ_REMOVE_HEAD(&chan->incoming_queue, next);
tor_assert(q->u.var.var_cell);
log_debug(LD_CHANNEL,
"Processing incoming var_cell_t %p for channel %p (global ID "
@ -2524,15 +2459,10 @@ channel_process_cells(channel_t *chan)
tor_free(q);
} else {
/* Can't handle this one */
if (!chan->incoming_queue)
chan->incoming_queue = smartlist_new();
smartlist_add(chan->incoming_queue, q);
putback = 1;
break;
}
} SMARTLIST_FOREACH_END(q);
}
/* If the list is empty, free it */
smartlist_free(queue);
}
/**
@ -2554,15 +2484,9 @@ channel_queue_cell(channel_t *chan, cell_t *cell)
/* Do we need to queue it, or can we just call the handler right away? */
if (!(chan->cell_handler)) need_to_queue = 1;
if (chan->incoming_queue &&
(smartlist_len(chan->incoming_queue) > 0))
if (! SIMPLEQ_EMPTY(&chan->incoming_queue))
need_to_queue = 1;
/* If we need to queue and have no queue, create one */
if (need_to_queue && !(chan->incoming_queue)) {
chan->incoming_queue = smartlist_new();
}
/* Timestamp for receiving */
channel_timestamp_recv(chan);
@ -2580,14 +2504,13 @@ channel_queue_cell(channel_t *chan, cell_t *cell)
chan->cell_handler(chan, cell);
} else {
/* Otherwise queue it and then process the queue if possible. */
tor_assert(chan->incoming_queue);
q = cell_queue_entry_new_fixed(cell);
log_debug(LD_CHANNEL,
"Queueing incoming cell_t %p for channel %p "
"(global ID " U64_FORMAT ")",
cell, chan,
U64_PRINTF_ARG(chan->global_identifier));
smartlist_add(chan->incoming_queue, q);
SIMPLEQ_INSERT_TAIL(&chan->incoming_queue, q, next);
if (chan->cell_handler ||
chan->var_cell_handler) {
channel_process_cells(chan);
@ -2614,15 +2537,9 @@ channel_queue_var_cell(channel_t *chan, var_cell_t *var_cell)
/* Do we need to queue it, or can we just call the handler right away? */
if (!(chan->var_cell_handler)) need_to_queue = 1;
if (chan->incoming_queue &&
(smartlist_len(chan->incoming_queue) > 0))
if (! SIMPLEQ_EMPTY(&chan->incoming_queue))
need_to_queue = 1;
/* If we need to queue and have no queue, create one */
if (need_to_queue && !(chan->incoming_queue)) {
chan->incoming_queue = smartlist_new();
}
/* Timestamp for receiving */
channel_timestamp_recv(chan);
@ -2640,14 +2557,13 @@ channel_queue_var_cell(channel_t *chan, var_cell_t *var_cell)
chan->var_cell_handler(chan, var_cell);
} else {
/* Otherwise queue it and then process the queue if possible. */
tor_assert(chan->incoming_queue);
q = cell_queue_entry_new_var(var_cell);
log_debug(LD_CHANNEL,
"Queueing incoming var_cell_t %p for channel %p "
"(global ID " U64_FORMAT ")",
var_cell, chan,
U64_PRINTF_ARG(chan->global_identifier));
smartlist_add(chan->incoming_queue, q);
SIMPLEQ_INSERT_TAIL(&chan->incoming_queue, q, next);
if (chan->cell_handler ||
chan->var_cell_handler) {
channel_process_cells(chan);
@ -2939,13 +2855,10 @@ channel_free_all(void)
}
/* Now free channel_identity_map */
if (channel_identity_map) {
log_debug(LD_CHANNEL,
"Freeing channel_identity_map");
/* Geez, anything still left over just won't die ... let it leak then */
digestmap_free(channel_identity_map, NULL);
channel_identity_map = NULL;
}
log_debug(LD_CHANNEL,
"Freeing channel_identity_map");
/* Geez, anything still left over just won't die ... let it leak then */
HT_CLEAR(channel_idmap, &channel_identity_map);
log_debug(LD_CHANNEL,
"Done cleaning up after channels");
@ -3052,12 +2965,6 @@ channel_get_for_extend(const char *digest,
tor_assert(msg_out);
tor_assert(launch_out);
if (!channel_identity_map) {
*msg_out = "Router not connected (nothing is). Connecting.";
*launch_out = 1;
return NULL;
}
chan = channel_find_by_remote_digest(digest);
/* Walk the list, unrefing the old one and refing the new at each
@ -3176,6 +3083,19 @@ channel_listener_describe_transport(channel_listener_t *chan_l)
return chan_l->describe_transport(chan_l);
}
/**
* Return the number of entries in <b>queue</b>
*/
static int
chan_cell_queue_len(const chan_cell_queue_t *queue)
{
int r = 0;
cell_queue_entry_t *cell;
SIMPLEQ_FOREACH(cell, queue, next)
++r;
return r;
}
/**
* Dump channel statistics
*
@ -3293,10 +3213,8 @@ channel_dump_statistics(channel_t *chan, int severity)
" * Channel " U64_FORMAT " has %d queued incoming cells"
" and %d queued outgoing cells",
U64_PRINTF_ARG(chan->global_identifier),
(chan->incoming_queue != NULL) ?
smartlist_len(chan->incoming_queue) : 0,
(chan->outgoing_queue != NULL) ?
smartlist_len(chan->outgoing_queue) : 0);
chan_cell_queue_len(&chan->incoming_queue),
chan_cell_queue_len(&chan->outgoing_queue));
/* Describe circuits */
log(severity, LD_GENERAL,
@ -3562,8 +3480,7 @@ channel_has_queued_writes(channel_t *chan)
tor_assert(chan);
tor_assert(chan->has_queued_writes);
if (chan->outgoing_queue &&
smartlist_len(chan->outgoing_queue) > 0) {
if (! SIMPLEQ_EMPTY(&chan->outgoing_queue)) {
has_writes = 1;
} else {
/* Check with the lower layer */

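Across channel.c the pattern replacing the smartlist queues is the same: a SIMPLEQ head embedded in the owning struct, SIMPLEQ_INSERT_TAIL when a cell has to wait, and SIMPLEQ_FOREACH_SAFE when draining so entries can be freed mid-iteration. A condensed, hypothetical sketch of that pattern follows; the struct and function names are invented and this is not the actual channel code.

/* Hypothetical sketch only; assumes tor_queue.h is on the include path. */
#include <stdlib.h>
#include "tor_queue.h"

typedef struct cell_entry_s {
  SIMPLEQ_ENTRY(cell_entry_s) next;
  int payload;
} cell_entry_t;

typedef struct toy_channel_s {
  SIMPLEQ_HEAD(toy_cell_queue, cell_entry_s) incoming;  /* queue head as a member */
} toy_channel_t;

static void
toy_channel_init(toy_channel_t *chan)
{
  SIMPLEQ_INIT(&chan->incoming);
}

static void
toy_channel_queue(toy_channel_t *chan, int payload)
{
  cell_entry_t *q = calloc(1, sizeof(*q));
  q->payload = payload;
  SIMPLEQ_INSERT_TAIL(&chan->incoming, q, next);
}

static void
toy_channel_drain(toy_channel_t *chan)
{
  cell_entry_t *q, *tmp;
  /* FOREACH_SAFE caches the next entry, so the current one may be freed. */
  SIMPLEQ_FOREACH_SAFE(q, &chan->incoming, next, tmp) {
    /* ...hand q off to a cell handler here... */
    free(q);
  }
  SIMPLEQ_INIT(&chan->incoming);   /* reset the head; there is nothing to free */
}

int
main(void)
{
  toy_channel_t chan;
  toy_channel_init(&chan);
  toy_channel_queue(&chan, 1);
  toy_channel_queue(&chan, 2);
  toy_channel_drain(&chan);
  return 0;
}

Unlike the old smartlist fields, the queue heads are plain struct members rather than heap-allocated objects, which is why the converted code resets them with SIMPLEQ_INIT() instead of freeing them.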

@ -10,6 +10,7 @@
#define TOR_CHANNEL_H
#include "or.h"
#include "tor_queue.h"
#include "circuitmux.h"
/* Channel handler function pointer typedefs */
@ -17,6 +18,10 @@ typedef void (*channel_listener_fn_ptr)(channel_listener_t *, channel_t *);
typedef void (*channel_cell_handler_fn_ptr)(channel_t *, cell_t *);
typedef void (*channel_var_cell_handler_fn_ptr)(channel_t *, var_cell_t *);
struct cell_queue_entry_s;
SIMPLEQ_HEAD(chan_cell_queue, cell_queue_entry_s) incoming_queue;
typedef struct chan_cell_queue chan_cell_queue_t;
/*
* Channel struct; see the channel_t typedef in or.h. A channel is an
* abstract interface for the OR-to-OR connection, similar to connection_or_t,
@ -120,13 +125,13 @@ struct channel_s {
* Linked list of channels with the same identity digest, for the
* digest->channel map
*/
channel_t *next_with_same_id, *prev_with_same_id;
LIST_ENTRY(channel_s) next_with_same_id;
/* List of incoming cells to handle */
smartlist_t *incoming_queue;
chan_cell_queue_t incoming_queue;
/* List of queued outgoing cells */
smartlist_t *outgoing_queue;
chan_cell_queue_t outgoing_queue;
/* Circuit mux for circuits sending on this channel */
circuitmux_t *cmux;
@ -415,9 +420,7 @@ channel_t * channel_find_by_remote_digest(const char *identity_digest);
/** For things returned by channel_find_by_remote_digest(), walk the list.
*/
channel_t * channel_next_with_digest(channel_t *chan);
channel_t * channel_prev_with_digest(channel_t *chan);
/*
* Metadata queries/updates