Mirror of https://gitlab.torproject.org/tpo/core/tor.git
r15890@tombo: nickm | 2008-01-12 17:19:51 -0500
Add a --disable-buffer-freelists configure argument to build without buffer RAM chunk freelists, so we can benchmark the impact of this. svn:r13121
This commit is contained in:
parent 1b4ef78f6a
commit ae6df065ed
configure.in
@@ -34,13 +34,20 @@ AC_ARG_ENABLE(iphone,
     CFLAGS="$CFLAGS -D__DARWIN_UNIX03 -DIPHONE"
   fi])
 
+#XXXX020 We should make these enabled or not, before 0.2.0.x-final
 AC_ARG_ENABLE(cell-pool,
      AS_HELP_STRING(--disable-cell-pool, disable pool allocator for cells))
+AC_ARG_ENABLE(buf-freelists,
+     AS_HELP_STRING(--disable-buf-freelists, disable freelists for buffer RAM))
 
 if test x$enable_cell_pool != xno; then
   AC_DEFINE(ENABLE_CELL_POOL, 1,
             [Defined if we try to use the pool allocator for queued cells])
 fi
+if test x$enable_buf_freelists != xno; then
+  AC_DEFINE(ENABLE_BUF_FREELISTS, 1,
+            [Defined if we try to use freelists for buffer RAM chunks])
+fi
 
 AC_ARG_ENABLE(transparent,
     AS_HELP_STRING(--disable-transparent, disable transparent proxy support),
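A note on the autoconf mechanics above: AC_ARG_ENABLE(buf-freelists, ...) makes ./configure accept --enable-buf-freelists/--disable-buf-freelists and records the choice in $enable_buf_freelists; if neither flag is given, the variable stays unset, so the `test x$enable_buf_freelists != xno` guard still fires and ENABLE_BUF_FREELISTS gets defined. In other words, freelists stay on by default, and the benchmark described in the commit message means building the tree twice: once with plain ./configure and once with ./configure --disable-buf-freelists.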
src/or/buffers.c
@@ -95,6 +95,7 @@ chunk_repack(chunk_t *chunk)
   chunk->data = &chunk->mem[0];
 }
 
+#ifdef ENABLE_BUF_FREELISTS
 /** A freelist of chunks. */
 typedef struct chunk_freelist_t {
   size_t alloc_size; /**< What size chunks does this freelist hold? */
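The #ifdef added here gates the entire freelist machinery behind the new configure macro. For readers unfamiliar with the technique, the following is a minimal, self-contained sketch of the idea with hypothetical names; it is much simpler than Tor's chunk_freelist_t, which also tracks cur_length, lowest_length, max_length, and slack statistics. Freed chunks of a given size are pushed onto a singly linked list and reused before falling back to malloc():

    #include <stdlib.h>

    typedef struct fl_node_t { struct fl_node_t *next; } fl_node_t;

    typedef struct mini_freelist_t {
      size_t alloc_size;   /* every chunk on this list has this size */
      fl_node_t *head;     /* most recently freed chunk, or NULL */
    } mini_freelist_t;

    /* Hand back a cached chunk if we have one; otherwise hit the heap.
     * (alloc_size must be >= sizeof(fl_node_t) for the cast to be safe.) */
    static void *
    mini_alloc(mini_freelist_t *fl)
    {
      if (fl->head) {
        fl_node_t *node = fl->head;
        fl->head = node->next;
        return node;               /* O(1) reuse; no allocator call */
      }
      return malloc(fl->alloc_size);
    }

    /* Instead of free(), push the chunk onto the list for later reuse. */
    static void
    mini_free(mini_freelist_t *fl, void *p)
    {
      fl_node_t *node = p;
      node->next = fl->head;
      fl->head = node;
    }

The cost of the technique is exactly what this commit makes measurable: RAM parked on the list is unavailable to the rest of the process until something like buf_shrink_freelists() below returns it to the heap.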
@@ -188,6 +189,24 @@ chunk_new_with_alloc_size(size_t alloc)
   ch->data = &ch->mem[0];
   return ch;
 }
+#else
+static void
+chunk_free(chunk_t *chunk)
+{
+  tor_free(chunk);
+}
+static INLINE chunk_t *
+chunk_new_with_alloc_size(size_t alloc)
+{
+  chunk_t *ch;
+  ch = tor_malloc_roundup(&alloc);
+  ch->next = NULL;
+  ch->datalen = 0;
+  ch->memlen = CHUNK_SIZE_WITH_ALLOC(alloc);
+  ch->data = &ch->mem[0];
+  return ch;
+}
+#endif
 
 /** Allocate a new chunk with memory size of <b>sz</b>. */
 #define chunk_new_with_capacity(sz) \
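This #else branch is the whole point of the option: with freelists compiled out, chunk_free() degenerates to tor_free() and chunk_new_with_alloc_size() goes straight to the heap on every call. tor_malloc_roundup() appears to round the request up to a size the underlying allocator will actually hand back, reporting the final size through its pointer argument, and CHUNK_SIZE_WITH_ALLOC() then converts that allocation size back into the usable memlen. A rough standalone sketch of the roundup idea (a hypothetical stand-in; the real function lives in Tor's util.c):

    #include <stdlib.h>

    /* Round *sizep up to a plausible allocator bucket (here simply the
     * next multiple of 16) and allocate it, reporting the real size
     * back to the caller through *sizep. */
    static void *
    malloc_roundup_sketch(size_t *sizep)
    {
      *sizep = (*sizep + 15) & ~(size_t)15;
      return malloc(*sizep);
    }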
@@ -221,6 +240,7 @@ static INLINE size_t
 preferred_chunk_size(size_t target)
 {
   /* XXXX020 use log2 code, maybe. */
+  /* XXXX020 or make sizing code more fine-grained! */
   size_t sz = MIN_CHUNK_ALLOC;
   while (CHUNK_SIZE_WITH_ALLOC(sz) < target) {
     sz <<= 1;
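The loop above doubles sz from MIN_CHUNK_ALLOC until the chunk's usable capacity covers target, i.e. it selects the smallest power-of-two allocation that fits. The "use log2 code" comment suggests computing that directly; a sketch of the classic branch-free round-up-to-power-of-two trick (a hypothetical helper, assuming MIN_CHUNK_ALLOC is itself a power of two):

    #include <stdint.h>

    /* Smallest power of two >= n, valid for 1 <= n <= 2^63. */
    static uint64_t
    round_up_pow2(uint64_t n)
    {
      n--;
      n |= n >> 1;  n |= n >> 2;  n |= n >> 4;
      n |= n >> 8;  n |= n >> 16; n |= n >> 32;
      return n + 1;
    }

Under those assumptions, preferred_chunk_size(target) would reduce to roughly the larger of MIN_CHUNK_ALLOC and round_up_pow2 of target plus the chunk header overhead.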
@@ -233,6 +253,7 @@ preferred_chunk_size(size_t target)
 void
 buf_shrink_freelists(int free_all)
 {
+#ifdef ENABLE_BUF_FREELISTS
   int i;
   for (i = 0; freelists[i].alloc_size; ++i) {
     int slack = freelists[i].slack;
@@ -267,6 +288,9 @@ buf_shrink_freelists(int free_all)
       freelists[i].lowest_length = freelists[i].cur_length;
     assert_freelist_ok(&freelists[i]);
   }
+#else
+  (void) free_all;
+#endif
 }
 
 /** Describe the current status of the freelists at log level <b>severity</b>.
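With the function body compiled out, free_all would otherwise trigger unused-parameter warnings; the added `(void) free_all;` is the standard C idiom for marking a parameter as deliberately ignored. In isolation:

    /* A stub whose parameter is intentionally unused in this build;
     * the (void) cast keeps -Wunused-parameter quiet. */
    static void
    stub_shrink(int free_all)
    {
      (void) free_all;
    }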
@@ -274,6 +298,7 @@ buf_shrink_freelists(int free_all)
 void
 buf_dump_freelist_sizes(int severity)
 {
+#ifdef ENABLE_BUF_FREELISTS
   int i;
   log(severity, LD_MM, "====== Buffer freelists:");
   for (i = 0; freelists[i].alloc_size; ++i) {
@@ -290,6 +315,9 @@ buf_dump_freelist_sizes(int severity)
   }
   log(severity, LD_MM, U64_FORMAT" allocations in non-freelist sizes",
       U64_PRINTF_ARG(n_freelist_miss));
+#else
+  (void)severity;
+#endif
 }
 
 /** Magic value for buf_t.magic, to catch pointer errors. */
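Note that buf_shrink_freelists() and buf_dump_freelist_sizes() keep identical signatures in either configuration and simply become near no-ops when ENABLE_BUF_FREELISTS is undefined, so no caller elsewhere in the codebase needs an #ifdef of its own.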
@@ -1612,6 +1640,7 @@ assert_buf_ok(buf_t *buf)
   }
 }
 
+#ifdef ENABLE_BUF_FREELISTS
 /** Log an error and exit if <b>fl</b> is corrupted.
  */
 static void
@@ -1629,4 +1658,5 @@ assert_freelist_ok(chunk_freelist_t *fl)
   tor_assert(n >= fl->lowest_length);
   tor_assert(n <= fl->max_length);
 }
+#endif
 