Remove mempools and buf freelists

They have been off-by-default since 0.2.5 and nobody has complained. :)

Also remove the buf_shrink() function, which hasn't done anything
since we first stopped using contiguous memory to store buffers.

Closes ticket 14848.
Nick Mathewson 2015-02-11 09:03:50 -05:00
parent 0c81dfa848
commit 6f331645c7
11 changed files with 9 additions and 1080 deletions

changes/remove_mempools (new file)

@@ -0,0 +1,5 @@
o Removed features:
- The --enable-mempool and --enable-buf-freelists options, which
were originally created to work around bad malloc implementations,
no longer exist. They were off-by-default in 0.2.5. Closes
ticket 14848.

configure.ac

@@ -25,10 +25,6 @@ fi
CPPFLAGS="$CPPFLAGS -I\${top_srcdir}/src/common"
#XXXX020 We should make these enabled or not, before 0.2.0.x-final
AC_ARG_ENABLE(buf-freelists,
AS_HELP_STRING(--enable-buf-freelists, enable freelists for buffer RAM))
AC_ARG_ENABLE(mempools,
AS_HELP_STRING(--enable-mempools, enable mempools for relay cells))
AC_ARG_ENABLE(openbsd-malloc,
AS_HELP_STRING(--enable-openbsd-malloc, Use malloc code from openbsd. Linux only))
AC_ARG_ENABLE(instrument-downloads,

src/common/include.am

@@ -24,14 +24,6 @@ else
libor_extra_source=
endif
if USE_MEMPOOLS
libor_mempool_source=src/common/mempool.c
libor_mempool_header=src/common/mempool.h
else
libor_mempool_source=
libor_mempool_header=
endif
src_common_libcurve25519_donna_a_CFLAGS=
if BUILD_CURVE25519_DONNA
@@ -78,7 +70,6 @@ LIBOR_A_SOURCES = \
src/ext/csiphash.c \
src/ext/trunnel/trunnel.c \
$(libor_extra_source) \
$(libor_mempool_source) \
$(threads_impl_source)
LIBOR_CRYPTO_A_SOURCES = \
@@ -138,8 +129,7 @@ COMMONHEADERS = \
src/common/tortls.h \
src/common/util.h \
src/common/util_process.h \
src/common/workqueue.h \
$(libor_mempool_header)
src/common/workqueue.h
noinst_HEADERS+= $(COMMONHEADERS)

src/common/mempool.c (deleted file)

@@ -1,628 +0,0 @@
/* Copyright (c) 2007-2015, The Tor Project, Inc. */
/* See LICENSE for licensing information */
#if 1
/* Tor dependencies */
#include "orconfig.h"
#endif
#include <stdlib.h>
#include <string.h>
#include "torint.h"
#include "crypto.h"
#define MEMPOOL_PRIVATE
#include "mempool.h"
/* OVERVIEW:
*
* This is an implementation of memory pools for Tor cells. It may be
* useful for you too.
*
* Generally, a memory pool is an allocation strategy optimized for large
* numbers of identically-sized objects. Rather than the elaborate arena
* and coalescing strategies you need to get good performance for a
* general-purpose malloc(), pools use a series of large memory "chunks",
* each of which is carved into a bunch of smaller "items" or
* "allocations".
*
* To get decent performance, you need to:
* - Minimize the number of times you hit the underlying allocator.
* - Try to keep accesses as local in memory as possible.
* - Try to keep the common case fast.
*
* Our implementation uses three lists of chunks per pool. Each chunk can
* be either "full" (no more room for items); "empty" (no items); or
* "used" (not full, not empty). There are independent doubly-linked
* lists for each state.
*
* CREDIT:
*
* I wrote this after looking at 3 or 4 other pooling allocators, but
* without copying. The strategy this most resembles (which is funny,
* since that's the one I looked at longest ago) is the pool allocator
* underlying Python's obmalloc code. Major differences from obmalloc's
* pools are:
* - We don't even try to be threadsafe.
* - We only handle objects of one size.
* - Our list of empty chunks is doubly-linked, not singly-linked.
* (This could change pretty easily; it's only doubly-linked for
* consistency.)
* - We keep a list of full chunks (so we can have a "nuke everything"
* function). Obmalloc's pools leave full chunks to float unanchored.
*
* LIMITATIONS:
* - Not even slightly threadsafe.
* - Likes to have lots of items per chunk.
* - One pointer overhead per allocated thing. (The alternative is
* something like glib's use of an RB-tree to keep track of what
* chunk any given piece of memory is in.)
* - Only aligns allocated things to void* level: redefine ALIGNMENT_TYPE
* if you need doubles.
* - Could probably be optimized a bit; the representation contains
* a bit more info than it really needs to have.
*/
#if 1
/* Tor dependencies */
#include "util.h"
#include "compat.h"
#include "torlog.h"
#define ALLOC(x) tor_malloc(x)
#define FREE(x) tor_free(x)
#define ASSERT(x) tor_assert(x)
#undef ALLOC_CAN_RETURN_NULL
#define TOR
/* End Tor dependencies */
#else
/* If you're not building this as part of Tor, you'll want to define the
* following macros. For now, these should do as defaults.
*/
#include <assert.h>
#define PREDICT_UNLIKELY(x) (x)
#define PREDICT_LIKELY(x) (x)
#define ALLOC(x) malloc(x)
#define FREE(x) free(x)
#define STRUCT_OFFSET(tp, member) \
((off_t) (((char*)&((tp*)0)->member)-(char*)0))
#define ASSERT(x) assert(x)
#define ALLOC_CAN_RETURN_NULL
#endif
/* Tuning parameters */
/** Largest type that we need to ensure returned memory items are aligned to.
* Change this to "double" if we need to be safe for structs with doubles. */
#define ALIGNMENT_TYPE void *
/** Increment to which we need to align allocated memory. */
#define ALIGNMENT sizeof(ALIGNMENT_TYPE)
/** Largest memory chunk that we should allocate. */
#define MAX_CHUNK (8*(1L<<20))
/** Smallest memory chunk size that we should allocate. */
#define MIN_CHUNK 4096
typedef struct mp_allocated_t mp_allocated_t;
typedef struct mp_chunk_t mp_chunk_t;
/** Holds a single allocated item, allocated as part of a chunk. */
struct mp_allocated_t {
/** The chunk that this item is allocated in. This adds overhead to each
* allocated item, thus making this implementation inappropriate for
* very small items. */
mp_chunk_t *in_chunk;
union {
/** If this item is free, the next item on the free list. */
mp_allocated_t *next_free;
/** If this item is not free, the actual memory contents of this item.
* (Not actual size.) */
char mem[1];
/** An extra element to the union to ensure correct alignment. */
ALIGNMENT_TYPE dummy_;
} u;
};
/** 'Magic' value used to detect memory corruption. */
#define MP_CHUNK_MAGIC 0x09870123
/** A chunk of memory. Chunks come from malloc; we use them to hold items. */
struct mp_chunk_t {
unsigned long magic; /**< Must be MP_CHUNK_MAGIC if this chunk is valid. */
mp_chunk_t *next; /**< The next free, used, or full chunk in sequence. */
mp_chunk_t *prev; /**< The previous free, used, or full chunk in sequence. */
mp_pool_t *pool; /**< The pool that this chunk is part of. */
/** First free item in the freelist for this chunk. Note that this may be
* NULL even if this chunk is not at capacity: if so, the free memory at
* next_mem has not yet been carved into items.
*/
mp_allocated_t *first_free;
int n_allocated; /**< Number of currently allocated items in this chunk. */
int capacity; /**< Number of items that can be fit into this chunk. */
size_t mem_size; /**< Number of usable bytes in mem. */
char *next_mem; /**< Pointer into part of <b>mem</b> not yet carved up. */
char mem[FLEXIBLE_ARRAY_MEMBER]; /**< Storage for this chunk. */
};
/** Number of extra bytes needed beyond mem_size to allocate a chunk. */
#define CHUNK_OVERHEAD STRUCT_OFFSET(mp_chunk_t, mem[0])
/** Given a pointer to a mp_allocated_t, return a pointer to the memory
* item it holds. */
#define A2M(a) (&(a)->u.mem)
/** Given a pointer to a memory item, return a pointer to its enclosing
* mp_allocated_t. */
#define M2A(p) ( ((char*)p) - STRUCT_OFFSET(mp_allocated_t, u.mem) )
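/* Example with hypothetical addresses (LP64): if an mp_allocated_t sits at
 * 0x1000, its u.mem begins at 0x1008, just past the in_chunk pointer. A2M
 * hands the caller 0x1008, and M2A(0x1008) subtracts the same offset to
 * recover 0x1000, so mp_pool_release() can find the owning chunk again. */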
#ifdef ALLOC_CAN_RETURN_NULL
/** If our ALLOC() macro can return NULL, check whether <b>x</b> is NULL,
* and if so, return NULL. */
#define CHECK_ALLOC(x) \
if (PREDICT_UNLIKELY(!x)) { return NULL; }
#else
/** If our ALLOC() macro can't return NULL, do nothing. */
#define CHECK_ALLOC(x)
#endif
/** Helper: Allocate and return a new memory chunk for <b>pool</b>. Does not
* link the chunk into any list. */
static mp_chunk_t *
mp_chunk_new(mp_pool_t *pool)
{
size_t sz = pool->new_chunk_capacity * pool->item_alloc_size;
mp_chunk_t *chunk = ALLOC(CHUNK_OVERHEAD + sz);
#ifdef MEMPOOL_STATS
++pool->total_chunks_allocated;
#endif
CHECK_ALLOC(chunk);
memset(chunk, 0, sizeof(mp_chunk_t)); /* Doesn't clear the whole thing. */
chunk->magic = MP_CHUNK_MAGIC;
chunk->capacity = pool->new_chunk_capacity;
chunk->mem_size = sz;
chunk->next_mem = chunk->mem;
chunk->pool = pool;
return chunk;
}
/** Take a <b>chunk</b> that has just been allocated or removed from
* <b>pool</b>'s empty chunk list, and add it to the head of the used chunk
* list. */
static INLINE void
add_newly_used_chunk_to_used_list(mp_pool_t *pool, mp_chunk_t *chunk)
{
chunk->next = pool->used_chunks;
if (chunk->next)
chunk->next->prev = chunk;
pool->used_chunks = chunk;
ASSERT(!chunk->prev);
}
/** Return a newly allocated item from <b>pool</b>. */
void *
mp_pool_get(mp_pool_t *pool)
{
mp_chunk_t *chunk;
mp_allocated_t *allocated;
if (PREDICT_LIKELY(pool->used_chunks != NULL)) {
/* Common case: there is some chunk that is neither full nor empty. Use
* that one. (We can't use the full ones, obviously, and we should fill
* up the used ones before we start on any empty ones.) */
chunk = pool->used_chunks;
} else if (pool->empty_chunks) {
/* We have no used chunks, but we have an empty chunk that we haven't
* freed yet: use that. (We pull from the front of the list, which should
* get us the most recently emptied chunk.) */
chunk = pool->empty_chunks;
/* Remove the chunk from the empty list. */
pool->empty_chunks = chunk->next;
if (chunk->next)
chunk->next->prev = NULL;
/* Put the chunk on the 'used' list. */
add_newly_used_chunk_to_used_list(pool, chunk);
ASSERT(!chunk->prev);
--pool->n_empty_chunks;
if (pool->n_empty_chunks < pool->min_empty_chunks)
pool->min_empty_chunks = pool->n_empty_chunks;
} else {
/* We have no used or empty chunks: allocate a new chunk. */
chunk = mp_chunk_new(pool);
CHECK_ALLOC(chunk);
/* Add the new chunk to the used list. */
add_newly_used_chunk_to_used_list(pool, chunk);
}
ASSERT(chunk->n_allocated < chunk->capacity);
if (chunk->first_free) {
/* If there's anything on the chunk's freelist, unlink it and use it. */
allocated = chunk->first_free;
chunk->first_free = allocated->u.next_free;
allocated->u.next_free = NULL; /* For debugging; not really needed. */
ASSERT(allocated->in_chunk == chunk);
} else {
/* Otherwise, the chunk had better have some free space left on it. */
ASSERT(chunk->next_mem + pool->item_alloc_size <=
chunk->mem + chunk->mem_size);
/* Good, it did. Let's carve off a bit of that free space, and use
* that. */
allocated = (void*)chunk->next_mem;
chunk->next_mem += pool->item_alloc_size;
allocated->in_chunk = chunk;
allocated->u.next_free = NULL; /* For debugging; not really needed. */
}
++chunk->n_allocated;
#ifdef MEMPOOL_STATS
++pool->total_items_allocated;
#endif
if (PREDICT_UNLIKELY(chunk->n_allocated == chunk->capacity)) {
/* This chunk just became full. */
ASSERT(chunk == pool->used_chunks);
ASSERT(chunk->prev == NULL);
/* Take it off the used list. */
pool->used_chunks = chunk->next;
if (chunk->next)
chunk->next->prev = NULL;
/* Put it on the full list. */
chunk->next = pool->full_chunks;
if (chunk->next)
chunk->next->prev = chunk;
pool->full_chunks = chunk;
}
/* And return the memory portion of the mp_allocated_t. */
return A2M(allocated);
}
/** Return an allocated memory item to its memory pool. */
void
mp_pool_release(void *item)
{
mp_allocated_t *allocated = (void*) M2A(item);
mp_chunk_t *chunk = allocated->in_chunk;
ASSERT(chunk);
ASSERT(chunk->magic == MP_CHUNK_MAGIC);
ASSERT(chunk->n_allocated > 0);
allocated->u.next_free = chunk->first_free;
chunk->first_free = allocated;
if (PREDICT_UNLIKELY(chunk->n_allocated == chunk->capacity)) {
/* This chunk was full and is about to be used. */
mp_pool_t *pool = chunk->pool;
/* unlink from the full list */
if (chunk->prev)
chunk->prev->next = chunk->next;
if (chunk->next)
chunk->next->prev = chunk->prev;
if (chunk == pool->full_chunks)
pool->full_chunks = chunk->next;
/* link to the used list. */
chunk->next = pool->used_chunks;
chunk->prev = NULL;
if (chunk->next)
chunk->next->prev = chunk;
pool->used_chunks = chunk;
} else if (PREDICT_UNLIKELY(chunk->n_allocated == 1)) {
/* This was used and is about to be empty. */
mp_pool_t *pool = chunk->pool;
/* Unlink from the used list */
if (chunk->prev)
chunk->prev->next = chunk->next;
if (chunk->next)
chunk->next->prev = chunk->prev;
if (chunk == pool->used_chunks)
pool->used_chunks = chunk->next;
/* Link to the empty list */
chunk->next = pool->empty_chunks;
chunk->prev = NULL;
if (chunk->next)
chunk->next->prev = chunk;
pool->empty_chunks = chunk;
/* Reset the guts of this chunk to defragment it, in case it gets
* used again. */
chunk->first_free = NULL;
chunk->next_mem = chunk->mem;
++pool->n_empty_chunks;
}
--chunk->n_allocated;
}
/** Allocate a new memory pool to hold items of size <b>item_size</b>. We'll
* try to fit about <b>chunk_capacity</b> bytes in each chunk. */
mp_pool_t *
mp_pool_new(size_t item_size, size_t chunk_capacity)
{
mp_pool_t *pool;
size_t alloc_size, new_chunk_cap;
tor_assert(item_size < SIZE_T_CEILING);
tor_assert(chunk_capacity < SIZE_T_CEILING);
tor_assert(SIZE_T_CEILING / item_size > chunk_capacity);
pool = ALLOC(sizeof(mp_pool_t));
CHECK_ALLOC(pool);
memset(pool, 0, sizeof(mp_pool_t));
/* First, we figure out how much space to allow per item. We'll want to
* make sure we have enough for the overhead plus the item size. */
alloc_size = (size_t)(STRUCT_OFFSET(mp_allocated_t, u.mem) + item_size);
/* If the item_size is less than sizeof(next_free), we need to make
* the allocation bigger. */
if (alloc_size < sizeof(mp_allocated_t))
alloc_size = sizeof(mp_allocated_t);
/* If we're not an even multiple of ALIGNMENT, round up. */
if (alloc_size % ALIGNMENT) {
alloc_size = alloc_size + ALIGNMENT - (alloc_size % ALIGNMENT);
}
if (alloc_size < ALIGNMENT)
alloc_size = ALIGNMENT;
ASSERT((alloc_size % ALIGNMENT) == 0);
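/* Worked example, assuming 8-byte pointers: for item_size == 241, the
 * in_chunk overhead makes alloc_size 8 + 241 == 249, which then rounds up
 * to 256, the next multiple of ALIGNMENT. */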
/* Now we figure out how many items fit in each chunk. We need to fit at
* least 2 items per chunk. No chunk can be more than MAX_CHUNK bytes long,
* or less than MIN_CHUNK. */
if (chunk_capacity > MAX_CHUNK)
chunk_capacity = MAX_CHUNK;
/* Try to be around a power of 2 in size, since that's what allocators like
* handing out. 512K-1 byte is a lot better than 512K+1 byte. */
chunk_capacity = (size_t) round_to_power_of_2(chunk_capacity);
while (chunk_capacity < alloc_size * 2 + CHUNK_OVERHEAD)
chunk_capacity *= 2;
if (chunk_capacity < MIN_CHUNK)
chunk_capacity = MIN_CHUNK;
new_chunk_cap = (chunk_capacity-CHUNK_OVERHEAD) / alloc_size;
tor_assert(new_chunk_cap < INT_MAX);
pool->new_chunk_capacity = (int)new_chunk_cap;
pool->item_alloc_size = alloc_size;
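/* Continuing the example: a requested chunk_capacity of 2500 rounds to the
 * nearest power of two (2048), then gets bumped up to MIN_CHUNK (4096).
 * With roughly 64 bytes of CHUNK_OVERHEAD on an LP64 build, that yields
 * (4096 - 64) / 256 == 15 items per chunk. */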
log_debug(LD_MM, "Capacity is %lu, item size is %lu, alloc size is %lu",
(unsigned long)pool->new_chunk_capacity,
(unsigned long)pool->item_alloc_size,
(unsigned long)(pool->new_chunk_capacity*pool->item_alloc_size));
return pool;
}
/** Helper function for qsort: used to sort pointers to mp_chunk_t into
* descending order of fullness. */
static int
mp_pool_sort_used_chunks_helper(const void *_a, const void *_b)
{
mp_chunk_t *a = *(mp_chunk_t**)_a;
mp_chunk_t *b = *(mp_chunk_t**)_b;
return b->n_allocated - a->n_allocated;
}
/** Sort the used chunks in <b>pool</b> into descending order of fullness,
* so that we preferentially fill up mostly full chunks before we make
* nearly empty chunks less nearly empty. */
static void
mp_pool_sort_used_chunks(mp_pool_t *pool)
{
int i, n=0, inverted=0;
mp_chunk_t **chunks, *chunk;
for (chunk = pool->used_chunks; chunk; chunk = chunk->next) {
++n;
if (chunk->next && chunk->next->n_allocated > chunk->n_allocated)
++inverted;
}
if (!inverted)
return;
//printf("Sort %d/%d\n",inverted,n);
chunks = ALLOC(sizeof(mp_chunk_t *)*n);
#ifdef ALLOC_CAN_RETURN_NULL
if (PREDICT_UNLIKELY(!chunks)) return;
#endif
for (i=0,chunk = pool->used_chunks; chunk; chunk = chunk->next)
chunks[i++] = chunk;
qsort(chunks, n, sizeof(mp_chunk_t *), mp_pool_sort_used_chunks_helper);
pool->used_chunks = chunks[0];
chunks[0]->prev = NULL;
for (i=1;i<n;++i) {
chunks[i-1]->next = chunks[i];
chunks[i]->prev = chunks[i-1];
}
chunks[n-1]->next = NULL;
FREE(chunks);
mp_pool_assert_ok(pool);
}
/** If there are more than <b>n</b> empty chunks in <b>pool</b>, free the
* excess ones that have been empty for the longest. If
* <b>keep_recently_used</b> is true, do not free chunks unless they have been
* empty since the last call to this function.
**/
void
mp_pool_clean(mp_pool_t *pool, int n_to_keep, int keep_recently_used)
{
mp_chunk_t *chunk, **first_to_free;
mp_pool_sort_used_chunks(pool);
ASSERT(n_to_keep >= 0);
if (keep_recently_used) {
int n_recently_used = pool->n_empty_chunks - pool->min_empty_chunks;
if (n_to_keep < n_recently_used)
n_to_keep = n_recently_used;
}
ASSERT(n_to_keep >= 0);
first_to_free = &pool->empty_chunks;
while (*first_to_free && n_to_keep > 0) {
first_to_free = &(*first_to_free)->next;
--n_to_keep;
}
if (!*first_to_free) {
pool->min_empty_chunks = pool->n_empty_chunks;
return;
}
chunk = *first_to_free;
while (chunk) {
mp_chunk_t *next = chunk->next;
chunk->magic = 0xdeadbeef;
FREE(chunk);
#ifdef MEMPOOL_STATS
++pool->total_chunks_freed;
#endif
--pool->n_empty_chunks;
chunk = next;
}
pool->min_empty_chunks = pool->n_empty_chunks;
*first_to_free = NULL;
}
/** Helper: Given a list of chunks, free all the chunks in the list. */
static void
destroy_chunks(mp_chunk_t *chunk)
{
mp_chunk_t *next;
while (chunk) {
chunk->magic = 0xd3adb33f;
next = chunk->next;
FREE(chunk);
chunk = next;
}
}
/** Free all space held in <b>pool</b>. This makes all pointers returned from
* mp_pool_get(<b>pool</b>) invalid. */
void
mp_pool_destroy(mp_pool_t *pool)
{
destroy_chunks(pool->empty_chunks);
destroy_chunks(pool->used_chunks);
destroy_chunks(pool->full_chunks);
memwipe(pool, 0xe0, sizeof(mp_pool_t));
FREE(pool);
}
/** Helper: make sure that a given chunk list is not corrupt. */
static int
assert_chunks_ok(mp_pool_t *pool, mp_chunk_t *chunk, int empty, int full)
{
mp_allocated_t *allocated;
int n = 0;
if (chunk)
ASSERT(chunk->prev == NULL);
while (chunk) {
n++;
ASSERT(chunk->magic == MP_CHUNK_MAGIC);
ASSERT(chunk->pool == pool);
for (allocated = chunk->first_free; allocated;
allocated = allocated->u.next_free) {
ASSERT(allocated->in_chunk == chunk);
}
if (empty)
ASSERT(chunk->n_allocated == 0);
else if (full)
ASSERT(chunk->n_allocated == chunk->capacity);
else
ASSERT(chunk->n_allocated > 0 && chunk->n_allocated < chunk->capacity);
ASSERT(chunk->capacity == pool->new_chunk_capacity);
ASSERT(chunk->mem_size ==
pool->new_chunk_capacity * pool->item_alloc_size);
ASSERT(chunk->next_mem >= chunk->mem &&
chunk->next_mem <= chunk->mem + chunk->mem_size);
if (chunk->next)
ASSERT(chunk->next->prev == chunk);
chunk = chunk->next;
}
return n;
}
/** Fail with an assertion if <b>pool</b> is not internally consistent. */
void
mp_pool_assert_ok(mp_pool_t *pool)
{
int n_empty;
n_empty = assert_chunks_ok(pool, pool->empty_chunks, 1, 0);
assert_chunks_ok(pool, pool->full_chunks, 0, 1);
assert_chunks_ok(pool, pool->used_chunks, 0, 0);
ASSERT(pool->n_empty_chunks == n_empty);
}
#ifdef TOR
/** Dump information about <b>pool</b>'s memory usage to the Tor log at level
* <b>severity</b>. */
/*FFFF uses Tor logging functions. */
void
mp_pool_log_status(mp_pool_t *pool, int severity)
{
uint64_t bytes_used = 0;
uint64_t bytes_allocated = 0;
uint64_t bu = 0, ba = 0;
mp_chunk_t *chunk;
int n_full = 0, n_used = 0;
ASSERT(pool);
for (chunk = pool->empty_chunks; chunk; chunk = chunk->next) {
bytes_allocated += chunk->mem_size;
}
log_fn(severity, LD_MM, U64_FORMAT" bytes in %d empty chunks",
U64_PRINTF_ARG(bytes_allocated), pool->n_empty_chunks);
for (chunk = pool->used_chunks; chunk; chunk = chunk->next) {
++n_used;
bu += chunk->n_allocated * pool->item_alloc_size;
ba += chunk->mem_size;
log_fn(severity, LD_MM, " used chunk: %d items allocated",
chunk->n_allocated);
}
log_fn(severity, LD_MM, U64_FORMAT"/"U64_FORMAT
" bytes in %d partially full chunks",
U64_PRINTF_ARG(bu), U64_PRINTF_ARG(ba), n_used);
bytes_used += bu;
bytes_allocated += ba;
bu = ba = 0;
for (chunk = pool->full_chunks; chunk; chunk = chunk->next) {
++n_full;
bu += chunk->n_allocated * pool->item_alloc_size;
ba += chunk->mem_size;
}
log_fn(severity, LD_MM, U64_FORMAT"/"U64_FORMAT
" bytes in %d full chunks",
U64_PRINTF_ARG(bu), U64_PRINTF_ARG(ba), n_full);
bytes_used += bu;
bytes_allocated += ba;
log_fn(severity, LD_MM, "Total: "U64_FORMAT"/"U64_FORMAT" bytes allocated "
"for cell pools are full.",
U64_PRINTF_ARG(bytes_used), U64_PRINTF_ARG(bytes_allocated));
#ifdef MEMPOOL_STATS
log_fn(severity, LD_MM, U64_FORMAT" cell allocations ever; "
U64_FORMAT" chunk allocations ever; "
U64_FORMAT" chunk frees ever.",
U64_PRINTF_ARG(pool->total_items_allocated),
U64_PRINTF_ARG(pool->total_chunks_allocated),
U64_PRINTF_ARG(pool->total_chunks_freed));
#endif
}
#endif
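
For reference, here is a minimal sketch of how this API was driven. The item
type, sizes, and function names below are hypothetical illustrations, not the
removed in-tree caller (which pooled relay cells):

#include <string.h>
#include "mempool.h"

/* Hypothetical fixed-size item; a pool handles exactly one item size. */
typedef struct item_example_t {
  char body[512];
} item_example_t;

static mp_pool_t *example_pool = NULL;

static void
example_init(void)
{
  /* Ask for roughly 128 KiB chunks; the pool rounds this internally. */
  example_pool = mp_pool_new(sizeof(item_example_t), 128*1024);
}

static void
example_roundtrip(void)
{
  item_example_t *it = mp_pool_get(example_pool);
  memset(it->body, 0, sizeof(it->body));
  /* Releasing returns the item to its chunk's freelist, not to malloc. */
  mp_pool_release(it);
  /* Periodically hand idle empty chunks back to the underlying allocator. */
  mp_pool_clean(example_pool, 5, 1);
}

static void
example_shutdown(void)
{
  /* Invalidates every pointer ever returned by mp_pool_get(example_pool). */
  mp_pool_destroy(example_pool);
  example_pool = NULL;
}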

src/common/mempool.h (deleted file)

@@ -1,65 +0,0 @@
/* Copyright (c) 2007-2015, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
* \file mempool.h
* \brief Headers for mempool.c
**/
#ifndef TOR_MEMPOOL_H
#define TOR_MEMPOOL_H
/** A memory pool is a context in which a large number of fixed-sized
* objects can be allocated efficiently. See mempool.c for implementation
* details. */
typedef struct mp_pool_t mp_pool_t;
void *mp_pool_get(mp_pool_t *pool);
void mp_pool_release(void *item);
mp_pool_t *mp_pool_new(size_t item_size, size_t chunk_capacity);
void mp_pool_clean(mp_pool_t *pool, int n_to_keep, int keep_recently_used);
void mp_pool_destroy(mp_pool_t *pool);
void mp_pool_assert_ok(mp_pool_t *pool);
void mp_pool_log_status(mp_pool_t *pool, int severity);
#define MP_POOL_ITEM_OVERHEAD (sizeof(void*))
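/* That overhead is the in_chunk back-pointer at the head of each allocated
 * item: callers budgeting memory should count item_size +
 * MP_POOL_ITEM_OVERHEAD bytes per live item. */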
#define MEMPOOL_STATS
#ifdef MEMPOOL_PRIVATE
/* These declarations are only used by mempool.c and test.c */
struct mp_pool_t {
/** Doubly-linked list of chunks in which no items have been allocated.
* The front of the list is the most recently emptied chunk. */
struct mp_chunk_t *empty_chunks;
/** Doubly-linked list of chunks in which some items have been allocated,
* but which are not yet full. The front of the list is the chunk that has
* most recently been modified. */
struct mp_chunk_t *used_chunks;
/** Doubly-linked list of chunks in which no more items can be allocated.
* The front of the list is the chunk that has most recently become full. */
struct mp_chunk_t *full_chunks;
/** Length of <b>empty_chunks</b>. */
int n_empty_chunks;
/** Lowest value of <b>empty_chunks</b> since last call to
* mp_pool_clean(-1). */
int min_empty_chunks;
/** Size of each chunk (in items). */
int new_chunk_capacity;
/** Size to allocate for each item, including overhead and alignment
* padding. */
size_t item_alloc_size;
#ifdef MEMPOOL_STATS
/** Total number of items allocated ever. */
uint64_t total_items_allocated;
/** Total number of chunks allocated ever. */
uint64_t total_chunks_allocated;
/** Total number of chunks freed ever. */
uint64_t total_chunks_freed;
#endif
};
#endif
#endif

src/common/buffers.c

@@ -105,114 +105,6 @@ chunk_repack(chunk_t *chunk)
/** Keep track of total size of allocated chunks for consistency asserts */
static size_t total_bytes_allocated_in_chunks = 0;
#if defined(ENABLE_BUF_FREELISTS) || defined(RUNNING_DOXYGEN)
/** A freelist of chunks. */
typedef struct chunk_freelist_t {
size_t alloc_size; /**< What size chunks does this freelist hold? */
int max_length; /**< Never allow more than this number of chunks in the
* freelist. */
int slack; /**< When trimming the freelist, leave this number of extra
* chunks beyond lowest_length.*/
int cur_length; /**< How many chunks on the freelist now? */
int lowest_length; /**< What's the smallest value of cur_length since the
* last time we cleaned this freelist? */
uint64_t n_alloc;
uint64_t n_free;
uint64_t n_hit;
chunk_t *head; /**< First chunk on the freelist. */
} chunk_freelist_t;
/** Macro to help define freelists. */
#define FL(a,m,s) { a, m, s, 0, 0, 0, 0, 0, NULL }
/** Static array of freelists, sorted by alloc_len, terminated by an entry
* with alloc_size of 0. */
static chunk_freelist_t freelists[] = {
FL(4096, 256, 8), FL(8192, 128, 4), FL(16384, 64, 4), FL(32768, 32, 2),
FL(0, 0, 0)
};
#undef FL
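/* With these settings each size class caches at most 1 MiB of idle chunks
 * (for instance 256 * 4096 bytes for the smallest class), so the freelists
 * together can pin down at most about 4 MiB of unused buffer RAM. */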
/** How many times have we looked for a chunk of a size that no freelist
* could help with? */
static uint64_t n_freelist_miss = 0;
static void assert_freelist_ok(chunk_freelist_t *fl);
/** Return the freelist to hold chunks of size <b>alloc</b>, or NULL if
* no freelist exists for that size. */
static INLINE chunk_freelist_t *
get_freelist(size_t alloc)
{
int i;
for (i=0; (freelists[i].alloc_size <= alloc &&
freelists[i].alloc_size); ++i ) {
if (freelists[i].alloc_size == alloc) {
return &freelists[i];
}
}
return NULL;
}
/** Deallocate a chunk or put it on a freelist */
static void
chunk_free_unchecked(chunk_t *chunk)
{
size_t alloc;
chunk_freelist_t *freelist;
alloc = CHUNK_ALLOC_SIZE(chunk->memlen);
freelist = get_freelist(alloc);
if (freelist && freelist->cur_length < freelist->max_length) {
chunk->next = freelist->head;
freelist->head = chunk;
++freelist->cur_length;
} else {
if (freelist)
++freelist->n_free;
#ifdef DEBUG_CHUNK_ALLOC
tor_assert(alloc == chunk->DBG_alloc);
#endif
tor_assert(total_bytes_allocated_in_chunks >= alloc);
total_bytes_allocated_in_chunks -= alloc;
tor_free(chunk);
}
}
/** Allocate a new chunk with a given allocation size, or get one from the
* freelist. Note that a chunk with allocation size A can actually hold only
* CHUNK_SIZE_WITH_ALLOC(A) bytes in its mem field. */
static INLINE chunk_t *
chunk_new_with_alloc_size(size_t alloc)
{
chunk_t *ch;
chunk_freelist_t *freelist;
tor_assert(alloc >= sizeof(chunk_t));
freelist = get_freelist(alloc);
if (freelist && freelist->head) {
ch = freelist->head;
freelist->head = ch->next;
if (--freelist->cur_length < freelist->lowest_length)
freelist->lowest_length = freelist->cur_length;
++freelist->n_hit;
} else {
if (freelist)
++freelist->n_alloc;
else
++n_freelist_miss;
ch = tor_malloc(alloc);
#ifdef DEBUG_CHUNK_ALLOC
ch->DBG_alloc = alloc;
#endif
total_bytes_allocated_in_chunks += alloc;
}
ch->next = NULL;
ch->datalen = 0;
ch->memlen = CHUNK_SIZE_WITH_ALLOC(alloc);
ch->data = &ch->mem[0];
return ch;
}
#else
static void
chunk_free_unchecked(chunk_t *chunk)
{
@@ -241,7 +133,6 @@ chunk_new_with_alloc_size(size_t alloc)
ch->data = &ch->mem[0];
return ch;
}
#endif
/** Expand <b>chunk</b> until it can hold <b>sz</b> bytes, and return a
* new pointer to <b>chunk</b>. Old pointers are no longer valid. */
@@ -284,115 +175,6 @@ preferred_chunk_size(size_t target)
return sz;
}
/** Remove from the freelists most chunks that have not been used since the
* last call to buf_shrink_freelists(). Return the amount of memory
* freed. */
size_t
buf_shrink_freelists(int free_all)
{
#ifdef ENABLE_BUF_FREELISTS
int i;
size_t total_freed = 0;
disable_control_logging();
for (i = 0; freelists[i].alloc_size; ++i) {
int slack = freelists[i].slack;
assert_freelist_ok(&freelists[i]);
if (free_all || freelists[i].lowest_length > slack) {
int n_to_free = free_all ? freelists[i].cur_length :
(freelists[i].lowest_length - slack);
int n_to_skip = freelists[i].cur_length - n_to_free;
int orig_length = freelists[i].cur_length;
int orig_n_to_free = n_to_free, n_freed=0;
int orig_n_to_skip = n_to_skip;
int new_length = n_to_skip;
chunk_t **chp = &freelists[i].head;
chunk_t *chunk;
while (n_to_skip) {
if (!(*chp) || ! (*chp)->next) {
log_warn(LD_BUG, "I wanted to skip %d chunks in the freelist for "
"%d-byte chunks, but only found %d. (Length %d)",
orig_n_to_skip, (int)freelists[i].alloc_size,
orig_n_to_skip-n_to_skip, freelists[i].cur_length);
assert_freelist_ok(&freelists[i]);
goto done;
}
// tor_assert((*chp)->next);
chp = &(*chp)->next;
--n_to_skip;
}
chunk = *chp;
*chp = NULL;
while (chunk) {
chunk_t *next = chunk->next;
#ifdef DEBUG_CHUNK_ALLOC
tor_assert(chunk->DBG_alloc == CHUNK_ALLOC_SIZE(chunk->memlen));
#endif
tor_assert(total_bytes_allocated_in_chunks >=
CHUNK_ALLOC_SIZE(chunk->memlen));
total_bytes_allocated_in_chunks -= CHUNK_ALLOC_SIZE(chunk->memlen);
total_freed += CHUNK_ALLOC_SIZE(chunk->memlen);
tor_free(chunk);
chunk = next;
--n_to_free;
++n_freed;
++freelists[i].n_free;
}
if (n_to_free) {
log_warn(LD_BUG, "Freelist length for %d-byte chunks may have been "
"messed up somehow.", (int)freelists[i].alloc_size);
log_warn(LD_BUG, "There were %d chunks at the start. I decided to "
"keep %d. I wanted to free %d. I freed %d. I somehow think "
"I have %d left to free.",
freelists[i].cur_length, n_to_skip, orig_n_to_free,
n_freed, n_to_free);
}
// tor_assert(!n_to_free);
freelists[i].cur_length = new_length;
tor_assert(orig_n_to_skip == new_length);
log_info(LD_MM, "Cleaned freelist for %d-byte chunks: original "
"length %d, kept %d, dropped %d. New length is %d",
(int)freelists[i].alloc_size, orig_length,
orig_n_to_skip, orig_n_to_free, new_length);
}
freelists[i].lowest_length = freelists[i].cur_length;
assert_freelist_ok(&freelists[i]);
}
done:
enable_control_logging();
return total_freed;
#else
(void) free_all;
return 0;
#endif
}
/** Describe the current status of the freelists at log level <b>severity</b>.
*/
void
buf_dump_freelist_sizes(int severity)
{
#ifdef ENABLE_BUF_FREELISTS
int i;
tor_log(severity, LD_MM, "====== Buffer freelists:");
for (i = 0; freelists[i].alloc_size; ++i) {
uint64_t total = ((uint64_t)freelists[i].cur_length) *
freelists[i].alloc_size;
tor_log(severity, LD_MM,
U64_FORMAT" bytes in %d %d-byte chunks ["U64_FORMAT
" misses; "U64_FORMAT" frees; "U64_FORMAT" hits]",
U64_PRINTF_ARG(total),
freelists[i].cur_length, (int)freelists[i].alloc_size,
U64_PRINTF_ARG(freelists[i].n_alloc),
U64_PRINTF_ARG(freelists[i].n_free),
U64_PRINTF_ARG(freelists[i].n_hit));
}
tor_log(severity, LD_MM, U64_FORMAT" allocations in non-freelist sizes",
U64_PRINTF_ARG(n_freelist_miss));
#else
(void)severity;
#endif
}
/** Collapse data from the first N chunks from <b>buf</b> into buf->head,
* growing it as necessary, until buf->head has the first <b>bytes</b> bytes
* of data from the buffer, or until buf->head has all the data in <b>buf</b>.
@@ -488,15 +270,6 @@ buf_get_first_chunk_data(const buf_t *buf, const char **cp, size_t *sz)
}
#endif
/** Resize buf so it won't hold extra memory that we haven't been
* using lately.
*/
void
buf_shrink(buf_t *buf)
{
(void)buf;
}
/** Remove the first <b>n</b> bytes from buf. */
static INLINE void
buf_remove_from_front(buf_t *buf, size_t n)
@@ -2672,23 +2445,3 @@ assert_buf_ok(buf)
}
}
#ifdef ENABLE_BUF_FREELISTS
/** Log an error and exit if <b>fl</b> is corrupted.
*/
static void
assert_freelist_ok(chunk_freelist_t *fl)
{
chunk_t *ch;
int n;
tor_assert(fl->alloc_size > 0);
n = 0;
for (ch = fl->head; ch; ch = ch->next) {
tor_assert(CHUNK_ALLOC_SIZE(ch->memlen) == fl->alloc_size);
++n;
}
tor_assert(n == fl->cur_length);
tor_assert(n >= fl->lowest_length);
tor_assert(n <= fl->max_length);
}
#endif
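
Stripped of the statistics and trimming logic, the machinery above is a
size-keyed, singly-linked LIFO cache in front of malloc(). A generic,
self-contained sketch of that discipline (simplified names, not Tor's exact
code):

#include <stdlib.h>

/* Freed blocks are reused as list nodes, so every size class must be at
 * least sizeof(fl_node_t) bytes. */
typedef struct fl_node_t { struct fl_node_t *next; } fl_node_t;

typedef struct freelist_example_t {
  size_t alloc_size;  /* the one allocation size this list serves */
  int cur_length;     /* blocks currently cached */
  int max_length;     /* never cache more than this many blocks */
  fl_node_t *head;
} freelist_example_t;

/* Satisfy an allocation from the cache if possible, else from malloc. */
static void *
fl_get(freelist_example_t *fl)
{
  if (fl->head) {
    fl_node_t *n = fl->head;
    fl->head = n->next;
    --fl->cur_length;
    return n;
  }
  return malloc(fl->alloc_size);
}

/* Return a block to the cache, or really free it if the cache is full. */
static void
fl_put(freelist_example_t *fl, void *p)
{
  if (fl->cur_length < fl->max_length) {
    fl_node_t *n = p;
    n->next = fl->head;
    fl->head = n;
    ++fl->cur_length;
  } else {
    free(p);
  }
}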

src/common/buffers.h

@@ -20,9 +20,6 @@ size_t buf_get_default_chunk_size(const buf_t *buf);
void buf_free(buf_t *buf);
void buf_clear(buf_t *buf);
buf_t *buf_copy(const buf_t *buf);
void buf_shrink(buf_t *buf);
size_t buf_shrink_freelists(int free_all);
void buf_dump_freelist_sizes(int severity);
MOCK_DECL(size_t, buf_datalen, (const buf_t *buf));
size_t buf_allocation(const buf_t *buf);
@@ -108,9 +105,9 @@ STATIC void buf_pullup(buf_t *buf, size_t bytes, int nulterminate);
void buf_get_first_chunk_data(const buf_t *buf, const char **cp, size_t *sz);
#define DEBUG_CHUNK_ALLOC
/** A single chunk on a buffer or in a freelist. */
/** A single chunk on a buffer. */
typedef struct chunk_t {
struct chunk_t *next; /**< The next chunk on the buffer or freelist. */
struct chunk_t *next; /**< The next chunk on the buffer. */
size_t datalen; /**< The number of bytes stored in this chunk */
size_t memlen; /**< The number of usable bytes of storage in <b>mem</b>. */
#ifdef DEBUG_CHUNK_ALLOC

src/or/circuitlist.c

@@ -2062,17 +2062,6 @@ circuits_handle_oom(size_t current_allocation)
"over-long queues. (This behavior is controlled by "
"MaxMemInQueues.)");
{
const size_t recovered = buf_shrink_freelists(1);
if (recovered >= current_allocation) {
log_warn(LD_BUG, "We somehow recovered more memory from freelists "
"than we thought we had allocated");
current_allocation = 0;
} else {
current_allocation -= recovered;
}
}
{
size_t mem_target = (size_t)(get_options()->MaxMemInQueues *
FRACTION_OF_DATA_TO_RETAIN_ON_OOM);
@@ -2156,12 +2145,6 @@ circuits_handle_oom(size_t current_allocation)
done_recovering_mem:
#ifdef ENABLE_MEMPOOLS
clean_cell_pool(); /* In case this helps. */
#endif /* ENABLE_MEMPOOLS */
buf_shrink_freelists(1); /* This is necessary to actually release buffer
chunks. */
log_notice(LD_GENERAL, "Removed "U64_FORMAT" bytes by killing %d circuits; "
"%d circuits remain alive. Also killed %d non-linked directory "
"connections.",

src/or/main.c

@@ -1223,7 +1223,6 @@ run_scheduled_events(time_t now)
static time_t time_to_check_v3_certificate = 0;
static time_t time_to_check_listeners = 0;
static time_t time_to_download_networkstatus = 0;
static time_t time_to_shrink_memory = 0;
static time_t time_to_try_getting_descriptors = 0;
static time_t time_to_reset_descriptor_failures = 0;
static time_t time_to_add_entropy = 0;
@@ -1573,22 +1572,6 @@ run_scheduled_events(time_t now)
for (i=0;i<smartlist_len(connection_array);i++) {
run_connection_housekeeping(i, now);
}
if (time_to_shrink_memory < now) {
SMARTLIST_FOREACH(connection_array, connection_t *, conn, {
if (conn->outbuf)
buf_shrink(conn->outbuf);
if (conn->inbuf)
buf_shrink(conn->inbuf);
});
#ifdef ENABLE_MEMPOOLS
clean_cell_pool();
#endif /* ENABLE_MEMPOOLS */
buf_shrink_freelists(0);
/** How often do we check buffers and pools for empty space that can be
* deallocated? */
#define MEM_SHRINK_INTERVAL (60)
time_to_shrink_memory = now + MEM_SHRINK_INTERVAL;
}
/* 6. And remove any marked circuits... */
circuit_close_all_marked();
@@ -2260,7 +2243,6 @@ dumpmemusage(int severity)
dump_routerlist_mem_usage(severity);
dump_cell_pool_usage(severity);
dump_dns_mem_usage(severity);
buf_dump_freelist_sizes(severity);
tor_log_mallinfo(severity);
}
@@ -2652,7 +2634,6 @@ tor_free_all(int postfork)
channel_free_all();
connection_free_all();
scheduler_free_all();
buf_shrink_freelists(1);
memarea_clear_freelist();
nodelist_free_all();
microdesc_free_all();

src/test/test_buffers.c

@@ -193,7 +193,6 @@ test_buffers_basic(void *arg)
buf_free(buf);
if (buf2)
buf_free(buf2);
buf_shrink_freelists(1);
}
static void
@@ -297,12 +296,9 @@ test_buffer_pullup(void *arg)
buf_free(buf);
buf = NULL;
buf_shrink_freelists(1);
tt_int_op(buf_get_total_allocation(), OP_EQ, 0);
done:
buf_free(buf);
buf_shrink_freelists(1);
tor_free(stuff);
tor_free(tmp);
}
@@ -370,7 +366,6 @@ test_buffer_copy(void *arg)
generic_buffer_free(buf);
if (buf2)
generic_buffer_free(buf2);
buf_shrink_freelists(1);
}
static void
@@ -445,7 +440,6 @@ test_buffer_ext_or_cmd(void *arg)
ext_or_cmd_free(cmd);
generic_buffer_free(buf);
tor_free(tmp);
buf_shrink_freelists(1);
}
static void
@@ -481,20 +475,13 @@ test_buffer_allocation_tracking(void *arg)
fetch_from_buf(junk, 4096, buf1); /* drop a 4k chunk... */
tt_int_op(buf_allocation(buf1), OP_EQ, 3*4096); /* now 3 4k chunks */
#ifdef ENABLE_BUF_FREELISTS
tt_int_op(buf_get_total_allocation(), OP_EQ, 16384); /* that chunk went onto
the freelist. */
#else
tt_int_op(buf_get_total_allocation(), OP_EQ, 12288); /* that chunk was really
freed. */
#endif
write_to_buf(junk, 4000, buf2);
tt_int_op(buf_allocation(buf2), OP_EQ, 4096); /* another 4k chunk. */
/*
* If we're using freelists, size stays at 16384 because we just pulled a
* chunk from the freelist. If we aren't, we bounce back up to 16384 by
* allocating a new chunk.
* We bounce back up to 16384 by allocating a new chunk.
*/
tt_int_op(buf_get_total_allocation(), OP_EQ, 16384);
write_to_buf(junk, 4000, buf2);
@@ -512,17 +499,14 @@
buf2 = NULL;
tt_int_op(buf_get_total_allocation(), OP_LT, 4008000);
buf_shrink_freelists(1);
tt_int_op(buf_get_total_allocation(), OP_EQ, buf_allocation(buf1));
buf_free(buf1);
buf1 = NULL;
buf_shrink_freelists(1);
tt_int_op(buf_get_total_allocation(), OP_EQ, 0);
done:
buf_free(buf1);
buf_free(buf2);
buf_shrink_freelists(1);
tor_free(junk);
}

src/test/test_util.c

@@ -6,15 +6,11 @@
#include "orconfig.h"
#define COMPAT_PRIVATE
#define CONTROL_PRIVATE
#define MEMPOOL_PRIVATE
#define UTIL_PRIVATE
#include "or.h"
#include "config.h"
#include "control.h"
#include "test.h"
#ifdef ENABLE_MEMPOOLS
#include "mempool.h"
#endif /* ENABLE_MEMPOOLS */
#include "memarea.h"
#include "util_process.h"
@@ -2644,69 +2640,6 @@ test_util_path_is_relative(void *arg)
;
}
#ifdef ENABLE_MEMPOOLS
/** Run unittests for memory pool allocator */
static void
test_util_mempool(void *arg)
{
mp_pool_t *pool = NULL;
smartlist_t *allocated = NULL;
int i;
(void)arg;
pool = mp_pool_new(1, 100);
tt_assert(pool);
tt_assert(pool->new_chunk_capacity >= 100);
tt_assert(pool->item_alloc_size >= sizeof(void*)+1);
mp_pool_destroy(pool);
pool = NULL;
pool = mp_pool_new(241, 2500);
tt_assert(pool);
tt_assert(pool->new_chunk_capacity >= 10);
tt_assert(pool->item_alloc_size >= sizeof(void*)+241);
tt_int_op(pool->item_alloc_size & 0x03,OP_EQ, 0);
tt_assert(pool->new_chunk_capacity < 60);
allocated = smartlist_new();
for (i = 0; i < 20000; ++i) {
if (smartlist_len(allocated) < 20 || crypto_rand_int(2)) {
void *m = mp_pool_get(pool);
memset(m, 0x09, 241);
smartlist_add(allocated, m);
//printf("%d: %p\n", i, m);
//mp_pool_assert_ok(pool);
} else {
int idx = crypto_rand_int(smartlist_len(allocated));
void *m = smartlist_get(allocated, idx);
//printf("%d: free %p\n", i, m);
smartlist_del(allocated, idx);
mp_pool_release(m);
//mp_pool_assert_ok(pool);
}
if (crypto_rand_int(777)==0)
mp_pool_clean(pool, 1, 1);
if (i % 777)
mp_pool_assert_ok(pool);
}
done:
if (allocated) {
SMARTLIST_FOREACH(allocated, void *, m, mp_pool_release(m));
mp_pool_assert_ok(pool);
mp_pool_clean(pool, 0, 0);
mp_pool_assert_ok(pool);
smartlist_free(allocated);
}
if (pool)
mp_pool_destroy(pool);
}
#endif /* ENABLE_MEMPOOLS */
/** Run unittests for memory area allocator */
static void
test_util_memarea(void *arg)