/* Copyright (c) 2008-2011, The Tor Project, Inc. */
/* See LICENSE for licensing information */

/** \file memarea.c
 * \brief Implementation for memarea_t, an allocator for allocating lots of
 * small objects that will be freed all at once.
 */
#include "orconfig.h"
|
|
|
|
#include <stdlib.h>
|
|
|
|
#include "memarea.h"
|
|
|
|
#include "util.h"
|
|
|
|
#include "compat.h"
|
2010-07-10 03:52:20 +02:00
|
|
|
#include "torlog.h"
|
2008-03-26 17:33:33 +01:00
|
|
|
|
2009-05-12 21:10:23 +02:00
|
|
|
/** If true, we try to detect any attempts to write beyond the length of a
 * memarea. */
#define USE_SENTINELS

/** All returned pointers should be aligned to the nearest multiple of this
 * value. */
#define MEMAREA_ALIGN SIZEOF_VOID_P

/* Derive the low-bit mask used to round pointers up to MEMAREA_ALIGN.
 * Only 4- and 8-byte pointers are supported. */
#if MEMAREA_ALIGN == 4
#define MEMAREA_ALIGN_MASK 3lu
#elif MEMAREA_ALIGN == 8
#define MEMAREA_ALIGN_MASK 7lu
#else
#error "void* is neither 4 nor 8 bytes long. I don't know how to align stuff."
#endif

#ifdef USE_SENTINELS
/** Magic value that we stick at the end of a memarea so we can make sure
 * there are no run-off-the-end bugs. */
#define SENTINEL_VAL 0x90806622u
/** How many bytes per area do we devote to the sentinel? */
#define SENTINEL_LEN sizeof(uint32_t)
/** Given a mem_area_chunk_t with SENTINEL_LEN extra bytes allocated at the
 * end, set those bytes. */
#define SET_SENTINEL(chunk)                                     \
  STMT_BEGIN                                                    \
  set_uint32( &(chunk)->u.mem[chunk->mem_size], SENTINEL_VAL ); \
  STMT_END
/** Assert that the sentinel on a memarea is set correctly. */
#define CHECK_SENTINEL(chunk)                                           \
  STMT_BEGIN                                                            \
  uint32_t sent_val = get_uint32(&(chunk)->u.mem[chunk->mem_size]);     \
  tor_assert(sent_val == SENTINEL_VAL);                                 \
  STMT_END
#else
/* Sentinels disabled: reserve no space and make the checks no-ops. */
#define SENTINEL_LEN 0
#define SET_SENTINEL(chunk) STMT_NIL
#define CHECK_SENTINEL(chunk) STMT_NIL
#endif
|
|
|
|
|
2008-12-22 15:56:28 +01:00
|
|
|
/** Increment <b>ptr</b> until it is aligned to MEMAREA_ALIGN. */
|
2008-03-26 17:33:33 +01:00
|
|
|
static INLINE void *
|
|
|
|
realign_pointer(void *ptr)
|
|
|
|
{
|
|
|
|
uintptr_t x = (uintptr_t)ptr;
|
|
|
|
x = (x+MEMAREA_ALIGN_MASK) & ~MEMAREA_ALIGN_MASK;
|
2011-03-25 20:44:02 +01:00
|
|
|
/* Reinstate this if bug 930 ever reappears
|
|
|
|
tor_assert(((void*)x) >= ptr);
|
|
|
|
*/
|
2008-03-26 17:33:33 +01:00
|
|
|
return (void*)x;
|
|
|
|
}
|
|
|
|
|
|
|
|
/** Implements part of a memarea. New memory is carved off from chunk->mem in
 * increasing order until a request is too big, at which point a new chunk is
 * allocated. */
typedef struct memarea_chunk_t {
  /** Next chunk in this area. Only kept around so we can free it. */
  struct memarea_chunk_t *next_chunk;
  size_t mem_size; /**< How much RAM is available in u.mem, total? */
  char *next_mem; /**< Next position in u.mem to allocate data at. If it's
                   * greater than or equal to mem+mem_size, this chunk is
                   * full. */
  union {
    char mem[1]; /**< Memory space in this chunk. */
    void *_void_for_alignment; /**< Dummy; used to make sure mem is aligned. */
  } u;
} memarea_chunk_t;
|
|
|
|
|
2011-03-16 22:05:37 +01:00
|
|
|
/** How many bytes are needed for overhead before we get to the memory part
 * of a chunk? */
#define CHUNK_HEADER_SIZE STRUCT_OFFSET(memarea_chunk_t, u)

/** What's the smallest that we'll allocate a chunk? */
#define CHUNK_SIZE 4096
|
2008-04-08 19:29:05 +02:00
|
|
|
|
2008-03-26 17:33:33 +01:00
|
|
|
/** A memarea_t is an allocation region for a set of small memory requests
 * that will all be freed at once. */
struct memarea_t {
  memarea_chunk_t *first; /**< Top of the chunk stack: never NULL. */
};
|
|
|
|
|
2008-12-22 20:14:08 +01:00
|
|
|
/** How many chunks will we put into the freelist before freeing them? */
#define MAX_FREELIST_LEN 4
/** The number of memarea chunks currently in our freelist. */
static int freelist_len=0;
/** A linked list of unused memory area chunks. Used to prevent us from
 * spinning in malloc/free loops. */
static memarea_chunk_t *freelist = NULL;
|
|
|
|
|
2008-03-26 17:33:33 +01:00
|
|
|
/** Helper: allocate a new memarea chunk of around <b>chunk_size</b> bytes. */
|
|
|
|
static memarea_chunk_t *
|
2008-04-08 19:50:03 +02:00
|
|
|
alloc_chunk(size_t sz, int freelist_ok)
|
2008-03-26 17:33:33 +01:00
|
|
|
{
|
2010-12-14 00:40:21 +01:00
|
|
|
tor_assert(sz < SIZE_T_CEILING);
|
2008-04-08 19:50:03 +02:00
|
|
|
if (freelist && freelist_ok) {
|
2008-04-08 19:29:05 +02:00
|
|
|
memarea_chunk_t *res = freelist;
|
|
|
|
freelist = res->next_chunk;
|
2008-04-08 19:50:03 +02:00
|
|
|
res->next_chunk = NULL;
|
2008-04-08 19:29:05 +02:00
|
|
|
--freelist_len;
|
2009-05-12 21:10:23 +02:00
|
|
|
CHECK_SENTINEL(res);
|
2008-04-08 19:29:05 +02:00
|
|
|
return res;
|
|
|
|
} else {
|
2008-04-08 19:50:03 +02:00
|
|
|
size_t chunk_size = freelist_ok ? CHUNK_SIZE : sz;
|
2009-05-12 21:10:23 +02:00
|
|
|
memarea_chunk_t *res;
|
|
|
|
chunk_size += SENTINEL_LEN;
|
|
|
|
res = tor_malloc_roundup(&chunk_size);
|
2008-04-08 19:29:05 +02:00
|
|
|
res->next_chunk = NULL;
|
2009-05-12 21:10:23 +02:00
|
|
|
res->mem_size = chunk_size - CHUNK_HEADER_SIZE - SENTINEL_LEN;
|
2008-04-08 19:29:05 +02:00
|
|
|
res->next_mem = res->u.mem;
|
2009-05-12 21:10:23 +02:00
|
|
|
tor_assert(res->next_mem+res->mem_size+SENTINEL_LEN ==
|
|
|
|
((char*)res)+chunk_size);
|
2009-03-18 16:12:56 +01:00
|
|
|
tor_assert(realign_pointer(res->next_mem) == res->next_mem);
|
2009-05-12 21:10:23 +02:00
|
|
|
SET_SENTINEL(res);
|
2008-04-08 19:29:05 +02:00
|
|
|
return res;
|
|
|
|
}
|
2008-03-26 17:33:33 +01:00
|
|
|
}
|
|
|
|
|
2008-12-22 18:53:04 +01:00
|
|
|
/** Release <b>chunk</b> from a memarea, either by adding it to the freelist
|
|
|
|
* or by freeing it if the freelist is already too big. */
|
2008-04-08 19:29:05 +02:00
|
|
|
static void
|
2009-09-28 16:37:01 +02:00
|
|
|
chunk_free_unchecked(memarea_chunk_t *chunk)
|
2008-04-08 19:29:05 +02:00
|
|
|
{
|
2009-05-12 21:10:23 +02:00
|
|
|
CHECK_SENTINEL(chunk);
|
2008-04-08 19:33:29 +02:00
|
|
|
if (freelist_len < MAX_FREELIST_LEN) {
|
2008-04-08 19:29:05 +02:00
|
|
|
++freelist_len;
|
|
|
|
chunk->next_chunk = freelist;
|
|
|
|
freelist = chunk;
|
2008-04-08 19:50:03 +02:00
|
|
|
chunk->next_mem = chunk->u.mem;
|
2008-04-08 19:29:05 +02:00
|
|
|
} else {
|
|
|
|
tor_free(chunk);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/** Allocate and return new memarea. */
|
2008-03-26 17:33:33 +01:00
|
|
|
memarea_t *
|
2008-11-05 21:34:22 +01:00
|
|
|
memarea_new(void)
|
2008-03-26 17:33:33 +01:00
|
|
|
{
|
|
|
|
memarea_t *head = tor_malloc(sizeof(memarea_t));
|
2008-11-05 21:34:22 +01:00
|
|
|
head->first = alloc_chunk(CHUNK_SIZE, 1);
|
2008-03-26 17:33:33 +01:00
|
|
|
return head;
|
|
|
|
}
|
|
|
|
|
|
|
|
/** Free <b>area</b>, invalidating all pointers returned from memarea_alloc()
|
|
|
|
* and friends for this area */
|
|
|
|
void
|
|
|
|
memarea_drop_all(memarea_t *area)
|
|
|
|
{
|
|
|
|
memarea_chunk_t *chunk, *next;
|
|
|
|
for (chunk = area->first; chunk; chunk = next) {
|
|
|
|
next = chunk->next_chunk;
|
2009-09-28 16:37:01 +02:00
|
|
|
chunk_free_unchecked(chunk);
|
2008-03-26 17:33:33 +01:00
|
|
|
}
|
|
|
|
area->first = NULL; /*fail fast on */
|
|
|
|
tor_free(area);
|
|
|
|
}
|
|
|
|
|
|
|
|
/** Forget about having allocated anything in <b>area</b>, and free some of
|
|
|
|
* the backing storage associated with it, as appropriate. Invalidates all
|
|
|
|
* pointers returned from memarea_alloc() for this area. */
|
|
|
|
void
|
|
|
|
memarea_clear(memarea_t *area)
|
|
|
|
{
|
|
|
|
memarea_chunk_t *chunk, *next;
|
|
|
|
if (area->first->next_chunk) {
|
|
|
|
for (chunk = area->first->next_chunk; chunk; chunk = next) {
|
|
|
|
next = chunk->next_chunk;
|
2009-09-28 16:37:01 +02:00
|
|
|
chunk_free_unchecked(chunk);
|
2008-03-26 17:33:33 +01:00
|
|
|
}
|
|
|
|
area->first->next_chunk = NULL;
|
|
|
|
}
|
|
|
|
area->first->next_mem = area->first->u.mem;
|
|
|
|
}
|
|
|
|
|
2008-12-17 23:58:20 +01:00
|
|
|
/** Remove all unused memarea chunks from the internal freelist. */
|
2008-04-08 19:29:05 +02:00
|
|
|
void
|
|
|
|
memarea_clear_freelist(void)
|
|
|
|
{
|
|
|
|
memarea_chunk_t *chunk, *next;
|
|
|
|
freelist_len = 0;
|
|
|
|
for (chunk = freelist; chunk; chunk = next) {
|
|
|
|
next = chunk->next_chunk;
|
|
|
|
tor_free(chunk);
|
|
|
|
}
|
|
|
|
freelist = NULL;
|
|
|
|
}
|
|
|
|
|
2008-03-26 17:33:33 +01:00
|
|
|
/** Return true iff <b>p</b> is in a range that has been returned by an
|
|
|
|
* allocation from <b>area</b>. */
|
|
|
|
int
|
|
|
|
memarea_owns_ptr(const memarea_t *area, const void *p)
|
|
|
|
{
|
|
|
|
memarea_chunk_t *chunk;
|
|
|
|
const char *ptr = p;
|
|
|
|
for (chunk = area->first; chunk; chunk = chunk->next_chunk) {
|
|
|
|
if (ptr >= chunk->u.mem && ptr < chunk->next_mem)
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/** Return a pointer to a chunk of memory in <b>area</b> of at least <b>sz</b>
 * bytes. <b>sz</b> should be significantly smaller than the area's chunk
 * size, though we can deal if it isn't. */
void *
memarea_alloc(memarea_t *area, size_t sz)
{
  memarea_chunk_t *chunk = area->first;
  char *result;
  tor_assert(chunk);
  CHECK_SENTINEL(chunk);
  tor_assert(sz < SIZE_T_CEILING);
  /* Never hand out a zero-length region; every allocation advances
   * next_mem so distinct calls get distinct pointers. */
  if (sz == 0)
    sz = 1;
  if (chunk->next_mem+sz > chunk->u.mem+chunk->mem_size) {
    /* Not enough room left in the current head chunk. */
    if (sz+CHUNK_HEADER_SIZE >= CHUNK_SIZE) {
      /* This allocation is too big. Stick it in a special chunk, and put
       * that chunk second in the list. */
      memarea_chunk_t *new_chunk = alloc_chunk(sz+CHUNK_HEADER_SIZE, 0);
      new_chunk->next_chunk = chunk->next_chunk;
      chunk->next_chunk = new_chunk;
      chunk = new_chunk;
    } else {
      /* Normal-sized request: start a fresh standard chunk and make it
       * the new head, so future small allocations carve from it. */
      memarea_chunk_t *new_chunk = alloc_chunk(CHUNK_SIZE, 1);
      new_chunk->next_chunk = chunk;
      area->first = chunk = new_chunk;
    }
    tor_assert(chunk->mem_size >= sz);
  }
  result = chunk->next_mem;
  chunk->next_mem = chunk->next_mem + sz;
  /* Reinstate these if bug 930 ever comes back
  tor_assert(chunk->next_mem >= chunk->u.mem);
  tor_assert(chunk->next_mem <= chunk->u.mem+chunk->mem_size);
  */
  /* Keep next_mem aligned so the NEXT allocation is aligned too; note this
   * may push next_mem slightly past mem+mem_size, which marks the chunk
   * full. */
  chunk->next_mem = realign_pointer(chunk->next_mem);
  return result;
}
|
|
|
|
|
|
|
|
/** As memarea_alloc(), but clears the memory it returns. */
|
|
|
|
void *
|
|
|
|
memarea_alloc_zero(memarea_t *area, size_t sz)
|
|
|
|
{
|
|
|
|
void *result = memarea_alloc(area, sz);
|
|
|
|
memset(result, 0, sz);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
/** As memdup, but returns the memory from <b>area</b>. */
|
|
|
|
void *
|
|
|
|
memarea_memdup(memarea_t *area, const void *s, size_t n)
|
|
|
|
{
|
|
|
|
char *result = memarea_alloc(area, n);
|
|
|
|
memcpy(result, s, n);
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
/** As strdup, but returns the memory from <b>area</b>. */
|
|
|
|
char *
|
|
|
|
memarea_strdup(memarea_t *area, const char *s)
|
|
|
|
{
|
|
|
|
return memarea_memdup(area, s, strlen(s)+1);
|
|
|
|
}
|
|
|
|
|
|
|
|
/** As strndup, but returns the memory from <b>area</b>. */
|
|
|
|
char *
|
|
|
|
memarea_strndup(memarea_t *area, const char *s, size_t n)
|
|
|
|
{
|
|
|
|
size_t ln;
|
|
|
|
char *result;
|
|
|
|
const char *cp, *end = s+n;
|
2010-12-14 00:40:21 +01:00
|
|
|
tor_assert(n < SIZE_T_CEILING);
|
2009-03-21 12:52:53 +01:00
|
|
|
for (cp = s; cp < end && *cp; ++cp)
|
2008-03-26 17:33:33 +01:00
|
|
|
;
|
|
|
|
/* cp now points to s+n, or to the 0 in the string. */
|
|
|
|
ln = cp-s;
|
2009-03-21 17:01:52 +01:00
|
|
|
result = memarea_alloc(area, ln+1);
|
|
|
|
memcpy(result, s, ln);
|
2008-03-26 17:33:33 +01:00
|
|
|
result[ln]='\0';
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2008-03-26 18:50:27 +01:00
|
|
|
/** Set <b>allocated_out</b> to the number of bytes allocated in <b>area</b>,
|
|
|
|
* and <b>used_out</b> to the number of bytes currently used. */
|
|
|
|
void
|
|
|
|
memarea_get_stats(memarea_t *area, size_t *allocated_out, size_t *used_out)
|
|
|
|
{
|
|
|
|
size_t a = 0, u = 0;
|
|
|
|
memarea_chunk_t *chunk;
|
|
|
|
for (chunk = area->first; chunk; chunk = chunk->next_chunk) {
|
2009-05-12 21:10:23 +02:00
|
|
|
CHECK_SENTINEL(chunk);
|
2008-03-26 18:50:27 +01:00
|
|
|
a += CHUNK_HEADER_SIZE + chunk->mem_size;
|
|
|
|
tor_assert(chunk->next_mem >= chunk->u.mem);
|
|
|
|
u += CHUNK_HEADER_SIZE + (chunk->next_mem - chunk->u.mem);
|
|
|
|
}
|
|
|
|
*allocated_out = a;
|
|
|
|
*used_out = u;
|
|
|
|
}
|
|
|
|
|
2008-03-26 17:33:33 +01:00
|
|
|
/** Assert that <b>area</b> is okay. */
|
|
|
|
void
|
|
|
|
memarea_assert_ok(memarea_t *area)
|
|
|
|
{
|
|
|
|
memarea_chunk_t *chunk;
|
|
|
|
tor_assert(area->first);
|
|
|
|
|
|
|
|
for (chunk = area->first; chunk; chunk = chunk->next_chunk) {
|
2009-05-12 21:10:23 +02:00
|
|
|
CHECK_SENTINEL(chunk);
|
2008-03-26 17:33:33 +01:00
|
|
|
tor_assert(chunk->next_mem >= chunk->u.mem);
|
2009-05-17 05:57:30 +02:00
|
|
|
tor_assert(chunk->next_mem <=
|
|
|
|
(char*) realign_pointer(chunk->u.mem+chunk->mem_size));
|
2008-03-26 17:33:33 +01:00
|
|
|
}
|
|
|
|
}
|
2008-03-26 17:56:37 +01:00
|
|
|
|