commit 204f7255f4c06fe12d99e785b91ea2fb18047018
Author: Nick Mathewson <nickm@torproject.org>
Date:   Tue Mar 18 10:02:27 2014 -0400
memarea.c: use flexible array member for mem
This makes clang's memory sanitizer happier that we aren't reading off the end of a char[1]. We hadn't replaced the char[1] with a char[FLEXIBLE_ARRAY_MEMBER] before because we were doing a union trick to force alignment. Now we use __attribute__((aligned)) where available, and we do the union trick elsewhere.
Most of this patch is just replacing accesses to (x)->u.mem with (x)->U_MEM, where U_MEM is defined as "u.mem" or "mem" depending on our implementation.
---
 src/common/memarea.c | 43 +++++++++++++++++++++++++++----------------
 1 file changed, 27 insertions(+), 16 deletions(-)
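Note (not part of the patch): below is a minimal standalone sketch of the pattern the message describes: a flexible array member forced to the desired alignment with __attribute__((aligned)) when the compiler supports it, a union fallback otherwise, and a macro so the rest of the file names the storage the same way in both cases. The identifiers chunk_t, C_MEM, and CHUNK_ALIGN are illustrative only; the real code uses memarea_chunk_t, U_MEM, MEMAREA_ALIGN, and autoconf's FLEXIBLE_ARRAY_MEMBER (omitted here for brevity).

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define CHUNK_ALIGN sizeof(void *)

#if defined(__GNUC__)
/* Compiler supports the aligned attribute: use a real flexible array
 * member and force its alignment directly.  (The patch additionally
 * requires autoconf's FLEXIBLE_ARRAY_MEMBER to be defined.) */
#define C_MEM mem
typedef struct chunk_t {
  size_t mem_size;
  char mem[] __attribute__((aligned(CHUNK_ALIGN)));
} chunk_t;
#else
/* Fallback: the old union trick.  mem is a char[1]; the void* member
 * exists only to force the union (and thus mem) to pointer alignment. */
#define C_MEM u.mem
typedef struct chunk_t {
  size_t mem_size;
  union {
    char mem[1];
    void *void_for_alignment_;
  } u;
} chunk_t;
#endif

int
main(void)
{
  /* Either way, code refers to the storage through the C_MEM macro, so
   * callers do not care which layout was chosen. */
  chunk_t *c = malloc(sizeof(chunk_t) + 64);
  if (!c)
    return 1;
  c->mem_size = 64;
  printf("payload offset %zu, aligned: %d\n",
         offsetof(chunk_t, C_MEM),
         (int)((uintptr_t)c->C_MEM % CHUNK_ALIGN == 0));
  free(c);
  return 0;
}

With gcc or clang the first branch is taken; in either branch the offset computation still works, because C_MEM expands to a plain member name (mem) or to u.mem, both of which offsetof accepts. This mirrors how the patch keeps CHUNK_HEADER_SIZE working by pointing STRUCT_OFFSET at U_MEM.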
diff --git a/src/common/memarea.c b/src/common/memarea.c
index 0ae0ccc..e2d07fc 100644
--- a/src/common/memarea.c
+++ b/src/common/memarea.c
@@ -29,6 +29,13 @@
 #error "void* is neither 4 nor 8 bytes long. I don't know how to align stuff."
 #endif
 
+#if defined(__GNUC__) && defined(FLEXIBLE_ARRAY_MEMBER)
+#define USE_ALIGNED_ATTRIBUTE
+#define U_MEM mem
+#else
+#define U_MEM u.mem
+#endif
+
 #ifdef USE_SENTINELS
 /** Magic value that we stick at the end of a memarea so we can make sure
  * there are no run-off-the-end bugs. */
@@ -39,12 +46,12 @@
  * end, set those bytes. */
 #define SET_SENTINEL(chunk)                                     \
   STMT_BEGIN                                                    \
-  set_uint32( &(chunk)->u.mem[chunk->mem_size], SENTINEL_VAL ); \
+  set_uint32( &(chunk)->U_MEM[chunk->mem_size], SENTINEL_VAL ); \
   STMT_END
 /** Assert that the sentinel on a memarea is set correctly. */
 #define CHECK_SENTINEL(chunk)                                           \
   STMT_BEGIN                                                            \
-  uint32_t sent_val = get_uint32(&(chunk)->u.mem[chunk->mem_size]);     \
+  uint32_t sent_val = get_uint32(&(chunk)->U_MEM[chunk->mem_size]);     \
   tor_assert(sent_val == SENTINEL_VAL);                                 \
   STMT_END
 #else
@@ -71,19 +78,23 @@ realign_pointer(void *ptr)
 typedef struct memarea_chunk_t {
   /** Next chunk in this area. Only kept around so we can free it. */
   struct memarea_chunk_t *next_chunk;
-  size_t mem_size; /**< How much RAM is available in u.mem, total? */
-  char *next_mem; /**< Next position in u.mem to allocate data at. If it's
+  size_t mem_size; /**< How much RAM is available in mem, total? */
+  char *next_mem; /**< Next position in mem to allocate data at. If it's
                    * greater than or equal to mem+mem_size, this chunk is
                    * full. */
+#ifdef USE_ALIGNED_ATTRIBUTE
+  char mem[FLEXIBLE_ARRAY_MEMBER] __attribute__((aligned(MEMAREA_ALIGN)));
+#else
   union {
     char mem[1]; /**< Memory space in this chunk. */
     void *void_for_alignment_; /**< Dummy; used to make sure mem is aligned. */
   } u;
+#endif
 } memarea_chunk_t;
 /** How many bytes are needed for overhead before we get to the memory part
  * of a chunk? */
-#define CHUNK_HEADER_SIZE STRUCT_OFFSET(memarea_chunk_t, u)
+#define CHUNK_HEADER_SIZE STRUCT_OFFSET(memarea_chunk_t, U_MEM)
 /** What's the smallest that we'll allocate a chunk? */
 #define CHUNK_SIZE 4096
@@ -121,7 +132,7 @@ alloc_chunk(size_t sz, int freelist_ok)
     res = tor_malloc(chunk_size);
     res->next_chunk = NULL;
     res->mem_size = chunk_size - CHUNK_HEADER_SIZE - SENTINEL_LEN;
-    res->next_mem = res->u.mem;
+    res->next_mem = res->U_MEM;
     tor_assert(res->next_mem+res->mem_size+SENTINEL_LEN ==
                ((char*)res)+chunk_size);
     tor_assert(realign_pointer(res->next_mem) == res->next_mem);
@@ -140,7 +151,7 @@ chunk_free_unchecked(memarea_chunk_t *chunk)
     ++freelist_len;
     chunk->next_chunk = freelist;
     freelist = chunk;
-    chunk->next_mem = chunk->u.mem;
+    chunk->next_mem = chunk->U_MEM;
   } else {
     tor_free(chunk);
   }
@@ -183,7 +194,7 @@ memarea_clear(memarea_t *area)
     }
     area->first->next_chunk = NULL;
   }
-  area->first->next_mem = area->first->u.mem;
+  area->first->next_mem = area->first->U_MEM;
 }
 /** Remove all unused memarea chunks from the internal freelist. */
@@ -207,7 +218,7 @@ memarea_owns_ptr(const memarea_t *area, const void *p)
   memarea_chunk_t *chunk;
   const char *ptr = p;
   for (chunk = area->first; chunk; chunk = chunk->next_chunk) {
-    if (ptr >= chunk->u.mem && ptr < chunk->next_mem)
+    if (ptr >= chunk->U_MEM && ptr < chunk->next_mem)
       return 1;
   }
   return 0;
@@ -226,7 +237,7 @@ memarea_alloc(memarea_t *area, size_t sz)
   tor_assert(sz < SIZE_T_CEILING);
   if (sz == 0)
     sz = 1;
-  if (chunk->next_mem+sz > chunk->u.mem+chunk->mem_size) {
+  if (chunk->next_mem+sz > chunk->U_MEM+chunk->mem_size) {
     if (sz+CHUNK_HEADER_SIZE >= CHUNK_SIZE) {
       /* This allocation is too big. Stick it in a special chunk, and put
        * that chunk second in the list. */
@@ -244,8 +255,8 @@ memarea_alloc(memarea_t *area, size_t sz)
   result = chunk->next_mem;
   chunk->next_mem = chunk->next_mem + sz;
   /* Reinstate these if bug 930 ever comes back
-  tor_assert(chunk->next_mem >= chunk->u.mem);
-  tor_assert(chunk->next_mem <= chunk->u.mem+chunk->mem_size);
+  tor_assert(chunk->next_mem >= chunk->U_MEM);
+  tor_assert(chunk->next_mem <= chunk->U_MEM+chunk->mem_size);
   */
   chunk->next_mem = realign_pointer(chunk->next_mem);
   return result;
@@ -304,8 +315,8 @@ memarea_get_stats(memarea_t *area, size_t *allocated_out, size_t *used_out)
   for (chunk = area->first; chunk; chunk = chunk->next_chunk) {
     CHECK_SENTINEL(chunk);
     a += CHUNK_HEADER_SIZE + chunk->mem_size;
-    tor_assert(chunk->next_mem >= chunk->u.mem);
-    u += CHUNK_HEADER_SIZE + (chunk->next_mem - chunk->u.mem);
+    tor_assert(chunk->next_mem >= chunk->U_MEM);
+    u += CHUNK_HEADER_SIZE + (chunk->next_mem - chunk->U_MEM);
   }
   *allocated_out = a;
   *used_out = u;
@@ -320,9 +331,9 @@ memarea_assert_ok(memarea_t *area)
   for (chunk = area->first; chunk; chunk = chunk->next_chunk) {
     CHECK_SENTINEL(chunk);
-    tor_assert(chunk->next_mem >= chunk->u.mem);
+    tor_assert(chunk->next_mem >= chunk->U_MEM);
     tor_assert(chunk->next_mem <=
-               (char*) realign_pointer(chunk->u.mem+chunk->mem_size));
+               (char*) realign_pointer(chunk->U_MEM+chunk->mem_size));
   }
 }