Zhao Yanbai Git Server - minix.git/commitdiff
Add MALLOC_DEBUG env var to debug applications using malloc
author Erik van der Kouwe <erik@minix3.org>
Fri, 20 Aug 2010 19:16:31 +0000 (19:16 +0000)
committer Erik van der Kouwe <erik@minix3.org>
Fri, 20 Aug 2010 19:16:31 +0000 (19:16 +0000)
lib/libc/ansi/Makefile.inc
lib/libc/ansi/malloc-debug.c [new file with mode: 0644]
lib/libc/ansi/malloc-debug.h [new file with mode: 0644]
lib/libc/ansi/malloc.c
man/man3/malloc.3

index 4e6e7aa16913e8a86cb091ca4232802ebbad2c37..a0af2539c15ad4e5bbad602ce3d5d1b2a092c1b8 100644 (file)
@@ -36,6 +36,7 @@ SRCS+=  \
        ldiv.c \
        localeconv.c \
        malloc.c \
+       malloc-debug.c \
        mblen.c \
        mbstowcs.c \
        mbtowc.c \
diff --git a/lib/libc/ansi/malloc-debug.c b/lib/libc/ansi/malloc-debug.c
new file mode 100644 (file)
index 0000000..ae0420d
--- /dev/null
@@ -0,0 +1,245 @@
+/* pointless without assertions */
+#ifdef NDEBUG
+#undef NDEBUG
+#endif
+
+#include <assert.h>
+#include <machine/vm.h>
+#include <minix/minlib.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+
+#include "malloc-debug.h"
+
+#if 0
+#include <stdio.h>
+static int reenter; 
+#define LOG(args) if (!reenter) { reenter++; printf args; reenter--; }
+#else
+#define LOG(args)
+#endif
+
+#define PAGE_SIZE I386_PAGE_SIZE
+
+/* Header placed at the start of each debug allocation's mapping.  The
+ * user data sits flush against the END of the mapping (see
+ * block_get_dataptr), so a read or write past the buffer lands on an
+ * unmapped page and faults immediately.
+ */
+struct block {
+       size_t size;    /* requested size, rounded up to sizeof(long) in block_alloc */
+       unsigned magic; /* integrity check; see block_compute_magic */
+};
+
+/* Bounds of the address window used for debug allocations; lazily
+ * initialized on the first block_alloc call. */
+static u8_t *ptr_min, *ptr_max;
+
+#define MIN(x, y) ((x) < (y) ? (x) : (y))
+
+/* Round x down to the nearest page boundary. */
+static unsigned long page_round_down(unsigned long x)
+{
+       return x - x % PAGE_SIZE;
+}
+
+/* Round x up to the nearest page boundary; x is returned unchanged if
+ * it is already page-aligned. */
+static unsigned long page_round_up(unsigned long x)
+{
+       unsigned long rem;
+       
+       rem = x % PAGE_SIZE;
+       if (rem)
+               x += PAGE_SIZE - rem;
+               
+       return x;
+}
+
+#define page_round_down_ptr(x) ((u8_t *) page_round_down((unsigned long) (x)))
+#define page_round_up_ptr(x) ((u8_t *) page_round_up((unsigned long) (x)))
+
+/* Checksum derived from the block's own address and its size, so both
+ * relocation and header corruption are detected by block_check.
+ * NOTE(review): the result is stored into the `unsigned' magic field,
+ * truncating the unsigned long value -- harmless as long as both types
+ * are the same width (true on i386); confirm for other targets. */
+static unsigned long block_compute_magic(struct block *block)
+{
+       return (unsigned long) block + block->size + 0xDEADBEEFUL;
+}
+
+/* Total mapping size for a request: header plus data, rounded up to a
+ * whole number of pages. */
+static size_t block_get_totalsize(size_t size)
+{
+       return page_round_up(sizeof(struct block) + size);
+}
+
+/* First byte past the end of the block's mapping. */
+static u8_t *block_get_endptr(struct block *block)
+{
+       return (u8_t *) block + block_get_totalsize(block->size);
+}
+
+/* Pointer handed to the application: the data is placed at the very
+ * end of the mapping so that an overrun touches the unmapped page that
+ * follows and raises SIGSEGV. */
+static u8_t *block_get_dataptr(struct block *block)
+{
+       return block_get_endptr(block) - block->size;
+}
+
+/* Verify a block header: correct alignment and range, sane size, valid
+ * magic, and untouched filler bytes between the header and the user
+ * data (each filler byte holds the low byte of its own address).  Any
+ * corruption -- e.g. a write before the start of the buffer -- trips an
+ * assertion and aborts the program. */
+static void block_check(struct block *block) 
+{
+       u8_t *dataptr, *p;
+
+       /* check location */
+       assert(block);
+       assert(!((unsigned long) block % PAGE_SIZE));
+       assert((u8_t *) block >= ptr_min);
+       assert((u8_t *) block <= ptr_max);
+       
+       /* check size */
+       assert(block->size > 0);
+       
+       /* check fillers */
+       assert(block->magic == block_compute_magic(block));
+       dataptr = block_get_dataptr(block);
+       for (p = (u8_t *) (block + 1); p < dataptr; p++)
+               assert(*p == ((unsigned long) p & 0xff));
+}
+
+/* Allocate a fresh block of at least `size' bytes at a randomly chosen
+ * page-aligned address within [ptr_min, ptr_max], using the TSC as the
+ * randomness source.  Initializes the header, magic and filler bytes.
+ * Aborts the process if mmap fails. */
+static struct block *block_alloc(size_t size) 
+{
+       struct block *block;
+       u8_t *dataptr, *p, *ptr;
+       unsigned page_index, page_index_max;
+       size_t sizerem, totalsize;
+       u64_t tsc;
+
+       LOG(("block_alloc; size=0x%x\n", size));
+       assert(size > 0);
+       
+       /* round size up to machine word size */
+       sizerem = size % sizeof(long);
+       if (sizerem)
+               size += sizeof(long) - sizerem;
+
+       /* initialize address range */
+       if (!ptr_min && !ptr_max) {
+               /* keep a safe distance from areas that are in use:
+                * - 4MB from the break (should not change if traditional
+                *   malloc is not used so a small margin is sufficient
+                * - 256MB from the stack (big margin because memory beyond
+                *   this may be allocated by mmap when the address space 
+                *   starts to fill up)
+                */
+               ptr_min = page_round_up_ptr((u8_t *) sbrk(0) + 0x400000);
+               ptr_max = page_round_down_ptr((u8_t *) &size - 0x10000000);
+       }
+       assert(ptr_min);
+       assert(ptr_max);
+       assert(ptr_min < ptr_max);
+
+       /* select address at random */
+       read_tsc_64(&tsc);
+       totalsize = block_get_totalsize(size);
+       /* NOTE(review): if totalsize exceeds ptr_max - ptr_min the
+        * subtraction underflows, and if it equals it page_index_max is
+        * zero, making rem64u divide by zero -- presumably requests are
+        * always far smaller than the window; confirm. */
+       page_index_max = (ptr_max - ptr_min - totalsize) / PAGE_SIZE;
+       page_index = rem64u(tsc, page_index_max);
+       ptr = ptr_min + page_index * PAGE_SIZE;
+       
+       /* allocate block */
+       block = (struct block *) mmap(
+               ptr,                            /* addr */
+               totalsize,                      /* len */ 
+               PROT_READ|PROT_WRITE,           /* prot */
+               MAP_PREALLOC,                   /* flags */
+               -1,                             /* fd */
+               0);                             /* offset */
+       if (block == MAP_FAILED) {
+               /* mmap call failed */
+               abort();
+       }
+
+       /* block may not be at the requested location if that is in use */
+       /* NOTE(review): this raises ptr_min whenever the mapping lies
+        * above it, which is the common case, so the window shrinks
+        * monotonically from below with every allocation -- verify this
+        * is the intended behaviour rather than `block < ptr_min'. */
+       if (ptr_min < (u8_t *) block)
+               ptr_min = (u8_t *) block;
+
+       if (ptr_max < (u8_t *) block)
+               ptr_max = (u8_t *) block;
+
+       /* initialize block, including fillers */
+       block->size = size;
+       block->magic = block_compute_magic(block);
+       dataptr = block_get_dataptr(block);
+       for (p = (u8_t *) (block + 1); p < dataptr; p++)
+               *p = ((unsigned long) p & 0xff);
+               
+       LOG(("block_alloc; block=0x%x\n", block));
+       return block;
+}
+
+/* Map a user data pointer back to its block header.  The header lives
+ * at the start of the page range, so step back over the header and
+ * round down to the page boundary; then validate the result with
+ * block_check (aborts on a bogus or corrupted pointer). */
+static struct block *block_find(const void *ptr) 
+{
+       struct block *block;
+
+       LOG(("block_find; ptr=0x%x\n", ptr));
+       assert(ptr);
+
+       /* locate block based on pointer, then check whether it is valid */
+       block = (struct block *) page_round_down(
+               (unsigned long) ((struct block *) ptr - 1));
+       block_check(block);
+       LOG(("block_find; block=0x%x\n", block));
+       return block;
+}
+
+/* Release a block by unmapping its entire page range; any later access
+ * through a stale pointer then faults.  Aborts if munmap fails. */
+static void block_free(struct block *block) 
+{
+       LOG(("block_free; block=0x%x\n", block));
+       assert(block);
+
+       /* simply unmap the block */
+       if (munmap(block, block_get_totalsize(block->size)) < 0) {
+               /* munmap call failed */
+               abort();
+       }
+}
+
+/* Debug replacement for malloc: allocates a block whose data ends
+ * exactly at a page boundary so overruns fault at once.  Returns NULL
+ * if block_alloc cannot provide a block.  Called from malloc() only
+ * when the MALLOC_DEBUG environment variable is set. */
+void *_dbg_malloc(size_t size)
+{
+       struct block *newblock;
+       u8_t *ptr;
+       
+       LOG(("_dbg_malloc; size=0x%x\n", size));
+       assert(size > 0); /* enforced by regular malloc */
+
+       newblock = block_alloc(size);
+       if (!newblock)
+               return NULL;
+               
+       ptr = block_get_dataptr(newblock);
+       LOG(("_dbg_malloc; ptr=0x%x\n", ptr));
+       return ptr;
+}
+
+/* Debug replacement for realloc: always allocates a new block, copies
+ * min(old, new) bytes, and frees the old block -- the data therefore
+ * always moves, which helps catch callers that keep stale pointers.
+ * The old block is validated (and left intact on allocation failure),
+ * matching realloc semantics. */
+void *_dbg_realloc(void *oldp, size_t size)
+{
+       u8_t *newp;
+       struct block *oldblock, *newblock;
+       
+       LOG(("_dbg_realloc; oldp=0x%x; size=0x%x\n", oldp, size));
+       assert(oldp); /* enforced by regular realloc */
+       assert(size > 0); /* enforced by regular realloc */
+
+       /* always allocate new block */
+       newblock = block_alloc(size);
+       if (!newblock)
+               return NULL;
+
+       /* copy the data */
+       oldblock = block_find(oldp);
+       memcpy(block_get_dataptr(newblock), 
+               block_get_dataptr(oldblock), 
+               MIN(newblock->size, oldblock->size));
+               
+       /* deallocate old block */
+       block_free(oldblock);
+       
+       newp = block_get_dataptr(newblock);
+       LOG(("_dbg_realloc; newp=0x%x\n", newp));
+       return newp;
+}
+
+/* Debug replacement for free: validates the block (header, magic and
+ * fillers) and unmaps it, so use-after-free and double-free fault. */
+void _dbg_free(void *ptr)
+{
+       LOG(("_dbg_free; ptr=0x%x\n", ptr));
+       assert(ptr); /* enforced by regular free */
+
+       /* find the block and free it */
+       block_free(block_find(ptr));
+
+       LOG(("_dbg_free done\n"));
+}
+
diff --git a/lib/libc/ansi/malloc-debug.h b/lib/libc/ansi/malloc-debug.h
new file mode 100644 (file)
index 0000000..47118da
--- /dev/null
@@ -0,0 +1,8 @@
+#include <minix/u64.h>
+#include <sys/types.h>
+
+/* malloc-debug.c */
+void *_dbg_malloc(size_t size);
+void *_dbg_realloc(void *oldp, size_t size);
+void _dbg_free(void *ptr);
+
index f4644f1c269020ade924965ad3134c2d3e9a4e28..32922bd685e8c704f69d6a358a2c9e05b69637ee 100644 (file)
 #include       <errno.h>
 #include       <assert.h>
 
+#include       "malloc-debug.h"
+
+/* Tri-state switch: -1 = environment not yet consulted, 0 = the
+ * MALLOC_DEBUG variable is set (route calls to the debug allocator),
+ * 1 = normal allocator.  getenv is called at most once. */
+static int no_debug = -1;
+/* Run `statement' (typically a return into the _dbg_* allocator) when
+ * debugging is enabled; fall through to the regular code otherwise.
+ * NOTE(review): expands to a bare `if', so an `else' following a
+ * CHECK_DBG() use would bind to it -- current call sites are
+ * standalone statements, but a do { } while (0) wrapper would be
+ * safer against future use. */
+#define CHECK_DBG(statement)                                           \
+  if (no_debug <= 0) {                                                 \
+       if (no_debug < 0) no_debug = getenv("MALLOC_DEBUG") ? 0 : 1;    \
+       if (no_debug == 0) { statement; }                               \
+  }
+
 #if _EM_WSIZE == _EM_PSIZE
 #define        ptrint          int
 #else
@@ -83,6 +92,8 @@ malloc(const size_t size)
   if (size == 0)
        return NULL;
 
+  CHECK_DBG(return _dbg_malloc(size));
+
   for (ntries = 0; ntries < 2; ntries++) {
        unsigned len = Align(size, PTRSIZE) + PTRSIZE;
        if (len < 2 * PTRSIZE) {
@@ -140,6 +151,9 @@ realloc(void *oldp, size_t size)
        free(old);
        return NULL;
   }
+
+  CHECK_DBG(return _dbg_realloc(oldp, size));
+
   len = Align(size, PTRSIZE) + PTRSIZE;
   next = NextSlot(old);
   n = (int)(next - old);                       /* old length */
@@ -188,6 +202,8 @@ free(void *ptr)
   if (p == 0)
        return;
 
+  CHECK_DBG(_dbg_free(ptr); return);
+
 #ifdef SLOWDEBUG
   {
        int found;
index efbe0e4170151a529e9f761d450480ad9add64e2..81e11931abacaee6f61a3f0b85f629b492a6e27a 100644 (file)
@@ -86,6 +86,17 @@ return.
 Each of the allocation routines returns a pointer
 to space suitably aligned (after possible pointer coercion)
 for storage of any type of object.
+.PP
+To debug malloc-related errors, specify the
+.I MALLOC_DEBUG
+variable in the environment of the program you want to debug. This causes an
+alternate malloc implementation to be used. This version allocates blocks at
+the end of random pages so that reads and writes past the end of the buffer
+cause SIGSEGV. On realloc or free calls, the area just before the buffer is
+verified to also detect writes before the start of the buffer. Buffer overflows
+in the BSS section are also more likely to be detected because the brk is never
moved. Please note that this flag comes with a considerable performance
penalty and dramatically increases memory usage.
 .SH SEE ALSO
 .BR brk (2).
 .SH DIAGNOSTICS