/*
 * libc/stdlib/malloc/malloc.c -- malloc function
 *
 * Copyright (C) 2002,03 NEC Electronics Corporation
 * Copyright (C) 2002,03 Miles Bader <miles@gnu.org>
 *
 * This file is subject to the terms and conditions of the GNU Lesser
 * General Public License.  See the file COPYING.LIB in the main
 * directory of this archive for more details.
 *
 * Written by Miles Bader <miles@gnu.org>
 */
|
| 13 |
|
|
|
| 14 |
|
|
#include <stdlib.h>
|
| 15 |
|
|
#include <unistd.h>
|
| 16 |
|
|
#include <errno.h>
|
| 17 |
|
|
#include <sys/mman.h>
|
| 18 |
|
|
|
| 19 |
|
|
#include "malloc.h"
|
| 20 |
|
|
#include "heap.h"
|
| 21 |
|
|
|
| 22 |
|
|
|
| 23 |
|
|
/* The malloc heap. We provide a bit of initial static space so that
|
| 24 |
|
|
programs can do a little mallocing without mmaping in more space. */
|
| 25 |
|
|
HEAP_DECLARE_STATIC_FREE_AREA (initial_fa, 256);
|
| 26 |
|
|
struct heap __malloc_heap = HEAP_INIT_WITH_FA (initial_fa);
|
| 27 |
|
|
|
| 28 |
|
|
#if defined(MALLOC_USE_LOCKING) && defined(MALLOC_USE_SBRK)
|
| 29 |
|
|
/* A lock protecting our use of sbrk. */
|
| 30 |
|
|
malloc_mutex_t __malloc_sbrk_lock;
|
| 31 |
|
|
#endif /* MALLOC_USE_LOCKING && MALLOC_USE_SBRK */
|
| 32 |
|
|
|
| 33 |
|
|
|
| 34 |
|
|
#ifdef __UCLIBC_UCLINUX_BROKEN_MUNMAP__
|
| 35 |
|
|
/* A list of all malloc_mmb structures describing blocsk that
|
| 36 |
|
|
malloc has mmapped, ordered by the block address. */
|
| 37 |
|
|
struct malloc_mmb *__malloc_mmapped_blocks = 0;
|
| 38 |
|
|
|
| 39 |
|
|
/* A heap used for allocating malloc_mmb structures. We could allocate
|
| 40 |
|
|
them from the main heap, but that tends to cause heap fragmentation in
|
| 41 |
|
|
annoying ways. */
|
| 42 |
|
|
HEAP_DECLARE_STATIC_FREE_AREA (initial_mmb_fa, 48); /* enough for 3 mmbs */
|
| 43 |
|
|
struct heap __malloc_mmb_heap = HEAP_INIT_WITH_FA (initial_mmb_fa);
|
| 44 |
|
|
#endif /* __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
|
| 45 |
|
|
|
| 46 |
|
|
|
| 47 |
|
|
static void *
|
| 48 |
|
|
malloc_from_heap (size_t size, struct heap *heap)
|
| 49 |
|
|
{
|
| 50 |
|
|
void *mem;
|
| 51 |
|
|
|
| 52 |
|
|
MALLOC_DEBUG (1, "malloc: %d bytes", size);
|
| 53 |
|
|
|
| 54 |
|
|
/* Include extra space to record the size of the allocated block. */
|
| 55 |
|
|
size += MALLOC_HEADER_SIZE;
|
| 56 |
|
|
|
| 57 |
|
|
__heap_lock (heap);
|
| 58 |
|
|
|
| 59 |
|
|
/* First try to get memory that's already in our heap. */
|
| 60 |
|
|
mem = __heap_alloc (heap, &size);
|
| 61 |
|
|
|
| 62 |
|
|
__heap_unlock (heap);
|
| 63 |
|
|
|
| 64 |
|
|
if (unlikely (! mem))
|
| 65 |
|
|
/* We couldn't allocate from the heap, so grab some more
|
| 66 |
|
|
from the system, add it to the heap, and try again. */
|
| 67 |
|
|
{
|
| 68 |
|
|
/* If we're trying to allocate a block bigger than the default
|
| 69 |
|
|
MALLOC_HEAP_EXTEND_SIZE, make sure we get enough to hold it. */
|
| 70 |
|
|
void *block;
|
| 71 |
|
|
size_t block_size
|
| 72 |
|
|
= (size < MALLOC_HEAP_EXTEND_SIZE
|
| 73 |
|
|
? MALLOC_HEAP_EXTEND_SIZE
|
| 74 |
|
|
: MALLOC_ROUND_UP_TO_PAGE_SIZE (size));
|
| 75 |
|
|
|
| 76 |
|
|
/* Allocate the new heap block. */
|
| 77 |
|
|
#ifdef MALLOC_USE_SBRK
|
| 78 |
|
|
|
| 79 |
|
|
__malloc_lock_sbrk ();
|
| 80 |
|
|
|
| 81 |
|
|
/* Use sbrk we can, as it's faster than mmap, and guarantees
|
| 82 |
|
|
contiguous allocation. */
|
| 83 |
|
|
block = sbrk (block_size);
|
| 84 |
|
|
if (likely (block != (void *)-1))
|
| 85 |
|
|
{
|
| 86 |
|
|
/* Because sbrk can return results of arbitrary
|
| 87 |
|
|
alignment, align the result to a MALLOC_ALIGNMENT boundary. */
|
| 88 |
|
|
long aligned_block = MALLOC_ROUND_UP ((long)block, MALLOC_ALIGNMENT);
|
| 89 |
|
|
if (block != (void *)aligned_block)
|
| 90 |
|
|
/* Have to adjust. We should only have to actually do this
|
| 91 |
|
|
the first time (after which we will have aligned the brk
|
| 92 |
|
|
correctly). */
|
| 93 |
|
|
{
|
| 94 |
|
|
/* Move the brk to reflect the alignment; our next allocation
|
| 95 |
|
|
should start on exactly the right alignment. */
|
| 96 |
|
|
sbrk (aligned_block - (long)block);
|
| 97 |
|
|
block = (void *)aligned_block;
|
| 98 |
|
|
}
|
| 99 |
|
|
}
|
| 100 |
|
|
|
| 101 |
|
|
__malloc_unlock_sbrk ();
|
| 102 |
|
|
|
| 103 |
|
|
#else /* !MALLOC_USE_SBRK */
|
| 104 |
|
|
|
| 105 |
|
|
/* Otherwise, use mmap. */
|
| 106 |
|
|
block = mmap (0, block_size, PROT_READ | PROT_WRITE,
|
| 107 |
|
|
MAP_SHARED | MAP_ANONYMOUS, 0, 0);
|
| 108 |
|
|
|
| 109 |
|
|
#endif /* MALLOC_USE_SBRK */
|
| 110 |
|
|
|
| 111 |
|
|
if (likely (block != (void *)-1))
|
| 112 |
|
|
{
|
| 113 |
|
|
#if !defined(MALLOC_USE_SBRK) && defined(__UCLIBC_UCLINUX_BROKEN_MUNMAP__)
|
| 114 |
|
|
struct malloc_mmb *mmb, *prev_mmb, *new_mmb;
|
| 115 |
|
|
#endif
|
| 116 |
|
|
|
| 117 |
|
|
MALLOC_DEBUG (1, "adding system memroy to heap: 0x%lx - 0x%lx (%d bytes)",
|
| 118 |
|
|
(long)block, (long)block + block_size, block_size);
|
| 119 |
|
|
|
| 120 |
|
|
/* Get back the heap lock. */
|
| 121 |
|
|
__heap_lock (heap);
|
| 122 |
|
|
|
| 123 |
|
|
/* Put BLOCK into the heap. */
|
| 124 |
|
|
__heap_free (heap, block, block_size);
|
| 125 |
|
|
|
| 126 |
|
|
MALLOC_DEBUG_INDENT (-1);
|
| 127 |
|
|
|
| 128 |
|
|
/* Try again to allocate. */
|
| 129 |
|
|
mem = __heap_alloc (heap, &size);
|
| 130 |
|
|
|
| 131 |
|
|
__heap_unlock (heap);
|
| 132 |
|
|
|
| 133 |
|
|
#if !defined(MALLOC_USE_SBRK) && defined(__UCLIBC_UCLINUX_BROKEN_MUNMAP__)
|
| 134 |
|
|
/* Insert a record of BLOCK in sorted order into the
|
| 135 |
|
|
__malloc_mmapped_blocks list. */
|
| 136 |
|
|
|
| 137 |
|
|
for (prev_mmb = 0, mmb = __malloc_mmapped_blocks;
|
| 138 |
|
|
mmb;
|
| 139 |
|
|
prev_mmb = mmb, mmb = mmb->next)
|
| 140 |
|
|
if (block < mmb->mem)
|
| 141 |
|
|
break;
|
| 142 |
|
|
|
| 143 |
|
|
new_mmb = malloc_from_heap (sizeof *new_mmb, &__malloc_mmb_heap);
|
| 144 |
|
|
new_mmb->next = mmb;
|
| 145 |
|
|
new_mmb->mem = block;
|
| 146 |
|
|
new_mmb->size = block_size;
|
| 147 |
|
|
|
| 148 |
|
|
if (prev_mmb)
|
| 149 |
|
|
prev_mmb->next = new_mmb;
|
| 150 |
|
|
else
|
| 151 |
|
|
__malloc_mmapped_blocks = new_mmb;
|
| 152 |
|
|
|
| 153 |
|
|
MALLOC_MMB_DEBUG (0, "new mmb at 0x%x: 0x%x[%d]",
|
| 154 |
|
|
(unsigned)new_mmb,
|
| 155 |
|
|
(unsigned)new_mmb->mem, block_size);
|
| 156 |
|
|
#endif /* !MALLOC_USE_SBRK && __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
|
| 157 |
|
|
}
|
| 158 |
|
|
}
|
| 159 |
|
|
|
| 160 |
|
|
if (likely (mem))
|
| 161 |
|
|
/* Record the size of the block and get the user address. */
|
| 162 |
|
|
{
|
| 163 |
|
|
mem = MALLOC_SETUP (mem, size);
|
| 164 |
|
|
|
| 165 |
|
|
MALLOC_DEBUG (-1, "malloc: returning 0x%lx (base:0x%lx, total_size:%ld)",
|
| 166 |
|
|
(long)mem, (long)MALLOC_BASE(mem), (long)MALLOC_SIZE(mem));
|
| 167 |
|
|
}
|
| 168 |
|
|
else
|
| 169 |
|
|
MALLOC_DEBUG (-1, "malloc: returning 0");
|
| 170 |
|
|
|
| 171 |
|
|
return mem;
|
| 172 |
|
|
}
|
| 173 |
|
|
|
| 174 |
|
|
void *
|
| 175 |
|
|
malloc (size_t size)
|
| 176 |
|
|
{
|
| 177 |
|
|
void *mem;
|
| 178 |
|
|
#ifdef MALLOC_DEBUGGING
|
| 179 |
|
|
static int debugging_initialized = 0;
|
| 180 |
|
|
if (! debugging_initialized)
|
| 181 |
|
|
{
|
| 182 |
|
|
debugging_initialized = 1;
|
| 183 |
|
|
__malloc_debug_init ();
|
| 184 |
|
|
}
|
| 185 |
|
|
if (__malloc_check)
|
| 186 |
|
|
__heap_check (&__malloc_heap, "malloc");
|
| 187 |
|
|
#endif
|
| 188 |
|
|
|
| 189 |
|
|
#ifdef __MALLOC_GLIBC_COMPAT__
|
| 190 |
|
|
if (unlikely (size == 0))
|
| 191 |
|
|
size++;
|
| 192 |
|
|
#else
|
| 193 |
|
|
/* Some programs will call malloc (0). Lets be strict and return NULL */
|
| 194 |
|
|
if (unlikely (size == 0))
|
| 195 |
|
|
return 0;
|
| 196 |
|
|
#endif
|
| 197 |
|
|
|
| 198 |
|
|
/* Check if they are doing something dumb like malloc(-1) */
|
| 199 |
|
|
if (unlikely(((unsigned long)size > (unsigned long)(MALLOC_HEADER_SIZE*-2))))
|
| 200 |
|
|
goto oom;
|
| 201 |
|
|
|
| 202 |
|
|
mem = malloc_from_heap (size, &__malloc_heap);
|
| 203 |
|
|
if (unlikely (!mem))
|
| 204 |
|
|
{
|
| 205 |
|
|
oom:
|
| 206 |
|
|
__set_errno (ENOMEM);
|
| 207 |
|
|
return 0;
|
| 208 |
|
|
}
|
| 209 |
|
|
|
| 210 |
|
|
return mem;
|
| 211 |
|
|
}
|