/* Malloc implementation for multiple threads without lock contention.
   Copyright (C) 1996-2001, 2002 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Wolfram Gloger <wmglo@dent.med.uni-muenchen.de>
   and Doug Lea <dl@cs.oswego.edu>, 1996.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

/* $Id: malloc.c 207 2010-07-19 17:43:24Z jeremybennett $

  This work is mainly derived from malloc-2.6.4 by Doug Lea
  <dl@cs.oswego.edu>, which is available from:

                 ftp://g.oswego.edu/pub/misc/malloc.c

  Most of the original comments are reproduced in the code below.

* Why use this malloc?

  This is not the fastest, most space-conserving, most portable, or
  most tunable malloc ever written. However it is among the fastest
  while also being among the most space-conserving, portable and tunable.
  Consistent balance across these factors results in a good general-purpose
  allocator. For a high-level description, see
     http://g.oswego.edu/dl/html/malloc.html

  On many systems, the standard malloc implementation is by itself not
  thread-safe, and therefore wrapped with a single global lock around
  all malloc-related functions. In some applications, especially with
  multiple available processors, this can lead to contention problems
  and bad performance. This malloc version was designed with the goal
  to avoid waiting for locks as much as possible. Statistics indicate
  that this goal is achieved in many cases.

* Synopsis of public routines

  (Much fuller descriptions are contained in the program documentation below.)

  ptmalloc_init();
     Initialize global configuration. When compiled for multiple threads,
     this function must be called once before any other function in the
     package. It is not required otherwise. It is called automatically
     in the Linux/GNU C library or when compiling with MALLOC_HOOKS.
  malloc(size_t n);
     Return a pointer to a newly allocated chunk of at least n bytes, or null
     if no space is available.
  free(Void_t* p);
     Release the chunk of memory pointed to by p, or no effect if p is null.
  realloc(Void_t* p, size_t n);
     Return a pointer to a chunk of size n that contains the same data
     as does chunk p up to the minimum of (n, p's size) bytes, or null
     if no space is available. The returned pointer may or may not be
     the same as p. If p is null, equivalent to malloc. Unless the
     #define REALLOC_ZERO_BYTES_FREES below is set, realloc with a
     size argument of zero (re)allocates a minimum-sized chunk.
  memalign(size_t alignment, size_t n);
     Return a pointer to a newly allocated chunk of n bytes, aligned
     in accord with the alignment argument, which must be a power of
     two.
  valloc(size_t n);
     Equivalent to memalign(pagesize, n), where pagesize is the page
     size of the system (or as near to this as can be figured out from
     all the includes/defines below.)
  pvalloc(size_t n);
     Equivalent to valloc(minimum-page-that-holds(n)), that is,
     round up n to nearest pagesize.
  calloc(size_t unit, size_t quantity);
     Returns a pointer to quantity * unit bytes, with all locations
     set to zero.
  cfree(Void_t* p);
     Equivalent to free(p).
  malloc_trim(size_t pad);
     Release all but pad bytes of freed top-most memory back
     to the system. Return 1 if successful, else 0.
  malloc_usable_size(Void_t* p);
     Report the number of usable allocated bytes associated with allocated
     chunk p. This may or may not report more bytes than were requested,
     due to alignment and minimum size constraints.
  malloc_stats();
     Prints brief summary statistics on stderr.
  mallinfo()
     Returns (by copy) a struct containing various summary statistics.
  mallopt(int parameter_number, int parameter_value)
     Changes one of the tunable parameters described below. Returns
     1 if successful in changing the parameter, else 0.

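  For example, a minimal usage sketch (illustrative only; these are the
  public entry points listed above, as an application would call them):

     void *p = malloc(100);                  allocate at least 100 bytes
     p = realloc(p, 250);                    grow; contents are preserved
     mallopt(M_TRIM_THRESHOLD, 64*1024);     tune trimming (see below)
     malloc_trim(0);                         return unused top memory
     free(p);
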
* Vital statistics:

  Alignment:                            8-byte
       8 byte alignment is currently hardwired into the design.  This
       seems to suffice for all current machines and C compilers.

  Assumed pointer representation:       4 or 8 bytes
       Code for 8-byte pointers is untested by me but has worked
       reliably by Wolfram Gloger, who contributed most of the
       changes supporting this.

  Assumed size_t representation:        4 or 8 bytes
       Note that size_t is allowed to be 4 bytes even if pointers are 8.

  Minimum overhead per allocated chunk: 4 or 8 bytes
       Each malloced chunk has a hidden overhead of 4 bytes holding size
       and status information.

  Minimum allocated size: 4-byte ptrs:  16 bytes    (including 4 overhead)
                          8-byte ptrs:  24/32 bytes (including 4/8 overhead)

       When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
       ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
       needed; 4 (8) for a trailing size field
       and 8 (16) bytes for free list pointers. Thus, the minimum
       allocatable size is 16/24/32 bytes.

       Even a request for zero bytes (i.e., malloc(0)) returns a
       pointer to something of the minimum allocatable size.

  Maximum allocated size: 4-byte size_t: 2^31 - 8 bytes
                          8-byte size_t: 2^63 - 16 bytes

       It is assumed that (possibly signed) size_t bit values suffice to
       represent chunk sizes. `Possibly signed' is due to the fact
       that `size_t' may be defined on a system as either a signed or
       an unsigned type. To be conservative, values that would appear
       as negative numbers are avoided.
       Requests for sizes with a negative sign bit will return a
       minimum-sized chunk.

  Maximum overhead wastage per allocated chunk: normally 15 bytes

       Alignment demands, plus the minimum allocatable size restriction
       make the normal worst-case wastage 15 bytes (i.e., up to 15
       more bytes will be allocated than were requested in malloc), with
       two exceptions:
         1. Because requests for zero bytes allocate non-zero space,
            the worst case wastage for a request of zero bytes is 24 bytes.
         2. For requests >= mmap_threshold that are serviced via
            mmap(), the worst case wastage is 8 bytes plus the remainder
            from a system page (the minimal mmap unit); typically 4096 bytes.

* Limitations

    Here are some features that are NOT currently supported

    * No automated mechanism for fully checking that all accesses
      to malloced memory stay within their bounds.
    * No support for compaction.

* Synopsis of compile-time options:

    People have reported using previous versions of this malloc on all
    versions of Unix, sometimes by tweaking some of the defines
    below. It has been tested most extensively on Solaris and
    Linux. People have also reported adapting this malloc for use in
    stand-alone embedded systems.

    The implementation is in straight, hand-tuned ANSI C.  Among other
    consequences, it uses a lot of macros.  Because of this, to be at
    all usable, this code should be compiled using an optimizing compiler
    (for example gcc -O2) that can simplify expressions and control
    paths.

  __STD_C                  (default: derived from C compiler defines)
     Nonzero if using ANSI-standard C compiler, a C++ compiler, or
     a C compiler sufficiently close to ANSI to get away with it.
  MALLOC_DEBUG             (default: NOT defined)
     Define to enable debugging. Adds fairly extensive assertion-based
     checking to help track down memory errors, but noticeably slows down
     execution.
  MALLOC_HOOKS             (default: NOT defined)
     Define to enable support for run-time replacement of the allocation
     functions through user-defined `hooks'.
  REALLOC_ZERO_BYTES_FREES (default: defined)
     Define this if you think that realloc(p, 0) should be equivalent
     to free(p).  (The C standard requires this behaviour, therefore
     it is the default.)  Otherwise, since malloc returns a unique
     pointer for malloc(0), so does realloc(p, 0).
  HAVE_MEMCPY              (default: defined)
     Define if you are not otherwise using ANSI STD C, but still
     have memcpy and memset in your C library and want to use them.
     Otherwise, simple internal versions are supplied.
  USE_MEMCPY               (default: 1 if HAVE_MEMCPY is defined, 0 otherwise)
     Define as 1 if you want the C library versions of memset and
     memcpy called in realloc and calloc (otherwise macro versions are used).
     At least on some platforms, the simple macro versions usually
     outperform libc versions.
  HAVE_MMAP                (default: defined as 1)
     Define to non-zero to optionally make malloc() use mmap() to
     allocate very large blocks.
  HAVE_MREMAP              (default: defined as 0 unless Linux libc set)
     Define to non-zero to optionally make realloc() use mremap() to
     reallocate very large blocks.
  USE_ARENAS               (default: the same as HAVE_MMAP)
     Enable support for multiple arenas, allocated using mmap().
  malloc_getpagesize       (default: derived from system #includes)
     Either a constant or routine call returning the system page size.
  HAVE_USR_INCLUDE_MALLOC_H (default: NOT defined)
     Optionally define if you are on a system with a /usr/include/malloc.h
     that declares struct mallinfo. It is not at all necessary to
     define this even if you do, but will ensure consistency.
  INTERNAL_SIZE_T          (default: size_t)
     Define to a 32-bit type (probably `unsigned int') if you are on a
     64-bit machine, yet do not want or need to allow malloc requests of
     greater than 2^31 to be handled. This saves space, especially for
     very small chunks.
  _LIBC                    (default: NOT defined)
     Defined only when compiled as part of the Linux libc/glibc.
     Also note that there is some odd internal name-mangling via defines
     (for example, internally, `malloc' is named `mALLOc') needed
     when compiling in this case. These look funny but don't otherwise
     affect anything.
  LACKS_UNISTD_H           (default: undefined)
     Define this if your system does not have a <unistd.h>.
  MORECORE                 (default: sbrk)
     The name of the routine to call to obtain more memory from the system.
  MORECORE_FAILURE         (default: -1)
     The value returned upon failure of MORECORE.
  MORECORE_CLEARS          (default 1)
     The degree to which the routine mapped to MORECORE zeroes out
     memory: never (0), only for newly allocated space (1) or always
     (2).  The distinction between (1) and (2) is necessary because on
     some systems, if the application first decrements and then
     increments the break value, the contents of the reallocated space
     are unspecified.
  DEFAULT_TRIM_THRESHOLD
  DEFAULT_TOP_PAD
  DEFAULT_MMAP_THRESHOLD
  DEFAULT_MMAP_MAX
     Default values of tunable parameters (described in detail below)
     controlling interaction with host system routines (sbrk, mmap, etc).
     These values may also be changed dynamically via mallopt(). The
     preset defaults are those that give best performance for typical
     programs/systems.
  DEFAULT_CHECK_ACTION
     When the standard debugging hooks are in place, and a pointer is
     detected as corrupt, do nothing (0), print an error message (1),
     or call abort() (2).


*/

/*

* Compile-time options for multiple threads:

  USE_PTHREADS, USE_THR, USE_SPROC
     Define one of these as 1 to select the thread interface:
     POSIX threads, Solaris threads or SGI sproc's, respectively.
     If none of these is defined as non-zero, you get a `normal'
     malloc implementation which is not thread-safe.  Support for
     multiple threads requires HAVE_MMAP=1.  As an exception, when
     compiling for GNU libc, i.e. when _LIBC is defined, then none of
     the USE_... symbols have to be defined.

  HEAP_MIN_SIZE
  HEAP_MAX_SIZE
     When thread support is enabled, additional `heap's are created
     with mmap calls.  These are limited in size; HEAP_MIN_SIZE should
     be a multiple of the page size, while HEAP_MAX_SIZE must be a power
     of two for alignment reasons.  HEAP_MAX_SIZE should be at least
     twice as large as the mmap threshold.
  THREAD_STATS
     When this is defined as non-zero, some statistics on mutex locking
     are computed.

*/




/* Preliminaries */

#ifndef __STD_C
#if defined (__STDC__)
#define __STD_C     1
#else
#if __cplusplus
#define __STD_C     1
#else
#define __STD_C     0
#endif /*__cplusplus*/
#endif /*__STDC__*/
#endif /*__STD_C*/

#ifndef Void_t
#if __STD_C
#define Void_t      void
#else
#define Void_t      char
#endif
#endif /*Void_t*/

#define _GNU_SOURCE
#include <features.h>
#define _LIBC 1
#define NOT_IN_libc 1

#if __STD_C
# include <stddef.h>   /* for size_t */
# if defined _LIBC || defined MALLOC_HOOKS
#  include <stdlib.h>  /* for getenv(), abort() */
# endif
#else
# include <sys/types.h>
# if defined _LIBC || defined MALLOC_HOOKS
extern char* getenv();
# endif
#endif

/* newlib modifications */

#include <libc-symbols.h>
#include <sys/types.h>

extern void __pthread_initialize (void) __attribute__((weak));
extern void *__mmap (void *__addr, size_t __len, int __prot,
                     int __flags, int __fd, off_t __offset);
extern int __munmap (void *__addr, size_t __len);
extern void *__mremap (void *__addr, size_t __old_len, size_t __new_len,
                       int __may_move);
extern int __getpagesize (void);

#define __libc_enable_secure 1

/* Macros for handling mutexes and thread-specific data.  This is
   included early, because some thread-related header files (such as
   pthread.h) should be included before any others. */
#include <bits/libc-lock.h>
#include "thread-m.h"

void *(*__malloc_internal_tsd_get) (enum __libc_tsd_key_t) = NULL;
int (*__malloc_internal_tsd_set) (enum __libc_tsd_key_t,
                                  __const void *) = NULL;

weak_alias(__malloc_internal_tsd_get, __libc_internal_tsd_get)
weak_alias(__malloc_internal_tsd_set, __libc_internal_tsd_set)


#ifdef __cplusplus
extern "C" {
#endif

#include <errno.h>
#include <stdio.h>    /* needed for malloc_stats */


/*
  Compile-time options
*/


/*
    Debugging:

    Because freed chunks may be overwritten with link fields, this
    malloc will often die when freed memory is overwritten by user
    programs.  This can be very effective (albeit in an annoying way)
    in helping track down dangling pointers.

    If you compile with -DMALLOC_DEBUG, a number of assertion checks are
    enabled that will catch more memory errors. You probably won't be
    able to make much sense of the actual assertion errors, but they
    should help you locate incorrectly overwritten memory.  The
    checking is fairly extensive, and will slow down execution
    noticeably. Calling malloc_stats or mallinfo with MALLOC_DEBUG set will
    attempt to check every non-mmapped allocated and free chunk in the
    course of computing the summaries. (By nature, mmapped regions
    cannot be checked very much automatically.)

    Setting MALLOC_DEBUG may also be helpful if you are trying to modify
    this code. The assertions in the check routines spell out in more
    detail the assumptions and invariants underlying the algorithms.

*/

#if MALLOC_DEBUG
#include <assert.h>
#else
#define assert(x) ((void)0)
#endif


/*
  INTERNAL_SIZE_T is the word-size used for internal bookkeeping
  of chunk sizes. On a 64-bit machine, you can reduce malloc
  overhead by defining INTERNAL_SIZE_T to be a 32 bit `unsigned int'
  at the expense of not being able to handle requests greater than
  2^31. This limitation is hardly ever a concern; you are encouraged
  to set this. However, the default version is the same as size_t.
*/

#ifndef INTERNAL_SIZE_T
#define INTERNAL_SIZE_T size_t
#endif

/*
  REALLOC_ZERO_BYTES_FREES should be set if a call to realloc with
  zero bytes should be the same as a call to free.  The C standard
  requires this. Otherwise, since this malloc returns a unique pointer
  for malloc(0), so does realloc(p, 0).
*/


#define REALLOC_ZERO_BYTES_FREES


/*
  HAVE_MEMCPY should be defined if you are not otherwise using
  ANSI STD C, but still have memcpy and memset in your C library
  and want to use them in calloc and realloc.  Otherwise simple
  macro versions are defined here.

  USE_MEMCPY should be defined as 1 if you actually want to
  have memset and memcpy called. People report that the macro
  versions are often enough faster than libc versions on many
  systems that it is better to use them.

*/

#define HAVE_MEMCPY 1

#ifndef USE_MEMCPY
#ifdef HAVE_MEMCPY
#define USE_MEMCPY 1
#else
#define USE_MEMCPY 0
#endif
#endif

#if (__STD_C || defined(HAVE_MEMCPY))

#if __STD_C
void* memset(void*, int, size_t);
void* memcpy(void*, const void*, size_t);
void* memmove(void*, const void*, size_t);
#else
Void_t* memset();
Void_t* memcpy();
Void_t* memmove();
#endif
#endif

/* The following macros are only invoked with (2n+1)-multiples of
   INTERNAL_SIZE_T units, with a positive integer n.  This is exploited
   for fast inline execution when n is small.  If the regions to be
   copied do overlap, the destination lies always _below_ the source. */

#if USE_MEMCPY

#define MALLOC_ZERO(charp, nbytes)                                            \
do {                                                                          \
  INTERNAL_SIZE_T mzsz = (nbytes);                                            \
  if(mzsz <= 9*sizeof(mzsz)) {                                                \
    INTERNAL_SIZE_T* mz = (INTERNAL_SIZE_T*) (charp);                         \
    if(mzsz >= 5*sizeof(mzsz)) {     *mz++ = 0;                               \
                                     *mz++ = 0;                               \
      if(mzsz >= 7*sizeof(mzsz)) {   *mz++ = 0;                               \
                                     *mz++ = 0;                               \
        if(mzsz >= 9*sizeof(mzsz)) { *mz++ = 0;                               \
                                     *mz++ = 0; }}}                           \
                                     *mz++ = 0;                               \
                                     *mz++ = 0;                               \
                                     *mz   = 0;                               \
  } else memset((charp), 0, mzsz);                                            \
} while(0)

/* If the regions overlap, dest is always _below_ src. */

#define MALLOC_COPY(dest,src,nbytes,overlap)                                  \
do {                                                                          \
  INTERNAL_SIZE_T mcsz = (nbytes);                                            \
  if(mcsz <= 9*sizeof(mcsz)) {                                                \
    INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) (src);                        \
    INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) (dest);                       \
    if(mcsz >= 5*sizeof(mcsz)) {     *mcdst++ = *mcsrc++;                     \
                                     *mcdst++ = *mcsrc++;                     \
      if(mcsz >= 7*sizeof(mcsz)) {   *mcdst++ = *mcsrc++;                     \
                                     *mcdst++ = *mcsrc++;                     \
        if(mcsz >= 9*sizeof(mcsz)) { *mcdst++ = *mcsrc++;                     \
                                     *mcdst++ = *mcsrc++; }}}                 \
                                     *mcdst++ = *mcsrc++;                     \
                                     *mcdst++ = *mcsrc++;                     \
                                     *mcdst   = *mcsrc  ;                     \
  } else if(overlap)                                                          \
    memmove(dest, src, mcsz);                                                 \
  else                                                                        \
    memcpy(dest, src, mcsz);                                                  \
} while(0)

#else /* !USE_MEMCPY */

/* Use Duff's device for good zeroing/copying performance. */

#define MALLOC_ZERO(charp, nbytes)                                            \
do {                                                                          \
  INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp);                           \
  long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T), mcn;                         \
  if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }             \
  switch (mctmp) {                                                            \
    case 0: for(;;) { *mzp++ = 0;                                             \
    case 7:           *mzp++ = 0;                                             \
    case 6:           *mzp++ = 0;                                             \
    case 5:           *mzp++ = 0;                                             \
    case 4:           *mzp++ = 0;                                             \
    case 3:           *mzp++ = 0;                                             \
    case 2:           *mzp++ = 0;                                             \
    case 1:           *mzp++ = 0; if(mcn <= 0) break; mcn--; }                \
  }                                                                           \
} while(0)

/* If the regions overlap, dest is always _below_ src. */

#define MALLOC_COPY(dest,src,nbytes,overlap)                                  \
do {                                                                          \
  INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src;                            \
  INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest;                           \
  long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T), mcn;                         \
  if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }             \
  switch (mctmp) {                                                            \
    case 0: for(;;) { *mcdst++ = *mcsrc++;                                    \
    case 7:           *mcdst++ = *mcsrc++;                                    \
    case 6:           *mcdst++ = *mcsrc++;                                    \
    case 5:           *mcdst++ = *mcsrc++;                                    \
    case 4:           *mcdst++ = *mcsrc++;                                    \
    case 3:           *mcdst++ = *mcsrc++;                                    \
    case 2:           *mcdst++ = *mcsrc++;                                    \
    case 1:           *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; }       \
  }                                                                           \
} while(0)

#endif
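
/* For illustration only (not used by this file): the fallback macros above
   unroll their loops with Duff's device.  A standalone sketch of the same
   pattern, using a hypothetical word-zeroing helper:

     static void example_zero_words(size_t *p, long nwords)
     {
       long n;
       if (nwords <= 0) return;
       n = (nwords + 7) / 8;          number of passes through the loop
       switch (nwords % 8) {          jump into the unrolled body
         case 0: do { *p++ = 0;
         case 7:      *p++ = 0;
         case 6:      *p++ = 0;
         case 5:      *p++ = 0;
         case 4:      *p++ = 0;
         case 3:      *p++ = 0;
         case 2:      *p++ = 0;
         case 1:      *p++ = 0;
                 } while (--n > 0);
       }
     }

   The switch dispatches on the remainder so that the first pass performs
   only the leftover stores; every later pass performs eight. */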


#ifndef LACKS_UNISTD_H
#  include <unistd.h>
#endif

/*
  Define HAVE_MMAP to optionally make malloc() use mmap() to allocate
  very large blocks.  These will be returned to the operating system
  immediately after a free().  HAVE_MMAP is also a prerequisite to
  support multiple `arenas' (see USE_ARENAS below).
*/

#ifndef HAVE_MMAP
# ifdef _POSIX_MAPPED_FILES
#  define HAVE_MMAP 1
# endif
#endif

/*
  Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
  large blocks.  This is currently only possible on Linux with
  kernel versions newer than 1.3.77.
*/

#ifndef HAVE_MREMAP
#define HAVE_MREMAP defined(__linux__)
#endif

/* Define USE_ARENAS to enable support for multiple `arenas'.  These
   are allocated using mmap(), are necessary for threads and
   occasionally useful to overcome address space limitations affecting
   sbrk(). */

#ifndef USE_ARENAS
#define USE_ARENAS HAVE_MMAP
#endif

#if HAVE_MMAP

#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>

#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
#define MAP_ANONYMOUS MAP_ANON
#endif
#if !defined(MAP_FAILED)
#define MAP_FAILED ((char*)-1)
#endif

#ifndef MAP_NORESERVE
# ifdef MAP_AUTORESRV
#  define MAP_NORESERVE MAP_AUTORESRV
# else
#  define MAP_NORESERVE 0
# endif
#endif

#endif /* HAVE_MMAP */

/*
  Access to system page size. To the extent possible, this malloc
  manages memory from the system in page-size units.

  The following mechanics for getpagesize were adapted from
  bsd/gnu getpagesize.h
*/

#ifndef malloc_getpagesize
# ifdef _SC_PAGESIZE         /* some SVR4 systems omit an underscore */
#  ifndef _SC_PAGE_SIZE
#   define _SC_PAGE_SIZE _SC_PAGESIZE
#  endif
# endif
# ifdef _SC_PAGE_SIZE
#  define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
# else
#  if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
    extern size_t getpagesize();
#   define malloc_getpagesize getpagesize()
#  else
#   include <sys/param.h>
#   ifdef EXEC_PAGESIZE
#    define malloc_getpagesize EXEC_PAGESIZE
#   else
#    ifdef NBPG
#     ifndef CLSIZE
#      define malloc_getpagesize NBPG
#     else
#      define malloc_getpagesize (NBPG * CLSIZE)
#     endif
#    else
#     ifdef NBPC
#      define malloc_getpagesize NBPC
#     else
#      ifdef PAGESIZE
#       define malloc_getpagesize PAGESIZE
#      else
#       define malloc_getpagesize (4096) /* just guess */
#      endif
#     endif
#    endif
#   endif
#  endif
# endif
#endif



/*

  This version of malloc supports the standard SVID/XPG mallinfo
  routine that returns a struct containing the same kind of
  information you can get from malloc_stats. It should work on
  any SVID/XPG compliant system that has a /usr/include/malloc.h
  defining struct mallinfo. (If you'd like to install such a thing
  yourself, cut out the preliminary declarations as described above
  and below and save them in a malloc.h file. But there's no
  compelling reason to bother to do this.)

  The main declaration needed is the mallinfo struct that is returned
  (by-copy) by mallinfo().  The SVID/XPG mallinfo struct contains a
  bunch of fields, most of which are not even meaningful in this
  version of malloc.  Some of these fields are instead filled by
  mallinfo() with other numbers that might possibly be of interest.

  HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
  /usr/include/malloc.h file that includes a declaration of struct
  mallinfo.  If so, it is included; else an SVID2/XPG2 compliant
  version is declared below.  These must be precisely the same for
  mallinfo() to work.

*/
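
/* For example (an illustrative sketch only), a program can sample the
   statistics through the standard SVID fields like this:

     struct mallinfo mi = mallinfo();
     fprintf(stderr, "in use: %d bytes, free: %d bytes\n",
             mi.uordblks, mi.fordblks);
*/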

/* #define HAVE_USR_INCLUDE_MALLOC_H */

#if HAVE_USR_INCLUDE_MALLOC_H
# include "/usr/include/malloc.h"
#else
# ifdef _LIBC
#  include "malloc.h"
# else
#  include "ptmalloc.h"
# endif
#endif

#include <bp-checks.h>

#ifndef DEFAULT_TRIM_THRESHOLD
#define DEFAULT_TRIM_THRESHOLD (128 * 1024)
#endif

/*
    M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
    to keep before releasing via malloc_trim in free().

    Automatic trimming is mainly useful in long-lived programs.
    Because trimming via sbrk can be slow on some systems, and can
    sometimes be wasteful (in cases where programs immediately
    afterward allocate more large chunks) the value should be high
    enough so that your overall system performance would improve by
    releasing.

    The trim threshold and the mmap control parameters (see below)
    can be traded off with one another. Trimming and mmapping are
    two different ways of releasing unused memory back to the
    system. Between these two, it is often possible to keep
    system-level demands of a long-lived program down to a bare
    minimum. For example, in one test suite of sessions measuring
    the XF86 X server on Linux, using a trim threshold of 128K and a
    mmap threshold of 192K led to near-minimal long term resource
    consumption.

    If you are using this malloc in a long-lived program, it should
    pay to experiment with these values.  As a rough guide, you
    might set to a value close to the average size of a process
    (program) running on your system.  Releasing this much memory
    would allow such a process to run in memory.  Generally, it's
    worth it to tune for trimming rather than memory mapping when a
    program undergoes phases where several large chunks are
    allocated and released in ways that can reuse each other's
    storage, perhaps mixed with phases where there are no such
    chunks at all.  And in well-behaved long-lived programs,
    controlling release of large blocks via trimming versus mapping
    is usually faster.

    However, in most programs, these parameters serve mainly as
    protection against the system-level effects of carrying around
    massive amounts of unneeded memory. Since frequent calls to
    sbrk, mmap, and munmap otherwise degrade performance, the default
    parameters are set to relatively high values that serve only as
    safeguards.

    The default trim value is high enough to cause trimming only in
    fairly extreme (by current memory consumption standards) cases.
    It must be greater than page size to have any useful effect.  To
    disable trimming completely, you can set to (unsigned long)(-1);


*/
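
/* For example (an illustrative sketch; M_TRIM_THRESHOLD is the mallopt
   parameter number corresponding to DEFAULT_TRIM_THRESHOLD):

     mallopt(M_TRIM_THRESHOLD, 256 * 1024);    trim only above 256K
     mallopt(M_TRIM_THRESHOLD, -1);            effectively disable trimming
*/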


#ifndef DEFAULT_TOP_PAD
#define DEFAULT_TOP_PAD        (0)
#endif

/*
    M_TOP_PAD is the amount of extra `padding' space to allocate or
    retain whenever sbrk is called. It is used in two ways internally:

    * When sbrk is called to extend the top of the arena to satisfy
      a new malloc request, this much padding is added to the sbrk
      request.

    * When malloc_trim is called automatically from free(),
      it is used as the `pad' argument.

    In both cases, the actual amount of padding is rounded
    so that the end of the arena is always a system page boundary.

    The main reason for using padding is to avoid calling sbrk so
    often. Having even a small pad greatly reduces the likelihood
    that nearly every malloc request during program start-up (or
    after trimming) will invoke sbrk, which needlessly wastes
    time.

    Automatic rounding-up to page-size units is normally sufficient
    to avoid measurable overhead, so the default is 0.  However, in
    systems where sbrk is relatively slow, it can pay to increase
    this value, at the expense of carrying around more memory than
    the program needs.

*/


#ifndef DEFAULT_MMAP_THRESHOLD
#define DEFAULT_MMAP_THRESHOLD (128 * 1024)
#endif

/*

    M_MMAP_THRESHOLD is the request size threshold for using mmap()
    to service a request.  Requests of at least this size that cannot
    be allocated using already-existing space will be serviced via mmap.
    (If enough normal freed space already exists it is used instead.)

    Using mmap segregates relatively large chunks of memory so that
    they can be individually obtained and released from the host
    system. A request serviced through mmap is never reused by any
    other request (at least not directly; the system may just so
    happen to remap successive requests to the same locations).

    Segregating space in this way has the benefit that mmapped space
    can ALWAYS be individually released back to the system, which
    helps keep the system level memory demands of a long-lived
    program low. Mapped memory can never become `locked' between
    other chunks, as can happen with normally allocated chunks, which
    means that even trimming via malloc_trim would not release them.

    However, it has the disadvantages that:

      1. The space cannot be reclaimed, consolidated, and then
         used to service later requests, as happens with normal chunks.
      2. It can lead to more wastage because of mmap page alignment
         requirements.
      3. It causes malloc performance to be more dependent on host
         system memory management support routines which may vary in
         implementation quality and may impose arbitrary
         limitations. Generally, servicing a request via normal
         malloc steps is faster than going through a system's mmap.

    All together, these considerations should lead you to use mmap
    only for relatively large requests.


*/



#ifndef DEFAULT_MMAP_MAX
#if HAVE_MMAP
#define DEFAULT_MMAP_MAX       (1024)
#else
#define DEFAULT_MMAP_MAX       (0)
#endif
#endif

/*
    M_MMAP_MAX is the maximum number of requests to simultaneously
    service using mmap. This parameter exists because:

      1. Some systems have a limited number of internal tables for
         use by mmap.
      2. In most systems, overreliance on mmap can degrade overall
         performance.
      3. If a program allocates many large regions, it is probably
         better off using normal sbrk-based allocation routines that
         can reclaim and reallocate normal heap memory. Using a
         small value allows transition into this mode after the
         first few allocations.

    Setting to 0 disables all use of mmap.  If HAVE_MMAP is not set,
    the default value is 0, and attempts to set it to non-zero values
    in mallopt will fail.
*/



#ifndef DEFAULT_CHECK_ACTION
#define DEFAULT_CHECK_ACTION 1
#endif

/* What to do if the standard debugging hooks are in place and a
   corrupt pointer is detected: do nothing (0), print an error message
   (1), or call abort() (2). */



#define HEAP_MIN_SIZE (32*1024)
#define HEAP_MAX_SIZE (1024*1024) /* must be a power of two */

/* HEAP_MIN_SIZE and HEAP_MAX_SIZE limit the size of mmap()ed heaps
   that are dynamically created for multi-threaded programs.  The
   maximum size must be a power of two, for fast determination of
   which heap belongs to a chunk.  It should be much larger than
   the mmap threshold, so that requests with a size just below that
   threshold can be fulfilled without creating too many heaps.
*/
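
/* Because each mmap()ed heap starts at an address aligned to
   HEAP_MAX_SIZE and never grows beyond it, the heap owning any
   non-main-arena chunk can be found by masking the chunk's address.
   A sketch of the technique (the exact macro used by the arena code
   may differ):

     heap_info *h = (heap_info *)((unsigned long)ptr & ~(HEAP_MAX_SIZE - 1));
*/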



#ifndef THREAD_STATS
#define THREAD_STATS 0
#endif

/* If THREAD_STATS is non-zero, some statistics on mutex locking are
   computed. */


/* Macro to set errno.  */
#ifndef __set_errno
# define __set_errno(val) errno = (val)
#endif

/* On some platforms we can compile internal, not exported functions better.
   Let the environment provide a macro and define it to be empty if it
   is not available.  */
#ifndef internal_function
# define internal_function
#endif


/*

  Special defines for the Linux/GNU C library.

*/


#ifdef _LIBC

#if __STD_C

Void_t * __default_morecore (ptrdiff_t);
Void_t *(*__morecore)(ptrdiff_t) = __default_morecore;

#else

Void_t * __default_morecore ();
Void_t *(*__morecore)() = __default_morecore;

#endif

#define MORECORE (*__morecore)
#define MORECORE_FAILURE 0

#ifndef MORECORE_CLEARS
#define MORECORE_CLEARS 1
#endif

static size_t __libc_pagesize;

#define access __access
#define mmap __mmap
#define munmap __munmap
#define mremap __mremap
#define mprotect __mprotect
#undef malloc_getpagesize
#define malloc_getpagesize __libc_pagesize

#else /* _LIBC */

#if __STD_C
extern Void_t*     sbrk(ptrdiff_t);
#else
extern Void_t*     sbrk();
#endif

#ifndef MORECORE
#define MORECORE sbrk
#endif

#ifndef MORECORE_FAILURE
#define MORECORE_FAILURE -1
#endif

#ifndef MORECORE_CLEARS
#define MORECORE_CLEARS 1
#endif

#endif /* _LIBC */

#ifdef _LIBC

#define cALLOc              __libc_calloc
#define fREe                __libc_free
#define mALLOc              __libc_malloc
#define mEMALIGn            __libc_memalign
#define rEALLOc             __libc_realloc
#define vALLOc              __libc_valloc
#define pvALLOc             __libc_pvalloc
#define mALLINFo            __libc_mallinfo
#define mALLOPt             __libc_mallopt
#define mALLOC_STATs        __malloc_stats
#define mALLOC_USABLE_SIZe  __malloc_usable_size
#define mALLOC_TRIm         __malloc_trim
#define mALLOC_GET_STATe    __malloc_get_state
#define mALLOC_SET_STATe    __malloc_set_state

#else

#define cALLOc              calloc
#define fREe                free
#define mALLOc              malloc
#define mEMALIGn            memalign
#define rEALLOc             realloc
#define vALLOc              valloc
#define pvALLOc             pvalloc
#define mALLINFo            mallinfo
#define mALLOPt             mallopt
#define mALLOC_STATs        malloc_stats
#define mALLOC_USABLE_SIZe  malloc_usable_size
#define mALLOC_TRIm         malloc_trim
#define mALLOC_GET_STATe    malloc_get_state
#define mALLOC_SET_STATe    malloc_set_state

#endif

/* Public routines */

#if __STD_C

#ifndef _LIBC
void    ptmalloc_init(void);
#endif
Void_t* mALLOc(size_t);
void    fREe(Void_t*);
Void_t* rEALLOc(Void_t*, size_t);
Void_t* mEMALIGn(size_t, size_t);
Void_t* vALLOc(size_t);
Void_t* pvALLOc(size_t);
Void_t* cALLOc(size_t, size_t);
void    cfree(Void_t*);
int     mALLOC_TRIm(size_t);
size_t  mALLOC_USABLE_SIZe(Void_t*);
void    mALLOC_STATs(void);
int     mALLOPt(int, int);
struct mallinfo mALLINFo(void);
Void_t* mALLOC_GET_STATe(void);
int     mALLOC_SET_STATe(Void_t*);

#else /* !__STD_C */

#ifndef _LIBC
void    ptmalloc_init();
#endif
Void_t* mALLOc();
void    fREe();
Void_t* rEALLOc();
Void_t* mEMALIGn();
Void_t* vALLOc();
Void_t* pvALLOc();
Void_t* cALLOc();
void    cfree();
int     mALLOC_TRIm();
size_t  mALLOC_USABLE_SIZe();
void    mALLOC_STATs();
int     mALLOPt();
struct mallinfo mALLINFo();
Void_t* mALLOC_GET_STATe();
int     mALLOC_SET_STATe();

#endif /* __STD_C */


#ifdef __cplusplus
} /* end of extern "C" */
#endif

#if !defined(NO_THREADS) && !HAVE_MMAP
  "Can't have threads support without mmap"
#endif
#if USE_ARENAS && !HAVE_MMAP
  "Can't have multiple arenas without mmap"
#endif


/*
  Type declarations
*/


struct malloc_chunk
{
  INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */
  INTERNAL_SIZE_T size;      /* Size in bytes, including overhead. */
  struct malloc_chunk* fd;   /* double links -- used only if free. */
  struct malloc_chunk* bk;
};

typedef struct malloc_chunk* mchunkptr;
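
/* For orientation (illustrative arithmetic): with 4-byte pointers and a
   4-byte INTERNAL_SIZE_T, sizeof(struct malloc_chunk) is 16 bytes; this
   value is used below as MINSIZE, the smallest allocatable chunk.  With
   8-byte pointers it grows to 24 or 32 bytes, matching the "Vital
   statistics" figures above. */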
|
1066 |
|
|
|
1067 |
|
|
/*
|
1068 |
|
|
|
1069 |
|
|
malloc_chunk details:
|
1070 |
|
|
|
1071 |
|
|
(The following includes lightly edited explanations by Colin Plumb.)
|
1072 |
|
|
|
1073 |
|
|
Chunks of memory are maintained using a `boundary tag' method as
|
1074 |
|
|
described in e.g., Knuth or Standish. (See the paper by Paul
|
1075 |
|
|
Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
|
1076 |
|
|
survey of such techniques.) Sizes of free chunks are stored both
|
1077 |
|
|
in the front of each chunk and at the end. This makes
|
1078 |
|
|
consolidating fragmented chunks into bigger chunks very fast. The
|
1079 |
|
|
size fields also hold bits representing whether chunks are free or
|
1080 |
|
|
in use.
|
1081 |
|
|
|
1082 |
|
|
An allocated chunk looks like this:
|
1083 |
|
|
|
1084 |
|
|
|
1085 |
|
|
chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
1086 |
|
|
| Size of previous chunk, if allocated | |
|
1087 |
|
|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
1088 |
|
|
| Size of chunk, in bytes |P|
|
1089 |
|
|
mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
1090 |
|
|
| User data starts here... .
|
1091 |
|
|
. .
|
1092 |
|
|
. (malloc_usable_space() bytes) .
|
1093 |
|
|
. |
|
1094 |
|
|
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
1095 |
|
|
| Size of chunk |
|
1096 |
|
|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
1097 |
|
|
|
1098 |
|
|
|
1099 |
|
|
Where "chunk" is the front of the chunk for the purpose of most of
|
1100 |
|
|
the malloc code, but "mem" is the pointer that is returned to the
|
1101 |
|
|
user. "Nextchunk" is the beginning of the next contiguous chunk.
|
1102 |
|
|
|
1103 |
|
|
Chunks always begin on even word boundaries, so the mem portion
|
1104 |
|
|
(which is returned to the user) is also on an even word boundary, and
|
1105 |
|
|
thus double-word aligned.
|
1106 |
|
|
|
1107 |
|
|
Free chunks are stored in circular doubly-linked lists, and look like this:
|
1108 |
|
|
|
1109 |
|
|
chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
1110 |
|
|
| Size of previous chunk |
|
1111 |
|
|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
1112 |
|
|
`head:' | Size of chunk, in bytes |P|
|
1113 |
|
|
mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
1114 |
|
|
| Forward pointer to next chunk in list |
|
1115 |
|
|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
1116 |
|
|
| Back pointer to previous chunk in list |
|
1117 |
|
|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
1118 |
|
|
| Unused space (may be 0 bytes long) .
|
1119 |
|
|
. .
|
1120 |
|
|
. |
|
1121 |
|
|
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
1122 |
|
|
`foot:' | Size of chunk, in bytes |
|
1123 |
|
|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
1124 |
|
|
|
1125 |
|
|
The P (PREV_INUSE) bit, stored in the unused low-order bit of the
|
1126 |
|
|
chunk size (which is always a multiple of two words), is an in-use
|
1127 |
|
|
bit for the *previous* chunk. If that bit is *clear*, then the
|
1128 |
|
|
word before the current chunk size contains the previous chunk
|
1129 |
|
|
size, and can be used to find the front of the previous chunk.
|
1130 |
|
|
(The very first chunk allocated always has this bit set,
|
1131 |
|
|
preventing access to non-existent (or non-owned) memory.)
|
1132 |
|
|
|
1133 |
|
|
Note that the `foot' of the current chunk is actually represented
|
1134 |
|
|
as the prev_size of the NEXT chunk. (This makes it easier to
|
1135 |
|
|
deal with alignments etc).
|
1136 |
|
|
|
1137 |
|
|
The two exceptions to all this are
|
1138 |
|
|
|
1139 |
|
|
1. The special chunk `top', which doesn't bother using the
|
1140 |
|
|
trailing size field since there is no
|
1141 |
|
|
next contiguous chunk that would have to index off it. (After
|
1142 |
|
|
initialization, `top' is forced to always exist. If it would
|
1143 |
|
|
become less than MINSIZE bytes long, it is replenished via
|
1144 |
|
|
malloc_extend_top.)
|
1145 |
|
|
|
1146 |
|
|
2. Chunks allocated via mmap, which have the second-lowest-order
|
1147 |
|
|
bit (IS_MMAPPED) set in their size fields. Because they are
|
1148 |
|
|
never merged or traversed from any other chunk, they have no
|
1149 |
|
|
foot size or inuse information.
|
1150 |
|
|
|
1151 |
|
|
Available chunks are kept in any of several places (all declared below):
|
1152 |
|
|
|
1153 |
|
|
* `av': An array of chunks serving as bin headers for consolidated
|
1154 |
|
|
chunks. Each bin is doubly linked. The bins are approximately
|
1155 |
|
|
proportionally (log) spaced. There are a lot of these bins
|
1156 |
|
|
(128). This may look excessive, but works very well in
|
1157 |
|
|
practice. All procedures maintain the invariant that no
|
1158 |
|
|
consolidated chunk physically borders another one. Chunks in
|
1159 |
|
|
bins are kept in size order, with ties going to the
|
1160 |
|
|
approximately least recently used chunk.
|
1161 |
|
|
|
1162 |
|
|
The chunks in each bin are maintained in decreasing sorted order by
|
1163 |
|
|
size. This is irrelevant for the small bins, which all contain
|
1164 |
|
|
the same-sized chunks, but facilitates best-fit allocation for
|
1165 |
|
|
larger chunks. (These lists are just sequential. Keeping them in
|
1166 |
|
|
order almost never requires enough traversal to warrant using
|
1167 |
|
|
fancier ordered data structures.) Chunks of the same size are
|
1168 |
|
|
linked with the most recently freed at the front, and allocations
|
1169 |
|
|
are taken from the back. This results in LRU or FIFO allocation
|
1170 |
|
|
order, which tends to give each chunk an equal opportunity to be
|
1171 |
|
|
consolidated with adjacent freed chunks, resulting in larger free
|
1172 |
|
|
chunks and less fragmentation.
|
1173 |
|
|
|
1174 |
|
|
* `top': The top-most available chunk (i.e., the one bordering the
|
1175 |
|
|
end of available memory) is treated specially. It is never
|
1176 |
|
|
included in any bin, is used only if no other chunk is
|
1177 |
|
|
available, and is released back to the system if it is very
|
1178 |
|
|
large (see M_TRIM_THRESHOLD).
|
1179 |
|
|
|
1180 |
|
|
* `last_remainder': A bin holding only the remainder of the
|
1181 |
|
|
most recently split (non-top) chunk. This bin is checked
|
1182 |
|
|
before other non-fitting chunks, so as to provide better
|
1183 |
|
|
locality for runs of sequentially allocated chunks.
|
1184 |
|
|
|
1185 |
|
|
* Implicitly, through the host system's memory mapping tables.
|
1186 |
|
|
If supported, requests greater than a threshold are usually
|
1187 |
|
|
serviced via calls to mmap, and then later released via munmap.
|
1188 |
|
|
|
1189 |
|
|
*/
|
1190 |
|
|
|
1191 |
|
|
/*
|
1192 |
|
|
Bins
|
1193 |
|
|
|
1194 |
|
|
The bins are an array of pairs of pointers serving as the
|
1195 |
|
|
heads of (initially empty) doubly-linked lists of chunks, laid out
|
1196 |
|
|
in a way so that each pair can be treated as if it were in a
|
1197 |
|
|
malloc_chunk. (This way, the fd/bk offsets for linking bin heads
|
1198 |
|
|
and chunks are the same).
|
1199 |
|
|
|
1200 |
|
|
Bins for sizes < 512 bytes contain chunks of all the same size, spaced
|
1201 |
|
|
8 bytes apart. Larger bins are approximately logarithmically
|
1202 |
|
|
spaced. (See the table below.)
|
1203 |
|
|
|
1204 |
|
|
Bin layout:
|
1205 |
|
|
|
1206 |
|
|
64 bins of size 8
|
1207 |
|
|
32 bins of size 64
|
1208 |
|
|
16 bins of size 512
|
1209 |
|
|
8 bins of size 4096
|
1210 |
|
|
4 bins of size 32768
|
1211 |
|
|
2 bins of size 262144
|
1212 |
|
|
1 bin of size what's left
|
1213 |
|
|
|
1214 |
|
|
There is actually a little bit of slop in the numbers in bin_index
|
1215 |
|
|
for the sake of speed. This makes no difference elsewhere.
|
1216 |
|
|
|
1217 |
|
|
   The special chunks `top' and `last_remainder' get their own bins,
   (this is implemented via yet more trickery with the av array),
   although `top' is never properly linked to its bin since it is
   always handled specially.

*/

#define NAV             128   /* number of bins */

typedef struct malloc_chunk* mbinptr;

/* An arena is a configuration of malloc_chunks together with an array
   of bins.  With multiple threads, it must be locked via a mutex
   before changing its data structures.  One or more `heaps' are
   associated with each arena, except for the main_arena, which is
   associated only with the `main heap', i.e. the conventional free
   store obtained with calls to MORECORE() (usually sbrk).  The `av'
   array is never mentioned directly in the code, but instead used via
   bin access macros. */

typedef struct _arena {
  mbinptr av[2*NAV + 2];
  struct _arena *next;
  size_t size;
#if THREAD_STATS
  long stat_lock_direct, stat_lock_loop, stat_lock_wait;
#endif
  mutex_t mutex;
} arena;


/* A heap is a single contiguous memory region holding (coalesceable)
   malloc_chunks.  It is allocated with mmap() and always starts at an
   address aligned to HEAP_MAX_SIZE.  Not used unless compiling with
   USE_ARENAS. */

typedef struct _heap_info {
  arena *ar_ptr;           /* Arena for this heap. */
  struct _heap_info *prev; /* Previous heap. */
  size_t size;             /* Current size in bytes. */
  size_t pad;              /* Make sure the following data is properly aligned. */
} heap_info;


/*
  Static functions (forward declarations)
*/

#if __STD_C

static void      chunk_free(arena *ar_ptr, mchunkptr p) internal_function;
static mchunkptr chunk_alloc(arena *ar_ptr, INTERNAL_SIZE_T size)
     internal_function;
static mchunkptr chunk_realloc(arena *ar_ptr, mchunkptr oldp,
                               INTERNAL_SIZE_T oldsize, INTERNAL_SIZE_T nb)
     internal_function;
static mchunkptr chunk_align(arena *ar_ptr, INTERNAL_SIZE_T nb,
                             size_t alignment) internal_function;
static int       main_trim(size_t pad) internal_function;
#if USE_ARENAS
static int       heap_trim(heap_info *heap, size_t pad) internal_function;
#endif
#if defined _LIBC || defined MALLOC_HOOKS
static Void_t*   malloc_check(size_t sz, const Void_t *caller);
static void      free_check(Void_t* mem, const Void_t *caller);
static Void_t*   realloc_check(Void_t* oldmem, size_t bytes,
                               const Void_t *caller);
static Void_t*   memalign_check(size_t alignment, size_t bytes,
                                const Void_t *caller);
#ifndef NO_THREADS
static Void_t*   malloc_starter(size_t sz, const Void_t *caller);
static void      free_starter(Void_t* mem, const Void_t *caller);
static Void_t*   malloc_atfork(size_t sz, const Void_t *caller);
static void      free_atfork(Void_t* mem, const Void_t *caller);
#endif
#endif

#else

static void      chunk_free();
static mchunkptr chunk_alloc();
static mchunkptr chunk_realloc();
static mchunkptr chunk_align();
static int       main_trim();
#if USE_ARENAS
static int       heap_trim();
#endif
#if defined _LIBC || defined MALLOC_HOOKS
static Void_t*   malloc_check();
static void      free_check();
static Void_t*   realloc_check();
static Void_t*   memalign_check();
#ifndef NO_THREADS
static Void_t*   malloc_starter();
static void      free_starter();
static Void_t*   malloc_atfork();
static void      free_atfork();
#endif
#endif

#endif


/* sizes, alignments */

#define SIZE_SZ          (sizeof(INTERNAL_SIZE_T))
/* Allow the default to be overwritten on the compiler command line. */
#ifndef MALLOC_ALIGNMENT
# define MALLOC_ALIGNMENT (SIZE_SZ + SIZE_SZ)
#endif
#define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)
#define MINSIZE          (sizeof(struct malloc_chunk))

/* conversion from malloc headers to user pointers, and back */

#define chunk2mem(p)   ((Void_t*)((char*)(p) + 2*SIZE_SZ))
#define mem2chunk(mem) chunk_at_offset((mem), -2*SIZE_SZ)

/* pad request bytes into a usable size, return non-zero on overflow */

#define request2size(req, nb) \
 ((nb = (req) + (SIZE_SZ + MALLOC_ALIGN_MASK)),\
  ((long)nb <= 0 || nb < (INTERNAL_SIZE_T) (req) \
   ? (__set_errno (ENOMEM), 1) \
   : ((nb < (MINSIZE + MALLOC_ALIGN_MASK) \
           ? (nb = MINSIZE) : (nb &= ~MALLOC_ALIGN_MASK)), 0)))

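/* A worked example of request2size (illustrative only; it assumes a
   32-bit build where SIZE_SZ == 4, MALLOC_ALIGNMENT == 8 and
   MINSIZE == 16):

     req = 20
     nb  = 20 + (4 + 7) = 31     no overflow, not below MINSIZE
     nb &= ~7           = 24     final usable chunk size

   So a 20-byte request occupies a 24-byte chunk: a 4-byte size field
   plus 20 bytes of user data; the trailing prev_size field of the
   next chunk is usable by this chunk while it is in use. */
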
/* Check if m has acceptable alignment */

#define aligned_OK(m)  (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0)


/*
  Physical chunk operations
*/

/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */

#define PREV_INUSE 0x1UL

/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */

#define IS_MMAPPED 0x2UL

/* Bits to mask off when extracting size */

#define SIZE_BITS (PREV_INUSE|IS_MMAPPED)

/* Ptr to next physical malloc_chunk. */

#define next_chunk(p) chunk_at_offset((p), (p)->size & ~PREV_INUSE)

/* Ptr to previous physical malloc_chunk */

#define prev_chunk(p) chunk_at_offset((p), -(p)->prev_size)

/* Treat space at ptr + offset as a chunk */

#define chunk_at_offset(p, s)  BOUNDED_1((mchunkptr)(((char*)(p)) + (s)))


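/* Illustrative sketch of physical chunk traversal: the following
   chunk always starts p->size bytes (flag bits masked) after p, while
   the preceding chunk can be located only when it is free, because
   only then is its size recorded in p's prev_size field:

     mchunkptr p   = ...;
     mchunkptr nxt = next_chunk(p);                  (always valid)
     mchunkptr prv = prev_inuse(p) ? 0               (prev_size unusable)
                                   : prev_chunk(p);
*/
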
/*
  Dealing with use bits
*/

/* extract p's inuse bit */

#define inuse(p) (next_chunk(p)->size & PREV_INUSE)

/* extract inuse bit of previous chunk */

#define prev_inuse(p) ((p)->size & PREV_INUSE)

/* check for mmap()'ed chunk */

#define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)

/* set/clear chunk as in use without otherwise disturbing */

#define set_inuse(p) (next_chunk(p)->size |= PREV_INUSE)

#define clear_inuse(p) (next_chunk(p)->size &= ~PREV_INUSE)

/* check/set/clear inuse bits in known places */

#define inuse_bit_at_offset(p, s) \
 (chunk_at_offset((p), (s))->size & PREV_INUSE)

#define set_inuse_bit_at_offset(p, s) \
 (chunk_at_offset((p), (s))->size |= PREV_INUSE)

#define clear_inuse_bit_at_offset(p, s) \
 (chunk_at_offset((p), (s))->size &= ~(PREV_INUSE))


/*
  Dealing with size fields
*/

/* Get size, ignoring use bits */

#define chunksize(p)        ((p)->size & ~(SIZE_BITS))

/* Set size at head, without disturbing its use bit */

#define set_head_size(p, s) ((p)->size = (((p)->size & PREV_INUSE) | (s)))

/* Set size/use ignoring previous bits in header */

#define set_head(p, s)      ((p)->size = (s))

/* Set size at footer (only when chunk is not in use) */

#define set_foot(p, s)      (chunk_at_offset(p, s)->prev_size = (s))


/* access macros */

#define bin_at(a, i)   BOUNDED_1(_bin_at(a, i))
#define _bin_at(a, i)  ((mbinptr)((char*)&(((a)->av)[2*(i)+2]) - 2*SIZE_SZ))
#define init_bin(a, i) ((a)->av[2*(i)+2] = (a)->av[2*(i)+3] = bin_at((a), (i)))
#define next_bin(b)    ((mbinptr)((char*)(b) + 2 * sizeof(((arena*)0)->av[0])))
#define prev_bin(b)    ((mbinptr)((char*)(b) - 2 * sizeof(((arena*)0)->av[0])))

/*
   The first 2 bins are never indexed. The corresponding av cells are instead
   used for bookkeeping. This is not to save space, but to simplify
   indexing, maintain locality, and avoid some initialization tests.
*/

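/* A sketch of how _bin_at maps into the av array (illustrative,
   assuming a 32-bit build where pointers and INTERNAL_SIZE_T are both
   4 bytes): av[2*i+2] and av[2*i+3] hold the fd and bk pointers of
   bin i.  _bin_at(a, i) points 2*SIZE_SZ bytes *before* &av[2*i+2],
   so the fd/bk fields of the resulting fake malloc_chunk overlay
   exactly those two cells, while its prev_size/size fields fall on
   the previous bin's cells and are never touched:

     mbinptr b = bin_at(a, 5);
     b->fd;    first chunk in bin 5, i.e. a->av[2*5+2]
     b->bk;    last chunk in bin 5,  i.e. a->av[2*5+3]
*/
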
#define binblocks(a)      (bin_at(a,0)->size)/* bitvector of nonempty blocks */
#define top(a)            (bin_at(a,0)->fd)  /* The topmost chunk */
#define last_remainder(a) (bin_at(a,1))      /* remainder from last split */

/*
   Because top initially points to its own bin with initial
   zero size, thus forcing extension on the first malloc request,
   we avoid having any special code in malloc to check whether
   it even exists yet.  But we still need to check in malloc_extend_top.
*/

#define initial_top(a)    ((mchunkptr)bin_at(a, 0))

/* field-extraction macros */

#define first(b) ((b)->fd)
#define last(b)  ((b)->bk)

/*
  Indexing into bins
*/

#define bin_index(sz)                                                         \
 (((((unsigned long)(sz)) >> 9) ==    0) ?       (((unsigned long)(sz)) >>  3):\
  ((((unsigned long)(sz)) >> 9) <=    4) ?  56 + (((unsigned long)(sz)) >>  6):\
  ((((unsigned long)(sz)) >> 9) <=   20) ?  91 + (((unsigned long)(sz)) >>  9):\
  ((((unsigned long)(sz)) >> 9) <=   84) ? 110 + (((unsigned long)(sz)) >> 12):\
  ((((unsigned long)(sz)) >> 9) <=  340) ? 119 + (((unsigned long)(sz)) >> 15):\
  ((((unsigned long)(sz)) >> 9) <= 1364) ? 124 + (((unsigned long)(sz)) >> 18):\
                                           126)
/*
  bins for chunks < 512 are all spaced 8 bytes apart, and hold
  identically sized chunks. This is exploited in malloc.
*/

#define MAX_SMALLBIN        63
#define MAX_SMALLBIN_SIZE  512
#define SMALLBIN_WIDTH       8

#define smallbin_index(sz)  (((unsigned long)(sz)) >> 3)

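/* Worked examples of the bin index computation (illustrative):

     sz =    48:    48 >> 9 == 0           -> index 48 >> 3          =   6
     sz =   600:   600 >> 9 == 1,  <=  4   -> index 56 + (600 >> 6)  =  65
     sz = 20480: 20480 >> 9 == 40, <= 84   -> index 110 + (20480>>12) = 115

   Small bins are exact (8-byte spacing); larger bins each cover a
   range of sizes, with the ranges growing roughly geometrically. */
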
/*
  Requests are `small' if both the corresponding and the next bin are small
*/

#define is_small_request(nb) ((nb) < MAX_SMALLBIN_SIZE - SMALLBIN_WIDTH)

/*
   To help compensate for the large number of bins, a one-level index
   structure is used for bin-by-bin searching.  `binblocks' is a
   one-word bitvector recording whether groups of BINBLOCKWIDTH bins
   have any (possibly) non-empty bins, so they can be skipped over
   all at once during traversals.  The bits are NOT always
   cleared as soon as all bins in a block are empty, but instead only
   when all are noticed to be empty during traversal in malloc.
*/

#define BINBLOCKWIDTH     4   /* bins per block */

/* bin<->block macros */

#define idx2binblock(ix)      ((unsigned)1 << ((ix) / BINBLOCKWIDTH))
#define mark_binblock(a, ii)  (binblocks(a) |= idx2binblock(ii))
#define clear_binblock(a, ii) (binblocks(a) &= ~(idx2binblock(ii)))

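/* Illustrative example: bin index 10 lives in block 10 / 4 == 2, so
   mark_binblock sets bit (1 << 2) in binblocks.  A traversal that
   sees (binblocks(a) & 4) == 0 can skip bins 8..11 all at once
   without examining any of them individually. */
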

/* Static bookkeeping data */

/* Helper macro to initialize bins */
#define IAV(i) _bin_at(&main_arena, i), _bin_at(&main_arena, i)

static arena main_arena = {
    {
 0, 0,
 IAV(0),   IAV(1),   IAV(2),   IAV(3),   IAV(4),   IAV(5),   IAV(6),   IAV(7),
 IAV(8),   IAV(9),   IAV(10),  IAV(11),  IAV(12),  IAV(13),  IAV(14),  IAV(15),
 IAV(16),  IAV(17),  IAV(18),  IAV(19),  IAV(20),  IAV(21),  IAV(22),  IAV(23),
 IAV(24),  IAV(25),  IAV(26),  IAV(27),  IAV(28),  IAV(29),  IAV(30),  IAV(31),
 IAV(32),  IAV(33),  IAV(34),  IAV(35),  IAV(36),  IAV(37),  IAV(38),  IAV(39),
 IAV(40),  IAV(41),  IAV(42),  IAV(43),  IAV(44),  IAV(45),  IAV(46),  IAV(47),
 IAV(48),  IAV(49),  IAV(50),  IAV(51),  IAV(52),  IAV(53),  IAV(54),  IAV(55),
 IAV(56),  IAV(57),  IAV(58),  IAV(59),  IAV(60),  IAV(61),  IAV(62),  IAV(63),
 IAV(64),  IAV(65),  IAV(66),  IAV(67),  IAV(68),  IAV(69),  IAV(70),  IAV(71),
 IAV(72),  IAV(73),  IAV(74),  IAV(75),  IAV(76),  IAV(77),  IAV(78),  IAV(79),
 IAV(80),  IAV(81),  IAV(82),  IAV(83),  IAV(84),  IAV(85),  IAV(86),  IAV(87),
 IAV(88),  IAV(89),  IAV(90),  IAV(91),  IAV(92),  IAV(93),  IAV(94),  IAV(95),
 IAV(96),  IAV(97),  IAV(98),  IAV(99),  IAV(100), IAV(101), IAV(102), IAV(103),
 IAV(104), IAV(105), IAV(106), IAV(107), IAV(108), IAV(109), IAV(110), IAV(111),
 IAV(112), IAV(113), IAV(114), IAV(115), IAV(116), IAV(117), IAV(118), IAV(119),
 IAV(120), IAV(121), IAV(122), IAV(123), IAV(124), IAV(125), IAV(126), IAV(127)
    },
    &main_arena, /* next */
    0, /* size */
#if THREAD_STATS
    0, 0, 0, /* stat_lock_direct, stat_lock_loop, stat_lock_wait */
#endif
    MUTEX_INITIALIZER /* mutex */
};

#undef IAV

/* Thread specific data */

static tsd_key_t arena_key;
static mutex_t list_lock = MUTEX_INITIALIZER;

#if THREAD_STATS
static int stat_n_heaps;
#define THREAD_STAT(x) x
#else
#define THREAD_STAT(x) do ; while(0)
#endif

/* variables holding tunable values */

static unsigned long trim_threshold   = DEFAULT_TRIM_THRESHOLD;
static unsigned long top_pad          = DEFAULT_TOP_PAD;
static unsigned int  n_mmaps_max      = DEFAULT_MMAP_MAX;
static unsigned long mmap_threshold   = DEFAULT_MMAP_THRESHOLD;
static int           check_action     = DEFAULT_CHECK_ACTION;

/* The first value returned from sbrk */
static char* sbrk_base = (char*)(-1);

/* The maximum memory obtained from system via sbrk */
static unsigned long max_sbrked_mem;

/* The maximum via either sbrk or mmap (too difficult to track with threads) */
#ifdef NO_THREADS
static unsigned long max_total_mem;
#endif

/* The total memory obtained from system via sbrk */
#define sbrked_mem (main_arena.size)

/* Tracking mmaps */

static unsigned int n_mmaps;
static unsigned int max_n_mmaps;
static unsigned long mmapped_mem;
static unsigned long max_mmapped_mem;

/* Mapped memory in non-main arenas (reliable only for NO_THREADS). */
static unsigned long arena_mem;


#ifndef _LIBC
#define weak_variable
#else
/* In GNU libc we want the hook variables to be weak definitions to
   avoid a problem with Emacs. */
#define weak_variable weak_function
#endif

/* Already initialized? */
int __malloc_initialized = -1;

#ifndef NO_THREADS

/* Magic value for the thread-specific arena pointer when
   malloc_atfork() is in use. */

#define ATFORK_ARENA_PTR ((Void_t*)-1)

/* The following two functions are registered via thread_atfork() to
   make sure that the mutexes remain in a consistent state in the
   fork()ed version of a thread.  Also adapt the malloc and free hooks
   temporarily, because the `atfork' handler mechanism may use
   malloc/free internally (e.g. in LinuxThreads). */

#if defined _LIBC || defined MALLOC_HOOKS
static __malloc_ptr_t (*save_malloc_hook) __MALLOC_P ((size_t __size,
                                                       const __malloc_ptr_t));
static void           (*save_free_hook) __MALLOC_P ((__malloc_ptr_t __ptr,
                                                     const __malloc_ptr_t));
static Void_t*        save_arena;
#endif

static void
ptmalloc_lock_all __MALLOC_P((void))
{
  arena *ar_ptr;

  (void)mutex_lock(&list_lock);
  for(ar_ptr = &main_arena;;) {
    (void)mutex_lock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
#if defined _LIBC || defined MALLOC_HOOKS
  save_malloc_hook = __malloc_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_atfork;
  __free_hook = free_atfork;
  /* Only the current thread may perform malloc/free calls now. */
  tsd_getspecific(arena_key, save_arena);
  tsd_setspecific(arena_key, ATFORK_ARENA_PTR);
#endif
}

static void
ptmalloc_unlock_all __MALLOC_P((void))
{
  arena *ar_ptr;

#if defined _LIBC || defined MALLOC_HOOKS
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
#endif
  for(ar_ptr = &main_arena;;) {
    (void)mutex_unlock(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  (void)mutex_unlock(&list_lock);
}

static void
ptmalloc_init_all __MALLOC_P((void))
{
  arena *ar_ptr;

#if defined _LIBC || defined MALLOC_HOOKS
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
#endif
  for(ar_ptr = &main_arena;;) {
    (void)mutex_init(&ar_ptr->mutex);
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
  (void)mutex_init(&list_lock);
}

#endif /* !defined NO_THREADS */

/* Initialization routine. */
#if defined(_LIBC)
#if 0
static void ptmalloc_init __MALLOC_P ((void)) __attribute__ ((constructor));
#endif

#ifdef _LIBC
#include <string.h>
extern char **environ;

static char *
internal_function
next_env_entry (char ***position)
{
  char **current = *position;
  char *result = NULL;

  while (*current != NULL)
    {
      if (__builtin_expect ((*current)[0] == 'M', 0)
          && (*current)[1] == 'A'
          && (*current)[2] == 'L'
          && (*current)[3] == 'L'
          && (*current)[4] == 'O'
          && (*current)[5] == 'C'
          && (*current)[6] == '_')
        {
          result = &(*current)[7];

          /* Save current position for next visit. */
          *position = ++current;

          break;
        }

      ++current;
    }

  return result;
}
#endif

static void
ptmalloc_init __MALLOC_P((void))
#else
void
ptmalloc_init __MALLOC_P((void))
#endif
{
#if defined _LIBC || defined MALLOC_HOOKS
# if __STD_C
  const char* s;
# else
  char* s;
# endif
#endif
  int secure;

  if(__malloc_initialized >= 0) return;
  __malloc_initialized = 0;
#ifdef _LIBC
  __libc_pagesize = __getpagesize();
#endif
#ifndef NO_THREADS
#if defined _LIBC || defined MALLOC_HOOKS
  /* With some threads implementations, creating thread-specific data
     or initializing a mutex may call malloc() itself.  Provide a
     simple starter version (realloc() won't work). */
  save_malloc_hook = __malloc_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_starter;
  __free_hook = free_starter;
#endif
#ifdef _LIBC
  /* Initialize the pthreads interface. */
  if (__pthread_initialize != NULL)
    __pthread_initialize();
#endif
#endif /* !defined NO_THREADS */
  mutex_init(&main_arena.mutex);
  mutex_init(&list_lock);
  tsd_key_create(&arena_key, NULL);
  tsd_setspecific(arena_key, (Void_t *)&main_arena);
  thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_init_all);
#if defined _LIBC || defined MALLOC_HOOKS
#ifndef NO_THREADS
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
#endif
  secure = __libc_enable_secure;
#ifdef _LIBC
  s = NULL;
  if (environ != NULL)
    {
      char **runp = environ;
      char *envline;

      while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL, 0))
        {
          size_t len = strcspn (envline, "=");

          if (envline[len] != '=')
            /* This is a "MALLOC_" variable at the end of the string
               without a '=' character.  Ignore it since otherwise we
               will access invalid memory below. */
            continue;

          switch (len)
            {
            case 6:
              if (memcmp (envline, "CHECK_", 6) == 0)
                s = &envline[7];
              break;
            case 8:
              if (! secure && memcmp (envline, "TOP_PAD_", 8) == 0)
                mALLOPt(M_TOP_PAD, atoi(&envline[9]));
              break;
            case 9:
              if (! secure && memcmp (envline, "MMAP_MAX_", 9) == 0)
                mALLOPt(M_MMAP_MAX, atoi(&envline[10]));
              break;
            case 15:
              if (! secure)
                {
                  if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
                    mALLOPt(M_TRIM_THRESHOLD, atoi(&envline[16]));
                  else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
                    mALLOPt(M_MMAP_THRESHOLD, atoi(&envline[16]));
                }
              break;
            default:
              break;
            }
        }
    }
#else
  if (! secure)
    {
      if((s = getenv("MALLOC_TRIM_THRESHOLD_")))
        mALLOPt(M_TRIM_THRESHOLD, atoi(s));
      if((s = getenv("MALLOC_TOP_PAD_")))
        mALLOPt(M_TOP_PAD, atoi(s));
      if((s = getenv("MALLOC_MMAP_THRESHOLD_")))
        mALLOPt(M_MMAP_THRESHOLD, atoi(s));
      if((s = getenv("MALLOC_MMAP_MAX_")))
        mALLOPt(M_MMAP_MAX, atoi(s));
    }
  s = getenv("MALLOC_CHECK_");
#endif
  if(s) {
    if(s[0]) mALLOPt(M_CHECK_ACTION, (int)(s[0] - '0'));
    __malloc_check_init();
  }
  if(__malloc_initialize_hook != NULL)
    (*__malloc_initialize_hook)();
#endif
  __malloc_initialized = 1;
}

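/* Illustrative example of the tunables parsed above: running a
   program as

     MALLOC_TRIM_THRESHOLD_=65536 MALLOC_MMAP_MAX_=0 ./prog

   has the same effect as calling, before the first allocation,

     mallopt(M_TRIM_THRESHOLD, 65536);   trim once top exceeds 64 kB
     mallopt(M_MMAP_MAX, 0);             never satisfy requests via mmap

   The threshold/pad/max variables are ignored for set-uid ("secure")
   programs; MALLOC_CHECK_ is read regardless in this version. */
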
/* There are platforms (e.g. Hurd) with a link-time hook mechanism. */
#ifdef thread_atfork_static
thread_atfork_static(ptmalloc_lock_all, ptmalloc_unlock_all, \
                     ptmalloc_init_all)
#endif

#if defined _LIBC || defined MALLOC_HOOKS

/* Hooks for debugging versions.  The initial hooks just call the
   initialization routine, then do the normal work. */

static Void_t*
#if __STD_C
malloc_hook_ini(size_t sz, const __malloc_ptr_t caller)
#else
malloc_hook_ini(sz, caller)
     size_t sz; const __malloc_ptr_t caller;
#endif
{
  __malloc_hook = NULL;
  ptmalloc_init();
  return mALLOc(sz);
}

static Void_t*
#if __STD_C
realloc_hook_ini(Void_t* ptr, size_t sz, const __malloc_ptr_t caller)
#else
realloc_hook_ini(ptr, sz, caller)
     Void_t* ptr; size_t sz; const __malloc_ptr_t caller;
#endif
{
  __malloc_hook = NULL;
  __realloc_hook = NULL;
  ptmalloc_init();
  return rEALLOc(ptr, sz);
}

static Void_t*
#if __STD_C
memalign_hook_ini(size_t alignment, size_t sz, const __malloc_ptr_t caller)
#else
memalign_hook_ini(alignment, sz, caller)
     size_t alignment; size_t sz; const __malloc_ptr_t caller;
#endif
{
  __memalign_hook = NULL;
  ptmalloc_init();
  return mEMALIGn(alignment, sz);
}

void weak_variable (*__malloc_initialize_hook) __MALLOC_P ((void)) = NULL;
void weak_variable (*__free_hook) __MALLOC_P ((__malloc_ptr_t __ptr,
                                               const __malloc_ptr_t)) = NULL;
__malloc_ptr_t weak_variable (*__malloc_hook)
 __MALLOC_P ((size_t __size, const __malloc_ptr_t)) = malloc_hook_ini;
__malloc_ptr_t weak_variable (*__realloc_hook)
 __MALLOC_P ((__malloc_ptr_t __ptr, size_t __size, const __malloc_ptr_t))
     = realloc_hook_ini;
__malloc_ptr_t weak_variable (*__memalign_hook)
 __MALLOC_P ((size_t __alignment, size_t __size, const __malloc_ptr_t))
     = memalign_hook_ini;
void weak_variable (*__after_morecore_hook) __MALLOC_P ((void)) = NULL;

/* Whether we are using malloc checking. */
static int using_malloc_checking;

/* A flag that is set by malloc_set_state, to signal that malloc checking
   must not be enabled on the request from the user (via the MALLOC_CHECK_
   environment variable).  It is reset by __malloc_check_init to tell
   malloc_set_state that the user has requested malloc checking.

   The purpose of this flag is to make sure that malloc checking is not
   enabled when the heap to be restored was constructed without malloc
   checking, and thus does not contain the required magic bytes.
   Otherwise the heap would be corrupted by calls to free and realloc.  If
   it turns out that the heap was created with malloc checking and the
   user has requested it, malloc_set_state just calls __malloc_check_init
   again to enable it.  On the other hand, reusing such a heap without
   further malloc checking is safe. */
static int disallow_malloc_check;

/* Activate a standard set of debugging hooks. */
void
__malloc_check_init()
{
  if (disallow_malloc_check) {
    disallow_malloc_check = 0;
    return;
  }
  using_malloc_checking = 1;
  __malloc_hook = malloc_check;
  __free_hook = free_check;
  __realloc_hook = realloc_check;
  __memalign_hook = memalign_check;
  if(check_action & 1)
    fprintf(stderr, "malloc: using debugging hooks\n");
}

#endif


/* Routines dealing with mmap(). */

#if HAVE_MMAP

#ifndef MAP_ANONYMOUS

static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */

#define MMAP(addr, size, prot, flags) ((dev_zero_fd < 0) ? \
 (dev_zero_fd = open("/dev/zero", O_RDWR), \
  mmap((addr), (size), (prot), (flags), dev_zero_fd, 0)) : \
   mmap((addr), (size), (prot), (flags), dev_zero_fd, 0))

#else

#define MMAP(addr, size, prot, flags) \
 (mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS, -1, 0))

#endif

#if defined __GNUC__ && __GNUC__ >= 2
/* This function is only called from one place, inline it. */
__inline__
#endif
static mchunkptr
internal_function
#if __STD_C
mmap_chunk(size_t size)
#else
mmap_chunk(size) size_t size;
#endif
{
  size_t page_mask = malloc_getpagesize - 1;
  mchunkptr p;

  /* For mmapped chunks, the overhead is one SIZE_SZ unit larger, because
   * there is no following chunk whose prev_size field could be used.
   */
  size = (size + SIZE_SZ + page_mask) & ~page_mask;

  p = (mchunkptr)MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE);
  if(p == (mchunkptr) MAP_FAILED) return 0;

  n_mmaps++;
  if (n_mmaps > max_n_mmaps) max_n_mmaps = n_mmaps;

  /* We demand that eight bytes into a page must be 8-byte aligned. */
  assert(aligned_OK(chunk2mem(p)));

  /* The offset to the start of the mmapped region is stored
   * in the prev_size field of the chunk; normally it is zero,
   * but that can be changed in memalign().
   */
  p->prev_size = 0;
  set_head(p, size|IS_MMAPPED);

  mmapped_mem += size;
  if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem)
    max_mmapped_mem = mmapped_mem;
#ifdef NO_THREADS
  if ((unsigned long)(mmapped_mem + arena_mem + sbrked_mem) > max_total_mem)
    max_total_mem = mmapped_mem + arena_mem + sbrked_mem;
#endif
  return p;
}

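/* Illustrative sizing example for mmap_chunk (assuming a 4096-byte
   page and SIZE_SZ == 4): a request of size == 10000 is padded by one
   SIZE_SZ and rounded up to a whole number of pages:

     size = (10000 + 4 + 4095) & ~4095 = 12288   (three pages)

   The extra SIZE_SZ is needed precisely because an mmapped chunk has
   no following chunk whose prev_size field could absorb it. */
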
static void
internal_function
#if __STD_C
munmap_chunk(mchunkptr p)
#else
munmap_chunk(p) mchunkptr p;
#endif
{
  INTERNAL_SIZE_T size = chunksize(p);
  int ret;

  assert (chunk_is_mmapped(p));
  assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem));
  assert((n_mmaps > 0));
  assert(((p->prev_size + size) & (malloc_getpagesize-1)) == 0);

  n_mmaps--;
  mmapped_mem -= (size + p->prev_size);

  ret = munmap((char *)p - p->prev_size, size + p->prev_size);

  /* munmap returns non-zero on failure */
  assert(ret == 0);
}

#if HAVE_MREMAP

static mchunkptr
internal_function
#if __STD_C
mremap_chunk(mchunkptr p, size_t new_size)
#else
mremap_chunk(p, new_size) mchunkptr p; size_t new_size;
#endif
{
  size_t page_mask = malloc_getpagesize - 1;
  INTERNAL_SIZE_T offset = p->prev_size;
  INTERNAL_SIZE_T size = chunksize(p);
  char *cp;

  assert (chunk_is_mmapped(p));
  assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem));
  assert((n_mmaps > 0));
  assert(((size + offset) & (malloc_getpagesize-1)) == 0);

  /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
  new_size = (new_size + offset + SIZE_SZ + page_mask) & ~page_mask;

  cp = (char *)mremap((char *)p - offset, size + offset, new_size,
                      MREMAP_MAYMOVE);

  if (cp == MAP_FAILED) return 0;

  p = (mchunkptr)(cp + offset);

  assert(aligned_OK(chunk2mem(p)));

  assert((p->prev_size == offset));
  set_head(p, (new_size - offset)|IS_MMAPPED);

  mmapped_mem -= size + offset;
  mmapped_mem += new_size;
  if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem)
    max_mmapped_mem = mmapped_mem;
#ifdef NO_THREADS
  if ((unsigned long)(mmapped_mem + arena_mem + sbrked_mem) > max_total_mem)
    max_total_mem = mmapped_mem + arena_mem + sbrked_mem;
#endif
  return p;
}

#endif /* HAVE_MREMAP */

#endif /* HAVE_MMAP */


/* Managing heaps and arenas (for concurrent threads) */

#if USE_ARENAS

/* Create a new heap.  size is automatically rounded up to a multiple
   of the page size. */

static heap_info *
internal_function
#if __STD_C
new_heap(size_t size)
#else
new_heap(size) size_t size;
#endif
{
  size_t page_mask = malloc_getpagesize - 1;
  char *p1, *p2;
  unsigned long ul;
  heap_info *h;

  if(size+top_pad < HEAP_MIN_SIZE)
    size = HEAP_MIN_SIZE;
  else if(size+top_pad <= HEAP_MAX_SIZE)
    size += top_pad;
  else if(size > HEAP_MAX_SIZE)
    return 0;
  else
    size = HEAP_MAX_SIZE;
  size = (size + page_mask) & ~page_mask;

  /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
     No swap space needs to be reserved for the following large
     mapping (on Linux, this is the case for all non-writable mappings
     anyway). */
  p1 = (char *)MMAP(0, HEAP_MAX_SIZE<<1, PROT_NONE, MAP_PRIVATE|MAP_NORESERVE);
  if(p1 != MAP_FAILED) {
    p2 = (char *)(((unsigned long)p1 + (HEAP_MAX_SIZE-1)) & ~(HEAP_MAX_SIZE-1));
    ul = p2 - p1;
    if (ul)
      munmap(p1, ul);
    munmap(p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
  } else {
    /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
       is already aligned. */
    p2 = (char *)MMAP(0, HEAP_MAX_SIZE, PROT_NONE, MAP_PRIVATE|MAP_NORESERVE);
    if(p2 == MAP_FAILED)
      return 0;
    if((unsigned long)p2 & (HEAP_MAX_SIZE-1)) {
      munmap(p2, HEAP_MAX_SIZE);
      return 0;
    }
  }
  if(MMAP(p2, size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_FIXED)
     == (char *) MAP_FAILED) {
    munmap(p2, HEAP_MAX_SIZE);
    return 0;
  }
  h = (heap_info *)p2;
  h->size = size;
  THREAD_STAT(stat_n_heaps++);
  return h;
}

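/* A sketch of the alignment trick used above (illustrative, assuming
   HEAP_MAX_SIZE == 0x100000): mapping twice the heap size guarantees
   that an aligned block of HEAP_MAX_SIZE fits inside the mapping.  If
   mmap returns p1 == 0x4A0C3000, then

     p2 = (0x4A0C3000 + 0xFFFFF) & ~0xFFFFF = 0x4A100000

   and the slop before p2 (0x3D000 bytes here) and after
   p2 + HEAP_MAX_SIZE is unmapped again, leaving exactly one
   HEAP_MAX_SIZE-aligned region. */
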
/* Grow or shrink a heap.  size is automatically rounded up to a
   multiple of the page size if it is positive. */

static int
#if __STD_C
grow_heap(heap_info *h, long diff)
#else
grow_heap(h, diff) heap_info *h; long diff;
#endif
{
  size_t page_mask = malloc_getpagesize - 1;
  long new_size;

  if(diff >= 0) {
    diff = (diff + page_mask) & ~page_mask;
    new_size = (long)h->size + diff;
    if(new_size > HEAP_MAX_SIZE)
      return -1;
    if(MMAP((char *)h + h->size, diff, PROT_READ|PROT_WRITE,
            MAP_PRIVATE|MAP_FIXED) == (char *) MAP_FAILED)
      return -2;
  } else {
    new_size = (long)h->size + diff;
    if(new_size < (long)sizeof(*h))
      return -1;
    /* Try to re-map the extra heap space freshly to save memory, and
       make it inaccessible. */
    if((char *)MMAP((char *)h + new_size, -diff, PROT_NONE,
                    MAP_PRIVATE|MAP_FIXED) == (char *) MAP_FAILED)
      return -2;
  }
  h->size = new_size;
  return 0;
}

/* Delete a heap. */

#define delete_heap(heap) munmap((char*)(heap), HEAP_MAX_SIZE)

/* arena_get() acquires an arena and locks the corresponding mutex.
   First, try the one last locked successfully by this thread.  (This
   is the common case and handled with a macro for speed.)  Then, loop
   once over the circularly linked list of arenas.  If no arena is
   readily available, create a new one.  In this latter case, `size'
   is just a hint as to how much memory will be required immediately
   in the new arena. */

#define arena_get(ptr, size) do { \
  Void_t *vptr = NULL; \
  ptr = (arena *)tsd_getspecific(arena_key, vptr); \
  if(ptr && !mutex_trylock(&ptr->mutex)) { \
    THREAD_STAT(++(ptr->stat_lock_direct)); \
  } else \
    ptr = arena_get2(ptr, (size)); \
} while(0)

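/* Illustrative usage pattern for arena_get (this mirrors how the
   public entry points below use it; `nb' is a padded request size):

     arena *ar_ptr;
     arena_get(ar_ptr, nb);        ar_ptr is locked on success
     if(ar_ptr) {
       ... allocate from ar_ptr ...
       (void)mutex_unlock(&ar_ptr->mutex);
     }

   The macro only ever *tries* the thread's cached arena lock; on
   contention it falls through to arena_get2, which may lock a
   different arena or create a fresh one. */
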
static arena *
internal_function
#if __STD_C
arena_get2(arena *a_tsd, size_t size)
#else
arena_get2(a_tsd, size) arena *a_tsd; size_t size;
#endif
{
  arena *a;
  heap_info *h;
  char *ptr;
  int i;
  unsigned long misalign;

  if(!a_tsd)
    a = a_tsd = &main_arena;
  else {
    a = a_tsd->next;
    if(!a) {
      /* This can only happen while initializing the new arena. */
      (void)mutex_lock(&main_arena.mutex);
      THREAD_STAT(++(main_arena.stat_lock_wait));
      return &main_arena;
    }
  }

  /* Check the global, circularly linked list for available arenas. */
 repeat:
  do {
    if(!mutex_trylock(&a->mutex)) {
      THREAD_STAT(++(a->stat_lock_loop));
      tsd_setspecific(arena_key, (Void_t *)a);
      return a;
    }
    a = a->next;
  } while(a != a_tsd);

  /* If not even the list_lock can be obtained, try again.  This can
     happen during `atfork', or for example on systems where thread
     creation makes it temporarily impossible to obtain _any_
     locks. */
  if(mutex_trylock(&list_lock)) {
    a = a_tsd;
    goto repeat;
  }
  (void)mutex_unlock(&list_lock);

  /* Nothing immediately available, so generate a new arena. */
  h = new_heap(size + (sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT));
  if(!h) {
    /* Maybe size is too large to fit in a single heap.  So, just try
       to create a minimally-sized arena and let chunk_alloc() attempt
       to deal with the large request via mmap_chunk(). */
    h = new_heap(sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT);
    if(!h)
      return 0;
  }
  a = h->ar_ptr = (arena *)(h+1);
  for(i=0; i<NAV; i++)
    init_bin(a, i);
  a->next = NULL;
  a->size = h->size;
  arena_mem += h->size;
#ifdef NO_THREADS
  if((unsigned long)(mmapped_mem + arena_mem + sbrked_mem) > max_total_mem)
    max_total_mem = mmapped_mem + arena_mem + sbrked_mem;
#endif
  tsd_setspecific(arena_key, (Void_t *)a);
  mutex_init(&a->mutex);
  i = mutex_lock(&a->mutex); /* remember result */

  /* Set up the top chunk, with proper alignment. */
  ptr = (char *)(a + 1);
  misalign = (unsigned long)chunk2mem(ptr) & MALLOC_ALIGN_MASK;
  if (misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;
  top(a) = (mchunkptr)ptr;
  set_head(top(a), (((char*)h + h->size) - ptr) | PREV_INUSE);

  /* Add the new arena to the list. */
  (void)mutex_lock(&list_lock);
  a->next = main_arena.next;
  main_arena.next = a;
  (void)mutex_unlock(&list_lock);

  if(i) /* locking failed; keep arena for further attempts later */
    return 0;

  THREAD_STAT(++(a->stat_lock_loop));
  return a;
}

/* find the heap and corresponding arena for a given ptr */

#define heap_for_ptr(ptr) \
 ((heap_info *)((unsigned long)(ptr) & ~(HEAP_MAX_SIZE-1)))
#define arena_for_ptr(ptr) \
 (((mchunkptr)(ptr) < top(&main_arena) && (char *)(ptr) >= sbrk_base) ? \
  &main_arena : heap_for_ptr(ptr)->ar_ptr)

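/* Illustrative example: because every heap starts at an address that
   is a multiple of HEAP_MAX_SIZE (see new_heap), masking off the low
   bits of any chunk address inside it recovers the heap_info header.
   Assuming HEAP_MAX_SIZE == 0x100000, a chunk at 0x4A1732A8 belongs
   to the heap at

     0x4A1732A8 & ~0xFFFFF = 0x4A100000

   whose first bytes are the heap_info, giving O(1) pointer-to-arena
   lookup without any global table. */
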
#else /* !USE_ARENAS */

/* There is only one arena, main_arena. */

#define arena_get(ptr, sz) (ptr = &main_arena)
#define arena_for_ptr(ptr) (&main_arena)

#endif /* USE_ARENAS */


/*
  Debugging support
*/

#if MALLOC_DEBUG

/*
  These routines make a number of assertions about the states
  of data structures that should be true at all times. If any
  are not true, it's very likely that a user program has somehow
  trashed memory. (It's also possible that there is a coding error
  in malloc. In which case, please report it!)
*/

#if __STD_C
static void do_check_chunk(arena *ar_ptr, mchunkptr p)
#else
static void do_check_chunk(ar_ptr, p) arena *ar_ptr; mchunkptr p;
#endif
{
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;

  /* No checkable chunk is mmapped */
  assert(!chunk_is_mmapped(p));

#if USE_ARENAS
  if(ar_ptr != &main_arena) {
    heap_info *heap = heap_for_ptr(p);
    assert(heap->ar_ptr == ar_ptr);
    if(p != top(ar_ptr))
      assert((char *)p + sz <= (char *)heap + heap->size);
    else
      assert((char *)p + sz == (char *)heap + heap->size);
    return;
  }
#endif

  /* Check for legal address ... */
  assert((char*)p >= sbrk_base);
  if (p != top(ar_ptr))
    assert((char*)p + sz <= (char*)top(ar_ptr));
  else
    assert((char*)p + sz <= sbrk_base + sbrked_mem);

}

#if __STD_C
static void do_check_free_chunk(arena *ar_ptr, mchunkptr p)
#else
static void do_check_free_chunk(ar_ptr, p) arena *ar_ptr; mchunkptr p;
#endif
{
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
  mchunkptr next = chunk_at_offset(p, sz);

  do_check_chunk(ar_ptr, p);

  /* Check whether it claims to be free ... */
  assert(!inuse(p));

  /* Must have OK size and fields */
  assert((long)sz >= (long)MINSIZE);
  assert((sz & MALLOC_ALIGN_MASK) == 0);
  assert(aligned_OK(chunk2mem(p)));
  /* ... matching footer field */
  assert(next->prev_size == sz);
  /* ... and is fully consolidated */
  assert(prev_inuse(p));
  assert (next == top(ar_ptr) || inuse(next));

  /* ... and has minimally sane links */
  assert(p->fd->bk == p);
  assert(p->bk->fd == p);
}

#if __STD_C
static void do_check_inuse_chunk(arena *ar_ptr, mchunkptr p)
#else
static void do_check_inuse_chunk(ar_ptr, p) arena *ar_ptr; mchunkptr p;
#endif
{
  mchunkptr next = next_chunk(p);
  do_check_chunk(ar_ptr, p);

  /* Check whether it claims to be in use ... */
  assert(inuse(p));

  /* ... whether its size is OK (it might be a fencepost) ... */
  assert(chunksize(p) >= MINSIZE || next->size == (0|PREV_INUSE));

  /* ... and is surrounded by OK chunks.
    Since more things can be checked with free chunks than inuse ones,
    if an inuse chunk borders them and debug is on, it's worth doing them.
  */
  if (!prev_inuse(p))
  {
    mchunkptr prv = prev_chunk(p);
    assert(next_chunk(prv) == p);
    do_check_free_chunk(ar_ptr, prv);
  }
  if (next == top(ar_ptr))
  {
    assert(prev_inuse(next));
    assert(chunksize(next) >= MINSIZE);
  }
  else if (!inuse(next))
    do_check_free_chunk(ar_ptr, next);

}

#if __STD_C
static void do_check_malloced_chunk(arena *ar_ptr,
                                    mchunkptr p, INTERNAL_SIZE_T s)
#else
static void do_check_malloced_chunk(ar_ptr, p, s)
     arena *ar_ptr; mchunkptr p; INTERNAL_SIZE_T s;
#endif
{
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
  long room = sz - s;

  do_check_inuse_chunk(ar_ptr, p);

  /* Legal size ... */
  assert((long)sz >= (long)MINSIZE);
  assert((sz & MALLOC_ALIGN_MASK) == 0);
  assert(room >= 0);
  assert(room < (long)MINSIZE);

  /* ... and alignment */
  assert(aligned_OK(chunk2mem(p)));

  /* ... and was allocated at front of an available chunk */
  assert(prev_inuse(p));

}

#define check_free_chunk(A,P) do_check_free_chunk(A,P)
#define check_inuse_chunk(A,P) do_check_inuse_chunk(A,P)
#define check_chunk(A,P) do_check_chunk(A,P)
#define check_malloced_chunk(A,P,N) do_check_malloced_chunk(A,P,N)
#else
#define check_free_chunk(A,P)
#define check_inuse_chunk(A,P)
#define check_chunk(A,P)
#define check_malloced_chunk(A,P,N)
#endif


/*
  Macro-based internal utilities
*/

/*
  Linking chunks in bin lists.
  Call these only with variables, not arbitrary expressions, as arguments.
*/

/*
  Place chunk p of size s in its bin, in size order,
  putting it ahead of others of same size.
*/

#define frontlink(A, P, S, IDX, BK, FD)                                       \
{                                                                             \
  if (S < MAX_SMALLBIN_SIZE)                                                  \
  {                                                                           \
    IDX = smallbin_index(S);                                                  \
    mark_binblock(A, IDX);                                                    \
    BK = bin_at(A, IDX);                                                      \
    FD = BK->fd;                                                              \
    P->bk = BK;                                                               \
    P->fd = FD;                                                               \
    FD->bk = BK->fd = P;                                                      \
  }                                                                           \
  else                                                                        \
  {                                                                           \
    IDX = bin_index(S);                                                       \
    BK = bin_at(A, IDX);                                                      \
    FD = BK->fd;                                                              \
    if (FD == BK) mark_binblock(A, IDX);                                      \
    else                                                                      \
    {                                                                         \
      while (FD != BK && S < chunksize(FD)) FD = FD->fd;                      \
      BK = FD->bk;                                                            \
    }                                                                         \
    P->bk = BK;                                                               \
    P->fd = FD;                                                               \
    FD->bk = BK->fd = P;                                                      \
  }                                                                           \
}

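/* Illustrative walk-through of frontlink for a large chunk: bins for
   sizes >= MAX_SMALLBIN_SIZE keep their chunks sorted in decreasing
   size order.  Suppose bin 65 holds chunks of sizes 632 and 584, and
   a chunk P of size 600 is freed into it:

     FD scans 632 (600 < 632, advance) and stops at 584 (600 >= 584),
     so P is spliced in between:  bin -> 632 -> 600(P) -> 584 -> bin

   A chunk equal in size to an existing one stops the scan at that
   chunk and is inserted before it, i.e. ahead of others of the same
   size, as the comment above states. */
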
/* take a chunk off a list */

#define unlink(P, BK, FD)                                                     \
{                                                                             \
  BK = P->bk;                                                                 \
  FD = P->fd;                                                                 \
  FD->bk = BK;                                                                \
  BK->fd = FD;                                                                \
}                                                                             \

/* Place p as the last remainder */

#define link_last_remainder(A, P)                                             \
{                                                                             \
  last_remainder(A)->fd = last_remainder(A)->bk = P;                          \
  P->fd = P->bk = last_remainder(A);                                          \
}

/* Clear the last_remainder bin */

#define clear_last_remainder(A) \
  (last_remainder(A)->fd = last_remainder(A)->bk = last_remainder(A))


/*
  Extend the top-most chunk by obtaining memory from system.
  Main interface to sbrk (but see also malloc_trim).
*/

#if defined __GNUC__ && __GNUC__ >= 2
/* This function is called only from one place, inline it. */
__inline__
#endif
static void
internal_function
#if __STD_C
malloc_extend_top(arena *ar_ptr, INTERNAL_SIZE_T nb)
#else
malloc_extend_top(ar_ptr, nb) arena *ar_ptr; INTERNAL_SIZE_T nb;
#endif
{
  unsigned long pagesz = malloc_getpagesize;
  mchunkptr old_top = top(ar_ptr); /* Record state of old top */
  INTERNAL_SIZE_T old_top_size = chunksize(old_top);
  INTERNAL_SIZE_T top_size; /* new size of top chunk */

#if USE_ARENAS
  if(ar_ptr == &main_arena) {
#endif

    char* brk; /* return value from sbrk */
    INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of sbrked space */
    INTERNAL_SIZE_T correction; /* bytes for 2nd sbrk call */
    char* new_brk; /* return of 2nd sbrk call */
    char* old_end = (char*)(chunk_at_offset(old_top, old_top_size));

    /* Pad request with top_pad plus minimal overhead */
    INTERNAL_SIZE_T sbrk_size = nb + top_pad + MINSIZE;

    /* If not the first time through, round to preserve page boundary */
    /* Otherwise, we need to correct to a page size below anyway. */
    /* (We also correct below if an intervening foreign sbrk call.) */

    if (sbrk_base != (char*)(-1))
      sbrk_size = (sbrk_size + (pagesz - 1)) & ~(pagesz - 1);

    brk = (char*)(MORECORE (sbrk_size));

    /* Fail if sbrk failed or if a foreign sbrk call killed our space */
    if (brk == (char*)(MORECORE_FAILURE) ||
        (brk < old_end && old_top != initial_top(&main_arena)))
      return;

#if defined _LIBC || defined MALLOC_HOOKS
    /* Call the `morecore' hook if necessary. */
    if (__after_morecore_hook)
      (*__after_morecore_hook) ();
#endif

    sbrked_mem += sbrk_size;

    if (brk == old_end) { /* can just add bytes to current top */
      top_size = sbrk_size + old_top_size;
      set_head(old_top, top_size | PREV_INUSE);
      old_top = 0; /* don't free below */
    } else {
      if (sbrk_base == (char*)(-1)) /* First time through. Record base */
        sbrk_base = brk;
      else
        /* Someone else called sbrk().  Count those bytes as sbrked_mem. */
        sbrked_mem += brk - (char*)old_end;

      /* Guarantee alignment of first new chunk made from this space */
      front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK;
      if (front_misalign > 0) {
        correction = (MALLOC_ALIGNMENT) - front_misalign;
        brk += correction;
      } else
        correction = 0;

      /* Guarantee the next brk will be at a page boundary */
      correction += pagesz - ((unsigned long)(brk + sbrk_size) & (pagesz - 1));

      /* Allocate correction */
      new_brk = (char*)(MORECORE (correction));
      if (new_brk == (char*)(MORECORE_FAILURE)) return;

#if defined _LIBC || defined MALLOC_HOOKS
      /* Call the `morecore' hook if necessary. */
      if (__after_morecore_hook)
        (*__after_morecore_hook) ();
#endif

      sbrked_mem += correction;

      top(&main_arena) = chunk_at_offset(brk, 0);
      top_size = new_brk - brk + correction;
      set_head(top(&main_arena), top_size | PREV_INUSE);

      if (old_top == initial_top(&main_arena))
        old_top = 0; /* don't free below */
    }

    if ((unsigned long)sbrked_mem > (unsigned long)max_sbrked_mem)
      max_sbrked_mem = sbrked_mem;
#ifdef NO_THREADS
    if ((unsigned long)(mmapped_mem + arena_mem + sbrked_mem) > max_total_mem)
      max_total_mem = mmapped_mem + arena_mem + sbrked_mem;
#endif

#if USE_ARENAS
  } else { /* ar_ptr != &main_arena */
    heap_info *old_heap, *heap;
    size_t old_heap_size;

    if(old_top_size < MINSIZE) /* this should never happen */
      return;

    /* First try to extend the current heap. */
    if(MINSIZE + nb <= old_top_size)
      return;
    old_heap = heap_for_ptr(old_top);
    old_heap_size = old_heap->size;
    if(grow_heap(old_heap, MINSIZE + nb - old_top_size) == 0) {
      ar_ptr->size += old_heap->size - old_heap_size;
      arena_mem += old_heap->size - old_heap_size;
#ifdef NO_THREADS
      if(mmapped_mem + arena_mem + sbrked_mem > max_total_mem)
        max_total_mem = mmapped_mem + arena_mem + sbrked_mem;
#endif
      top_size = ((char *)old_heap + old_heap->size) - (char *)old_top;
      set_head(old_top, top_size | PREV_INUSE);
      return;
    }

    /* A new heap must be created. */
    heap = new_heap(nb + (MINSIZE + sizeof(*heap)));
    if(!heap)
      return;
    heap->ar_ptr = ar_ptr;
    heap->prev = old_heap;
    ar_ptr->size += heap->size;
    arena_mem += heap->size;
#ifdef NO_THREADS
    if((unsigned long)(mmapped_mem + arena_mem + sbrked_mem) > max_total_mem)
      max_total_mem = mmapped_mem + arena_mem + sbrked_mem;
#endif

    /* Set up the new top, so we can safely use chunk_free() below. */
    top(ar_ptr) = chunk_at_offset(heap, sizeof(*heap));
    top_size = heap->size - sizeof(*heap);
    set_head(top(ar_ptr), top_size | PREV_INUSE);
  }
#endif /* USE_ARENAS */

  /* We always land on a page boundary */
  assert(((unsigned long)((char*)top(ar_ptr) + top_size) & (pagesz-1)) == 0);

  /* Setup fencepost and free the old top chunk. */
  if(old_top) {
    /* The fencepost takes at least MINSIZE bytes, because it might
       become the top chunk again later.  Note that a footer is set
       up, too, although the chunk is marked in use. */
    old_top_size -= MINSIZE;
    set_head(chunk_at_offset(old_top, old_top_size + 2*SIZE_SZ), 0|PREV_INUSE);
    if(old_top_size >= MINSIZE) {
      set_head(chunk_at_offset(old_top, old_top_size), (2*SIZE_SZ)|PREV_INUSE);
      set_foot(chunk_at_offset(old_top, old_top_size), (2*SIZE_SZ));
      set_head_size(old_top, old_top_size);
      chunk_free(ar_ptr, old_top);
    } else {
      set_head(old_top, (old_top_size + 2*SIZE_SZ)|PREV_INUSE);
      set_foot(old_top, (old_top_size + 2*SIZE_SZ));
    }
  }
}


/* Main public routines */

/*
  Malloc Algorithm:

    The requested size is first converted into a usable form, `nb'.
    This currently means to add 4 bytes overhead plus possibly more to
    obtain 8-byte alignment and/or to obtain a size of at least
    MINSIZE (currently 16, 24, or 32 bytes), the smallest allocatable
    size. (All fits are considered `exact' if they are within MINSIZE
    bytes.)

    From there, the first of the following steps to succeed is taken:

      1. The bin corresponding to the request size is scanned, and if
         a chunk of exactly the right size is found, it is taken.

      2. The most recently remaindered chunk is used if it is big
         enough.  This is a form of (roving) first fit, used only in
         the absence of exact fits. Runs of consecutive requests use
         the remainder of the chunk used for the previous such request
         whenever possible. This limited use of a first-fit style
         allocation strategy tends to give contiguous chunks
         coextensive lifetimes, which improves locality and can reduce
         fragmentation in the long run.

      3. Other bins are scanned in increasing size order, using a
         chunk big enough to fulfill the request, and splitting off
         any remainder.  This search is strictly by best-fit; i.e.,
         the smallest (with ties going to approximately the least
         recently used) chunk that fits is selected.

      4. If large enough, the chunk bordering the end of memory
         (`top') is split off. (This use of `top' is in accord with
         the best-fit search rule.  In effect, `top' is treated as
         larger (and thus less well fitting) than any other available
         chunk since it can be extended to be as large as necessary
         (up to system limitations).)

      5. If the request size meets the mmap threshold and the
         system supports mmap, and there are few enough currently
         allocated mmapped regions, and a call to mmap succeeds,
         the request is allocated via direct memory mapping.

      6. Otherwise, the top of memory is extended by
         obtaining more space from the system (normally using sbrk,
         but definable to anything else via the MORECORE macro).
         Memory is gathered from the system (in system page-sized
         units) in a way that allows chunks obtained across different
         sbrk calls to be consolidated, but does not require
         contiguous memory. Thus, it should be safe to intersperse
         mallocs with other sbrk calls.


    All allocations are made from the `lowest' part of any found
    chunk. (The implementation invariant is that prev_inuse is
    always true of any allocated chunk; i.e., that each allocated
    chunk borders either a previously allocated and still in-use chunk,
    or the base of its memory arena.)

*/

#if __STD_C
|
2810 |
|
|
Void_t* mALLOc(size_t bytes)
|
2811 |
|
|
#else
|
2812 |
|
|
Void_t* mALLOc(bytes) size_t bytes;
|
2813 |
|
|
#endif
|
2814 |
|
|
{
|
2815 |
|
|
arena *ar_ptr;
|
2816 |
|
|
INTERNAL_SIZE_T nb; /* padded request size */
|
2817 |
|
|
mchunkptr victim;
|
2818 |
|
|
|
2819 |
|
|
#if defined _LIBC || defined MALLOC_HOOKS
|
2820 |
|
|
__malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, __const __malloc_ptr_t)) =
|
2821 |
|
|
__malloc_hook;
|
2822 |
|
|
if (hook != NULL) {
|
2823 |
|
|
Void_t* result;
|
2824 |
|
|
|
2825 |
|
|
#if defined __GNUC__ && __GNUC__ >= 2
|
2826 |
|
|
result = (*hook)(bytes, RETURN_ADDRESS (0));
|
2827 |
|
|
#else
|
2828 |
|
|
result = (*hook)(bytes, NULL);
|
2829 |
|
|
#endif
|
2830 |
|
|
return result;
|
2831 |
|
|
}
|
2832 |
|
|
#endif
|
2833 |
|
|
|
2834 |
|
|
if(request2size(bytes, nb))
|
2835 |
|
|
return 0;
|
2836 |
|
|
arena_get(ar_ptr, nb);
|
2837 |
|
|
if(!ar_ptr)
|
2838 |
|
|
return 0;
|
2839 |
|
|
victim = chunk_alloc(ar_ptr, nb);
|
2840 |
|
|
if(!victim) {
|
2841 |
|
|
/* Maybe the failure is due to running out of mmapped areas. */
|
2842 |
|
|
if(ar_ptr != &main_arena) {
|
2843 |
|
|
(void)mutex_unlock(&ar_ptr->mutex);
|
2844 |
|
|
(void)mutex_lock(&main_arena.mutex);
|
2845 |
|
|
victim = chunk_alloc(&main_arena, nb);
|
2846 |
|
|
(void)mutex_unlock(&main_arena.mutex);
|
2847 |
|
|
} else {
|
2848 |
|
|
#if USE_ARENAS
|
2849 |
|
|
/* ... or sbrk() has failed and there is still a chance to mmap() */
|
2850 |
|
|
ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, nb);
|
2851 |
|
|
(void)mutex_unlock(&main_arena.mutex);
|
2852 |
|
|
if(ar_ptr) {
|
2853 |
|
|
victim = chunk_alloc(ar_ptr, nb);
|
2854 |
|
|
(void)mutex_unlock(&ar_ptr->mutex);
|
2855 |
|
|
}
|
2856 |
|
|
#endif
|
2857 |
|
|
}
|
2858 |
|
|
if(!victim) return 0;
|
2859 |
|
|
} else
|
2860 |
|
|
(void)mutex_unlock(&ar_ptr->mutex);
|
2861 |
|
|
return BOUNDED_N(chunk2mem(victim), bytes);
|
2862 |
|
|
}
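
/* An illustrative sketch of the size normalization described in the
   "Malloc Algorithm" comment above: pad the request with per-chunk
   overhead, round up to the malloc alignment, and enforce the MINSIZE
   floor.  The constants (4-byte overhead, 8-byte alignment, 16-byte
   MINSIZE) are assumptions for a typical 32-bit configuration, not a
   copy of the real request2size macro; the block is compiled out. */
#if 0
#include <assert.h>
#include <stddef.h>

static size_t example_pad_request(size_t bytes)
{
  const size_t overhead = 4;   /* one size word of bookkeeping */
  const size_t align = 8;      /* malloc alignment */
  const size_t minsize = 16;   /* smallest allocatable chunk */
  size_t nb = (bytes + overhead + align - 1) & ~(align - 1);
  return nb < minsize ? minsize : nb;
}

int main(void)
{
  assert(example_pad_request(1) == 16);   /* clamped to MINSIZE */
  assert(example_pad_request(13) == 24);  /* 13+4 -> 17 -> rounded to 24 */
  assert(example_pad_request(20) == 24);  /* 20+4 = 24, already aligned */
  return 0;
}
#endif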

static mchunkptr
internal_function
#if __STD_C
chunk_alloc(arena *ar_ptr, INTERNAL_SIZE_T nb)
#else
chunk_alloc(ar_ptr, nb) arena *ar_ptr; INTERNAL_SIZE_T nb;
#endif
{
  mchunkptr victim;            /* inspected/selected chunk */
  INTERNAL_SIZE_T victim_size; /* its size */
  int idx;                     /* index for bin traversal */
  mbinptr bin;                 /* associated bin */
  mchunkptr remainder;         /* remainder from a split */
  long remainder_size;         /* its size */
  int remainder_index;         /* its bin index */
  unsigned long block;         /* block traverser bit */
  int startidx;                /* first bin of a traversed block */
  mchunkptr fwd;               /* misc temp for linking */
  mchunkptr bck;               /* misc temp for linking */
  mbinptr q;                   /* misc temp */


  /* Check for exact match in a bin */

  if (is_small_request(nb))  /* Faster version for small requests */
  {
    idx = smallbin_index(nb);

    /* No traversal or size check necessary for small bins. */

    q = _bin_at(ar_ptr, idx);
    victim = last(q);

    /* Also scan the next one, since it would have a remainder < MINSIZE */
    if (victim == q)
    {
      q = next_bin(q);
      victim = last(q);
    }
    if (victim != q)
    {
      victim_size = chunksize(victim);
      unlink(victim, bck, fwd);
      set_inuse_bit_at_offset(victim, victim_size);
      check_malloced_chunk(ar_ptr, victim, nb);
      return victim;
    }

    idx += 2; /* Set for bin scan below. We've already scanned 2 bins. */

  }
  else
  {
    idx = bin_index(nb);
    bin = bin_at(ar_ptr, idx);

    for (victim = last(bin); victim != bin; victim = victim->bk)
    {
      victim_size = chunksize(victim);
      remainder_size = victim_size - nb;

      if (remainder_size >= (long)MINSIZE) /* too big */
      {
        --idx; /* adjust to rescan below after checking last remainder */
        break;
      }

      else if (remainder_size >= 0) /* exact fit */
      {
        unlink(victim, bck, fwd);
        set_inuse_bit_at_offset(victim, victim_size);
        check_malloced_chunk(ar_ptr, victim, nb);
        return victim;
      }
    }

    ++idx;

  }

  /* Try to use the last split-off remainder */

  if ( (victim = last_remainder(ar_ptr)->fd) != last_remainder(ar_ptr))
  {
    victim_size = chunksize(victim);
    remainder_size = victim_size - nb;

    if (remainder_size >= (long)MINSIZE) /* re-split */
    {
      remainder = chunk_at_offset(victim, nb);
      set_head(victim, nb | PREV_INUSE);
      link_last_remainder(ar_ptr, remainder);
      set_head(remainder, remainder_size | PREV_INUSE);
      set_foot(remainder, remainder_size);
      check_malloced_chunk(ar_ptr, victim, nb);
      return victim;
    }

    clear_last_remainder(ar_ptr);

    if (remainder_size >= 0)  /* exhaust */
    {
      set_inuse_bit_at_offset(victim, victim_size);
      check_malloced_chunk(ar_ptr, victim, nb);
      return victim;
    }

    /* Else place in bin */

    frontlink(ar_ptr, victim, victim_size, remainder_index, bck, fwd);
  }

  /*
     If there are any possibly nonempty big-enough blocks,
     search for best fitting chunk by scanning bins in blockwidth units.
  */

  if ( (block = idx2binblock(idx)) <= binblocks(ar_ptr))
  {

    /* Get to the first marked block */

    if ( (block & binblocks(ar_ptr)) == 0)
    {
      /* force to an even block boundary */
      idx = (idx & ~(BINBLOCKWIDTH - 1)) + BINBLOCKWIDTH;
      block <<= 1;
      while ((block & binblocks(ar_ptr)) == 0)
      {
        idx += BINBLOCKWIDTH;
        block <<= 1;
      }
    }

    /* For each possibly nonempty block ... */
    for (;;)
    {
      startidx = idx;          /* (track incomplete blocks) */
      q = bin = _bin_at(ar_ptr, idx);

      /* For each bin in this block ... */
      do
      {
        /* Find and use first big enough chunk ... */

        for (victim = last(bin); victim != bin; victim = victim->bk)
        {
          victim_size = chunksize(victim);
          remainder_size = victim_size - nb;

          if (remainder_size >= (long)MINSIZE) /* split */
          {
            remainder = chunk_at_offset(victim, nb);
            set_head(victim, nb | PREV_INUSE);
            unlink(victim, bck, fwd);
            link_last_remainder(ar_ptr, remainder);
            set_head(remainder, remainder_size | PREV_INUSE);
            set_foot(remainder, remainder_size);
            check_malloced_chunk(ar_ptr, victim, nb);
            return victim;
          }

          else if (remainder_size >= 0)  /* take */
          {
            set_inuse_bit_at_offset(victim, victim_size);
            unlink(victim, bck, fwd);
            check_malloced_chunk(ar_ptr, victim, nb);
            return victim;
          }

        }

        bin = next_bin(bin);

      } while ((++idx & (BINBLOCKWIDTH - 1)) != 0);

      /* Clear out the block bit. */

      do   /* Possibly backtrack to try to clear a partial block */
      {
        if ((startidx & (BINBLOCKWIDTH - 1)) == 0)
        {
          binblocks(ar_ptr) &= ~block;
          break;
        }
        --startidx;
        q = prev_bin(q);
      } while (first(q) == q);

      /* Get to the next possibly nonempty block */

      if ( (block <<= 1) <= binblocks(ar_ptr) && (block != 0) )
      {
        while ((block & binblocks(ar_ptr)) == 0)
        {
          idx += BINBLOCKWIDTH;
          block <<= 1;
        }
      }
      else
        break;
    }
  }


  /* Try to use top chunk */

  /* Require that there be a remainder, ensuring top always exists */
  if ( (remainder_size = chunksize(top(ar_ptr)) - nb) < (long)MINSIZE)
  {

#if HAVE_MMAP
    /* If the request is big and there are not yet too many regions,
       and we would otherwise need to extend, try to use mmap instead. */
    if ((unsigned long)nb >= (unsigned long)mmap_threshold &&
        n_mmaps < n_mmaps_max &&
        (victim = mmap_chunk(nb)) != 0)
      return victim;
#endif

    /* Try to extend */
    malloc_extend_top(ar_ptr, nb);
    if ((remainder_size = chunksize(top(ar_ptr)) - nb) < (long)MINSIZE)
    {
#if HAVE_MMAP
      /* A last attempt: when we are out of address space in a
         non-main arena, try mmap anyway, as long as it is allowed at
         all. */
      if (ar_ptr != &main_arena &&
          n_mmaps_max > 0 &&
          (victim = mmap_chunk(nb)) != 0)
        return victim;
#endif
      return 0; /* propagate failure */
    }
  }

  victim = top(ar_ptr);
  set_head(victim, nb | PREV_INUSE);
  top(ar_ptr) = chunk_at_offset(victim, nb);
  set_head(top(ar_ptr), remainder_size | PREV_INUSE);
  check_malloced_chunk(ar_ptr, victim, nb);
  return victim;

}
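
/* A toy model of the best-fit-with-split policy that chunk_alloc
   implements: pick the smallest free block that fits; if the leftover
   piece is at least MINSIZE, split it off and keep it free, otherwise
   hand out the whole block.  This operates on a plain array instead of
   the real bins; all names and sizes are illustrative, and the block
   is compiled out. */
#if 0
#include <stdio.h>

#define EX_MINSIZE 16

static long ex_free[] = { 96, 32, 64, 200 };   /* free block sizes */
#define EX_NFREE (sizeof ex_free / sizeof ex_free[0])

/* Returns the size actually handed out for a request of nb, or 0. */
static long ex_best_fit(long nb)
{
  int i, best = -1;
  long taken;
  for (i = 0; i < (int)EX_NFREE; i++)
    if (ex_free[i] >= nb && (best < 0 || ex_free[i] < ex_free[best]))
      best = i;
  if (best < 0)
    return 0;                       /* no block fits */
  if (ex_free[best] - nb >= EX_MINSIZE) {
    ex_free[best] -= nb;            /* split: remainder stays free */
    return nb;
  }
  taken = ex_free[best];            /* exhaust: remainder too small */
  ex_free[best] = 0;
  return taken;
}

int main(void)
{
  printf("%ld\n", ex_best_fit(40)); /* smallest fit is 64; splits, returns 40 */
  printf("%ld\n", ex_best_fit(30)); /* 32 fits, leftover 2 < MINSIZE: returns 32 */
  return 0;
}
#endif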


/*

  free() algorithm:

    cases:

       1. free(0) has no effect.

       2. If the chunk was allocated via mmap, it is released via munmap().

       3. If a returned chunk borders the current high end of memory,
          it is consolidated into the top, and if the total unused
          topmost memory exceeds the trim threshold, malloc_trim is
          called.

       4. Other chunks are consolidated as they arrive, and
          placed in corresponding bins.  (This includes the case of
          consolidating with the current `last_remainder').

*/


#if __STD_C
void fREe(Void_t* mem)
#else
void fREe(mem) Void_t* mem;
#endif
{
  arena *ar_ptr;
  mchunkptr p; /* chunk corresponding to mem */

#if defined _LIBC || defined MALLOC_HOOKS
  void (*hook) __MALLOC_PMT ((__malloc_ptr_t, __const __malloc_ptr_t)) =
    __free_hook;

  if (hook != NULL) {
#if defined __GNUC__ && __GNUC__ >= 2
    (*hook)(mem, RETURN_ADDRESS (0));
#else
    (*hook)(mem, NULL);
#endif
    return;
  }
#endif

  if (mem == 0) /* free(0) has no effect */
    return;

  p = mem2chunk(mem);

#if HAVE_MMAP
  if (chunk_is_mmapped(p)) /* release mmapped memory. */
  {
    munmap_chunk(p);
    return;
  }
#endif

  ar_ptr = arena_for_ptr(p);
#if THREAD_STATS
  if(!mutex_trylock(&ar_ptr->mutex))
    ++(ar_ptr->stat_lock_direct);
  else {
    (void)mutex_lock(&ar_ptr->mutex);
    ++(ar_ptr->stat_lock_wait);
  }
#else
  (void)mutex_lock(&ar_ptr->mutex);
#endif
  chunk_free(ar_ptr, p);
  (void)mutex_unlock(&ar_ptr->mutex);
}
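
/* A minimal usage sketch of the cases listed in the free() comment:
   free(0) is a no-op, and releasing the most recently allocated block
   typically lets it merge back into the top of the arena.  Standard C
   API only; compiled out. */
#if 0
#include <stdlib.h>

int main(void)
{
  char *a = malloc(100);
  char *b = malloc(100);   /* b typically borders the top chunk */
  free(0);                 /* case 1: no effect */
  free(b);                 /* case 3: consolidated into top */
  free(a);                 /* case 3 again, now a borders top */
  return 0;
}
#endif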

static void
internal_function
#if __STD_C
chunk_free(arena *ar_ptr, mchunkptr p)
#else
chunk_free(ar_ptr, p) arena *ar_ptr; mchunkptr p;
#endif
{
  INTERNAL_SIZE_T hd = p->size; /* its head field */
  INTERNAL_SIZE_T sz;      /* its size */
  int idx;                 /* its bin index */
  mchunkptr next;          /* next contiguous chunk */
  INTERNAL_SIZE_T nextsz;  /* its size */
  INTERNAL_SIZE_T prevsz;  /* size of previous contiguous chunk */
  mchunkptr bck;           /* misc temp for linking */
  mchunkptr fwd;           /* misc temp for linking */
  int islr;                /* track whether merging with last_remainder */

  check_inuse_chunk(ar_ptr, p);

  sz = hd & ~PREV_INUSE;
  next = chunk_at_offset(p, sz);
  nextsz = chunksize(next);

  if (next == top(ar_ptr))                         /* merge with top */
  {
    sz += nextsz;

    if (!(hd & PREV_INUSE))                        /* consolidate backward */
    {
      prevsz = p->prev_size;
      p = chunk_at_offset(p, -(long)prevsz);
      sz += prevsz;
      unlink(p, bck, fwd);
    }

    set_head(p, sz | PREV_INUSE);
    top(ar_ptr) = p;

#if USE_ARENAS
    if(ar_ptr == &main_arena) {
#endif
      if ((unsigned long)(sz) >= (unsigned long)trim_threshold)
        main_trim(top_pad);
#if USE_ARENAS
    } else {
      heap_info *heap = heap_for_ptr(p);

      assert(heap->ar_ptr == ar_ptr);

      /* Try to get rid of completely empty heaps, if possible. */
      if((unsigned long)(sz) >= (unsigned long)trim_threshold ||
         p == chunk_at_offset(heap, sizeof(*heap)))
        heap_trim(heap, top_pad);
    }
#endif
    return;
  }

  islr = 0;

  if (!(hd & PREV_INUSE))                    /* consolidate backward */
  {
    prevsz = p->prev_size;
    p = chunk_at_offset(p, -(long)prevsz);
    sz += prevsz;

    if (p->fd == last_remainder(ar_ptr))     /* keep as last_remainder */
      islr = 1;
    else
      unlink(p, bck, fwd);
  }

  if (!(inuse_bit_at_offset(next, nextsz)))  /* consolidate forward */
  {
    sz += nextsz;

    if (!islr && next->fd == last_remainder(ar_ptr))
      /* re-insert last_remainder */
    {
      islr = 1;
      link_last_remainder(ar_ptr, p);
    }
    else
      unlink(next, bck, fwd);

    next = chunk_at_offset(p, sz);
  }
  else
    set_head(next, nextsz);                  /* clear inuse bit */

  set_head(p, sz | PREV_INUSE);
  next->prev_size = sz;
  if (!islr)
    frontlink(ar_ptr, p, sz, idx, bck, fwd);

#if USE_ARENAS
  /* Check whether the heap containing top can go away now. */
  if(next->size < MINSIZE &&
     (unsigned long)sz > trim_threshold &&
     ar_ptr != &main_arena) {                /* fencepost */
    heap_info *heap = heap_for_ptr(top(ar_ptr));

    if(top(ar_ptr) == chunk_at_offset(heap, sizeof(*heap)) &&
       heap->prev == heap_for_ptr(p))
      heap_trim(heap, top_pad);
  }
#endif
}
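
/* A toy model of the boundary-tag bookkeeping that chunk_free relies
   on: each block records its own size plus a PREV_INUSE bit, and a
   free block copies its size into the following block's prev_size
   footer so that backward coalescing can find the neighbor's start in
   O(1).  A plain array stands in for real arena memory; compiled
   out. */
#if 0
#include <assert.h>

#define EX_PREV_INUSE 1UL

struct ex_chunk {
  unsigned long prev_size;  /* size of previous chunk, if it is free */
  unsigned long size;       /* this chunk's size | PREV_INUSE bit */
};

int main(void)
{
  static unsigned long heap[16];          /* stands in for arena memory */
  struct ex_chunk *a = (struct ex_chunk *)heap;
  struct ex_chunk *b = (struct ex_chunk *)((char *)heap + 32);
  struct ex_chunk *prev;

  a->size = 32 | EX_PREV_INUSE;           /* 32-byte chunk, prev in use */
  b->size = 48 | EX_PREV_INUSE;

  /* "Free" a: clear the PREV_INUSE bit of b and write a's size into
     b's prev_size footer slot. */
  b->size &= ~EX_PREV_INUSE;
  b->prev_size = 32;

  /* Freeing b can now find a in O(1) for backward coalescing. */
  assert(!(b->size & EX_PREV_INUSE));
  prev = (struct ex_chunk *)((char *)b - b->prev_size);
  assert(prev == a);
  return 0;
}
#endif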




/*

  Realloc algorithm:

    Chunks that were obtained via mmap cannot be extended or shrunk
    unless HAVE_MREMAP is defined, in which case mremap is used.
    Otherwise, if their reallocation is for additional space, they are
    copied.  If for less, they are just left alone.

    Otherwise, if the reallocation is for additional space, and the
    chunk can be extended, it is, else a malloc-copy-free sequence is
    taken.  There are several different ways that a chunk could be
    extended.  All are tried:

       * Extending forward into following adjacent free chunk.
       * Shifting backwards, joining preceding adjacent space
       * Both shifting backwards and extending forward.
       * Extending into newly sbrked space

    Unless the #define REALLOC_ZERO_BYTES_FREES is set, realloc with a
    size argument of zero (re)allocates a minimum-sized chunk.

    If the reallocation is for less space, and the new request is for
    a `small' (<512 bytes) size, then the newly unused space is lopped
    off and freed.

    The old unix realloc convention of allowing the last-free'd chunk
    to be used as an argument to realloc is no longer supported.
    I don't know of any programs still relying on this feature,
    and allowing it would also allow too many other incorrect
    usages of realloc to be sensible.


*/


#if __STD_C
Void_t* rEALLOc(Void_t* oldmem, size_t bytes)
#else
Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
#endif
{
  arena *ar_ptr;
  INTERNAL_SIZE_T nb;      /* padded request size */

  mchunkptr oldp;          /* chunk corresponding to oldmem */
  INTERNAL_SIZE_T oldsize; /* its size */

  mchunkptr newp;          /* chunk to return */

#if defined _LIBC || defined MALLOC_HOOKS
  __malloc_ptr_t (*hook) __MALLOC_PMT ((__malloc_ptr_t, size_t,
                                        __const __malloc_ptr_t)) =
    __realloc_hook;
  if (hook != NULL) {
    Void_t* result;

#if defined __GNUC__ && __GNUC__ >= 2
    result = (*hook)(oldmem, bytes, RETURN_ADDRESS (0));
#else
    result = (*hook)(oldmem, bytes, NULL);
#endif
    return result;
  }
#endif

#ifdef REALLOC_ZERO_BYTES_FREES
  if (bytes == 0 && oldmem != NULL) { fREe(oldmem); return 0; }
#endif

  /* realloc of null is supposed to be same as malloc */
  if (oldmem == 0) return mALLOc(bytes);

  oldp    = mem2chunk(oldmem);
  oldsize = chunksize(oldp);

  if(request2size(bytes, nb))
    return 0;

#if HAVE_MMAP
  if (chunk_is_mmapped(oldp))
  {
    Void_t* newmem;

#if HAVE_MREMAP
    newp = mremap_chunk(oldp, nb);
    if(newp)
      return BOUNDED_N(chunk2mem(newp), bytes);
#endif
    /* Note the extra SIZE_SZ overhead. */
    if(oldsize - SIZE_SZ >= nb) return oldmem; /* do nothing */
    /* Must alloc, copy, free. */
    newmem = mALLOc(bytes);
    if (newmem == 0) return 0; /* propagate failure */
    MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ, 0);
    munmap_chunk(oldp);
    return newmem;
  }
#endif

  ar_ptr = arena_for_ptr(oldp);
#if THREAD_STATS
  if(!mutex_trylock(&ar_ptr->mutex))
    ++(ar_ptr->stat_lock_direct);
  else {
    (void)mutex_lock(&ar_ptr->mutex);
    ++(ar_ptr->stat_lock_wait);
  }
#else
  (void)mutex_lock(&ar_ptr->mutex);
#endif

#ifndef NO_THREADS
  /* As in malloc(), remember this arena for the next allocation. */
  tsd_setspecific(arena_key, (Void_t *)ar_ptr);
#endif

  newp = chunk_realloc(ar_ptr, oldp, oldsize, nb);

  (void)mutex_unlock(&ar_ptr->mutex);
  return newp ? BOUNDED_N(chunk2mem(newp), bytes) : NULL;
}
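
/* A short usage sketch of the contract stated above: data is preserved
   up to the minimum of the old and new sizes whether the chunk is
   extended in place or moved via malloc-copy-free, so the returned
   pointer must always replace the old one.  Standard C API; compiled
   out. */
#if 0
#include <assert.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
  char *p;
  char *q;

  p = malloc(16);
  if (!p) return 1;
  strcpy(p, "boundary tags");

  q = realloc(p, 4096);           /* may extend in place or move */
  if (!q) { free(p); return 1; }  /* on failure the old block survives */
  assert(strcmp(q, "boundary tags") == 0);

  free(q);
  return 0;
}
#endif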

static mchunkptr
internal_function
#if __STD_C
chunk_realloc(arena* ar_ptr, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
              INTERNAL_SIZE_T nb)
#else
chunk_realloc(ar_ptr, oldp, oldsize, nb)
arena* ar_ptr; mchunkptr oldp; INTERNAL_SIZE_T oldsize, nb;
#endif
{
  mchunkptr newp = oldp;             /* chunk to return */
  INTERNAL_SIZE_T newsize = oldsize; /* its size */

  mchunkptr next;                    /* next contiguous chunk after oldp */
  INTERNAL_SIZE_T nextsize;          /* its size */

  mchunkptr prev;                    /* previous contiguous chunk before oldp */
  INTERNAL_SIZE_T prevsize;          /* its size */

  mchunkptr remainder;               /* holds split off extra space from newp */
  INTERNAL_SIZE_T remainder_size;    /* its size */

  mchunkptr bck;                     /* misc temp for linking */
  mchunkptr fwd;                     /* misc temp for linking */

  check_inuse_chunk(ar_ptr, oldp);

  if ((long)(oldsize) < (long)(nb))
  {
    Void_t* oldmem = BOUNDED_N(chunk2mem(oldp), oldsize);

    /* Try expanding forward */

    next = chunk_at_offset(oldp, oldsize);
    if (next == top(ar_ptr) || !inuse(next))
    {
      nextsize = chunksize(next);

      /* Forward into top only if a remainder */
      if (next == top(ar_ptr))
      {
        if ((long)(nextsize + newsize) >= (long)(nb + MINSIZE))
        {
          newsize += nextsize;
          top(ar_ptr) = chunk_at_offset(oldp, nb);
          set_head(top(ar_ptr), (newsize - nb) | PREV_INUSE);
          set_head_size(oldp, nb);
          return oldp;
        }
      }

      /* Forward into next chunk */
      else if (((long)(nextsize + newsize) >= (long)(nb)))
      {
        unlink(next, bck, fwd);
        newsize += nextsize;
        goto split;
      }
    }
    else
    {
      next = 0;
      nextsize = 0;
    }

    oldsize -= SIZE_SZ;

    /* Try shifting backwards. */

    if (!prev_inuse(oldp))
    {
      prev = prev_chunk(oldp);
      prevsize = chunksize(prev);

      /* try forward + backward first to save a later consolidation */

      if (next != 0)
      {
        /* into top */
        if (next == top(ar_ptr))
        {
          if ((long)(nextsize + prevsize + newsize) >= (long)(nb + MINSIZE))
          {
            unlink(prev, bck, fwd);
            newp = prev;
            newsize += prevsize + nextsize;
            MALLOC_COPY(BOUNDED_N(chunk2mem(newp), oldsize), oldmem, oldsize,
                        1);
            top(ar_ptr) = chunk_at_offset(newp, nb);
            set_head(top(ar_ptr), (newsize - nb) | PREV_INUSE);
            set_head_size(newp, nb);
            return newp;
          }
        }

        /* into next chunk */
        else if (((long)(nextsize + prevsize + newsize) >= (long)(nb)))
        {
          unlink(next, bck, fwd);
          unlink(prev, bck, fwd);
          newp = prev;
          newsize += nextsize + prevsize;
          MALLOC_COPY(BOUNDED_N(chunk2mem(newp), oldsize), oldmem, oldsize, 1);
          goto split;
        }
      }

      /* backward only */
      if (prev != 0 && (long)(prevsize + newsize) >= (long)nb)
      {
        unlink(prev, bck, fwd);
        newp = prev;
        newsize += prevsize;
        MALLOC_COPY(BOUNDED_N(chunk2mem(newp), oldsize), oldmem, oldsize, 1);
        goto split;
      }
    }

    /* Must allocate */

    newp = chunk_alloc (ar_ptr, nb);

    if (newp == 0) {
      /* Maybe the failure is due to running out of mmapped areas. */
      if (ar_ptr != &main_arena) {
        (void)mutex_lock(&main_arena.mutex);
        newp = chunk_alloc(&main_arena, nb);
        (void)mutex_unlock(&main_arena.mutex);
      } else {
#if USE_ARENAS
        /* ... or sbrk() has failed and there is still a chance to mmap() */
        arena* ar_ptr2 = arena_get2(ar_ptr->next ? ar_ptr : 0, nb);
        if(ar_ptr2) {
          newp = chunk_alloc(ar_ptr2, nb);
          (void)mutex_unlock(&ar_ptr2->mutex);
        }
#endif
      }
      if (newp == 0) /* propagate failure */
        return 0;
    }

    /* Avoid copy if newp is next chunk after oldp. */
    /* (This can only happen when new chunk is sbrk'ed.) */

    if ( newp == next_chunk(oldp))
    {
      newsize += chunksize(newp);
      newp = oldp;
      goto split;
    }

    /* Otherwise copy, free, and exit */
    MALLOC_COPY(BOUNDED_N(chunk2mem(newp), oldsize), oldmem, oldsize, 0);
    chunk_free(ar_ptr, oldp);
    return newp;
  }


 split:  /* split off extra room in old or expanded chunk */

  if (newsize - nb >= MINSIZE) /* split off remainder */
  {
    remainder = chunk_at_offset(newp, nb);
    remainder_size = newsize - nb;
    set_head_size(newp, nb);
    set_head(remainder, remainder_size | PREV_INUSE);
    set_inuse_bit_at_offset(remainder, remainder_size);
    chunk_free(ar_ptr, remainder);
  }
  else
  {
    set_head_size(newp, newsize);
    set_inuse_bit_at_offset(newp, newsize);
  }

  check_inuse_chunk(ar_ptr, newp);
  return newp;
}




/*

  memalign algorithm:

    memalign requests more than enough space from malloc, finds a spot
    within that chunk that meets the alignment request, and then
    possibly frees the leading and trailing space.

    The alignment argument must be a power of two.  This property is not
    checked by memalign, so misuse may result in random runtime errors.

    8-byte alignment is guaranteed by normal malloc calls, so don't
    bother calling memalign with an argument of 8 or less.

    Overreliance on memalign is a sure way to fragment space.

*/


#if __STD_C
Void_t* mEMALIGn(size_t alignment, size_t bytes)
#else
Void_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes;
#endif
{
  arena *ar_ptr;
  INTERNAL_SIZE_T nb;      /* padded request size */
  mchunkptr p;

#if defined _LIBC || defined MALLOC_HOOKS
  __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, size_t,
                                        __const __malloc_ptr_t)) =
    __memalign_hook;
  if (hook != NULL) {
    Void_t* result;

#if defined __GNUC__ && __GNUC__ >= 2
    result = (*hook)(alignment, bytes, RETURN_ADDRESS (0));
#else
    result = (*hook)(alignment, bytes, NULL);
#endif
    return result;
  }
#endif

  /* If need less alignment than we give anyway, just relay to malloc */

  if (alignment <= MALLOC_ALIGNMENT) return mALLOc(bytes);

  /* Otherwise, ensure that it is at least a minimum chunk size */

  if (alignment < MINSIZE) alignment = MINSIZE;

  if(request2size(bytes, nb))
    return 0;
  arena_get(ar_ptr, nb + alignment + MINSIZE);
  if(!ar_ptr)
    return 0;
  p = chunk_align(ar_ptr, nb, alignment);
  (void)mutex_unlock(&ar_ptr->mutex);
  if(!p) {
    /* Maybe the failure is due to running out of mmapped areas. */
    if(ar_ptr != &main_arena) {
      (void)mutex_lock(&main_arena.mutex);
      p = chunk_align(&main_arena, nb, alignment);
      (void)mutex_unlock(&main_arena.mutex);
    } else {
#if USE_ARENAS
      /* ... or sbrk() has failed and there is still a chance to mmap() */
      ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, nb);
      if(ar_ptr) {
        p = chunk_align(ar_ptr, nb, alignment);
        (void)mutex_unlock(&ar_ptr->mutex);
      }
#endif
    }
    if(!p) return 0;
  }
  return BOUNDED_N(chunk2mem(p), bytes);
}
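
/* A usage sketch for the memalign contract above: the alignment must
   be a power of two (memalign itself does not check this), and the
   returned pointer is then exactly divisible by it.  Standard glibc
   <malloc.h> interface; compiled out. */
#if 0
#include <assert.h>
#include <malloc.h>
#include <stdlib.h>

int main(void)
{
  size_t align = 64;
  void *p;

  assert((align & (align - 1)) == 0);  /* caller must check: power of two */
  p = memalign(align, 1000);
  if (p) {
    assert(((unsigned long)p % align) == 0);
    free(p);
  }
  return 0;
}
#endif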

static mchunkptr
internal_function
#if __STD_C
chunk_align(arena* ar_ptr, INTERNAL_SIZE_T nb, size_t alignment)
#else
chunk_align(ar_ptr, nb, alignment)
arena* ar_ptr; INTERNAL_SIZE_T nb; size_t alignment;
#endif
{
  unsigned long m;           /* memory returned by malloc call */
  mchunkptr p;               /* corresponding chunk */
  char* brk;                 /* alignment point within p */
  mchunkptr newp;            /* chunk to return */
  INTERNAL_SIZE_T newsize;   /* its size */
  INTERNAL_SIZE_T leadsize;  /* leading space before alignment point */
  mchunkptr remainder;       /* spare room at end to split off */
  long remainder_size;       /* its size */

  /* Call chunk_alloc with worst case padding to hit alignment. */
  p = chunk_alloc(ar_ptr, nb + alignment + MINSIZE);
  if (p == 0)
    return 0; /* propagate failure */

  m = (unsigned long)chunk2mem(p);

  if ((m % alignment) == 0) /* aligned */
  {
#if HAVE_MMAP
    if(chunk_is_mmapped(p)) {
      return p; /* nothing more to do */
    }
#endif
  }
  else /* misaligned */
  {
    /*
      Find an aligned spot inside chunk.
      Since we need to give back leading space in a chunk of at
      least MINSIZE, if the first calculation places us at
      a spot with less than MINSIZE leader, we can move to the
      next aligned spot -- we've allocated enough total room so that
      this is always possible.
    */

    brk = (char*)mem2chunk(((m + alignment - 1)) & -(long)alignment);
    if ((long)(brk - (char*)(p)) < (long)MINSIZE) brk += alignment;

    newp = chunk_at_offset(brk, 0);
    leadsize = brk - (char*)(p);
    newsize = chunksize(p) - leadsize;

#if HAVE_MMAP
    if(chunk_is_mmapped(p))
    {
      newp->prev_size = p->prev_size + leadsize;
      set_head(newp, newsize|IS_MMAPPED);
      return newp;
    }
#endif

    /* give back leader, use the rest */

    set_head(newp, newsize | PREV_INUSE);
    set_inuse_bit_at_offset(newp, newsize);
    set_head_size(p, leadsize);
    chunk_free(ar_ptr, p);
    p = newp;

    assert (newsize>=nb && (((unsigned long)(chunk2mem(p))) % alignment) == 0);
  }

  /* Also give back spare room at the end */

  remainder_size = chunksize(p) - nb;

  if (remainder_size >= (long)MINSIZE)
  {
    remainder = chunk_at_offset(p, nb);
    set_head(remainder, remainder_size | PREV_INUSE);
    set_head_size(p, nb);
    chunk_free(ar_ptr, remainder);
  }

  check_inuse_chunk(ar_ptr, p);
  return p;
}
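
/* The rounding trick chunk_align uses to find the aligned spot, shown
   on plain numbers: for a power-of-two alignment a, (m + a - 1) & -a
   rounds m up to the next multiple of a.  Compiled-out sketch. */
#if 0
#include <assert.h>

int main(void)
{
  unsigned long a = 64;                      /* power-of-two alignment */
  unsigned long m = 1000;                    /* some misaligned address */
  unsigned long up = (m + a - 1) & -(long)a; /* round up to multiple of a */

  assert(up == 1024);
  assert(up % a == 0);
  assert(up - m < a);                        /* moved by less than one unit */
  return 0;
}
#endif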




/*
    valloc just invokes memalign with alignment argument equal
    to the page size of the system (or as near to this as can
    be figured out from all the includes/defines above.)
*/

#if __STD_C
Void_t* vALLOc(size_t bytes)
#else
Void_t* vALLOc(bytes) size_t bytes;
#endif
{
  if(__malloc_initialized < 0)
    ptmalloc_init ();
  return mEMALIGn (malloc_getpagesize, bytes);
}

/*
  pvalloc just invokes valloc for the nearest pagesize
  that will accommodate request
*/


#if __STD_C
Void_t* pvALLOc(size_t bytes)
#else
Void_t* pvALLOc(bytes) size_t bytes;
#endif
{
  size_t pagesize;
  if(__malloc_initialized < 0)
    ptmalloc_init ();
  pagesize = malloc_getpagesize;
  return mEMALIGn (pagesize, (bytes + pagesize - 1) & ~(pagesize - 1));
}
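
/* The page rounding used by pvALLOc, on concrete numbers: with a
   4096-byte page, any request rounds up to a whole number of pages.
   The page size is an assumption for the example; compiled out. */
#if 0
#include <assert.h>
#include <stddef.h>

int main(void)
{
  size_t pagesize = 4096;   /* assumed for illustration */
  size_t bytes = 5000;
  size_t rounded = (bytes + pagesize - 1) & ~(pagesize - 1);

  assert(rounded == 8192);           /* 5000 bytes need two pages */
  assert(rounded % pagesize == 0);   /* always a page multiple */
  return 0;
}
#endif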

/*

  calloc calls chunk_alloc, then zeroes out the allocated chunk.

*/

#if __STD_C
Void_t* cALLOc(size_t n, size_t elem_size)
#else
Void_t* cALLOc(n, elem_size) size_t n; size_t elem_size;
#endif
{
  arena *ar_ptr;
  mchunkptr p, oldtop;
  INTERNAL_SIZE_T sz, csz, oldtopsize;
  Void_t* mem;

#if defined _LIBC || defined MALLOC_HOOKS
  __malloc_ptr_t (*hook) __MALLOC_PMT ((size_t, __const __malloc_ptr_t)) =
    __malloc_hook;
  if (hook != NULL) {
    sz = n * elem_size;
#if defined __GNUC__ && __GNUC__ >= 2
    mem = (*hook)(sz, RETURN_ADDRESS (0));
#else
    mem = (*hook)(sz, NULL);
#endif
    if(mem == 0)
      return 0;
#ifdef HAVE_MEMSET
    return memset(mem, 0, sz);
#else
    while(sz > 0) ((char*)mem)[--sz] = 0; /* rather inefficient */
    return mem;
#endif
  }
#endif

  if(request2size(n * elem_size, sz))
    return 0;
  arena_get(ar_ptr, sz);
  if(!ar_ptr)
    return 0;

  /* Check if expand_top called, in which case there may be
     no need to clear. */
#if MORECORE_CLEARS
  oldtop = top(ar_ptr);
  oldtopsize = chunksize(top(ar_ptr));
#if MORECORE_CLEARS < 2
  /* Only newly allocated memory is guaranteed to be cleared. */
  if (ar_ptr == &main_arena &&
      oldtopsize < sbrk_base + max_sbrked_mem - (char *)oldtop)
    oldtopsize = (sbrk_base + max_sbrked_mem - (char *)oldtop);
#endif
#endif
  p = chunk_alloc (ar_ptr, sz);

  /* Only clearing follows, so we can unlock early. */
  (void)mutex_unlock(&ar_ptr->mutex);

  if (p == 0) {
    /* Maybe the failure is due to running out of mmapped areas. */
    if(ar_ptr != &main_arena) {
      (void)mutex_lock(&main_arena.mutex);
      p = chunk_alloc(&main_arena, sz);
      (void)mutex_unlock(&main_arena.mutex);
    } else {
#if USE_ARENAS
      /* ... or sbrk() has failed and there is still a chance to mmap() */
      (void)mutex_lock(&main_arena.mutex);
      ar_ptr = arena_get2(ar_ptr->next ? ar_ptr : 0, sz);
      (void)mutex_unlock(&main_arena.mutex);
      if(ar_ptr) {
        p = chunk_alloc(ar_ptr, sz);
        (void)mutex_unlock(&ar_ptr->mutex);
      }
#endif
    }
    if (p == 0) return 0;
  }
  mem = BOUNDED_N(chunk2mem(p), n * elem_size);

  /* Two optional cases in which clearing not necessary */

#if HAVE_MMAP
  if (chunk_is_mmapped(p)) return mem;
#endif

  csz = chunksize(p);

#if MORECORE_CLEARS
  if (p == oldtop && csz > oldtopsize) {
    /* clear only the bytes from non-freshly-sbrked memory */
    csz = oldtopsize;
  }
#endif

  csz -= SIZE_SZ;
  MALLOC_ZERO(BOUNDED_N(chunk2mem(p), csz), csz);
  return mem;
}
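
/* A usage sketch of the calloc contract above: the returned block is
   fully zeroed.  Standard C API; compiled out. */
#if 0
#include <assert.h>
#include <stdlib.h>

int main(void)
{
  size_t i;
  unsigned char *p = calloc(64, sizeof *p);

  if (p) {
    for (i = 0; i < 64; i++)
      assert(p[i] == 0);     /* every byte cleared */
    free(p);
  }
  return 0;
}
#endif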

/*

  cfree just calls free. It is needed/defined on some systems
  that pair it with calloc, presumably for odd historical reasons.

*/

#if !defined(_LIBC)
#if __STD_C
void cfree(Void_t *mem)
#else
void cfree(mem) Void_t *mem;
#endif
{
  fREe(mem);
}
#endif



/*

  Malloc_trim gives memory back to the system (via negative
  arguments to sbrk) if there is unused memory at the `high' end of
  the malloc pool. You can call this after freeing large blocks of
  memory to potentially reduce the system-level memory requirements
  of a program. However, it cannot guarantee to reduce memory. Under
  some allocation patterns, some large free blocks of memory will be
  locked between two used chunks, so they cannot be given back to
  the system.

  The `pad' argument to malloc_trim represents the amount of free
  trailing space to leave untrimmed. If this argument is zero,
  only the minimum amount of memory to maintain internal data
  structures will be left (one page or less). Non-zero arguments
  can be supplied to maintain enough trailing space to service
  future expected allocations without having to re-obtain memory
  from the system.

  Malloc_trim returns 1 if it actually released any memory, else 0.

*/

#if __STD_C
int mALLOC_TRIm(size_t pad)
#else
int mALLOC_TRIm(pad) size_t pad;
#endif
{
  int res;

  (void)mutex_lock(&main_arena.mutex);
  res = main_trim(pad);
  (void)mutex_unlock(&main_arena.mutex);
  return res;
}
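
/* The amount main_trim releases, worked through on concrete numbers:
   extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz,
   i.e. everything above pad + MINSIZE, rounded so that at least one
   partial page always stays mapped.  The constants are assumptions for
   the example; compiled out. */
#if 0
#include <assert.h>

int main(void)
{
  long pagesz = 4096, minsize = 16;
  long top_size = 100000;  /* unused memory at the top of the arena */
  long pad = 8192;         /* caller wants this much left untrimmed */

  long extra = ((top_size - pad - minsize + (pagesz - 1)) / pagesz - 1)
               * pagesz;

  assert(extra == 90112);            /* 22 whole pages go back */
  assert(top_size - extra >= pad);   /* the pad survives the trim */
  return 0;
}
#endif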

/* Trim the main arena. */

static int
internal_function
#if __STD_C
main_trim(size_t pad)
#else
main_trim(pad) size_t pad;
#endif
{
  mchunkptr top_chunk;   /* The current top chunk */
  long  top_size;        /* Amount of top-most memory */
  long  extra;           /* Amount to release */
  char* current_brk;     /* address returned by pre-check sbrk call */
  char* new_brk;         /* address returned by negative sbrk call */

  unsigned long pagesz = malloc_getpagesize;

  top_chunk = top(&main_arena);
  top_size = chunksize(top_chunk);
  extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;

  if (extra < (long)pagesz) /* Not enough memory to release */
    return 0;

  /* Test to make sure no one else called sbrk */
  current_brk = (char*)(MORECORE (0));
  if (current_brk != (char*)(top_chunk) + top_size)
    return 0;     /* Apparently we don't own memory; must fail */

  new_brk = (char*)(MORECORE (-extra));

#if defined _LIBC || defined MALLOC_HOOKS
  /* Call the `morecore' hook if necessary. */
  if (__after_morecore_hook)
    (*__after_morecore_hook) ();
#endif

  if (new_brk == (char*)(MORECORE_FAILURE)) { /* sbrk failed? */
    /* Try to figure out what we have */
    current_brk = (char*)(MORECORE (0));
    top_size = current_brk - (char*)top_chunk;
    if (top_size >= (long)MINSIZE) /* if not, we are very very dead! */
    {
      sbrked_mem = current_brk - sbrk_base;
      set_head(top_chunk, top_size | PREV_INUSE);
    }
    check_chunk(&main_arena, top_chunk);
    return 0;
  }
  sbrked_mem -= extra;

  /* Success. Adjust top accordingly. */
  set_head(top_chunk, (top_size - extra) | PREV_INUSE);
  check_chunk(&main_arena, top_chunk);
  return 1;
}

#if USE_ARENAS

static int
internal_function
#if __STD_C
heap_trim(heap_info *heap, size_t pad)
#else
heap_trim(heap, pad) heap_info *heap; size_t pad;
#endif
{
  unsigned long pagesz = malloc_getpagesize;
  arena *ar_ptr = heap->ar_ptr;
  mchunkptr top_chunk = top(ar_ptr), p, bck, fwd;
  heap_info *prev_heap;
  long new_size, top_size, extra;

  /* Can this heap go away completely ? */
  while(top_chunk == chunk_at_offset(heap, sizeof(*heap))) {
    prev_heap = heap->prev;
    p = chunk_at_offset(prev_heap, prev_heap->size - (MINSIZE-2*SIZE_SZ));
    assert(p->size == (0|PREV_INUSE)); /* must be fencepost */
    p = prev_chunk(p);
    new_size = chunksize(p) + (MINSIZE-2*SIZE_SZ);
    assert(new_size>0 && new_size<(long)(2*MINSIZE));
    if(!prev_inuse(p))
      new_size += p->prev_size;
    assert(new_size>0 && new_size<HEAP_MAX_SIZE);
    if(new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
      break;
    ar_ptr->size -= heap->size;
    arena_mem -= heap->size;
    delete_heap(heap);
    heap = prev_heap;
    if(!prev_inuse(p)) { /* consolidate backward */
      p = prev_chunk(p);
      unlink(p, bck, fwd);
    }
    assert(((unsigned long)((char*)p + new_size) & (pagesz-1)) == 0);
    assert( ((char*)p + new_size) == ((char*)heap + heap->size) );
    top(ar_ptr) = top_chunk = p;
    set_head(top_chunk, new_size | PREV_INUSE);
    check_chunk(ar_ptr, top_chunk);
  }
  top_size = chunksize(top_chunk);
  extra = ((top_size - pad - MINSIZE + (pagesz-1))/pagesz - 1) * pagesz;
  if(extra < (long)pagesz)
    return 0;
  /* Try to shrink. */
  if(grow_heap(heap, -extra) != 0)
    return 0;
  ar_ptr->size -= extra;
  arena_mem -= extra;

  /* Success. Adjust top accordingly. */
  set_head(top_chunk, (top_size - extra) | PREV_INUSE);
  check_chunk(ar_ptr, top_chunk);
  return 1;
}

#endif /* USE_ARENAS */



/*
  malloc_usable_size:

    This routine tells you how many bytes you can actually use in an
    allocated chunk, which may be more than you requested (although
    often not). You can use this many bytes without worrying about
    overwriting other allocated objects. Not a particularly great
    programming practice, but still sometimes useful.

*/

#if __STD_C
size_t mALLOC_USABLE_SIZe(Void_t* mem)
#else
size_t mALLOC_USABLE_SIZe(mem) Void_t* mem;
#endif
{
  mchunkptr p;

  if (mem == 0)
    return 0;
  else
  {
    p = mem2chunk(mem);
    if(!chunk_is_mmapped(p))
    {
      if (!inuse(p)) return 0;
      check_inuse_chunk(arena_for_ptr(mem), p);
      return chunksize(p) - SIZE_SZ;
    }
    return chunksize(p) - 2*SIZE_SZ;
  }
}
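
/* A usage sketch for malloc_usable_size: the usable size is at least
   what was requested, and the surplus may be written safely.  glibc
   declares it in <malloc.h>; compiled out. */
#if 0
#include <assert.h>
#include <malloc.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
  char *p = malloc(100);
  if (p) {
    size_t usable = malloc_usable_size(p);
    assert(usable >= 100);
    memset(p, 0, usable);   /* the whole usable span is writable */
    free(p);
  }
  return 0;
}
#endif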




/* Utility to update mallinfo for malloc_stats() and mallinfo() */

static void
#if __STD_C
malloc_update_mallinfo(arena *ar_ptr, struct mallinfo *mi)
#else
malloc_update_mallinfo(ar_ptr, mi) arena *ar_ptr; struct mallinfo *mi;
#endif
{
  int i, navail;
  mbinptr b;
  mchunkptr p;
#if MALLOC_DEBUG
  mchunkptr q;
#endif
  INTERNAL_SIZE_T avail;

  (void)mutex_lock(&ar_ptr->mutex);
  avail = chunksize(top(ar_ptr));
  navail = ((long)(avail) >= (long)MINSIZE)? 1 : 0;

  for (i = 1; i < NAV; ++i)
  {
    b = bin_at(ar_ptr, i);
    for (p = last(b); p != b; p = p->bk)
    {
#if MALLOC_DEBUG
      check_free_chunk(ar_ptr, p);
      for (q = next_chunk(p);
           q != top(ar_ptr) && inuse(q) && (long)chunksize(q) > 0;
           q = next_chunk(q))
        check_inuse_chunk(ar_ptr, q);
#endif
      avail += chunksize(p);
      navail++;
    }
  }

  mi->arena = ar_ptr->size;
  mi->ordblks = navail;
  mi->smblks = mi->usmblks = mi->fsmblks = 0; /* clear unused fields */
  mi->uordblks = ar_ptr->size - avail;
  mi->fordblks = avail;
  mi->hblks = n_mmaps;
  mi->hblkhd = mmapped_mem;
  mi->keepcost = chunksize(top(ar_ptr));

  (void)mutex_unlock(&ar_ptr->mutex);
}

#if USE_ARENAS && MALLOC_DEBUG > 1

/* Print the complete contents of a single heap to stderr. */

static void
#if __STD_C
dump_heap(heap_info *heap)
#else
dump_heap(heap) heap_info *heap;
#endif
{
  char *ptr;
  mchunkptr p;

  fprintf(stderr, "Heap %p, size %10lx:\n", heap, (long)heap->size);
  ptr = (heap->ar_ptr != (arena*)(heap+1)) ?
    (char*)(heap + 1) : (char*)(heap + 1) + sizeof(arena);
  p = (mchunkptr)(((unsigned long)ptr + MALLOC_ALIGN_MASK) &
                  ~MALLOC_ALIGN_MASK);
  for(;;) {
    fprintf(stderr, "chunk %p size %10lx", p, (long)p->size);
    if(p == top(heap->ar_ptr)) {
      fprintf(stderr, " (top)\n");
      break;
    } else if(p->size == (0|PREV_INUSE)) {
      fprintf(stderr, " (fence)\n");
      break;
    }
    fprintf(stderr, "\n");
    p = next_chunk(p);
  }
}

#endif



/*

  malloc_stats:

    For all arenas separately and in total, prints on stderr the
    amount of space obtained from the system, and the current number
    of bytes allocated via malloc (or realloc, etc) but not yet
    freed. (Note that this is the number of bytes allocated, not the
    number requested. It will be larger than the number requested
    because of alignment and bookkeeping overhead.) When not compiled
    for multiple threads, the maximum amount of allocated memory
    (which may be more than current if malloc_trim and/or munmap got
    called) is also reported. When using mmap(), prints the maximum
    number of simultaneous mmap regions used, too.

*/

void mALLOC_STATs()
{
  int i;
  arena *ar_ptr;
  struct mallinfo mi;
  unsigned int in_use_b = mmapped_mem, system_b = in_use_b;
#if THREAD_STATS
  long stat_lock_direct = 0, stat_lock_loop = 0, stat_lock_wait = 0;
#endif

  for(i=0, ar_ptr = &main_arena;; i++) {
    malloc_update_mallinfo(ar_ptr, &mi);
    fprintf(stderr, "Arena %d:\n", i);
    fprintf(stderr, "system bytes     = %10u\n", (unsigned int)mi.arena);
    fprintf(stderr, "in use bytes     = %10u\n", (unsigned int)mi.uordblks);
    system_b += mi.arena;
    in_use_b += mi.uordblks;
#if THREAD_STATS
    stat_lock_direct += ar_ptr->stat_lock_direct;
    stat_lock_loop += ar_ptr->stat_lock_loop;
    stat_lock_wait += ar_ptr->stat_lock_wait;
#endif
#if USE_ARENAS && MALLOC_DEBUG > 1
    if(ar_ptr != &main_arena) {
      heap_info *heap;
      (void)mutex_lock(&ar_ptr->mutex);
      heap = heap_for_ptr(top(ar_ptr));
      while(heap) { dump_heap(heap); heap = heap->prev; }
      (void)mutex_unlock(&ar_ptr->mutex);
    }
#endif
    ar_ptr = ar_ptr->next;
    if(ar_ptr == &main_arena) break;
  }
#if HAVE_MMAP
  fprintf(stderr, "Total (incl. mmap):\n");
#else
  fprintf(stderr, "Total:\n");
#endif
  fprintf(stderr, "system bytes     = %10u\n", system_b);
  fprintf(stderr, "in use bytes     = %10u\n", in_use_b);
#ifdef NO_THREADS
  fprintf(stderr, "max system bytes = %10u\n", (unsigned int)max_total_mem);
#endif
#if HAVE_MMAP
  fprintf(stderr, "max mmap regions = %10u\n", (unsigned int)max_n_mmaps);
  fprintf(stderr, "max mmap bytes   = %10lu\n", max_mmapped_mem);
#endif
#if THREAD_STATS
  fprintf(stderr, "heaps created    = %10d\n",  stat_n_heaps);
  fprintf(stderr, "locked directly  = %10ld\n", stat_lock_direct);
  fprintf(stderr, "locked in loop   = %10ld\n", stat_lock_loop);
  fprintf(stderr, "locked waiting   = %10ld\n", stat_lock_wait);
  fprintf(stderr, "locked total     = %10ld\n",
          stat_lock_direct + stat_lock_loop + stat_lock_wait);
#endif
}

/*
  mallinfo returns a copy of updated current mallinfo.
  The information reported is for the arena last used by the thread.
*/

struct mallinfo mALLINFo()
{
  struct mallinfo mi;
  Void_t *vptr = NULL;

#ifndef NO_THREADS
  tsd_getspecific(arena_key, vptr);
  if(vptr == ATFORK_ARENA_PTR)
    vptr = (Void_t*)&main_arena;
#endif
  malloc_update_mallinfo((vptr ? (arena*)vptr : &main_arena), &mi);
  return mi;
}
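
/* A usage sketch for mallinfo(): the fields filled in by
   malloc_update_mallinfo above can be read back to watch an arena's
   footprint.  Standard glibc <malloc.h> interface; compiled out. */
#if 0
#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
  void *p = malloc(10000);
  struct mallinfo mi = mallinfo();

  printf("arena size      : %d\n", mi.arena);    /* bytes from the system */
  printf("in-use bytes    : %d\n", mi.uordblks);
  printf("free bytes      : %d\n", mi.fordblks);
  printf("trimmable bytes : %d\n", mi.keepcost); /* releasable top space */
  free(p);
  return 0;
}
#endif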


/*
  mallopt:

  mallopt is the general SVID/XPG interface to tunable parameters.
  The format is to provide a (parameter-number, parameter-value) pair.
  mallopt then sets the corresponding parameter to the argument
  value if it can (i.e., so long as the value is meaningful),
  and returns 1 if successful else 0.

  See descriptions of tunable parameters above.

*/

#if __STD_C
int mALLOPt(int param_number, int value)
#else
int mALLOPt(param_number, value) int param_number; int value;
#endif
{
  switch(param_number)
  {
    case M_TRIM_THRESHOLD:
      trim_threshold = value; return 1;
    case M_TOP_PAD:
      top_pad = value; return 1;
    case M_MMAP_THRESHOLD:
#if USE_ARENAS
      /* Forbid setting the threshold too high. */
      if((unsigned long)value > HEAP_MAX_SIZE/2) return 0;
#endif
      mmap_threshold = value; return 1;
    case M_MMAP_MAX:
#if HAVE_MMAP
      n_mmaps_max = value; return 1;
#else
      if (value != 0) return 0; else n_mmaps_max = value; return 1;
#endif
    case M_CHECK_ACTION:
      check_action = value; return 1;

    default:
      return 0;
  }
}
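
/* Illustrative only: typical calls for the tunables handled above.  A
   return of 0 means the request was rejected, e.g. an M_MMAP_THRESHOLD
   above HEAP_MAX_SIZE/2 when arenas are in use.

     if (!mallopt(M_TRIM_THRESHOLD, 256*1024))
       fprintf(stderr, "mallopt(M_TRIM_THRESHOLD) rejected\n");
     if (!mallopt(M_MMAP_MAX, 0))   // 0 disables mmap()ed allocations
       fprintf(stderr, "mallopt(M_MMAP_MAX) rejected\n");
*/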


/* Get/set state: malloc_get_state() records the current state of all
   malloc variables (_except_ for the actual heap contents and `hook'
   function pointers) in a system dependent, opaque data structure.
   This data structure is dynamically allocated and can be free()d
   after use.  malloc_set_state() restores the state of all malloc
   variables to the previously obtained state.  This is especially
   useful when using this malloc as part of a shared library, and when
   the heap contents are saved/restored via some other method.  The
   primary example for this is GNU Emacs with its `dumping' procedure.
   `Hook' function pointers are never saved or restored by these
   functions, with two exceptions: If malloc checking was in use when
   malloc_get_state() was called, then malloc_set_state() calls
   __malloc_check_init() if possible; if malloc checking was not in
   use in the recorded state but the user requested malloc checking,
   then the hooks are reset to 0. */

#define MALLOC_STATE_MAGIC   0x444c4541l
#define MALLOC_STATE_VERSION (0*0x100l + 1l) /* major*0x100 + minor */

struct malloc_state {
  long          magic;
  long          version;
  mbinptr       av[NAV * 2 + 2];
  char*         sbrk_base;
  int           sbrked_mem_bytes;
  unsigned long trim_threshold;
  unsigned long top_pad;
  unsigned int  n_mmaps_max;
  unsigned long mmap_threshold;
  int           check_action;
  unsigned long max_sbrked_mem;
  unsigned long max_total_mem;
  unsigned int  n_mmaps;
  unsigned int  max_n_mmaps;
  unsigned long mmapped_mem;
  unsigned long max_mmapped_mem;
  int           using_malloc_checking;
};

Void_t*
mALLOC_GET_STATe()
{
  struct malloc_state* ms;
  int i;
  mbinptr b;

  ms = (struct malloc_state*)mALLOc(sizeof(*ms));
  if (!ms)
    return 0;
  (void)mutex_lock(&main_arena.mutex);
  ms->magic = MALLOC_STATE_MAGIC;
  ms->version = MALLOC_STATE_VERSION;
  ms->av[0] = main_arena.av[0];
  ms->av[1] = main_arena.av[1];
  for(i=0; i<NAV; i++) {
    b = bin_at(&main_arena, i);
    if(first(b) == b)
      ms->av[2*i+2] = ms->av[2*i+3] = 0; /* empty bin (or initial top) */
    else {
      ms->av[2*i+2] = first(b);
      ms->av[2*i+3] = last(b);
    }
  }
  ms->sbrk_base = sbrk_base;
  ms->sbrked_mem_bytes = sbrked_mem;
  ms->trim_threshold = trim_threshold;
  ms->top_pad = top_pad;
  ms->n_mmaps_max = n_mmaps_max;
  ms->mmap_threshold = mmap_threshold;
  ms->check_action = check_action;
  ms->max_sbrked_mem = max_sbrked_mem;
#ifdef NO_THREADS
  ms->max_total_mem = max_total_mem;
#else
  ms->max_total_mem = 0;
#endif
  ms->n_mmaps = n_mmaps;
  ms->max_n_mmaps = max_n_mmaps;
  ms->mmapped_mem = mmapped_mem;
  ms->max_mmapped_mem = max_mmapped_mem;
#if defined _LIBC || defined MALLOC_HOOKS
  ms->using_malloc_checking = using_malloc_checking;
#else
  ms->using_malloc_checking = 0;
#endif
  (void)mutex_unlock(&main_arena.mutex);
  return (Void_t*)ms;
}

int
#if __STD_C
mALLOC_SET_STATe(Void_t* msptr)
#else
mALLOC_SET_STATe(msptr) Void_t* msptr;
#endif
{
  struct malloc_state* ms = (struct malloc_state*)msptr;
  int i;
  mbinptr b;

#if defined _LIBC || defined MALLOC_HOOKS
  disallow_malloc_check = 1;
#endif
  ptmalloc_init();
  if(ms->magic != MALLOC_STATE_MAGIC) return -1;
  /* Must fail if the major version is too high. */
  if((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl)) return -2;
  (void)mutex_lock(&main_arena.mutex);
  main_arena.av[0] = ms->av[0];
  main_arena.av[1] = ms->av[1];
  for(i=0; i<NAV; i++) {
    b = bin_at(&main_arena, i);
    if(ms->av[2*i+2] == 0)
      first(b) = last(b) = b;
    else {
      first(b) = ms->av[2*i+2];
      last(b) = ms->av[2*i+3];
      if(i > 0) {
        /* Make sure the links to the `av'-bins in the heap are correct. */
        first(b)->bk = b;
        last(b)->fd = b;
      }
    }
  }
  sbrk_base = ms->sbrk_base;
  sbrked_mem = ms->sbrked_mem_bytes;
  trim_threshold = ms->trim_threshold;
  top_pad = ms->top_pad;
  n_mmaps_max = ms->n_mmaps_max;
  mmap_threshold = ms->mmap_threshold;
  check_action = ms->check_action;
  max_sbrked_mem = ms->max_sbrked_mem;
#ifdef NO_THREADS
  max_total_mem = ms->max_total_mem;
#endif
  n_mmaps = ms->n_mmaps;
  max_n_mmaps = ms->max_n_mmaps;
  mmapped_mem = ms->mmapped_mem;
  max_mmapped_mem = ms->max_mmapped_mem;
  /* add version-dependent code here */
  if (ms->version >= 1) {
#if defined _LIBC || defined MALLOC_HOOKS
    /* Check whether it is safe to enable malloc checking, or whether
       it is necessary to disable it. */
    if (ms->using_malloc_checking && !using_malloc_checking &&
        !disallow_malloc_check)
      __malloc_check_init ();
    else if (!ms->using_malloc_checking && using_malloc_checking) {
      __malloc_hook = 0;
      __free_hook = 0;
      __realloc_hook = 0;
      __memalign_hook = 0;
      using_malloc_checking = 0;
    }
#endif
  }

  (void)mutex_unlock(&main_arena.mutex);
  return 0;
}
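
/* Illustrative only: the intended save/restore round trip, as in the
   GNU Emacs `dumping' case described above.  The opaque blob must be
   preserved verbatim between the two calls.

     void *state = malloc_get_state();    // snapshot malloc variables
     if (state) {
       // ... save/restore the heap contents by some external means ...
       if (malloc_set_state(state) != 0)  // 0 on success, <0 on mismatch
         abort();
       free(state);                       // the blob was malloc()ed
     }
*/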


#if defined _LIBC || defined MALLOC_HOOKS

/* A simple, standard set of debugging hooks.  Overhead is `only' one
   byte per chunk; still this will catch most cases of double frees or
   overruns.  The goal here is to avoid obscure crashes due to invalid
   usage, unlike in the MALLOC_DEBUG code. */

#define MAGICBYTE(p) ( ( ((size_t)p >> 3) ^ ((size_t)p >> 11)) & 0xFF )
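
/* Illustrative only: a worked example of the encoding performed by
   chunk2mem_check() and verified by mem2chunk_check() below.  Suppose
   the requested size is sz = 10 and the last checkable user offset in
   the chunk is 300.  The instrumentation writes, relative to the user
   pointer m_ptr:

     m_ptr[10]  = MAGICBYTE(p)   // magic byte at the requested size
     m_ptr[300] = 0xFF           // skip 0xFF bytes downward...
     m_ptr[45]  = 35             // ...then 35 more, landing on offset 10

   The check walks this chain from the chunk end down to the magic byte;
   an overrun that clobbers the magic byte or any skip byte makes the
   pointer validation fail. */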

/* Instrument a chunk with overrun detector byte(s) and convert it
   into a user pointer with requested size sz. */

static Void_t*
internal_function
#if __STD_C
chunk2mem_check(mchunkptr p, size_t sz)
#else
chunk2mem_check(p, sz) mchunkptr p; size_t sz;
#endif
{
  unsigned char* m_ptr = (unsigned char*)BOUNDED_N(chunk2mem(p), sz);
  size_t i;

  for(i = chunksize(p) - (chunk_is_mmapped(p) ? 2*SIZE_SZ+1 : SIZE_SZ+1);
      i > sz;
      i -= 0xFF) {
    if(i-sz < 0x100) {
      m_ptr[i] = (unsigned char)(i-sz);
      break;
    }
    m_ptr[i] = 0xFF;
  }
  m_ptr[sz] = MAGICBYTE(p);
  return (Void_t*)m_ptr;
}

/* Convert a pointer to be free()d or realloc()ed to a valid chunk
   pointer.  If the provided pointer is not valid, return NULL. */

static mchunkptr
internal_function
#if __STD_C
mem2chunk_check(Void_t* mem)
#else
mem2chunk_check(mem) Void_t* mem;
#endif
{
  mchunkptr p;
  INTERNAL_SIZE_T sz, c;
  unsigned char magic;

  p = mem2chunk(mem);
  if(!aligned_OK(p)) return NULL;
  if( (char*)p>=sbrk_base && (char*)p<(sbrk_base+sbrked_mem) ) {
    /* Must be a chunk in conventional heap memory. */
    if(chunk_is_mmapped(p) ||
       ( (sz = chunksize(p)), ((char*)p + sz)>=(sbrk_base+sbrked_mem) ) ||
       sz<MINSIZE || sz&MALLOC_ALIGN_MASK || !inuse(p) ||
       ( !prev_inuse(p) && (p->prev_size&MALLOC_ALIGN_MASK ||
                            (long)prev_chunk(p)<(long)sbrk_base ||
                            next_chunk(prev_chunk(p))!=p) ))
      return NULL;
    magic = MAGICBYTE(p);
    for(sz += SIZE_SZ-1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
    ((unsigned char*)p)[sz] ^= 0xFF;
  } else {
    unsigned long offset, page_mask = malloc_getpagesize-1;

    /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
       alignment relative to the beginning of a page.  Check this
       first. */
    offset = (unsigned long)mem & page_mask;
    if((offset!=MALLOC_ALIGNMENT && offset!=0 && offset!=0x10 &&
        offset!=0x20 && offset!=0x40 && offset!=0x80 && offset!=0x100 &&
        offset!=0x200 && offset!=0x400 && offset!=0x800 && offset!=0x1000 &&
        offset<0x2000) ||
       !chunk_is_mmapped(p) || (p->size & PREV_INUSE) ||
       ( (((unsigned long)p - p->prev_size) & page_mask) != 0 ) ||
       ( (sz = chunksize(p)), ((p->prev_size + sz) & page_mask) != 0 ) )
      return NULL;
    magic = MAGICBYTE(p);
    for(sz -= 1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
    ((unsigned char*)p)[sz] ^= 0xFF;
  }
  return p;
}

/* Check for corruption of the top chunk, and try to recover if
   necessary. */

static int
internal_function
#if __STD_C
top_check(void)
#else
top_check()
#endif
{
  mchunkptr t = top(&main_arena);
  char* brk, * new_brk;
  INTERNAL_SIZE_T front_misalign, sbrk_size;
  unsigned long pagesz = malloc_getpagesize;

  if((char*)t + chunksize(t) == sbrk_base + sbrked_mem ||
     t == initial_top(&main_arena)) return 0;

  if(check_action & 1)
    fprintf(stderr, "malloc: top chunk is corrupt\n");
  if(check_action & 2)
    abort();

  /* Try to set up a new top chunk. */
  brk = MORECORE(0);
  front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK;
  if (front_misalign > 0)
    front_misalign = MALLOC_ALIGNMENT - front_misalign;
  sbrk_size = front_misalign + top_pad + MINSIZE;
  sbrk_size += pagesz - ((unsigned long)(brk + sbrk_size) & (pagesz - 1));
  new_brk = (char*)(MORECORE (sbrk_size));
  if (new_brk == (char*)(MORECORE_FAILURE)) return -1;
  sbrked_mem = (new_brk - sbrk_base) + sbrk_size;

  top(&main_arena) = (mchunkptr)(brk + front_misalign);
  set_head(top(&main_arena), (sbrk_size - front_misalign) | PREV_INUSE);

  return 0;
}

static Void_t*
#if __STD_C
malloc_check(size_t sz, const Void_t *caller)
#else
malloc_check(sz, caller) size_t sz; const Void_t *caller;
#endif
{
  mchunkptr victim;
  INTERNAL_SIZE_T nb;

  if(request2size(sz+1, nb))
    return 0;
  (void)mutex_lock(&main_arena.mutex);
  victim = (top_check() >= 0) ? chunk_alloc(&main_arena, nb) : NULL;
  (void)mutex_unlock(&main_arena.mutex);
  if(!victim) return NULL;
  return chunk2mem_check(victim, sz);
}

static void
#if __STD_C
free_check(Void_t* mem, const Void_t *caller)
#else
free_check(mem, caller) Void_t* mem; const Void_t *caller;
#endif
{
  mchunkptr p;

  if(!mem) return;
  (void)mutex_lock(&main_arena.mutex);
  p = mem2chunk_check(mem);
  if(!p) {
    (void)mutex_unlock(&main_arena.mutex);
    if(check_action & 1)
      fprintf(stderr, "free(): invalid pointer %p!\n", mem);
    if(check_action & 2)
      abort();
    return;
  }
#if HAVE_MMAP
  if (chunk_is_mmapped(p)) {
    (void)mutex_unlock(&main_arena.mutex);
    munmap_chunk(p);
    return;
  }
#endif
#if 0 /* Erase freed memory. */
  memset(mem, 0, chunksize(p) - (SIZE_SZ+1));
#endif
  chunk_free(&main_arena, p);
  (void)mutex_unlock(&main_arena.mutex);
}

static Void_t*
#if __STD_C
realloc_check(Void_t* oldmem, size_t bytes, const Void_t *caller)
#else
realloc_check(oldmem, bytes, caller)
     Void_t* oldmem; size_t bytes; const Void_t *caller;
#endif
{
  mchunkptr oldp, newp;
  INTERNAL_SIZE_T nb, oldsize;

  if (oldmem == 0) return malloc_check(bytes, NULL);
  (void)mutex_lock(&main_arena.mutex);
  oldp = mem2chunk_check(oldmem);
  if(!oldp) {
    (void)mutex_unlock(&main_arena.mutex);
    if(check_action & 1)
      fprintf(stderr, "realloc(): invalid pointer %p!\n", oldmem);
    if(check_action & 2)
      abort();
    return malloc_check(bytes, NULL);
  }
  oldsize = chunksize(oldp);

  if(request2size(bytes+1, nb)) {
    (void)mutex_unlock(&main_arena.mutex);
    return 0;
  }

#if HAVE_MMAP
  if (chunk_is_mmapped(oldp)) {
#if HAVE_MREMAP
    newp = mremap_chunk(oldp, nb);
    if(!newp) {
#endif
      /* Note the extra SIZE_SZ overhead. */
      if(oldsize - SIZE_SZ >= nb) newp = oldp; /* do nothing */
      else {
        /* Must alloc, copy, free. */
        newp = (top_check() >= 0) ? chunk_alloc(&main_arena, nb) : NULL;
        if (newp) {
          MALLOC_COPY(BOUNDED_N(chunk2mem(newp), nb),
                      oldmem, oldsize - 2*SIZE_SZ, 0);
          munmap_chunk(oldp);
        }
      }
#if HAVE_MREMAP
    }
#endif
  } else {
#endif /* HAVE_MMAP */
    newp = (top_check() >= 0) ?
      chunk_realloc(&main_arena, oldp, oldsize, nb) : NULL;
#if 0 /* Erase freed memory. */
    nb = chunksize(newp);
    if(oldp<newp || oldp>=chunk_at_offset(newp, nb)) {
      memset((char*)oldmem + 2*sizeof(mbinptr), 0,
             oldsize - (2*sizeof(mbinptr)+2*SIZE_SZ+1));
    } else if(nb > oldsize+SIZE_SZ) {
      memset((char*)BOUNDED_N(chunk2mem(newp), bytes) + oldsize,
             0, nb - (oldsize+SIZE_SZ));
    }
#endif
#if HAVE_MMAP
  }
#endif
  (void)mutex_unlock(&main_arena.mutex);

  if(!newp) return NULL;
  return chunk2mem_check(newp, bytes);
}

static Void_t*
#if __STD_C
memalign_check(size_t alignment, size_t bytes, const Void_t *caller)
#else
memalign_check(alignment, bytes, caller)
     size_t alignment; size_t bytes; const Void_t *caller;
#endif
{
  INTERNAL_SIZE_T nb;
  mchunkptr p;

  if (alignment <= MALLOC_ALIGNMENT) return malloc_check(bytes, NULL);
  if (alignment <  MINSIZE) alignment = MINSIZE;

  if(request2size(bytes+1, nb))
    return 0;
  (void)mutex_lock(&main_arena.mutex);
  p = (top_check() >= 0) ? chunk_align(&main_arena, nb, alignment) : NULL;
  (void)mutex_unlock(&main_arena.mutex);
  if(!p) return NULL;
  return chunk2mem_check(p, bytes);
}

#ifndef NO_THREADS

/* The following hooks are used when the global initialization in
   ptmalloc_init() hasn't completed yet. */

static Void_t*
#if __STD_C
malloc_starter(size_t sz, const Void_t *caller)
#else
malloc_starter(sz, caller) size_t sz; const Void_t *caller;
#endif
{
  INTERNAL_SIZE_T nb;
  mchunkptr victim;

  if(request2size(sz, nb))
    return 0;
  victim = chunk_alloc(&main_arena, nb);

  return victim ? BOUNDED_N(chunk2mem(victim), sz) : 0;
}

static void
#if __STD_C
free_starter(Void_t* mem, const Void_t *caller)
#else
free_starter(mem, caller) Void_t* mem; const Void_t *caller;
#endif
{
  mchunkptr p;

  if(!mem) return;
  p = mem2chunk(mem);
#if HAVE_MMAP
  if (chunk_is_mmapped(p)) {
    munmap_chunk(p);
    return;
  }
#endif
  chunk_free(&main_arena, p);
}

/* The following hooks are used while the `atfork' handling mechanism
   is active. */

static Void_t*
#if __STD_C
malloc_atfork (size_t sz, const Void_t *caller)
#else
malloc_atfork(sz, caller) size_t sz; const Void_t *caller;
#endif
{
  Void_t *vptr = NULL;
  INTERNAL_SIZE_T nb;
  mchunkptr victim;

  tsd_getspecific(arena_key, vptr);
  if(vptr == ATFORK_ARENA_PTR) {
    /* We are the only thread that may allocate at all. */
    if(save_malloc_hook != malloc_check) {
      if(request2size(sz, nb))
        return 0;
      victim = chunk_alloc(&main_arena, nb);
      return victim ? BOUNDED_N(chunk2mem(victim), sz) : 0;
    } else {
      if(top_check()<0 || request2size(sz+1, nb))
        return 0;
      victim = chunk_alloc(&main_arena, nb);
      return victim ? chunk2mem_check(victim, sz) : 0;
    }
  } else {
    /* Suspend the thread until the `atfork' handlers have completed.
       By that time, the hooks will have been reset as well, so that
       mALLOc() can be used again. */
    (void)mutex_lock(&list_lock);
    (void)mutex_unlock(&list_lock);
    return mALLOc(sz);
  }
}

static void
#if __STD_C
free_atfork(Void_t* mem, const Void_t *caller)
#else
free_atfork(mem, caller) Void_t* mem; const Void_t *caller;
#endif
{
  Void_t *vptr = NULL;
  arena *ar_ptr;
  mchunkptr p;                          /* chunk corresponding to mem */

  if (mem == 0)                              /* free(0) has no effect */
    return;

  p = mem2chunk(mem);         /* do not bother to replicate free_check here */

#if HAVE_MMAP
  if (chunk_is_mmapped(p))                       /* release mmapped memory. */
  {
    munmap_chunk(p);
    return;
  }
#endif

  ar_ptr = arena_for_ptr(p);
  tsd_getspecific(arena_key, vptr);
  if(vptr != ATFORK_ARENA_PTR)
    (void)mutex_lock(&ar_ptr->mutex);
  chunk_free(ar_ptr, p);
  if(vptr != ATFORK_ARENA_PTR)
    (void)mutex_unlock(&ar_ptr->mutex);
}

#endif /* !defined NO_THREADS */

#endif /* defined _LIBC || defined MALLOC_HOOKS */


#ifdef _LIBC

/* default method of getting more storage */
__malloc_ptr_t
__default_morecore (int inc)
{
  __malloc_ptr_t result = (__malloc_ptr_t)sbrk (inc);
  if (result == (__malloc_ptr_t)-1)
    return NULL;
  return result;
}

/* We need a wrapper function for one of the additions of POSIX. */
int
__posix_memalign (void **memptr, size_t alignment, size_t size)
{
  void *mem;

  /* Test whether the ALIGNMENT argument is valid.  It must be a power
     of two multiple of sizeof (void *). */
  if (alignment % sizeof (void *) != 0 || (alignment & (alignment - 1)) != 0)
    return EINVAL;

  mem = __libc_memalign (alignment, size);

  if (mem != NULL)
    {
      *memptr = mem;
      return 0;
    }

  return ENOMEM;
}
weak_alias (__posix_memalign, posix_memalign)
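
/* Illustrative only: the POSIX calling convention implemented above --
   the allocation comes back through memptr, and the return value is 0
   or an error code, never a pointer.

     void *buf;
     int err = posix_memalign(&buf, 64, 1024);  // 64-byte alignment
     if (err != 0)
       fprintf(stderr, "posix_memalign: %s\n", strerror(err));
     else
       free(buf);
*/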

weak_alias (__libc_calloc, __calloc) weak_alias (__libc_calloc, calloc)
weak_alias (__libc_free, __cfree) weak_alias (__libc_free, cfree)
weak_alias (__libc_free, __free) weak_alias (__libc_free, free)
weak_alias (__libc_malloc, __malloc) weak_alias (__libc_malloc, malloc)
weak_alias (__libc_memalign, __memalign) weak_alias (__libc_memalign, memalign)
weak_alias (__libc_realloc, __realloc) weak_alias (__libc_realloc, realloc)
weak_alias (__libc_valloc, __valloc) weak_alias (__libc_valloc, valloc)
weak_alias (__libc_pvalloc, __pvalloc) weak_alias (__libc_pvalloc, pvalloc)
weak_alias (__libc_mallinfo, __mallinfo) weak_alias (__libc_mallinfo, mallinfo)
weak_alias (__libc_mallopt, __mallopt) weak_alias (__libc_mallopt, mallopt)

weak_alias (__malloc_stats, malloc_stats)
weak_alias (__malloc_usable_size, malloc_usable_size)
weak_alias (__malloc_trim, malloc_trim)
weak_alias (__malloc_get_state, malloc_get_state)
weak_alias (__malloc_set_state, malloc_set_state)
#endif

/*

History:

    V2.6.4-pt3 Thu Feb 20 1997 Wolfram Gloger (wmglo@dent.med.uni-muenchen.de)
      * Added malloc_get/set_state() (mainly for use in GNU emacs),
        using interface from Marcus Daniels
      * All parameters are now adjustable via environment variables

    V2.6.4-pt2 Sat Dec 14 1996 Wolfram Gloger (wmglo@dent.med.uni-muenchen.de)
      * Added debugging hooks
      * Fixed possible deadlock in realloc() when out of memory
      * Don't pollute namespace in glibc: use __getpagesize, __mmap, etc.

    V2.6.4-pt Wed Dec  4 1996 Wolfram Gloger (wmglo@dent.med.uni-muenchen.de)
      * Very minor updates from the released 2.6.4 version.
      * Trimmed include file down to exported data structures.
      * Changes from H.J. Lu for glibc-2.0.

    V2.6.3i-pt Sep 16 1996 Wolfram Gloger (wmglo@dent.med.uni-muenchen.de)
      * Many changes for multiple threads
      * Introduced arenas and heaps

    V2.6.3 Sun May 19 08:17:58 1996  Doug Lea  (dl at gee)
      * Added pvalloc, as recommended by H.J. Liu
      * Added 64bit pointer support mainly from Wolfram Gloger
      * Added anonymously donated WIN32 sbrk emulation
      * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
      * malloc_extend_top: fix mask error that caused wastage after
        foreign sbrks
      * Add linux mremap support code from HJ Liu

    V2.6.2 Tue Dec  5 06:52:55 1995  Doug Lea  (dl at gee)
      * Integrated most documentation with the code.
      * Add support for mmap, with help from
        Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
      * Use last_remainder in more cases.
      * Pack bins using idea from colin@nyx10.cs.du.edu
      * Use ordered bins instead of best-fit threshold
      * Eliminate block-local decls to simplify tracing and debugging.
      * Support another case of realloc via move into top
      * Fix error occurring when initial sbrk_base not word-aligned.
      * Rely on page size for units instead of SBRK_UNIT to
        avoid surprises about sbrk alignment conventions.
      * Add mallinfo, mallopt. Thanks to Raymond Nijssen
        (raymond@es.ele.tue.nl) for the suggestion.
      * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
      * More precautions for cases where other routines call sbrk,
        courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
      * Added macros etc., allowing use in linux libc from
        H.J. Lu (hjl@gnu.ai.mit.edu)
      * Inverted this history list

    V2.6.1 Sat Dec  2 14:10:57 1995  Doug Lea  (dl at gee)
      * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
      * Removed all preallocation code since under current scheme
        the work required to undo bad preallocations exceeds
        the work saved in good cases for most test programs.
      * No longer use return list or unconsolidated bins since
        no scheme using them consistently outperforms those that don't
        given above changes.
      * Use best fit for very large chunks to prevent some worst-cases.
      * Added some support for debugging

    V2.6.0 Sat Nov  4 07:05:23 1995  Doug Lea  (dl at gee)
      * Removed footers when chunks are in use. Thanks to
        Paul Wilson (wilson@cs.texas.edu) for the suggestion.

    V2.5.4 Wed Nov  1 07:54:51 1995  Doug Lea  (dl at gee)
      * Added malloc_trim, with help from Wolfram Gloger
        (wmglo@Dent.MED.Uni-Muenchen.DE).

    V2.5.3 Tue Apr 26 10:16:01 1994  Doug Lea  (dl at g)

    V2.5.2 Tue Apr  5 16:20:40 1994  Doug Lea  (dl at g)
      * realloc: try to expand in both directions
      * malloc: swap order of clean-bin strategy;
      * realloc: only conditionally expand backwards
      * Try not to scavenge used bins
      * Use bin counts as a guide to preallocation
      * Occasionally bin return list chunks in first scan
      * Add a few optimizations from colin@nyx10.cs.du.edu

    V2.5.1 Sat Aug 14 15:40:43 1993  Doug Lea  (dl at g)
      * faster bin computation & slightly different binning
      * merged all consolidations to one part of malloc proper
        (eliminating old malloc_find_space & malloc_clean_bin)
      * Scan 2 returns chunks (not just 1)
      * Propagate failure in realloc if malloc returns 0
      * Add stuff to allow compilation on non-ANSI compilers
        from kpv@research.att.com

    V2.5 Sat Aug  7 07:41:59 1993  Doug Lea  (dl at g.oswego.edu)
      * removed potential for odd address access in prev_chunk
      * removed dependency on getpagesize.h
      * misc cosmetics and a bit more internal documentation
      * anticosmetics: mangled names in macros to evade debugger strangeness
      * tested on sparc, hp-700, dec-mips, rs6000
        with gcc & native cc (hp, dec only) allowing
        Detlefs & Zorn comparison study (in SIGPLAN Notices.)

    Trial version Fri Aug 28 13:14:29 1992  Doug Lea  (dl at g.oswego.edu)
      * Based loosely on libg++-1.2X malloc. (It retains some of the overall
        structure of old version, but most details differ.)

*/