//==========================================================================
//
//      dlmalloc.cxx
//
//      Port of Doug Lea's malloc implementation
//
//==========================================================================
//####ECOSGPLCOPYRIGHTBEGIN####
// -------------------------------------------
// This file is part of eCos, the Embedded Configurable Operating System.
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc.
//
// eCos is free software; you can redistribute it and/or modify it under
// the terms of the GNU General Public License as published by the Free
// Software Foundation; either version 2 or (at your option) any later version.
//
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
// for more details.
//
// You should have received a copy of the GNU General Public License along
// with eCos; if not, write to the Free Software Foundation, Inc.,
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
//
// As a special exception, if other files instantiate templates or use macros
// or inline functions from this file, or you compile this file and link it
// with other works to produce a work based on this file, this file does not
// by itself cause the resulting work to be covered by the GNU General Public
// License. However the source code for this file must still be made available
// in accordance with section (3) of the GNU General Public License.
//
// This exception does not invalidate any other reasons why a work based on
// this file might be covered by the GNU General Public License.
//
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc.
// at http://sources.redhat.com/ecos/ecos-license/
// -------------------------------------------
//####ECOSGPLCOPYRIGHTEND####
//==========================================================================
//#####DESCRIPTIONBEGIN####
//
// Author(s):    Doug Lea (dl at g.oswego.edu), jlarmour
// Contributors:
// Date:         2000-06-18
// Purpose:      Doug Lea's malloc implementation
// Description:  Doug Lea's malloc has been ported to eCos. This file
//               provides the implementation in a way acceptable to eCos.
//               Substantial parts of the original implementation that are
//               unnecessary for eCos have been removed to make the code
//               more tractable. Note this may make a number of the
//               comments appear to make little sense, or no longer apply!
//               In particular, mmap support is removed entirely.
//               Also the memory is "sbrked" all at once at the
//               beginning, covering the entire memory region given at
//               construction, and there can be no more afterwards.
// Usage:        #include <cyg/memalloc/dlmalloc.hxx>
//
//
//####DESCRIPTIONEND####
//
//==========================================================================

// DOCUMENTATION FROM ORIGINAL FILE:
// (some now irrelevant parts elided)

//----------------------------------------------------------------------------

/*
  A version of malloc/free/realloc written by Doug Lea and released to the
  public domain.  Send questions/comments/complaints/performance data
  to dl at cs.oswego.edu

* VERSION 2.6.6  Sun Mar  5 19:10:03 2000  Doug Lea  (dl at gee)

   Note: There may be an updated version of this malloc obtainable at
           ftp://g.oswego.edu/pub/misc/malloc.c
         Check before installing!

* Why use this malloc?

  This is not the fastest, most space-conserving, most portable, or
  most tunable malloc ever written. However it is among the fastest
  while also being among the most space-conserving, portable and tunable.
  Consistent balance across these factors results in a good general-purpose
  allocator. For a high-level description, see
     http://g.oswego.edu/dl/html/malloc.html

* Synopsis of public routines

  (Much fuller descriptions are contained in the program documentation below.)

[ these have of course been renamed in the eCos port ]

  malloc(size_t n);
     Return a pointer to a newly allocated chunk of at least n bytes, or null
     if no space is available.
  free(Void_t* p);
     Release the chunk of memory pointed to by p, with no effect if p is null.
  realloc(Void_t* p, size_t n);
     Return a pointer to a chunk of size n that contains the same data
     as does chunk p up to the minimum of (n, p's size) bytes, or null
     if no space is available. The returned pointer may or may not be
     the same as p. If p is null, equivalent to malloc. realloc of
     zero bytes calls free(p)

* Vital statistics:

  Alignment:                            8-byte
       8 byte alignment is currently hardwired into the design.  This
       seems to suffice for all current machines and C compilers.

  Assumed pointer representation:       4 or 8 bytes
       Code for 8-byte pointers is untested by me but has been reported
       to work reliably by Wolfram Gloger, who contributed most of the
       changes supporting this.

  Assumed size_t  representation:       4 or 8 bytes
       Note that size_t is allowed to be 4 bytes even if pointers are 8.

  Minimum overhead per allocated chunk: 4 or 8 bytes
       Each malloced chunk has a hidden overhead of 4 bytes holding size
       and status information.

  Minimum allocated size: 4-byte ptrs:  16 bytes    (including 4 overhead)
                          8-byte ptrs:  24/32 bytes (including 4/8 overhead)

       When a chunk is freed, 12 (for 4-byte ptrs) or 20 (for 8-byte
       ptrs but 4-byte size) or 24 (for 8/8) additional bytes are
       needed; 4 (8) for a trailing size field
       and 8 (16) bytes for free list pointers. Thus, the minimum
       allocatable size is 16/24/32 bytes.

       Even a request for zero bytes (i.e., malloc(0)) returns a
       pointer to something of the minimum allocatable size.

  Maximum allocated size: 4-byte size_t: 2^31 -  8 bytes
                          8-byte size_t: 2^63 - 16 bytes

       It is assumed that (possibly signed) size_t bit values suffice to
       represent chunk sizes. `Possibly signed' is due to the fact
       that `size_t' may be defined on a system as either a signed or
       an unsigned type. To be conservative, values that would appear
       as negative numbers are avoided.
       Requests for sizes with a negative sign bit when the request
       size is treated as a long will return null.

  Maximum overhead wastage per allocated chunk: normally 15 bytes

       Alignment demands, plus the minimum allocatable size restriction
       make the normal worst-case wastage 15 bytes (i.e., up to 15
       more bytes will be allocated than were requested in malloc), with
       one exception: because requests for zero bytes allocate non-zero space,
       the worst case wastage for a request of zero bytes is 24 bytes.

* Limitations

    Here are some features that are NOT currently supported

    * No user-definable hooks for callbacks and the like.
    * No automated mechanism for fully checking that all accesses
      to malloced memory stay within their bounds.
    * No support for compaction.

* Synopsis of compile-time options:

    People have reported using previous versions of this malloc on all
    versions of Unix, sometimes by tweaking some of the defines
    below. It has been tested most extensively on Solaris and
    Linux. It is also reported to work on WIN32 platforms.
    People have also reported adapting this malloc for use in
    stand-alone embedded systems.

    The implementation is in straight, hand-tuned ANSI C.  Among other
    consequences, it uses a lot of macros.  Because of this, to be at
    all usable, this code should be compiled using an optimizing compiler
    (for example gcc -O2) that can simplify expressions and control
    paths.

  CYGDBG_MEMALLOC_ALLOCATOR_DLMALLOC_DEBUG      (default: NOT defined)
     Define to enable debugging. Adds fairly extensive assertion-based
     checking to help track down memory errors, but noticeably slows down
     execution.
  MALLOC_LOCK              (default: NOT defined)
  MALLOC_UNLOCK            (default: NOT defined)
     Define these to C expressions which are run to lock and unlock
     the malloc data structures.  Calls may be nested; that is,
     MALLOC_LOCK may be called more than once before the corresponding
     MALLOC_UNLOCK calls.  MALLOC_LOCK must avoid waiting for a lock
     that it already holds.
  MALLOC_ALIGNMENT          (default: NOT defined)
     Define this to 16 if you need 16-byte alignment instead of the
     normal default of 8-byte alignment.
  SIZE_T_SMALLER_THAN_LONG (default: NOT defined)
     Define this when the platform you are compiling for has
     sizeof(long) > sizeof(size_t).
     The option causes some extra code to be generated to handle operations
     that use size_t operands and have long results.
  INTERNAL_SIZE_T           (default: size_t)
     Define to a 32-bit type (probably `unsigned int') if you are on a
     64-bit machine, yet do not want or need to allow malloc requests of
     greater than 2^31 to be handled. This saves space, especially for
     very small chunks.

*/

//----------------------------------------------------------------------------


/* Preliminaries */

#include <pkgconf/memalloc.h>          // configuration header
#include <pkgconf/infra.h>             // CYGDBG_USE_ASSERTS
#include <cyg/infra/cyg_type.h>        // types
#include <cyg/infra/cyg_ass.h>         // assertions
#include <stddef.h>                    // for size_t
#include <cyg/memalloc/dlmalloc.hxx>
//#include <cyg/infra/diag.h>

/*
    Debugging:

    Because freed chunks may be overwritten with link fields, this
    malloc will often die when freed memory is overwritten by user
    programs.  This can be very effective (albeit in an annoying way)
    in helping track down dangling pointers.

    If you compile with CYGDBG_MEMALLOC_ALLOCATOR_DLMALLOC_DEBUG enabled, a
    number of assertion checks are
    enabled that will catch more memory errors. You probably won't be
    able to make much sense of the actual assertion errors, but they
    should help you locate incorrectly overwritten memory.  The
    checking is fairly extensive, and will slow down execution
    noticeably. Calling get_status() with DEBUG set will
    attempt to check every allocated and free chunk in the
    course of computing the summaries.

    Setting CYGDBG_MEMALLOC_ALLOCATOR_DLMALLOC_DEBUG may also be helpful if you
    are trying to modify this code. The assertions in the check routines
    spell out in more detail the assumptions and invariants underlying
    the algorithms.

*/

#ifdef CYGDBG_MEMALLOC_ALLOCATOR_DLMALLOC_DEBUG
# define ASSERT(x) CYG_ASSERTC( x )
#else
# define ASSERT(x) ((void)0)
#endif


/*
   Define MALLOC_LOCK and MALLOC_UNLOCK to C expressions to run to
   lock and unlock the malloc data structures.  MALLOC_LOCK may be
   called recursively.
 */

#ifndef MALLOC_LOCK
#define MALLOC_LOCK
#endif

#ifndef MALLOC_UNLOCK
#define MALLOC_UNLOCK
#endif
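
/* For illustration only (a sketch, not part of the original port): in a
   threaded eCos configuration these hooks could be mapped onto the kernel
   scheduler lock, which nests as MALLOC_LOCK requires, e.g.

       #include <cyg/kernel/sched.hxx>       // Cyg_Scheduler
       #define MALLOC_LOCK    Cyg_Scheduler::lock()
       #define MALLOC_UNLOCK  Cyg_Scheduler::unlock()

   In practice the eCos memory pool wrappers layered above this class
   provide their own locking, which is why both default to empty. */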

/*
  INTERNAL_SIZE_T is the word-size used for internal bookkeeping
  of chunk sizes. On a 64-bit machine, you can reduce malloc
  overhead by defining INTERNAL_SIZE_T to be a 32 bit `unsigned int'
  at the expense of not being able to handle requests greater than
  2^31. This limitation is hardly ever a concern; you are encouraged
  to set this. However, the default version is the same as size_t.
*/

#ifndef INTERNAL_SIZE_T
#define INTERNAL_SIZE_T Cyg_Mempool_dlmalloc_Implementation::Cyg_dlmalloc_size_t
#endif

/*
  Following is needed on implementations whereby long > size_t.
  The problem is caused because the code performs subtractions of
  size_t values and stores the result in long values.  In the case
  where long > size_t and the first value is actually less than
  the second value, the resultant value is positive.  For example,
  (long)(x - y) where x = 0 and y is 1 ends up being 0x00000000FFFFFFFF
  which is 2^32 - 1 instead of 0xFFFFFFFFFFFFFFFF.  This is due to the
  fact that assignment from unsigned to signed won't sign extend.
*/

#ifdef SIZE_T_SMALLER_THAN_LONG
#define long_sub_size_t(x, y) ( (x < y) ? -((long)(y - x)) : (x - y) )
#else
#define long_sub_size_t(x, y) ( (long)(x - y) )
#endif
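
/* Worked example of the guard above, assuming 32-bit size_t and 64-bit
   long, with x == 0 and y == 1:
     (long)(x - y)          yields 0x00000000FFFFFFFF (a large positive value)
     long_sub_size_t(x, y)  yields -1                 (the intended result)  */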


#ifdef CYGIMP_MEMALLOC_ALLOCATOR_DLMALLOC_USE_MEMCPY

#include <string.h>                    // memcpy, memset

/* The following macros are only invoked with (2n+1)-multiples of
   INTERNAL_SIZE_T units, with a positive integer n. This is exploited
   for fast inline execution when n is small. */

#define MALLOC_ZERO(charp, nbytes)                                            \
do {                                                                          \
  INTERNAL_SIZE_T mzsz = (nbytes);                                            \
  if(mzsz <= 9*sizeof(mzsz)) {                                                \
    INTERNAL_SIZE_T* mz = (INTERNAL_SIZE_T*) (charp);                         \
    if(mzsz >= 5*sizeof(mzsz)) {     *mz++ = 0;                               \
                                     *mz++ = 0;                               \
      if(mzsz >= 7*sizeof(mzsz)) {   *mz++ = 0;                               \
                                     *mz++ = 0;                               \
        if(mzsz >= 9*sizeof(mzsz)) { *mz++ = 0;                               \
                                     *mz++ = 0; }}}                           \
                                     *mz++ = 0;                               \
                                     *mz++ = 0;                               \
                                     *mz   = 0;                               \
  } else memset((charp), 0, mzsz);                                            \
} while(0)

#define MALLOC_COPY(dest,src,nbytes)                                          \
do {                                                                          \
  INTERNAL_SIZE_T mcsz = (nbytes);                                            \
  if(mcsz <= 9*sizeof(mcsz)) {                                                \
    INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) (src);                        \
    INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) (dest);                       \
    if(mcsz >= 5*sizeof(mcsz)) {     *mcdst++ = *mcsrc++;                     \
                                     *mcdst++ = *mcsrc++;                     \
      if(mcsz >= 7*sizeof(mcsz)) {   *mcdst++ = *mcsrc++;                     \
                                     *mcdst++ = *mcsrc++;                     \
        if(mcsz >= 9*sizeof(mcsz)) { *mcdst++ = *mcsrc++;                     \
                                     *mcdst++ = *mcsrc++; }}}                 \
                                     *mcdst++ = *mcsrc++;                     \
                                     *mcdst++ = *mcsrc++;                     \
                                     *mcdst   = *mcsrc  ;                     \
  } else memcpy(dest, src, mcsz);                                             \
} while(0)

#else /* !CYGIMP_MEMALLOC_ALLOCATOR_DLMALLOC_USE_MEMCPY */

/* Use Duff's device for good zeroing/copying performance. */

#define MALLOC_ZERO(charp, nbytes)                                            \
do {                                                                          \
  INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp);                           \
  long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T), mcn;                         \
  if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }             \
  switch (mctmp) {                                                            \
    case 0: for(;;) { *mzp++ = 0;                                             \
    case 7:           *mzp++ = 0;                                             \
    case 6:           *mzp++ = 0;                                             \
    case 5:           *mzp++ = 0;                                             \
    case 4:           *mzp++ = 0;                                             \
    case 3:           *mzp++ = 0;                                             \
    case 2:           *mzp++ = 0;                                             \
    case 1:           *mzp++ = 0; if(mcn <= 0) break; mcn--; }                \
  }                                                                           \
} while(0)

#define MALLOC_COPY(dest,src,nbytes)                                          \
do {                                                                          \
  INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src;                            \
  INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest;                           \
  long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T), mcn;                         \
  if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }             \
  switch (mctmp) {                                                            \
    case 0: for(;;) { *mcdst++ = *mcsrc++;                                    \
    case 7:           *mcdst++ = *mcsrc++;                                    \
    case 6:           *mcdst++ = *mcsrc++;                                    \
    case 5:           *mcdst++ = *mcsrc++;                                    \
    case 4:           *mcdst++ = *mcsrc++;                                    \
    case 3:           *mcdst++ = *mcsrc++;                                    \
    case 2:           *mcdst++ = *mcsrc++;                                    \
    case 1:           *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; }       \
  }                                                                           \
} while(0)

#endif
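
/* Note on the Duff's device variants above: each switch jumps into the
   middle of an unrolled loop of eight word stores (or copies); `mctmp'
   carries the word count modulo 8 (the switch entry point) and `mcn'
   roughly the number of full eight-word passes still to run. */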


//----------------------------------------------------------------------------

/*
   malloc_chunk details:

    (The following includes lightly edited explanations by Colin Plumb.)

    Chunks of memory are maintained using a `boundary tag' method as
    described in e.g., Knuth or Standish.  (See the paper by Paul
    Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
    survey of such techniques.)  Sizes of free chunks are stored both
    in the front of each chunk and at the end.  This makes
    consolidating fragmented chunks into bigger chunks very fast.  The
    size fields also hold bits representing whether chunks are free or
    in use.

    An allocated chunk looks like this:


    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of previous chunk, if allocated            | |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of chunk, in bytes                         |P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             User data starts here...                          .
            .                                                               .
            .             (malloc_usable_space() bytes)                     .
            .                                                               |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of chunk                                     |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+


    Where "chunk" is the front of the chunk for the purpose of most of
    the malloc code, but "mem" is the pointer that is returned to the
    user.  "Nextchunk" is the beginning of the next contiguous chunk.

    Chunks always begin on even word boundaries, so the mem portion
    (which is returned to the user) is also on an even word boundary, and
    thus double-word aligned.

    Free chunks are stored in circular doubly-linked lists, and look like this:

    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of previous chunk                            |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `head:' |             Size of chunk, in bytes                         |P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Forward pointer to next chunk in list             |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Back pointer to previous chunk in list            |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Unused space (may be 0 bytes long)                .
            .                                                               .
            .                                                               |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `foot:' |             Size of chunk, in bytes                           |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

    The P (PREV_INUSE) bit, stored in the unused low-order bit of the
    chunk size (which is always a multiple of two words), is an in-use
    bit for the *previous* chunk.  If that bit is *clear*, then the
    word before the current chunk size contains the previous chunk
    size, and can be used to find the front of the previous chunk.
    (The very first chunk allocated always has this bit set,
    preventing access to non-existent (or non-owned) memory.)

    Note that the `foot' of the current chunk is actually represented
    as the prev_size of the NEXT chunk. (This makes it easier to
    deal with alignments etc).

    The exception to all this is the special chunk `top', which doesn't
    bother using the trailing size field since there is no next
    contiguous chunk that would have to index off it. (After
    initialization, `top' is forced to always exist.)

    Available chunks are kept in any of several places (all declared below):

    * `av': An array of chunks serving as bin headers for consolidated
       chunks. Each bin is doubly linked.  The bins are approximately
       proportionally (log) spaced.  There are a lot of these bins
       (128). This may look excessive, but works very well in
       practice.  All procedures maintain the invariant that no
       consolidated chunk physically borders another one. Chunks in
       bins are kept in size order, with ties going to the
       approximately least recently used chunk.

       The chunks in each bin are maintained in decreasing sorted order by
       size.  This is irrelevant for the small bins, which all contain
       the same-sized chunks, but facilitates best-fit allocation for
       larger chunks. (These lists are just sequential. Keeping them in
       order almost never requires enough traversal to warrant using
       fancier ordered data structures.)  Chunks of the same size are
       linked with the most recently freed at the front, and allocations
       are taken from the back.  This results in LRU or FIFO allocation
       order, which tends to give each chunk an equal opportunity to be
       consolidated with adjacent freed chunks, resulting in larger free
       chunks and less fragmentation.

    * `top': The top-most available chunk (i.e., the one bordering the
       end of available memory) is treated specially. It is never
       included in any bin, and is used only if no other chunk is
       available.

    * `last_remainder': A bin holding only the remainder of the
       most recently split (non-top) chunk. This bin is checked
       before other non-fitting chunks, so as to provide better
       locality for runs of sequentially allocated chunks.

*/
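
/* For reference (a sketch, not a verbatim copy): the chunk header this
   file manipulates is declared in dlmalloc.hxx and has the classic
   dlmalloc shape, approximately:

       struct malloc_chunk {
           INTERNAL_SIZE_T      prev_size; // size of previous chunk, if free
           INTERNAL_SIZE_T      size;      // size in bytes + PREV_INUSE bit
           struct malloc_chunk *fd;        // forward link -- only when free
           struct malloc_chunk *bk;        // backward link -- only when free
       };
*/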

typedef struct Cyg_Mempool_dlmalloc_Implementation::malloc_chunk* mchunkptr;

/*  sizes, alignments */

#define SIZE_SZ                (sizeof(INTERNAL_SIZE_T))
#ifndef MALLOC_ALIGNMENT
#define MALLOC_ALIGN           8
#define MALLOC_ALIGNMENT       (SIZE_SZ + SIZE_SZ)
#else
#define MALLOC_ALIGN           MALLOC_ALIGNMENT
#endif
#define MALLOC_ALIGN_MASK      (MALLOC_ALIGNMENT - 1)
#define MINSIZE                \
             (sizeof(struct Cyg_Mempool_dlmalloc_Implementation::malloc_chunk))

/* conversion from malloc headers to user pointers, and back */

#define chunk2mem(p)   ((cyg_uint8*)((char*)(p) + 2*SIZE_SZ))
#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
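
/* e.g. with a 4-byte INTERNAL_SIZE_T the user pointer sits 2*SIZE_SZ == 8
   bytes past the chunk header, so mem2chunk(chunk2mem(p)) == p for any
   chunk pointer p. */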

/* pad request bytes into a usable size */

#define request2size(req) \
 (((long)((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) < \
  (long)(MINSIZE + MALLOC_ALIGN_MASK)) ? ((MINSIZE + MALLOC_ALIGN_MASK) & ~(MALLOC_ALIGN_MASK)) : \
   (((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) & ~(MALLOC_ALIGN_MASK)))
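
/* Worked example, assuming a 4-byte INTERNAL_SIZE_T (so SIZE_SZ == 4,
   MALLOC_ALIGN_MASK == 7 and MINSIZE == 16):
     request2size(0)  == 16    (clamped up to the minimum chunk size)
     request2size(10) == 16    ((10 + 4 + 7) & ~7, also the minimum)
     request2size(20) == 24    ((20 + 4 + 7) & ~7)
   i.e. SIZE_SZ bytes of header overhead are added and the total is
   rounded up to a multiple of MALLOC_ALIGNMENT, never below MINSIZE. */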

/* Check if m has acceptable alignment */

#define aligned_OK(m)    (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0)


/*
  Physical chunk operations
*/


/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */

#define PREV_INUSE 0x1

/* Bits to mask off when extracting size */

#define SIZE_BITS (PREV_INUSE)


/* Ptr to next physical malloc_chunk. */

#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) ))

/* Ptr to previous physical malloc_chunk */

#define prev_chunk(p)\
   ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))


/* Treat space at ptr + offset as a chunk */

#define chunk_at_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))

/*
  Dealing with use bits
*/

/* extract p's inuse bit */

#define inuse(p)\
((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE)

/* extract inuse bit of previous chunk */

#define prev_inuse(p)  ((p)->size & PREV_INUSE)

/* set/clear chunk as in use without otherwise disturbing */

#define set_inuse(p)\
((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE

#define clear_inuse(p)\
((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE)

/* check/set/clear inuse bits in known places */

#define inuse_bit_at_offset(p, s)\
 (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)

#define set_inuse_bit_at_offset(p, s)\
 (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)

#define clear_inuse_bit_at_offset(p, s)\
 (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))


/*
  Dealing with size fields
*/

/* Get size, ignoring use bits */

#define chunksize(p)          ((p)->size & ~(SIZE_BITS))

/* Set size at head, without disturbing its use bit */

#define set_head_size(p, s)   ((p)->size = (((p)->size & PREV_INUSE) | (s)))

/* Set size/use ignoring previous bits in header */

#define set_head(p, s)        ((p)->size = (s))

/* Set size at footer (only when chunk is not in use) */

#define set_foot(p, s)   (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))


//----------------------------------------------------------------------------

/*
   Bins

    The bins, `av_', are an array of pairs of pointers serving as the
    heads of (initially empty) doubly-linked lists of chunks, laid out
    in a way so that each pair can be treated as if it were in a
    malloc_chunk. (This way, the fd/bk offsets for linking bin heads
    and chunks are the same).

    Bins for sizes < 512 bytes contain chunks of all the same size, spaced
    8 bytes apart. Larger bins are approximately logarithmically
    spaced. (See the table below.) The `av_' array is never mentioned
    directly in the code, but instead via bin access macros.

    Bin layout:

    64 bins of size       8
    32 bins of size      64
    16 bins of size     512
     8 bins of size    4096
     4 bins of size   32768
     2 bins of size  262144
     1 bin  of size what's left

    There is actually a little bit of slop in the numbers in bin_index
    for the sake of speed. This makes no difference elsewhere.

    The special chunks `top' and `last_remainder' get their own bins
    (this is implemented via yet more trickery with the av_ array),
    although `top' is never properly linked to its bin since it is
    always handled specially.

*/

typedef struct Cyg_Mempool_dlmalloc_Implementation::malloc_chunk* mbinptr;

/* access macros */

#define bin_at(i)      ((mbinptr)((char*)&(av_[2*(i) + 2]) - 2*SIZE_SZ))
#define next_bin(b)    ((mbinptr)((char*)(b) + 2 * sizeof(mbinptr)))
#define prev_bin(b)    ((mbinptr)((char*)(b) - 2 * sizeof(mbinptr)))

/*
   The first 2 bins are never indexed. The corresponding av_ cells are instead
   used for bookkeeping. This is not to save space, but to simplify
   indexing, maintain locality, and avoid some initialization tests.
*/

#define top            (bin_at(0)->fd)   /* The topmost chunk */
#define last_remainder (bin_at(1))       /* remainder from last split */


/* Helper macro to initialize bins */

#define IAV(i)  bin_at(i), bin_at(i)

#ifndef CYGIMP_MEMALLOC_ALLOCATOR_DLMALLOC_SAFE_MULTIPLE
static mbinptr av_[CYGPRI_MEMALLOC_ALLOCATOR_DLMALLOC_NAV * 2 + 2] = {
 0, 0,
 IAV(0),   IAV(1),   IAV(2),   IAV(3),   IAV(4),   IAV(5),   IAV(6),   IAV(7),
 IAV(8),   IAV(9),   IAV(10),  IAV(11),  IAV(12),  IAV(13),  IAV(14),  IAV(15),
 IAV(16),  IAV(17),  IAV(18),  IAV(19),  IAV(20),  IAV(21),  IAV(22),  IAV(23),
 IAV(24),  IAV(25),  IAV(26),  IAV(27),  IAV(28),  IAV(29),  IAV(30),  IAV(31),
 IAV(32),  IAV(33),  IAV(34),  IAV(35),  IAV(36),  IAV(37),  IAV(38),  IAV(39),
 IAV(40),  IAV(41),  IAV(42),  IAV(43),  IAV(44),  IAV(45),  IAV(46),  IAV(47),
 IAV(48),  IAV(49),  IAV(50),  IAV(51),  IAV(52),  IAV(53),  IAV(54),  IAV(55),
 IAV(56),  IAV(57),  IAV(58),  IAV(59),  IAV(60),  IAV(61),  IAV(62),  IAV(63),
 IAV(64),  IAV(65),  IAV(66),  IAV(67),  IAV(68),  IAV(69),  IAV(70),  IAV(71),
 IAV(72),  IAV(73),  IAV(74),  IAV(75),  IAV(76),  IAV(77),  IAV(78),  IAV(79),
 IAV(80),  IAV(81),  IAV(82),  IAV(83),  IAV(84),  IAV(85),  IAV(86),  IAV(87),
 IAV(88),  IAV(89),  IAV(90),  IAV(91),  IAV(92),  IAV(93),  IAV(94),  IAV(95),
 IAV(96),  IAV(97),  IAV(98),  IAV(99),  IAV(100), IAV(101), IAV(102), IAV(103),
 IAV(104), IAV(105), IAV(106), IAV(107), IAV(108), IAV(109), IAV(110), IAV(111),
 IAV(112), IAV(113), IAV(114), IAV(115), IAV(116), IAV(117), IAV(118), IAV(119),
 IAV(120), IAV(121), IAV(122), IAV(123), IAV(124), IAV(125), IAV(126), IAV(127)
};
#endif

/* field-extraction macros */

#define first(b) ((b)->fd)
#define last(b)  ((b)->bk)

/*
  Indexing into bins
*/

#define bin_index(sz)                                                          \
(((((unsigned long)(sz)) >> 9) ==    0) ?       (((unsigned long)(sz)) >>  3): \
 ((((unsigned long)(sz)) >> 9) <=    4) ?  56 + (((unsigned long)(sz)) >>  6): \
 ((((unsigned long)(sz)) >> 9) <=   20) ?  91 + (((unsigned long)(sz)) >>  9): \
 ((((unsigned long)(sz)) >> 9) <=   84) ? 110 + (((unsigned long)(sz)) >> 12): \
 ((((unsigned long)(sz)) >> 9) <=  340) ? 119 + (((unsigned long)(sz)) >> 15): \
 ((((unsigned long)(sz)) >> 9) <= 1364) ? 124 + (((unsigned long)(sz)) >> 18): \
                                          126)
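
/* Worked examples of the spacing above:
     bin_index(40)   ==   5   (small bin: 40 >> 3)
     bin_index(600)  ==  65   (56 + (600 >> 6))
     bin_index(5000) == 100   (91 + (5000 >> 9)) */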
/*
  bins for chunks < 512 are all spaced SMALLBIN_WIDTH bytes apart, and hold
  identically sized chunks. This is exploited in malloc.
*/

#define MAX_SMALLBIN_SIZE   512
#define SMALLBIN_WIDTH        8
#define SMALLBIN_WIDTH_BITS   3
#define MAX_SMALLBIN        (MAX_SMALLBIN_SIZE / SMALLBIN_WIDTH) - 1

#define smallbin_index(sz)  (((unsigned long)(sz)) >> SMALLBIN_WIDTH_BITS)

/*
   Requests are `small' if both the corresponding and the next bin are small
*/

#define is_small_request(nb) (nb < MAX_SMALLBIN_SIZE - SMALLBIN_WIDTH)

/*
    To help compensate for the large number of bins, a one-level index
    structure is used for bin-by-bin searching.  `binblocks' is a
    one-word bitvector recording whether groups of BINBLOCKWIDTH bins
    have any (possibly) non-empty bins, so they can be skipped over
    all at once during traversals. The bits are NOT always
    cleared as soon as all bins in a block are empty, but instead only
    when all are noticed to be empty during traversal in malloc.
*/

#define BINBLOCKWIDTH     4   /* bins per block */

#define binblocks      (bin_at(0)->size) /* bitvector of nonempty blocks */

/* bin<->block macros */

#define idx2binblock(ix)    ((unsigned long)1 << (ix / BINBLOCKWIDTH))
#define mark_binblock(ii)   (binblocks |= idx2binblock(ii))
#define clear_binblock(ii)  (binblocks &= ~(idx2binblock(ii)))
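
/* e.g. bin index 10 lives in block 10 / BINBLOCKWIDTH == 2, so
   idx2binblock(10) == 1 << 2; marking it sets bit 2 of binblocks. */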


//----------------------------------------------------------------------------

/*
  Debugging support
*/

#ifdef CYGDBG_MEMALLOC_ALLOCATOR_DLMALLOC_DEBUG

/*
  These routines make a number of assertions about the states
  of data structures that should be true at all times. If any
  are not true, it's very likely that a user program has somehow
  trashed memory. (It's also possible that there is a coding error
  in malloc. In which case, please report it!)
*/

void
Cyg_Mempool_dlmalloc_Implementation::do_check_chunk( mchunkptr p )
{
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;

  /* Check for legal address ... */
  ASSERT((cyg_uint8 *)p >= arenabase);
  if (p != top)
    ASSERT((cyg_uint8 *)p + sz <= (cyg_uint8 *)top);
  else
    ASSERT((cyg_uint8 *)p + sz <= arenabase + arenasize);

} // Cyg_Mempool_dlmalloc_Implementation::do_check_chunk()


void
Cyg_Mempool_dlmalloc_Implementation::do_check_free_chunk(mchunkptr p)
{
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
  mchunkptr next = chunk_at_offset(p, sz);

  do_check_chunk(p);

  /* Check whether it claims to be free ... */
  ASSERT(!inuse(p));

  /* Unless a special marker, must have OK fields */
  if ((long)sz >= (long)MINSIZE)
  {
    ASSERT((sz & MALLOC_ALIGN_MASK) == 0);
    ASSERT(aligned_OK(chunk2mem(p)));
    /* ... matching footer field */
    ASSERT(next->prev_size == sz);
    /* ... and is fully consolidated */
    ASSERT(prev_inuse(p));
    ASSERT (next == top || inuse(next));

    /* ... and has minimally sane links */
    ASSERT(p->fd->bk == p);
    ASSERT(p->bk->fd == p);
  }
  else /* markers are always of size SIZE_SZ */
    ASSERT(sz == SIZE_SZ);
} // Cyg_Mempool_dlmalloc_Implementation::do_check_free_chunk()

void
Cyg_Mempool_dlmalloc_Implementation::do_check_inuse_chunk(mchunkptr p)
{
  mchunkptr next = next_chunk(p);
  do_check_chunk(p);

  /* Check whether it claims to be in use ... */
  ASSERT(inuse(p));

  /* ... and is surrounded by OK chunks.
    Since more things can be checked with free chunks than inuse ones,
    if an inuse chunk borders them and debug is on, it's worth doing them.
  */
  if (!prev_inuse(p))
  {
    mchunkptr prv = prev_chunk(p);
    ASSERT(next_chunk(prv) == p);
    do_check_free_chunk(prv);
  }
  if (next == top)
  {
    ASSERT(prev_inuse(next));
    ASSERT(chunksize(next) >= MINSIZE);
  }
  else if (!inuse(next))
    do_check_free_chunk(next);

} // Cyg_Mempool_dlmalloc_Implementation::do_check_inuse_chunk()

void
Cyg_Mempool_dlmalloc_Implementation::do_check_malloced_chunk(mchunkptr p,
                                              INTERNAL_SIZE_T s)
{
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
  long room = long_sub_size_t(sz, s);

  do_check_inuse_chunk(p);

  /* Legal size ... */
  ASSERT((long)sz >= (long)MINSIZE);
  ASSERT((sz & MALLOC_ALIGN_MASK) == 0);
  ASSERT(room >= 0);
  ASSERT(room < (long)MINSIZE);

  /* ... and alignment */
  ASSERT(aligned_OK(chunk2mem(p)));


  /* ... and was allocated at front of an available chunk */
  ASSERT(prev_inuse(p));

} // Cyg_Mempool_dlmalloc_Implementation::do_check_malloced_chunk()


#define check_free_chunk(P)  do_check_free_chunk(P)
#define check_inuse_chunk(P) do_check_inuse_chunk(P)
#define check_chunk(P) do_check_chunk(P)
#define check_malloced_chunk(P,N) do_check_malloced_chunk(P,N)
#else
#define check_free_chunk(P)
#define check_inuse_chunk(P)
#define check_chunk(P)
#define check_malloced_chunk(P,N)
#endif


//----------------------------------------------------------------------------

/*
  Macro-based internal utilities
*/


/*
  Linking chunks in bin lists.
  Call these only with variables, not arbitrary expressions, as arguments.
*/

/*
  Place chunk p of size s in its bin, in size order,
  putting it ahead of others of same size.
*/


#define frontlink(P, S, IDX, BK, FD)                                          \
{                                                                             \
  if (S < MAX_SMALLBIN_SIZE)                                                  \
  {                                                                           \
    IDX = smallbin_index(S);                                                  \
    mark_binblock(IDX);                                                       \
    BK = bin_at(IDX);                                                         \
    FD = BK->fd;                                                              \
    P->bk = BK;                                                               \
    P->fd = FD;                                                               \
    FD->bk = BK->fd = P;                                                      \
  }                                                                           \
  else                                                                        \
  {                                                                           \
    IDX = bin_index(S);                                                       \
    BK = bin_at(IDX);                                                         \
    FD = BK->fd;                                                              \
    if (FD == BK) mark_binblock(IDX);                                         \
    else                                                                      \
    {                                                                         \
      while (FD != BK && S < chunksize(FD)) FD = FD->fd;                      \
      BK = FD->bk;                                                            \
    }                                                                         \
    P->bk = BK;                                                               \
    P->fd = FD;                                                               \
    FD->bk = BK->fd = P;                                                      \
  }                                                                           \
}


/* take a chunk off a list */

#define unlink(P, BK, FD)                                                     \
{                                                                             \
  BK = P->bk;                                                                 \
  FD = P->fd;                                                                 \
  FD->bk = BK;                                                                \
  BK->fd = FD;                                                                \
}

/* Place p as the last remainder */

#define link_last_remainder(P)                                                \
{                                                                             \
  last_remainder->fd = last_remainder->bk =  P;                               \
  P->fd = P->bk = last_remainder;                                             \
}

/* Clear the last_remainder bin */

#define clear_last_remainder \
  (last_remainder->fd = last_remainder->bk = last_remainder)
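
/* Illustrative invariant (not code from the original): after
   frontlink(P, S, IDX, BK, FD) the bin list for IDX is again circular and
   doubly linked,

       bin_at(IDX) <-> ... <-> BK <-> P <-> FD <-> ... <-> bin_at(IDX)

   which is what lets unlink(P, BK, FD) splice P back out in O(1). */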


//----------------------------------------------------------------------------

Cyg_Mempool_dlmalloc_Implementation::Cyg_Mempool_dlmalloc_Implementation(
                                            cyg_uint8 *base, cyg_int32 size,
                                            CYG_ADDRWORD /* argthru */ )
{
    arenabase = base;
    arenasize = size;

    CYG_ADDRESS front_misalign;
    cyg_int32 correction;

#ifdef CYGIMP_MEMALLOC_ALLOCATOR_DLMALLOC_SAFE_MULTIPLE
    cyg_ucount16 i;
    av_[0] = av_[1] = 0;
    for (i=0; i < CYGPRI_MEMALLOC_ALLOCATOR_DLMALLOC_NAV; i++) {
        av_[ i*2+2 ] = av_[ i*2+3 ] = bin_at(i);
    } // for

#elif defined(CYGDBG_USE_ASSERTS)
    static int instances;
    if ( ++instances > 1 )
        CYG_FAIL( "Multiple dlmalloc instances but "
                  "CYGIMP_MEMALLOC_ALLOCATOR_DLMALLOC_SAFE_MULTIPLE "
                  "not defined" );
#endif

    front_misalign = (CYG_ADDRESS)chunk2mem(base) & MALLOC_ALIGN_MASK;

    if ( front_misalign > 0 ) {
        correction = (MALLOC_ALIGNMENT) - front_misalign;
    } else {
        correction = 0;
    }

    // too small to be useful?
    if ( correction + 2*MALLOC_ALIGNMENT > (unsigned) size )
        // help catch errors. Don't fail now.
        arenabase = NULL;
    else {
        top = (mchunkptr)(base + correction);
        set_head(top, arenasize | PREV_INUSE);
    }
}

//----------------------------------------------------------------------------

/* Main public routines */

/*
  Malloc Algorithm:

    The requested size is first converted into a usable form, `nb'.
    This currently means to add 4 bytes overhead plus possibly more to
    obtain 8-byte alignment and/or to obtain a size of at least
    MINSIZE (currently 16 bytes), the smallest allocatable size.
    (All fits are considered `exact' if they are within MINSIZE bytes.)

    From there, the first of the following steps to succeed is taken:

      1. The bin corresponding to the request size is scanned, and if
         a chunk of exactly the right size is found, it is taken.

      2. The most recently remaindered chunk is used if it is big
         enough.  This is a form of (roving) first fit, used only in
         the absence of exact fits. Runs of consecutive requests use
         the remainder of the chunk used for the previous such request
         whenever possible. This limited use of a first-fit style
         allocation strategy tends to give contiguous chunks
         coextensive lifetimes, which improves locality and can reduce
         fragmentation in the long run.

      3. Other bins are scanned in increasing size order, using a
         chunk big enough to fulfill the request, and splitting off
         any remainder.  This search is strictly by best-fit; i.e.,
         the smallest (with ties going to approximately the least
         recently used) chunk that fits is selected.

      4. If large enough, the chunk bordering the end of memory
         (`top') is split off. (This use of `top' is in accord with
         the best-fit search rule.  In effect, `top' is treated as
         larger (and thus less well fitting) than any other available
         chunk since it can be extended to be as large as necessary
         (up to system limitations).)

      All allocations are made from the `lowest' part of any found
      chunk. (The implementation invariant is that prev_inuse is
      always true of any allocated chunk; i.e., that each allocated
      chunk borders either a previously allocated and still in-use chunk,
      or the base of its memory arena.)

*/
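
/* Illustrative use of this class (a sketch under assumed surroundings,
   not code from this file): the implementation is constructed over a
   caller-supplied region and never grows beyond it.

       static cyg_uint8 heap[ 32768 ];
       Cyg_Mempool_dlmalloc_Implementation pool( heap, sizeof(heap), 0 );

       cyg_uint8 *p = pool.try_alloc( 100 );  // NULL once the pool is full
       if ( p != NULL )
           pool.free( p, 100 );
*/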

cyg_uint8 *
Cyg_Mempool_dlmalloc_Implementation::try_alloc( cyg_int32 bytes )
{
  mchunkptr victim;                  /* inspected/selected chunk */
  INTERNAL_SIZE_T victim_size;       /* its size */
  int       idx;                     /* index for bin traversal */
  mbinptr   bin;                     /* associated bin */
  mchunkptr remainder;               /* remainder from a split */
  long      remainder_size;          /* its size */
  int       remainder_index;         /* its bin index */
  unsigned long block;               /* block traverser bit */
  int       startidx;                /* first bin of a traversed block */
  mchunkptr fwd;                     /* misc temp for linking */
  mchunkptr bck;                     /* misc temp for linking */
  mbinptr q;                         /* misc temp */

  INTERNAL_SIZE_T nb;

  /*  Allow uninitialised (zero sized) heaps because they could exist as a
   *  quirk of the MLT setup where a dynamically sized heap is at the top of
   *  memory. */
  if (NULL==arenabase) return NULL;

  if ((long)bytes < 0) return NULL;

  nb = request2size(bytes);  /* padded request size */

  MALLOC_LOCK;

  /* Check for exact match in a bin */

  if (is_small_request(nb))  /* Faster version for small requests */
  {
    idx = smallbin_index(nb);

    /* No traversal or size check necessary for small bins.  */

    q = bin_at(idx);
    victim = last(q);

#if MALLOC_ALIGN != 16
    /* Also scan the next one, since it would have a remainder < MINSIZE */
    if (victim == q)
    {
      q = next_bin(q);
      victim = last(q);
    }
#endif
    if (victim != q)
    {
      victim_size = chunksize(victim);
      unlink(victim, bck, fwd);
      set_inuse_bit_at_offset(victim, victim_size);
      check_malloced_chunk(victim, nb);
      MALLOC_UNLOCK;
      return chunk2mem(victim);
    }

    idx += 2; /* Set for bin scan below. We've already scanned 2 bins. */

  }
  else
  {
    idx = bin_index(nb);
    bin = bin_at(idx);

    for (victim = last(bin); victim != bin; victim = victim->bk)
    {
      victim_size = chunksize(victim);
      remainder_size = long_sub_size_t(victim_size, nb);

      if (remainder_size >= (long)MINSIZE) /* too big */
      {
        --idx; /* adjust to rescan below after checking last remainder */
        break;
      }

      else if (remainder_size >= 0) /* exact fit */
      {
        unlink(victim, bck, fwd);
        set_inuse_bit_at_offset(victim, victim_size);
        check_malloced_chunk(victim, nb);
        MALLOC_UNLOCK;
        return chunk2mem(victim);
      }
    }

    ++idx;

  }

  /* Try to use the last split-off remainder */

  if ( (victim = last_remainder->fd) != last_remainder)
  {
    victim_size = chunksize(victim);
    remainder_size = long_sub_size_t(victim_size, nb);

    if (remainder_size >= (long)MINSIZE) /* re-split */
    {
      remainder = chunk_at_offset(victim, nb);
      set_head(victim, nb | PREV_INUSE);
      link_last_remainder(remainder);
      set_head(remainder, remainder_size | PREV_INUSE);
      set_foot(remainder, remainder_size);
      check_malloced_chunk(victim, nb);
      MALLOC_UNLOCK;
      return chunk2mem(victim);
    }

    clear_last_remainder;

    if (remainder_size >= 0)  /* exhaust */
    {
      set_inuse_bit_at_offset(victim, victim_size);
      check_malloced_chunk(victim, nb);
      MALLOC_UNLOCK;
      return chunk2mem(victim);
    }

    /* Else place in bin */

    frontlink(victim, victim_size, remainder_index, bck, fwd);
  }

  /*
     If there are any possibly nonempty big-enough blocks,
     search for best fitting chunk by scanning bins in blockwidth units.
  */

  if ( (block = idx2binblock(idx)) <= binblocks)
  {

    /* Get to the first marked block */

    if ( (block & binblocks) == 0)
    {
      /* force to an even block boundary */
      idx = (idx & ~(BINBLOCKWIDTH - 1)) + BINBLOCKWIDTH;
      block <<= 1;
      while ((block & binblocks) == 0)
      {
        idx += BINBLOCKWIDTH;
        block <<= 1;
      }
    }

    /* For each possibly nonempty block ... */
    for (;;)
    {
      startidx = idx;          /* (track incomplete blocks) */
      q = bin = bin_at(idx);

      /* For each bin in this block ... */
      do
      {
        /* Find and use first big enough chunk ... */

        for (victim = last(bin); victim != bin; victim = victim->bk)
        {
          victim_size = chunksize(victim);
          remainder_size = long_sub_size_t(victim_size, nb);

          if (remainder_size >= (long)MINSIZE) /* split */
          {
            remainder = chunk_at_offset(victim, nb);
            set_head(victim, nb | PREV_INUSE);
            unlink(victim, bck, fwd);
            link_last_remainder(remainder);
            set_head(remainder, remainder_size | PREV_INUSE);
            set_foot(remainder, remainder_size);
            check_malloced_chunk(victim, nb);
            MALLOC_UNLOCK;
            return chunk2mem(victim);
          }

          else if (remainder_size >= 0)  /* take */
          {
            set_inuse_bit_at_offset(victim, victim_size);
            unlink(victim, bck, fwd);
            check_malloced_chunk(victim, nb);
            MALLOC_UNLOCK;
            return chunk2mem(victim);
          }

        }

        bin = next_bin(bin);

#if MALLOC_ALIGN == 16
        if (idx < MAX_SMALLBIN)
        {
          bin = next_bin(bin);
          ++idx;
        }
#endif
      } while ((++idx & (BINBLOCKWIDTH - 1)) != 0);

      /* Clear out the block bit. */

      do   /* Possibly backtrack to try to clear a partial block */
      {
        if ((startidx & (BINBLOCKWIDTH - 1)) == 0)
        {
          binblocks &= ~block;
          break;
        }
        --startidx;
        q = prev_bin(q);
      } while (first(q) == q);

      /* Get to the next possibly nonempty block */

      if ( (block <<= 1) <= binblocks && (block != 0) )
      {
        while ((block & binblocks) == 0)
        {
          idx += BINBLOCKWIDTH;
          block <<= 1;
        }
      }
      else
        break;
    }
  }


  /* Try to use top chunk */

  /* Require that there be a remainder, ensuring top always exists  */
  remainder_size = long_sub_size_t(chunksize(top), nb);
  if (chunksize(top) < nb || remainder_size < (long)MINSIZE)
  {
      //diag_printf("chunksize(top)=%ld, nb=%d, remainder=%ld\n", chunksize(top),
      //            nb, remainder_size);
      MALLOC_UNLOCK;
      return NULL; /* propagate failure */
  }

  victim = top;
  set_head(victim, nb | PREV_INUSE);
  top = chunk_at_offset(victim, nb);
  set_head(top, remainder_size | PREV_INUSE);
  check_malloced_chunk(victim, nb);
  MALLOC_UNLOCK;
  return chunk2mem(victim);

} // Cyg_Mempool_dlmalloc_Implementation::try_alloc()

//----------------------------------------------------------------------------

/*
  free() algorithm:

    cases:

       1. free(NULL) has no effect.

       2. Chunks are consolidated as they arrive, and
          placed in corresponding bins. (This includes the case of
          consolidating with the current `last_remainder').
*/

cyg_bool
Cyg_Mempool_dlmalloc_Implementation::free( cyg_uint8 *mem, cyg_int32 )
{
  mchunkptr p;            /* chunk corresponding to mem */
  INTERNAL_SIZE_T hd;     /* its head field */
  INTERNAL_SIZE_T sz;     /* its size */
  int       idx;          /* its bin index */
  mchunkptr next;         /* next contiguous chunk */
  INTERNAL_SIZE_T nextsz; /* its size */
  INTERNAL_SIZE_T prevsz; /* size of previous contiguous chunk */
  mchunkptr bck;          /* misc temp for linking */
  mchunkptr fwd;          /* misc temp for linking */
  int       islr;         /* track whether merging with last_remainder */

  if (mem == NULL)                              /* free(NULL) has no effect */
    return false;

  MALLOC_LOCK;

  p = mem2chunk(mem);
  hd = p->size;

  check_inuse_chunk(p);

  sz = hd & ~PREV_INUSE;
  next = chunk_at_offset(p, sz);
  nextsz = chunksize(next);

  if (next == top)                             /* merge with top */
  {
    sz += nextsz;

    if (!(hd & PREV_INUSE))                    /* consolidate backward */
    {
      prevsz = p->prev_size;
      p = chunk_at_offset(p, -((long) prevsz));
      sz += prevsz;
      unlink(p, bck, fwd);
    }

    set_head(p, sz | PREV_INUSE);
    top = p;
    MALLOC_UNLOCK;
    return true;
  }

  set_head(next, nextsz);                      /* clear inuse bit */

  islr = 0;

  if (!(hd & PREV_INUSE))                      /* consolidate backward */
  {
    prevsz = p->prev_size;
    p = chunk_at_offset(p, -((long) prevsz));
    sz += prevsz;

    if (p->fd == last_remainder)               /* keep as last_remainder */
      islr = 1;
    else
      unlink(p, bck, fwd);
  }

  if (!(inuse_bit_at_offset(next, nextsz)))    /* consolidate forward */
  {
    sz += nextsz;

    if (!islr && next->fd == last_remainder)   /* re-insert last_remainder */
    {
      islr = 1;
      link_last_remainder(p);
    }
    else
      unlink(next, bck, fwd);
  }


  set_head(p, sz | PREV_INUSE);
  set_foot(p, sz);
  if (!islr)
    frontlink(p, sz, idx, bck, fwd);

  MALLOC_UNLOCK;

  return true;
} // Cyg_Mempool_dlmalloc_Implementation::free()
1383
 
//----------------------------------------------------------------------------

// Resize an existing allocation. If poldsize is non-NULL, the previous
// allocation size is placed into it; if the previous size is not
// available, it is set to 0. NB the previous allocation size may have
// been rounded up. Occasionally the allocation can be adjusted
// *backwards* as well as, or instead of, forwards, therefore the
// address of the resized allocation is returned, or NULL if no
// resizing was possible. Note that this differs from ::realloc() in
// that no attempt is made to call malloc() if resizing is not
// possible - that is left to higher layers. The data is copied from
// old to new though.
// The effects of oldmem==NULL or bytes==0 are undefined.


// DOCUMENTATION FROM ORIGINAL FILE:
// (some now irrelevant parts elided)
/*
  Realloc algorithm:

    If the reallocation is for additional space, and the
    chunk can be extended, it is, else a malloc-copy-free sequence is
    taken.  There are several different ways that a chunk could be
    extended. All are tried:

       * Extending forward into the following adjacent free chunk.
       * Shifting backwards, joining the preceding adjacent space.
       * Both shifting backwards and extending forward.

    If the reallocation is for less space, and the new request is for
    a `small' (<512 bytes) size, then the newly unused space is lopped
    off and freed.

    The old unix realloc convention of allowing the last-freed chunk
    to be used as an argument to realloc is no longer supported.
    I don't know of any programs still relying on this feature,
    and allowing it would also allow too many other incorrect
    usages of realloc to be sensible.
*/
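
// Sketch only, excluded from compilation: how a higher layer might build
// a full realloc() on top of resize_alloc(), per the note above that the
// malloc-copy-free fallback is left to callers. The wrapper function and
// its use of memcpy are assumptions for illustration, not this file's API.
#if 0
static cyg_uint8 *
realloc_via_pool( Cyg_Mempool_dlmalloc_Implementation &pool,
                  cyg_uint8 *oldmem, cyg_int32 bytes )
{
    cyg_int32 oldsize;
    // first try to grow/shift the existing allocation in place
    cyg_uint8 *p = pool.resize_alloc( oldmem, bytes, &oldsize );
    if (NULL != p)
        return p;              // resized (data already copied if it moved)
    // otherwise fall back to the classic malloc-copy-free sequence
    p = pool.try_alloc( bytes );
    if (NULL != p) {
        memcpy( p, oldmem, oldsize < bytes ? oldsize : bytes );
        pool.free( oldmem, oldsize );
    }
    return p;
}
#endif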
 
cyg_uint8 *
Cyg_Mempool_dlmalloc_Implementation::resize_alloc( cyg_uint8 *oldmem,
                                                   cyg_int32 bytes,
                                                   cyg_int32 *poldsize )
{

  INTERNAL_SIZE_T nb;             /* padded request size */

  mchunkptr oldp;                 /* chunk corresponding to oldmem */
  INTERNAL_SIZE_T oldsize;        /* its size */

  mchunkptr newp;                 /* chunk to return */
  INTERNAL_SIZE_T newsize;        /* its size */
  cyg_uint8*   newmem;            /* corresponding user mem */

  mchunkptr next;                 /* next contiguous chunk after oldp */
  INTERNAL_SIZE_T nextsize;       /* its size */

  mchunkptr prev;                 /* previous contiguous chunk before oldp */
  INTERNAL_SIZE_T prevsize;       /* its size */

  mchunkptr remainder;            /* holds split off extra space from newp */
  INTERNAL_SIZE_T remainder_size; /* its size */

  mchunkptr bck;                  /* misc temp for linking */
  mchunkptr fwd;                  /* misc temp for linking */

  MALLOC_LOCK;

  newp    = oldp    = mem2chunk(oldmem);
  newsize = oldsize = chunksize(oldp);

  if (NULL != poldsize)
      *poldsize = oldsize - SIZE_SZ;

  nb = request2size(bytes);

  check_inuse_chunk(oldp);

  if ((long)(oldsize) < (long)(nb))
  {

    /* Try expanding forward */

    next = chunk_at_offset(oldp, oldsize);
    if (next == top || !inuse(next))
    {
      nextsize = chunksize(next);

      /* Forward into top only if a remainder */
      if (next == top)
      {
        if ((long)(nextsize + newsize) >= (long)(nb + MINSIZE))
        {
          newsize += nextsize;
          top = chunk_at_offset(oldp, nb);
          set_head(top, (newsize - nb) | PREV_INUSE);
          set_head_size(oldp, nb);
          MALLOC_UNLOCK;
          return chunk2mem(oldp);
        }
      }

      /* Forward into next chunk */
      else if (((long)(nextsize + newsize) >= (long)(nb)))
      {
        unlink(next, bck, fwd);
        newsize += nextsize;
        goto split;
      }
    }
    else
    {
      next = 0;
      nextsize = 0;
    }

    /* Try shifting backwards. */

    if (!prev_inuse(oldp))
    {
      prev = prev_chunk(oldp);
      prevsize = chunksize(prev);

      /* try forward + backward first to save a later consolidation */

      if (next != 0)
      {
        /* into top */
        if (next == top)
        {
          if ((long)(nextsize + prevsize + newsize) >= (long)(nb + MINSIZE))
          {
            unlink(prev, bck, fwd);
            newp = prev;
            newsize += prevsize + nextsize;
            newmem = chunk2mem(newp);
            MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
            top = chunk_at_offset(newp, nb);
            set_head(top, (newsize - nb) | PREV_INUSE);
            set_head_size(newp, nb);
            MALLOC_UNLOCK;
            return newmem;
          }
        }

        /* into next chunk */
        else if (((long)(nextsize + prevsize + newsize) >= (long)(nb)))
        {
          unlink(next, bck, fwd);
          unlink(prev, bck, fwd);
          newp = prev;
          newsize += nextsize + prevsize;
          newmem = chunk2mem(newp);
          MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
          goto split;
        }
      }

      /* backward only */
      if (prev != 0 && (long)(prevsize + newsize) >= (long)nb)
      {
        unlink(prev, bck, fwd);
        newp = prev;
        newsize += prevsize;
        newmem = chunk2mem(newp);
        MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
        goto split;
      }
    }

    // couldn't resize the allocation in any direction, so return failure
    MALLOC_UNLOCK;
    return NULL;
  }


 split:  /* split off extra room in old or expanded chunk */

  remainder_size = long_sub_size_t(newsize, nb);

  if (remainder_size >= (long)MINSIZE) /* split off remainder */
  {
    remainder = chunk_at_offset(newp, nb);
    set_head_size(newp, nb);
    set_head(remainder, remainder_size | PREV_INUSE);
    set_inuse_bit_at_offset(remainder, remainder_size);
    /* let free() deal with it */
    Cyg_Mempool_dlmalloc_Implementation::free( chunk2mem(remainder) );
  }
  else
  {
    set_head_size(newp, newsize);
    set_inuse_bit_at_offset(newp, newsize);
  }

  check_inuse_chunk(newp);
  MALLOC_UNLOCK;
  return chunk2mem(newp);

} // Cyg_Mempool_dlmalloc_Implementation::resize_alloc()
 
//----------------------------------------------------------------------------

// Get memory pool status
// flags is a bitmask of requested fields to fill in. The flags are
// defined in common.hxx
void
Cyg_Mempool_dlmalloc_Implementation::get_status(
                                  cyg_mempool_status_flag_t flags,
                                  Cyg_Mempool_Status &status )
{
    if (0 != (flags&(CYG_MEMPOOL_STAT_FREEBLOCKS|CYG_MEMPOOL_STAT_TOTALFREE|
                     CYG_MEMPOOL_STAT_TOTALALLOCATED|CYG_MEMPOOL_STAT_MAXFREE)))
    {
        int i;
        mbinptr b;
        mchunkptr p;
        cyg_int32 chunksizep;
        cyg_int32 maxfree;
#ifdef CYGDBG_MEMALLOC_ALLOCATOR_DLMALLOC_DEBUG
        mchunkptr q;
#endif

        INTERNAL_SIZE_T avail = chunksize(top);
        int   navail = ((long)(avail) >= (long)MINSIZE)? 1 : 0;
        maxfree = avail;

        for (i = 1; i < CYGPRI_MEMALLOC_ALLOCATOR_DLMALLOC_NAV; ++i) {
            b = bin_at(i);
            for (p = last(b); p != b; p = p->bk) {
#ifdef CYGDBG_MEMALLOC_ALLOCATOR_DLMALLOC_DEBUG
                check_free_chunk(p);
                for (q = next_chunk(p);
                     (q < top) && inuse(q) &&
                         (long)(chunksize(q)) >= (long)MINSIZE;
                     q = next_chunk(q))
                    check_inuse_chunk(q);
#endif
                chunksizep = chunksize(p);
                avail += chunksizep;
                if ( chunksizep > maxfree )
                    maxfree = chunksizep;
                navail++;
            }
        }

        if ( 0 != (flags & CYG_MEMPOOL_STAT_TOTALALLOCATED) )
            status.totalallocated = arenasize - avail;
        // as quick or quicker to just set most of these, rather than
        // test flag first
        status.totalfree = (avail & ~(MALLOC_ALIGN_MASK)) - SIZE_SZ - MINSIZE;
        CYG_ASSERT( ((avail + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
                    >= MINSIZE, "free mem negative!" );
        status.freeblocks = navail;
        status.maxfree = (maxfree & ~(MALLOC_ALIGN_MASK)) - SIZE_SZ - MINSIZE;
        //diag_printf("raw mf: %d, ret mf: %d\n", maxfree, status.maxfree);
        CYG_ASSERT( ((maxfree + SIZE_SZ + MALLOC_ALIGN_MASK) &
                    ~MALLOC_ALIGN_MASK) >= MINSIZE,
                    "max free block size negative!" );
    } // if

    // as quick or quicker to just set most of these, rather than
    // test flag first
    status.arenabase = status.origbase = arenabase;
    status.arenasize = status.origsize = arenasize;
    status.maxoverhead = MINSIZE + MALLOC_ALIGNMENT;

} // Cyg_Mempool_dlmalloc_Implementation::get_status()
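
// Usage sketch, excluded from compilation: querying the pool's free
// space. The CYG_MEMPOOL_STAT_* flags and Cyg_Mempool_Status fields are
// as used above; the pool object itself is assumed for the sketch.
#if 0
    Cyg_Mempool_Status status;
    pool.get_status( CYG_MEMPOOL_STAT_TOTALFREE | CYG_MEMPOOL_STAT_MAXFREE,
                     status );
    // maxfree bounds the largest single allocation that can now succeed;
    // totalfree may exceed maxfree because of fragmentation
    diag_printf( "total free %d, max free %d\n",
                 (int)status.totalfree, (int)status.maxfree );
#endif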
 

//----------------------------------------------------------------------------

// EOF dlmalloc.cxx