//==========================================================================
//
//      dlmalloc.cxx
//
//      Port of Doug Lea's malloc implementation
//
//==========================================================================
// ####ECOSGPLCOPYRIGHTBEGIN####
// -------------------------------------------
// This file is part of eCos, the Embedded Configurable Operating System.
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
//
// eCos is free software; you can redistribute it and/or modify it under
// the terms of the GNU General Public License as published by the Free
// Software Foundation; either version 2 or (at your option) any later
// version.
//
// eCos is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
// for more details.
//
// You should have received a copy of the GNU General Public License
// along with eCos; if not, write to the Free Software Foundation, Inc.,
// 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
//
// As a special exception, if other files instantiate templates or use
// macros or inline functions from this file, or you compile this file
// and link it with other works to produce a work based on this file,
// this file does not by itself cause the resulting work to be covered by
// the GNU General Public License. However the source code for this file
// must still be made available in accordance with section (3) of the GNU
// General Public License v2.
//
// This exception does not invalidate any other reasons why a work based
// on this file might be covered by the GNU General Public License.
// -------------------------------------------
// ####ECOSGPLCOPYRIGHTEND####
//==========================================================================
//#####DESCRIPTIONBEGIN####
//
// Author(s):    Doug Lea (dl at g.oswego.edu), jlarmour
// Contributors:
// Date:         2000-06-18
// Purpose:      Doug Lea's malloc implementation
// Description:  Doug Lea's malloc has been ported to eCos. This file
//               provides the implementation in a way acceptable to eCos.
//               Substantial amounts of unnecessary bits (to eCos) of the
//               original implementation have been removed to make the
//               code more tractable. Note this may make a number of the
//               comments appear to make little sense, or no longer apply!
//               In particular, mmap support is removed entirely.
//               Also the memory is "sbrked" all at once at the
//               beginning, covering the entire memory region given at
//               construction, and there can be no more afterwards.
// Usage:        #include <cyg/memalloc/dlmalloc.hxx>
//
//
//####DESCRIPTIONEND####
//
//==========================================================================

// DOCUMENTATION FROM ORIGINAL FILE:
// (some now irrelevant parts elided)

//----------------------------------------------------------------------------

/*
  A version of malloc/free/realloc written by Doug Lea and released to the
  public domain.  Send questions/comments/complaints/performance data
  to dl at cs.oswego.edu

* VERSION 2.6.6  Sun Mar  5 19:10:03 2000  Doug Lea  (dl at gee)

   Note: There may be an updated version of this malloc obtainable at
           ftp://g.oswego.edu/pub/misc/malloc.c
         Check before installing!

* Why use this malloc?

  This is not the fastest, most space-conserving, most portable, or
  most tunable malloc ever written. However it is among the fastest
  while also being among the most space-conserving, portable and tunable.
  Consistent balance across these factors results in a good general-purpose
  allocator. For a high-level description, see
     http://g.oswego.edu/dl/html/malloc.html

* Synopsis of public routines

  (Much fuller descriptions are contained in the program documentation below.)

[ these have of course been renamed in the eCos port ]

  malloc(size_t n);
     Return a pointer to a newly allocated chunk of at least n bytes, or null
     if no space is available.
  free(Void_t* p);
     Release the chunk of memory pointed to by p, or no effect if p is null.
  realloc(Void_t* p, size_t n);
     Return a pointer to a chunk of size n that contains the same data
     as does chunk p up to the minimum of (n, p's size) bytes, or null
     if no space is available. The returned pointer may or may not be
     the same as p. If p is null, equivalent to malloc. realloc of
     zero bytes calls free(p).

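  [ eCos port note: a minimal usage sketch, stated in terms of the original
    names above; in this port the equivalent entry points are methods of
    Cyg_Mempool_dlmalloc_Implementation ]

      char *p = (char *) malloc(100);    // at least 100 usable bytes, or null
      if (p != 0) {
          p = (char *) realloc(p, 200);  // may move; old contents preserved
          free(p);                       // free(null) would have no effect
      }
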
* Vital statistics:

  Alignment:                            8-byte
       8 byte alignment is currently hardwired into the design.  This
       seems to suffice for all current machines and C compilers.

  Assumed pointer representation:       4 or 8 bytes
       Code for 8-byte pointers is untested by me but is reported to work
       reliably by Wolfram Gloger, who contributed most of the
       changes supporting this.

  Assumed size_t  representation:       4 or 8 bytes
       Note that size_t is allowed to be 4 bytes even if pointers are 8.

  Minimum overhead per allocated chunk: 4 or 8 bytes
       Each malloced chunk has a hidden overhead of 4 bytes holding size
       and status information.

  Minimum allocated size: 4-byte ptrs:  16 bytes    (including 4 overhead)
                          8-byte ptrs:  24/32 bytes (including 4/8 overhead)

       When a chunk is freed, 12 (for 4-byte ptrs) or 20 (for 8-byte
       ptrs but 4-byte size) or 24 (for 8/8) additional bytes are
       needed; 4 (8) for a trailing size field
       and 8 (16) bytes for free list pointers. Thus, the minimum
       allocatable size is 16/24/32 bytes.

       Even a request for zero bytes (i.e., malloc(0)) returns a
       pointer to something of the minimum allocatable size.

  Maximum allocated size: 4-byte size_t: 2^31 -  8 bytes
                          8-byte size_t: 2^63 - 16 bytes

       It is assumed that (possibly signed) size_t bit values suffice to
       represent chunk sizes. `Possibly signed' is due to the fact
       that `size_t' may be defined on a system as either a signed or
       an unsigned type. To be conservative, values that would appear
       as negative numbers are avoided.
       Requests for sizes with a negative sign bit when the request
       size is treated as a long will return null.

  Maximum overhead wastage per allocated chunk: normally 15 bytes

       Alignment demands, plus the minimum allocatable size restriction
       make the normal worst-case wastage 15 bytes (i.e., up to 15
       more bytes will be allocated than were requested in malloc), with
       one exception: because requests for zero bytes allocate non-zero space,
       the worst case wastage for a request of zero bytes is 24 bytes.

* Limitations

    Here are some features that are NOT currently supported:

    * No user-definable hooks for callbacks and the like.
    * No automated mechanism for fully checking that all accesses
      to malloced memory stay within their bounds.
    * No support for compaction.

* Synopsis of compile-time options:

    People have reported using previous versions of this malloc on all
    versions of Unix, sometimes by tweaking some of the defines
    below. It has been tested most extensively on Solaris and
    Linux. It is also reported to work on WIN32 platforms.
    People have also reported adapting this malloc for use in
    stand-alone embedded systems.

    The implementation is in straight, hand-tuned ANSI C.  Among other
    consequences, it uses a lot of macros.  Because of this, to be at
    all usable, this code should be compiled using an optimizing compiler
    (for example gcc -O2) that can simplify expressions and control
    paths.

  CYGDBG_MEMALLOC_ALLOCATOR_DLMALLOC_DEBUG      (default: NOT defined)
     Define to enable debugging. Adds fairly extensive assertion-based
     checking to help track down memory errors, but noticeably slows down
     execution.
  MALLOC_LOCK              (default: NOT defined)
  MALLOC_UNLOCK            (default: NOT defined)
     Define these to C expressions which are run to lock and unlock
     the malloc data structures.  Calls may be nested; that is,
     MALLOC_LOCK may be called more than once before the corresponding
     MALLOC_UNLOCK calls.  MALLOC_LOCK must avoid waiting for a lock
     that it already holds.  (A sketch of one possible binding appears
     after the MALLOC_LOCK/MALLOC_UNLOCK defaults below.)
  MALLOC_ALIGNMENT          (default: NOT defined)
     Define this to 16 if you need 16-byte alignment instead of the
     normal default of 8-byte alignment.
  SIZE_T_SMALLER_THAN_LONG (default: NOT defined)
     Define this when the platform you are compiling for has
     sizeof(long) > sizeof(size_t).
     The option causes some extra code to be generated to handle operations
     that use size_t operands and have long results.
  INTERNAL_SIZE_T           (default: size_t)
     Define to a 32-bit type (probably `unsigned int') if you are on a
     64-bit machine, yet do not want or need to allow malloc requests of
     greater than 2^31 to be handled. This saves space, especially for
     very small chunks.

*/

//----------------------------------------------------------------------------


/* Preliminaries */

#include <pkgconf/memalloc.h>          // configuration header
#include <pkgconf/infra.h>             // CYGDBG_USE_ASSERTS
#include <cyg/infra/cyg_type.h>        // types
#include <cyg/infra/cyg_ass.h>         // assertions
#include <stddef.h>                    // for size_t
#include <cyg/memalloc/dlmalloc.hxx>

/*
    Debugging:

    Because freed chunks may be overwritten with link fields, this
    malloc will often die when freed memory is overwritten by user
    programs.  This can be very effective (albeit in an annoying way)
    in helping track down dangling pointers.

    If you compile with CYGDBG_MEMALLOC_ALLOCATOR_DLMALLOC_DEBUG enabled,
    a number of assertion checks are enabled that will catch more memory
    errors. You probably won't be able to make much sense of the actual
    assertion errors, but they should help you locate incorrectly
    overwritten memory.  The checking is fairly extensive, and will slow
    down execution noticeably. Calling get_status() with DEBUG set will
    attempt to check every allocated and free chunk in the course of
    computing the summaries.

    Setting CYGDBG_MEMALLOC_ALLOCATOR_DLMALLOC_DEBUG may also be helpful
    if you are trying to modify this code. The assertions in the check
    routines spell out in more detail the assumptions and invariants
    underlying the algorithms.

*/

#ifdef CYGDBG_MEMALLOC_ALLOCATOR_DLMALLOC_DEBUG
# define ASSERT(x) CYG_ASSERTC( x )
#else
# define ASSERT(x) ((void)0)
#endif


/*
   Define MALLOC_LOCK and MALLOC_UNLOCK to C expressions to run to
   lock and unlock the malloc data structures.  MALLOC_LOCK may be
   called recursively.
 */

#ifndef MALLOC_LOCK
#define MALLOC_LOCK
#endif

#ifndef MALLOC_UNLOCK
#define MALLOC_UNLOCK
#endif
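
// A minimal, illustrative binding only (not enabled here): in a threaded
// eCos configuration, one plausible choice is the kernel scheduler lock,
// which nests as the description above requires:
//
//   #include <cyg/kernel/sched.hxx>
//   #define MALLOC_LOCK    Cyg_Scheduler::lock()
//   #define MALLOC_UNLOCK  Cyg_Scheduler::unlock()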
 
/*
  INTERNAL_SIZE_T is the word-size used for internal bookkeeping
  of chunk sizes. On a 64-bit machine, you can reduce malloc
  overhead by defining INTERNAL_SIZE_T to be a 32 bit `unsigned int'
  at the expense of not being able to handle requests greater than
  2^31. This limitation is hardly ever a concern; you are encouraged
  to set this. However, the default version is the same as size_t.
*/

#ifndef INTERNAL_SIZE_T
#define INTERNAL_SIZE_T Cyg_Mempool_dlmalloc_Implementation::Cyg_dlmalloc_size_t
#endif
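
// Example (illustrative only): on a 64-bit target one could configure
//
//   #define INTERNAL_SIZE_T unsigned int
//
// halving per-chunk bookkeeping at the cost of a 2^31 request limit.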
 
277
/*
278
  Following is needed on implementations whereby long > size_t.
279
  The problem is caused because the code performs subtractions of
280
  size_t values and stores the result in long values.  In the case
281
  where long > size_t and the first value is actually less than
282
  the second value, the resultant value is positive.  For example,
283
  (long)(x - y) where x = 0 and y is 1 ends up being 0x00000000FFFFFFFF
284
  which is 2*31 - 1 instead of 0xFFFFFFFFFFFFFFFF.  This is due to the
285
  fact that assignment from unsigned to signed won't sign extend.
286
*/
287
 
288
#ifdef SIZE_T_SMALLER_THAN_LONG
289
#define long_sub_size_t(x, y) ( (x < y) ? -((long)(y - x)) : (x - y) );
290
#else
291
#define long_sub_size_t(x, y) ( (long)(x - y) )
292
#endif
293
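
// Worked example (illustrative): with a 64-bit long and a 32-bit size_t,
// x == 0 and y == 1 make the unguarded (long)(x - y) come out as
// 4294967295; the guarded form instead yields the intended result:
//
//   long d = long_sub_size_t((size_t)0, (size_t)1);   // -1 when guarded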
 

#ifdef CYGIMP_MEMALLOC_ALLOCATOR_DLMALLOC_USE_MEMCPY

#include <string.h>                    // memmove, memset

/* The following macros are only invoked with (2n+1)-multiples of
   INTERNAL_SIZE_T units, with a positive integer n. This is exploited
   for fast inline execution when n is small. */

#define MALLOC_ZERO(charp, nbytes)                                            \
do {                                                                          \
  INTERNAL_SIZE_T mzsz = (nbytes);                                            \
  if(mzsz <= 9*sizeof(mzsz)) {                                                \
    INTERNAL_SIZE_T* mz = (INTERNAL_SIZE_T*) (charp);                         \
    if(mzsz >= 5*sizeof(mzsz)) {     *mz++ = 0;                               \
                                     *mz++ = 0;                               \
      if(mzsz >= 7*sizeof(mzsz)) {   *mz++ = 0;                               \
                                     *mz++ = 0;                               \
        if(mzsz >= 9*sizeof(mzsz)) { *mz++ = 0;                               \
                                     *mz++ = 0; }}}                           \
                                     *mz++ = 0;                               \
                                     *mz++ = 0;                               \
                                     *mz   = 0;                               \
  } else memset((charp), 0, mzsz);                                            \
} while(0)

#define MALLOC_COPY(dest,src,nbytes)                                          \
do {                                                                          \
  INTERNAL_SIZE_T mcsz = (nbytes);                                            \
  if(mcsz <= 9*sizeof(mcsz)) {                                                \
    INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) (src);                        \
    INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) (dest);                       \
    if(mcsz >= 5*sizeof(mcsz)) {     *mcdst++ = *mcsrc++;                     \
                                     *mcdst++ = *mcsrc++;                     \
      if(mcsz >= 7*sizeof(mcsz)) {   *mcdst++ = *mcsrc++;                     \
                                     *mcdst++ = *mcsrc++;                     \
        if(mcsz >= 9*sizeof(mcsz)) { *mcdst++ = *mcsrc++;                     \
                                     *mcdst++ = *mcsrc++; }}}                 \
                                     *mcdst++ = *mcsrc++;                     \
                                     *mcdst++ = *mcsrc++;                     \
                                     *mcdst   = *mcsrc  ;                     \
  } else memmove(dest, src, mcsz);                                            \
} while(0)

#else /* !CYGIMP_MEMALLOC_ALLOCATOR_DLMALLOC_USE_MEMCPY */

/* Use Duff's device for good zeroing/copying performance. */

#define MALLOC_ZERO(charp, nbytes)                                            \
do {                                                                          \
  INTERNAL_SIZE_T* mzp = (INTERNAL_SIZE_T*)(charp);                           \
  long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T), mcn;                         \
  if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }             \
  switch (mctmp) {                                                            \
    case 0: for(;;) { *mzp++ = 0;                                             \
    case 7:           *mzp++ = 0;                                             \
    case 6:           *mzp++ = 0;                                             \
    case 5:           *mzp++ = 0;                                             \
    case 4:           *mzp++ = 0;                                             \
    case 3:           *mzp++ = 0;                                             \
    case 2:           *mzp++ = 0;                                             \
    case 1:           *mzp++ = 0; if(mcn <= 0) break; mcn--; }                \
  }                                                                           \
} while(0)

#define MALLOC_COPY(dest,src,nbytes)                                          \
do {                                                                          \
  INTERNAL_SIZE_T* mcsrc = (INTERNAL_SIZE_T*) src;                            \
  INTERNAL_SIZE_T* mcdst = (INTERNAL_SIZE_T*) dest;                           \
  long mctmp = (nbytes)/sizeof(INTERNAL_SIZE_T), mcn;                         \
  if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp %= 8; }             \
  switch (mctmp) {                                                            \
    case 0: for(;;) { *mcdst++ = *mcsrc++;                                    \
    case 7:           *mcdst++ = *mcsrc++;                                    \
    case 6:           *mcdst++ = *mcsrc++;                                    \
    case 5:           *mcdst++ = *mcsrc++;                                    \
    case 4:           *mcdst++ = *mcsrc++;                                    \
    case 3:           *mcdst++ = *mcsrc++;                                    \
    case 2:           *mcdst++ = *mcsrc++;                                    \
    case 1:           *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; }       \
  }                                                                           \
} while(0)

#endif
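
// Worked example (illustrative): MALLOC_COPY with nbytes == 40 and a
// 4-byte INTERNAL_SIZE_T gives mctmp == 10, hence mcn == 1 and
// mctmp == 2: the switch copies 2 words, then one full 8-word pass
// through the loop -- 10 words in all.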
 

//----------------------------------------------------------------------------

/*
   malloc_chunk details:

    (The following includes lightly edited explanations by Colin Plumb.)

    Chunks of memory are maintained using a `boundary tag' method as
    described in e.g., Knuth or Standish.  (See the paper by Paul
    Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
    survey of such techniques.)  Sizes of free chunks are stored both
    in the front of each chunk and at the end.  This makes
    consolidating fragmented chunks into bigger chunks very fast.  The
    size fields also hold bits representing whether chunks are free or
    in use.

    An allocated chunk looks like this:


    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of previous chunk, if allocated            | |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of chunk, in bytes                         |P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             User data starts here...                          .
            .                                                               .
            .             (malloc_usable_space() bytes)                     .
            .                                                               |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of chunk                                     |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+


    Where "chunk" is the front of the chunk for the purpose of most of
    the malloc code, but "mem" is the pointer that is returned to the
    user.  "Nextchunk" is the beginning of the next contiguous chunk.

    Chunks always begin on even word boundaries, so the mem portion
    (which is returned to the user) is also on an even word boundary, and
    thus double-word aligned.

    Free chunks are stored in circular doubly-linked lists, and look like this:

    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Size of previous chunk                            |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `head:' |             Size of chunk, in bytes                         |P|
      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Forward pointer to next chunk in list             |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Back pointer to previous chunk in list            |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
            |             Unused space (may be 0 bytes long)                .
            .                                                               .
            .                                                               |
nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    `foot:' |             Size of chunk, in bytes                           |
            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

    The P (PREV_INUSE) bit, stored in the unused low-order bit of the
    chunk size (which is always a multiple of two words), is an in-use
    bit for the *previous* chunk.  If that bit is *clear*, then the
    word before the current chunk size contains the previous chunk
    size, and can be used to find the front of the previous chunk.
    (The very first chunk allocated always has this bit set,
    preventing access to non-existent (or non-owned) memory.)

    Note that the `foot' of the current chunk is actually represented
    as the prev_size of the NEXT chunk. (This makes it easier to
    deal with alignments etc.)

    The exception to all this is the special chunk `top', which doesn't
    bother using the trailing size field since there is no next
    contiguous chunk that would have to index off it. (After
    initialization, `top' is forced to always exist.)

    Available chunks are kept in any of several places (all declared below):

    * `av': An array of chunks serving as bin headers for consolidated
       chunks. Each bin is doubly linked.  The bins are approximately
       proportionally (log) spaced.  There are a lot of these bins
       (128). This may look excessive, but works very well in
       practice.  All procedures maintain the invariant that no
       consolidated chunk physically borders another one. Chunks in
       bins are kept in size order, with ties going to the
       approximately least recently used chunk.

       The chunks in each bin are maintained in decreasing sorted order by
       size.  This is irrelevant for the small bins, which all contain
       the same-sized chunks, but facilitates best-fit allocation for
       larger chunks. (These lists are just sequential. Keeping them in
       order almost never requires enough traversal to warrant using
       fancier ordered data structures.)  Chunks of the same size are
       linked with the most recently freed at the front, and allocations
       are taken from the back.  This results in LRU or FIFO allocation
       order, which tends to give each chunk an equal opportunity to be
       consolidated with adjacent freed chunks, resulting in larger free
       chunks and less fragmentation.

    * `top': The top-most available chunk (i.e., the one bordering the
       end of available memory) is treated specially. It is never
       included in any bin, and is used only if no other chunk is
       available.

    * `last_remainder': A bin holding only the remainder of the
       most recently split (non-top) chunk. This bin is checked
       before other non-fitting chunks, so as to provide better
       locality for runs of sequentially allocated chunks.

*/
 
typedef struct Cyg_Mempool_dlmalloc_Implementation::malloc_chunk* mchunkptr;

/*  sizes, alignments */

#define SIZE_SZ                (sizeof(INTERNAL_SIZE_T))

#ifdef CYGNUM_MEMALLOC_ALLOCATOR_DLMALLOC_ALIGNMENT
#define MALLOC_ALIGNMENT (1<<(CYGNUM_MEMALLOC_ALLOCATOR_DLMALLOC_ALIGNMENT))
#endif

#ifndef MALLOC_ALIGNMENT
#define MALLOC_ALIGN           8
#define MALLOC_ALIGNMENT       (SIZE_SZ + SIZE_SZ)
#else
#define MALLOC_ALIGN           MALLOC_ALIGNMENT
#endif
#define MALLOC_ALIGN_MASK      (MALLOC_ALIGNMENT - 1)
#define MINSIZE                \
             (sizeof(struct Cyg_Mempool_dlmalloc_Implementation::malloc_chunk))

/* conversion from malloc headers to user pointers, and back */

#define chunk2mem(p)   ((cyg_uint8*)((char*)(p) + 2*SIZE_SZ))
#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))

/* pad request bytes into a usable size */

#define request2size(req) \
 (((long)((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) < \
  (long)(MINSIZE + MALLOC_ALIGN_MASK)) ? ((MINSIZE + MALLOC_ALIGN_MASK) & ~(MALLOC_ALIGN_MASK)) : \
   (((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) & ~(MALLOC_ALIGN_MASK)))

/* Check if m has acceptable alignment */

#define aligned_OK(m)    (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0)
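
// Worked example (illustrative, 32-bit target: SIZE_SZ == 4, MINSIZE == 16):
//   request2size(10) == 16    (rounded up to MINSIZE)
//   request2size(20) == 24    (20 + 4 bytes of overhead, 8-byte aligned)
//   chunk2mem(p) is (char*)p + 8, and mem2chunk() inverts it.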
 

/*
  Physical chunk operations
*/


/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */

#define PREV_INUSE 0x1

/* Bits to mask off when extracting size */

#define SIZE_BITS (PREV_INUSE)


/* Ptr to next physical malloc_chunk. */

#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) ))

/* Ptr to previous physical malloc_chunk */

#define prev_chunk(p)\
   ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))


/* Treat space at ptr + offset as a chunk */

#define chunk_at_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))

/*
  Dealing with use bits
*/

/* extract p's inuse bit */

#define inuse(p)\
((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE)

/* extract inuse bit of previous chunk */

#define prev_inuse(p)  ((p)->size & PREV_INUSE)

/* set/clear chunk as in use without otherwise disturbing */

#define set_inuse(p)\
((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE

#define clear_inuse(p)\
((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE)

/* check/set/clear inuse bits in known places */

#define inuse_bit_at_offset(p, s)\
 (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)

#define set_inuse_bit_at_offset(p, s)\
 (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)

#define clear_inuse_bit_at_offset(p, s)\
 (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))


/*
  Dealing with size fields
*/

/* Get size, ignoring use bits */

#define chunksize(p)          ((p)->size & ~(SIZE_BITS))

/* Set size at head, without disturbing its use bit */

#define set_head_size(p, s)   ((p)->size = (((p)->size & PREV_INUSE) | (s)))

/* Set size/use ignoring previous bits in header */

#define set_head(p, s)        ((p)->size = (s))

/* Set size at footer (only when chunk is not in use) */

#define set_foot(p, s)   (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))

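// Note (illustrative): a chunk's own in-use state lives in the *next*
// chunk's header; e.g. with p->size == 24|PREV_INUSE, inuse(p) tests
// ((mchunkptr)((char*)p + 24))->size & PREV_INUSE.
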
//----------------------------------------------------------------------------

/*
   Bins

    The bins, `av_' are an array of pairs of pointers serving as the
    heads of (initially empty) doubly-linked lists of chunks, laid out
    in a way so that each pair can be treated as if it were in a
    malloc_chunk. (This way, the fd/bk offsets for linking bin heads
    and chunks are the same).

    Bins for sizes < 512 bytes contain chunks of all the same size, spaced
    8 bytes apart. Larger bins are approximately logarithmically
    spaced. (See the table below.) The `av_' array is never mentioned
    directly in the code, but instead via bin access macros.

    Bin layout:

    64 bins of size       8
    32 bins of size      64
    16 bins of size     512
     8 bins of size    4096
     4 bins of size   32768
     2 bins of size  262144
     1 bin  of size what's left

    There is actually a little bit of slop in the numbers in bin_index
    for the sake of speed. This makes no difference elsewhere.

    The special chunks `top' and `last_remainder' get their own bins,
    (this is implemented via yet more trickery with the av_ array),
    although `top' is never properly linked to its bin since it is
    always handled specially.

*/

typedef struct Cyg_Mempool_dlmalloc_Implementation::malloc_chunk* mbinptr;

/* access macros */

#define bin_at(i)      ((mbinptr)((char*)&(av_[2*(i) + 2]) - 2*SIZE_SZ))
#define next_bin(b)    ((mbinptr)((char*)(b) + 2 * sizeof(mbinptr)))
#define prev_bin(b)    ((mbinptr)((char*)(b) - 2 * sizeof(mbinptr)))

/*
   The first 2 bins are never indexed. The corresponding av_ cells are instead
   used for bookkeeping. This is not to save space, but to simplify
   indexing, maintain locality, and avoid some initialization tests.
*/

#define top            (bin_at(0)->fd)   /* The topmost chunk */
#define last_remainder (bin_at(1))       /* remainder from last split */


/* Helper macro to initialize bins */

#define IAV(i)  bin_at(i), bin_at(i)

#ifndef CYGIMP_MEMALLOC_ALLOCATOR_DLMALLOC_SAFE_MULTIPLE
static mbinptr av_[CYGPRI_MEMALLOC_ALLOCATOR_DLMALLOC_NAV * 2 + 2] = {
 0, 0,
 IAV(0),   IAV(1),   IAV(2),   IAV(3),   IAV(4),   IAV(5),   IAV(6),   IAV(7),
 IAV(8),   IAV(9),   IAV(10),  IAV(11),  IAV(12),  IAV(13),  IAV(14),  IAV(15),
 IAV(16),  IAV(17),  IAV(18),  IAV(19),  IAV(20),  IAV(21),  IAV(22),  IAV(23),
 IAV(24),  IAV(25),  IAV(26),  IAV(27),  IAV(28),  IAV(29),  IAV(30),  IAV(31),
 IAV(32),  IAV(33),  IAV(34),  IAV(35),  IAV(36),  IAV(37),  IAV(38),  IAV(39),
 IAV(40),  IAV(41),  IAV(42),  IAV(43),  IAV(44),  IAV(45),  IAV(46),  IAV(47),
 IAV(48),  IAV(49),  IAV(50),  IAV(51),  IAV(52),  IAV(53),  IAV(54),  IAV(55),
 IAV(56),  IAV(57),  IAV(58),  IAV(59),  IAV(60),  IAV(61),  IAV(62),  IAV(63),
 IAV(64),  IAV(65),  IAV(66),  IAV(67),  IAV(68),  IAV(69),  IAV(70),  IAV(71),
 IAV(72),  IAV(73),  IAV(74),  IAV(75),  IAV(76),  IAV(77),  IAV(78),  IAV(79),
 IAV(80),  IAV(81),  IAV(82),  IAV(83),  IAV(84),  IAV(85),  IAV(86),  IAV(87),
 IAV(88),  IAV(89),  IAV(90),  IAV(91),  IAV(92),  IAV(93),  IAV(94),  IAV(95),
 IAV(96),  IAV(97),  IAV(98),  IAV(99),  IAV(100), IAV(101), IAV(102), IAV(103),
 IAV(104), IAV(105), IAV(106), IAV(107), IAV(108), IAV(109), IAV(110), IAV(111),
 IAV(112), IAV(113), IAV(114), IAV(115), IAV(116), IAV(117), IAV(118), IAV(119),
 IAV(120), IAV(121), IAV(122), IAV(123), IAV(124), IAV(125), IAV(126), IAV(127)
};
#endif
 
/* field-extraction macros */

#define first(b) ((b)->fd)
#define last(b)  ((b)->bk)

/*
  Indexing into bins
*/

#define bin_index(sz)                                                          \
(((((unsigned long)(sz)) >> 9) ==    0) ?       (((unsigned long)(sz)) >>  3): \
 ((((unsigned long)(sz)) >> 9) <=    4) ?  56 + (((unsigned long)(sz)) >>  6): \
 ((((unsigned long)(sz)) >> 9) <=   20) ?  91 + (((unsigned long)(sz)) >>  9): \
 ((((unsigned long)(sz)) >> 9) <=   84) ? 110 + (((unsigned long)(sz)) >> 12): \
 ((((unsigned long)(sz)) >> 9) <=  340) ? 119 + (((unsigned long)(sz)) >> 15): \
 ((((unsigned long)(sz)) >> 9) <= 1364) ? 124 + (((unsigned long)(sz)) >> 18): \
                                          126)
/*
  bins for chunks < 512 are all spaced SMALLBIN_WIDTH bytes apart, and hold
  identically sized chunks. This is exploited in malloc.
*/

#define MAX_SMALLBIN_SIZE   512
#define SMALLBIN_WIDTH        8
#define SMALLBIN_WIDTH_BITS   3
#define MAX_SMALLBIN        ((MAX_SMALLBIN_SIZE / SMALLBIN_WIDTH) - 1)

#define smallbin_index(sz)  (((unsigned long)(sz)) >> SMALLBIN_WIDTH_BITS)

/*
   Requests are `small' if both the corresponding and the next bin are small
*/

#define is_small_request(nb) (nb < MAX_SMALLBIN_SIZE - SMALLBIN_WIDTH)
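
// Worked examples (illustrative):
//   bin_index(24)   == 24 >> 3          ==  3   (a small bin)
//   bin_index(1024) == 56 + (1024 >> 6) == 72
//   smallbin_index(24) == 3; is_small_request(504) is false.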
 
/*
    To help compensate for the large number of bins, a one-level index
    structure is used for bin-by-bin searching.  `binblocks' is a
    one-word bitvector recording whether groups of BINBLOCKWIDTH bins
    have any (possibly) non-empty bins, so they can be skipped over
    all at once during traversals. The bits are NOT always
    cleared as soon as all bins in a block are empty, but instead only
    when all are noticed to be empty during traversal in malloc.
*/

#define BINBLOCKWIDTH     4   /* bins per block */

#define binblocks      (bin_at(0)->size) /* bitvector of nonempty blocks */

/* bin<->block macros */

#define idx2binblock(ix)    ((unsigned long)1 << (ix / BINBLOCKWIDTH))
#define mark_binblock(ii)   (binblocks |= idx2binblock(ii))
#define clear_binblock(ii)  (binblocks &= ~(idx2binblock(ii)))
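
// Example (illustrative): idx2binblock(5) == 1 << (5/4) == 0x2, so
// mark_binblock(5) sets the bit covering bins 4..7.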
 

//----------------------------------------------------------------------------

/*
  Debugging support
*/

#ifdef CYGDBG_MEMALLOC_ALLOCATOR_DLMALLOC_DEBUG

/*
  These routines make a number of assertions about the states
  of data structures that should be true at all times. If any
  are not true, it's very likely that a user program has somehow
  trashed memory. (It's also possible that there is a coding error
  in malloc, in which case please report it!)
*/

void
Cyg_Mempool_dlmalloc_Implementation::do_check_chunk( mchunkptr p )
{
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;

  /* Check for legal address ... */
  ASSERT((cyg_uint8 *)p >= arenabase);
  if (p != top)
    ASSERT((cyg_uint8 *)p + sz <= (cyg_uint8 *)top);
  else
    ASSERT((cyg_uint8 *)p + sz <= arenabase + arenasize);

} // Cyg_Mempool_dlmalloc_Implementation::do_check_chunk()


void
Cyg_Mempool_dlmalloc_Implementation::do_check_free_chunk(mchunkptr p)
{
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
  mchunkptr next = chunk_at_offset(p, sz);

  do_check_chunk(p);

  /* Check whether it claims to be free ... */
  ASSERT(!inuse(p));

  /* Unless a special marker, must have OK fields */
  if ((long)sz >= (long)MINSIZE)
  {
    ASSERT((sz & MALLOC_ALIGN_MASK) == 0);
    ASSERT(aligned_OK(chunk2mem(p)));
    /* ... matching footer field */
    ASSERT(next->prev_size == sz);
    /* ... and is fully consolidated */
    ASSERT(prev_inuse(p));
    ASSERT (next == top || inuse(next));

    /* ... and has minimally sane links */
    ASSERT(p->fd->bk == p);
    ASSERT(p->bk->fd == p);
  }
  else /* markers are always of size SIZE_SZ */
    ASSERT(sz == SIZE_SZ);
} // Cyg_Mempool_dlmalloc_Implementation::do_check_free_chunk()

void
Cyg_Mempool_dlmalloc_Implementation::do_check_inuse_chunk(mchunkptr p)
{
  mchunkptr next = next_chunk(p);
  do_check_chunk(p);

  /* Check whether it claims to be in use ... */
  ASSERT(inuse(p));

  /* ... and is surrounded by OK chunks.
    Since more things can be checked with free chunks than inuse ones,
    if an inuse chunk borders them and debug is on, it's worth doing them.
  */
  if (!prev_inuse(p))
  {
    mchunkptr prv = prev_chunk(p);
    ASSERT(next_chunk(prv) == p);
    do_check_free_chunk(prv);
  }
  if (next == top)
  {
    ASSERT(prev_inuse(next));
    ASSERT(chunksize(next) >= MINSIZE);
  }
  else if (!inuse(next))
    do_check_free_chunk(next);

} // Cyg_Mempool_dlmalloc_Implementation::do_check_inuse_chunk()

void
Cyg_Mempool_dlmalloc_Implementation::do_check_malloced_chunk(mchunkptr p,
                                              INTERNAL_SIZE_T s)
{
  INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
  long room = long_sub_size_t(sz, s);

  do_check_inuse_chunk(p);

  /* Legal size ... */
  ASSERT((long)sz >= (long)MINSIZE);
  ASSERT((sz & MALLOC_ALIGN_MASK) == 0);
  ASSERT(room >= 0);
  ASSERT(room < (long)MINSIZE);

  /* ... and alignment */
  ASSERT(aligned_OK(chunk2mem(p)));


  /* ... and was allocated at front of an available chunk */
  ASSERT(prev_inuse(p));

} // Cyg_Mempool_dlmalloc_Implementation::do_check_malloced_chunk()


#define check_free_chunk(P)  do_check_free_chunk(P)
#define check_inuse_chunk(P) do_check_inuse_chunk(P)
#define check_chunk(P) do_check_chunk(P)
#define check_malloced_chunk(P,N) do_check_malloced_chunk(P,N)
#else
#define check_free_chunk(P)
#define check_inuse_chunk(P)
#define check_chunk(P)
#define check_malloced_chunk(P,N)
#endif
 

//----------------------------------------------------------------------------

/*
  Macro-based internal utilities
*/


/*
  Linking chunks in bin lists.
  Call these only with variables, not arbitrary expressions, as arguments.
*/

/*
  Place chunk p of size s in its bin, in size order,
  putting it ahead of others of same size.
*/


#define frontlink(P, S, IDX, BK, FD)                                          \
{                                                                             \
  if (S < MAX_SMALLBIN_SIZE)                                                  \
  {                                                                           \
    IDX = smallbin_index(S);                                                  \
    mark_binblock(IDX);                                                       \
    BK = bin_at(IDX);                                                         \
    FD = BK->fd;                                                              \
    P->bk = BK;                                                               \
    P->fd = FD;                                                               \
    FD->bk = BK->fd = P;                                                      \
  }                                                                           \
  else                                                                        \
  {                                                                           \
    IDX = bin_index(S);                                                       \
    BK = bin_at(IDX);                                                         \
    FD = BK->fd;                                                              \
    if (FD == BK) mark_binblock(IDX);                                         \
    else                                                                      \
    {                                                                         \
      while (FD != BK && S < chunksize(FD)) FD = FD->fd;                      \
      BK = FD->bk;                                                            \
    }                                                                         \
    P->bk = BK;                                                               \
    P->fd = FD;                                                               \
    FD->bk = BK->fd = P;                                                      \
  }                                                                           \
}


/* take a chunk off a list */

#define unlink(P, BK, FD)                                                     \
{                                                                             \
  BK = P->bk;                                                                 \
  FD = P->fd;                                                                 \
  FD->bk = BK;                                                                \
  BK->fd = FD;                                                                \
}

/* Place p as the last remainder */

#define link_last_remainder(P)                                                \
{                                                                             \
  last_remainder->fd = last_remainder->bk =  P;                               \
  P->fd = P->bk = last_remainder;                                             \
}

/* Clear the last_remainder bin */

#define clear_last_remainder \
  (last_remainder->fd = last_remainder->bk = last_remainder)


//----------------------------------------------------------------------------

Cyg_Mempool_dlmalloc_Implementation::Cyg_Mempool_dlmalloc_Implementation(
                                            cyg_uint8 *base, cyg_int32 size,
                                            CYG_ADDRWORD /* argthru */ )
{
    arenabase = base;
    arenasize = size;

    CYG_ADDRESS front_misalign;
    cyg_int32 correction;

#ifdef CYGIMP_MEMALLOC_ALLOCATOR_DLMALLOC_SAFE_MULTIPLE
    cyg_ucount16 i;
    av_[0] = av_[1] = 0;
    for (i=0; i < CYGPRI_MEMALLOC_ALLOCATOR_DLMALLOC_NAV; i++) {
        av_[ i*2+2 ] = av_[ i*2+3 ] = bin_at(i);
    } // for

#elif defined(CYGDBG_USE_ASSERTS)
    static int instances;
    if ( ++instances > 1 )
        CYG_FAIL( "Multiple dlmalloc instances but "
                  "CYGIMP_MEMALLOC_ALLOCATOR_DLMALLOC_SAFE_MULTIPLE "
                  "not defined" );
#endif

    front_misalign = (CYG_ADDRESS)chunk2mem(base) & MALLOC_ALIGN_MASK;

    if ( front_misalign > 0 ) {
        correction = (MALLOC_ALIGNMENT) - front_misalign;
    } else {
        correction = 0;
    }

    // too small to be useful?
    if ( correction + 2*(unsigned)MALLOC_ALIGNMENT > (unsigned) size )
        // help catch errors. Don't fail now.
        arenabase = NULL;
    else {
        top = (mchunkptr)(base + correction);
        set_head(top, arenasize | PREV_INUSE);
    }
}
 
990
//----------------------------------------------------------------------------
991
 
992
/* Main public routines */
993
 
994
/*
995
  Malloc Algorithm:
996
 
997
    The requested size is first converted into a usable form, `nb'.
998
    This currently means to add 4 bytes overhead plus possibly more to
999
    obtain 8-byte alignment and/or to obtain a size of at least
1000
    MINSIZE (currently 16 bytes), the smallest allocatable size.
1001
    (All fits are considered `exact' if they are within MINSIZE bytes.)
1002
 
1003
    From there, the first successful of the following steps is taken:
1004
 
1005
      1. The bin corresponding to the request size is scanned, and if
1006
         a chunk of exactly the right size is found, it is taken.
1007
 
1008
      2. The most recently remaindered chunk is used if it is big
1009
         enough.  This is a form of (roving) first fit, used only in
1010
         the absence of exact fits. Runs of consecutive requests use
1011
         the remainder of the chunk used for the previous such request
1012
         whenever possible. This limited use of a first-fit style
1013
         allocation strategy tends to give contiguous chunks
1014
         coextensive lifetimes, which improves locality and can reduce
1015
         fragmentation in the long run.
1016
 
1017
      3. Other bins are scanned in increasing size order, using a
1018
         chunk big enough to fulfill the request, and splitting off
1019
         any remainder.  This search is strictly by best-fit; i.e.,
1020
         the smallest (with ties going to approximately the least
1021
         recently used) chunk that fits is selected.
1022
 
1023
      4. If large enough, the chunk bordering the end of memory
1024
         (`top') is split off. (This use of `top' is in accord with
1025
         the best-fit search rule.  In effect, `top' is treated as
1026
         larger (and thus less well fitting) than any other available
1027
         chunk since it can be extended to be as large as necessary
1028
         (up to system limitations).
1029
 
1030
      All allocations are made from the the `lowest' part of any found
1031
      chunk. (The implementation invariant is that prev_inuse is
1032
      always true of any allocated chunk; i.e., that each allocated
1033
      chunk borders either a previously allocated and still in-use chunk,
1034
      or the base of its memory arena.)
1035
 
1036
*/
1037
 

cyg_uint8 *
Cyg_Mempool_dlmalloc_Implementation::try_alloc( cyg_int32 bytes )
{
  mchunkptr victim;                  /* inspected/selected chunk */
  INTERNAL_SIZE_T victim_size;       /* its size */
  int       idx;                     /* index for bin traversal */
  mbinptr   bin;                     /* associated bin */
  mchunkptr remainder;               /* remainder from a split */
  long      remainder_size;          /* its size */
  int       remainder_index;         /* its bin index */
  unsigned long block;               /* block traverser bit */
  int       startidx;                /* first bin of a traversed block */
  mchunkptr fwd;                     /* misc temp for linking */
  mchunkptr bck;                     /* misc temp for linking */
  mbinptr q;                         /* misc temp */

  INTERNAL_SIZE_T nb;

  /*  Allow uninitialised (zero sized) heaps because they could exist as a
   *  quirk of the MLT setup where a dynamically sized heap is at the top of
   *  memory. */
  if (NULL==arenabase) return NULL;

  if ((long)bytes < 0) return 0;

  nb = request2size(bytes);  /* padded request size; */

  MALLOC_LOCK;

  /* Check for exact match in a bin */

  if (is_small_request(nb))  /* Faster version for small requests */
  {
    idx = smallbin_index(nb);

    /* No traversal or size check necessary for small bins.  */

    q = bin_at(idx);
    victim = last(q);

#if MALLOC_ALIGN != 16
    /* Also scan the next one, since it would have a remainder < MINSIZE */
    if (victim == q)
    {
      q = next_bin(q);
      victim = last(q);
    }
#endif
    if (victim != q)
    {
      victim_size = chunksize(victim);
      unlink(victim, bck, fwd);
      set_inuse_bit_at_offset(victim, victim_size);
      check_malloced_chunk(victim, nb);
      MALLOC_UNLOCK;
      return chunk2mem(victim);
    }

    idx += 2; /* Set for bin scan below. We've already scanned 2 bins. */

  }
  else
  {
    idx = bin_index(nb);
    bin = bin_at(idx);

    for (victim = last(bin); victim != bin; victim = victim->bk)
    {
      victim_size = chunksize(victim);
      remainder_size = long_sub_size_t(victim_size, nb);

      if (remainder_size >= (long)MINSIZE) /* too big */
      {
        --idx; /* adjust to rescan below after checking last remainder */
        break;
      }

      else if (remainder_size >= 0) /* exact fit */
      {
        unlink(victim, bck, fwd);
        set_inuse_bit_at_offset(victim, victim_size);
        check_malloced_chunk(victim, nb);
        MALLOC_UNLOCK;
        return chunk2mem(victim);
      }
    }

    ++idx;

  }

  /* Try to use the last split-off remainder */

  if ( (victim = last_remainder->fd) != last_remainder)
  {
    victim_size = chunksize(victim);
    remainder_size = long_sub_size_t(victim_size, nb);

    if (remainder_size >= (long)MINSIZE) /* re-split */
    {
      remainder = chunk_at_offset(victim, nb);
      set_head(victim, nb | PREV_INUSE);
      link_last_remainder(remainder);
      set_head(remainder, remainder_size | PREV_INUSE);
      set_foot(remainder, remainder_size);
      check_malloced_chunk(victim, nb);
      MALLOC_UNLOCK;
      return chunk2mem(victim);
    }

    clear_last_remainder;

    if (remainder_size >= 0)  /* exhaust */
    {
      set_inuse_bit_at_offset(victim, victim_size);
      check_malloced_chunk(victim, nb);
      MALLOC_UNLOCK;
      return chunk2mem(victim);
    }

    /* Else place in bin */

    frontlink(victim, victim_size, remainder_index, bck, fwd);
  }

  /*
     If there are any possibly nonempty big-enough blocks,
     search for best fitting chunk by scanning bins in blockwidth units.
  */

  if ( (block = idx2binblock(idx)) <= binblocks)
  {

    /* Get to the first marked block */

    if ( (block & binblocks) == 0)
    {
      /* force to an even block boundary */
      idx = (idx & ~(BINBLOCKWIDTH - 1)) + BINBLOCKWIDTH;
      block <<= 1;
      while ((block & binblocks) == 0)
      {
        idx += BINBLOCKWIDTH;
        block <<= 1;
      }
    }

    /* For each possibly nonempty block ... */
    for (;;)
    {
      startidx = idx;          /* (track incomplete blocks) */
      q = bin = bin_at(idx);

      /* For each bin in this block ... */
      do
      {
        /* Find and use first big enough chunk ... */

        for (victim = last(bin); victim != bin; victim = victim->bk)
        {
          victim_size = chunksize(victim);
          remainder_size = long_sub_size_t(victim_size, nb);

          if (remainder_size >= (long)MINSIZE) /* split */
          {
            remainder = chunk_at_offset(victim, nb);
            set_head(victim, nb | PREV_INUSE);
            unlink(victim, bck, fwd);
            link_last_remainder(remainder);
            set_head(remainder, remainder_size | PREV_INUSE);
            set_foot(remainder, remainder_size);
            check_malloced_chunk(victim, nb);
            MALLOC_UNLOCK;
            return chunk2mem(victim);
          }

          else if (remainder_size >= 0)  /* take */
          {
            set_inuse_bit_at_offset(victim, victim_size);
            unlink(victim, bck, fwd);
            check_malloced_chunk(victim, nb);
            MALLOC_UNLOCK;
            return chunk2mem(victim);
          }

        }

        bin = next_bin(bin);

#if MALLOC_ALIGN == 16
        if (idx < MAX_SMALLBIN)
          {
            bin = next_bin(bin);
            ++idx;
          }
#endif
      } while ((++idx & (BINBLOCKWIDTH - 1)) != 0);

      /* Clear out the block bit. */

      do   /* Possibly backtrack to try to clear a partial block */
      {
        if ((startidx & (BINBLOCKWIDTH - 1)) == 0)
        {
          binblocks &= ~block;
          break;
        }
        --startidx;
        q = prev_bin(q);
      } while (first(q) == q);

      /* Get to the next possibly nonempty block */

      if ( (block <<= 1) <= binblocks && (block != 0) )
      {
        while ((block & binblocks) == 0)
        {
          idx += BINBLOCKWIDTH;
          block <<= 1;
        }
      }
      else
        break;
    }
  }


  /* Try to use top chunk */

  /* Require that there be a remainder, ensuring top always exists  */
  remainder_size = long_sub_size_t(chunksize(top), nb);
  if (chunksize(top) < nb || remainder_size < (long)MINSIZE)
  {
      //diag_printf("chunksize(top)=%ld, nb=%d, remainder=%ld\n", chunksize(top),
      //            nb, remainder_size);
      MALLOC_UNLOCK;
      CYG_MEMALLOC_FAIL(bytes);
      return NULL; /* propagate failure */
  }

  victim = top;
  set_head(victim, nb | PREV_INUSE);
  top = chunk_at_offset(victim, nb);
  set_head(top, remainder_size | PREV_INUSE);
  check_malloced_chunk(victim, nb);
  MALLOC_UNLOCK;
  return chunk2mem(victim);

} // Cyg_Mempool_dlmalloc_Implementation::try_alloc()
 
//----------------------------------------------------------------------------

/*
  free() algorithm:

    cases:

       1. free(NULL) has no effect.

       2. Chunks are consolidated as they arrive, and
          placed in corresponding bins. (This includes the case of
          consolidating with the current `last_remainder').
*/

cyg_bool
Cyg_Mempool_dlmalloc_Implementation::free( cyg_uint8 *mem, cyg_int32 )
{
  mchunkptr p;         /* chunk corresponding to mem */
  INTERNAL_SIZE_T hd;  /* its head field */
  INTERNAL_SIZE_T sz;  /* its size */
  int       idx;       /* its bin index */
  mchunkptr next;      /* next contiguous chunk */
  INTERNAL_SIZE_T nextsz; /* its size */
  INTERNAL_SIZE_T prevsz; /* size of previous contiguous chunk */
  mchunkptr bck;       /* misc temp for linking */
  mchunkptr fwd;       /* misc temp for linking */
  int       islr;      /* track whether merging with last_remainder */

  if (mem == NULL)                              /* free(NULL) has no effect */
    return false;

  MALLOC_LOCK;

  p = mem2chunk(mem);
  hd = p->size;

  check_inuse_chunk(p);

  sz = hd & ~PREV_INUSE;
  next = chunk_at_offset(p, sz);
  nextsz = chunksize(next);

  if (next == top)                            /* merge with top */
  {
    sz += nextsz;

    if (!(hd & PREV_INUSE))                    /* consolidate backward */
    {
      prevsz = p->prev_size;
      p = chunk_at_offset(p, -((long) prevsz));
      sz += prevsz;
      unlink(p, bck, fwd);
    }

    set_head(p, sz | PREV_INUSE);
    top = p;
    MALLOC_UNLOCK;
    return true;
  }

  set_head(next, nextsz);                    /* clear inuse bit */

  islr = 0;

  if (!(hd & PREV_INUSE))                    /* consolidate backward */
  {
    prevsz = p->prev_size;
    p = chunk_at_offset(p, -((long) prevsz));
    sz += prevsz;

    if (p->fd == last_remainder)             /* keep as last_remainder */
      islr = 1;
    else
      unlink(p, bck, fwd);
  }

  if (!(inuse_bit_at_offset(next, nextsz)))   /* consolidate forward */
  {
    sz += nextsz;

    if (!islr && next->fd == last_remainder)  /* re-insert last_remainder */
    {
      islr = 1;
      link_last_remainder(p);
    }
    else
      unlink(next, bck, fwd);
  }


  set_head(p, sz | PREV_INUSE);
  set_foot(p, sz);
  if (!islr)
    frontlink(p, sz, idx, bck, fwd);

  MALLOC_UNLOCK;

  return true;
} // Cyg_Mempool_dlmalloc_Implementation::free()
 
1388
//----------------------------------------------------------------------------
1389
 
// Resize an existing allocation. If poldsize is non-NULL, the previous
// allocation size is placed into it; if the previous size is not
// available, it is set to 0. NB: the previous allocation size may have
// been rounded up. Occasionally the allocation can be adjusted
// *backwards* as well as, or instead of, forwards, so the address of
// the resized allocation is returned, or NULL if no resizing was
// possible. Note that this differs from ::realloc() in that no attempt
// is made to call malloc() if resizing is not possible - that is left
// to higher layers. The data is copied from old to new, though.
// The effects of oldmem==NULL or bytes==0 are undefined.


// DOCUMENTATION FROM ORIGINAL FILE:
// (some now irrelevant parts elided)
/*
  Realloc algorithm:

    If the reallocation is for additional space, and the
    chunk can be extended, it is, else a malloc-copy-free sequence is
    taken.  There are several different ways that a chunk could be
    extended. All are tried:

       * Extending forward into the following adjacent free chunk.
       * Shifting backwards, joining the preceding adjacent space.
       * Both shifting backwards and extending forward.

    If the reallocation is for less space, and the new request is for
    a `small' (<512 bytes) size, then the newly unused space is lopped
    off and freed.

    The old unix realloc convention of allowing the last-free'd chunk
    to be used as an argument to realloc is no longer supported.
    I don't know of any programs still relying on this feature,
    and allowing it would also allow too many other incorrect
    usages of realloc to be sensible.
*/
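
/*
  Because the malloc-copy-free fallback is left to higher layers, a
  caller wanting full realloc() semantics might wrap resize_alloc()
  roughly as sketched below. `my_realloc' is hypothetical, not part of
  this file; it assumes memcpy() from <string.h> and a try_alloc()
  member taking the request size in bytes.

    cyg_uint8 *
    my_realloc( Cyg_Mempool_dlmalloc_Implementation &pool,
                cyg_uint8 *oldmem, cyg_int32 bytes )
    {
        cyg_int32 oldsize;   // filled in even when resizing fails
        cyg_uint8 *p = pool.resize_alloc( oldmem, bytes, &oldsize );
        if (NULL != p)
            return p;        // resized in place (possibly moved backwards)

        p = pool.try_alloc( bytes );       // malloc-copy-free sequence
        if (NULL != p)
        {
            memcpy( p, oldmem, oldsize < bytes ? oldsize : bytes );
            pool.free( oldmem, oldsize );
        }
        return p;
    }
*/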
1426
 
1427
cyg_uint8 *
1428
Cyg_Mempool_dlmalloc_Implementation::resize_alloc( cyg_uint8 *oldmem,
1429
                                                   cyg_int32 bytes,
1430
                                                   cyg_int32 *poldsize )
1431
{
1432
 
1433
  INTERNAL_SIZE_T nb;             /* padded request size */
1434
 
1435
  mchunkptr oldp;                 /* chunk corresponding to oldmem */
1436
  INTERNAL_SIZE_T oldsize;        /* its size */
1437
 
1438
  mchunkptr newp;                 /* chunk to return */
1439
  INTERNAL_SIZE_T newsize;        /* its size */
1440
  cyg_uint8*   newmem;            /* corresponding user mem */
1441
 
1442
  mchunkptr next;                 /* next contiguous chunk after oldp */
1443
  INTERNAL_SIZE_T nextsize;       /* its size */
1444
 
1445
  mchunkptr prev;                 /* previous contiguous chunk before oldp */
1446
  INTERNAL_SIZE_T prevsize;       /* its size */
1447
 
1448
  mchunkptr remainder;            /* holds split off extra space from newp */
1449
  INTERNAL_SIZE_T remainder_size; /* its size */
1450
 
1451
  mchunkptr bck;                  /* misc temp for linking */
1452
  mchunkptr fwd;                  /* misc temp for linking */
1453
 
1454
  MALLOC_LOCK;
1455
 
1456
  newp    = oldp    = mem2chunk(oldmem);
1457
  newsize = oldsize = chunksize(oldp);
1458
 
1459
  if (NULL != poldsize)
1460
      *poldsize = oldsize - SIZE_SZ;
1461
 
1462
  nb = request2size(bytes);
1463
 
1464
  check_inuse_chunk(oldp);
1465
 
1466
  if ((long)(oldsize) < (long)(nb))
1467
  {
1468
 
1469
    /* Try expanding forward */
1470
 
1471
    next = chunk_at_offset(oldp, oldsize);
1472
    if (next == top || !inuse(next))
1473
    {
1474
      nextsize = chunksize(next);
1475
 
1476
      /* Forward into top only if a remainder */
1477
      if (next == top)
1478
      {
1479
        if ((long)(nextsize + newsize) >= (long)(nb + MINSIZE))
1480
        {
1481
          newsize += nextsize;
1482
          top = chunk_at_offset(oldp, nb);
1483
          set_head(top, (newsize - nb) | PREV_INUSE);
1484
          set_head_size(oldp, nb);
1485
          MALLOC_UNLOCK;
1486
          return chunk2mem(oldp);
1487
        }
1488
      }
1489
 
1490
      /* Forward into next chunk */
1491
      else if (((long)(nextsize + newsize) >= (long)(nb)))
1492
      {
1493
        unlink(next, bck, fwd);
1494
        newsize  += nextsize;
1495
        goto split;
1496
      }
1497
    }
1498
    else
1499
    {
1500
      next = 0;
1501
      nextsize = 0;
1502
    }
1503
 
1504
    /* Try shifting backwards. */
1505
 
1506
    if (!prev_inuse(oldp))
1507
    {
1508
      prev = prev_chunk(oldp);
1509
      prevsize = chunksize(prev);
1510
 
1511
      /* try forward + backward first to save a later consolidation */
1512
 
1513
      if (next != 0)
1514
      {
1515
        /* into top */
1516
        if (next == top)
1517
        {
1518
          if ((long)(nextsize + prevsize + newsize) >= (long)(nb + MINSIZE))
1519
          {
1520
            unlink(prev, bck, fwd);
1521
            newp = prev;
1522
            newsize += prevsize + nextsize;
1523
            newmem = chunk2mem(newp);
1524
            MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
1525
            top = chunk_at_offset(newp, nb);
1526
            set_head(top, (newsize - nb) | PREV_INUSE);
1527
            set_head_size(newp, nb);
1528
            MALLOC_UNLOCK;
1529
            return newmem;
1530
          }
1531
        }
1532
 
1533
        /* into next chunk */
1534
        else if (((long)(nextsize + prevsize + newsize) >= (long)(nb)))
1535
        {
1536
          unlink(next, bck, fwd);
1537
          unlink(prev, bck, fwd);
1538
          newp = prev;
1539
          newsize += nextsize + prevsize;
1540
          newmem = chunk2mem(newp);
1541
          MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
1542
          goto split;
1543
        }
1544
      }
1545
 
1546
      /* backward only */
1547
      if (prev != 0 && (long)(prevsize + newsize) >= (long)nb)
1548
      {
1549
        unlink(prev, bck, fwd);
1550
        newp = prev;
1551
        newsize += prevsize;
1552
        newmem = chunk2mem(newp);
1553
        MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
1554
        goto split;
1555
      }
1556
    }
1557
 
    // couldn't resize the allocation in any direction, so return failure
    MALLOC_UNLOCK;
    return NULL;
  }


 split:  /* split off extra room in old or expanded chunk */

  remainder_size = long_sub_size_t(newsize, nb);

  if (remainder_size >= (long)MINSIZE) /* split off remainder */
  {
    remainder = chunk_at_offset(newp, nb);
    set_head_size(newp, nb);
    set_head(remainder, remainder_size | PREV_INUSE);
    set_inuse_bit_at_offset(remainder, remainder_size);
    /* let free() deal with it */
    Cyg_Mempool_dlmalloc_Implementation::free( chunk2mem(remainder) );
  }
  else
  {
    set_head_size(newp, newsize);
    set_inuse_bit_at_offset(newp, newsize);
  }

  check_inuse_chunk(newp);
  MALLOC_UNLOCK;
  return chunk2mem(newp);

} // Cyg_Mempool_dlmalloc_Implementation::resize_alloc()

//----------------------------------------------------------------------------

// Get memory pool status.
// flags is a bitmask of requested fields to fill in. The flags are
// defined in common.hxx.
void
Cyg_Mempool_dlmalloc_Implementation::get_status(
                                  cyg_mempool_status_flag_t flags,
                                  Cyg_Mempool_Status &status )
{
    if (0 != (flags&(CYG_MEMPOOL_STAT_FREEBLOCKS|CYG_MEMPOOL_STAT_TOTALFREE|
                     CYG_MEMPOOL_STAT_TOTALALLOCATED|CYG_MEMPOOL_STAT_MAXFREE)))
    {
        int i;
        mbinptr b;
        mchunkptr p;
        cyg_int32 chunksizep;
        cyg_int32 maxfree;
#ifdef CYGDBG_MEMALLOC_ALLOCATOR_DLMALLOC_DEBUG
        mchunkptr q;
#endif

        INTERNAL_SIZE_T avail = chunksize(top);
        int   navail = ((long)(avail) >= (long)MINSIZE)? 1 : 0;
        maxfree = avail;

        for (i = 1; i < CYGPRI_MEMALLOC_ALLOCATOR_DLMALLOC_NAV; ++i) {
            b = bin_at(i);
            for (p = last(b); p != b; p = p->bk) {
#ifdef CYGDBG_MEMALLOC_ALLOCATOR_DLMALLOC_DEBUG
                check_free_chunk(p);
                for (q = next_chunk(p);
                     (q < top) && inuse(q) &&
                         (long)(chunksize(q)) >= (long)MINSIZE;
                     q = next_chunk(q))
                    check_inuse_chunk(q);
#endif
                chunksizep = chunksize(p);
                avail += chunksizep;
                if ( chunksizep > maxfree )
                    maxfree = chunksizep;
                navail++;
            }
        }

        if ( 0 != (flags & CYG_MEMPOOL_STAT_TOTALALLOCATED) )
            status.totalallocated = arenasize - avail;
        // as quick or quicker to just set most of these, rather than
        // test the flag first
        status.totalfree = (avail & ~(MALLOC_ALIGN_MASK)) - SIZE_SZ - MINSIZE;
        CYG_ASSERT( ((avail + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
                    >= MINSIZE, "free mem negative!" );
        status.freeblocks = navail;
        status.maxfree = (maxfree & ~(MALLOC_ALIGN_MASK)) - SIZE_SZ - MINSIZE;
        //diag_printf("raw mf: %d, ret mf: %d\n", maxfree, status.maxfree);
        CYG_ASSERT( ((maxfree + SIZE_SZ + MALLOC_ALIGN_MASK) &
                    ~MALLOC_ALIGN_MASK) >= MINSIZE,
                    "max free block size negative!" );
    } // if

    // as quick or quicker to just set most of these, rather than
    // test the flag first
    status.arenabase = status.origbase = arenabase;
    status.arenasize = status.origsize = arenasize;
    status.maxoverhead = MINSIZE + MALLOC_ALIGNMENT;

} // Cyg_Mempool_dlmalloc_Implementation::get_status()
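
/*
  A minimal usage sketch; the pool object is an assumption, while the
  flag and field names are those used above:

    Cyg_Mempool_Status status;
    pool.get_status( CYG_MEMPOOL_STAT_TOTALFREE | CYG_MEMPOOL_STAT_MAXFREE,
                     status );
    // status.totalfree and status.maxfree are now valid; fields such as
    // arenasize and maxoverhead are filled in regardless of the flags.
*/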


//----------------------------------------------------------------------------

// EOF dlmalloc.cxx
