/*
2
  This is a version (aka dlmalloc) of malloc/free/realloc written by
3
  Doug Lea and released to the public domain.  Use, modify, and
4
  redistribute this code without permission or acknowledgement in any
5
  way you wish.  Send questions, comments, complaints, performance
6
  data, etc to dl@cs.oswego.edu
7
 
8
  VERSION 2.7.2 Sat Aug 17 09:07:30 2002  Doug Lea  (dl at gee)
9
 
10
  Note: There may be an updated version of this malloc obtainable at
11
           ftp://gee.cs.oswego.edu/pub/misc/malloc.c
12
  Check before installing!
13
 
14
  Hacked up for uClibc by Erik Andersen <andersen@codepoet.org>
15
*/
16
 
17
#define _GNU_SOURCE
18
#include "malloc.h"
19
 
20
 
21
#ifdef __UCLIBC_HAS_THREADS__
22
pthread_mutex_t __malloc_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
23
#endif
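/*
  A note on locking (an assumption, not spelled out in this file): the LOCK
  and UNLOCK macros used by the public entry points below are defined in
  malloc.h and, when __UCLIBC_HAS_THREADS__ is set, are expected to acquire
  and release __malloc_lock; in single-threaded builds they should expand
  to nothing.
*/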
24
 
25
/*
26
   There is exactly one instance of this struct in this malloc.
27
   If you are adapting this malloc in a way that does NOT use a static
28
   malloc_state, you MUST explicitly zero-fill it before using. This
29
   malloc relies on the property that malloc_state is initialized to
30
   all zeroes (as is true of C statics).
31
*/
32
struct malloc_state __malloc_state;  /* never directly referenced */
33
 
34
/* forward declaration */
35
static int __malloc_largebin_index(unsigned int sz);
36
 
37
#ifdef __MALLOC_DEBUGGING
38
 
39
/*
40
  Debugging support
41
 
42
  Because freed chunks may be overwritten with bookkeeping fields, this
43
  malloc will often die when freed memory is overwritten by user
44
  programs.  This can be very effective (albeit in an annoying way)
45
  in helping track down dangling pointers.
46
 
47
  If you compile with -D__MALLOC_DEBUGGING, a number of assertion checks are
48
  enabled that will catch more memory errors. You probably won't be
49
  able to make much sense of the actual assertion errors, but they
50
  should help you locate incorrectly overwritten memory.  The
51
  checking is fairly extensive, and will slow down execution
52
  noticeably. Calling malloc_stats or mallinfo with __MALLOC_DEBUGGING set will
53
  attempt to check every non-mmapped allocated and free chunk in the
54
  course of computing the summaries. (By nature, mmapped regions
55
  cannot be checked very much automatically.)
56
 
57
  Setting __MALLOC_DEBUGGING may also be helpful if you are trying to modify
58
  this code. The assertions in the check routines spell out in more
59
  detail the assumptions and invariants underlying the algorithms.
60
 
61
  Setting __MALLOC_DEBUGGING does NOT provide an automated mechanism for checking
62
  that all accesses to malloced memory stay within their
63
  bounds. However, there are several add-ons and adaptations of this
64
  or other mallocs available that do this.
65
*/
66
 
67
/* Properties of all chunks */
68
void __do_check_chunk(mchunkptr p)
69
{
70
    mstate av = get_malloc_state();
71
#ifdef __DOASSERTS__
72
    /* min and max possible addresses assuming contiguous allocation */
73
    char* max_address = (char*)(av->top) + chunksize(av->top);
74
    char* min_address = max_address - av->sbrked_mem;
75
    unsigned long  sz = chunksize(p);
76
#endif
77
 
78
    if (!chunk_is_mmapped(p)) {
79
 
80
        /* Has legal address ... */
81
        if (p != av->top) {
82
            if (contiguous(av)) {
83
                assert(((char*)p) >= min_address);
84
                assert(((char*)p + sz) <= ((char*)(av->top)));
85
            }
86
        }
87
        else {
88
            /* top size is always at least MINSIZE */
89
            assert((unsigned long)(sz) >= MINSIZE);
90
            /* top predecessor always marked inuse */
91
            assert(prev_inuse(p));
92
        }
93
 
94
    }
95
    else {
96
        /* address is outside main heap  */
97
        if (contiguous(av) && av->top != initial_top(av)) {
98
            assert(((char*)p) < min_address || ((char*)p) > max_address);
99
        }
100
        /* chunk is page-aligned */
101
        assert(((p->prev_size + sz) & (av->pagesize-1)) == 0);
102
        /* mem is aligned */
103
        assert(aligned_OK(chunk2mem(p)));
104
    }
105
}
106
 
107
/* Properties of free chunks */
108
void __do_check_free_chunk(mchunkptr p)
109
{
110
    size_t sz = p->size & ~PREV_INUSE;
111
#ifdef __DOASSERTS__
112
    mstate av = get_malloc_state();
113
    mchunkptr next = chunk_at_offset(p, sz);
114
#endif
115
 
116
    __do_check_chunk(p);
117
 
118
    /* Chunk must claim to be free ... */
119
    assert(!inuse(p));
120
    assert (!chunk_is_mmapped(p));
121
 
122
    /* Unless a special marker, must have OK fields */
123
    if ((unsigned long)(sz) >= MINSIZE)
124
    {
125
        assert((sz & MALLOC_ALIGN_MASK) == 0);
126
        assert(aligned_OK(chunk2mem(p)));
127
        /* ... matching footer field */
128
        assert(next->prev_size == sz);
129
        /* ... and is fully consolidated */
130
        assert(prev_inuse(p));
131
        assert (next == av->top || inuse(next));
132
 
133
        /* ... and has minimally sane links */
134
        assert(p->fd->bk == p);
135
        assert(p->bk->fd == p);
136
    }
137
    else /* markers are always of size (sizeof(size_t)) */
138
        assert(sz == (sizeof(size_t)));
139
}
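/*
  The layout these checks rely on is dlmalloc's usual chunk format, as
  declared in malloc.h.  A minimal sketch of a free chunk:

      chunk -> prev_size   size of the previous chunk, if that chunk is free
               size        this chunk's size; low bits hold PREV_INUSE/IS_MMAPPED
      mem   -> fd          forward pointer into its bin list
               bk          back pointer into its bin list
               ...         remaining free space
      next  -> prev_size   footer: a copy of this chunk's size

  This is why __do_check_free_chunk() verifies next->prev_size == sz and
  that p->fd->bk == p and p->bk->fd == p.
*/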
140
 
141
/* Properties of inuse chunks */
142
void __do_check_inuse_chunk(mchunkptr p)
143
{
144
    mstate av = get_malloc_state();
145
    mchunkptr next;
146
    __do_check_chunk(p);
147
 
148
    if (chunk_is_mmapped(p))
149
        return; /* mmapped chunks have no next/prev */
150
 
151
    /* Check whether it claims to be in use ... */
152
    assert(inuse(p));
153
 
154
    next = next_chunk(p);
155
 
156
    /* ... and is surrounded by OK chunks.
157
       Since more things can be checked with free chunks than inuse ones,
158
       if an inuse chunk borders them and debugging is on, it is worth checking them.
159
       */
160
    if (!prev_inuse(p))  {
161
        /* Note that we cannot even look at prev unless it is not inuse */
162
        mchunkptr prv = prev_chunk(p);
163
        assert(next_chunk(prv) == p);
164
        __do_check_free_chunk(prv);
165
    }
166
 
167
    if (next == av->top) {
168
        assert(prev_inuse(next));
169
        assert(chunksize(next) >= MINSIZE);
170
    }
171
    else if (!inuse(next))
172
        __do_check_free_chunk(next);
173
}
174
 
175
/* Properties of chunks recycled from fastbins */
176
void __do_check_remalloced_chunk(mchunkptr p, size_t s)
177
{
178
#ifdef __DOASSERTS__
179
    size_t sz = p->size & ~PREV_INUSE;
180
#endif
181
 
182
    __do_check_inuse_chunk(p);
183
 
184
    /* Legal size ... */
185
    assert((sz & MALLOC_ALIGN_MASK) == 0);
186
    assert((unsigned long)(sz) >= MINSIZE);
187
    /* ... and alignment */
188
    assert(aligned_OK(chunk2mem(p)));
189
    /* chunk is less than MINSIZE more than request */
190
    assert((long)(sz) - (long)(s) >= 0);
191
    assert((long)(sz) - (long)(s + MINSIZE) < 0);
192
}
193
 
194
/* Properties of nonrecycled chunks at the point they are malloced */
195
void __do_check_malloced_chunk(mchunkptr p, size_t s)
196
{
197
    /* same as recycled case ... */
198
    __do_check_remalloced_chunk(p, s);
199
 
200
    /*
201
       ... plus,  must obey implementation invariant that prev_inuse is
202
       always true of any allocated chunk; i.e., that each allocated
203
       chunk borders either a previously allocated and still in-use
204
       chunk, or the base of its memory arena. This is ensured
205
       by making all allocations from the `lowest' part of any found
206
       chunk.  This does not necessarily hold however for chunks
207
       recycled via fastbins.
208
       */
209
 
210
    assert(prev_inuse(p));
211
}
212
 
213
 
214
/*
215
  Properties of malloc_state.
216
 
217
  This may be useful for debugging malloc, as well as detecting user
218
  programmer errors that somehow write into malloc_state.
219
 
220
  If you are extending or experimenting with this malloc, you can
221
  probably figure out how to hack this routine to print out or
222
  display chunk addresses, sizes, bins, and other instrumentation.
223
*/
224
void __do_check_malloc_state(void)
225
{
226
    mstate av = get_malloc_state();
227
    int i;
228
    mchunkptr p;
229
    mchunkptr q;
230
    mbinptr b;
231
    unsigned int binbit;
232
    int empty;
233
    unsigned int idx;
234
    size_t size;
235
    unsigned long  total = 0;
236
    int max_fast_bin;
237
 
238
    /* internal size_t must be no wider than pointer type */
239
    assert(sizeof(size_t) <= sizeof(char*));
240
 
241
    /* alignment is a power of 2 */
242
    assert((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-1)) == 0);
243
 
244
    /* cannot run remaining checks until fully initialized */
245
    if (av->top == 0 || av->top == initial_top(av))
246
        return;
247
 
248
    /* pagesize is a power of 2 */
249
    assert((av->pagesize & (av->pagesize-1)) == 0);
250
 
251
    /* properties of fastbins */
252
 
253
    /* max_fast is in allowed range */
254
    assert(get_max_fast(av) <= request2size(MAX_FAST_SIZE));
255
 
256
    max_fast_bin = fastbin_index(av->max_fast);
257
 
258
    for (i = 0; i < NFASTBINS; ++i) {
259
        p = av->fastbins[i];
260
 
261
        /* all bins past max_fast are empty */
262
        if (i > max_fast_bin)
263
            assert(p == 0);
264
 
265
        while (p != 0) {
266
            /* each chunk claims to be inuse */
267
            __do_check_inuse_chunk(p);
268
            total += chunksize(p);
269
            /* chunk belongs in this bin */
270
            assert(fastbin_index(chunksize(p)) == i);
271
            p = p->fd;
272
        }
273
    }
274
 
275
    if (total != 0)
276
        assert(have_fastchunks(av));
277
    else if (!have_fastchunks(av))
278
        assert(total == 0);
279
 
280
    /* check normal bins */
281
    for (i = 1; i < NBINS; ++i) {
282
        b = bin_at(av,i);
283
 
284
        /* binmap is accurate (except for bin 1 == unsorted_chunks) */
285
        if (i >= 2) {
286
            binbit = get_binmap(av,i);
287
            empty = last(b) == b;
288
            if (!binbit)
289
                assert(empty);
290
            else if (!empty)
291
                assert(binbit);
292
        }
293
 
294
        for (p = last(b); p != b; p = p->bk) {
295
            /* each chunk claims to be free */
296
            __do_check_free_chunk(p);
297
            size = chunksize(p);
298
            total += size;
299
            if (i >= 2) {
300
                /* chunk belongs in bin */
301
                idx = bin_index(size);
302
                assert(idx == i);
303
                /* lists are sorted */
304
                if ((unsigned long) size >= (unsigned long)(FIRST_SORTED_BIN_SIZE)) {
305
                    assert(p->bk == b ||
306
                            (unsigned long)chunksize(p->bk) >=
307
                            (unsigned long)chunksize(p));
308
                }
309
            }
310
            /* chunk is followed by a legal chain of inuse chunks */
311
            for (q = next_chunk(p);
312
                    (q != av->top && inuse(q) &&
313
                     (unsigned long)(chunksize(q)) >= MINSIZE);
314
                    q = next_chunk(q))
315
                __do_check_inuse_chunk(q);
316
        }
317
    }
318
 
319
    /* top chunk is OK */
320
    __do_check_chunk(av->top);
321
 
322
    /* sanity checks for statistics */
323
 
324
    assert(total <= (unsigned long)(av->max_total_mem));
325
    assert(av->n_mmaps >= 0);
326
    assert(av->n_mmaps <= av->max_n_mmaps);
327
 
328
    assert((unsigned long)(av->sbrked_mem) <=
329
            (unsigned long)(av->max_sbrked_mem));
330
 
331
    assert((unsigned long)(av->mmapped_mem) <=
332
            (unsigned long)(av->max_mmapped_mem));
333
 
334
    assert((unsigned long)(av->max_total_mem) >=
335
            (unsigned long)(av->mmapped_mem) + (unsigned long)(av->sbrked_mem));
336
}
337
#endif
338
 
339
 
340
/* ----------- Routines dealing with system allocation -------------- */
341
 
342
/*
343
  sysmalloc handles malloc cases requiring more memory from the system.
344
  On entry, it is assumed that av->top does not have enough
345
  space to service request for nb bytes, thus requiring that av->top
346
  be extended or replaced.
347
*/
348
static void* __malloc_alloc(size_t nb, mstate av)
349
{
350
    mchunkptr       old_top;        /* incoming value of av->top */
351
    size_t old_size;       /* its size */
352
    char*           old_end;        /* its end address */
353
 
354
    long            size;           /* arg to first MORECORE or mmap call */
355
    char*           brk;            /* return value from MORECORE */
356
 
357
    long            correction;     /* arg to 2nd MORECORE call */
358
    char*           snd_brk;        /* 2nd return val */
359
 
360
    size_t front_misalign; /* unusable bytes at front of new space */
361
    size_t end_misalign;   /* partial page left at end of new space */
362
    char*           aligned_brk;    /* aligned offset into brk */
363
 
364
    mchunkptr       p;              /* the allocated/returned chunk */
365
    mchunkptr       remainder;      /* remainder from allocation */
366
    unsigned long    remainder_size; /* its size */
367
 
368
    unsigned long    sum;            /* for updating stats */
369
 
370
    size_t          pagemask  = av->pagesize - 1;
371
 
372
    /*
373
       If there is space available in fastbins, consolidate and retry
374
       malloc from scratch rather than getting memory from system.  This
375
       can occur only if nb is in smallbin range so we didn't consolidate
376
       upon entry to malloc. It is much easier to handle this case here
377
       than in malloc proper.
378
       */
379
 
380
    if (have_fastchunks(av)) {
381
        assert(in_smallbin_range(nb));
382
        __malloc_consolidate(av);
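        /*
           nb is already the padded, aligned internal chunk size; re-entering
           the public malloc() with nb - MALLOC_ALIGN_MASK is a request size
           that checked_request2size() should map back onto this same nb (at
           least with the usual MALLOC_ALIGNMENT == 2*sizeof(size_t) setup),
           so the retry asks for the same chunk size after consolidation.
         */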
383
        return malloc(nb - MALLOC_ALIGN_MASK);
384
    }
385
 
386
 
387
    /*
388
       If we have mmap, and the request size meets the mmap threshold, and
389
       the system supports mmap, and there are few enough currently
390
       allocated mmapped regions, try to directly map this request
391
       rather than expanding top.
392
       */
393
 
394
    if ((unsigned long)(nb) >= (unsigned long)(av->mmap_threshold) &&
395
            (av->n_mmaps < av->n_mmaps_max)) {
396
 
397
        char* mm;             /* return value from mmap call*/
398
 
399
        /*
400
           Round up size to nearest page.  For mmapped chunks, the overhead
401
           is one (sizeof(size_t)) unit larger than for normal chunks, because there
402
           is no following chunk whose prev_size field could be used.
403
           */
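        /*
           A worked example (assuming 4 KB pages, 4-byte size_t and
           MALLOC_ALIGN_MASK == 7): for nb == 150000,
           size = (150000 + 4 + 7 + 4095) & ~4095 = 151552, i.e. 37 pages.
           */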
404
        size = (nb + (sizeof(size_t)) + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;
405
 
406
        /* Don't try if size wraps around 0 */
407
        if ((unsigned long)(size) > (unsigned long)(nb)) {
408
 
409
            mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
410
 
411
            if (mm != (char*)(MORECORE_FAILURE)) {
412
 
413
                /*
414
                   The offset to the start of the mmapped region is stored
415
                   in the prev_size field of the chunk. This allows us to adjust
416
                   returned start address to meet alignment requirements here
417
                   and in memalign(), and still be able to compute proper
418
                   address argument for later munmap in free() and realloc().
419
                   */
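                /*
                   For example (assuming MALLOC_ALIGNMENT == 16 and 4-byte
                   size_t, so chunk2mem(mm) == mm + 8): if mmap returns a
                   page-aligned mm, front_misalign == 8, correction == 8,
                   and the chunk starts at mm + 8 with p->prev_size == 8.
                   free()/realloc() can then recover the true mapping base
                   as (char*)p - p->prev_size when they munmap.  With the
                   more common MALLOC_ALIGNMENT == 8, front_misalign is
                   simply 0 and no correction is needed.
                   */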
420
 
421
                front_misalign = (size_t)chunk2mem(mm) & MALLOC_ALIGN_MASK;
422
                if (front_misalign > 0) {
423
                    correction = MALLOC_ALIGNMENT - front_misalign;
424
                    p = (mchunkptr)(mm + correction);
425
                    p->prev_size = correction;
426
                    set_head(p, (size - correction) |IS_MMAPPED);
427
                }
428
                else {
429
                    p = (mchunkptr)mm;
430
                    p->prev_size = 0;
431
                    set_head(p, size|IS_MMAPPED);
432
                }
433
 
434
                /* update statistics */
435
 
436
                if (++av->n_mmaps > av->max_n_mmaps)
437
                    av->max_n_mmaps = av->n_mmaps;
438
 
439
                sum = av->mmapped_mem += size;
440
                if (sum > (unsigned long)(av->max_mmapped_mem))
441
                    av->max_mmapped_mem = sum;
442
                sum += av->sbrked_mem;
443
                if (sum > (unsigned long)(av->max_total_mem))
444
                    av->max_total_mem = sum;
445
 
446
                check_chunk(p);
447
 
448
                return chunk2mem(p);
449
            }
450
        }
451
    }
452
 
453
    /* Record incoming configuration of top */
454
 
455
    old_top  = av->top;
456
    old_size = chunksize(old_top);
457
    old_end  = (char*)(chunk_at_offset(old_top, old_size));
458
 
459
    brk = snd_brk = (char*)(MORECORE_FAILURE);
460
 
461
    /* If not the first time through, we require old_size to
462
     * be at least MINSIZE and to have prev_inuse set.  */
463
 
464
    assert((old_top == initial_top(av) && old_size == 0) ||
465
            ((unsigned long) (old_size) >= MINSIZE &&
466
             prev_inuse(old_top)));
467
 
468
    /* Precondition: not enough current space to satisfy nb request */
469
    assert((unsigned long)(old_size) < (unsigned long)(nb + MINSIZE));
470
 
471
    /* Precondition: all fastbins are consolidated */
472
    assert(!have_fastchunks(av));
473
 
474
 
475
    /* Request enough space for nb + pad + overhead */
476
 
477
    size = nb + av->top_pad + MINSIZE;
478
 
479
    /*
480
       If contiguous, we can subtract out existing space that we hope to
481
       combine with new space. We add it back later only if
482
       we don't actually get contiguous space.
483
       */
484
 
485
    if (contiguous(av))
486
        size -= old_size;
487
 
488
    /*
489
       Round to a multiple of page size.
490
       If MORECORE is not contiguous, this ensures that we only call it
491
       with whole-page arguments.  And if MORECORE is contiguous and
492
       this is not first time through, this preserves page-alignment of
493
       previous calls. Otherwise, we correct to page-align below.
494
       */
495
 
496
    size = (size + pagemask) & ~pagemask;
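    /*
       For instance (assuming MINSIZE == 16, a default top_pad of 0, and
       4 KB pages): a request with nb == 5000 against a contiguous heap
       whose old top still holds 2000 bytes gives
       size = 5000 + 0 + 16 - 2000 = 3016, rounded up to 4096, so MORECORE
       is asked for exactly one page.
       */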
497
 
498
    /*
499
       Don't try to call MORECORE if argument is so big as to appear
500
       negative. Note that since mmap takes size_t arg, it may succeed
501
       below even if we cannot call MORECORE.
502
       */
503
 
504
    if (size > 0)
505
        brk = (char*)(MORECORE(size));
506
 
507
    /*
508
       If we have mmap, try using it as a backup when MORECORE fails or
509
       cannot be used. This is worth doing on systems that have "holes" in
510
       address space, so sbrk cannot extend to give contiguous space, but
511
       space is available elsewhere.  Note that we ignore mmap max count
512
       and threshold limits, since the space will not be used as a
513
       segregated mmap region.
514
       */
515
 
516
    if (brk == (char*)(MORECORE_FAILURE)) {
517
 
518
        /* Cannot merge with old top, so add its size back in */
519
        if (contiguous(av))
520
            size = (size + old_size + pagemask) & ~pagemask;
521
 
522
        /* If we are relying on mmap as backup, then use larger units */
523
        if ((unsigned long)(size) < (unsigned long)(MMAP_AS_MORECORE_SIZE))
524
            size = MMAP_AS_MORECORE_SIZE;
525
 
526
        /* Don't try if size wraps around 0 */
527
        if ((unsigned long)(size) > (unsigned long)(nb)) {
528
 
529
            brk = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
530
 
531
            if (brk != (char*)(MORECORE_FAILURE)) {
532
 
533
                /* We do not need, and cannot use, another sbrk call to find end */
534
                snd_brk = brk + size;
535
 
536
                /* Record that we no longer have a contiguous sbrk region.
537
                   After the first time mmap is used as backup, we do not
538
                   ever rely on contiguous space since this could incorrectly
539
                   bridge regions.
540
                   */
541
                set_noncontiguous(av);
542
            }
543
        }
544
    }
545
 
546
    if (brk != (char*)(MORECORE_FAILURE)) {
547
        av->sbrked_mem += size;
548
 
549
        /*
550
           If MORECORE extends previous space, we can likewise extend top size.
551
           */
552
 
553
        if (brk == old_end && snd_brk == (char*)(MORECORE_FAILURE)) {
554
            set_head(old_top, (size + old_size) | PREV_INUSE);
555
        }
556
 
557
        /*
558
           Otherwise, make adjustments:
559
 
560
         * If the first time through or noncontiguous, we need to call sbrk
561
         just to find out where the end of memory lies.
562
 
563
         * We need to ensure that all returned chunks from malloc will meet
564
         MALLOC_ALIGNMENT
565
 
566
         * If there was an intervening foreign sbrk, we need to adjust sbrk
567
         request size to account for fact that we will not be able to
568
         combine new space with existing space in old_top.
569
 
570
         * Almost all systems internally allocate whole pages at a time, in
571
         which case we might as well use the whole last page of request.
572
         So we allocate enough more memory to hit a page boundary now,
573
         which in turn causes future contiguous calls to page-align.
574
         */
575
 
576
        else {
577
            front_misalign = 0;
578
            end_misalign = 0;
579
            correction = 0;
580
            aligned_brk = brk;
581
 
582
            /*
583
               If MORECORE returns an address lower than we have seen before,
584
               we know it isn't really contiguous.  This and some subsequent
585
               checks help cope with non-conforming MORECORE functions and
586
               the presence of "foreign" calls to MORECORE from outside of
587
               malloc or by other threads.  We cannot guarantee to detect
588
               these in all cases, but cope with the ones we do detect.
589
               */
590
            if (contiguous(av) && old_size != 0 && brk < old_end) {
591
                set_noncontiguous(av);
592
            }
593
 
594
            /* handle contiguous cases */
595
            if (contiguous(av)) {
596
 
597
                /* We can tolerate forward non-contiguities here (usually due
598
                   to foreign calls) but treat them as part of our space for
599
                   stats reporting.  */
600
                if (old_size != 0)
601
                    av->sbrked_mem += brk - old_end;
602
 
603
                /* Guarantee alignment of first new chunk made from this space */
604
 
605
                front_misalign = (size_t)chunk2mem(brk) & MALLOC_ALIGN_MASK;
606
                if (front_misalign > 0) {
607
 
608
                    /*
609
                       Skip over some bytes to arrive at an aligned position.
610
                       We don't need to specially mark these wasted front bytes.
611
                       They will never be accessed anyway because
612
                       prev_inuse of av->top (and any chunk created from its start)
613
                       is always true after initialization.
614
                       */
615
 
616
                    correction = MALLOC_ALIGNMENT - front_misalign;
617
                    aligned_brk += correction;
618
                }
619
 
620
                /*
621
                   If this isn't adjacent to existing space, then we will not
622
                   be able to merge with old_top space, so must add to 2nd request.
623
                   */
624
 
625
                correction += old_size;
626
 
627
                /* Extend the end address to hit a page boundary */
628
                end_misalign = (size_t)(brk + size + correction);
629
                correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;
630
 
631
                assert(correction >= 0);
632
                snd_brk = (char*)(MORECORE(correction));
633
 
634
                if (snd_brk == (char*)(MORECORE_FAILURE)) {
635
                    /*
636
                       If can't allocate correction, try to at least find out current
637
                       brk.  It might be enough to proceed without failing.
638
                       */
639
                    correction = 0;
640
                    snd_brk = (char*)(MORECORE(0));
641
                }
642
                else if (snd_brk < brk) {
643
                    /*
644
                       If the second call gives noncontiguous space even though
645
                       it says it won't, the only course of action is to ignore
646
                       results of second call, and conservatively estimate where
647
                       the first call left us. Also set noncontiguous, so this
648
                       won't happen again, leaving at most one hole.
649
 
650
                       Note that this check is intrinsically incomplete.  Because
651
                       MORECORE is allowed to give more space than we ask for,
652
                       there is no reliable way to detect a noncontiguity
653
                       producing a forward gap for the second call.
654
                       */
655
                    snd_brk = brk + size;
656
                    correction = 0;
657
                    set_noncontiguous(av);
658
                }
659
 
660
            }
661
 
662
            /* handle non-contiguous cases */
663
            else {
664
                /* MORECORE/mmap must correctly align */
665
                assert(aligned_OK(chunk2mem(brk)));
666
 
667
                /* Find out current end of memory */
668
                if (snd_brk == (char*)(MORECORE_FAILURE)) {
669
                    snd_brk = (char*)(MORECORE(0));
670
                    av->sbrked_mem += snd_brk - brk - size;
671
                }
672
            }
673
 
674
            /* Adjust top based on results of second sbrk */
675
            if (snd_brk != (char*)(MORECORE_FAILURE)) {
676
                av->top = (mchunkptr)aligned_brk;
677
                set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
678
                av->sbrked_mem += correction;
679
 
680
                /*
681
                   If not the first time through, we either have a
682
                   gap due to foreign sbrk or a non-contiguous region.  Insert a
683
                   double fencepost at old_top to prevent consolidation with space
684
                   we don't own. These fenceposts are artificial chunks that are
685
                   marked as inuse and are in any case too small to use.  We need
686
                   two to make sizes and alignments work out.
687
                   */
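                /*
                   Concretely (assuming 4-byte size_t and 8-byte alignment):
                   if old_top held 512 bytes, it is shrunk to
                   (512 - 12) & ~7 == 496 bytes, and the two size_t-sized
                   fencepost size fields are written at byte offsets 500 and
                   504 from old_top, both of size sizeof(size_t) and marked
                   PREV_INUSE.  The 3*sizeof(size_t) slack (plus alignment
                   rounding) guarantees they fall inside memory we already
                   own.
                   */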
688
 
689
                if (old_size != 0) {
690
                    /* Shrink old_top to insert fenceposts, keeping size a
691
                       multiple of MALLOC_ALIGNMENT. We know there is at least
692
                       enough space in old_top to do this.
693
                       */
694
                    old_size = (old_size - 3*(sizeof(size_t))) & ~MALLOC_ALIGN_MASK;
695
                    set_head(old_top, old_size | PREV_INUSE);
696
 
697
                    /*
698
                       Note that the following assignments completely overwrite
699
                       old_top when old_size was previously MINSIZE.  This is
700
                       intentional. We need the fencepost, even if old_top otherwise gets
701
                       lost.
702
                       */
703
                    chunk_at_offset(old_top, old_size          )->size =
704
                        (sizeof(size_t))|PREV_INUSE;
705
 
706
                    chunk_at_offset(old_top, old_size + (sizeof(size_t)))->size =
707
                        (sizeof(size_t))|PREV_INUSE;
708
 
709
                    /* If possible, release the rest, suppressing trimming.  */
710
                    if (old_size >= MINSIZE) {
711
                        size_t tt = av->trim_threshold;
712
                        av->trim_threshold = (size_t)(-1);
713
                        free(chunk2mem(old_top));
714
                        av->trim_threshold = tt;
715
                    }
716
                }
717
            }
718
        }
719
 
720
        /* Update statistics */
721
        sum = av->sbrked_mem;
722
        if (sum > (unsigned long)(av->max_sbrked_mem))
723
            av->max_sbrked_mem = sum;
724
 
725
        sum += av->mmapped_mem;
726
        if (sum > (unsigned long)(av->max_total_mem))
727
            av->max_total_mem = sum;
728
 
729
        check_malloc_state();
730
 
731
        /* finally, do the allocation */
732
 
733
        p = av->top;
734
        size = chunksize(p);
735
 
736
        /* check that one of the above allocation paths succeeded */
737
        if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
738
            remainder_size = size - nb;
739
            remainder = chunk_at_offset(p, nb);
740
            av->top = remainder;
741
            set_head(p, nb | PREV_INUSE);
742
            set_head(remainder, remainder_size | PREV_INUSE);
743
            check_malloced_chunk(p, nb);
744
            return chunk2mem(p);
745
        }
746
 
747
    }
748
 
749
    /* catch all failure paths */
750
    errno = ENOMEM;
751
    return 0;
752
}
753
 
754
 
755
/*
756
  Compute index for size. We expect this to be inlined when
757
  compiled with optimization, else not, which works out well.
758
*/
759
static int __malloc_largebin_index(unsigned int sz)
760
{
761
    unsigned int  x = sz >> SMALLBIN_WIDTH;
762
    unsigned int m;            /* bit position of highest set bit of x */
763
 
764
    if (x >= 0x10000) return NBINS-1;
765
 
766
    /* On x86, use the BSRL instruction to find the highest set bit */
767
#if defined(__GNUC__) && defined(i386)
768
 
769
    __asm__("bsrl %1,%0\n\t"
770
            : "=r" (m)
771
            : "g"  (x));
772
 
773
#else
774
    {
775
        /*
776
           Based on branch-free nlz algorithm in chapter 5 of Henry
777
           S. Warren Jr's book "Hacker's Delight".
778
           */
779
 
780
        unsigned int n = ((x - 0x100) >> 16) & 8;
781
        x <<= n;
782
        m = ((x - 0x1000) >> 16) & 4;
783
        n += m;
784
        x <<= m;
785
        m = ((x - 0x4000) >> 16) & 2;
786
        n += m;
787
        x = (x << m) >> 14;
788
        m = 13 - n + (x & ~(x>>1));
789
    }
790
#endif
791
 
792
    /* Use next 2 bits to create finer-granularity bins */
793
    return NSMALLBINS + (m << 2) + ((sz >> (m + 6)) & 3);
794
}
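/*
  A worked example of the bin computation above, assuming the values in
  uClibc's malloc.h (SMALLBIN_WIDTH == 8, so the first shift divides by 256):
  for sz == 4096, x == 16, whose highest set bit is bit 4, so m == 4 whether
  it comes from BSRL or from the branch-free code.  The result is then
  NSMALLBINS + (4 << 2) + ((4096 >> 10) & 3) == NSMALLBINS + 16, i.e. the
  first of the four finer-granularity sub-bins covering the 4096-8191 byte
  range.
*/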
795
 
796
 
797
 
798
/* ----------------------------------------------------------------------
799
 *
800
 * PUBLIC STUFF
801
 *
802
 * ----------------------------------------------------------------------*/
803
 
804
 
805
/* ------------------------------ malloc ------------------------------ */
806
void* malloc(size_t bytes)
807
{
808
    mstate av;
809
 
810
    size_t nb;               /* normalized request size */
811
    unsigned int    idx;              /* associated bin index */
812
    mbinptr         bin;              /* associated bin */
813
    mfastbinptr*    fb;               /* associated fastbin */
814
 
815
    mchunkptr       victim;           /* inspected/selected chunk */
816
    size_t size;             /* its size */
817
    int             victim_index;     /* its bin index */
818
 
819
    mchunkptr       remainder;        /* remainder from a split */
820
    unsigned long    remainder_size;   /* its size */
821
 
822
    unsigned int    block;            /* bit map traverser */
823
    unsigned int    bit;              /* bit map traverser */
824
    unsigned int    map;              /* current word of binmap */
825
 
826
    mchunkptr       fwd;              /* misc temp for linking */
827
    mchunkptr       bck;              /* misc temp for linking */
828
    void *          sysmem;
829
 
830
    LOCK;
831
    av = get_malloc_state();
832
    /*
833
       Convert request size to internal form by adding (sizeof(size_t)) bytes
834
       overhead plus possibly more to obtain necessary alignment and/or
835
       to obtain a size of at least MINSIZE, the smallest allocatable
836
       size. Also, checked_request2size traps (returning 0) request sizes
837
       that are so large that they wrap around zero when padded and
838
       aligned.
839
       */
840
 
841
    checked_request2size(bytes, nb);
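    /*
       For example, on a typical 32-bit target (4-byte size_t, 8-byte
       MALLOC_ALIGNMENT): malloc(12) becomes nb == 16 (12 + 4 rounded up to
       a multiple of 8), while malloc(13) becomes nb == 24.  A single size_t
       of overhead suffices because an in-use chunk may also borrow the
       prev_size field of the chunk that follows it.
       */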
842
 
843
    /*
844
       Bypass search if no frees yet
845
       */
846
    if (!have_anychunks(av)) {
847
        if (av->max_fast == 0) /* initialization check */
848
            __malloc_consolidate(av);
849
        goto use_top;
850
    }
851
 
852
    /*
853
       If the size qualifies as a fastbin, first check corresponding bin.
854
       */
855
 
856
    if ((unsigned long)(nb) <= (unsigned long)(av->max_fast)) {
857
        fb = &(av->fastbins[(fastbin_index(nb))]);
858
        if ( (victim = *fb) != 0) {
859
            *fb = victim->fd;
860
            check_remalloced_chunk(victim, nb);
861
            UNLOCK;
862
            return chunk2mem(victim);
863
        }
864
    }
865
 
866
    /*
867
       If a small request, check regular bin.  Since these "smallbins"
868
       hold one size each, no searching within bins is necessary.
869
       (For a large request, we need to wait until unsorted chunks are
870
       processed to find best fit. But for small ones, fits are exact
871
       anyway, so we can check now, which is faster.)
872
       */
873
 
874
    if (in_smallbin_range(nb)) {
875
        idx = smallbin_index(nb);
876
        bin = bin_at(av,idx);
877
 
878
        if ( (victim = last(bin)) != bin) {
879
            bck = victim->bk;
880
            set_inuse_bit_at_offset(victim, nb);
881
            bin->bk = bck;
882
            bck->fd = bin;
883
 
884
            check_malloced_chunk(victim, nb);
885
            UNLOCK;
886
            return chunk2mem(victim);
887
        }
888
    }
889
 
890
    /* If this is a large request, consolidate fastbins before continuing.
891
       While it might look excessive to kill all fastbins before
892
       even seeing if there is space available, this avoids
893
       fragmentation problems normally associated with fastbins.
894
       Also, in practice, programs tend to have runs of either small or
895
       large requests, but less often mixtures, so consolidation is not
896
       invoked all that often in most programs. And the programs that
897
       it is called frequently in otherwise tend to fragment.
898
       */
899
 
900
    else {
901
        idx = __malloc_largebin_index(nb);
902
        if (have_fastchunks(av))
903
            __malloc_consolidate(av);
904
    }
905
 
906
    /*
907
       Process recently freed or remaindered chunks, taking one only if
908
       it is an exact fit or, if this is a small request, the chunk is the remainder from
909
       the most recent non-exact fit.  Place other traversed chunks in
910
       bins.  Note that this step is the only place in any routine where
911
       chunks are placed in bins.
912
       */
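    /*
       In short, each pass over the unsorted list does one of three things:
       split the cached last_remainder for a small request, return a chunk
       that fits the request exactly, or move the chunk into its proper
       small or large bin and keep scanning.
       */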
913
 
914
    while ( (victim = unsorted_chunks(av)->bk) != unsorted_chunks(av)) {
915
        bck = victim->bk;
916
        size = chunksize(victim);
917
 
918
        /* If a small request, try to use last remainder if it is the
919
           only chunk in unsorted bin.  This helps promote locality for
920
           runs of consecutive small requests. This is the only
921
           exception to best-fit, and applies only when there is
922
           no exact fit for a small chunk.
923
           */
924
 
925
        if (in_smallbin_range(nb) &&
926
                bck == unsorted_chunks(av) &&
927
                victim == av->last_remainder &&
928
                (unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {
929
 
930
            /* split and reattach remainder */
931
            remainder_size = size - nb;
932
            remainder = chunk_at_offset(victim, nb);
933
            unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
934
            av->last_remainder = remainder;
935
            remainder->bk = remainder->fd = unsorted_chunks(av);
936
 
937
            set_head(victim, nb | PREV_INUSE);
938
            set_head(remainder, remainder_size | PREV_INUSE);
939
            set_foot(remainder, remainder_size);
940
 
941
            check_malloced_chunk(victim, nb);
942
            UNLOCK;
943
            return chunk2mem(victim);
944
        }
945
 
946
        /* remove from unsorted list */
947
        unsorted_chunks(av)->bk = bck;
948
        bck->fd = unsorted_chunks(av);
949
 
950
        /* Take now instead of binning if exact fit */
951
 
952
        if (size == nb) {
953
            set_inuse_bit_at_offset(victim, size);
954
            check_malloced_chunk(victim, nb);
955
            UNLOCK;
956
            return chunk2mem(victim);
957
        }
958
 
959
        /* place chunk in bin */
960
 
961
        if (in_smallbin_range(size)) {
962
            victim_index = smallbin_index(size);
963
            bck = bin_at(av, victim_index);
964
            fwd = bck->fd;
965
        }
966
        else {
967
            victim_index = __malloc_largebin_index(size);
968
            bck = bin_at(av, victim_index);
969
            fwd = bck->fd;
970
 
971
            if (fwd != bck) {
972
                /* if smaller than smallest, place first */
973
                if ((unsigned long)(size) < (unsigned long)(bck->bk->size)) {
974
                    fwd = bck;
975
                    bck = bck->bk;
976
                }
977
                else if ((unsigned long)(size) >=
978
                        (unsigned long)(FIRST_SORTED_BIN_SIZE)) {
979
 
980
                    /* maintain large bins in sorted order */
981
                    size |= PREV_INUSE; /* Or with inuse bit to speed comparisons */
982
                    while ((unsigned long)(size) < (unsigned long)(fwd->size))
983
                        fwd = fwd->fd;
984
                    bck = fwd->bk;
985
                }
986
            }
987
        }
988
 
989
        mark_bin(av, victim_index);
990
        victim->bk = bck;
991
        victim->fd = fwd;
992
        fwd->bk = victim;
993
        bck->fd = victim;
994
    }
995
 
996
    /*
997
       If a large request, scan through the chunks of current bin to
998
       find one that fits.  (This will be the smallest that fits unless
999
       FIRST_SORTED_BIN_SIZE has been changed from default.)  This is
1000
       the only step where an unbounded number of chunks might be
1001
       scanned without doing anything useful with them. However the
1002
       lists tend to be short.
1003
       */
1004
 
1005
    if (!in_smallbin_range(nb)) {
1006
        bin = bin_at(av, idx);
1007
 
1008
        for (victim = last(bin); victim != bin; victim = victim->bk) {
1009
            size = chunksize(victim);
1010
 
1011
            if ((unsigned long)(size) >= (unsigned long)(nb)) {
1012
                remainder_size = size - nb;
1013
                unlink(victim, bck, fwd);
1014
 
1015
                /* Exhaust */
1016
                if (remainder_size < MINSIZE)  {
1017
                    set_inuse_bit_at_offset(victim, size);
1018
                    check_malloced_chunk(victim, nb);
1019
                    UNLOCK;
1020
                    return chunk2mem(victim);
1021
                }
1022
                /* Split */
1023
                else {
1024
                    remainder = chunk_at_offset(victim, nb);
1025
                    unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
1026
                    remainder->bk = remainder->fd = unsorted_chunks(av);
1027
                    set_head(victim, nb | PREV_INUSE);
1028
                    set_head(remainder, remainder_size | PREV_INUSE);
1029
                    set_foot(remainder, remainder_size);
1030
                    check_malloced_chunk(victim, nb);
1031
                    UNLOCK;
1032
                    return chunk2mem(victim);
1033
                }
1034
            }
1035
        }
1036
    }
1037
 
1038
    /*
1039
       Search for a chunk by scanning bins, starting with next largest
1040
       bin. This search is strictly by best-fit; i.e., the smallest
1041
       (with ties going to approximately the least recently used) chunk
1042
       that fits is selected.
1043
 
1044
       The bitmap avoids needing to check that most blocks are nonempty.
1045
       */
1046
 
1047
    ++idx;
1048
    bin = bin_at(av,idx);
1049
    block = idx2block(idx);
1050
    map = av->binmap[block];
1051
    bit = idx2bit(idx);
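    /*
       The binmap is a small bit vector with one bit per bin; assuming the
       usual dlmalloc definitions (BINMAPSHIFT == 5, so 32 bins per map
       word), idx2block(idx) is idx >> 5 and idx2bit(idx) is 1 << (idx & 31).
       For idx == 70, for example, we would look at bit 6 of binmap word 2.
       */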
1052
 
1053
    for (;;) {
1054
 
1055
        /* Skip rest of block if there are no more set bits in this block.  */
1056
        if (bit > map || bit == 0) {
1057
            do {
1058
                if (++block >= BINMAPSIZE)  /* out of bins */
1059
                    goto use_top;
1060
            } while ( (map = av->binmap[block]) == 0);
1061
 
1062
            bin = bin_at(av, (block << BINMAPSHIFT));
1063
            bit = 1;
1064
        }
1065
 
1066
        /* Advance to bin with set bit. There must be one. */
1067
        while ((bit & map) == 0) {
1068
            bin = next_bin(bin);
1069
            bit <<= 1;
1070
            assert(bit != 0);
1071
        }
1072
 
1073
        /* Inspect the bin. It is likely to be non-empty */
1074
        victim = last(bin);
1075
 
1076
        /*  If a false alarm (empty bin), clear the bit. */
1077
        if (victim == bin) {
1078
            av->binmap[block] = map &= ~bit; /* Write through */
1079
            bin = next_bin(bin);
1080
            bit <<= 1;
1081
        }
1082
 
1083
        else {
1084
            size = chunksize(victim);
1085
 
1086
            /*  We know the first chunk in this bin is big enough to use. */
1087
            assert((unsigned long)(size) >= (unsigned long)(nb));
1088
 
1089
            remainder_size = size - nb;
1090
 
1091
            /* unlink */
1092
            bck = victim->bk;
1093
            bin->bk = bck;
1094
            bck->fd = bin;
1095
 
1096
            /* Exhaust */
1097
            if (remainder_size < MINSIZE) {
1098
                set_inuse_bit_at_offset(victim, size);
1099
                check_malloced_chunk(victim, nb);
1100
                UNLOCK;
1101
                return chunk2mem(victim);
1102
            }
1103
 
1104
            /* Split */
1105
            else {
1106
                remainder = chunk_at_offset(victim, nb);
1107
 
1108
                unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
1109
                remainder->bk = remainder->fd = unsorted_chunks(av);
1110
                /* advertise as last remainder */
1111
                if (in_smallbin_range(nb))
1112
                    av->last_remainder = remainder;
1113
 
1114
                set_head(victim, nb | PREV_INUSE);
1115
                set_head(remainder, remainder_size | PREV_INUSE);
1116
                set_foot(remainder, remainder_size);
1117
                check_malloced_chunk(victim, nb);
1118
                UNLOCK;
1119
                return chunk2mem(victim);
1120
            }
1121
        }
1122
    }
1123
 
1124
use_top:
1125
    /*
1126
       If large enough, split off the chunk bordering the end of memory
1127
       (held in av->top). Note that this is in accord with the best-fit
1128
       search rule.  In effect, av->top is treated as larger (and thus
1129
       less well fitting) than any other available chunk since it can
1130
       be extended to be as large as necessary (up to system
1131
       limitations).
1132
 
1133
       We require that av->top always exists (i.e., has size >=
1134
       MINSIZE) after initialization, so if it would otherwise be
1135
       exhausted by the current request, it is replenished. (The main
1136
       reason for ensuring it exists is that we may need MINSIZE space
1137
       to put in fenceposts in sysmalloc.)
1138
       */
1139
 
1140
    victim = av->top;
1141
    size = chunksize(victim);
1142
 
1143
    if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
1144
        remainder_size = size - nb;
1145
        remainder = chunk_at_offset(victim, nb);
1146
        av->top = remainder;
1147
        set_head(victim, nb | PREV_INUSE);
1148
        set_head(remainder, remainder_size | PREV_INUSE);
1149
 
1150
        check_malloced_chunk(victim, nb);
1151
        UNLOCK;
1152
        return chunk2mem(victim);
1153
    }
1154
 
1155
    /* If no space in top, relay to handle system-dependent cases */
1156
    sysmem = __malloc_alloc(nb, av);
1157
    UNLOCK;
1158
    return sysmem;
1159
}
1160
 
