/* or1k/trunk/linux/uClibc/libc/stdlib/malloc-standard/free.c, rev 1771
   (OpenCores Subversion repository or1k, https://opencores.org/ocsvn/or1k/or1k/trunk) */
/*
  This is a version (aka dlmalloc) of malloc/free/realloc written by
  Doug Lea and released to the public domain.  Use, modify, and
  redistribute this code without permission or acknowledgement in any
  way you wish.  Send questions, comments, complaints, performance
  data, etc to dl@cs.oswego.edu

  VERSION 2.7.2 Sat Aug 17 09:07:30 2002  Doug Lea  (dl at gee)

  Note: There may be an updated version of this malloc obtainable at
           ftp://gee.cs.oswego.edu/pub/misc/malloc.c
  Check before installing!

  Hacked up for uClibc by Erik Andersen <andersen@codepoet.org>
*/

#include "malloc.h"


/* ------------------------- __malloc_trim -------------------------
   __malloc_trim is an inverse of sorts to __malloc_alloc.  It gives memory
   back to the system (via negative arguments to sbrk) if there is unused
   memory at the `high' end of the malloc pool. It is called automatically by
   free() when top space exceeds the trim threshold. It is also called by the
   public malloc_trim routine.  It returns 1 if it actually released any
   memory, else 0.
*/
static int __malloc_trim(size_t pad, mstate av)
{
    long  top_size;        /* Amount of top-most memory */
    long  extra;           /* Amount to release */
    long  released;        /* Amount actually released */
    char* current_brk;     /* address returned by pre-check sbrk call */
    char* new_brk;         /* address returned by post-check sbrk call */
    size_t pagesz;

    pagesz = av->pagesize;
    top_size = chunksize(av->top);

    /* Release in pagesize units, keeping at least one page */
    extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;
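    /*
       Worked example (added for clarity, not in the original source):
       assume pagesz = 4096, pad = 0 and, purely for illustration,
       MINSIZE = 16 (its real value comes from malloc.h).  With
       top_size = 20480, i.e. five pages:

           extra = ((20480 - 0 - 16 + 4095) / 4096 - 1) * 4096
                 = (24559 / 4096 - 1) * 4096
                 = (5 - 1) * 4096
                 = 16384

       so four whole pages are handed back to the system while one full
       page (comfortably more than pad + MINSIZE) stays in the top chunk.
       */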

    if (extra > 0) {

        /*
           Only proceed if end of memory is where we last set it.
           This avoids problems if there were foreign sbrk calls.
           */
        current_brk = (char*)(MORECORE(0));
        if (current_brk == (char*)(av->top) + top_size) {

            /*
               Attempt to release memory. We ignore MORECORE return value,
               and instead call again to find out where new end of memory is.
               This avoids problems if first call releases less than we asked,
               or if failure somehow altered brk value. (We could still
               encounter problems if it altered brk in some very bad way,
               but the only thing we can do is adjust anyway, which will cause
               some downstream failure.)
               */

            MORECORE(-extra);
            new_brk = (char*)(MORECORE(0));

            if (new_brk != (char*)MORECORE_FAILURE) {
                released = (long)(current_brk - new_brk);

                if (released != 0) {
                    /* Success. Adjust top. */
                    av->sbrked_mem -= released;
                    set_head(av->top, (top_size - released) | PREV_INUSE);
                    check_malloc_state();
                    return 1;
                }
            }
        }
    }
    return 0;
}

/*
  Initialize a malloc_state struct.
 
  This is called only from within __malloc_consolidate, which needs to
  be called in the same contexts anyway.  It is never called directly
  outside of __malloc_consolidate because some optimizing compilers try
  to inline it at all call points, which turns out not to be an
  optimization at all. (Inlining it in __malloc_consolidate is fine though.)
*/
static void malloc_init_state(mstate av)
{
    int     i;
    mbinptr bin;

    /* Establish circular links for normal bins */
    for (i = 1; i < NBINS; ++i) {
        bin = bin_at(av,i);
        bin->fd = bin->bk = bin;
    }
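    /*
       Note added for clarity (not in the original source): after this loop
       every normal bin is an empty circular doubly-linked list whose header
       points at itself, so later code can test for emptiness with a
       comparison along the lines of

           bin_at(av, i)->fd == bin_at(av, i)

       and can splice chunks in and out without any NULL checks.
       */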

    av->top_pad        = DEFAULT_TOP_PAD;
    av->n_mmaps_max    = DEFAULT_MMAP_MAX;
    av->mmap_threshold = DEFAULT_MMAP_THRESHOLD;
    av->trim_threshold = DEFAULT_TRIM_THRESHOLD;

#if MORECORE_CONTIGUOUS
    set_contiguous(av);
#else
    set_noncontiguous(av);
#endif


    set_max_fast(av, DEFAULT_MXFAST);

    av->top            = initial_top(av);
    av->pagesize       = malloc_getpagesize;
}


/* ----------------------------------------------------------------------
 *
 * PUBLIC STUFF
 *
 * ----------------------------------------------------------------------*/


/* ------------------------- __malloc_consolidate -------------------------

  __malloc_consolidate is a specialized version of free() that tears
  down chunks held in fastbins.  Free itself cannot be used for this
  purpose since, among other things, it might place chunks back onto
  fastbins.  So, instead, we need to use a minor variant of the same
  code.

  Also, because this routine needs to be called the first time through
  malloc anyway, it turns out to be the perfect place to trigger
  initialization code.
*/
void __malloc_consolidate(mstate av)
{
    mfastbinptr*    fb;                 /* current fastbin being consolidated */
    mfastbinptr*    maxfb;              /* last fastbin (for loop control) */
    mchunkptr       p;                  /* current chunk being consolidated */
    mchunkptr       nextp;              /* next chunk to consolidate */
    mchunkptr       unsorted_bin;       /* bin header */
    mchunkptr       first_unsorted;     /* chunk to link to */

    /* These have same use as in free() */
    mchunkptr       nextchunk;
    size_t size;
    size_t nextsize;
    size_t prevsize;
    int             nextinuse;
    mchunkptr       bck;
    mchunkptr       fwd;

    /*
       If max_fast is 0, we know that av hasn't
       yet been initialized, in which case do so below
       */

    if (av->max_fast != 0) {
        clear_fastchunks(av);

        unsorted_bin = unsorted_chunks(av);

        /*
           Remove each chunk from fast bin and consolidate it, placing it
           then in unsorted bin. Among other reasons for doing this,
           placing in unsorted bin avoids needing to calculate actual bins
           until malloc is sure that chunks aren't immediately going to be
           reused anyway.
           */

        maxfb = &(av->fastbins[fastbin_index(av->max_fast)]);
        fb = &(av->fastbins[0]);
        do {
            if ( (p = *fb) != 0) {
                *fb = 0;

                do {
                    check_inuse_chunk(p);
                    nextp = p->fd;

                    /* Slightly streamlined version of consolidation code in free() */
                    size = p->size & ~PREV_INUSE;
                    nextchunk = chunk_at_offset(p, size);
                    nextsize = chunksize(nextchunk);

                    if (!prev_inuse(p)) {
                        prevsize = p->prev_size;
                        size += prevsize;
                        p = chunk_at_offset(p, -((long) prevsize));
                        unlink(p, bck, fwd);
                    }

                    if (nextchunk != av->top) {
                        nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
                        set_head(nextchunk, nextsize);

                        if (!nextinuse) {
                            size += nextsize;
                            unlink(nextchunk, bck, fwd);
                        }

                        first_unsorted = unsorted_bin->fd;
                        unsorted_bin->fd = p;
                        first_unsorted->bk = p;

                        set_head(p, size | PREV_INUSE);
                        p->bk = unsorted_bin;
                        p->fd = first_unsorted;
                        set_foot(p, size);
                    }

                    else {
                        size += nextsize;
                        set_head(p, size | PREV_INUSE);
                        av->top = p;
                    }

                } while ( (p = nextp) != 0);

            }
        } while (fb++ != maxfb);
    }
    else {
        malloc_init_state(av);
        check_malloc_state();
    }
}
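/*
  Illustrative note (not part of the original file): a freshly created arena
  has max_fast == 0, so the very first call to __malloc_consolidate() takes
  the else-branch above and runs malloc_init_state().  A first-time-through
  caller therefore only needs a check along these hypothetical lines

      if (av->max_fast == 0)
          __malloc_consolidate(av);

  before touching any bins; the actual call sites live in the other
  malloc-standard sources (e.g. malloc.c), not in this file.
*/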


/* ------------------------------ free ------------------------------ */
void free(void* mem)
{
    mstate av;

    mchunkptr       p;           /* chunk corresponding to mem */
    size_t size;        /* its size */
    mfastbinptr*    fb;          /* associated fastbin */
    mchunkptr       nextchunk;   /* next contiguous chunk */
    size_t nextsize;    /* its size */
    int             nextinuse;   /* true if nextchunk is used */
    size_t prevsize;    /* size of previous contiguous chunk */
    mchunkptr       bck;         /* misc temp for linking */
    mchunkptr       fwd;         /* misc temp for linking */

    /* free(0) has no effect */
    if (mem == NULL)
        return;

    LOCK;
    av = get_malloc_state();
    p = mem2chunk(mem);
    size = chunksize(p);

    check_inuse_chunk(p);

    /*
       If eligible, place chunk on a fastbin so it can be found
       and used quickly in malloc.
       */

    if ((unsigned long)(size) <= (unsigned long)(av->max_fast)

#if TRIM_FASTBINS
            /* If TRIM_FASTBINS set, don't place chunks
               bordering top into fastbins */
            && (chunk_at_offset(p, size) != av->top)
#endif
       ) {

        set_fastchunks(av);
        fb = &(av->fastbins[fastbin_index(size)]);
        p->fd = *fb;
        *fb = p;
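        /*
           Note added for clarity (not in the original source): the two
           assignments above push p onto its fastbin as a singly linked
           LIFO list; only the fd field is used.  The chunk is also left
           looking "in use" to its neighbours (no inuse bit is cleared
           here), so nothing coalesces with it until __malloc_consolidate()
           drains the fastbins.
           */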
    }

    /*
       Consolidate other non-mmapped chunks as they arrive.
       */

    else if (!chunk_is_mmapped(p)) {
        set_anychunks(av);

        nextchunk = chunk_at_offset(p, size);
        nextsize = chunksize(nextchunk);

        /* consolidate backward */
        if (!prev_inuse(p)) {
            prevsize = p->prev_size;
            size += prevsize;
            p = chunk_at_offset(p, -((long) prevsize));
            unlink(p, bck, fwd);
        }

        if (nextchunk != av->top) {
            /* get and clear inuse bit */
            nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
            set_head(nextchunk, nextsize);

            /* consolidate forward */
            if (!nextinuse) {
                unlink(nextchunk, bck, fwd);
                size += nextsize;
            }

            /*
               Place the chunk in unsorted chunk list. Chunks are
               not placed into regular bins until after they have
               been given one chance to be used in malloc.
               */

            bck = unsorted_chunks(av);
            fwd = bck->fd;
            p->bk = bck;
            p->fd = fwd;
            bck->fd = p;
            fwd->bk = p;
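            /*
               Note added for clarity (not in the original source): the four
               pointer assignments above splice p in at the fd end of the
               unsorted list, between the bin header (bck) and the chunk
               that was previously first (fwd).
               */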

            set_head(p, size | PREV_INUSE);
            set_foot(p, size);

            check_free_chunk(p);
        }

        /*
           If the chunk borders the current high end of memory,
           consolidate into top
           */

        else {
            size += nextsize;
            set_head(p, size | PREV_INUSE);
            av->top = p;
            check_chunk(p);
        }

        /*
           If freeing a large space, consolidate possibly-surrounding
           chunks. Then, if the total unused topmost memory exceeds trim
           threshold, ask malloc_trim to reduce top.

           Unless max_fast is 0, we don't know if there are fastbins
           bordering top, so we cannot tell for sure whether threshold
           has been reached unless fastbins are consolidated.  But we
           don't want to consolidate on each free.  As a compromise,
           consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
           is reached.
           */

        if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
            if (have_fastchunks(av))
                __malloc_consolidate(av);

            if ((unsigned long)(chunksize(av->top)) >=
                    (unsigned long)(av->trim_threshold))
                __malloc_trim(av->top_pad, av);
        }

    }
    /*
       If the chunk was allocated via mmap, release it via munmap().
       Note that if HAVE_MMAP is false but chunk_is_mmapped is
       true, then the user must have overwritten memory. There's nothing
       we can do to catch this error unless DEBUG is set, in which case
       check_inuse_chunk (above) will have triggered an error.
       */

    else {
        int ret;
        size_t offset = p->prev_size;
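        /*
           Note added for clarity (not in the original source): for mmapped
           chunks prev_size holds the distance from the start of the
           underlying mapping to the chunk header (alignment slop), so
           (char*)p - offset and size + offset recover the address and
           length originally passed to mmap(), which is what munmap()
           below needs.
           */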
        av->n_mmaps--;
        av->mmapped_mem -= (size + offset);
        ret = munmap((char*)p - offset, size + offset);
        /* munmap returns non-zero on failure */
        assert(ret == 0);
    }
    UNLOCK;
}
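/*
  Illustrative usage sketch (added for clarity, not part of the original
  file).  It exercises only behaviour documented above: free(NULL) is a
  no-op, a small block is eligible for a fastbin, and a block well above
  the mmap threshold is normally handed straight back with munmap().
  The size choices are assumptions for illustration, not guarantees.

      #include <stdlib.h>

      int main(void)
      {
          char *small = malloc(32);       // small enough to land in a fastbin when freed
          char *big   = malloc(1 << 20);  // typically mmapped, so free() will munmap() it

          free(NULL);                     // explicitly allowed: "free(0) has no effect"
          free(small);                    // parked in a fastbin for quick reuse
          free(big);                      // released to the kernel via munmap()
          return 0;
      }
*/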