/*
 * libc/stdlib/malloc/free.c -- free function
 *
 *  Copyright (C) 2002,03  NEC Electronics Corporation
 *  Copyright (C) 2002,03  Miles Bader <miles@gnu.org>
 *
 * This file is subject to the terms and conditions of the GNU Lesser
 * General Public License.  See the file COPYING.LIB in the main
 * directory of this archive for more details.
 *
 * Written by Miles Bader <miles@gnu.org>
 */

#include <stdlib.h>
#include <unistd.h>
#include <sys/mman.h>

#include "malloc.h"
#include "heap.h"


static void
free_to_heap (void *mem, struct heap *heap)
{
  size_t size;
  struct heap_free_area *fa;

  /* Check for special cases.  */
  if (unlikely (! mem))
    return;

  /* Normal free.  */

  MALLOC_DEBUG (1, "free: 0x%lx (base = 0x%lx, total_size = %d)",
                (long)mem, (long)MALLOC_BASE (mem), MALLOC_SIZE (mem));

  size = MALLOC_SIZE (mem);
  mem = MALLOC_BASE (mem);
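  /* (MALLOC_BASE and MALLOC_SIZE come from malloc.h; they recover the
     start of the underlying block and its total size from the
     bookkeeping header that malloc stores just before the pointer it
     hands out.)  */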

  __heap_lock (heap);

  /* Put MEM back in the heap, and get the free-area it was placed in.  */
  fa = __heap_free (heap, mem, size);
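  /* (__heap_free, from heap.h, coalesces MEM with any adjacent free
     areas, so the returned free-area FA may be much larger than SIZE;
     that is how it can cross the unmap threshold tested below.)  */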

  /* See if the free-area FA has grown big enough that it should be
     unmapped.  */
  if (HEAP_FREE_AREA_SIZE (fa) < MALLOC_UNMAP_THRESHOLD)
    /* Nope, nothing left to do, just release the lock.  */
    __heap_unlock (heap);
  else
    /* Yup, try to unmap FA.  */
    {
      unsigned long start = (unsigned long)HEAP_FREE_AREA_START (fa);
      unsigned long end = (unsigned long)HEAP_FREE_AREA_END (fa);
#ifndef MALLOC_USE_SBRK
# ifdef __UCLIBC_UCLINUX_BROKEN_MUNMAP__
      struct malloc_mmb *mmb, *prev_mmb;
      unsigned long mmb_start, mmb_end;
# else /* !__UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
      unsigned long unmap_start, unmap_end;
# endif /* __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */
#endif /* !MALLOC_USE_SBRK */

#ifdef MALLOC_USE_SBRK
      /* Get the sbrk lock so that the two possible calls to sbrk below
         are guaranteed to be contiguous.  */
      __malloc_lock_sbrk ();
      /* When using sbrk, we only shrink the heap from the end.  It would
         be possible to allow _both_ -- shrinking via sbrk when possible,
         and otherwise shrinking via munmap, but this results in holes in
         memory that prevent the brk from ever growing back down; since
         we only ever grow the heap via sbrk, this tends to produce a
         continuously growing brk (though the actual memory is unmapped),
         which could eventually run out of address space.  Note that
         `sbrk(0)' shouldn't normally do a system call, so this test is
         reasonably cheap.  */
      if ((void *)end != sbrk (0))
        {
          MALLOC_DEBUG (-1, "not unmapping: 0x%lx - 0x%lx (%ld bytes)",
                        start, end, end - start);
          __malloc_unlock_sbrk ();
          __heap_unlock (heap);
          return;
        }
#endif

      MALLOC_DEBUG (0, "unmapping: 0x%lx - 0x%lx (%ld bytes)",
                    start, end, end - start);

      /* Remove FA from the heap.  */
      __heap_delete (heap, fa);

      if (__heap_is_empty (heap))
        /* We want to keep the heap from losing all its memory, so
           reserve a bit.  This test is only a heuristic -- the existence
           of another free area, even if it's smaller than
           MALLOC_MIN_SIZE, will cause us not to reserve anything.  */
        {
          /* Put the reserved memory back in the heap; we assume that
             MALLOC_UNMAP_THRESHOLD is greater than MALLOC_MIN_SIZE, so
             we use the latter unconditionally here.  */
          __heap_free (heap, (void *)start, MALLOC_MIN_SIZE);
          start += MALLOC_MIN_SIZE;
        }

#ifdef MALLOC_USE_SBRK

      /* Release the heap lock; we're still holding the sbrk lock.  */
      __heap_unlock (heap);
      /* Lower the brk.  */
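      /* (START is at or below END here, so the increment passed to
         sbrk is zero or negative and the break moves down, returning
         the START..END region to the system.)  */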
      sbrk (start - end);
      /* Release the sbrk lock too; now we hold no locks.  */
      __malloc_unlock_sbrk ();

#else /* !MALLOC_USE_SBRK */

# ifdef __UCLIBC_UCLINUX_BROKEN_MUNMAP__
      /* Using the uClinux broken munmap, we can only munmap blocks
         exactly as we got them from mmap, so scan through our list of
         mmapped blocks, and return them in order.  */
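      /* (Concretely: each malloc_mmb records one region obtained from
         mmap; the loop below unmaps, in address order, every such
         region that lies wholly inside START..END, and returns any
         unmappable gaps to the heap.)  */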

      MALLOC_MMB_DEBUG (1, "walking mmb list for region 0x%x[%d]...",
                        start, end - start);

      prev_mmb = 0;
      mmb = __malloc_mmapped_blocks;
      while (mmb
             && ((mmb_end = (mmb_start = (unsigned long)mmb->mem) + mmb->size)
                 <= end))
        {
          MALLOC_MMB_DEBUG (1, "considering mmb at 0x%x: 0x%x[%d]",
                            (unsigned)mmb, mmb_start, mmb_end - mmb_start);

          if (mmb_start >= start
              /* If the space between START and MMB_START is non-zero, but
                 too small to return to the heap, we can't unmap MMB.  */
              && (start == mmb_start
                  || mmb_start - start > HEAP_MIN_FREE_AREA_SIZE))
            {
              struct malloc_mmb *next_mmb = mmb->next;

              if (mmb_end != end && mmb_end + HEAP_MIN_FREE_AREA_SIZE > end)
                /* There's too little space left at the end to deallocate
                   this block, so give up.  */
                break;

              MALLOC_MMB_DEBUG (1, "unmapping mmb at 0x%x: 0x%x[%d]",
                                (unsigned)mmb, mmb_start, mmb_end - mmb_start);

              if (mmb_start != start)
                /* We're going to unmap a part of the heap that begins after
                   START, so put the intervening region back into the heap.  */
                {
                  MALLOC_MMB_DEBUG (0, "putting intervening region back into heap: 0x%x[%d]",
                                    start, mmb_start - start);
                  __heap_free (heap, (void *)start, mmb_start - start);
                }

              MALLOC_MMB_DEBUG_INDENT (-1);

              /* Unlink MMB from the list.  */
              if (prev_mmb)
                prev_mmb->next = next_mmb;
              else
                __malloc_mmapped_blocks = next_mmb;

              /* Start searching again from the end of this block.  */
              start = mmb_end;

              /* We have to unlock the heap before we recurse to free the mmb
                 descriptor, because we might be unmapping from the mmb
                 heap.  */
              __heap_unlock (heap);

              /* Release the descriptor block we used.  */
              free_to_heap (mmb, &__malloc_mmb_heap);

              /* Do the actual munmap.  */
              munmap ((void *)mmb_start, mmb_end - mmb_start);

              __heap_lock (heap);

#  ifdef __UCLIBC_HAS_THREADS__
              /* In a multi-threaded program, it's possible that PREV_MMB has
                 been invalidated by another thread when we released the
                 heap lock to do the munmap system call, so just start over
                 from the beginning of the list.  It sucks, but oh well;
                 it's probably not worth the bother to do better.  */
              prev_mmb = 0;
              mmb = __malloc_mmapped_blocks;
#  else
              mmb = next_mmb;
#  endif
            }
          else
            {
              prev_mmb = mmb;
              mmb = mmb->next;
            }

          MALLOC_MMB_DEBUG_INDENT (-1);
        }

      if (start != end)
        /* Hmm, well there's something we couldn't unmap, so put it back
           into the heap.  */
        {
          MALLOC_MMB_DEBUG (0, "putting tail region back into heap: 0x%x[%d]",
                            start, end - start);
          __heap_free (heap, (void *)start, end - start);
        }

      /* Finally release the lock for good.  */
      __heap_unlock (heap);

      MALLOC_MMB_DEBUG_INDENT (-1);

# else /* !__UCLIBC_UCLINUX_BROKEN_MUNMAP__ */

      /* START/END may not be page-aligned, so we have to page-align them,
         and return any left-over bits on the end to the heap.  */
      unmap_start = MALLOC_ROUND_UP_TO_PAGE_SIZE (start);
      unmap_end = MALLOC_ROUND_DOWN_TO_PAGE_SIZE (end);
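      /* (For example, with 4096-byte pages and the usual mask-based
         rounding -- an assumption here, the macros live in malloc.h --
         START = 0x12345 yields UNMAP_START = 0x13000, and END = 0x1f123
         yields UNMAP_END = 0x1f000.)  */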

      /* We have to be careful that any left-over bits are large enough to
         return.  Note that we _don't check_ to make sure there's room to
         grow/shrink the start/end by another page; we just assume that
         the unmap threshold is high enough so that this is always safe
         (i.e., it should probably be at least 3 pages).  */
      if (unmap_start > start)
        {
          if (unmap_start - start < HEAP_MIN_FREE_AREA_SIZE)
            unmap_start += MALLOC_PAGE_SIZE;
          __heap_free (heap, (void *)start, unmap_start - start);
        }
      if (end > unmap_end)
        {
          if (end - unmap_end < HEAP_MIN_FREE_AREA_SIZE)
            unmap_end -= MALLOC_PAGE_SIZE;
          __heap_free (heap, (void *)unmap_end, end - unmap_end);
        }

      /* Release the heap lock before we do the system call.  */
      __heap_unlock (heap);

      if (unmap_end > unmap_start)
        /* Finally, actually unmap the memory.  */
        munmap ((void *)unmap_start, unmap_end - unmap_start);

# endif /* __UCLIBC_UCLINUX_BROKEN_MUNMAP__ */

#endif /* MALLOC_USE_SBRK */
    }

  MALLOC_DEBUG_INDENT (-1);
}

void
free (void *mem)
{
  free_to_heap (mem, &__malloc_heap);
}
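
/* A minimal usage sketch, not compiled as part of the library: the
   NULL check at the top of free_to_heap makes free (NULL) a no-op,
   and releasing a sufficiently large block is what pushes a free area
   past MALLOC_UNMAP_THRESHOLD into the shrinking paths above.  The
   512 KB figure below is an illustrative guess, not a value taken
   from this file.  */
#if 0
#include <stdlib.h>

int main (void)
{
  char *p = malloc (512 * 1024);  /* likely to exceed the unmap threshold */
  free (p);                       /* may shrink the heap via sbrk/munmap */
  free (NULL);                    /* explicitly allowed: returns immediately */
  return 0;
}
#endif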
