/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1998-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

/* #define DEBUG */
#include <stdio.h>
#include "private/gc_priv.h"

GC_bool GC_use_entire_heap = 0;

/*
 * Free heap blocks are kept on one of several free lists,
 * depending on the size of the block.  Each free list is doubly linked.
 * Adjacent free blocks are coalesced.
 */

# define MAX_BLACK_LIST_ALLOC (2*HBLKSIZE)
        /* largest block we will allocate starting on a black */
        /* listed block.  Must be >= HBLKSIZE.                */

# define UNIQUE_THRESHOLD 32
        /* Sizes up to this many HBLKs each have their own free list */
# define HUGE_THRESHOLD 256
        /* Sizes of at least this many heap blocks are mapped to a */
        /* single free list.                                       */
# define FL_COMPRESSION 8
        /* In between sizes map this many distinct sizes to a single */
        /* bin.                                                      */

# define N_HBLK_FLS (HUGE_THRESHOLD - UNIQUE_THRESHOLD)/FL_COMPRESSION \
                                 + UNIQUE_THRESHOLD
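
/* Worked example (editor's illustration, not original code): with the   */
/* default values above, N_HBLK_FLS = (256 - 32)/8 + 32 = 28 + 32 = 60,  */
/* so GC_hblkfreelist below has N_HBLK_FLS+1 = 61 entries, indices 0     */
/* through 60.                                                           */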

struct hblk * GC_hblkfreelist[N_HBLK_FLS+1] = { 0 };

#ifndef USE_MUNMAP

  word GC_free_bytes[N_HBLK_FLS+1] = { 0 };
        /* Number of free bytes on each list. */

  /* Is bytes + the number of free bytes on lists n .. N_HBLK_FLS */
  /* > GC_max_large_allocd_bytes?                                 */
# ifdef __GNUC__
  __inline__
# endif
  static GC_bool GC_enough_large_bytes_left(bytes,n)
  word bytes;
  int n;
  {
    int i;
    for (i = N_HBLK_FLS; i >= n; --i) {
        bytes += GC_free_bytes[i];
        if (bytes > GC_max_large_allocd_bytes) return TRUE;
    }
    return FALSE;
  }
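
  /* Example reading of the test above (editor's illustration, not     */
  /* original code): the loop accumulates GC_free_bytes[i] for         */
  /* i = N_HBLK_FLS down to n on top of the caller's bytes.  If lists  */
  /* n..N_HBLK_FLS hold 400K of free blocks, a call with bytes = 200K  */
  /* reaches 600K; with GC_max_large_allocd_bytes at 512K the function */
  /* returns TRUE, i.e. splitting a block here would still leave       */
  /* enough large-block space for a repeat of the largest recent       */
  /* large request.                                                    */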

# define INCR_FREE_BYTES(n, b) GC_free_bytes[n] += (b);

# define FREE_ASSERT(e) GC_ASSERT(e)

#else /* USE_MUNMAP */

# define INCR_FREE_BYTES(n, b)
# define FREE_ASSERT(e)

#endif /* USE_MUNMAP */

/* Map a number of blocks to the appropriate large block free list index. */
int GC_hblk_fl_from_blocks(blocks_needed)
word blocks_needed;
{
    if (blocks_needed <= UNIQUE_THRESHOLD) return blocks_needed;
    if (blocks_needed >= HUGE_THRESHOLD) return N_HBLK_FLS;
    return (blocks_needed - UNIQUE_THRESHOLD)/FL_COMPRESSION
                                        + UNIQUE_THRESHOLD;
}
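
/* Worked examples (editor's illustration, not original code):        */
/*   GC_hblk_fl_from_blocks(1)   == 1    (own list per size)          */
/*   GC_hblk_fl_from_blocks(32)  == 32                                */
/*   GC_hblk_fl_from_blocks(100) == (100-32)/8 + 32 == 40             */
/*   GC_hblk_fl_from_blocks(500) == N_HBLK_FLS == 60                  */
/* Note that sizes 33..39 blocks also map to (33-32)/8 + 32 == 32,    */
/* so list 32 is shared between exact-size-32 blocks and that bin.    */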

# define PHDR(hhdr) HDR(hhdr -> hb_prev)
# define NHDR(hhdr) HDR(hhdr -> hb_next)

# ifdef USE_MUNMAP
#   define IS_MAPPED(hhdr) (((hhdr) -> hb_flags & WAS_UNMAPPED) == 0)
# else  /* !USE_MUNMAP */
#   define IS_MAPPED(hhdr) 1
# endif /* USE_MUNMAP */

# if !defined(NO_DEBUGGING)
void GC_print_hblkfreelist()
{
    struct hblk * h;
    word total_free = 0;
    hdr * hhdr;
    word sz;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
#     ifdef USE_MUNMAP
        if (0 != h) GC_printf1("Free list %ld:\n",
                               (unsigned long)i);
#     else
        if (0 != h) GC_printf2("Free list %ld (Total size %ld):\n",
                               (unsigned long)i,
                               (unsigned long)GC_free_bytes[i]);
#     endif
      while (h != 0) {
        hhdr = HDR(h);
        sz = hhdr -> hb_sz;
        GC_printf2("\t0x%lx size %lu ", (unsigned long)h, (unsigned long)sz);
        total_free += sz;
        if (GC_is_black_listed(h, HBLKSIZE) != 0) {
             GC_printf0("start black listed\n");
        } else if (GC_is_black_listed(h, hhdr -> hb_sz) != 0) {
             GC_printf0("partially black listed\n");
        } else {
             GC_printf0("not black listed\n");
        }
        h = hhdr -> hb_next;
      }
    }
#   ifndef USE_MUNMAP
      if (total_free != GC_large_free_bytes) {
        GC_printf1("GC_large_free_bytes = %lu (INCONSISTENT!!)\n",
                   (unsigned long) GC_large_free_bytes);
      }
#   endif
    GC_printf1("Total of %lu bytes on free list\n", (unsigned long)total_free);
}

/* Return the free list index on which the block described by the header */
/* appears, or -1 if it appears nowhere.                                  */
int free_list_index_of(wanted)
hdr * wanted;
{
    struct hblk * h;
    hdr * hhdr;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
      while (h != 0) {
        hhdr = HDR(h);
        if (hhdr == wanted) return i;
        h = hhdr -> hb_next;
      }
    }
    return -1;
}

void GC_dump_regions()
{
    unsigned i;
    ptr_t start, end;
    ptr_t p;
    size_t bytes;
    hdr *hhdr;
    for (i = 0; i < GC_n_heap_sects; ++i) {
        start = GC_heap_sects[i].hs_start;
        bytes = GC_heap_sects[i].hs_bytes;
        end = start + bytes;
        /* Merge in contiguous sections. */
          while (i+1 < GC_n_heap_sects && GC_heap_sects[i+1].hs_start == end) {
            ++i;
            end = GC_heap_sects[i].hs_start + GC_heap_sects[i].hs_bytes;
          }
        GC_printf2("***Section from 0x%lx to 0x%lx\n", start, end);
        for (p = start; p < end;) {
            hhdr = HDR(p);
            GC_printf1("\t0x%lx ", (unsigned long)p);
            if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
                GC_printf1("Missing header!!(%ld)\n", hhdr);
                p += HBLKSIZE;
                continue;
            }
            if (HBLK_IS_FREE(hhdr)) {
                int correct_index = GC_hblk_fl_from_blocks(
                                        divHBLKSZ(hhdr -> hb_sz));
                int actual_index;

                GC_printf1("\tfree block of size 0x%lx bytes",
                           (unsigned long)(hhdr -> hb_sz));
                if (IS_MAPPED(hhdr)) {
                    GC_printf0("\n");
                } else {
                    GC_printf0("(unmapped)\n");
                }
                actual_index = free_list_index_of(hhdr);
                if (-1 == actual_index) {
                    GC_printf1("\t\tBlock not on free list %ld!!\n",
                               correct_index);
                } else if (correct_index != actual_index) {
                    GC_printf2("\t\tBlock on list %ld, should be on %ld!!\n",
                               actual_index, correct_index);
                }
                p += hhdr -> hb_sz;
            } else {
                GC_printf1("\tused for blocks of size 0x%lx bytes\n",
                           (unsigned long)WORDS_TO_BYTES(hhdr -> hb_sz));
                p += HBLKSIZE * OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
            }
        }
    }
}

# endif /* NO_DEBUGGING */

/* Initialize hdr for a block containing the indicated size and */
/* kind of objects.                                              */
/* Return FALSE on failure.                                      */
static GC_bool setup_header(hhdr, sz, kind, flags)
register hdr * hhdr;
word sz;        /* object size in words */
int kind;
unsigned char flags;
{
    register word descr;

    /* Add description of valid object pointers */
      if (!GC_add_map_entry(sz)) return(FALSE);
      hhdr -> hb_map = GC_obj_map[sz > MAXOBJSZ? 0 : sz];

    /* Set size, kind and mark proc fields */
      hhdr -> hb_sz = sz;
      hhdr -> hb_obj_kind = kind;
      hhdr -> hb_flags = flags;
      descr = GC_obj_kinds[kind].ok_descriptor;
      if (GC_obj_kinds[kind].ok_relocate_descr) descr += WORDS_TO_BYTES(sz);
      hhdr -> hb_descr = descr;

    /* Clear mark bits */
      GC_clear_hdr_marks(hhdr);

    hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
    return(TRUE);
}
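
/* Example (editor's illustration, not original code): for a kind with  */
/* ok_relocate_descr set, the per-kind base descriptor is adjusted by   */
/* the object's byte length, e.g. sz = 16 words on a 32-bit target      */
/* (4-byte words, an assumption for this example) adds 64 to descr.     */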

#define FL_UNKNOWN -1
/*
 * Remove hhdr from the appropriate free list.
 * We assume it is on the nth free list, or on the size
 * appropriate free list if n is FL_UNKNOWN.
 */
void GC_remove_from_fl(hhdr, n)
hdr * hhdr;
int n;
{
    int index;

    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
#   ifndef USE_MUNMAP
      /* We always need index to maintain free counts. */
      if (FL_UNKNOWN == n) {
          index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
      } else {
          index = n;
      }
#   endif
    if (hhdr -> hb_prev == 0) {
#       ifdef USE_MUNMAP
          if (FL_UNKNOWN == n) {
            index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
          } else {
            index = n;
          }
#       endif
        GC_ASSERT(HDR(GC_hblkfreelist[index]) == hhdr);
        GC_hblkfreelist[index] = hhdr -> hb_next;
    } else {
        hdr *phdr;
        GET_HDR(hhdr -> hb_prev, phdr);
        phdr -> hb_next = hhdr -> hb_next;
    }
    FREE_ASSERT(GC_free_bytes[index] >= hhdr -> hb_sz);
    INCR_FREE_BYTES(index, - (signed_word)(hhdr -> hb_sz));
    if (0 != hhdr -> hb_next) {
        hdr * nhdr;
        GC_ASSERT(!IS_FORWARDING_ADDR_OR_NIL(NHDR(hhdr)));
        GET_HDR(hhdr -> hb_next, nhdr);
        nhdr -> hb_prev = hhdr -> hb_prev;
    }
}

/*
 * Return a pointer to the free block ending just before h, if any.
 */
struct hblk * GC_free_block_ending_at(h)
struct hblk *h;
{
    struct hblk * p = h - 1;
    hdr * phdr;

    GET_HDR(p, phdr);
    while (0 != phdr && IS_FORWARDING_ADDR_OR_NIL(phdr)) {
        p = FORWARDED_ADDR(p,phdr);
        phdr = HDR(p);
    }
    if (0 != phdr) {
        if(HBLK_IS_FREE(phdr)) {
            return p;
        } else {
            return 0;
        }
    }
    p = GC_prev_block(h - 1);
    if (0 != p) {
      phdr = HDR(p);
      if (HBLK_IS_FREE(phdr) && (ptr_t)p + phdr -> hb_sz == (ptr_t)h) {
        return p;
      }
    }
    return 0;
}
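
/* Example (editor's illustration, not original code): if the hblk just */
/* before h is the interior of a multi-block allocated object, its      */
/* header entry is a forwarding count which the loop above follows back */
/* to the block's first hblk; that header then fails HBLK_IS_FREE and   */
/* we return 0.  If the lookup yields nil instead, we fall through to   */
/* the slower GC_prev_block() search.                                   */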

/*
 * Add hhdr to the appropriate free list.
 * We maintain individual free lists sorted by address.
 */
void GC_add_to_fl(h, hhdr)
struct hblk *h;
hdr * hhdr;
{
    int index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
    struct hblk *second = GC_hblkfreelist[index];
    hdr * second_hdr;
#   ifdef GC_ASSERTIONS
      struct hblk *next = (struct hblk *)((word)h + hhdr -> hb_sz);
      hdr * nexthdr = HDR(next);
      struct hblk *prev = GC_free_block_ending_at(h);
      hdr * prevhdr = HDR(prev);
      GC_ASSERT(nexthdr == 0 || !HBLK_IS_FREE(nexthdr) || !IS_MAPPED(nexthdr));
      GC_ASSERT(prev == 0 || !HBLK_IS_FREE(prevhdr) || !IS_MAPPED(prevhdr));
#   endif
    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
    GC_hblkfreelist[index] = h;
    INCR_FREE_BYTES(index, hhdr -> hb_sz);
    FREE_ASSERT(GC_free_bytes[index] <= GC_large_free_bytes)
    hhdr -> hb_next = second;
    hhdr -> hb_prev = 0;
    if (0 != second) {
      GET_HDR(second, second_hdr);
      second_hdr -> hb_prev = h;
    }
    GC_invalidate_map(hhdr);
}

#ifdef USE_MUNMAP

/* Unmap blocks that haven't been recently touched.  This is the only */
/* way blocks are ever unmapped.                                      */
void GC_unmap_old(void)
{
    struct hblk * h;
    hdr * hhdr;
    word sz;
    unsigned short last_rec, threshold;
    int i;
#   define UNMAP_THRESHOLD 6

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      for (h = GC_hblkfreelist[i]; 0 != h; h = hhdr -> hb_next) {
        hhdr = HDR(h);
        if (!IS_MAPPED(hhdr)) continue;
        threshold = (unsigned short)(GC_gc_no - UNMAP_THRESHOLD);
        last_rec = hhdr -> hb_last_reclaimed;
        if ((last_rec > GC_gc_no || last_rec < threshold)
            && threshold < GC_gc_no /* not recently wrapped */) {
          sz = hhdr -> hb_sz;
          GC_unmap((ptr_t)h, sz);
          hhdr -> hb_flags |= WAS_UNMAPPED;
        }
      }
    }
}
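
/* Worked example (editor's illustration, not original code): with      */
/* UNMAP_THRESHOLD = 6 and GC_gc_no = 70, threshold = 64.  A block      */
/* whose hb_last_reclaimed is 63 or less, or implausibly greater than   */
/* 70 (indicating the 16-bit counter wrapped), is treated as untouched  */
/* for at least 6 collections and gets unmapped.                        */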

/* Merge all unmapped blocks that are adjacent to other free        */
/* blocks.  This may involve remapping, since all blocks are either */
/* fully mapped or fully unmapped.                                  */
void GC_merge_unmapped(void)
{
    struct hblk * h, *next;
    hdr * hhdr, *nexthdr;
    word size, nextsize;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
      while (h != 0) {
        GET_HDR(h, hhdr);
        size = hhdr->hb_sz;
        next = (struct hblk *)((word)h + size);
        GET_HDR(next, nexthdr);
        /* Coalesce with successor, if possible */
          if (0 != nexthdr && HBLK_IS_FREE(nexthdr)) {
            nextsize = nexthdr -> hb_sz;
            if (IS_MAPPED(hhdr)) {
              GC_ASSERT(!IS_MAPPED(nexthdr));
              /* make both consistent, so that we can merge */
                if (size > nextsize) {
                  GC_remap((ptr_t)next, nextsize);
                } else {
                  GC_unmap((ptr_t)h, size);
                  hhdr -> hb_flags |= WAS_UNMAPPED;
                }
            } else if (IS_MAPPED(nexthdr)) {
              GC_ASSERT(!IS_MAPPED(hhdr));
              if (size > nextsize) {
                GC_unmap((ptr_t)next, nextsize);
              } else {
                GC_remap((ptr_t)h, size);
                hhdr -> hb_flags &= ~WAS_UNMAPPED;
                hhdr -> hb_last_reclaimed = nexthdr -> hb_last_reclaimed;
              }
            } else {
              /* Unmap any gap in the middle */
                GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nexthdr -> hb_sz);
            }
            /* If they are both unmapped, we merge, but leave unmapped. */
            GC_remove_from_fl(hhdr, i);
            GC_remove_from_fl(nexthdr, FL_UNKNOWN);
            hhdr -> hb_sz += nexthdr -> hb_sz;
            GC_remove_header(next);
            GC_add_to_fl(h, hhdr);
            /* Start over at beginning of list */
            h = GC_hblkfreelist[i];
          } else /* not mergeable with successor */ {
            h = hhdr -> hb_next;
          }
      } /* while (h != 0) ... */
    } /* for ... */
}
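
/* Example (editor's illustration, not original code): if a 3-block     */
/* mapped free block is followed by a 1-block unmapped free block, the  */
/* smaller side is converted (here the 1-block successor is remapped)   */
/* so both are in the same state before the list surgery merges them;   */
/* two unmapped neighbors are merged without remapping anything.        */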

#endif /* USE_MUNMAP */

/*
 * Return a pointer to a block starting at h of length bytes.
 * Memory for the block is mapped.
 * Remove the block from its free list, and return the remainder (if any)
 * to its appropriate free list.
 * May fail by returning 0.
 * The header for the returned block must be set up by the caller.
 * If the return value is not 0, then hhdr is the header for it.
 */
struct hblk * GC_get_first_part(h, hhdr, bytes, index)
struct hblk *h;
hdr * hhdr;
word bytes;
int index;
{
    word total_size = hhdr -> hb_sz;
    struct hblk * rest;
    hdr * rest_hdr;

    GC_ASSERT((total_size & (HBLKSIZE-1)) == 0);
    GC_remove_from_fl(hhdr, index);
    if (total_size == bytes) return h;
    rest = (struct hblk *)((word)h + bytes);
    rest_hdr = GC_install_header(rest);
    if (0 == rest_hdr) {
        /* This may be very bad news ... */
        WARN("Header allocation failed: Dropping block.\n", 0);
        return(0);
    }
    rest_hdr -> hb_sz = total_size - bytes;
    rest_hdr -> hb_flags = 0;
#   ifdef GC_ASSERTIONS
      /* Mark h not free, to avoid assertion about adjacent free blocks. */
      hhdr -> hb_map = 0;
#   endif
    GC_add_to_fl(rest, rest_hdr);
    return h;
}
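
/* Example (editor's illustration, not original code): asking for       */
/* bytes = 2*HBLKSIZE out of a 5-block free block returns its first     */
/* two blocks and re-links the remaining 3-block tail, with a freshly   */
/* installed header, onto GC_hblkfreelist[GC_hblk_fl_from_blocks(3)]    */
/* = GC_hblkfreelist[3].                                                */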

/*
 * H is a free block.  N points at an address inside it.
 * A new header for n has already been set up.  Fix up h's header
 * to reflect the fact that it is being split, move it to the
 * appropriate free list.
 * N replaces h in the original free list.
 *
 * Nhdr is not completely filled in, since it is about to be allocated.
 * It may in fact end up on the wrong free list for its size.
 * (Hence adding it to a free list is silly.  But this path is hopefully
 * rare enough that it doesn't matter.  The code is cleaner this way.)
 */
void GC_split_block(h, hhdr, n, nhdr, index)
struct hblk *h;
hdr * hhdr;
struct hblk *n;
hdr * nhdr;
int index;      /* Index of free list */
{
    word total_size = hhdr -> hb_sz;
    word h_size = (word)n - (word)h;
    struct hblk *prev = hhdr -> hb_prev;
    struct hblk *next = hhdr -> hb_next;

    /* Replace h with n on its freelist */
      nhdr -> hb_prev = prev;
      nhdr -> hb_next = next;
      nhdr -> hb_sz = total_size - h_size;
      nhdr -> hb_flags = 0;
      if (0 != prev) {
        HDR(prev) -> hb_next = n;
      } else {
        GC_hblkfreelist[index] = n;
      }
      if (0 != next) {
        HDR(next) -> hb_prev = n;
      }
      INCR_FREE_BYTES(index, -(signed_word)h_size);
      FREE_ASSERT(GC_free_bytes[index] > 0);
#     ifdef GC_ASSERTIONS
        nhdr -> hb_map = 0;     /* Don't fail test for consecutive */
                                /* free blocks in GC_add_to_fl.    */
#     endif
#   ifdef USE_MUNMAP
      hhdr -> hb_last_reclaimed = GC_gc_no;
#   endif
    hhdr -> hb_sz = h_size;
    GC_add_to_fl(h, hhdr);
    GC_invalidate_map(nhdr);
}

struct hblk * GC_allochblk_nth();

/*
 * Allocate (and return pointer to) a heap block
 * for objects of size sz words, searching the nth free list.
 *
 * NOTE: We set obj_map field in header correctly.
 * Caller is responsible for building an object freelist in block.
 *
 * Unlike older versions of the collectors, the client is responsible
 * for clearing the block, if necessary.
 */
struct hblk *
GC_allochblk(sz, kind, flags)
word sz;
int kind;
unsigned flags;  /* IGNORE_OFF_PAGE or 0 */
{
    word blocks = OBJ_SZ_TO_BLOCKS(sz);
    int start_list = GC_hblk_fl_from_blocks(blocks);
    int i;
    for (i = start_list; i <= N_HBLK_FLS; ++i) {
        struct hblk * result = GC_allochblk_nth(sz, kind, flags, i);
        if (0 != result) {
            return result;
        }
    }
    return 0;
}
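
/* Usage sketch (editor's illustration, not original code; callers      */
/* such as GC_new_hblk do this with the allocation lock held):          */
/*                                                                      */
/*      struct hblk *h = GC_allochblk(sz, NORMAL, 0);                   */
/*      if (0 == h) { ... expand the heap or collect, then retry ... }  */
/*      else { ... build the object free list in h; the block's header  */
/*             was already set up via setup_header() ... }              */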

/*
 * The same, but with search restricted to nth free list.
 */
struct hblk *
GC_allochblk_nth(sz, kind, flags, n)
word sz;
int kind;
unsigned char flags;  /* IGNORE_OFF_PAGE or 0 */
int n;
{
    register struct hblk *hbp;
    register hdr * hhdr;                /* Header corr. to hbp */
    register struct hblk *thishbp;
    register hdr * thishdr;             /* Header corr. to thishbp */
    signed_word size_needed;    /* number of bytes in requested objects */
    signed_word size_avail;     /* bytes available in this block        */

    size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS(sz);

    /* search for a big enough block in free list */
        hbp = GC_hblkfreelist[n];
        for(; 0 != hbp; hbp = hhdr -> hb_next) {
            GET_HDR(hbp, hhdr);
            size_avail = hhdr->hb_sz;
            if (size_avail < size_needed) continue;
            if (size_avail != size_needed
                && !GC_use_entire_heap
                && !GC_dont_gc
                && USED_HEAP_SIZE >= GC_requested_heapsize
                && !TRUE_INCREMENTAL && GC_should_collect()) {
#               ifdef USE_MUNMAP
                    continue;
#               else
                    /* If we have enough large blocks left to cover any */
                    /* previous request for large blocks, we go ahead   */
                    /* and split.  Assuming a steady state, that should */
                    /* be safe.  It means that we can use the full      */
                    /* heap if we allocate only small objects.          */
                    if (!GC_enough_large_bytes_left(GC_large_allocd_bytes, n)) {
                      continue;
                    }
                    /* If we are deallocating lots of memory from */
                    /* finalizers, fail and collect sooner rather */
                    /* than later.                                */
                    if (WORDS_TO_BYTES(GC_finalizer_mem_freed)
                        > (GC_heapsize >> 4)) {
                      continue;
                    }
#               endif /* !USE_MUNMAP */
            }
            /* If the next heap block is obviously better, go on.       */
            /* This prevents us from disassembling a single large block */
            /* to get tiny blocks.                                      */
            {
              signed_word next_size;

              thishbp = hhdr -> hb_next;
              if (thishbp != 0) {
                GET_HDR(thishbp, thishdr);
                next_size = (signed_word)(thishdr -> hb_sz);
                if (next_size < size_avail
                    && next_size >= size_needed
                    && !GC_is_black_listed(thishbp, (word)size_needed)) {
                  continue;
                }
              }
            }
            if ( !IS_UNCOLLECTABLE(kind) &&
                 (kind != PTRFREE || size_needed > MAX_BLACK_LIST_ALLOC)) {
              struct hblk * lasthbp = hbp;
              ptr_t search_end = (ptr_t)hbp + size_avail - size_needed;
              signed_word orig_avail = size_avail;
              signed_word eff_size_needed = ((flags & IGNORE_OFF_PAGE)?
                                                HBLKSIZE
                                                : size_needed);

              while ((ptr_t)lasthbp <= search_end
                     && (thishbp = GC_is_black_listed(lasthbp,
                                                      (word)eff_size_needed))
                        != 0) {
                lasthbp = thishbp;
              }
              size_avail -= (ptr_t)lasthbp - (ptr_t)hbp;
              thishbp = lasthbp;
              if (size_avail >= size_needed) {
                if (thishbp != hbp &&
                    0 != (thishdr = GC_install_header(thishbp))) {
                  /* Make sure it's mapped before we mangle it. */
#                   ifdef USE_MUNMAP
                      if (!IS_MAPPED(hhdr)) {
                        GC_remap((ptr_t)hbp, hhdr -> hb_sz);
                        hhdr -> hb_flags &= ~WAS_UNMAPPED;
                      }
#                   endif
                  /* Split the block at thishbp */
                      GC_split_block(hbp, hhdr, thishbp, thishdr, n);
                  /* Advance to thishbp */
                      hbp = thishbp;
                      hhdr = thishdr;
                      /* We must now allocate thishbp, since it may */
                      /* be on the wrong free list.                 */
                }
              } else if (size_needed > (signed_word)BL_LIMIT
                         && orig_avail - size_needed
                            > (signed_word)BL_LIMIT) {
                /* Punt, since anything else risks unreasonable heap growth. */
                if (++GC_large_alloc_warn_suppressed
                    >= GC_large_alloc_warn_interval) {
                  WARN("Repeated allocation of very large block "
                       "(appr. size %ld):\n"
                       "\tMay lead to memory leak and poor performance.\n",
                       size_needed);
                  GC_large_alloc_warn_suppressed = 0;
                }
                size_avail = orig_avail;
              } else if (size_avail == 0 && size_needed == HBLKSIZE
                         && IS_MAPPED(hhdr)) {
                if (!GC_find_leak) {
                  static unsigned count = 0;

                  /* The block is completely blacklisted.  We need      */
                  /* to drop some such blocks, since otherwise we spend */
                  /* all our time traversing them if pointerfree        */
                  /* blocks are unpopular.                              */
                  /* A dropped block will be reconsidered at next GC.   */
                  if ((++count & 3) == 0) {
                    /* Allocate and drop the block in small chunks, to */
                    /* maximize the chance that we will recover some   */
                    /* later.                                          */
                      word total_size = hhdr -> hb_sz;
                      struct hblk * limit = hbp + divHBLKSZ(total_size);
                      struct hblk * h;
                      struct hblk * prev = hhdr -> hb_prev;

                      GC_words_wasted += BYTES_TO_WORDS(total_size);
                      GC_large_free_bytes -= total_size;
                      GC_remove_from_fl(hhdr, n);
                      for (h = hbp; h < limit; h++) {
                        if (h == hbp || 0 != (hhdr = GC_install_header(h))) {
                          (void) setup_header(
                                  hhdr,
                                  BYTES_TO_WORDS(HBLKSIZE),
                                  PTRFREE, 0); /* Can't fail */
                          if (GC_debugging_started) {
                            BZERO(h, HBLKSIZE);
                          }
                        }
                      }
                    /* Restore hbp to point at free block */
                      hbp = prev;
                      if (0 == hbp) {
                        return GC_allochblk_nth(sz, kind, flags, n);
                      }
                      hhdr = HDR(hbp);
                  }
                }
              }
            }
            if( size_avail >= size_needed ) {
#               ifdef USE_MUNMAP
                  if (!IS_MAPPED(hhdr)) {
                    GC_remap((ptr_t)hbp, hhdr -> hb_sz);
                    hhdr -> hb_flags &= ~WAS_UNMAPPED;
                  }
#               endif
                /* hbp may be on the wrong freelist; the parameter n */
                /* is important.                                     */
                hbp = GC_get_first_part(hbp, hhdr, size_needed, n);
                break;
            }
        }

    if (0 == hbp) return 0;

    /* Add it to map of valid blocks */
        if (!GC_install_counts(hbp, (word)size_needed)) return(0);
        /* This leaks memory under very rare conditions. */

    /* Set up header */
        if (!setup_header(hhdr, sz, kind, flags)) {
            GC_remove_counts(hbp, (word)size_needed);
            return(0); /* ditto */
        }

    /* Notify virtual dirty bit implementation that we are about to write.  */
    /* Ensure that pointerfree objects are not protected if it's avoidable. */
        GC_remove_protection(hbp, divHBLKSZ(size_needed),
                             (hhdr -> hb_descr == 0) /* pointer-free */);

    /* We just successfully allocated a block.  Restart count of */
    /* consecutive failures.                                     */
        {
            extern unsigned GC_fail_count;

            GC_fail_count = 0;
        }

    GC_large_free_bytes -= size_needed;

    GC_ASSERT(IS_MAPPED(hhdr));
    return( hbp );
}
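
/* Example of the black-list walk above (editor's illustration, not     */
/* original code): suppose list n yields an 8-block free block whose    */
/* first 3 blocks are black listed for a 2-block pointerful request.    */
/* The while loop advances lasthbp past the black listed prefix,        */
/* GC_split_block() returns that prefix to a free list, and the         */
/* allocation proceeds from the clean remainder.                        */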

struct hblk * GC_freehblk_ptr = 0;  /* Search position hint for GC_freehblk */

/*
 * Free a heap block.
 *
 * Coalesce the block with its neighbors if possible.
 *
 * All mark words are assumed to be cleared.
 */
void
GC_freehblk(hbp)
struct hblk *hbp;
{
    struct hblk *next, *prev;
    hdr *hhdr, *prevhdr, *nexthdr;
    signed_word size;

    GET_HDR(hbp, hhdr);
    size = hhdr->hb_sz;
    size = HBLKSIZE * OBJ_SZ_TO_BLOCKS(size);
    GC_remove_counts(hbp, (word)size);
    hhdr->hb_sz = size;
#   ifdef USE_MUNMAP
      hhdr -> hb_last_reclaimed = GC_gc_no;
#   endif

    /* Check for duplicate deallocation in the easy case */
      if (HBLK_IS_FREE(hhdr)) {
        GC_printf1("Duplicate large block deallocation of 0x%lx\n",
                   (unsigned long) hbp);
        ABORT("Duplicate large block deallocation");
      }

    GC_ASSERT(IS_MAPPED(hhdr));
    GC_invalidate_map(hhdr);
    next = (struct hblk *)((word)hbp + size);
    GET_HDR(next, nexthdr);
    prev = GC_free_block_ending_at(hbp);
    /* Coalesce with successor, if possible */
      if(0 != nexthdr && HBLK_IS_FREE(nexthdr) && IS_MAPPED(nexthdr)) {
        GC_remove_from_fl(nexthdr, FL_UNKNOWN);
        hhdr -> hb_sz += nexthdr -> hb_sz;
        GC_remove_header(next);
      }
    /* Coalesce with predecessor, if possible. */
      if (0 != prev) {
        prevhdr = HDR(prev);
        if (IS_MAPPED(prevhdr)) {
          GC_remove_from_fl(prevhdr, FL_UNKNOWN);
          prevhdr -> hb_sz += hhdr -> hb_sz;
#         ifdef USE_MUNMAP
            prevhdr -> hb_last_reclaimed = GC_gc_no;
#         endif
          GC_remove_header(hbp);
          hbp = prev;
          hhdr = prevhdr;
        }
      }
    /* FIXME: It is not clear we really always want to do these merges */
    /* with -DUSE_MUNMAP, since it updates ages and hence prevents     */
    /* unmapping.                                                      */

    GC_large_free_bytes += size;
    GC_add_to_fl(hbp, hhdr);
}
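
/* Example (editor's illustration, not original code): freeing a block  */
/* that sits between two mapped free neighbors first absorbs the        */
/* successor (whose header is discarded), then is itself absorbed into  */
/* the predecessor, so the three fragments reach the free lists as one  */
/* block while GC_large_free_bytes grows by just this block's size.     */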