/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1995 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

# include "private/gc_priv.h"

# if defined(LINUX) && !defined(POWERPC)
#   include <linux/version.h>
#   if (LINUX_VERSION_CODE <= 0x10400)
      /* Ugly hack to get struct sigcontext_struct definition.  Required  */
      /* for some early 1.3.X releases.  Will hopefully go away soon.     */
      /* In some later Linux releases, asm/sigcontext.h may have to       */
      /* be included instead.                                             */
#     define __KERNEL__
#     include <asm/signal.h>
#     undef __KERNEL__
#   else
      /* Kernels prior to 2.1.1 defined struct sigcontext_struct instead of */
      /* struct sigcontext.  libc6 (glibc2) uses "struct sigcontext" in     */
      /* prototypes, so we have to include the top-level sigcontext.h to    */
      /* make sure the former gets defined to be the latter if appropriate. */
#     include <features.h>
#     if 2 <= __GLIBC__
#       if 2 == __GLIBC__ && 0 == __GLIBC_MINOR__
          /* glibc 2.1 no longer has sigcontext.h.  But signal.h   */
          /* has the right declaration for glibc 2.1.              */
#         include <sigcontext.h>
#       endif /* 0 == __GLIBC_MINOR__ */
#     else /* not 2 <= __GLIBC__ */
        /* libc5 doesn't have <sigcontext.h>: go directly with the kernel   */
        /* one.  Check LINUX_VERSION_CODE to see which we should reference. */
#       include <asm/sigcontext.h>
#     endif /* 2 <= __GLIBC__ */
#   endif
# endif
# if !defined(OS2) && !defined(PCR) && !defined(AMIGA) && !defined(MACOS) \
    && !defined(MSWINCE)
#   include <sys/types.h>
#   if !defined(MSWIN32) && !defined(SUNOS4)
#     include <unistd.h>
#   endif
# endif

# include <stdio.h>
# if defined(MSWINCE)
#   define SIGSEGV 0 /* value is irrelevant */
# else
#   include <signal.h>
# endif

#if defined(LINUX) || defined(LINUX_STACKBOTTOM)
# include <ctype.h>
#endif

/* Blatantly OS dependent routines, except for those that are related  */
/* to dynamic loading.                                                  */

# if defined(HEURISTIC2) || defined(SEARCH_FOR_DATA_START)
#   define NEED_FIND_LIMIT
# endif

# if !defined(STACKBOTTOM) && defined(HEURISTIC2)
#   define NEED_FIND_LIMIT
# endif

# if (defined(SUNOS4) && defined(DYNAMIC_LOADING)) && !defined(PCR)
#   define NEED_FIND_LIMIT
# endif

# if (defined(SVR4) || defined(AUX) || defined(DGUX) \
      || (defined(LINUX) && defined(SPARC))) && !defined(PCR)
#   define NEED_FIND_LIMIT
# endif

#if defined(FREEBSD) && (defined(I386) || defined(X86_64) || defined(powerpc) || defined(__powerpc__))
#  include <machine/trap.h>
#  if !defined(PCR)
#    define NEED_FIND_LIMIT
#  endif
#endif

#if (defined(NETBSD) || defined(OPENBSD)) && defined(__ELF__) \
    && !defined(NEED_FIND_LIMIT)
   /* Used by GC_init_netbsd_elf() below.      */
#  define NEED_FIND_LIMIT
#endif

#ifdef NEED_FIND_LIMIT
#   include <setjmp.h>
#endif

#ifdef AMIGA
# define GC_AMIGA_DEF
# include "AmigaOS.c"
# undef GC_AMIGA_DEF
#endif

#if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32)
# define WIN32_LEAN_AND_MEAN
# define NOSERVICE
# include <windows.h>
#endif

#ifdef MACOS
# include <Processes.h>
#endif

#ifdef IRIX5
# include <sys/uio.h>
# include <malloc.h>   /* for locking */
#endif
#if defined(USE_MMAP) || defined(USE_MUNMAP)
# ifndef USE_MMAP
    --> USE_MUNMAP requires USE_MMAP
# endif
# include <sys/types.h>
# include <sys/mman.h>
# include <sys/stat.h>
# include <errno.h>
#endif

#ifdef UNIX_LIKE
# include <fcntl.h>
# if defined(SUNOS5SIGS) && !defined(FREEBSD)
#   include <sys/siginfo.h>
# endif
  /* Define SETJMP and friends to be the version that restores  */
  /* the signal mask.                                            */
# define SETJMP(env) sigsetjmp(env, 1)
# define LONGJMP(env, val) siglongjmp(env, val)
# define JMP_BUF sigjmp_buf
#else
# define SETJMP(env) setjmp(env)
# define LONGJMP(env, val) longjmp(env, val)
# define JMP_BUF jmp_buf
#endif
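  /* The signal-mask-restoring variants matter for the fault-handler     */
  /* probes below (GC_find_limit, GC_SysVGetDataStart, ...): LONGJMPing  */
  /* out of a SIGSEGV/SIGBUS handler leaves the signal blocked unless    */
  /* the mask saved by SETJMP is restored, and later probes would then   */
  /* terminate the process instead of faulting harmlessly.               */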

#ifdef DARWIN
/* for get_etext and friends */
#include <mach-o/getsect.h>
#endif

#ifdef DJGPP
  /* Apparently necessary for djgpp 2.01.  May cause problems with      */
  /* other versions.                                                     */
  typedef long unsigned int caddr_t;
#endif

#ifdef PCR
# include "il/PCR_IL.h"
# include "th/PCR_ThCtl.h"
# include "mm/PCR_MM.h"
#endif

#if !defined(NO_EXECUTE_PERMISSION)
# define OPT_PROT_EXEC PROT_EXEC
#else
# define OPT_PROT_EXEC 0
#endif

#if defined(LINUX) && \
    (defined(USE_PROC_FOR_LIBRARIES) || defined(IA64) || !defined(SMALL_CONFIG))

/* We need to parse /proc/self/maps, either to find dynamic libraries,  */
/* and/or to find the register backing store base (IA64).  Do it once   */
/* here.                                                                 */

#define READ read

/* Repeatedly perform a read call until the buffer is filled or         */
/* we encounter EOF.                                                     */
ssize_t GC_repeat_read(int fd, char *buf, size_t count)
{
    ssize_t num_read = 0;
    ssize_t result;

    while (num_read < count) {
        result = READ(fd, buf + num_read, count - num_read);
        if (result < 0) return result;
        if (result == 0) break;
        num_read += result;
    }
    return num_read;
}
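/* Note for callers: a return value smaller than count means EOF was    */
/* reached, since short reads are retried above; GC_apply_to_maps below */
/* relies on exactly that to detect whether its buffer was big enough.  */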

/*
 * Apply fn to a buffer containing the contents of /proc/self/maps.
 * Return the result of fn or, if we failed, 0.
 * We currently do nothing to /proc/self/maps other than simply read
 * it.  This code could be simplified if we could determine its size
 * ahead of time.
 */

word GC_apply_to_maps(word (*fn)(char *))
{
    int f;
    int result;
    size_t maps_size = 4000;  /* Initial guess.       */
    static char init_buf[1];
    static char *maps_buf = init_buf;
    static size_t maps_buf_sz = 1;

    /* Read /proc/self/maps, growing maps_buf as necessary.     */
    /* Note that we may not allocate conventionally, and        */
    /* thus can't use stdio.                                    */
        do {
            if (maps_size >= maps_buf_sz) {
              /* Grow only by powers of 2, since we leak "too small" buffers. */
              while (maps_size >= maps_buf_sz) maps_buf_sz *= 2;
              maps_buf = GC_scratch_alloc(maps_buf_sz);
              if (maps_buf == 0) return 0;
            }
            f = open("/proc/self/maps", O_RDONLY);
            if (-1 == f) return 0;
            maps_size = 0;
            do {
                result = GC_repeat_read(f, maps_buf, maps_buf_sz-1);
                if (result <= 0) return 0;
                maps_size += result;
            } while (result == maps_buf_sz-1);
            close(f);
        } while (maps_size >= maps_buf_sz);
    maps_buf[maps_size] = '\0';

    /* Apply fn to result. */
        return fn(maps_buf);
}
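/* Illustrative (hypothetical) use of GC_apply_to_maps: the callback    */
/* simply receives the NUL-terminated buffer, e.g.                      */
/*                                                                       */
/*   static word GC_maps_len(char *maps) { return strlen(maps); }       */
/*   ... GC_apply_to_maps(GC_maps_len) ...                               */
/*                                                                       */
/* The real clients below instead walk the buffer with                   */
/* GC_parse_map_entry.                                                   */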

#endif /* Need GC_apply_to_maps */

#if defined(LINUX) && (defined(USE_PROC_FOR_LIBRARIES) || defined(IA64))
//
//  GC_parse_map_entry parses an entry from /proc/self/maps so we can
//  locate all writable data segments that belong to shared libraries.
//  The format of one of these entries and the fields we care about
//  is as follows:
//  XXXXXXXX-XXXXXXXX r-xp 00000000 30:05 260537     name of mapping...\n
//  ^^^^^^^^ ^^^^^^^^ ^^^^          ^^
//  start    end      prot          maj_dev
//
//  Note that since about August 2003 kernels, the columns no longer have
//  fixed offsets on 64-bit kernels.  Hence we no longer rely on fixed
//  offsets anywhere, which is safer anyway.
//

/*
 * Assign various fields of the first line in buf_ptr to *start, *end,
 * *prot_buf and *maj_dev.  Only *prot_buf may be set for unwritable maps.
 */
char *GC_parse_map_entry(char *buf_ptr, word *start, word *end,
                                char *prot_buf, unsigned int *maj_dev)
{
    char *start_start, *end_start, *prot_start, *maj_dev_start;
    char *p;
    char *endp;

    if (buf_ptr == NULL || *buf_ptr == '\0') {
        return NULL;
    }

    p = buf_ptr;
    while (isspace(*p)) ++p;
    start_start = p;
    GC_ASSERT(isxdigit(*start_start));
    *start = strtoul(start_start, &endp, 16); p = endp;
    GC_ASSERT(*p=='-');

    ++p;
    end_start = p;
    GC_ASSERT(isxdigit(*end_start));
    *end = strtoul(end_start, &endp, 16); p = endp;
    GC_ASSERT(isspace(*p));

    while (isspace(*p)) ++p;
    prot_start = p;
    GC_ASSERT(*prot_start == 'r' || *prot_start == '-');
    memcpy(prot_buf, prot_start, 4);
    prot_buf[4] = '\0';
    if (prot_buf[1] == 'w') {/* we can skip the rest if it's not writable. */
        /* Skip past protection field to offset field */
          while (!isspace(*p)) ++p; while (isspace(*p)) ++p;
          GC_ASSERT(isxdigit(*p));
        /* Skip past offset field, which we ignore */
          while (!isspace(*p)) ++p; while (isspace(*p)) ++p;
        maj_dev_start = p;
        GC_ASSERT(isxdigit(*maj_dev_start));
        *maj_dev = strtoul(maj_dev_start, NULL, 16);
    }

    while (*p && *p++ != '\n');

    return p;
}
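/* Callers are expected to iterate, feeding the returned pointer back   */
/* in as buf_ptr until it comes back NULL; backing_store_base_from_maps */
/* further below does exactly that to find the IA64 register backing    */
/* store.                                                                */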

#endif /* Need to parse /proc/self/maps. */

#if defined(SEARCH_FOR_DATA_START)
  /* The I386 case can be handled without a search.  The Alpha case     */
  /* used to be handled differently as well, but the rules changed      */
  /* for recent Linux versions.  This seems to be the easiest way to    */
  /* cover all versions.                                                 */

# ifdef LINUX
    /* Some Linux distributions arrange to define __data_start.  Some    */
    /* define data_start as a weak symbol.  The latter is technically    */
    /* broken, since the user program may define data_start, in which    */
    /* case we lose.  Nonetheless, we try both, preferring __data_start. */
    /* We assume gcc-compatible pragmas.                                  */
#   pragma weak __data_start
    extern int __data_start[];
#   pragma weak data_start
    extern int data_start[];
# endif /* LINUX */
  extern int _end[];

  ptr_t GC_data_start;

  void GC_init_linux_data_start()
  {
    extern ptr_t GC_find_limit();

#   ifdef LINUX
      /* Try the easy approaches first: */
      if ((ptr_t)__data_start != 0) {
          GC_data_start = (ptr_t)(__data_start);
          return;
      }
      if ((ptr_t)data_start != 0) {
          GC_data_start = (ptr_t)(data_start);
          return;
      }
#   endif /* LINUX */
    GC_data_start = GC_find_limit((ptr_t)(_end), FALSE);
  }
#endif

# ifdef ECOS

# ifndef ECOS_GC_MEMORY_SIZE
# define ECOS_GC_MEMORY_SIZE (448 * 1024)
# endif /* ECOS_GC_MEMORY_SIZE */

// setjmp() function, as described in ANSI para 7.6.1.1
#undef SETJMP
#define SETJMP( __env__ )  hal_setjmp( __env__ )

// FIXME: This is a simple way of allocating memory which is
// compatible with ECOS early releases.  Later releases use a more
// sophisticated means of allocating memory than this simple static
// allocator, but this method is at least bound to work.
static char memory[ECOS_GC_MEMORY_SIZE];
static char *brk = memory;

static void *tiny_sbrk(ptrdiff_t increment)
{
  void *p = brk;

  brk += increment;

  if (brk > memory + sizeof memory)
    {
      brk -= increment;
      return NULL;
    }

  return p;
}
#define sbrk tiny_sbrk
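// Note (assumption): the define above redirects the sbrk() calls made
// later in this file to the static allocator above, so on eCos the
// collector can obtain memory without a real system sbrk.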
# endif /* ECOS */

#if (defined(NETBSD) || defined(OPENBSD)) && defined(__ELF__)
  ptr_t GC_data_start;

  void GC_init_netbsd_elf()
  {
    extern ptr_t GC_find_limit();
    extern char **environ;
        /* This may need to be environ, without the underscore, for     */
        /* some versions.                                                */
    GC_data_start = GC_find_limit((ptr_t)&environ, FALSE);
  }
#endif

# ifdef OS2

# include <stddef.h>

# if !defined(__IBMC__) && !defined(__WATCOMC__) /* e.g. EMX */

struct exe_hdr {
    unsigned short      magic_number;
    unsigned short      padding[29];
    long                new_exe_offset;
};

#define E_MAGIC(x)      (x).magic_number
#define EMAGIC          0x5A4D
#define E_LFANEW(x)     (x).new_exe_offset

struct e32_exe {
    unsigned char       magic_number[2];
    unsigned char       byte_order;
    unsigned char       word_order;
    unsigned long       exe_format_level;
    unsigned short      cpu;
    unsigned short      os;
    unsigned long       padding1[13];
    unsigned long       object_table_offset;
    unsigned long       object_count;
    unsigned long       padding2[31];
};

#define E32_MAGIC1(x)   (x).magic_number[0]
#define E32MAGIC1       'L'
#define E32_MAGIC2(x)   (x).magic_number[1]
#define E32MAGIC2       'X'
#define E32_BORDER(x)   (x).byte_order
#define E32LEBO         0
#define E32_WORDER(x)   (x).word_order
#define E32LEWO         0
#define E32_CPU(x)      (x).cpu
#define E32CPU286       1
#define E32_OBJTAB(x)   (x).object_table_offset
#define E32_OBJCNT(x)   (x).object_count

struct o32_obj {
    unsigned long       size;
    unsigned long       base;
    unsigned long       flags;
    unsigned long       pagemap;
    unsigned long       mapsize;
    unsigned long       reserved;
};

#define O32_FLAGS(x)    (x).flags
#define OBJREAD         0x0001L
#define OBJWRITE        0x0002L
#define OBJINVALID      0x0080L
#define O32_SIZE(x)     (x).size
#define O32_BASE(x)     (x).base

# else  /* IBM's compiler */

/* A kludge to get around what appears to be a header file bug */
# ifndef WORD
#   define WORD unsigned short
# endif
# ifndef DWORD
#   define DWORD unsigned long
# endif

# define EXE386 1
# include <newexe.h>
# include <exe386.h>

# endif  /* __IBMC__ */

# define INCL_DOSEXCEPTIONS
# define INCL_DOSPROCESS
# define INCL_DOSERRORS
# define INCL_DOSMODULEMGR
# define INCL_DOSMEMMGR
# include <os2.h>


/* Disable and enable signals during nontrivial allocations     */

void GC_disable_signals(void)
{
    ULONG nest;

    DosEnterMustComplete(&nest);
    if (nest != 1) ABORT("nested GC_disable_signals");
}

void GC_enable_signals(void)
{
    ULONG nest;

    DosExitMustComplete(&nest);
    if (nest != 0) ABORT("GC_enable_signals");
}


# else

#  if !defined(PCR) && !defined(AMIGA) && !defined(MSWIN32) \
      && !defined(MSWINCE) \
      && !defined(MACOS) && !defined(DJGPP) && !defined(DOS4GW) \
      && !defined(NOSYS) && !defined(ECOS)

#   if defined(SIG_BLOCK)
        /* Use POSIX/SYSV interface */
#       define SIGSET_T sigset_t
#       define SIG_DEL(set, signal) sigdelset(&(set), (signal))
#       define SIG_FILL(set)  sigfillset(&set)
#       define SIGSETMASK(old, new) sigprocmask(SIG_SETMASK, &(new), &(old))
#   elif defined(sigmask) && !defined(UTS4) && !defined(HURD)
        /* Use the traditional BSD interface */
#       define SIGSET_T int
#       define SIG_DEL(set, signal) (set) &= ~(sigmask(signal))
#       define SIG_FILL(set)  (set) = 0x7fffffff
            /* Setting the leading bit appears to provoke a bug in some  */
            /* longjmp implementations.  Most systems appear not to have */
            /* a signal 32.                                               */
#       define SIGSETMASK(old, new) (old) = sigsetmask(new)
#   else
#       error undetectable signal API
#   endif

static GC_bool mask_initialized = FALSE;

static SIGSET_T new_mask;

static SIGSET_T old_mask;

static SIGSET_T dummy;

#if defined(PRINTSTATS) && !defined(THREADS)
# define CHECK_SIGNALS
  int GC_sig_disabled = 0;
#endif

void GC_disable_signals()
{
    if (!mask_initialized) {
        SIG_FILL(new_mask);

        SIG_DEL(new_mask, SIGSEGV);
        SIG_DEL(new_mask, SIGILL);
        SIG_DEL(new_mask, SIGQUIT);
#       ifdef SIGBUS
            SIG_DEL(new_mask, SIGBUS);
#       endif
#       ifdef SIGIOT
            SIG_DEL(new_mask, SIGIOT);
#       endif
#       ifdef SIGEMT
            SIG_DEL(new_mask, SIGEMT);
#       endif
#       ifdef SIGTRAP
            SIG_DEL(new_mask, SIGTRAP);
#       endif
        mask_initialized = TRUE;
    }
#   ifdef CHECK_SIGNALS
        if (GC_sig_disabled != 0) ABORT("Nested disables");
        GC_sig_disabled++;
#   endif
    SIGSETMASK(old_mask,new_mask);
}

void GC_enable_signals()
{
#   ifdef CHECK_SIGNALS
        if (GC_sig_disabled != 1) ABORT("Unmatched enable");
        GC_sig_disabled--;
#   endif
    SIGSETMASK(dummy,old_mask);
}

# endif  /* !PCR */

# endif /* !OS/2 */

/* Ivan Demakov: simplest way (to me) */
#if defined (DOS4GW)
  void GC_disable_signals() { }
  void GC_enable_signals() { }
#endif

/* Find the page size */
word GC_page_size;

# if defined(MSWIN32) || defined(MSWINCE) || defined (CYGWIN32)
  void GC_setpagesize()
  {
    GetSystemInfo(&GC_sysinfo);
    GC_page_size = GC_sysinfo.dwPageSize;
  }

# else
#   if defined(MPROTECT_VDB) || defined(PROC_VDB) || defined(USE_MMAP) \
       || defined(USE_MUNMAP)
        void GC_setpagesize()
        {
            GC_page_size = GETPAGESIZE();
        }
#   else
        /* It's acceptable to fake it. */
        void GC_setpagesize()
        {
            GC_page_size = HBLKSIZE;
        }
#   endif
# endif

/*
 * Find the base of the stack.
 * Used only in single-threaded environment.
 * With threads, GC_mark_roots needs to know how to do this.
 * Called with allocator lock held.
 */
# if defined(MSWIN32) || defined(MSWINCE)
# define is_writable(prot) ((prot) == PAGE_READWRITE \
                            || (prot) == PAGE_WRITECOPY \
                            || (prot) == PAGE_EXECUTE_READWRITE \
                            || (prot) == PAGE_EXECUTE_WRITECOPY)
/* Return the number of bytes that are writable starting at p.  */
/* The pointer p is assumed to be page aligned.                 */
/* If base is not 0, *base becomes the beginning of the         */
/* allocation region containing p.                              */
word GC_get_writable_length(ptr_t p, ptr_t *base)
{
    MEMORY_BASIC_INFORMATION buf;
    word result;
    word protect;

    result = VirtualQuery(p, &buf, sizeof(buf));
    if (result != sizeof(buf)) ABORT("Weird VirtualQuery result");
    if (base != 0) *base = (ptr_t)(buf.AllocationBase);
    protect = (buf.Protect & ~(PAGE_GUARD | PAGE_NOCACHE));
    if (!is_writable(protect)) {
        return(0);
    }
    if (buf.State != MEM_COMMIT) return(0);
    return(buf.RegionSize);
}

ptr_t GC_get_stack_base()
{
    int dummy;
    ptr_t sp = (ptr_t)(&dummy);
    ptr_t trunc_sp = (ptr_t)((word)sp & ~(GC_page_size - 1));
    word size = GC_get_writable_length(trunc_sp, 0);

    return(trunc_sp + size);
}
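/* The stack base here is thus the current stack pointer rounded down   */
/* to a page boundary plus the length of the contiguous writable region */
/* above it; this relies on the committed, writable stack pages forming */
/* a single VirtualQuery region up to the stack bottom.                 */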
# endif /* MS Windows */

# ifdef BEOS
# include <kernel/OS.h>
ptr_t GC_get_stack_base(){
        thread_info th;
        get_thread_info(find_thread(NULL),&th);
        return th.stack_end;
}
# endif /* BEOS */


# ifdef OS2

ptr_t GC_get_stack_base()
{
    PTIB ptib;
    PPIB ppib;

    if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
        GC_err_printf0("DosGetInfoBlocks failed\n");
        ABORT("DosGetInfoBlocks failed\n");
    }
    return((ptr_t)(ptib -> tib_pstacklimit));
}

# endif /* OS2 */

# ifdef AMIGA
#   define GC_AMIGA_SB
#   include "AmigaOS.c"
#   undef GC_AMIGA_SB
# endif /* AMIGA */

# if defined(NEED_FIND_LIMIT) || defined(UNIX_LIKE)

#   ifdef __STDC__
        typedef void (*handler)(int);
#   else
        typedef void (*handler)();
#   endif

#   if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1) \
    || defined(HURD) || defined(NETBSD)
        static struct sigaction old_segv_act;
#       if defined(IRIX5) || defined(HPUX) \
        || defined(HURD) || defined(NETBSD)
            static struct sigaction old_bus_act;
#       endif
#   else
        static handler old_segv_handler, old_bus_handler;
#   endif

#   ifdef __STDC__
      void GC_set_and_save_fault_handler(handler h)
#   else
      void GC_set_and_save_fault_handler(h)
      handler h;
#   endif
    {
#       if defined(SUNOS5SIGS) || defined(IRIX5)  \
        || defined(OSF1) || defined(HURD) || defined(NETBSD)
          struct sigaction      act;

          act.sa_handler        = h;
#         if 0 /* Was necessary for Solaris 2.3 and very temporary      */
               /* NetBSD bugs.                                           */
            act.sa_flags          = SA_RESTART | SA_NODEFER;
#         else
            act.sa_flags          = SA_RESTART;
#         endif

          (void) sigemptyset(&act.sa_mask);
#         ifdef GC_IRIX_THREADS
                /* Older versions have a bug related to retrieving and  */
                /* setting a handler at the same time.                  */
                (void) sigaction(SIGSEGV, 0, &old_segv_act);
                (void) sigaction(SIGSEGV, &act, 0);
                (void) sigaction(SIGBUS, 0, &old_bus_act);
                (void) sigaction(SIGBUS, &act, 0);
#         else
                (void) sigaction(SIGSEGV, &act, &old_segv_act);
#               if defined(IRIX5) \
                   || defined(HPUX) || defined(HURD) || defined(NETBSD)
                    /* Under Irix 5.x or HP/UX, we may get SIGBUS.      */
                    /* Pthreads doesn't exist under Irix 5.x, so we     */
                    /* don't have to worry in the threads case.         */
                    (void) sigaction(SIGBUS, &act, &old_bus_act);
#               endif
#         endif /* GC_IRIX_THREADS */
#       else
          old_segv_handler = signal(SIGSEGV, h);
#         ifdef SIGBUS
            old_bus_handler = signal(SIGBUS, h);
#         endif
#       endif
    }
# endif /* NEED_FIND_LIMIT || UNIX_LIKE */

# ifdef NEED_FIND_LIMIT
  /* Some tools to implement HEURISTIC2 */
#   define MIN_PAGE_SIZE 256    /* Smallest conceivable page size, bytes */
    /* static */ JMP_BUF GC_jmp_buf;

    /*ARGSUSED*/
    void GC_fault_handler(sig)
    int sig;
    {
        LONGJMP(GC_jmp_buf, 1);
    }

    void GC_setup_temporary_fault_handler()
    {
        GC_set_and_save_fault_handler(GC_fault_handler);
    }

    void GC_reset_fault_handler()
    {
#       if defined(SUNOS5SIGS) || defined(IRIX5) \
           || defined(OSF1) || defined(HURD) || defined(NETBSD)
          (void) sigaction(SIGSEGV, &old_segv_act, 0);
#         if defined(IRIX5) \
             || defined(HPUX) || defined(HURD) || defined(NETBSD)
              (void) sigaction(SIGBUS, &old_bus_act, 0);
#         endif
#       else
          (void) signal(SIGSEGV, old_segv_handler);
#         ifdef SIGBUS
            (void) signal(SIGBUS, old_bus_handler);
#         endif
#       endif
    }

    /* Return the first nonaddressable location > p (up) or             */
    /* the smallest location q s.t. [q,p) is addressable (!up).         */
    /* We assume that p (up) or p-1 (!up) is addressable.               */
    ptr_t GC_find_limit(p, up)
    ptr_t p;
    GC_bool up;
    {
        static VOLATILE ptr_t result;
                /* Needs to be static, since otherwise it may not be    */
                /* preserved across the longjmp.  Can safely be         */
                /* static since it's only called once, with the         */
                /* allocation lock held.                                 */


        GC_setup_temporary_fault_handler();
        if (SETJMP(GC_jmp_buf) == 0) {
            result = (ptr_t)(((word)(p))
                              & ~(MIN_PAGE_SIZE-1));
            for (;;) {
                if (up) {
                    result += MIN_PAGE_SIZE;
                } else {
                    result -= MIN_PAGE_SIZE;
                }
                GC_noop1((word)(*result));
            }
        }
        GC_reset_fault_handler();
        if (!up) {
            result += MIN_PAGE_SIZE;
        }
        return(result);
    }
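    /* Typical use (see GC_get_stack_base further below): with          */
    /* HEURISTIC2 and a downward-growing stack,                         */
    /* GC_find_limit((ptr_t)(&dummy), TRUE) probes upward one           */
    /* MIN_PAGE_SIZE step at a time until a probe faults; the fault     */
    /* handler LONGJMPs back here and the first unaddressable boundary  */
    /* is taken as the stack bottom.                                    */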
# endif

#if defined(ECOS) || defined(NOSYS)
  ptr_t GC_get_stack_base()
  {
    return STACKBOTTOM;
  }
#endif

#ifdef HPUX_STACKBOTTOM

#include <sys/param.h>
#include <sys/pstat.h>

  ptr_t GC_get_register_stack_base(void)
  {
    struct pst_vm_status vm_status;

    int i = 0;
    while (pstat_getprocvm(&vm_status, sizeof(vm_status), 0, i++) == 1) {
      if (vm_status.pst_type == PS_RSESTACK) {
        return (ptr_t) vm_status.pst_vaddr;
      }
    }

    /* old way to get the register stackbottom */
    return (ptr_t)(((word)GC_stackbottom - BACKING_STORE_DISPLACEMENT - 1)
                   & ~(BACKING_STORE_ALIGNMENT - 1));
  }

#endif /* HPUX_STACK_BOTTOM */

#ifdef LINUX_STACKBOTTOM

#include <sys/types.h>
#include <sys/stat.h>

# define STAT_SKIP 27   /* Number of fields preceding startstack        */
                        /* field in /proc/self/stat                     */

#ifdef USE_LIBC_PRIVATES
# pragma weak __libc_stack_end
  extern ptr_t __libc_stack_end;
#endif

# ifdef IA64
    /* Try to read the backing store base from /proc/self/maps.        */
    /* We look for the writable mapping with a 0 major device,         */
    /* which is as close to our frame as possible, but below it.       */
  static word backing_store_base_from_maps(char *maps)
  {
    char prot_buf[5];
    char *buf_ptr = maps;
    word start, end;
    unsigned int maj_dev;
    word current_best = 0;
    word dummy;

    for (;;) {
        buf_ptr = GC_parse_map_entry(buf_ptr, &start, &end, prot_buf, &maj_dev);
        if (buf_ptr == NULL) return current_best;
        if (prot_buf[1] == 'w' && maj_dev == 0) {
            if (end < (word)(&dummy) && start > current_best) current_best = start;
        }
    }
    return current_best;
  }

  static word backing_store_base_from_proc(void)
  {
    return GC_apply_to_maps(backing_store_base_from_maps);
  }

#   ifdef USE_LIBC_PRIVATES
#     pragma weak __libc_ia64_register_backing_store_base
      extern ptr_t __libc_ia64_register_backing_store_base;
#   endif

    ptr_t GC_get_register_stack_base(void)
    {
#     ifdef USE_LIBC_PRIVATES
        if (0 != &__libc_ia64_register_backing_store_base
            && 0 != __libc_ia64_register_backing_store_base) {
          /* Glibc 2.2.4 has a bug such that for dynamically linked     */
          /* executables __libc_ia64_register_backing_store_base is     */
          /* defined but uninitialized during constructor calls.        */
          /* Hence we check for both nonzero address and value.         */
          return __libc_ia64_register_backing_store_base;
        }
#     endif
      word result = backing_store_base_from_proc();
      if (0 == result) {
          /* Use dumb heuristics.  Works only for default configuration. */
          result = (word)GC_stackbottom - BACKING_STORE_DISPLACEMENT;
          result += BACKING_STORE_ALIGNMENT - 1;
          result &= ~(BACKING_STORE_ALIGNMENT - 1);
          /* Verify that it's at least readable.  If not, we goofed. */
          GC_noop1(*(word *)result);
      }
      return (ptr_t)result;
    }
# endif

  ptr_t GC_linux_stack_base(void)
  {
    /* We read the stack base value from /proc/self/stat.  We do this   */
    /* using direct I/O system calls in order to avoid calling malloc   */
    /* in case REDIRECT_MALLOC is defined.                              */
#   define STAT_BUF_SIZE 4096
#   define STAT_READ read
          /* Should probably call the real read, if read is wrapped.    */
    char stat_buf[STAT_BUF_SIZE];
    int f;
    char c;
    word result = 0;
    size_t i, buf_offset = 0;

    /* First try the easy way.  This should work for glibc 2.2.         */
    /* This fails in a prelinked ("prelink" command) executable         */
    /* since the correct value of __libc_stack_end never                */
    /* becomes visible to us.  The second test works around this.       */
#   ifdef USE_LIBC_PRIVATES
      if (0 != &__libc_stack_end && 0 != __libc_stack_end ) {
#       ifdef IA64
          /* Some versions of glibc set the address 16 bytes too        */
          /* low while the initialization code is running.              */
          if (((word)__libc_stack_end & 0xfff) + 0x10 < 0x1000) {
            return __libc_stack_end + 0x10;
          } /* Otherwise it's not safe to add 16 bytes and we fall      */
            /* back to using /proc.                                     */
#       else
#       ifdef SPARC
          /* Older versions of glibc for 64-bit Sparc do not set
           * this variable correctly, it gets set to either zero
           * or one.
           */
          if (__libc_stack_end != (ptr_t) (unsigned long)0x1)
            return __libc_stack_end;
#       else
          return __libc_stack_end;
#       endif
#       endif
      }
#   endif
    f = open("/proc/self/stat", O_RDONLY);
    if (f < 0 || STAT_READ(f, stat_buf, STAT_BUF_SIZE) < 2 * STAT_SKIP) {
        ABORT("Couldn't read /proc/self/stat");
    }
    c = stat_buf[buf_offset++];
    /* Skip the required number of fields.  This number is hopefully    */
    /* constant across all Linux implementations.                       */
      for (i = 0; i < STAT_SKIP; ++i) {
        while (isspace(c)) c = stat_buf[buf_offset++];
        while (!isspace(c)) c = stat_buf[buf_offset++];
      }
    while (isspace(c)) c = stat_buf[buf_offset++];
    while (isdigit(c)) {
      result *= 10;
      result += c - '0';
      c = stat_buf[buf_offset++];
    }
    close(f);
    if (result < 0x10000000) ABORT("Absurd stack bottom value");
    return (ptr_t)result;
  }

#endif /* LINUX_STACKBOTTOM */

#ifdef FREEBSD_STACKBOTTOM

/* This uses an undocumented sysctl call, but at least one expert       */
/* believes it will stay.                                               */

#include <unistd.h>
#include <sys/types.h>
#include <sys/sysctl.h>

  ptr_t GC_freebsd_stack_base(void)
  {
    int nm[2] = {CTL_KERN, KERN_USRSTACK};
    ptr_t base;
    size_t len = sizeof(ptr_t);
    int r = sysctl(nm, 2, &base, &len, NULL, 0);

    if (r) ABORT("Error getting stack base");

    return base;
  }

#endif /* FREEBSD_STACKBOTTOM */

#if !defined(BEOS) && !defined(AMIGA) && !defined(MSWIN32) \
    && !defined(MSWINCE) && !defined(OS2) && !defined(NOSYS) && !defined(ECOS)

ptr_t GC_get_stack_base()
{
#   if defined(HEURISTIC1) || defined(HEURISTIC2) || \
       defined(LINUX_STACKBOTTOM) || defined(FREEBSD_STACKBOTTOM)
    word dummy;
    ptr_t result;
#   endif

#   define STACKBOTTOM_ALIGNMENT_M1 ((word)STACK_GRAN - 1)

#   ifdef STACKBOTTOM
        return(STACKBOTTOM);
#   else
#       ifdef HEURISTIC1
#          ifdef STACK_GROWS_DOWN
             result = (ptr_t)((((word)(&dummy))
                               + STACKBOTTOM_ALIGNMENT_M1)
                              & ~STACKBOTTOM_ALIGNMENT_M1);
#          else
             result = (ptr_t)(((word)(&dummy))
                              & ~STACKBOTTOM_ALIGNMENT_M1);
#          endif
#       endif /* HEURISTIC1 */
#       ifdef LINUX_STACKBOTTOM
           result = GC_linux_stack_base();
#       endif
#       ifdef FREEBSD_STACKBOTTOM
           result = GC_freebsd_stack_base();
#       endif
#       ifdef HEURISTIC2
#           ifdef STACK_GROWS_DOWN
                result = GC_find_limit((ptr_t)(&dummy), TRUE);
#               ifdef HEURISTIC2_LIMIT
                    if (result > HEURISTIC2_LIMIT
                        && (ptr_t)(&dummy) < HEURISTIC2_LIMIT) {
                            result = HEURISTIC2_LIMIT;
                    }
#               endif
#           else
                result = GC_find_limit((ptr_t)(&dummy), FALSE);
#               ifdef HEURISTIC2_LIMIT
                    if (result < HEURISTIC2_LIMIT
                        && (ptr_t)(&dummy) > HEURISTIC2_LIMIT) {
                            result = HEURISTIC2_LIMIT;
                    }
#               endif
#           endif

#       endif /* HEURISTIC2 */
#       ifdef STACK_GROWS_DOWN
            if (result == 0) result = (ptr_t)(signed_word)(-sizeof(ptr_t));
#       endif
        return(result);
#   endif /* STACKBOTTOM */
}

# endif /* ! AMIGA, !OS 2, ! MS Windows, !BEOS, !NOSYS, !ECOS */

/*
 * Register static data segment(s) as roots.
 * If more data segments are added later then they need to be registered
 * at that point (as we do with SunOS dynamic loading),
 * or GC_mark_roots needs to check for them (as we do with PCR).
 * Called with allocator lock held.
 */

# ifdef OS2

void GC_register_data_segments()
{
    PTIB ptib;
    PPIB ppib;
    HMODULE module_handle;
#   define PBUFSIZ 512
    UCHAR path[PBUFSIZ];
    FILE * myexefile;
    struct exe_hdr hdrdos;      /* MSDOS header.        */
    struct e32_exe hdr386;      /* Real header for my executable */
    struct o32_obj seg;         /* Current segment */
    int nsegs;


    if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
        GC_err_printf0("DosGetInfoBlocks failed\n");
        ABORT("DosGetInfoBlocks failed\n");
    }
    module_handle = ppib -> pib_hmte;
    if (DosQueryModuleName(module_handle, PBUFSIZ, path) != NO_ERROR) {
        GC_err_printf0("DosQueryModuleName failed\n");
        ABORT("DosQueryModuleName failed\n");
    }
    myexefile = fopen(path, "rb");
    if (myexefile == 0) {
        GC_err_puts("Couldn't open executable ");
        GC_err_puts(path); GC_err_puts("\n");
        ABORT("Failed to open executable\n");
    }
    if (fread((char *)(&hdrdos), 1, sizeof hdrdos, myexefile) < sizeof hdrdos) {
        GC_err_puts("Couldn't read MSDOS header from ");
        GC_err_puts(path); GC_err_puts("\n");
        ABORT("Couldn't read MSDOS header");
    }
    if (E_MAGIC(hdrdos) != EMAGIC) {
        GC_err_puts("Executable has wrong DOS magic number: ");
        GC_err_puts(path); GC_err_puts("\n");
        ABORT("Bad DOS magic number");
    }
    if (fseek(myexefile, E_LFANEW(hdrdos), SEEK_SET) != 0) {
        GC_err_puts("Seek to new header failed in ");
        GC_err_puts(path); GC_err_puts("\n");
        ABORT("Seek to new header failed");
    }
    if (fread((char *)(&hdr386), 1, sizeof hdr386, myexefile) < sizeof hdr386) {
        GC_err_puts("Couldn't read OS/2 header from ");
        GC_err_puts(path); GC_err_puts("\n");
        ABORT("Couldn't read OS/2 header");
    }
    if (E32_MAGIC1(hdr386) != E32MAGIC1 || E32_MAGIC2(hdr386) != E32MAGIC2) {
        GC_err_puts("Executable has wrong OS/2 magic number:");
        GC_err_puts(path); GC_err_puts("\n");
        ABORT("Bad OS/2 magic number");
    }
    if ( E32_BORDER(hdr386) != E32LEBO || E32_WORDER(hdr386) != E32LEWO) {
        GC_err_puts("Executable has wrong byte order: ");
        GC_err_puts(path); GC_err_puts("\n");
        ABORT("Bad byte order");
    }
    if ( E32_CPU(hdr386) == E32CPU286) {
        GC_err_puts("GC can't handle 80286 executables: ");
        GC_err_puts(path); GC_err_puts("\n");
        EXIT();
    }
    if (fseek(myexefile, E_LFANEW(hdrdos) + E32_OBJTAB(hdr386),
              SEEK_SET) != 0) {
        GC_err_puts("Seek to object table failed: ");
        GC_err_puts(path); GC_err_puts("\n");
        ABORT("Seek to object table failed");
    }
    for (nsegs = E32_OBJCNT(hdr386); nsegs > 0; nsegs--) {
      int flags;
      if (fread((char *)(&seg), 1, sizeof seg, myexefile) < sizeof seg) {
        GC_err_puts("Couldn't read obj table entry from ");
        GC_err_puts(path); GC_err_puts("\n");
        ABORT("Couldn't read obj table entry");
      }
      flags = O32_FLAGS(seg);
      if (!(flags & OBJWRITE)) continue;
      if (!(flags & OBJREAD)) continue;
      if (flags & OBJINVALID) {
          GC_err_printf0("Object with invalid pages?\n");
          continue;
      }
      GC_add_roots_inner(O32_BASE(seg), O32_BASE(seg)+O32_SIZE(seg), FALSE);
    }
}

# else /* !OS2 */

# if defined(MSWIN32) || defined(MSWINCE) || defined (CYGWIN32)

# ifdef CYGWIN32
#   define GC_no_win32_dlls (FALSE)
# endif

# ifdef MSWIN32
  /* Unfortunately, we have to handle win32s very differently from NT,  */
  /* since VirtualQuery has very different semantics.  In particular,   */
  /* under win32s a VirtualQuery call on an unmapped page returns an    */
  /* invalid result.  Under NT, GC_register_data_segments is a noop and */
  /* all real work is done by GC_register_dynamic_libraries.  Under     */
  /* win32s, we cannot find the data segments associated with dll's.    */
  /* We register the main data segment here.                            */
  GC_bool GC_no_win32_dlls = FALSE;
        /* This used to be set for gcc, to avoid dealing with           */
        /* the structured exception handling issues.  But we now have   */
        /* assembly code to do that right.                               */
  GC_bool GC_wnt = FALSE;
         /* This is a Windows NT derivative, i.e. NT, W2K, XP or later. */

  void GC_init_win32()
  {
    /* if we're running under win32s, assume that no DLLs will be loaded */
    DWORD v = GetVersion();
    GC_wnt = !(v & 0x80000000);
    GC_no_win32_dlls |= ((!GC_wnt) && (v & 0xff) <= 3);
  }

  /* Return the smallest address a such that VirtualQuery               */
  /* returns correct results for all addresses between a and start.     */
  /* Assumes VirtualQuery returns correct information for start.        */
  ptr_t GC_least_described_address(ptr_t start)
  {
    MEMORY_BASIC_INFORMATION buf;
    DWORD result;
    LPVOID limit;
    ptr_t p;
    LPVOID q;

    limit = GC_sysinfo.lpMinimumApplicationAddress;
    p = (ptr_t)((word)start & ~(GC_page_size - 1));
    for (;;) {
        q = (LPVOID)(p - GC_page_size);
        if ((ptr_t)q > (ptr_t)p /* underflow */ || q < limit) break;
        result = VirtualQuery(q, &buf, sizeof(buf));
        if (result != sizeof(buf) || buf.AllocationBase == 0) break;
        p = (ptr_t)(buf.AllocationBase);
    }
    return(p);
  }
# endif

# ifndef REDIRECT_MALLOC
  /* We maintain a linked list of AllocationBase values that we know    */
  /* correspond to malloc heap sections.  Currently this is only called */
  /* during a GC.  But there is some hope that for long running         */
  /* programs we will eventually see most heap sections.                */

  /* In the long run, it would be more reliable to occasionally walk    */
  /* the malloc heap with HeapWalk on the default heap.  But that       */
  /* apparently works only for NT-based Windows.                        */

  /* In the long run, a better data structure would also be nice ...    */
  struct GC_malloc_heap_list {
    void * allocation_base;
    struct GC_malloc_heap_list *next;
  } *GC_malloc_heap_l = 0;

  /* Is p the base of one of the malloc heap sections we already know   */
  /* about?                                                              */
  GC_bool GC_is_malloc_heap_base(ptr_t p)
  {
    struct GC_malloc_heap_list *q = GC_malloc_heap_l;

    while (0 != q) {
      if (q -> allocation_base == p) return TRUE;
      q = q -> next;
    }
    return FALSE;
  }

  void *GC_get_allocation_base(void *p)
  {
    MEMORY_BASIC_INFORMATION buf;
    DWORD result = VirtualQuery(p, &buf, sizeof(buf));
    if (result != sizeof(buf)) {
      ABORT("Weird VirtualQuery result");
    }
    return buf.AllocationBase;
  }

  size_t GC_max_root_size = 100000;     /* Appr. largest root size.     */

  void GC_add_current_malloc_heap()
  {
    struct GC_malloc_heap_list *new_l =
                 malloc(sizeof(struct GC_malloc_heap_list));
    void * candidate = GC_get_allocation_base(new_l);

    if (new_l == 0) return;
    if (GC_is_malloc_heap_base(candidate)) {
      /* Try a little harder to find malloc heap.                       */
      size_t req_size = 10000;
      do {
        void *p = malloc(req_size);
        if (0 == p) { free(new_l); return; }
        candidate = GC_get_allocation_base(p);
        free(p);
        req_size *= 2;
      } while (GC_is_malloc_heap_base(candidate)
               && req_size < GC_max_root_size/10 && req_size < 500000);
      if (GC_is_malloc_heap_base(candidate)) {
        free(new_l); return;
      }
    }
#   ifdef CONDPRINT
      if (GC_print_stats)
          GC_printf1("Found new system malloc AllocationBase at 0x%lx\n",
                     candidate);
#   endif
    new_l -> allocation_base = candidate;
    new_l -> next = GC_malloc_heap_l;
    GC_malloc_heap_l = new_l;
  }
# endif /* REDIRECT_MALLOC */

  /* Is p the start of either the malloc heap, or of one of our         */
  /* heap sections?                                                     */
  GC_bool GC_is_heap_base (ptr_t p)
  {

     unsigned i;

#    ifndef REDIRECT_MALLOC
       static word last_gc_no = -1;

       if (last_gc_no != GC_gc_no) {
         GC_add_current_malloc_heap();
         last_gc_no = GC_gc_no;
       }
       if (GC_root_size > GC_max_root_size) GC_max_root_size = GC_root_size;
       if (GC_is_malloc_heap_base(p)) return TRUE;
#    endif
     for (i = 0; i < GC_n_heap_bases; i++) {
         if (GC_heap_bases[i] == p) return TRUE;
     }
     return FALSE ;
  }

# ifdef MSWIN32
  void GC_register_root_section(ptr_t static_root)
  {
      MEMORY_BASIC_INFORMATION buf;
      DWORD result;
      DWORD protect;
      LPVOID p;
      char * base;
      char * limit, * new_limit;

      if (!GC_no_win32_dlls) return;
      p = base = limit = GC_least_described_address(static_root);
      while (p < GC_sysinfo.lpMaximumApplicationAddress) {
        result = VirtualQuery(p, &buf, sizeof(buf));
        if (result != sizeof(buf) || buf.AllocationBase == 0
            || GC_is_heap_base(buf.AllocationBase)) break;
        new_limit = (char *)p + buf.RegionSize;
        protect = buf.Protect;
        if (buf.State == MEM_COMMIT
            && is_writable(protect)) {
            if ((char *)p == limit) {
                limit = new_limit;
            } else {
                if (base != limit) GC_add_roots_inner(base, limit, FALSE);
                base = p;
                limit = new_limit;
            }
        }
        if (p > (LPVOID)new_limit /* overflow */) break;
        p = (LPVOID)new_limit;
      }
      if (base != limit) GC_add_roots_inner(base, limit, FALSE);
  }
#endif

  void GC_register_data_segments()
  {
#     ifdef MSWIN32
      static char dummy;
      GC_register_root_section((ptr_t)(&dummy));
#     endif
  }

# else /* !OS2 && !Windows */

# if (defined(SVR4) || defined(AUX) || defined(DGUX) \
      || (defined(LINUX) && defined(SPARC))) && !defined(PCR)
ptr_t GC_SysVGetDataStart(max_page_size, etext_addr)
int max_page_size;
int * etext_addr;
{
    word text_end = ((word)(etext_addr) + sizeof(word) - 1)
                    & ~(sizeof(word) - 1);
        /* etext rounded to word boundary       */
    word next_page = ((text_end + (word)max_page_size - 1)
                      & ~((word)max_page_size - 1));
    word page_offset = (text_end & ((word)max_page_size - 1));
    VOLATILE char * result = (char *)(next_page + page_offset);
    /* Note that this isn't equivalent to just adding                   */
    /* max_page_size to &etext if &etext is at a page boundary          */

    GC_setup_temporary_fault_handler();
    if (SETJMP(GC_jmp_buf) == 0) {
        /* Try writing to the address.  */
        *result = *result;
        GC_reset_fault_handler();
    } else {
        GC_reset_fault_handler();
        /* We got here via a longjmp.  The address is not readable.     */
        /* This is known to happen under Solaris 2.4 + gcc, which place */
        /* string constants in the text segment, but after etext.       */
        /* Use plan B.  Note that we now know there is a gap between    */
        /* text and data segments, so plan A bought us something.       */
        result = (char *)GC_find_limit((ptr_t)(DATAEND), FALSE);
    }
    return((ptr_t)result);
}
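/* Usage note (illustrative): platform configurations (gcconfig.h)      */
/* typically invoke this as something like                              */
/*   # define DATASTART (ptr_t)GC_SysVGetDataStart(0x10000, &etext)     */
/* where the first argument is an upper bound on the page size; the     */
/* exact constant varies by platform, so the line above is only an      */
/* illustration.                                                        */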
|
1399 |
|
|
# endif
|
1400 |
|
|
|
1401 |
|
|
# if defined(FREEBSD) && (defined(I386) || defined(X86_64) || defined(powerpc) || defined(__powerpc__)) && !defined(PCR)
|
1402 |
|
|
/* Its unclear whether this should be identical to the above, or */
|
1403 |
|
|
/* whether it should apply to non-X86 architectures. */
|
1404 |
|
|
/* For now we don't assume that there is always an empty page after */
|
1405 |
|
|
/* etext. But in some cases there actually seems to be slightly more. */
|
1406 |
|
|
/* This also deals with holes between read-only data and writable data. */
|
1407 |
|
|
ptr_t GC_FreeBSDGetDataStart(max_page_size, etext_addr)
|
1408 |
|
|
int max_page_size;
|
1409 |
|
|
int * etext_addr;
|
1410 |
|
|
{
|
1411 |
|
|
word text_end = ((word)(etext_addr) + sizeof(word) - 1)
|
1412 |
|
|
& ~(sizeof(word) - 1);
|
1413 |
|
|
/* etext rounded to word boundary */
|
1414 |
|
|
VOLATILE word next_page = (text_end + (word)max_page_size - 1)
|
1415 |
|
|
& ~((word)max_page_size - 1);
|
1416 |
|
|
VOLATILE ptr_t result = (ptr_t)text_end;
|
1417 |
|
|
GC_setup_temporary_fault_handler();
|
1418 |
|
|
if (SETJMP(GC_jmp_buf) == 0) {
|
1419 |
|
|
/* Try reading at the address. */
|
1420 |
|
|
/* This should happen before there is another thread. */
|
1421 |
|
|
for (; next_page < (word)(DATAEND); next_page += (word)max_page_size)
|
1422 |
|
|
*(VOLATILE char *)next_page;
|
1423 |
|
|
GC_reset_fault_handler();
|
1424 |
|
|
} else {
|
1425 |
|
|
GC_reset_fault_handler();
|
1426 |
|
|
/* As above, we go to plan B */
|
1427 |
|
|
result = GC_find_limit((ptr_t)(DATAEND), FALSE);
|
1428 |
|
|
}
|
1429 |
|
|
return(result);
|
1430 |
|
|
}
|
1431 |
|
|
|
1432 |
|
|
# endif
|
1433 |
|
|
|
1434 |
|
|
|
1435 |
|
|
#ifdef AMIGA
|
1436 |
|
|
|
1437 |
|
|
# define GC_AMIGA_DS
|
1438 |
|
|
# include "AmigaOS.c"
|
1439 |
|
|
# undef GC_AMIGA_DS
|
1440 |
|
|
|
1441 |
|
|
#else /* !OS2 && !Windows && !AMIGA */
|
1442 |
|
|
|
1443 |
|
|
void GC_register_data_segments()
|
1444 |
|
|
{
|
1445 |
|
|
# if !defined(PCR) && !defined(SRC_M3) && !defined(MACOS)
|
1446 |
|
|
# if defined(REDIRECT_MALLOC) && defined(GC_SOLARIS_THREADS)
|
1447 |
|
|
/* As of Solaris 2.3, the Solaris threads implementation */
|
1448 |
|
|
/* allocates the data structure for the initial thread with */
|
1449 |
|
|
/* sbrk at process startup. It needs to be scanned, so that */
|
1450 |
|
|
/* we don't lose some malloc allocated data structures */
|
1451 |
|
|
/* hanging from it. We're on thin ice here ... */
|
1452 |
|
|
extern caddr_t sbrk();
|
1453 |
|
|
|
1454 |
|
|
GC_add_roots_inner(DATASTART, (char *)sbrk(0), FALSE);
|
1455 |
|
|
# else
|
1456 |
|
|
GC_add_roots_inner(DATASTART, (char *)(DATAEND), FALSE);
|
1457 |
|
|
# if defined(DATASTART2)
|
1458 |
|
|
GC_add_roots_inner(DATASTART2, (char *)(DATAEND2), FALSE);
|
1459 |
|
|
# endif
|
1460 |
|
|
# endif
|
1461 |
|
|
# endif
|
1462 |
|
|
# if defined(MACOS)
|
1463 |
|
|
{
|
1464 |
|
|
# if defined(THINK_C)
|
1465 |
|
|
extern void* GC_MacGetDataStart(void);
|
1466 |
|
|
/* globals begin above stack and end at a5. */
|
1467 |
|
|
GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
|
1468 |
|
|
(ptr_t)LMGetCurrentA5(), FALSE);
|
1469 |
|
|
# else
|
1470 |
|
|
# if defined(__MWERKS__)
|
1471 |
|
|
# if !__POWERPC__
|
1472 |
|
|
extern void* GC_MacGetDataStart(void);
|
1473 |
|
|
/* MATTHEW: Function to handle Far Globals (CW Pro 3) */
|
1474 |
|
|
# if __option(far_data)
|
1475 |
|
|
extern void* GC_MacGetDataEnd(void);
|
1476 |
|
|
# endif
|
1477 |
|
|
/* globals begin above stack and end at a5. */
|
1478 |
|
|
GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
|
1479 |
|
|
(ptr_t)LMGetCurrentA5(), FALSE);
|
1480 |
|
|
/* MATTHEW: Handle Far Globals */
|
1481 |
|
|
# if __option(far_data)
|
1482 |
|
|
/* Far globals follow the QD globals: */
|
1483 |
|
|
GC_add_roots_inner((ptr_t)LMGetCurrentA5(),
|
1484 |
|
|
(ptr_t)GC_MacGetDataEnd(), FALSE);
|
1485 |
|
|
# endif
|
1486 |
|
|
# else
|
1487 |
|
|
extern char __data_start__[], __data_end__[];
|
1488 |
|
|
GC_add_roots_inner((ptr_t)&__data_start__,
|
1489 |
|
|
(ptr_t)&__data_end__, FALSE);
|
1490 |
|
|
# endif /* __POWERPC__ */
|
1491 |
|
|
# endif /* __MWERKS__ */
|
1492 |
|
|
# endif /* !THINK_C */
|
1493 |
|
|
}
|
1494 |
|
|
# endif /* MACOS */
|
1495 |
|
|
|
1496 |
|
|
/* Dynamic libraries are added at every collection, since they may */
|
1497 |
|
|
/* change. */
|
1498 |
|
|
}
|
1499 |
|
|
|
1500 |
|
|
# endif /* ! AMIGA */
|
1501 |
|
|
# endif /* ! MSWIN32 && ! MSWINCE*/
|
1502 |
|
|
# endif /* ! OS2 */
|
1503 |
|
|
|
1504 |
|
|
/*
|
1505 |
|
|
* Auxiliary routines for obtaining memory from OS.
|
1506 |
|
|
*/
|
1507 |
|
|
|
1508 |
|
|
# if !defined(OS2) && !defined(PCR) && !defined(AMIGA) \
|
1509 |
|
|
&& !defined(MSWIN32) && !defined(MSWINCE) \
|
1510 |
|
|
&& !defined(MACOS) && !defined(DOS4GW)
|
1511 |
|
|
|
1512 |
|
|
# ifdef SUNOS4
|
1513 |
|
|
extern caddr_t sbrk();
|
1514 |
|
|
# endif
|
1515 |
|
|
# ifdef __STDC__
|
1516 |
|
|
# define SBRK_ARG_T ptrdiff_t
|
1517 |
|
|
# else
|
1518 |
|
|
# define SBRK_ARG_T int
|
1519 |
|
|
# endif
|
1520 |
|
|
|
1521 |
|
|
|
1522 |
|
|
# if 0 && defined(RS6000) /* We now use mmap */
|
1523 |
|
|
/* The compiler seems to generate speculative reads one past the end of */
|
1524 |
|
|
/* an allocated object. Hence we need to make sure that the page */
|
1525 |
|
|
/* following the last heap page is also mapped. */
|
1526 |
|
|
ptr_t GC_unix_get_mem(bytes)
|
1527 |
|
|
word bytes;
|
1528 |
|
|
{
|
1529 |
|
|
caddr_t cur_brk = (caddr_t)sbrk(0);
|
1530 |
|
|
caddr_t result;
|
1531 |
|
|
SBRK_ARG_T lsbs = (word)cur_brk & (GC_page_size-1);
|
1532 |
|
|
static caddr_t my_brk_val = 0;
|
1533 |
|
|
|
1534 |
|
|
if ((SBRK_ARG_T)bytes < 0) return(0); /* too big */
|
1535 |
|
|
if (lsbs != 0) {
|
1536 |
|
|
if((caddr_t)(sbrk(GC_page_size - lsbs)) == (caddr_t)(-1)) return(0);
|
1537 |
|
|
}
|
1538 |
|
|
if (cur_brk == my_brk_val) {
|
1539 |
|
|
/* Use the extra block we allocated last time. */
|
1540 |
|
|
result = (ptr_t)sbrk((SBRK_ARG_T)bytes);
|
1541 |
|
|
if (result == (caddr_t)(-1)) return(0);
|
1542 |
|
|
result -= GC_page_size;
|
1543 |
|
|
} else {
|
1544 |
|
|
result = (ptr_t)sbrk(GC_page_size + (SBRK_ARG_T)bytes);
|
1545 |
|
|
if (result == (caddr_t)(-1)) return(0);
|
1546 |
|
|
}
|
1547 |
|
|
my_brk_val = result + bytes + GC_page_size; /* Always page aligned */
|
1548 |
|
|
return((ptr_t)result);
|
1549 |
|
|
}
|
1550 |
|
|
|
1551 |
|
|
#else /* Not RS6000 */
|
1552 |
|
|
|
1553 |
|
|
#if defined(USE_MMAP) || defined(USE_MUNMAP)
|
1554 |
|
|
|
1555 |
|
|
#ifdef USE_MMAP_FIXED
|
1556 |
|
|
# define GC_MMAP_FLAGS MAP_FIXED | MAP_PRIVATE
|
1557 |
|
|
/* Seems to yield better performance on Solaris 2, but can */
|
1558 |
|
|
/* be unreliable if something is already mapped at the address. */
|
1559 |
|
|
#else
|
1560 |
|
|
# define GC_MMAP_FLAGS MAP_PRIVATE
|
1561 |
|
|
#endif
|
1562 |
|
|
|
1563 |
|
|
#ifdef USE_MMAP_ANON
|
1564 |
|
|
# define zero_fd -1
|
1565 |
|
|
# if defined(MAP_ANONYMOUS)
|
1566 |
|
|
# define OPT_MAP_ANON MAP_ANONYMOUS
|
1567 |
|
|
# else
|
1568 |
|
|
# define OPT_MAP_ANON MAP_ANON
|
1569 |
|
|
# endif
|
1570 |
|
|
#else
|
1571 |
|
|
static int zero_fd;
|
1572 |
|
|
# define OPT_MAP_ANON 0
|
1573 |
|
|
#endif
|
1574 |
|
|
|
1575 |
|
|
#endif /* defined(USE_MMAP) || defined(USE_MUNMAP) */
|
1576 |
|
|
|
1577 |
|
|
#if defined(USE_MMAP)
|
1578 |
|
|
/* Tested only under Linux, IRIX5 and Solaris 2 */
|
1579 |
|
|
|
1580 |
|
|
#ifndef HEAP_START
|
1581 |
|
|
# define HEAP_START 0
|
1582 |
|
|
#endif
|
1583 |
|
|
|
1584 |
|
|
ptr_t GC_unix_get_mem(bytes)
|
1585 |
|
|
word bytes;
|
1586 |
|
|
{
|
1587 |
|
|
void *result;
|
1588 |
|
|
static ptr_t last_addr = HEAP_START;
|
1589 |
|
|
|
1590 |
|
|
# ifndef USE_MMAP_ANON
|
1591 |
|
|
static GC_bool initialized = FALSE;
|
1592 |
|
|
|
1593 |
|
|
if (!initialized) {
|
1594 |
|
|
zero_fd = open("/dev/zero", O_RDONLY);
|
1595 |
|
|
fcntl(zero_fd, F_SETFD, FD_CLOEXEC);
|
1596 |
|
|
initialized = TRUE;
|
1597 |
|
|
}
|
1598 |
|
|
# endif
|
1599 |
|
|
|
1600 |
|
|
if (bytes & (GC_page_size -1)) ABORT("Bad GET_MEM arg");
|
1601 |
|
|
result = mmap(last_addr, bytes, PROT_READ | PROT_WRITE | OPT_PROT_EXEC,
|
1602 |
|
|
GC_MMAP_FLAGS | OPT_MAP_ANON, zero_fd, 0/* offset */);
|
1603 |
|
|
if (result == MAP_FAILED) return(0);
|
1604 |
|
|
last_addr = (ptr_t)result + bytes + GC_page_size - 1;
|
1605 |
|
|
last_addr = (ptr_t)((word)last_addr & ~(GC_page_size - 1));
|
1606 |
|
|
# if !defined(LINUX)
|
1607 |
|
|
if (last_addr == 0) {
|
1608 |
|
|
/* Oops. We got the end of the address space. This isn't */
|
1609 |
|
|
/* usable by arbitrary C code, since one-past-end pointers */
|
1610 |
|
|
/* don't work, so we discard it and try again. */
|
1611 |
|
|
munmap(result, (size_t)(-GC_page_size) - (size_t)result);
|
1612 |
|
|
/* Leave last page mapped, so we can't repeat. */
|
1613 |
|
|
return GC_unix_get_mem(bytes);
|
1614 |
|
|
}
|
1615 |
|
|
# else
|
1616 |
|
|
GC_ASSERT(last_addr != 0);
|
1617 |
|
|
# endif
|
1618 |
|
|
return((ptr_t)result);
|
1619 |
|
|
}
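/*
 * Worked example (illustrative, assuming 4K pages): a successful mmap of
 * 0x6000 bytes at 0x40000000 sets last_addr to 0x40006000, so the next
 * request hints at the page immediately following this block.
 */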
|
1620 |
|
|
|
1621 |
|
|
#else /* Not RS6000, not USE_MMAP */
|
1622 |
|
|
ptr_t GC_unix_get_mem(bytes)
|
1623 |
|
|
word bytes;
|
1624 |
|
|
{
|
1625 |
|
|
ptr_t result;
|
1626 |
|
|
# ifdef IRIX5
|
1627 |
|
|
/* Bare sbrk isn't thread safe. Play by malloc rules. */
|
1628 |
|
|
/* The equivalent may be needed on other systems as well. */
|
1629 |
|
|
__LOCK_MALLOC();
|
1630 |
|
|
# endif
|
1631 |
|
|
{
|
1632 |
|
|
ptr_t cur_brk = (ptr_t)sbrk(0);
|
1633 |
|
|
SBRK_ARG_T lsbs = (word)cur_brk & (GC_page_size-1);
|
1634 |
|
|
|
1635 |
|
|
if ((SBRK_ARG_T)bytes < 0) return(0); /* too big */
|
1636 |
|
|
if (lsbs != 0) {
|
1637 |
|
|
if((ptr_t)sbrk(GC_page_size - lsbs) == (ptr_t)(-1)) return(0);
|
1638 |
|
|
}
|
1639 |
|
|
result = (ptr_t)sbrk((SBRK_ARG_T)bytes);
|
1640 |
|
|
if (result == (ptr_t)(-1)) result = 0;
|
1641 |
|
|
}
|
1642 |
|
|
# ifdef IRIX5
|
1643 |
|
|
__UNLOCK_MALLOC();
|
1644 |
|
|
# endif
|
1645 |
|
|
return(result);
|
1646 |
|
|
}
|
1647 |
|
|
|
1648 |
|
|
#endif /* Not USE_MMAP */
|
1649 |
|
|
#endif /* Not RS6000 */
|
1650 |
|
|
|
1651 |
|
|
# endif /* UN*X */
|
1652 |
|
|
|
1653 |
|
|
# ifdef OS2
|
1654 |
|
|
|
1655 |
|
|
void * os2_alloc(size_t bytes)
|
1656 |
|
|
{
|
1657 |
|
|
void * result;
|
1658 |
|
|
|
1659 |
|
|
if (DosAllocMem(&result, bytes, PAG_EXECUTE | PAG_READ |
|
1660 |
|
|
PAG_WRITE | PAG_COMMIT)
|
1661 |
|
|
!= NO_ERROR) {
|
1662 |
|
|
return(0);
|
1663 |
|
|
}
|
1664 |
|
|
if (result == 0) return(os2_alloc(bytes));
|
1665 |
|
|
return(result);
|
1666 |
|
|
}
|
1667 |
|
|
|
1668 |
|
|
# endif /* OS2 */
|
1669 |
|
|
|
1670 |
|
|
|
1671 |
|
|
# if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32)
|
1672 |
|
|
SYSTEM_INFO GC_sysinfo;
|
1673 |
|
|
# endif
|
1674 |
|
|
|
1675 |
|
|
# if defined(MSWIN32) || defined(CYGWIN32)
|
1676 |
|
|
|
1677 |
|
|
word GC_n_heap_bases = 0;
|
1678 |
|
|
|
1679 |
|
|
# ifdef USE_GLOBAL_ALLOC
|
1680 |
|
|
# define GLOBAL_ALLOC_TEST 1
|
1681 |
|
|
# else
|
1682 |
|
|
# define GLOBAL_ALLOC_TEST GC_no_win32_dlls
|
1683 |
|
|
# endif
|
1684 |
|
|
|
1685 |
|
|
ptr_t GC_win32_get_mem(bytes)
|
1686 |
|
|
word bytes;
|
1687 |
|
|
{
|
1688 |
|
|
ptr_t result;
|
1689 |
|
|
|
1690 |
|
|
# ifdef CYGWIN32
|
1691 |
|
|
result = GC_unix_get_mem (bytes);
|
1692 |
|
|
# else
|
1693 |
|
|
if (GLOBAL_ALLOC_TEST) {
|
1694 |
|
|
/* VirtualAlloc doesn't like PAGE_EXECUTE_READWRITE. */
|
1695 |
|
|
/* There are also unconfirmed rumors of other */
|
1696 |
|
|
/* problems, so we dodge the issue. */
|
1697 |
|
|
result = (ptr_t) GlobalAlloc(0, bytes + HBLKSIZE);
|
1698 |
|
|
result = (ptr_t)(((word)result + HBLKSIZE) & ~(HBLKSIZE-1));
|
1699 |
|
|
} else {
|
1700 |
|
|
/* VirtualProtect only works on regions returned by a */
|
1701 |
|
|
/* single VirtualAlloc call. Thus we allocate one */
|
1702 |
|
|
/* extra page, which will prevent merging of blocks */
|
1703 |
|
|
/* in separate regions, and eliminate any temptation */
|
1704 |
|
|
/* to call VirtualProtect on a range spanning regions. */
|
1705 |
|
|
/* This wastes a small amount of memory, and risks */
|
1706 |
|
|
/* increased fragmentation. But better alternatives */
|
1707 |
|
|
/* would require effort. */
|
1708 |
|
|
result = (ptr_t) VirtualAlloc(NULL, bytes + 1,
|
1709 |
|
|
MEM_COMMIT | MEM_RESERVE,
|
1710 |
|
|
PAGE_EXECUTE_READWRITE);
|
1711 |
|
|
}
|
1712 |
|
|
#endif
|
1713 |
|
|
if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
|
1714 |
|
|
/* If I read the documentation correctly, this can */
|
1715 |
|
|
/* only happen if HBLKSIZE > 64k or not a power of 2. */
|
1716 |
|
|
if (GC_n_heap_bases >= MAX_HEAP_SECTS) ABORT("Too many heap sections");
|
1717 |
|
|
GC_heap_bases[GC_n_heap_bases++] = result;
|
1718 |
|
|
return(result);
|
1719 |
|
|
}
|
1720 |
|
|
|
1721 |
|
|
void GC_win32_free_heap ()
|
1722 |
|
|
{
|
1723 |
|
|
if (GC_no_win32_dlls) {
|
1724 |
|
|
while (GC_n_heap_bases > 0) {
|
1725 |
|
|
# ifdef CYGWIN32
|
1726 |
|
|
free (GC_heap_bases[--GC_n_heap_bases]);
|
1727 |
|
|
# else
|
1728 |
|
|
GlobalFree (GC_heap_bases[--GC_n_heap_bases]);
|
1729 |
|
|
# endif
|
1730 |
|
|
GC_heap_bases[GC_n_heap_bases] = 0;
|
1731 |
|
|
}
|
1732 |
|
|
}
|
1733 |
|
|
}
|
1734 |
|
|
# endif
|
1735 |
|
|
|
1736 |
|
|
#ifdef AMIGA
|
1737 |
|
|
# define GC_AMIGA_AM
|
1738 |
|
|
# include "AmigaOS.c"
|
1739 |
|
|
# undef GC_AMIGA_AM
|
1740 |
|
|
#endif
|
1741 |
|
|
|
1742 |
|
|
|
1743 |
|
|
# ifdef MSWINCE
|
1744 |
|
|
word GC_n_heap_bases = 0;
|
1745 |
|
|
|
1746 |
|
|
ptr_t GC_wince_get_mem(bytes)
|
1747 |
|
|
word bytes;
|
1748 |
|
|
{
|
1749 |
|
|
ptr_t result;
|
1750 |
|
|
word i;
|
1751 |
|
|
|
1752 |
|
|
/* Round up allocation size to multiple of page size */
|
1753 |
|
|
bytes = (bytes + GC_page_size-1) & ~(GC_page_size-1);
|
1754 |
|
|
|
1755 |
|
|
/* Try to find reserved, uncommitted pages */
|
1756 |
|
|
for (i = 0; i < GC_n_heap_bases; i++) {
|
1757 |
|
|
if (((word)(-(signed_word)GC_heap_lengths[i])
|
1758 |
|
|
& (GC_sysinfo.dwAllocationGranularity-1))
|
1759 |
|
|
>= bytes) {
|
1760 |
|
|
result = GC_heap_bases[i] + GC_heap_lengths[i];
|
1761 |
|
|
break;
|
1762 |
|
|
}
|
1763 |
|
|
}
|
1764 |
|
|
|
1765 |
|
|
if (i == GC_n_heap_bases) {
|
1766 |
|
|
/* Reserve more pages */
|
1767 |
|
|
word res_bytes = (bytes + GC_sysinfo.dwAllocationGranularity-1)
|
1768 |
|
|
& ~(GC_sysinfo.dwAllocationGranularity-1);
|
1769 |
|
|
/* If we ever support MPROTECT_VDB here, we will probably need to */
|
1770 |
|
|
/* ensure that res_bytes is strictly > bytes, so that VirtualProtect */
|
1771 |
|
|
/* never spans regions. It seems to be OK for a VirtualFree argument */
|
1772 |
|
|
/* to span regions, so we should be OK for now. */
|
1773 |
|
|
result = (ptr_t) VirtualAlloc(NULL, res_bytes,
|
1774 |
|
|
MEM_RESERVE | MEM_TOP_DOWN,
|
1775 |
|
|
PAGE_EXECUTE_READWRITE);
|
1776 |
|
|
if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
|
1777 |
|
|
/* If I read the documentation correctly, this can */
|
1778 |
|
|
/* only happen if HBLKSIZE > 64k or not a power of 2. */
|
1779 |
|
|
if (GC_n_heap_bases >= MAX_HEAP_SECTS) ABORT("Too many heap sections");
|
1780 |
|
|
GC_heap_bases[GC_n_heap_bases] = result;
|
1781 |
|
|
GC_heap_lengths[GC_n_heap_bases] = 0;
|
1782 |
|
|
GC_n_heap_bases++;
|
1783 |
|
|
}
|
1784 |
|
|
|
1785 |
|
|
/* Commit pages */
|
1786 |
|
|
result = (ptr_t) VirtualAlloc(result, bytes,
|
1787 |
|
|
MEM_COMMIT,
|
1788 |
|
|
PAGE_EXECUTE_READWRITE);
|
1789 |
|
|
if (result != NULL) {
|
1790 |
|
|
if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
|
1791 |
|
|
GC_heap_lengths[i] += bytes;
|
1792 |
|
|
}
|
1793 |
|
|
|
1794 |
|
|
return(result);
|
1795 |
|
|
}
|
1796 |
|
|
# endif
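/*
 * A minimal sketch (not collector code) of the reserve-then-commit pattern
 * GC_wince_get_mem relies on: reserve address space once, then commit
 * page-sized pieces on demand.  Sizes are arbitrary and error handling is
 * omitted; kept inside #if 0 so it does not affect compilation.
 */
#if 0
    /* Reserve 1MB of address space with no backing storage yet. */
    void * region = VirtualAlloc(NULL, 1 << 20, MEM_RESERVE, PAGE_READWRITE);
    /* Later, make the first 64K actually usable. */
    void * usable = VirtualAlloc(region, 1 << 16, MEM_COMMIT, PAGE_READWRITE);
#endif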
|
1797 |
|
|
|
1798 |
|
|
#ifdef USE_MUNMAP
|
1799 |
|
|
|
1800 |
|
|
/* For now, this only works on Win32/WinCE and some Unix-like */
|
1801 |
|
|
/* systems. If you have something else, don't define */
|
1802 |
|
|
/* USE_MUNMAP. */
|
1803 |
|
|
/* We assume ANSI C to support this feature. */
|
1804 |
|
|
|
1805 |
|
|
#if !defined(MSWIN32) && !defined(MSWINCE)
|
1806 |
|
|
|
1807 |
|
|
#include <unistd.h>
|
1808 |
|
|
#include <sys/mman.h>
|
1809 |
|
|
#include <sys/stat.h>
|
1810 |
|
|
#include <sys/types.h>
|
1811 |
|
|
|
1812 |
|
|
#endif
|
1813 |
|
|
|
1814 |
|
|
/* Compute a page aligned starting address for the unmap */
|
1815 |
|
|
/* operation on a block of size bytes starting at start. */
|
1816 |
|
|
/* Return 0 if the block is too small to make this feasible. */
|
1817 |
|
|
ptr_t GC_unmap_start(ptr_t start, word bytes)
|
1818 |
|
|
{
|
1819 |
|
|
ptr_t result = start;
|
1820 |
|
|
/* Round start to next page boundary. */
|
1821 |
|
|
result += GC_page_size - 1;
|
1822 |
|
|
result = (ptr_t)((word)result & ~(GC_page_size - 1));
|
1823 |
|
|
if (result + GC_page_size > start + bytes) return 0;
|
1824 |
|
|
return result;
|
1825 |
|
|
}
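/*
 * Worked example (illustrative, assuming a 4K page size): for
 * start = 0x10234 and bytes = 0x3000, start is rounded up to 0x11000;
 * since 0x11000 + 0x1000 <= 0x10234 + 0x3000, 0x11000 is returned.
 * A block smaller than about two pages may contain no whole aligned
 * page, in which case 0 is returned and the block stays mapped.
 */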
|
1826 |
|
|
|
1827 |
|
|
/* Compute end address for an unmap operation on the indicated */
|
1828 |
|
|
/* block. */
|
1829 |
|
|
ptr_t GC_unmap_end(ptr_t start, word bytes)
|
1830 |
|
|
{
|
1831 |
|
|
ptr_t end_addr = start + bytes;
|
1832 |
|
|
end_addr = (ptr_t)((word)end_addr & ~(GC_page_size - 1));
|
1833 |
|
|
return end_addr;
|
1834 |
|
|
}
|
1835 |
|
|
|
1836 |
|
|
/* Under Win32/WinCE we commit (map) and decommit (unmap) */
|
1837 |
|
|
/* memory using VirtualAlloc and VirtualFree. These functions */
|
1838 |
|
|
/* work on individual allocations of virtual memory, made */
|
1839 |
|
|
/* previously using VirtualAlloc with the MEM_RESERVE flag. */
|
1840 |
|
|
/* The ranges we need to (de)commit may span several of these */
|
1841 |
|
|
/* allocations; therefore we use VirtualQuery to check */
|
1842 |
|
|
/* allocation lengths, and split up the range as necessary. */
|
1843 |
|
|
|
1844 |
|
|
/* We assume that GC_remap is called on exactly the same range */
|
1845 |
|
|
/* as a previous call to GC_unmap. It is safe to consistently */
|
1846 |
|
|
/* round the endpoints in both places. */
|
1847 |
|
|
void GC_unmap(ptr_t start, word bytes)
|
1848 |
|
|
{
|
1849 |
|
|
ptr_t start_addr = GC_unmap_start(start, bytes);
|
1850 |
|
|
ptr_t end_addr = GC_unmap_end(start, bytes);
|
1851 |
|
|
word len = end_addr - start_addr;
|
1852 |
|
|
if (0 == start_addr) return;
|
1853 |
|
|
# if defined(MSWIN32) || defined(MSWINCE)
|
1854 |
|
|
while (len != 0) {
|
1855 |
|
|
MEMORY_BASIC_INFORMATION mem_info;
|
1856 |
|
|
GC_word free_len;
|
1857 |
|
|
if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
|
1858 |
|
|
!= sizeof(mem_info))
|
1859 |
|
|
ABORT("Weird VirtualQuery result");
|
1860 |
|
|
free_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
|
1861 |
|
|
if (!VirtualFree(start_addr, free_len, MEM_DECOMMIT))
|
1862 |
|
|
ABORT("VirtualFree failed");
|
1863 |
|
|
GC_unmapped_bytes += free_len;
|
1864 |
|
|
start_addr += free_len;
|
1865 |
|
|
len -= free_len;
|
1866 |
|
|
}
|
1867 |
|
|
# else
|
1868 |
|
|
/* We immediately remap it to prevent an intervening mmap from */
|
1869 |
|
|
/* accidentally grabbing the same address space. */
|
1870 |
|
|
{
|
1871 |
|
|
void * result;
|
1872 |
|
|
result = mmap(start_addr, len, PROT_NONE,
|
1873 |
|
|
MAP_PRIVATE | MAP_FIXED | OPT_MAP_ANON,
|
1874 |
|
|
zero_fd, 0/* offset */);
|
1875 |
|
|
if (result != (void *)start_addr) ABORT("mmap(...PROT_NONE...) failed");
|
1876 |
|
|
}
|
1877 |
|
|
GC_unmapped_bytes += len;
|
1878 |
|
|
# endif
|
1879 |
|
|
}
|
1880 |
|
|
|
1881 |
|
|
|
1882 |
|
|
void GC_remap(ptr_t start, word bytes)
|
1883 |
|
|
{
|
1884 |
|
|
ptr_t start_addr = GC_unmap_start(start, bytes);
|
1885 |
|
|
ptr_t end_addr = GC_unmap_end(start, bytes);
|
1886 |
|
|
word len = end_addr - start_addr;
|
1887 |
|
|
|
1888 |
|
|
# if defined(MSWIN32) || defined(MSWINCE)
|
1889 |
|
|
ptr_t result;
|
1890 |
|
|
|
1891 |
|
|
if (0 == start_addr) return;
|
1892 |
|
|
while (len != 0) {
|
1893 |
|
|
MEMORY_BASIC_INFORMATION mem_info;
|
1894 |
|
|
GC_word alloc_len;
|
1895 |
|
|
if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
|
1896 |
|
|
!= sizeof(mem_info))
|
1897 |
|
|
ABORT("Weird VirtualQuery result");
|
1898 |
|
|
alloc_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
|
1899 |
|
|
result = VirtualAlloc(start_addr, alloc_len,
|
1900 |
|
|
MEM_COMMIT,
|
1901 |
|
|
PAGE_EXECUTE_READWRITE);
|
1902 |
|
|
if (result != start_addr) {
|
1903 |
|
|
ABORT("VirtualAlloc remapping failed");
|
1904 |
|
|
}
|
1905 |
|
|
GC_unmapped_bytes -= alloc_len;
|
1906 |
|
|
start_addr += alloc_len;
|
1907 |
|
|
len -= alloc_len;
|
1908 |
|
|
}
|
1909 |
|
|
# else
|
1910 |
|
|
/* It was already remapped with PROT_NONE. */
|
1911 |
|
|
int result;
|
1912 |
|
|
|
1913 |
|
|
if (0 == start_addr) return;
|
1914 |
|
|
result = mprotect(start_addr, len,
|
1915 |
|
|
PROT_READ | PROT_WRITE | OPT_PROT_EXEC);
|
1916 |
|
|
if (result != 0) {
|
1917 |
|
|
GC_err_printf3(
|
1918 |
|
|
"Mprotect failed at 0x%lx (length %ld) with errno %ld\n",
|
1919 |
|
|
start_addr, len, errno);
|
1920 |
|
|
ABORT("Mprotect remapping failed");
|
1921 |
|
|
}
|
1922 |
|
|
GC_unmapped_bytes -= len;
|
1923 |
|
|
# endif
|
1924 |
|
|
}
|
1925 |
|
|
|
1926 |
|
|
/* Two adjacent blocks have already been unmapped and are about to */
|
1927 |
|
|
/* be merged. Unmap the whole block. This typically requires */
|
1928 |
|
|
/* that we unmap a small section in the middle that was not previously */
|
1929 |
|
|
/* unmapped due to alignment constraints. */
|
1930 |
|
|
void GC_unmap_gap(ptr_t start1, word bytes1, ptr_t start2, word bytes2)
|
1931 |
|
|
{
|
1932 |
|
|
ptr_t start1_addr = GC_unmap_start(start1, bytes1);
|
1933 |
|
|
ptr_t end1_addr = GC_unmap_end(start1, bytes1);
|
1934 |
|
|
ptr_t start2_addr = GC_unmap_start(start2, bytes2);
|
1935 |
|
|
ptr_t end2_addr = GC_unmap_end(start2, bytes2);
|
1936 |
|
|
ptr_t start_addr = end1_addr;
|
1937 |
|
|
ptr_t end_addr = start2_addr;
|
1938 |
|
|
word len;
|
1939 |
|
|
GC_ASSERT(start1 + bytes1 == start2);
|
1940 |
|
|
if (0 == start1_addr) start_addr = GC_unmap_start(start1, bytes1 + bytes2);
|
1941 |
|
|
if (0 == start2_addr) end_addr = GC_unmap_end(start1, bytes1 + bytes2);
|
1942 |
|
|
if (0 == start_addr) return;
|
1943 |
|
|
len = end_addr - start_addr;
|
1944 |
|
|
# if defined(MSWIN32) || defined(MSWINCE)
|
1945 |
|
|
while (len != 0) {
|
1946 |
|
|
MEMORY_BASIC_INFORMATION mem_info;
|
1947 |
|
|
GC_word free_len;
|
1948 |
|
|
if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
|
1949 |
|
|
!= sizeof(mem_info))
|
1950 |
|
|
ABORT("Weird VirtualQuery result");
|
1951 |
|
|
free_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
|
1952 |
|
|
if (!VirtualFree(start_addr, free_len, MEM_DECOMMIT))
|
1953 |
|
|
ABORT("VirtualFree failed");
|
1954 |
|
|
GC_unmapped_bytes += free_len;
|
1955 |
|
|
start_addr += free_len;
|
1956 |
|
|
len -= free_len;
|
1957 |
|
|
}
|
1958 |
|
|
# else
|
1959 |
|
|
if (len != 0 && munmap(start_addr, len) != 0) ABORT("munmap failed");
|
1960 |
|
|
GC_unmapped_bytes += len;
|
1961 |
|
|
# endif
|
1962 |
|
|
}
|
1963 |
|
|
|
1964 |
|
|
#endif /* USE_MUNMAP */
|
1965 |
|
|
|
1966 |
|
|
/* Routine for pushing any additional roots. In THREADS */
|
1967 |
|
|
/* environment, this is also responsible for marking from */
|
1968 |
|
|
/* thread stacks. */
|
1969 |
|
|
#ifndef THREADS
|
1970 |
|
|
void (*GC_push_other_roots)() = 0;
|
1971 |
|
|
#else /* THREADS */
|
1972 |
|
|
|
1973 |
|
|
# ifdef PCR
|
1974 |
|
|
PCR_ERes GC_push_thread_stack(PCR_Th_T *t, PCR_Any dummy)
|
1975 |
|
|
{
|
1976 |
|
|
struct PCR_ThCtl_TInfoRep info;
|
1977 |
|
|
PCR_ERes result;
|
1978 |
|
|
|
1979 |
|
|
info.ti_stkLow = info.ti_stkHi = 0;
|
1980 |
|
|
result = PCR_ThCtl_GetInfo(t, &info);
|
1981 |
|
|
GC_push_all_stack((ptr_t)(info.ti_stkLow), (ptr_t)(info.ti_stkHi));
|
1982 |
|
|
return(result);
|
1983 |
|
|
}
|
1984 |
|
|
|
1985 |
|
|
/* Push the contents of an old object.  We treat this as stack */
/* data only because that makes it robust against mark stack   */
/* overflow.                                                    */
|
1988 |
|
|
PCR_ERes GC_push_old_obj(void *p, size_t size, PCR_Any data)
|
1989 |
|
|
{
|
1990 |
|
|
GC_push_all_stack((ptr_t)p, (ptr_t)p + size);
|
1991 |
|
|
return(PCR_ERes_okay);
|
1992 |
|
|
}
|
1993 |
|
|
|
1994 |
|
|
|
1995 |
|
|
void GC_default_push_other_roots GC_PROTO((void))
|
1996 |
|
|
{
|
1997 |
|
|
/* Traverse data allocated by previous memory managers. */
|
1998 |
|
|
{
|
1999 |
|
|
extern struct PCR_MM_ProcsRep * GC_old_allocator;
|
2000 |
|
|
|
2001 |
|
|
if ((*(GC_old_allocator->mmp_enumerate))(PCR_Bool_false,
|
2002 |
|
|
GC_push_old_obj, 0)
|
2003 |
|
|
!= PCR_ERes_okay) {
|
2004 |
|
|
ABORT("Old object enumeration failed");
|
2005 |
|
|
}
|
2006 |
|
|
}
|
2007 |
|
|
/* Traverse all thread stacks. */
|
2008 |
|
|
if (PCR_ERes_IsErr(
|
2009 |
|
|
PCR_ThCtl_ApplyToAllOtherThreads(GC_push_thread_stack,0))
|
2010 |
|
|
|| PCR_ERes_IsErr(GC_push_thread_stack(PCR_Th_CurrThread(), 0))) {
|
2011 |
|
|
ABORT("Thread stack marking failed\n");
|
2012 |
|
|
}
|
2013 |
|
|
}
|
2014 |
|
|
|
2015 |
|
|
# endif /* PCR */
|
2016 |
|
|
|
2017 |
|
|
# ifdef SRC_M3
|
2018 |
|
|
|
2019 |
|
|
# ifdef ALL_INTERIOR_POINTERS
|
2020 |
|
|
--> misconfigured
|
2021 |
|
|
# endif
|
2022 |
|
|
|
2023 |
|
|
void GC_push_thread_structures GC_PROTO((void))
|
2024 |
|
|
{
|
2025 |
|
|
/* Not our responsibility. */
|
2026 |
|
|
}
|
2027 |
|
|
|
2028 |
|
|
extern void ThreadF__ProcessStacks();
|
2029 |
|
|
|
2030 |
|
|
void GC_push_thread_stack(start, stop)
|
2031 |
|
|
word start, stop;
|
2032 |
|
|
{
|
2033 |
|
|
GC_push_all_stack((ptr_t)start, (ptr_t)stop + sizeof(word));
|
2034 |
|
|
}
|
2035 |
|
|
|
2036 |
|
|
/* Push routine with M3 specific calling convention. */
|
2037 |
|
|
GC_m3_push_root(dummy1, p, dummy2, dummy3)
|
2038 |
|
|
word *p;
|
2039 |
|
|
ptr_t dummy1, dummy2;
|
2040 |
|
|
int dummy3;
|
2041 |
|
|
{
|
2042 |
|
|
word q = *p;
|
2043 |
|
|
|
2044 |
|
|
GC_PUSH_ONE_STACK(q, p);
|
2045 |
|
|
}
|
2046 |
|
|
|
2047 |
|
|
/* M3 set equivalent to RTHeap.TracedRefTypes */
|
2048 |
|
|
typedef struct { int elts[1]; } RefTypeSet;
|
2049 |
|
|
RefTypeSet GC_TracedRefTypes = {{0x1}};
|
2050 |
|
|
|
2051 |
|
|
void GC_default_push_other_roots GC_PROTO((void))
|
2052 |
|
|
{
|
2053 |
|
|
/* Use the M3 provided routine for finding static roots. */
|
2054 |
|
|
/* This is a bit dubious, since it presumes no C roots. */
|
2055 |
|
|
/* We handle the collector roots explicitly in GC_push_roots */
|
2056 |
|
|
RTMain__GlobalMapProc(GC_m3_push_root, 0, GC_TracedRefTypes);
|
2057 |
|
|
if (GC_words_allocd > 0) {
|
2058 |
|
|
ThreadF__ProcessStacks(GC_push_thread_stack);
|
2059 |
|
|
}
|
2060 |
|
|
/* Otherwise this isn't absolutely necessary, and we have */
|
2061 |
|
|
/* startup ordering problems. */
|
2062 |
|
|
}
|
2063 |
|
|
|
2064 |
|
|
# endif /* SRC_M3 */
|
2065 |
|
|
|
2066 |
|
|
# if defined(GC_SOLARIS_THREADS) || defined(GC_PTHREADS) || \
|
2067 |
|
|
defined(GC_WIN32_THREADS)
|
2068 |
|
|
|
2069 |
|
|
extern void GC_push_all_stacks();
|
2070 |
|
|
|
2071 |
|
|
void GC_default_push_other_roots GC_PROTO((void))
|
2072 |
|
|
{
|
2073 |
|
|
GC_push_all_stacks();
|
2074 |
|
|
}
|
2075 |
|
|
|
2076 |
|
|
# endif /* GC_SOLARIS_THREADS || GC_PTHREADS || GC_WIN32_THREADS */
|
2077 |
|
|
|
2078 |
|
|
void (*GC_push_other_roots) GC_PROTO((void)) = GC_default_push_other_roots;
|
2079 |
|
|
|
2080 |
|
|
#endif /* THREADS */
|
2081 |
|
|
|
2082 |
|
|
/*
 * Routines for accessing dirty bits on virtual pages.
 * We plan to eventually implement four strategies for doing so:
 * DEFAULT_VDB:  A simple dummy implementation that treats every page
 *               as possibly dirty.  This makes incremental collection
 *               useless, but the implementation is still correct.
 * PCR_VDB:      Use PPCR's virtual dirty bit facility.
 * PROC_VDB:     Use the /proc facility for reading dirty bits.  Only
 *               works under some SVR4 variants.  Even then, it may be
 *               too slow to be entirely satisfactory.  Requires reading
 *               dirty bits for entire address space.  Implementations tend
 *               to assume that the client is a (slow) debugger.
 * MPROTECT_VDB: Protect pages and then catch the faults to keep track of
 *               dirtied pages.  The implementation (and implementability)
 *               is highly system dependent.  This usually fails when system
 *               calls write to a protected page.  We prevent the read system
 *               call from doing so.  It is the client's responsibility to
 *               make sure that other system calls are similarly protected
 *               or write only to the stack.
 */
|
2102 |
|
|
GC_bool GC_dirty_maintained = FALSE;
|
2103 |
|
|
|
2104 |
|
|
# ifdef DEFAULT_VDB
|
2105 |
|
|
|
2106 |
|
|
/* All of the following assume the allocation lock is held, and */
|
2107 |
|
|
/* signals are disabled. */
|
2108 |
|
|
|
2109 |
|
|
/* The client asserts that unallocated pages in the heap are never */
|
2110 |
|
|
/* written. */
|
2111 |
|
|
|
2112 |
|
|
/* Initialize virtual dirty bit implementation. */
|
2113 |
|
|
void GC_dirty_init()
|
2114 |
|
|
{
|
2115 |
|
|
# ifdef PRINTSTATS
|
2116 |
|
|
GC_printf0("Initializing DEFAULT_VDB...\n");
|
2117 |
|
|
# endif
|
2118 |
|
|
GC_dirty_maintained = TRUE;
|
2119 |
|
|
}
|
2120 |
|
|
|
2121 |
|
|
/* Retrieve system dirty bits for heap to a local buffer. */
|
2122 |
|
|
/* Restore the system's notion of which pages are dirty. */
|
2123 |
|
|
void GC_read_dirty()
|
2124 |
|
|
{}
|
2125 |
|
|
|
2126 |
|
|
/* Is the HBLKSIZE sized page at h marked dirty in the local buffer? */
|
2127 |
|
|
/* If the actual page size is different, this returns TRUE if any */
|
2128 |
|
|
/* of the pages overlapping h are dirty. This routine may err on the */
|
2129 |
|
|
/* side of labelling pages as dirty (and this implementation does). */
|
2130 |
|
|
/*ARGSUSED*/
|
2131 |
|
|
GC_bool GC_page_was_dirty(h)
|
2132 |
|
|
struct hblk *h;
|
2133 |
|
|
{
|
2134 |
|
|
return(TRUE);
|
2135 |
|
|
}
|
2136 |
|
|
|
2137 |
|
|
/*
|
2138 |
|
|
* The following two routines are typically less crucial. They matter
|
2139 |
|
|
* most with large dynamic libraries, or if we can't accurately identify
|
2140 |
|
|
* stacks, e.g. under Solaris 2.X. Otherwise the following default
|
2141 |
|
|
* versions are adequate.
|
2142 |
|
|
*/
|
2143 |
|
|
|
2144 |
|
|
/* Could any valid GC heap pointer ever have been written to this page? */
|
2145 |
|
|
/*ARGSUSED*/
|
2146 |
|
|
GC_bool GC_page_was_ever_dirty(h)
|
2147 |
|
|
struct hblk *h;
|
2148 |
|
|
{
|
2149 |
|
|
return(TRUE);
|
2150 |
|
|
}
|
2151 |
|
|
|
2152 |
|
|
/* Reset the n pages starting at h to "was never dirty" status. */
|
2153 |
|
|
void GC_is_fresh(h, n)
|
2154 |
|
|
struct hblk *h;
|
2155 |
|
|
word n;
|
2156 |
|
|
{
|
2157 |
|
|
}
|
2158 |
|
|
|
2159 |
|
|
/* A call that: */
|
2160 |
|
|
/* I) hints that [h, h+nblocks) is about to be written. */
|
2161 |
|
|
/* II) guarantees that protection is removed. */
|
2162 |
|
|
/* (I) may speed up some dirty bit implementations. */
|
2163 |
|
|
/* (II) may be essential if we need to ensure that */
|
2164 |
|
|
/* pointer-free system call buffers in the heap are */
|
2165 |
|
|
/* not protected. */
|
2166 |
|
|
/*ARGSUSED*/
|
2167 |
|
|
void GC_remove_protection(h, nblocks, is_ptrfree)
|
2168 |
|
|
struct hblk *h;
|
2169 |
|
|
word nblocks;
|
2170 |
|
|
GC_bool is_ptrfree;
|
2171 |
|
|
{
|
2172 |
|
|
}
|
2173 |
|
|
|
2174 |
|
|
# endif /* DEFAULT_VDB */
|
2175 |
|
|
|
2176 |
|
|
|
2177 |
|
|
# ifdef MPROTECT_VDB
|
2178 |
|
|
|
2179 |
|
|
/*
 * See DEFAULT_VDB for interface descriptions.
 */
|
2182 |
|
|
|
2183 |
|
|
/*
|
2184 |
|
|
* This implementation maintains dirty bits itself by catching write
|
2185 |
|
|
* faults and keeping track of them. We assume nobody else catches
|
2186 |
|
|
* SIGBUS or SIGSEGV. We assume no write faults occur in system calls.
|
2187 |
|
|
* This means that clients must ensure that system calls don't write
|
2188 |
|
|
* to the write-protected heap. Probably the best way to do this is to
|
2189 |
|
|
* ensure that system calls write at most to POINTERFREE objects in the
|
2190 |
|
|
* heap, and do even that only if we are on a platform on which those
|
2191 |
|
|
* are not protected. Another alternative is to wrap system calls
|
2192 |
|
|
* (see example for read below), but the current implementation holds
|
2193 |
|
|
* a lock across blocking calls, making it problematic for multithreaded
|
2194 |
|
|
* applications.
|
2195 |
|
|
* We assume the page size is a multiple of HBLKSIZE.
|
2196 |
|
|
* We prefer them to be the same. We avoid protecting POINTERFREE
|
2197 |
|
|
* objects only if they are the same.
|
2198 |
|
|
*/
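/*
 * A minimal, self-contained sketch (not collector code) of the idea
 * implemented below: write-protect the region, let the first write to each
 * page fault, record that page as dirty in the handler, and unprotect it so
 * the write can be re-executed.  All names (example_heap, example_dirty,
 * etc.) are hypothetical, Linux-style mmap flags are assumed, and real code
 * must also handle threads, chaining to a previous handler, and
 * async-signal-safety.  Kept inside #if 0 so it does not affect compilation.
 */
#if 0
#include <signal.h>
#include <stdint.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define EXAMPLE_PAGES 16
static char *example_heap;                          /* tracked region     */
static volatile char example_dirty[EXAMPLE_PAGES];  /* one flag per page  */
static long example_pgsz;

static void example_fault(int sig, siginfo_t *si, void *ctx)
{
    uintptr_t base = (uintptr_t)example_heap;
    size_t page = ((uintptr_t)si->si_addr - base) / (size_t)example_pgsz;

    (void)sig; (void)ctx;
    example_dirty[page] = 1;                        /* remember the write */
    /* Unprotect just this page so the faulting store can be re-executed. */
    mprotect((void *)(base + page * (size_t)example_pgsz),
             (size_t)example_pgsz, PROT_READ | PROT_WRITE);
}

static void example_setup(void)
{
    struct sigaction sa;

    example_pgsz = sysconf(_SC_PAGESIZE);
    example_heap = mmap(0, EXAMPLE_PAGES * (size_t)example_pgsz,
                        PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    memset(&sa, 0, sizeof(sa));
    sa.sa_flags = SA_SIGINFO;
    sa.sa_sigaction = example_fault;
    sigaction(SIGSEGV, &sa, 0);

    /* Start tracking: protect everything; faults now mark pages dirty. */
    mprotect(example_heap, EXAMPLE_PAGES * (size_t)example_pgsz, PROT_READ);
}
#endif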
|
2199 |
|
|
|
2200 |
|
|
# if !defined(MSWIN32) && !defined(MSWINCE) && !defined(DARWIN)
|
2201 |
|
|
|
2202 |
|
|
# include <sys/mman.h>
|
2203 |
|
|
# include <signal.h>
|
2204 |
|
|
# include <sys/syscall.h>
|
2205 |
|
|
|
2206 |
|
|
# define PROTECT(addr, len) \
|
2207 |
|
|
if (mprotect((caddr_t)(addr), (size_t)(len), \
|
2208 |
|
|
PROT_READ | OPT_PROT_EXEC) < 0) { \
|
2209 |
|
|
ABORT("mprotect failed"); \
|
2210 |
|
|
}
|
2211 |
|
|
# define UNPROTECT(addr, len) \
|
2212 |
|
|
if (mprotect((caddr_t)(addr), (size_t)(len), \
|
2213 |
|
|
PROT_WRITE | PROT_READ | OPT_PROT_EXEC ) < 0) { \
|
2214 |
|
|
ABORT("un-mprotect failed"); \
|
2215 |
|
|
}
|
2216 |
|
|
|
2217 |
|
|
# else
|
2218 |
|
|
|
2219 |
|
|
# ifdef DARWIN
|
2220 |
|
|
/* Using vm_protect (mach syscall) over mprotect (BSD syscall) seems to
|
2221 |
|
|
decrease the likelihood of some of the problems described below. */
|
2222 |
|
|
#include <mach/vm_map.h>
|
2223 |
|
|
static mach_port_t GC_task_self;
|
2224 |
|
|
#define PROTECT(addr,len) \
|
2225 |
|
|
if(vm_protect(GC_task_self,(vm_address_t)(addr),(vm_size_t)(len), \
|
2226 |
|
|
FALSE,VM_PROT_READ) != KERN_SUCCESS) { \
|
2227 |
|
|
ABORT("vm_portect failed"); \
|
2228 |
|
|
}
|
2229 |
|
|
#define UNPROTECT(addr,len) \
|
2230 |
|
|
if(vm_protect(GC_task_self,(vm_address_t)(addr),(vm_size_t)(len), \
|
2231 |
|
|
FALSE,VM_PROT_READ|VM_PROT_WRITE) != KERN_SUCCESS) { \
|
2232 |
|
|
ABORT("vm_portect failed"); \
|
2233 |
|
|
}
|
2234 |
|
|
# else
|
2235 |
|
|
|
2236 |
|
|
# ifndef MSWINCE
|
2237 |
|
|
# include <signal.h>
|
2238 |
|
|
# endif
|
2239 |
|
|
|
2240 |
|
|
static DWORD protect_junk;
|
2241 |
|
|
# define PROTECT(addr, len) \
|
2242 |
|
|
if (!VirtualProtect((addr), (len), PAGE_EXECUTE_READ, \
|
2243 |
|
|
&protect_junk)) { \
|
2244 |
|
|
DWORD last_error = GetLastError(); \
|
2245 |
|
|
GC_printf1("Last error code: %lx\n", last_error); \
|
2246 |
|
|
ABORT("VirtualProtect failed"); \
|
2247 |
|
|
}
|
2248 |
|
|
# define UNPROTECT(addr, len) \
|
2249 |
|
|
if (!VirtualProtect((addr), (len), PAGE_EXECUTE_READWRITE, \
|
2250 |
|
|
&protect_junk)) { \
|
2251 |
|
|
ABORT("un-VirtualProtect failed"); \
|
2252 |
|
|
}
|
2253 |
|
|
# endif /* !DARWIN */
|
2254 |
|
|
# endif /* MSWIN32 || MSWINCE || DARWIN */
|
2255 |
|
|
|
2256 |
|
|
#if defined(SUNOS4) || (defined(FREEBSD) && !defined(SUNOS5SIGS))
|
2257 |
|
|
typedef void (* SIG_PF)();
|
2258 |
|
|
#endif /* SUNOS4 || (FREEBSD && !SUNOS5SIGS) */
|
2259 |
|
|
|
2260 |
|
|
#if defined(SUNOS5SIGS) || defined(OSF1) || defined(LINUX) \
|
2261 |
|
|
|| defined(HURD)
|
2262 |
|
|
# ifdef __STDC__
|
2263 |
|
|
typedef void (* SIG_PF)(int);
|
2264 |
|
|
# else
|
2265 |
|
|
typedef void (* SIG_PF)();
|
2266 |
|
|
# endif
|
2267 |
|
|
#endif /* SUNOS5SIGS || OSF1 || LINUX || HURD */
|
2268 |
|
|
|
2269 |
|
|
#if defined(MSWIN32)
|
2270 |
|
|
typedef LPTOP_LEVEL_EXCEPTION_FILTER SIG_PF;
|
2271 |
|
|
# undef SIG_DFL
|
2272 |
|
|
# define SIG_DFL (LPTOP_LEVEL_EXCEPTION_FILTER) (-1)
|
2273 |
|
|
#endif
|
2274 |
|
|
#if defined(MSWINCE)
|
2275 |
|
|
typedef LONG (WINAPI *SIG_PF)(struct _EXCEPTION_POINTERS *);
|
2276 |
|
|
# undef SIG_DFL
|
2277 |
|
|
# define SIG_DFL (SIG_PF) (-1)
|
2278 |
|
|
#endif
|
2279 |
|
|
|
2280 |
|
|
#if defined(IRIX5) || defined(OSF1) || defined(HURD)
|
2281 |
|
|
typedef void (* REAL_SIG_PF)(int, int, struct sigcontext *);
|
2282 |
|
|
#endif /* IRIX5 || OSF1 || HURD */
|
2283 |
|
|
|
2284 |
|
|
#if defined(SUNOS5SIGS)
|
2285 |
|
|
# if defined(HPUX) || defined(FREEBSD)
|
2286 |
|
|
# define SIGINFO_T siginfo_t
|
2287 |
|
|
# else
|
2288 |
|
|
# define SIGINFO_T struct siginfo
|
2289 |
|
|
# endif
|
2290 |
|
|
# ifdef __STDC__
|
2291 |
|
|
typedef void (* REAL_SIG_PF)(int, SIGINFO_T *, void *);
|
2292 |
|
|
# else
|
2293 |
|
|
typedef void (* REAL_SIG_PF)();
|
2294 |
|
|
# endif
|
2295 |
|
|
#endif /* SUNOS5SIGS */
|
2296 |
|
|
|
2297 |
|
|
#if defined(LINUX)
|
2298 |
|
|
# if __GLIBC__ > 2 || __GLIBC__ == 2 && __GLIBC_MINOR__ >= 2
|
2299 |
|
|
typedef struct sigcontext s_c;
|
2300 |
|
|
# else /* glibc < 2.2 */
|
2301 |
|
|
# include <linux/version.h>
|
2302 |
|
|
# if (LINUX_VERSION_CODE >= 0x20100) && !defined(M68K) || defined(ALPHA) || defined(ARM32)
|
2303 |
|
|
typedef struct sigcontext s_c;
|
2304 |
|
|
# else
|
2305 |
|
|
typedef struct sigcontext_struct s_c;
|
2306 |
|
|
# endif
|
2307 |
|
|
# endif /* glibc < 2.2 */
|
2308 |
|
|
# if defined(ALPHA) || defined(M68K)
|
2309 |
|
|
typedef void (* REAL_SIG_PF)(int, int, s_c *);
|
2310 |
|
|
# else
|
2311 |
|
|
# if defined(IA64) || defined(HP_PA) || defined(X86_64)
|
2312 |
|
|
typedef void (* REAL_SIG_PF)(int, siginfo_t *, s_c *);
|
2313 |
|
|
/* FIXME: */
|
2314 |
|
|
/* According to SUSV3, the last argument should have type */
|
2315 |
|
|
/* void * or ucontext_t * */
|
2316 |
|
|
# else
|
2317 |
|
|
typedef void (* REAL_SIG_PF)(int, s_c);
|
2318 |
|
|
# endif
|
2319 |
|
|
# endif
|
2320 |
|
|
# ifdef ALPHA
|
2321 |
|
|
/* Retrieve fault address from sigcontext structure by decoding */
|
2322 |
|
|
/* instruction. */
|
2323 |
|
|
char * get_fault_addr(s_c *sc) {
|
2324 |
|
|
unsigned instr;
|
2325 |
|
|
word faultaddr;
|
2326 |
|
|
|
2327 |
|
|
instr = *((unsigned *)(sc->sc_pc));
|
2328 |
|
|
faultaddr = sc->sc_regs[(instr >> 16) & 0x1f];
|
2329 |
|
|
faultaddr += (word) (((int)instr << 16) >> 16);
|
2330 |
|
|
return (char *)faultaddr;
|
2331 |
|
|
}
|
2332 |
|
|
# endif /* ALPHA */
|
2333 |
|
|
# endif /* LINUX */
|
2334 |
|
|
|
2335 |
|
|
#ifndef DARWIN
|
2336 |
|
|
SIG_PF GC_old_bus_handler;
|
2337 |
|
|
SIG_PF GC_old_segv_handler; /* Also old MSWIN32 ACCESS_VIOLATION filter */
|
2338 |
|
|
#endif /* !DARWIN */
|
2339 |
|
|
|
2340 |
|
|
#if defined(THREADS)
|
2341 |
|
|
/* We need to lock around the bitmap update in the write fault handler */
|
2342 |
|
|
/* in order to avoid the risk of losing a bit. We do this with a */
|
2343 |
|
|
/* test-and-set spin lock if we know how to do that. Otherwise we */
|
2344 |
|
|
/* check whether we are already in the handler and use the dumb but */
|
2345 |
|
|
/* safe fallback algorithm of setting all bits in the word. */
|
2346 |
|
|
/* Contention should be very rare, so we do the minimum to handle it */
|
2347 |
|
|
/* correctly. */
|
2348 |
|
|
#ifdef GC_TEST_AND_SET_DEFINED
|
2349 |
|
|
static VOLATILE unsigned int fault_handler_lock = 0;
|
2350 |
|
|
void async_set_pht_entry_from_index(VOLATILE page_hash_table db, int index) {
|
2351 |
|
|
while (GC_test_and_set(&fault_handler_lock)) {}
|
2352 |
|
|
/* Could also revert to set_pht_entry_from_index_safe if initial */
|
2353 |
|
|
/* GC_test_and_set fails. */
|
2354 |
|
|
set_pht_entry_from_index(db, index);
|
2355 |
|
|
GC_clear(&fault_handler_lock);
|
2356 |
|
|
}
|
2357 |
|
|
#else /* !GC_TEST_AND_SET_DEFINED */
|
2358 |
|
|
/* THIS IS INCORRECT! The dirty bit vector may be temporarily wrong, */
|
2359 |
|
|
/* just before we notice the conflict and correct it. We may end up */
|
2360 |
|
|
/* looking at it while it's wrong. But this requires contention */
|
2361 |
|
|
/* exactly when a GC is triggered, which seems far less likely to */
|
2362 |
|
|
/* fail than the old code, which had no reported failures. Thus we */
|
2363 |
|
|
/* leave it this way while we think of something better, or support */
|
2364 |
|
|
/* GC_test_and_set on the remaining platforms. */
|
2365 |
|
|
static VOLATILE word currently_updating = 0;
|
2366 |
|
|
void async_set_pht_entry_from_index(VOLATILE page_hash_table db, int index) {
|
2367 |
|
|
unsigned int update_dummy;
|
2368 |
|
|
currently_updating = (word)(&update_dummy);
|
2369 |
|
|
set_pht_entry_from_index(db, index);
|
2370 |
|
|
/* If we get contention in the 10 or so instruction window here, */
|
2371 |
|
|
/* and we get stopped by a GC between the two updates, we lose! */
|
2372 |
|
|
if (currently_updating != (word)(&update_dummy)) {
|
2373 |
|
|
set_pht_entry_from_index_safe(db, index);
|
2374 |
|
|
/* We claim that if two threads concurrently try to update the */
|
2375 |
|
|
/* dirty bit vector, the first one to execute UPDATE_START */
|
2376 |
|
|
/* will see it changed when UPDATE_END is executed. (Note that */
|
2377 |
|
|
/* &update_dummy must differ in two distinct threads.) It */
|
2378 |
|
|
/* will then execute set_pht_entry_from_index_safe, thus */
|
2379 |
|
|
/* returning us to a safe state, though not soon enough. */
|
2380 |
|
|
}
|
2381 |
|
|
}
|
2382 |
|
|
#endif /* !GC_TEST_AND_SET_DEFINED */
|
2383 |
|
|
#else /* !THREADS */
|
2384 |
|
|
# define async_set_pht_entry_from_index(db, index) \
|
2385 |
|
|
set_pht_entry_from_index(db, index)
|
2386 |
|
|
#endif /* !THREADS */
|
2387 |
|
|
|
2388 |
|
|
/*ARGSUSED*/
|
2389 |
|
|
#if !defined(DARWIN)
|
2390 |
|
|
# if defined (SUNOS4) || (defined(FREEBSD) && !defined(SUNOS5SIGS))
|
2391 |
|
|
void GC_write_fault_handler(sig, code, scp, addr)
|
2392 |
|
|
int sig, code;
|
2393 |
|
|
struct sigcontext *scp;
|
2394 |
|
|
char * addr;
|
2395 |
|
|
# ifdef SUNOS4
|
2396 |
|
|
# define SIG_OK (sig == SIGSEGV || sig == SIGBUS)
|
2397 |
|
|
# define CODE_OK (FC_CODE(code) == FC_PROT \
|
2398 |
|
|
|| (FC_CODE(code) == FC_OBJERR \
|
2399 |
|
|
&& FC_ERRNO(code) == FC_PROT))
|
2400 |
|
|
# endif
|
2401 |
|
|
# ifdef FREEBSD
|
2402 |
|
|
# define SIG_OK (sig == SIGBUS)
|
2403 |
|
|
# define CODE_OK TRUE
|
2404 |
|
|
# endif
|
2405 |
|
|
# endif /* SUNOS4 || (FREEBSD && !SUNOS5SIGS) */
|
2406 |
|
|
|
2407 |
|
|
# if defined(IRIX5) || defined(OSF1) || defined(HURD)
|
2408 |
|
|
# include <errno.h>
|
2409 |
|
|
void GC_write_fault_handler(int sig, int code, struct sigcontext *scp)
|
2410 |
|
|
# ifdef OSF1
|
2411 |
|
|
# define SIG_OK (sig == SIGSEGV)
|
2412 |
|
|
# define CODE_OK (code == 2 /* experimentally determined */)
|
2413 |
|
|
# endif
|
2414 |
|
|
# ifdef IRIX5
|
2415 |
|
|
# define SIG_OK (sig == SIGSEGV)
|
2416 |
|
|
# define CODE_OK (code == EACCES)
|
2417 |
|
|
# endif
|
2418 |
|
|
# ifdef HURD
|
2419 |
|
|
# define SIG_OK (sig == SIGBUS || sig == SIGSEGV)
|
2420 |
|
|
# define CODE_OK TRUE
|
2421 |
|
|
# endif
|
2422 |
|
|
# endif /* IRIX5 || OSF1 || HURD */
|
2423 |
|
|
|
2424 |
|
|
# if defined(LINUX)
|
2425 |
|
|
# if defined(ALPHA) || defined(M68K)
|
2426 |
|
|
void GC_write_fault_handler(int sig, int code, s_c * sc)
|
2427 |
|
|
# else
|
2428 |
|
|
# if defined(IA64) || defined(HP_PA) || defined(X86_64)
|
2429 |
|
|
void GC_write_fault_handler(int sig, siginfo_t * si, s_c * scp)
|
2430 |
|
|
# else
|
2431 |
|
|
# if defined(ARM32)
|
2432 |
|
|
void GC_write_fault_handler(int sig, int a2, int a3, int a4, s_c sc)
|
2433 |
|
|
# else
|
2434 |
|
|
void GC_write_fault_handler(int sig, s_c sc)
|
2435 |
|
|
# endif
|
2436 |
|
|
# endif
|
2437 |
|
|
# endif
|
2438 |
|
|
# define SIG_OK (sig == SIGSEGV)
|
2439 |
|
|
# define CODE_OK TRUE
|
2440 |
|
|
/* Empirically, c.trapno == 14 on IA32, but is that useful? */
|
2441 |
|
|
/* Should probably consider alignment issues on other */
|
2442 |
|
|
/* architectures. */
|
2443 |
|
|
# endif /* LINUX */
|
2444 |
|
|
|
2445 |
|
|
# if defined(SUNOS5SIGS)
|
2446 |
|
|
# ifdef __STDC__
|
2447 |
|
|
void GC_write_fault_handler(int sig, SIGINFO_T *scp, void * context)
|
2448 |
|
|
# else
|
2449 |
|
|
void GC_write_fault_handler(sig, scp, context)
|
2450 |
|
|
int sig;
|
2451 |
|
|
SIGINFO_T *scp;
|
2452 |
|
|
void * context;
|
2453 |
|
|
# endif
|
2454 |
|
|
# ifdef HPUX
|
2455 |
|
|
# define SIG_OK (sig == SIGSEGV || sig == SIGBUS)
|
2456 |
|
|
# define CODE_OK (scp -> si_code == SEGV_ACCERR) \
|
2457 |
|
|
|| (scp -> si_code == BUS_ADRERR) \
|
2458 |
|
|
|| (scp -> si_code == BUS_UNKNOWN) \
|
2459 |
|
|
|| (scp -> si_code == SEGV_UNKNOWN) \
|
2460 |
|
|
|| (scp -> si_code == BUS_OBJERR)
|
2461 |
|
|
# else
|
2462 |
|
|
# ifdef FREEBSD
|
2463 |
|
|
# define SIG_OK (sig == SIGBUS)
|
2464 |
|
|
# define CODE_OK (scp -> si_code == BUS_PAGE_FAULT)
|
2465 |
|
|
# else
|
2466 |
|
|
# define SIG_OK (sig == SIGSEGV)
|
2467 |
|
|
# define CODE_OK (scp -> si_code == SEGV_ACCERR)
|
2468 |
|
|
# endif
|
2469 |
|
|
# endif
|
2470 |
|
|
# endif /* SUNOS5SIGS */
|
2471 |
|
|
|
2472 |
|
|
# if defined(MSWIN32) || defined(MSWINCE)
|
2473 |
|
|
LONG WINAPI GC_write_fault_handler(struct _EXCEPTION_POINTERS *exc_info)
|
2474 |
|
|
# define SIG_OK (exc_info -> ExceptionRecord -> ExceptionCode == \
|
2475 |
|
|
STATUS_ACCESS_VIOLATION)
|
2476 |
|
|
# define CODE_OK (exc_info -> ExceptionRecord -> ExceptionInformation[0] == 1)
|
2477 |
|
|
/* Write fault */
|
2478 |
|
|
# endif /* MSWIN32 || MSWINCE */
|
2479 |
|
|
{
|
2480 |
|
|
register unsigned i;
|
2481 |
|
|
# if defined(HURD)
|
2482 |
|
|
char *addr = (char *) code;
|
2483 |
|
|
# endif
|
2484 |
|
|
# ifdef IRIX5
|
2485 |
|
|
char * addr = (char *) (size_t) (scp -> sc_badvaddr);
|
2486 |
|
|
# endif
|
2487 |
|
|
# if defined(OSF1) && defined(ALPHA)
|
2488 |
|
|
char * addr = (char *) (scp -> sc_traparg_a0);
|
2489 |
|
|
# endif
|
2490 |
|
|
# ifdef SUNOS5SIGS
|
2491 |
|
|
char * addr = (char *) (scp -> si_addr);
|
2492 |
|
|
# endif
|
2493 |
|
|
# ifdef LINUX
|
2494 |
|
|
# if defined(I386)
|
2495 |
|
|
char * addr = (char *) (sc.cr2);
|
2496 |
|
|
# else
|
2497 |
|
|
# if defined(M68K)
|
2498 |
|
|
char * addr = NULL;
|
2499 |
|
|
|
2500 |
|
|
struct sigcontext *scp = (struct sigcontext *)(sc);
|
2501 |
|
|
|
2502 |
|
|
int format = (scp->sc_formatvec >> 12) & 0xf;
|
2503 |
|
|
unsigned long *framedata = (unsigned long *)(scp + 1);
|
2504 |
|
|
unsigned long ea;
|
2505 |
|
|
|
2506 |
|
|
if (format == 0xa || format == 0xb) {
|
2507 |
|
|
/* 68020/030 */
|
2508 |
|
|
ea = framedata[2];
|
2509 |
|
|
} else if (format == 7) {
|
2510 |
|
|
/* 68040 */
|
2511 |
|
|
ea = framedata[3];
|
2512 |
|
|
if (framedata[1] & 0x08000000) {
|
2513 |
|
|
/* correct addr on misaligned access */
|
2514 |
|
|
ea = (ea+4095)&(~4095);
|
2515 |
|
|
}
|
2516 |
|
|
} else if (format == 4) {
|
2517 |
|
|
/* 68060 */
|
2518 |
|
|
ea = framedata[0];
|
2519 |
|
|
if (framedata[1] & 0x08000000) {
|
2520 |
|
|
/* correct addr on misaligned access */
|
2521 |
|
|
ea = (ea+4095)&(~4095);
|
2522 |
|
|
}
|
2523 |
|
|
}
|
2524 |
|
|
addr = (char *)ea;
|
2525 |
|
|
# else
|
2526 |
|
|
# ifdef ALPHA
|
2527 |
|
|
char * addr = get_fault_addr(sc);
|
2528 |
|
|
# else
|
2529 |
|
|
# if defined(IA64) || defined(HP_PA) || defined(X86_64)
|
2530 |
|
|
char * addr = si -> si_addr;
|
2531 |
|
|
/* I believe this is claimed to work on all platforms for */
|
2532 |
|
|
/* Linux 2.3.47 and later. Hopefully we don't have to */
|
2533 |
|
|
/* worry about earlier kernels on IA64. */
|
2534 |
|
|
# else
|
2535 |
|
|
# if defined(POWERPC)
|
2536 |
|
|
char * addr = (char *) (sc.regs->dar);
|
2537 |
|
|
# else
|
2538 |
|
|
# if defined(ARM32)
|
2539 |
|
|
char * addr = (char *)sc.fault_address;
|
2540 |
|
|
# else
|
2541 |
|
|
# if defined(CRIS)
|
2542 |
|
|
char * addr = (char *)sc.regs.csraddr;
|
2543 |
|
|
# else
|
2544 |
|
|
--> architecture not supported
|
2545 |
|
|
# endif
|
2546 |
|
|
# endif
|
2547 |
|
|
# endif
|
2548 |
|
|
# endif
|
2549 |
|
|
# endif
|
2550 |
|
|
# endif
|
2551 |
|
|
# endif
|
2552 |
|
|
# endif
|
2553 |
|
|
# if defined(MSWIN32) || defined(MSWINCE)
|
2554 |
|
|
char * addr = (char *) (exc_info -> ExceptionRecord
|
2555 |
|
|
-> ExceptionInformation[1]);
|
2556 |
|
|
# define sig SIGSEGV
|
2557 |
|
|
# endif
|
2558 |
|
|
|
2559 |
|
|
if (SIG_OK && CODE_OK) {
|
2560 |
|
|
register struct hblk * h =
|
2561 |
|
|
(struct hblk *)((word)addr & ~(GC_page_size-1));
|
2562 |
|
|
GC_bool in_allocd_block;
|
2563 |
|
|
|
2564 |
|
|
# ifdef SUNOS5SIGS
|
2565 |
|
|
/* Address is only within the correct physical page. */
|
2566 |
|
|
in_allocd_block = FALSE;
|
2567 |
|
|
for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
|
2568 |
|
|
if (HDR(h+i) != 0) {
|
2569 |
|
|
in_allocd_block = TRUE;
|
2570 |
|
|
}
|
2571 |
|
|
}
|
2572 |
|
|
# else
|
2573 |
|
|
in_allocd_block = (HDR(addr) != 0);
|
2574 |
|
|
# endif
|
2575 |
|
|
if (!in_allocd_block) {
|
2576 |
|
|
/* FIXME - We should make sure that we invoke the */
|
2577 |
|
|
/* old handler with the appropriate calling */
|
2578 |
|
|
/* sequence, which often depends on SA_SIGINFO. */
|
2579 |
|
|
|
2580 |
|
|
/* Heap blocks now begin and end on page boundaries */
|
2581 |
|
|
SIG_PF old_handler;
|
2582 |
|
|
|
2583 |
|
|
if (sig == SIGSEGV) {
|
2584 |
|
|
old_handler = GC_old_segv_handler;
|
2585 |
|
|
} else {
|
2586 |
|
|
old_handler = GC_old_bus_handler;
|
2587 |
|
|
}
|
2588 |
|
|
if (old_handler == SIG_DFL) {
|
2589 |
|
|
# if !defined(MSWIN32) && !defined(MSWINCE)
|
2590 |
|
|
GC_err_printf1("Segfault at 0x%lx\n", addr);
|
2591 |
|
|
ABORT("Unexpected bus error or segmentation fault");
|
2592 |
|
|
# else
|
2593 |
|
|
return(EXCEPTION_CONTINUE_SEARCH);
|
2594 |
|
|
# endif
|
2595 |
|
|
} else {
|
2596 |
|
|
# if defined (SUNOS4) \
|
2597 |
|
|
|| (defined(FREEBSD) && !defined(SUNOS5SIGS))
|
2598 |
|
|
(*old_handler) (sig, code, scp, addr);
|
2599 |
|
|
return;
|
2600 |
|
|
# endif
|
2601 |
|
|
# if defined (SUNOS5SIGS)
|
2602 |
|
|
/*
|
2603 |
|
|
* FIXME: For FreeBSD, this code should check if the
|
2604 |
|
|
* old signal handler used the traditional BSD style and
|
2605 |
|
|
* if so call it using that style.
|
2606 |
|
|
*/
|
2607 |
|
|
(*(REAL_SIG_PF)old_handler) (sig, scp, context);
|
2608 |
|
|
return;
|
2609 |
|
|
# endif
|
2610 |
|
|
# if defined (LINUX)
|
2611 |
|
|
# if defined(ALPHA) || defined(M68K)
|
2612 |
|
|
(*(REAL_SIG_PF)old_handler) (sig, code, sc);
|
2613 |
|
|
# else
|
2614 |
|
|
# if defined(IA64) || defined(HP_PA) || defined(X86_64)
|
2615 |
|
|
(*(REAL_SIG_PF)old_handler) (sig, si, scp);
|
2616 |
|
|
# else
|
2617 |
|
|
(*(REAL_SIG_PF)old_handler) (sig, sc);
|
2618 |
|
|
# endif
|
2619 |
|
|
# endif
|
2620 |
|
|
return;
|
2621 |
|
|
# endif
|
2622 |
|
|
# if defined (IRIX5) || defined(OSF1) || defined(HURD)
|
2623 |
|
|
(*(REAL_SIG_PF)old_handler) (sig, code, scp);
|
2624 |
|
|
return;
|
2625 |
|
|
# endif
|
2626 |
|
|
# ifdef MSWIN32
|
2627 |
|
|
return((*old_handler)(exc_info));
|
2628 |
|
|
# endif
|
2629 |
|
|
}
|
2630 |
|
|
}
|
2631 |
|
|
UNPROTECT(h, GC_page_size);
|
2632 |
|
|
/* We need to make sure that no collection occurs between */
|
2633 |
|
|
/* the UNPROTECT and the setting of the dirty bit. Otherwise */
|
2634 |
|
|
/* a write by a third thread might go unnoticed. Reversing */
|
2635 |
|
|
/* the order is just as bad, since we would end up unprotecting */
|
2636 |
|
|
/* a page in a GC cycle during which it's not marked. */
|
2637 |
|
|
/* Currently we do this by disabling the thread stopping */
|
2638 |
|
|
/* signals while this handler is running. An alternative might */
|
2639 |
|
|
/* be to record the fact that we're about to unprotect, or */
|
2640 |
|
|
/* have just unprotected a page in the GC's thread structure, */
|
2641 |
|
|
/* and then to have the thread stopping code set the dirty */
|
2642 |
|
|
/* flag, if necessary. */
|
2643 |
|
|
for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
|
2644 |
|
|
register int index = PHT_HASH(h+i);
|
2645 |
|
|
|
2646 |
|
|
async_set_pht_entry_from_index(GC_dirty_pages, index);
|
2647 |
|
|
}
|
2648 |
|
|
# if defined(OSF1)
|
2649 |
|
|
/* These reset the signal handler each time by default. */
|
2650 |
|
|
signal(SIGSEGV, (SIG_PF) GC_write_fault_handler);
|
2651 |
|
|
# endif
|
2652 |
|
|
/* The write may not take place before dirty bits are read. */
|
2653 |
|
|
/* But then we'll fault again ... */
|
2654 |
|
|
# if defined(MSWIN32) || defined(MSWINCE)
|
2655 |
|
|
return(EXCEPTION_CONTINUE_EXECUTION);
|
2656 |
|
|
# else
|
2657 |
|
|
return;
|
2658 |
|
|
# endif
|
2659 |
|
|
}
|
2660 |
|
|
#if defined(MSWIN32) || defined(MSWINCE)
|
2661 |
|
|
return EXCEPTION_CONTINUE_SEARCH;
|
2662 |
|
|
#else
|
2663 |
|
|
GC_err_printf1("Segfault at 0x%lx\n", addr);
|
2664 |
|
|
ABORT("Unexpected bus error or segmentation fault");
|
2665 |
|
|
#endif
|
2666 |
|
|
}
|
2667 |
|
|
#endif /* !DARWIN */
|
2668 |
|
|
|
2669 |
|
|
/*
|
2670 |
|
|
* We hold the allocation lock. We expect block h to be written
|
2671 |
|
|
* shortly. Ensure that all pages containing any part of the n hblks
|
2672 |
|
|
* starting at h are no longer protected. If is_ptrfree is false,
|
2673 |
|
|
* also ensure that they will subsequently appear to be dirty.
|
2674 |
|
|
*/
|
2675 |
|
|
void GC_remove_protection(h, nblocks, is_ptrfree)
|
2676 |
|
|
struct hblk *h;
|
2677 |
|
|
word nblocks;
|
2678 |
|
|
GC_bool is_ptrfree;
|
2679 |
|
|
{
|
2680 |
|
|
struct hblk * h_trunc; /* Truncated to page boundary */
|
2681 |
|
|
struct hblk * h_end; /* Page boundary following block end */
|
2682 |
|
|
struct hblk * current;
|
2683 |
|
|
GC_bool found_clean;
|
2684 |
|
|
|
2685 |
|
|
if (!GC_dirty_maintained) return;
|
2686 |
|
|
h_trunc = (struct hblk *)((word)h & ~(GC_page_size-1));
|
2687 |
|
|
h_end = (struct hblk *)(((word)(h + nblocks) + GC_page_size-1)
|
2688 |
|
|
& ~(GC_page_size-1));
|
2689 |
|
|
found_clean = FALSE;
|
2690 |
|
|
for (current = h_trunc; current < h_end; ++current) {
|
2691 |
|
|
int index = PHT_HASH(current);
|
2692 |
|
|
|
2693 |
|
|
if (!is_ptrfree || current < h || current >= h + nblocks) {
|
2694 |
|
|
async_set_pht_entry_from_index(GC_dirty_pages, index);
|
2695 |
|
|
}
|
2696 |
|
|
}
|
2697 |
|
|
UNPROTECT(h_trunc, (ptr_t)h_end - (ptr_t)h_trunc);
|
2698 |
|
|
}
|
2699 |
|
|
|
2700 |
|
|
#if !defined(DARWIN)
|
2701 |
|
|
void GC_dirty_init()
|
2702 |
|
|
{
|
2703 |
|
|
# if defined(SUNOS5SIGS) || defined(IRIX5) || defined(LINUX) || \
|
2704 |
|
|
defined(OSF1) || defined(HURD)
|
2705 |
|
|
struct sigaction act, oldact;
|
2706 |
|
|
/* We should probably specify SA_SIGINFO for Linux, and handle */
|
2707 |
|
|
/* the different architectures more uniformly. */
|
2708 |
|
|
# if defined(IRIX5) || defined(LINUX) && !defined(X86_64) \
|
2709 |
|
|
|| defined(OSF1) || defined(HURD)
|
2710 |
|
|
act.sa_flags = SA_RESTART;
|
2711 |
|
|
act.sa_handler = (SIG_PF)GC_write_fault_handler;
|
2712 |
|
|
# else
|
2713 |
|
|
act.sa_flags = SA_RESTART | SA_SIGINFO;
|
2714 |
|
|
act.sa_sigaction = GC_write_fault_handler;
|
2715 |
|
|
# endif
|
2716 |
|
|
(void)sigemptyset(&act.sa_mask);
|
2717 |
|
|
# ifdef SIG_SUSPEND
|
2718 |
|
|
/* Arrange to postpone SIG_SUSPEND while we're in a write fault */
|
2719 |
|
|
/* handler. This effectively makes the handler atomic w.r.t. */
|
2720 |
|
|
/* stopping the world for GC. */
|
2721 |
|
|
(void)sigaddset(&act.sa_mask, SIG_SUSPEND);
|
2722 |
|
|
# endif /* SIG_SUSPEND */
|
2723 |
|
|
# endif
|
2724 |
|
|
# ifdef PRINTSTATS
|
2725 |
|
|
GC_printf0("Inititalizing mprotect virtual dirty bit implementation\n");
|
2726 |
|
|
# endif
|
2727 |
|
|
GC_dirty_maintained = TRUE;
|
2728 |
|
|
if (GC_page_size % HBLKSIZE != 0) {
|
2729 |
|
|
GC_err_printf0("Page size not multiple of HBLKSIZE\n");
|
2730 |
|
|
ABORT("Page size not multiple of HBLKSIZE");
|
2731 |
|
|
}
|
2732 |
|
|
# if defined(SUNOS4) || (defined(FREEBSD) && !defined(SUNOS5SIGS))
|
2733 |
|
|
GC_old_bus_handler = signal(SIGBUS, GC_write_fault_handler);
|
2734 |
|
|
if (GC_old_bus_handler == SIG_IGN) {
|
2735 |
|
|
GC_err_printf0("Previously ignored bus error!?");
|
2736 |
|
|
GC_old_bus_handler = SIG_DFL;
|
2737 |
|
|
}
|
2738 |
|
|
if (GC_old_bus_handler != SIG_DFL) {
|
2739 |
|
|
# ifdef PRINTSTATS
|
2740 |
|
|
GC_err_printf0("Replaced other SIGBUS handler\n");
|
2741 |
|
|
# endif
|
2742 |
|
|
}
|
2743 |
|
|
# endif
|
2744 |
|
|
# if defined(SUNOS4)
|
2745 |
|
|
GC_old_segv_handler = signal(SIGSEGV, (SIG_PF)GC_write_fault_handler);
|
2746 |
|
|
if (GC_old_segv_handler == SIG_IGN) {
|
2747 |
|
|
GC_err_printf0("Previously ignored segmentation violation!?");
|
2748 |
|
|
GC_old_segv_handler = SIG_DFL;
|
2749 |
|
|
}
|
2750 |
|
|
if (GC_old_segv_handler != SIG_DFL) {
|
2751 |
|
|
# ifdef PRINTSTATS
|
2752 |
|
|
GC_err_printf0("Replaced other SIGSEGV handler\n");
|
2753 |
|
|
# endif
|
2754 |
|
|
}
|
2755 |
|
|
# endif
|
2756 |
|
|
# if (defined(SUNOS5SIGS) && !defined(FREEBSD)) || defined(IRIX5) \
|
2757 |
|
|
|| defined(LINUX) || defined(OSF1) || defined(HURD)
|
2758 |
|
|
/* SUNOS5SIGS includes HPUX */
|
2759 |
|
|
# if defined(GC_IRIX_THREADS)
|
2760 |
|
|
sigaction(SIGSEGV, 0, &oldact);
|
2761 |
|
|
sigaction(SIGSEGV, &act, 0);
|
2762 |
|
|
# else
|
2763 |
|
|
{
|
2764 |
|
|
int res = sigaction(SIGSEGV, &act, &oldact);
|
2765 |
|
|
if (res != 0) ABORT("Sigaction failed");
|
2766 |
|
|
}
|
2767 |
|
|
# endif
|
2768 |
|
|
# if defined(_sigargs) || defined(HURD) || !defined(SA_SIGINFO)
|
2769 |
|
|
/* This is Irix 5.x, not 6.x. Irix 5.x does not have */
|
2770 |
|
|
/* sa_sigaction. */
|
2771 |
|
|
GC_old_segv_handler = oldact.sa_handler;
|
2772 |
|
|
# else /* Irix 6.x or SUNOS5SIGS or LINUX */
|
2773 |
|
|
if (oldact.sa_flags & SA_SIGINFO) {
|
2774 |
|
|
GC_old_segv_handler = (SIG_PF)(oldact.sa_sigaction);
|
2775 |
|
|
} else {
|
2776 |
|
|
GC_old_segv_handler = oldact.sa_handler;
|
2777 |
|
|
}
|
2778 |
|
|
# endif
|
2779 |
|
|
if (GC_old_segv_handler == SIG_IGN) {
|
2780 |
|
|
GC_err_printf0("Previously ignored segmentation violation!?");
|
2781 |
|
|
GC_old_segv_handler = SIG_DFL;
|
2782 |
|
|
}
|
2783 |
|
|
if (GC_old_segv_handler != SIG_DFL) {
|
2784 |
|
|
# ifdef PRINTSTATS
|
2785 |
|
|
GC_err_printf0("Replaced other SIGSEGV handler\n");
|
2786 |
|
|
# endif
|
2787 |
|
|
}
|
2788 |
|
|
# endif /* (SUNOS5SIGS && !FREEBSD) || IRIX5 || LINUX || OSF1 || HURD */
|
2789 |
|
|
# if defined(HPUX) || defined(LINUX) || defined(HURD) \
|
2790 |
|
|
|| (defined(FREEBSD) && defined(SUNOS5SIGS))
|
2791 |
|
|
sigaction(SIGBUS, &act, &oldact);
|
2792 |
|
|
GC_old_bus_handler = oldact.sa_handler;
|
2793 |
|
|
if (GC_old_bus_handler == SIG_IGN) {
|
2794 |
|
|
GC_err_printf0("Previously ignored bus error!?");
|
2795 |
|
|
GC_old_bus_handler = SIG_DFL;
|
2796 |
|
|
}
|
2797 |
|
|
if (GC_old_bus_handler != SIG_DFL) {
|
2798 |
|
|
# ifdef PRINTSTATS
|
2799 |
|
|
GC_err_printf0("Replaced other SIGBUS handler\n");
|
2800 |
|
|
# endif
|
2801 |
|
|
}
|
2802 |
|
|
# endif /* HPUX || LINUX || HURD || (FREEBSD && SUNOS5SIGS) */
|
2803 |
|
|
# if defined(MSWIN32)
|
2804 |
|
|
GC_old_segv_handler = SetUnhandledExceptionFilter(GC_write_fault_handler);
|
2805 |
|
|
if (GC_old_segv_handler != NULL) {
|
2806 |
|
|
# ifdef PRINTSTATS
|
2807 |
|
|
GC_err_printf0("Replaced other UnhandledExceptionFilter\n");
|
2808 |
|
|
# endif
|
2809 |
|
|
} else {
|
2810 |
|
|
GC_old_segv_handler = SIG_DFL;
|
2811 |
|
|
}
|
2812 |
|
|
# endif
|
2813 |
|
|
}
|
2814 |
|
|
#endif /* !DARWIN */
|
2815 |
|
|
|
2816 |
|
|
int GC_incremental_protection_needs()
|
2817 |
|
|
{
|
2818 |
|
|
if (GC_page_size == HBLKSIZE) {
|
2819 |
|
|
return GC_PROTECTS_POINTER_HEAP;
|
2820 |
|
|
} else {
|
2821 |
|
|
return GC_PROTECTS_POINTER_HEAP | GC_PROTECTS_PTRFREE_HEAP;
|
2822 |
|
|
}
|
2823 |
|
|
}
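/*
 * A hypothetical client-side use of the value returned above (the helper
 * names are made up): before handing a heap buffer to a system call such
 * as read(), check whether even pointer-free objects may be protected.
 * Kept inside #if 0 so it does not affect compilation.
 */
#if 0
    if (GC_incremental_protection_needs() & GC_PROTECTS_PTRFREE_HEAP) {
        /* Pointer-free objects may also be write-protected: read() into */
        /* a buffer outside the collected heap and copy afterwards.      */
        use_unmanaged_buffer();
    } else {
        /* A pointer-free (GC_malloc_atomic) buffer is never protected,  */
        /* so it is safe to hand it to read() directly.                  */
        use_atomic_buffer();
    }
#endif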
|
2824 |
|
|
|
2825 |
|
|
#define HAVE_INCREMENTAL_PROTECTION_NEEDS
|
2826 |
|
|
|
2827 |
|
|
#define IS_PTRFREE(hhdr) ((hhdr)->hb_descr == 0)
|
2828 |
|
|
|
2829 |
|
|
#define PAGE_ALIGNED(x) !((word)(x) & (GC_page_size - 1))
|
2830 |
|
|
void GC_protect_heap()
|
2831 |
|
|
{
|
2832 |
|
|
ptr_t start;
|
2833 |
|
|
word len;
|
2834 |
|
|
struct hblk * current;
|
2835 |
|
|
struct hblk * current_start; /* Start of block to be protected. */
|
2836 |
|
|
struct hblk * limit;
|
2837 |
|
|
unsigned i;
|
2838 |
|
|
GC_bool protect_all =
|
2839 |
|
|
(0 != (GC_incremental_protection_needs() & GC_PROTECTS_PTRFREE_HEAP));
|
2840 |
|
|
for (i = 0; i < GC_n_heap_sects; i++) {
|
2841 |
|
|
start = GC_heap_sects[i].hs_start;
|
2842 |
|
|
len = GC_heap_sects[i].hs_bytes;
|
2843 |
|
|
if (protect_all) {
|
2844 |
|
|
PROTECT(start, len);
|
2845 |
|
|
} else {
|
2846 |
|
|
GC_ASSERT(PAGE_ALIGNED(len))
|
2847 |
|
|
GC_ASSERT(PAGE_ALIGNED(start))
|
2848 |
|
|
current_start = current = (struct hblk *)start;
|
2849 |
|
|
limit = (struct hblk *)(start + len);
|
2850 |
|
|
while (current < limit) {
|
2851 |
|
|
hdr * hhdr;
|
2852 |
|
|
word nhblks;
|
2853 |
|
|
GC_bool is_ptrfree;
|
2854 |
|
|
|
2855 |
|
|
GC_ASSERT(PAGE_ALIGNED(current));
|
2856 |
|
|
GET_HDR(current, hhdr);
|
2857 |
|
|
if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
|
2858 |
|
|
/* This can happen only if we're at the beginning of a */
|
2859 |
|
|
/* heap segment, and a block spans heap segments. */
|
2860 |
|
|
/* We will handle that block as part of the preceding */
|
2861 |
|
|
/* segment. */
|
2862 |
|
|
GC_ASSERT(current_start == current);
|
2863 |
|
|
current_start = ++current;
|
2864 |
|
|
continue;
|
2865 |
|
|
}
|
2866 |
|
|
if (HBLK_IS_FREE(hhdr)) {
|
2867 |
|
|
GC_ASSERT(PAGE_ALIGNED(hhdr -> hb_sz));
|
2868 |
|
|
nhblks = divHBLKSZ(hhdr -> hb_sz);
|
2869 |
|
|
is_ptrfree = TRUE; /* dirty on alloc */
|
2870 |
|
|
} else {
|
2871 |
|
|
nhblks = OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
|
2872 |
|
|
is_ptrfree = IS_PTRFREE(hhdr);
|
2873 |
|
|
}
|
2874 |
|
|
if (is_ptrfree) {
|
2875 |
|
|
if (current_start < current) {
|
2876 |
|
|
PROTECT(current_start, (ptr_t)current - (ptr_t)current_start);
|
2877 |
|
|
}
|
2878 |
|
|
current_start = (current += nhblks);
|
2879 |
|
|
} else {
|
2880 |
|
|
current += nhblks;
|
2881 |
|
|
}
|
2882 |
|
|
}
|
2883 |
|
|
if (current_start < current) {
|
2884 |
|
|
PROTECT(current_start, (ptr_t)current - (ptr_t)current_start);
|
2885 |
|
|
}
|
2886 |
|
|
}
|
2887 |
|
|
}
|
2888 |
|
|
}
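/* In the !protect_all case the loop above coalesces each maximal run of    */
/* pointer-containing blocks into a single PROTECT call, and leaves free    */
/* blocks and pointer-free blocks writable, since writes to those need not  */
/* be tracked.                                                               */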
|
2889 |
|
|
|
2890 |
|
|
/* We assume that either the world is stopped or it's OK to lose dirty */
|
2891 |
|
|
/* bits while this is happening (as in GC_enable_incremental). */
|
2892 |
|
|
void GC_read_dirty()
|
2893 |
|
|
{
|
2894 |
|
|
BCOPY((word *)GC_dirty_pages, GC_grungy_pages,
|
2895 |
|
|
(sizeof GC_dirty_pages));
|
2896 |
|
|
BZERO((word *)GC_dirty_pages, (sizeof GC_dirty_pages));
|
2897 |
|
|
GC_protect_heap();
|
2898 |
|
|
}
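/* The write fault handler records dirty pages in GC_dirty_pages as faults  */
/* arrive; GC_read_dirty snapshots that set into GC_grungy_pages for the    */
/* collector, clears it, and re-protects the heap so that subsequent writes */
/* fault again.                                                              */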
|
2899 |
|
|
|
2900 |
|
|
GC_bool GC_page_was_dirty(h)
|
2901 |
|
|
struct hblk * h;
|
2902 |
|
|
{
|
2903 |
|
|
register word index = PHT_HASH(h);
|
2904 |
|
|
|
2905 |
|
|
return(HDR(h) == 0 || get_pht_entry_from_index(GC_grungy_pages, index));
|
2906 |
|
|
}
|
2907 |
|
|
|
2908 |
|
|
/*
|
2909 |
|
|
* Acquiring the allocation lock here is dangerous, since this
|
2910 |
|
|
* can be called from within GC_call_with_alloc_lock, and the cord
|
2911 |
|
|
* package does so. On systems that allow nested lock acquisition, this
|
2912 |
|
|
* happens to work.
|
2913 |
|
|
* On other systems, SET_LOCK_HOLDER and friends must be suitably defined.
|
2914 |
|
|
*/
|
2915 |
|
|
|
2916 |
|
|
static GC_bool syscall_acquired_lock = FALSE; /* Protected by GC lock. */
|
2917 |
|
|
|
2918 |
|
|
void GC_begin_syscall()
|
2919 |
|
|
{
|
2920 |
|
|
if (!I_HOLD_LOCK()) {
|
2921 |
|
|
LOCK();
|
2922 |
|
|
syscall_acquired_lock = TRUE;
|
2923 |
|
|
}
|
2924 |
|
|
}
|
2925 |
|
|
|
2926 |
|
|
void GC_end_syscall()
|
2927 |
|
|
{
|
2928 |
|
|
if (syscall_acquired_lock) {
|
2929 |
|
|
syscall_acquired_lock = FALSE;
|
2930 |
|
|
UNLOCK();
|
2931 |
|
|
}
|
2932 |
|
|
}
|
2933 |
|
|
|
2934 |
|
|
void GC_unprotect_range(addr, len)
|
2935 |
|
|
ptr_t addr;
|
2936 |
|
|
word len;
|
2937 |
|
|
{
|
2938 |
|
|
struct hblk * start_block;
|
2939 |
|
|
struct hblk * end_block;
|
2940 |
|
|
register struct hblk *h;
|
2941 |
|
|
ptr_t obj_start;
|
2942 |
|
|
|
2943 |
|
|
if (!GC_dirty_maintained) return;
|
2944 |
|
|
obj_start = GC_base(addr);
|
2945 |
|
|
if (obj_start == 0) return;
|
2946 |
|
|
if (GC_base(addr + len - 1) != obj_start) {
|
2947 |
|
|
ABORT("GC_unprotect_range(range bigger than object)");
|
2948 |
|
|
}
|
2949 |
|
|
start_block = (struct hblk *)((word)addr & ~(GC_page_size - 1));
|
2950 |
|
|
end_block = (struct hblk *)((word)(addr + len - 1) & ~(GC_page_size - 1));
|
2951 |
|
|
end_block += GC_page_size/HBLKSIZE - 1;
|
2952 |
|
|
for (h = start_block; h <= end_block; h++) {
|
2953 |
|
|
register word index = PHT_HASH(h);
|
2954 |
|
|
|
2955 |
|
|
async_set_pht_entry_from_index(GC_dirty_pages, index);
|
2956 |
|
|
}
|
2957 |
|
|
UNPROTECT(start_block,
|
2958 |
|
|
((ptr_t)end_block - (ptr_t)start_block) + HBLKSIZE);
|
2959 |
|
|
}
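/* Used by the (optional) system call wrappers below: before a system call  */
/* writes into the heap, the target object's pages are marked dirty by hand */
/* and unprotected, since a kernel write to a protected page would fail     */
/* with EFAULT rather than raise a fault we could catch.                    */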
|
2960 |
|
|
|
2961 |
|
|
#if 0
|
2962 |
|
|
|
2963 |
|
|
/* We no longer wrap read by default, since that was causing too many */
|
2964 |
|
|
/* problems. It is preferred that the client instead avoids writing */
|
2965 |
|
|
/* to the write-protected heap with a system call. */
|
2966 |
|
|
/* This still serves as sample code if you do want to wrap system calls. */
|
2967 |
|
|
|
2968 |
|
|
#if !defined(MSWIN32) && !defined(MSWINCE) && !defined(GC_USE_LD_WRAP)
|
2969 |
|
|
/* Replacement for UNIX system call. */
|
2970 |
|
|
/* Other calls that write to the heap should be handled similarly. */
|
2971 |
|
|
/* Note that this doesn't work well for blocking reads: It will hold */
|
2972 |
|
|
/* the allocation lock for the entire duration of the call. Multithreaded */
|
2973 |
|
|
/* clients should really ensure that it won't block, either by setting */
|
2974 |
|
|
/* the descriptor nonblocking, or by calling select or poll first, to */
|
2975 |
|
|
/* make sure that input is available. */
|
2976 |
|
|
/* Another, preferred alternative is to ensure that system calls never */
|
2977 |
|
|
/* write to the protected heap (see above). */
|
2978 |
|
|
# if defined(__STDC__) && !defined(SUNOS4)
|
2979 |
|
|
# include <unistd.h>
|
2980 |
|
|
# include <sys/uio.h>
|
2981 |
|
|
ssize_t read(int fd, void *buf, size_t nbyte)
|
2982 |
|
|
# else
|
2983 |
|
|
# ifndef LINT
|
2984 |
|
|
int read(fd, buf, nbyte)
|
2985 |
|
|
# else
|
2986 |
|
|
int GC_read(fd, buf, nbyte)
|
2987 |
|
|
# endif
|
2988 |
|
|
int fd;
|
2989 |
|
|
char *buf;
|
2990 |
|
|
int nbyte;
|
2991 |
|
|
# endif
|
2992 |
|
|
{
|
2993 |
|
|
int result;
|
2994 |
|
|
|
2995 |
|
|
GC_begin_syscall();
|
2996 |
|
|
GC_unprotect_range(buf, (word)nbyte);
|
2997 |
|
|
# if defined(IRIX5) || defined(GC_LINUX_THREADS)
|
2998 |
|
|
/* Indirect system call may not always be easily available. */
|
2999 |
|
|
/* We could call _read, but that would interfere with the */
|
3000 |
|
|
/* libpthread interception of read. */
|
3001 |
|
|
/* On Linux, we have to be careful with the linuxthreads */
|
3002 |
|
|
/* read interception. */
|
3003 |
|
|
{
|
3004 |
|
|
struct iovec iov;
|
3005 |
|
|
|
3006 |
|
|
iov.iov_base = buf;
|
3007 |
|
|
iov.iov_len = nbyte;
|
3008 |
|
|
result = readv(fd, &iov, 1);
|
3009 |
|
|
}
|
3010 |
|
|
# else
|
3011 |
|
|
# if defined(HURD)
|
3012 |
|
|
result = __read(fd, buf, nbyte);
|
3013 |
|
|
# else
|
3014 |
|
|
/* The two zero args at the end of this list are because one
|
3015 |
|
|
IA-64 syscall() implementation actually requires six args
|
3016 |
|
|
to be passed, even though they aren't always used. */
|
3017 |
|
|
result = syscall(SYS_read, fd, buf, nbyte, 0, 0);
|
3018 |
|
|
# endif /* !HURD */
|
3019 |
|
|
# endif
|
3020 |
|
|
GC_end_syscall();
|
3021 |
|
|
return(result);
|
3022 |
|
|
}
|
3023 |
|
|
#endif /* !MSWIN32 && !MSWINCE && !GC_USE_LD_WRAP */
|
3024 |
|
|
|
3025 |
|
|
#if defined(GC_USE_LD_WRAP) && !defined(THREADS)
|
3026 |
|
|
/* We use the GNU ld call wrapping facility. */
|
3027 |
|
|
/* This requires that the linker be invoked with "--wrap read". */
|
3028 |
|
|
/* This can be done by passing -Wl,"--wrap read" to gcc. */
|
3029 |
|
|
/* I'm not sure that this actually wraps whatever version of read */
|
3030 |
|
|
/* is called by stdio. That code also mentions __read. */
|
3031 |
|
|
# include <unistd.h>
|
3032 |
|
|
ssize_t __wrap_read(int fd, void *buf, size_t nbyte)
|
3033 |
|
|
{
|
3034 |
|
|
int result;
|
3035 |
|
|
|
3036 |
|
|
GC_begin_syscall();
|
3037 |
|
|
GC_unprotect_range(buf, (word)nbyte);
|
3038 |
|
|
result = __real_read(fd, buf, nbyte);
|
3039 |
|
|
GC_end_syscall();
|
3040 |
|
|
return(result);
|
3041 |
|
|
}
|
3042 |
|
|
|
3043 |
|
|
/* We should probably also do this for __read, or whatever stdio */
|
3044 |
|
|
/* actually calls. */
|
3045 |
|
|
#endif
|
3046 |
|
|
|
3047 |
|
|
#endif /* 0 */
|
3048 |
|
|
|
3049 |
|
|
/*ARGSUSED*/
|
3050 |
|
|
GC_bool GC_page_was_ever_dirty(h)
|
3051 |
|
|
struct hblk *h;
|
3052 |
|
|
{
|
3053 |
|
|
return(TRUE);
|
3054 |
|
|
}
|
3055 |
|
|
|
3056 |
|
|
/* Reset the n pages starting at h to "was never dirty" status. */
|
3057 |
|
|
/*ARGSUSED*/
|
3058 |
|
|
void GC_is_fresh(h, n)
|
3059 |
|
|
struct hblk *h;
|
3060 |
|
|
word n;
|
3061 |
|
|
{
|
3062 |
|
|
}
|
3063 |
|
|
|
3064 |
|
|
# endif /* MPROTECT_VDB */
|
3065 |
|
|
|
3066 |
|
|
# ifdef PROC_VDB
|
3067 |
|
|
|
3068 |
|
|
/*
|
3069 |
|
|
* See DEFAULT_VDB for interface descriptions.
|
3070 |
|
|
*/
|
3071 |
|
|
|
3072 |
|
|
/*
|
3073 |
|
|
* This implementation assumes a Solaris 2.X like /proc pseudo-file-system
|
3074 |
|
|
* from which we can read page modified bits. This facility is far from
|
3075 |
|
|
* optimal (e.g. we would like to get the info for only some of the
|
3076 |
|
|
* address space), but it avoids intercepting system calls.
|
3077 |
|
|
*/
|
3078 |
|
|
|
3079 |
|
|
#include <errno.h>
|
3080 |
|
|
#include <sys/types.h>
|
3081 |
|
|
#include <sys/signal.h>
|
3082 |
|
|
#include <sys/fault.h>
|
3083 |
|
|
#include <sys/syscall.h>
|
3084 |
|
|
#include <sys/procfs.h>
|
3085 |
|
|
#include <sys/stat.h>
|
3086 |
|
|
|
3087 |
|
|
#define INITIAL_BUF_SZ 16384
|
3088 |
|
|
word GC_proc_buf_size = INITIAL_BUF_SZ;
|
3089 |
|
|
char *GC_proc_buf;
|
3090 |
|
|
|
3091 |
|
|
#ifdef GC_SOLARIS_THREADS
|
3092 |
|
|
/* We don't have exact sp values for threads. So we count on */
|
3093 |
|
|
/* occasionally declaring stack pages to be fresh. Thus we */
|
3094 |
|
|
/* need a real implementation of GC_is_fresh. We can't clear */
|
3095 |
|
|
/* entries in GC_written_pages, since that would declare all */
|
3096 |
|
|
/* pages with the given hash address to be fresh. */
|
3097 |
|
|
# define MAX_FRESH_PAGES 8*1024 /* Must be power of 2 */
|
3098 |
|
|
struct hblk ** GC_fresh_pages; /* A direct mapped cache. */
|
3099 |
|
|
/* Collisions are dropped. */
|
3100 |
|
|
|
3101 |
|
|
# define FRESH_PAGE_SLOT(h) (divHBLKSZ((word)(h)) & (MAX_FRESH_PAGES-1))
|
3102 |
|
|
# define ADD_FRESH_PAGE(h) \
|
3103 |
|
|
GC_fresh_pages[FRESH_PAGE_SLOT(h)] = (h)
|
3104 |
|
|
# define PAGE_IS_FRESH(h) \
|
3105 |
|
|
(GC_fresh_pages[FRESH_PAGE_SLOT(h)] == (h) && (h) != 0)
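/* The cache is direct mapped on the block address; a colliding             */
/* ADD_FRESH_PAGE simply overwrites the older entry, so a page may lose its */
/* "fresh" status early, but can never be reported fresh when it is not.    */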
|
3106 |
|
|
#endif
|
3107 |
|
|
|
3108 |
|
|
/* Add all pages in pht2 to pht1 */
|
3109 |
|
|
void GC_or_pages(pht1, pht2)
|
3110 |
|
|
page_hash_table pht1, pht2;
|
3111 |
|
|
{
|
3112 |
|
|
register int i;
|
3113 |
|
|
|
3114 |
|
|
for (i = 0; i < PHT_SIZE; i++) pht1[i] |= pht2[i];
|
3115 |
|
|
}
|
3116 |
|
|
|
3117 |
|
|
int GC_proc_fd;
|
3118 |
|
|
|
3119 |
|
|
void GC_dirty_init()
|
3120 |
|
|
{
|
3121 |
|
|
int fd;
|
3122 |
|
|
char buf[30];
|
3123 |
|
|
|
3124 |
|
|
GC_dirty_maintained = TRUE;
|
3125 |
|
|
if (GC_words_allocd != 0 || GC_words_allocd_before_gc != 0) {
|
3126 |
|
|
register int i;
|
3127 |
|
|
|
3128 |
|
|
for (i = 0; i < PHT_SIZE; i++) GC_written_pages[i] = (word)(-1);
|
3129 |
|
|
# ifdef PRINTSTATS
|
3130 |
|
|
GC_printf1("Allocated words:%lu:all pages may have been written\n",
|
3131 |
|
|
(unsigned long)
|
3132 |
|
|
(GC_words_allocd + GC_words_allocd_before_gc));
|
3133 |
|
|
# endif
|
3134 |
|
|
}
|
3135 |
|
|
sprintf(buf, "/proc/%d", getpid());
|
3136 |
|
|
fd = open(buf, O_RDONLY);
|
3137 |
|
|
if (fd < 0) {
|
3138 |
|
|
ABORT("/proc open failed");
|
3139 |
|
|
}
|
3140 |
|
|
GC_proc_fd = syscall(SYS_ioctl, fd, PIOCOPENPD, 0);
|
3141 |
|
|
close(fd);
|
3142 |
|
|
syscall(SYS_fcntl, GC_proc_fd, F_SETFD, FD_CLOEXEC);
|
3143 |
|
|
if (GC_proc_fd < 0) {
|
3144 |
|
|
ABORT("/proc ioctl failed");
|
3145 |
|
|
}
|
3146 |
|
|
GC_proc_buf = GC_scratch_alloc(GC_proc_buf_size);
|
3147 |
|
|
# ifdef GC_SOLARIS_THREADS
|
3148 |
|
|
GC_fresh_pages = (struct hblk **)
|
3149 |
|
|
GC_scratch_alloc(MAX_FRESH_PAGES * sizeof (struct hblk *));
|
3150 |
|
|
if (GC_fresh_pages == 0) {
|
3151 |
|
|
GC_err_printf0("No space for fresh pages\n");
|
3152 |
|
|
EXIT();
|
3153 |
|
|
}
|
3154 |
|
|
BZERO(GC_fresh_pages, MAX_FRESH_PAGES * sizeof (struct hblk *));
|
3155 |
|
|
# endif
|
3156 |
|
|
}
|
3157 |
|
|
|
3158 |
|
|
/* Ignore write hints. They don't help us here. */
|
3159 |
|
|
/*ARGSUSED*/
|
3160 |
|
|
void GC_remove_protection(h, nblocks, is_ptrfree)
|
3161 |
|
|
struct hblk *h;
|
3162 |
|
|
word nblocks;
|
3163 |
|
|
GC_bool is_ptrfree;
|
3164 |
|
|
{
|
3165 |
|
|
}
|
3166 |
|
|
|
3167 |
|
|
#ifdef GC_SOLARIS_THREADS
|
3168 |
|
|
# define READ(fd,buf,nbytes) syscall(SYS_read, fd, buf, nbytes)
|
3169 |
|
|
#else
|
3170 |
|
|
# define READ(fd,buf,nbytes) read(fd, buf, nbytes)
|
3171 |
|
|
#endif
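/* Under GC_SOLARIS_THREADS the raw SYS_read syscall is used, presumably to */
/* avoid any read() wrapper interposed by the threads support.              */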
|
3172 |
|
|
|
3173 |
|
|
void GC_read_dirty()
|
3174 |
|
|
{
|
3175 |
|
|
unsigned long ps, np;
|
3176 |
|
|
int nmaps;
|
3177 |
|
|
ptr_t vaddr;
|
3178 |
|
|
struct prasmap * map;
|
3179 |
|
|
char * bufp;
|
3180 |
|
|
ptr_t current_addr, limit;
|
3181 |
|
|
int i;
|
3182 |
|
|
int dummy;
|
3183 |
|
|
|
3184 |
|
|
BZERO(GC_grungy_pages, (sizeof GC_grungy_pages));
|
3185 |
|
|
|
3186 |
|
|
bufp = GC_proc_buf;
|
3187 |
|
|
if (READ(GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
|
3188 |
|
|
# ifdef PRINTSTATS
|
3189 |
|
|
GC_printf1("/proc read failed: GC_proc_buf_size = %lu\n",
|
3190 |
|
|
GC_proc_buf_size);
|
3191 |
|
|
# endif
|
3192 |
|
|
{
|
3193 |
|
|
/* Retry with larger buffer. */
|
3194 |
|
|
word new_size = 2 * GC_proc_buf_size;
|
3195 |
|
|
char * new_buf = GC_scratch_alloc(new_size);
|
3196 |
|
|
|
3197 |
|
|
if (new_buf != 0) {
|
3198 |
|
|
GC_proc_buf = bufp = new_buf;
|
3199 |
|
|
GC_proc_buf_size = new_size;
|
3200 |
|
|
}
|
3201 |
|
|
if (READ(GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
|
3202 |
|
|
WARN("Insufficient space for /proc read\n", 0);
|
3203 |
|
|
/* Punt: */
|
3204 |
|
|
memset(GC_grungy_pages, 0xff, sizeof (page_hash_table));
|
3205 |
|
|
memset(GC_written_pages, 0xff, sizeof(page_hash_table));
|
3206 |
|
|
# ifdef GC_SOLARIS_THREADS
|
3207 |
|
|
BZERO(GC_fresh_pages,
|
3208 |
|
|
MAX_FRESH_PAGES * sizeof (struct hblk *));
|
3209 |
|
|
# endif
|
3210 |
|
|
return;
|
3211 |
|
|
}
|
3212 |
|
|
}
|
3213 |
|
|
}
|
3214 |
|
|
/* Copy dirty bits into GC_grungy_pages */
|
3215 |
|
|
nmaps = ((struct prpageheader *)bufp) -> pr_nmap;
|
3216 |
|
|
/* printf( "nmaps = %d, PG_REFERENCED = %d, PG_MODIFIED = %d\n",
|
3217 |
|
|
nmaps, PG_REFERENCED, PG_MODIFIED); */
|
3218 |
|
|
bufp = bufp + sizeof(struct prpageheader);
|
3219 |
|
|
for (i = 0; i < nmaps; i++) {
|
3220 |
|
|
map = (struct prasmap *)bufp;
|
3221 |
|
|
vaddr = (ptr_t)(map -> pr_vaddr);
|
3222 |
|
|
ps = map -> pr_pagesize;
|
3223 |
|
|
np = map -> pr_npage;
|
3224 |
|
|
/* printf("vaddr = 0x%X, ps = 0x%X, np = 0x%X\n", vaddr, ps, np); */
|
3225 |
|
|
limit = vaddr + ps * np;
|
3226 |
|
|
bufp += sizeof (struct prasmap);
|
3227 |
|
|
for (current_addr = vaddr;
|
3228 |
|
|
current_addr < limit; current_addr += ps){
|
3229 |
|
|
if ((*bufp++) & PG_MODIFIED) {
|
3230 |
|
|
register struct hblk * h = (struct hblk *) current_addr;
|
3231 |
|
|
|
3232 |
|
|
while ((ptr_t)h < current_addr + ps) {
|
3233 |
|
|
register word index = PHT_HASH(h);
|
3234 |
|
|
|
3235 |
|
|
set_pht_entry_from_index(GC_grungy_pages, index);
|
3236 |
|
|
# ifdef GC_SOLARIS_THREADS
|
3237 |
|
|
{
|
3238 |
|
|
register int slot = FRESH_PAGE_SLOT(h);
|
3239 |
|
|
|
3240 |
|
|
if (GC_fresh_pages[slot] == h) {
|
3241 |
|
|
GC_fresh_pages[slot] = 0;
|
3242 |
|
|
}
|
3243 |
|
|
}
|
3244 |
|
|
# endif
|
3245 |
|
|
h++;
|
3246 |
|
|
}
|
3247 |
|
|
}
|
3248 |
|
|
}
|
3249 |
|
|
bufp += sizeof(long) - 1;
|
3250 |
|
|
bufp = (char *)((unsigned long)bufp & ~(sizeof(long)-1));
|
3251 |
|
|
}
|
3252 |
|
|
/* Update GC_written_pages. */
|
3253 |
|
|
GC_or_pages(GC_written_pages, GC_grungy_pages);
|
3254 |
|
|
# ifdef GC_SOLARIS_THREADS
|
3255 |
|
|
/* Make sure that old stacks are considered completely clean */
|
3256 |
|
|
/* unless written again. */
|
3257 |
|
|
GC_old_stacks_are_fresh();
|
3258 |
|
|
# endif
|
3259 |
|
|
}
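/* The /proc page data read above consists of a struct prpageheader         */
/* followed, for each of pr_nmap mappings, by a struct prasmap and then one */
/* flag byte per page, padded to a long boundary; the parsing loop above    */
/* mirrors that layout.                                                     */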
|
3260 |
|
|
|
3261 |
|
|
#undef READ
|
3262 |
|
|
|
3263 |
|
|
GC_bool GC_page_was_dirty(h)
|
3264 |
|
|
struct hblk *h;
|
3265 |
|
|
{
|
3266 |
|
|
register word index = PHT_HASH(h);
|
3267 |
|
|
register GC_bool result;
|
3268 |
|
|
|
3269 |
|
|
result = get_pht_entry_from_index(GC_grungy_pages, index);
|
3270 |
|
|
# ifdef GC_SOLARIS_THREADS
|
3271 |
|
|
if (result && PAGE_IS_FRESH(h)) result = FALSE;
|
3272 |
|
|
/* This happens only if page was declared fresh since */
|
3273 |
|
|
/* the read_dirty call, e.g. because it's in an unused */
|
3274 |
|
|
/* thread stack. It's OK to treat it as clean, in */
|
3275 |
|
|
/* that case. And it's consistent with */
|
3276 |
|
|
/* GC_page_was_ever_dirty. */
|
3277 |
|
|
# endif
|
3278 |
|
|
return(result);
|
3279 |
|
|
}
|
3280 |
|
|
|
3281 |
|
|
GC_bool GC_page_was_ever_dirty(h)
|
3282 |
|
|
struct hblk *h;
|
3283 |
|
|
{
|
3284 |
|
|
register word index = PHT_HASH(h);
|
3285 |
|
|
register GC_bool result;
|
3286 |
|
|
|
3287 |
|
|
result = get_pht_entry_from_index(GC_written_pages, index);
|
3288 |
|
|
# ifdef GC_SOLARIS_THREADS
|
3289 |
|
|
if (result && PAGE_IS_FRESH(h)) result = FALSE;
|
3290 |
|
|
# endif
|
3291 |
|
|
return(result);
|
3292 |
|
|
}
|
3293 |
|
|
|
3294 |
|
|
/* Caller holds allocation lock. */
|
3295 |
|
|
void GC_is_fresh(h, n)
|
3296 |
|
|
struct hblk *h;
|
3297 |
|
|
word n;
|
3298 |
|
|
{
|
3299 |
|
|
|
3300 |
|
|
register word index;
|
3301 |
|
|
|
3302 |
|
|
# ifdef GC_SOLARIS_THREADS
|
3303 |
|
|
register word i;
|
3304 |
|
|
|
3305 |
|
|
if (GC_fresh_pages != 0) {
|
3306 |
|
|
for (i = 0; i < n; i++) {
|
3307 |
|
|
ADD_FRESH_PAGE(h + i);
|
3308 |
|
|
}
|
3309 |
|
|
}
|
3310 |
|
|
# endif
|
3311 |
|
|
}
|
3312 |
|
|
|
3313 |
|
|
# endif /* PROC_VDB */
|
3314 |
|
|
|
3315 |
|
|
|
3316 |
|
|
# ifdef PCR_VDB
|
3317 |
|
|
|
3318 |
|
|
# include "vd/PCR_VD.h"
|
3319 |
|
|
|
3320 |
|
|
# define NPAGES (32*1024) /* 128 MB */
|
3321 |
|
|
|
3322 |
|
|
PCR_VD_DB GC_grungy_bits[NPAGES];
|
3323 |
|
|
|
3324 |
|
|
ptr_t GC_vd_base; /* Address corresponding to GC_grungy_bits[0] */
|
3325 |
|
|
/* HBLKSIZE aligned. */
|
3326 |
|
|
|
3327 |
|
|
void GC_dirty_init()
|
3328 |
|
|
{
|
3329 |
|
|
GC_dirty_maintained = TRUE;
|
3330 |
|
|
/* For the time being, we assume the heap generally grows up */
|
3331 |
|
|
GC_vd_base = GC_heap_sects[0].hs_start;
|
3332 |
|
|
if (GC_vd_base == 0) {
|
3333 |
|
|
ABORT("Bad initial heap segment");
|
3334 |
|
|
}
|
3335 |
|
|
if (PCR_VD_Start(HBLKSIZE, GC_vd_base, NPAGES*HBLKSIZE)
|
3336 |
|
|
!= PCR_ERes_okay) {
|
3337 |
|
|
ABORT("dirty bit initialization failed");
|
3338 |
|
|
}
|
3339 |
|
|
}
|
3340 |
|
|
|
3341 |
|
|
void GC_read_dirty()
|
3342 |
|
|
{
|
3343 |
|
|
/* lazily enable dirty bits on newly added heap sects */
|
3344 |
|
|
{
|
3345 |
|
|
static int onhs = 0;
|
3346 |
|
|
int nhs = GC_n_heap_sects;
|
3347 |
|
|
for( ; onhs < nhs; onhs++ ) {
|
3348 |
|
|
PCR_VD_WriteProtectEnable(
|
3349 |
|
|
GC_heap_sects[onhs].hs_start,
|
3350 |
|
|
GC_heap_sects[onhs].hs_bytes );
|
3351 |
|
|
}
|
3352 |
|
|
}
|
3353 |
|
|
|
3354 |
|
|
|
3355 |
|
|
if (PCR_VD_Clear(GC_vd_base, NPAGES*HBLKSIZE, GC_grungy_bits)
|
3356 |
|
|
!= PCR_ERes_okay) {
|
3357 |
|
|
ABORT("dirty bit read failed");
|
3358 |
|
|
}
|
3359 |
|
|
}
|
3360 |
|
|
|
3361 |
|
|
GC_bool GC_page_was_dirty(h)
|
3362 |
|
|
struct hblk *h;
|
3363 |
|
|
{
|
3364 |
|
|
if((ptr_t)h < GC_vd_base || (ptr_t)h >= GC_vd_base + NPAGES*HBLKSIZE) {
|
3365 |
|
|
return(TRUE);
|
3366 |
|
|
}
|
3367 |
|
|
return(GC_grungy_bits[h - (struct hblk *)GC_vd_base] & PCR_VD_DB_dirtyBit);
|
3368 |
|
|
}
|
3369 |
|
|
|
3370 |
|
|
/*ARGSUSED*/
|
3371 |
|
|
void GC_remove_protection(h, nblocks, is_ptrfree)
|
3372 |
|
|
struct hblk *h;
|
3373 |
|
|
word nblocks;
|
3374 |
|
|
GC_bool is_ptrfree;
|
3375 |
|
|
{
|
3376 |
|
|
PCR_VD_WriteProtectDisable(h, nblocks*HBLKSIZE);
|
3377 |
|
|
PCR_VD_WriteProtectEnable(h, nblocks*HBLKSIZE);
|
3378 |
|
|
}
|
3379 |
|
|
|
3380 |
|
|
# endif /* PCR_VDB */
|
3381 |
|
|
|
3382 |
|
|
#if defined(MPROTECT_VDB) && defined(DARWIN)
|
3383 |
|
|
/* The following sources were used as a *reference* for this exception handling
|
3384 |
|
|
code:
|
3385 |
|
|
1. Apple's mach/xnu documentation
|
3386 |
|
|
2. Timothy J. Wood's "Mach Exception Handlers 101" post to the
|
3387 |
|
|
omnigroup's macosx-dev list.
|
3388 |
|
|
www.omnigroup.com/mailman/archive/macosx-dev/2000-June/014178.html
|
3389 |
|
|
3. macosx-nat.c from Apple's GDB source code.
|
3390 |
|
|
*/
|
3391 |
|
|
|
3392 |
|
|
/* The bug that caused all this trouble should now be fixed. This should
|
3393 |
|
|
eventually be removed if all goes well. */
|
3394 |
|
|
/* define BROKEN_EXCEPTION_HANDLING */
|
3395 |
|
|
|
3396 |
|
|
#include <mach/mach.h>
|
3397 |
|
|
#include <mach/mach_error.h>
|
3398 |
|
|
#include <mach/thread_status.h>
|
3399 |
|
|
#include <mach/exception.h>
|
3400 |
|
|
#include <mach/task.h>
|
3401 |
|
|
#include <pthread.h>
|
3402 |
|
|
|
3403 |
|
|
/* These are not defined in any header, although they are documented */
|
3404 |
|
|
extern boolean_t exc_server(mach_msg_header_t *,mach_msg_header_t *);
|
3405 |
|
|
extern kern_return_t exception_raise(
|
3406 |
|
|
mach_port_t,mach_port_t,mach_port_t,
|
3407 |
|
|
exception_type_t,exception_data_t,mach_msg_type_number_t);
|
3408 |
|
|
extern kern_return_t exception_raise_state(
|
3409 |
|
|
mach_port_t,mach_port_t,mach_port_t,
|
3410 |
|
|
exception_type_t,exception_data_t,mach_msg_type_number_t,
|
3411 |
|
|
thread_state_flavor_t*,thread_state_t,mach_msg_type_number_t,
|
3412 |
|
|
thread_state_t,mach_msg_type_number_t*);
|
3413 |
|
|
extern kern_return_t exception_raise_state_identity(
|
3414 |
|
|
mach_port_t,mach_port_t,mach_port_t,
|
3415 |
|
|
exception_type_t,exception_data_t,mach_msg_type_number_t,
|
3416 |
|
|
thread_state_flavor_t*,thread_state_t,mach_msg_type_number_t,
|
3417 |
|
|
thread_state_t,mach_msg_type_number_t*);
|
3418 |
|
|
|
3419 |
|
|
|
3420 |
|
|
#define MAX_EXCEPTION_PORTS 16
|
3421 |
|
|
|
3422 |
|
|
static struct {
|
3423 |
|
|
mach_msg_type_number_t count;
|
3424 |
|
|
exception_mask_t masks[MAX_EXCEPTION_PORTS];
|
3425 |
|
|
exception_handler_t ports[MAX_EXCEPTION_PORTS];
|
3426 |
|
|
exception_behavior_t behaviors[MAX_EXCEPTION_PORTS];
|
3427 |
|
|
thread_state_flavor_t flavors[MAX_EXCEPTION_PORTS];
|
3428 |
|
|
} GC_old_exc_ports;
|
3429 |
|
|
|
3430 |
|
|
static struct {
|
3431 |
|
|
mach_port_t exception;
|
3432 |
|
|
#if defined(THREADS)
|
3433 |
|
|
mach_port_t reply;
|
3434 |
|
|
#endif
|
3435 |
|
|
} GC_ports;
|
3436 |
|
|
|
3437 |
|
|
typedef struct {
|
3438 |
|
|
mach_msg_header_t head;
|
3439 |
|
|
} GC_msg_t;
|
3440 |
|
|
|
3441 |
|
|
typedef enum {
|
3442 |
|
|
GC_MP_NORMAL, GC_MP_DISCARDING, GC_MP_STOPPED
|
3443 |
|
|
} GC_mprotect_state_t;
|
3444 |
|
|
|
3445 |
|
|
/* FIXME: 1 and 2 seem to be safe to use in the msgh_id field,
|
3446 |
|
|
but it isn't documented. Use the source and see if they
|
3447 |
|
|
should be ok. */
|
3448 |
|
|
#define ID_STOP 1
|
3449 |
|
|
#define ID_RESUME 2
|
3450 |
|
|
|
3451 |
|
|
/* These values are only used on the reply port */
|
3452 |
|
|
#define ID_ACK 3
|
3453 |
|
|
|
3454 |
|
|
#if defined(THREADS)
|
3455 |
|
|
|
3456 |
|
|
GC_mprotect_state_t GC_mprotect_state;
|
3457 |
|
|
|
3458 |
|
|
/* The following should ONLY be called when the world is stopped */
|
3459 |
|
|
static void GC_mprotect_thread_notify(mach_msg_id_t id) {
|
3460 |
|
|
struct {
|
3461 |
|
|
GC_msg_t msg;
|
3462 |
|
|
mach_msg_trailer_t trailer;
|
3463 |
|
|
} buf;
|
3464 |
|
|
mach_msg_return_t r;
|
3465 |
|
|
/* remote, local */
|
3466 |
|
|
buf.msg.head.msgh_bits =
|
3467 |
|
|
MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND,0);
|
3468 |
|
|
buf.msg.head.msgh_size = sizeof(buf.msg);
|
3469 |
|
|
buf.msg.head.msgh_remote_port = GC_ports.exception;
|
3470 |
|
|
buf.msg.head.msgh_local_port = MACH_PORT_NULL;
|
3471 |
|
|
buf.msg.head.msgh_id = id;
|
3472 |
|
|
|
3473 |
|
|
r = mach_msg(
|
3474 |
|
|
&buf.msg.head,
|
3475 |
|
|
MACH_SEND_MSG|MACH_RCV_MSG|MACH_RCV_LARGE,
|
3476 |
|
|
sizeof(buf.msg),
|
3477 |
|
|
sizeof(buf),
|
3478 |
|
|
GC_ports.reply,
|
3479 |
|
|
MACH_MSG_TIMEOUT_NONE,
|
3480 |
|
|
MACH_PORT_NULL);
|
3481 |
|
|
if(r != MACH_MSG_SUCCESS)
|
3482 |
|
|
ABORT("mach_msg failed in GC_mprotect_thread_notify");
|
3483 |
|
|
if(buf.msg.head.msgh_id != ID_ACK)
|
3484 |
|
|
ABORT("invalid ack in GC_mprotect_thread_notify");
|
3485 |
|
|
}
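/* Stop/resume handshake: the caller sends ID_STOP or ID_RESUME to the      */
/* exception port and blocks until the handler thread acknowledges the      */
/* transition with ID_ACK on the reply port.                                */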
|
3486 |
|
|
|
3487 |
|
|
/* Should only be called by the mprotect thread */
|
3488 |
|
|
static void GC_mprotect_thread_reply() {
|
3489 |
|
|
GC_msg_t msg;
|
3490 |
|
|
mach_msg_return_t r;
|
3491 |
|
|
/* remote, local */
|
3492 |
|
|
msg.head.msgh_bits =
|
3493 |
|
|
MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND,0);
|
3494 |
|
|
msg.head.msgh_size = sizeof(msg);
|
3495 |
|
|
msg.head.msgh_remote_port = GC_ports.reply;
|
3496 |
|
|
msg.head.msgh_local_port = MACH_PORT_NULL;
|
3497 |
|
|
msg.head.msgh_id = ID_ACK;
|
3498 |
|
|
|
3499 |
|
|
r = mach_msg(
|
3500 |
|
|
&msg.head,
|
3501 |
|
|
MACH_SEND_MSG,
|
3502 |
|
|
sizeof(msg),
|
3503 |
|
|
0,
|
3504 |
|
|
MACH_PORT_NULL,
|
3505 |
|
|
MACH_MSG_TIMEOUT_NONE,
|
3506 |
|
|
MACH_PORT_NULL);
|
3507 |
|
|
if(r != MACH_MSG_SUCCESS)
|
3508 |
|
|
ABORT("mach_msg failed in GC_mprotect_thread_reply");
|
3509 |
|
|
}
|
3510 |
|
|
|
3511 |
|
|
void GC_mprotect_stop() {
|
3512 |
|
|
GC_mprotect_thread_notify(ID_STOP);
|
3513 |
|
|
}
|
3514 |
|
|
void GC_mprotect_resume() {
|
3515 |
|
|
GC_mprotect_thread_notify(ID_RESUME);
|
3516 |
|
|
}
|
3517 |
|
|
|
3518 |
|
|
#else /* !THREADS */
|
3519 |
|
|
/* The compiler should optimize away any GC_mprotect_state computations */
|
3520 |
|
|
#define GC_mprotect_state GC_MP_NORMAL
|
3521 |
|
|
#endif
|
3522 |
|
|
|
3523 |
|
|
static void *GC_mprotect_thread(void *arg) {
|
3524 |
|
|
mach_msg_return_t r;
|
3525 |
|
|
/* These two structures contain some private kernel data. We don't need to
|
3526 |
|
|
access any of it so we don't bother defining a proper struct. The
|
3527 |
|
|
correct definitions are in the xnu source code. */
|
3528 |
|
|
struct {
|
3529 |
|
|
mach_msg_header_t head;
|
3530 |
|
|
char data[256];
|
3531 |
|
|
} reply;
|
3532 |
|
|
struct {
|
3533 |
|
|
mach_msg_header_t head;
|
3534 |
|
|
mach_msg_body_t msgh_body;
|
3535 |
|
|
char data[1024];
|
3536 |
|
|
} msg;
|
3537 |
|
|
|
3538 |
|
|
mach_msg_id_t id;
|
3539 |
|
|
|
3540 |
|
|
GC_darwin_register_mach_handler_thread(mach_thread_self());
|
3541 |
|
|
|
3542 |
|
|
for(;;) {
|
3543 |
|
|
r = mach_msg(
|
3544 |
|
|
&msg.head,
|
3545 |
|
|
MACH_RCV_MSG|MACH_RCV_LARGE|
|
3546 |
|
|
(GC_mprotect_state == GC_MP_DISCARDING ? MACH_RCV_TIMEOUT : 0),
|
3547 |
|
|
0,
|
3548 |
|
|
sizeof(msg),
|
3549 |
|
|
GC_ports.exception,
|
3550 |
|
|
GC_mprotect_state == GC_MP_DISCARDING ? 0 : MACH_MSG_TIMEOUT_NONE,
|
3551 |
|
|
MACH_PORT_NULL);
|
3552 |
|
|
|
3553 |
|
|
id = r == MACH_MSG_SUCCESS ? msg.head.msgh_id : -1;
|
3554 |
|
|
|
3555 |
|
|
#if defined(THREADS)
|
3556 |
|
|
if(GC_mprotect_state == GC_MP_DISCARDING) {
|
3557 |
|
|
if(r == MACH_RCV_TIMED_OUT) {
|
3558 |
|
|
GC_mprotect_state = GC_MP_STOPPED;
|
3559 |
|
|
GC_mprotect_thread_reply();
|
3560 |
|
|
continue;
|
3561 |
|
|
}
|
3562 |
|
|
if(r == MACH_MSG_SUCCESS && (id == ID_STOP || id == ID_RESUME))
|
3563 |
|
|
ABORT("out of order mprotect thread request");
|
3564 |
|
|
}
|
3565 |
|
|
#endif
|
3566 |
|
|
|
3567 |
|
|
if(r != MACH_MSG_SUCCESS) {
|
3568 |
|
|
GC_err_printf2("mach_msg failed with %d %s\n",
|
3569 |
|
|
(int)r,mach_error_string(r));
|
3570 |
|
|
ABORT("mach_msg failed");
|
3571 |
|
|
}
|
3572 |
|
|
|
3573 |
|
|
switch(id) {
|
3574 |
|
|
#if defined(THREADS)
|
3575 |
|
|
case ID_STOP:
|
3576 |
|
|
if(GC_mprotect_state != GC_MP_NORMAL)
|
3577 |
|
|
ABORT("Called mprotect_stop when state wasn't normal");
|
3578 |
|
|
GC_mprotect_state = GC_MP_DISCARDING;
|
3579 |
|
|
break;
|
3580 |
|
|
case ID_RESUME:
|
3581 |
|
|
if(GC_mprotect_state != GC_MP_STOPPED)
|
3582 |
|
|
ABORT("Called mprotect_resume when state wasn't stopped");
|
3583 |
|
|
GC_mprotect_state = GC_MP_NORMAL;
|
3584 |
|
|
GC_mprotect_thread_reply();
|
3585 |
|
|
break;
|
3586 |
|
|
#endif /* THREADS */
|
3587 |
|
|
default:
|
3588 |
|
|
/* Handle the message (calls catch_exception_raise) */
|
3589 |
|
|
if(!exc_server(&msg.head,&reply.head))
|
3590 |
|
|
ABORT("exc_server failed");
|
3591 |
|
|
/* Send the reply */
|
3592 |
|
|
r = mach_msg(
|
3593 |
|
|
&reply.head,
|
3594 |
|
|
MACH_SEND_MSG,
|
3595 |
|
|
reply.head.msgh_size,
|
3596 |
|
|
0,
|
3597 |
|
|
MACH_PORT_NULL,
|
3598 |
|
|
MACH_MSG_TIMEOUT_NONE,
|
3599 |
|
|
MACH_PORT_NULL);
|
3600 |
|
|
if(r != MACH_MSG_SUCCESS) {
|
3601 |
|
|
/* This will fail if the thread dies, but the thread shouldn't
|
3602 |
|
|
die... */
|
3603 |
|
|
#ifdef BROKEN_EXCEPTION_HANDLING
|
3604 |
|
|
GC_err_printf2(
|
3605 |
|
|
"mach_msg failed with %d %s while sending exc reply\n",
|
3606 |
|
|
(int)r,mach_error_string(r));
|
3607 |
|
|
#else
|
3608 |
|
|
ABORT("mach_msg failed while sending exception reply");
|
3609 |
|
|
#endif
|
3610 |
|
|
}
|
3611 |
|
|
} /* switch */
|
3612 |
|
|
} /* for(;;) */
|
3613 |
|
|
/* NOT REACHED */
|
3614 |
|
|
return NULL;
|
3615 |
|
|
}
|
3616 |
|
|
|
3617 |
|
|
/* All this SIGBUS code shouldn't be necessary. All protection faults should
|
3618 |
|
|
be going through the mach exception handler. However, it seems a SIGBUS is
|
3619 |
|
|
occasionally sent for some unknown reason. Even more odd, it seems to be
|
3620 |
|
|
meaningless and safe to ignore. */
|
3621 |
|
|
#ifdef BROKEN_EXCEPTION_HANDLING
|
3622 |
|
|
|
3623 |
|
|
typedef void (* SIG_PF)();
|
3624 |
|
|
static SIG_PF GC_old_bus_handler;
|
3625 |
|
|
|
3626 |
|
|
/* Updates to this aren't atomic, but the SIGBUSs seem pretty rare.
|
3627 |
|
|
Even if this doesn't get updated properly, it isn't really a problem */
|
3628 |
|
|
static int GC_sigbus_count;
|
3629 |
|
|
|
3630 |
|
|
static void GC_darwin_sigbus(int num,siginfo_t *sip,void *context) {
|
3631 |
|
|
if(num != SIGBUS) ABORT("Got a non-sigbus signal in the sigbus handler");
|
3632 |
|
|
|
3633 |
|
|
/* Ugh... some seem safe to ignore, but too many in a row probably means
|
3634 |
|
|
trouble. GC_sigbus_count is reset for each mach exception that is
|
3635 |
|
|
handled */
|
3636 |
|
|
if(GC_sigbus_count >= 8) {
|
3637 |
|
|
ABORT("Got more than 8 SIGBUSs in a row!");
|
3638 |
|
|
} else {
|
3639 |
|
|
GC_sigbus_count++;
|
3640 |
|
|
GC_err_printf0("GC: WARNING: Ignoring SIGBUS.\n");
|
3641 |
|
|
}
|
3642 |
|
|
}
|
3643 |
|
|
#endif /* BROKEN_EXCEPTION_HANDLING */
|
3644 |
|
|
|
3645 |
|
|
void GC_dirty_init() {
|
3646 |
|
|
kern_return_t r;
|
3647 |
|
|
mach_port_t me;
|
3648 |
|
|
pthread_t thread;
|
3649 |
|
|
pthread_attr_t attr;
|
3650 |
|
|
exception_mask_t mask;
|
3651 |
|
|
|
3652 |
|
|
# ifdef PRINTSTATS
|
3653 |
|
|
GC_printf0("Inititalizing mach/darwin mprotect virtual dirty bit "
|
3654 |
|
|
"implementation\n");
|
3655 |
|
|
# endif
|
3656 |
|
|
# ifdef BROKEN_EXCEPTION_HANDLING
|
3657 |
|
|
GC_err_printf0("GC: WARNING: Enabling workarounds for various darwin "
|
3658 |
|
|
"exception handling bugs.\n");
|
3659 |
|
|
# endif
|
3660 |
|
|
GC_dirty_maintained = TRUE;
|
3661 |
|
|
if (GC_page_size % HBLKSIZE != 0) {
|
3662 |
|
|
GC_err_printf0("Page size not multiple of HBLKSIZE\n");
|
3663 |
|
|
ABORT("Page size not multiple of HBLKSIZE");
|
3664 |
|
|
}
|
3665 |
|
|
|
3666 |
|
|
GC_task_self = me = mach_task_self();
|
3667 |
|
|
|
3668 |
|
|
r = mach_port_allocate(me,MACH_PORT_RIGHT_RECEIVE,&GC_ports.exception);
|
3669 |
|
|
if(r != KERN_SUCCESS) ABORT("mach_port_allocate failed (exception port)");
|
3670 |
|
|
|
3671 |
|
|
r = mach_port_insert_right(me,GC_ports.exception,GC_ports.exception,
|
3672 |
|
|
MACH_MSG_TYPE_MAKE_SEND);
|
3673 |
|
|
if(r != KERN_SUCCESS)
|
3674 |
|
|
ABORT("mach_port_insert_right failed (exception port)");
|
3675 |
|
|
|
3676 |
|
|
#if defined(THREADS)
|
3677 |
|
|
r = mach_port_allocate(me,MACH_PORT_RIGHT_RECEIVE,&GC_ports.reply);
|
3678 |
|
|
if(r != KERN_SUCCESS) ABORT("mach_port_allocate failed (reply port)");
|
3679 |
|
|
#endif
|
3680 |
|
|
|
3681 |
|
|
/* The exceptions we want to catch */
|
3682 |
|
|
mask = EXC_MASK_BAD_ACCESS;
|
3683 |
|
|
|
3684 |
|
|
r = task_get_exception_ports(
|
3685 |
|
|
me,
|
3686 |
|
|
mask,
|
3687 |
|
|
GC_old_exc_ports.masks,
|
3688 |
|
|
&GC_old_exc_ports.count,
|
3689 |
|
|
GC_old_exc_ports.ports,
|
3690 |
|
|
GC_old_exc_ports.behaviors,
|
3691 |
|
|
GC_old_exc_ports.flavors
|
3692 |
|
|
);
|
3693 |
|
|
if(r != KERN_SUCCESS) ABORT("task_get_exception_ports failed");
|
3694 |
|
|
|
3695 |
|
|
r = task_set_exception_ports(
|
3696 |
|
|
me,
|
3697 |
|
|
mask,
|
3698 |
|
|
GC_ports.exception,
|
3699 |
|
|
EXCEPTION_DEFAULT,
|
3700 |
|
|
GC_MACH_THREAD_STATE
|
3701 |
|
|
);
|
3702 |
|
|
if(r != KERN_SUCCESS) ABORT("task_set_exception_ports failed");
|
3703 |
|
|
|
3704 |
|
|
if(pthread_attr_init(&attr) != 0) ABORT("pthread_attr_init failed");
|
3705 |
|
|
if(pthread_attr_setdetachstate(&attr,PTHREAD_CREATE_DETACHED) != 0)
|
3706 |
|
|
ABORT("pthread_attr_setdetachedstate failed");
|
3707 |
|
|
|
3708 |
|
|
# undef pthread_create
|
3709 |
|
|
/* This will call the real pthread function, not our wrapper */
|
3710 |
|
|
if(pthread_create(&thread,&attr,GC_mprotect_thread,NULL) != 0)
|
3711 |
|
|
ABORT("pthread_create failed");
|
3712 |
|
|
pthread_attr_destroy(&attr);
|
3713 |
|
|
|
3714 |
|
|
/* Set up the SIGBUS handler for ignoring the meaningless SIGBUSs */
|
3715 |
|
|
#ifdef BROKEN_EXCEPTION_HANDLING
|
3716 |
|
|
{
|
3717 |
|
|
struct sigaction sa, oldsa;
|
3718 |
|
|
sa.sa_handler = (SIG_PF)GC_darwin_sigbus;
|
3719 |
|
|
sigemptyset(&sa.sa_mask);
|
3720 |
|
|
sa.sa_flags = SA_RESTART|SA_SIGINFO;
|
3721 |
|
|
if(sigaction(SIGBUS,&sa,&oldsa) < 0) ABORT("sigaction");
|
3722 |
|
|
GC_old_bus_handler = (SIG_PF)oldsa.sa_handler;
|
3723 |
|
|
if (GC_old_bus_handler != SIG_DFL) {
|
3724 |
|
|
# ifdef PRINTSTATS
|
3725 |
|
|
GC_err_printf0("Replaced other SIGBUS handler\n");
|
3726 |
|
|
# endif
|
3727 |
|
|
}
|
3728 |
|
|
}
|
3729 |
|
|
#endif /* BROKEN_EXCEPTION_HANDLING */
|
3730 |
|
|
}
|
3731 |
|
|
|
3732 |
|
|
/* The source code for Apple's GDB was used as a reference for the exception
|
3733 |
|
|
forwarding code. This code is similar to the GDB code only because there is
|
3734 |
|
|
only one way to do it. */
|
3735 |
|
|
static kern_return_t GC_forward_exception(
|
3736 |
|
|
mach_port_t thread,
|
3737 |
|
|
mach_port_t task,
|
3738 |
|
|
exception_type_t exception,
|
3739 |
|
|
exception_data_t data,
|
3740 |
|
|
mach_msg_type_number_t data_count
|
3741 |
|
|
) {
|
3742 |
|
|
int i;
|
3743 |
|
|
kern_return_t r;
|
3744 |
|
|
mach_port_t port;
|
3745 |
|
|
exception_behavior_t behavior;
|
3746 |
|
|
thread_state_flavor_t flavor;
|
3747 |
|
|
|
3748 |
|
|
thread_state_t thread_state;
|
3749 |
|
|
mach_msg_type_number_t thread_state_count = THREAD_STATE_MAX;
|
3750 |
|
|
|
3751 |
|
|
for(i=0;i<GC_old_exc_ports.count;i++)
|
3752 |
|
|
if(GC_old_exc_ports.masks[i] & (1 << exception))
|
3753 |
|
|
break;
|
3754 |
|
|
if(i==GC_old_exc_ports.count) ABORT("No handler for exception!");
|
3755 |
|
|
|
3756 |
|
|
port = GC_old_exc_ports.ports[i];
|
3757 |
|
|
behavior = GC_old_exc_ports.behaviors[i];
|
3758 |
|
|
flavor = GC_old_exc_ports.flavors[i];
|
3759 |
|
|
|
3760 |
|
|
if(behavior != EXCEPTION_DEFAULT) {
|
3761 |
|
|
r = thread_get_state(thread,flavor,thread_state,&thread_state_count);
|
3762 |
|
|
if(r != KERN_SUCCESS)
|
3763 |
|
|
ABORT("thread_get_state failed in forward_exception");
|
3764 |
|
|
}
|
3765 |
|
|
|
3766 |
|
|
switch(behavior) {
|
3767 |
|
|
case EXCEPTION_DEFAULT:
|
3768 |
|
|
r = exception_raise(port,thread,task,exception,data,data_count);
|
3769 |
|
|
break;
|
3770 |
|
|
case EXCEPTION_STATE:
|
3771 |
|
|
r = exception_raise_state(port,thread,task,exception,data,
|
3772 |
|
|
data_count,&flavor,thread_state,thread_state_count,
|
3773 |
|
|
thread_state,&thread_state_count);
|
3774 |
|
|
break;
|
3775 |
|
|
case EXCEPTION_STATE_IDENTITY:
|
3776 |
|
|
r = exception_raise_state_identity(port,thread,task,exception,data,
|
3777 |
|
|
data_count,&flavor,thread_state,thread_state_count,
|
3778 |
|
|
thread_state,&thread_state_count);
|
3779 |
|
|
break;
|
3780 |
|
|
default:
|
3781 |
|
|
r = KERN_FAILURE; /* make gcc happy */
|
3782 |
|
|
ABORT("forward_exception: unknown behavior");
|
3783 |
|
|
break;
|
3784 |
|
|
}
|
3785 |
|
|
|
3786 |
|
|
if(behavior != EXCEPTION_DEFAULT) {
|
3787 |
|
|
r = thread_set_state(thread,flavor,thread_state,thread_state_count);
|
3788 |
|
|
if(r != KERN_SUCCESS)
|
3789 |
|
|
ABORT("thread_set_state failed in forward_exception");
|
3790 |
|
|
}
|
3791 |
|
|
|
3792 |
|
|
return r;
|
3793 |
|
|
}
|
3794 |
|
|
|
3795 |
|
|
#define FWD() GC_forward_exception(thread,task,exception,code,code_count)
|
3796 |
|
|
|
3797 |
|
|
/* This violates the namespace rules but there isn't anything that can be done
|
3798 |
|
|
about it. The exception handling stuff is hard coded to call this */
|
3799 |
|
|
kern_return_t
|
3800 |
|
|
catch_exception_raise(
|
3801 |
|
|
mach_port_t exception_port,mach_port_t thread,mach_port_t task,
|
3802 |
|
|
exception_type_t exception,exception_data_t code,
|
3803 |
|
|
mach_msg_type_number_t code_count
|
3804 |
|
|
) {
|
3805 |
|
|
kern_return_t r;
|
3806 |
|
|
char *addr;
|
3807 |
|
|
struct hblk *h;
|
3808 |
|
|
int i;
|
3809 |
|
|
# if defined(POWERPC)
|
3810 |
|
|
# if CPP_WORDSZ == 32
|
3811 |
|
|
thread_state_flavor_t flavor = PPC_EXCEPTION_STATE;
|
3812 |
|
|
mach_msg_type_number_t exc_state_count = PPC_EXCEPTION_STATE_COUNT;
|
3813 |
|
|
ppc_exception_state_t exc_state;
|
3814 |
|
|
# else
|
3815 |
|
|
thread_state_flavor_t flavor = PPC_EXCEPTION_STATE64;
|
3816 |
|
|
mach_msg_type_number_t exc_state_count = PPC_EXCEPTION_STATE64_COUNT;
|
3817 |
|
|
ppc_exception_state64_t exc_state;
|
3818 |
|
|
# endif
|
3819 |
|
|
# elif defined(I386) || defined(X86_64)
|
3820 |
|
|
# if CPP_WORDSZ == 32
|
3821 |
|
|
thread_state_flavor_t flavor = x86_EXCEPTION_STATE32;
|
3822 |
|
|
mach_msg_type_number_t exc_state_count = x86_EXCEPTION_STATE32_COUNT;
|
3823 |
|
|
x86_exception_state32_t exc_state;
|
3824 |
|
|
# else
|
3825 |
|
|
thread_state_flavor_t flavor = x86_EXCEPTION_STATE64;
|
3826 |
|
|
mach_msg_type_number_t exc_state_count = x86_EXCEPTION_STATE64_COUNT;
|
3827 |
|
|
x86_exception_state64_t exc_state;
|
3828 |
|
|
# endif
|
3829 |
|
|
# else
|
3830 |
|
|
# error FIXME for non-ppc darwin
|
3831 |
|
|
# endif
|
3832 |
|
|
|
3833 |
|
|
|
3834 |
|
|
if(exception != EXC_BAD_ACCESS || code[0] != KERN_PROTECTION_FAILURE) {
|
3835 |
|
|
#ifdef DEBUG_EXCEPTION_HANDLING
|
3836 |
|
|
/* We aren't interested, pass it on to the old handler */
|
3837 |
|
|
GC_printf3("Exception: 0x%x Code: 0x%x 0x%x in catch....\n",
|
3838 |
|
|
exception,
|
3839 |
|
|
code_count > 0 ? code[0] : -1,
|
3840 |
|
|
code_count > 1 ? code[1] : -1);
|
3841 |
|
|
#endif
|
3842 |
|
|
return FWD();
|
3843 |
|
|
}
|
3844 |
|
|
|
3845 |
|
|
r = thread_get_state(thread,flavor,
|
3846 |
|
|
(natural_t*)&exc_state,&exc_state_count);
|
3847 |
|
|
if(r != KERN_SUCCESS) {
|
3848 |
|
|
/* The thread is supposed to be suspended while the exception handler
|
3849 |
|
|
is called. This shouldn't fail. */
|
3850 |
|
|
#ifdef BROKEN_EXCEPTION_HANDLING
|
3851 |
|
|
GC_err_printf0("thread_get_state failed in "
|
3852 |
|
|
"catch_exception_raise\n");
|
3853 |
|
|
return KERN_SUCCESS;
|
3854 |
|
|
#else
|
3855 |
|
|
ABORT("thread_get_state failed in catch_exception_raise");
|
3856 |
|
|
#endif
|
3857 |
|
|
}
|
3858 |
|
|
|
3859 |
|
|
/* This is the address that caused the fault */
|
3860 |
|
|
#if defined(POWERPC)
|
3861 |
|
|
addr = (char*) exc_state. THREAD_FLD(dar);
|
3862 |
|
|
#elif defined (I386) || defined (X86_64)
|
3863 |
|
|
addr = (char*) exc_state. THREAD_FLD(faultvaddr);
|
3864 |
|
|
#else
|
3865 |
|
|
# error FIXME for non POWERPC/I386
|
3866 |
|
|
#endif
|
3867 |
|
|
|
3868 |
|
|
if((HDR(addr)) == 0) {
|
3869 |
|
|
/* Ugh... just like the SIGBUS problem above, it seems we get a bogus
|
3870 |
|
|
KERN_PROTECTION_FAILURE every once in a while. We wait till we get
|
3871 |
|
|
a bunch in a row before doing anything about it. If a "real" fault
|
3872 |
|
|
ever occurs it'll just keep faulting over and over and we'll hit
|
3873 |
|
|
the limit pretty quickly. */
|
3874 |
|
|
#ifdef BROKEN_EXCEPTION_HANDLING
|
3875 |
|
|
static char *last_fault;
|
3876 |
|
|
static int last_fault_count;
|
3877 |
|
|
|
3878 |
|
|
if(addr != last_fault) {
|
3879 |
|
|
last_fault = addr;
|
3880 |
|
|
last_fault_count = 0;
|
3881 |
|
|
}
|
3882 |
|
|
if(++last_fault_count < 32) {
|
3883 |
|
|
if(last_fault_count == 1)
|
3884 |
|
|
GC_err_printf1(
|
3885 |
|
|
"GC: WARNING: Ignoring KERN_PROTECTION_FAILURE at %p\n",
|
3886 |
|
|
addr);
|
3887 |
|
|
return KERN_SUCCESS;
|
3888 |
|
|
}
|
3889 |
|
|
|
3890 |
|
|
GC_err_printf1("Unexpected KERN_PROTECTION_FAILURE at %p\n",addr);
|
3891 |
|
|
/* Can't pass it along to the signal handler because that is
|
3892 |
|
|
ignoring SIGBUS signals. We also shouldn't call ABORT here as
|
3893 |
|
|
signals don't always work too well from the exception handler. */
|
3894 |
|
|
GC_err_printf0("Aborting\n");
|
3895 |
|
|
exit(EXIT_FAILURE);
|
3896 |
|
|
#else /* BROKEN_EXCEPTION_HANDLING */
|
3897 |
|
|
/* Pass it along to the next exception handler
|
3898 |
|
|
(which should call SIGBUS/SIGSEGV) */
|
3899 |
|
|
return FWD();
|
3900 |
|
|
#endif /* !BROKEN_EXCEPTION_HANDLING */
|
3901 |
|
|
}
|
3902 |
|
|
|
3903 |
|
|
#ifdef BROKEN_EXCEPTION_HANDLING
|
3904 |
|
|
/* Reset the number of consecutive SIGBUSs */
|
3905 |
|
|
GC_sigbus_count = 0;
|
3906 |
|
|
#endif
|
3907 |
|
|
|
3908 |
|
|
if(GC_mprotect_state == GC_MP_NORMAL) { /* common case */
|
3909 |
|
|
h = (struct hblk*)((word)addr & ~(GC_page_size-1));
|
3910 |
|
|
UNPROTECT(h, GC_page_size);
|
3911 |
|
|
for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
|
3912 |
|
|
register int index = PHT_HASH(h+i);
|
3913 |
|
|
async_set_pht_entry_from_index(GC_dirty_pages, index);
|
3914 |
|
|
}
|
3915 |
|
|
} else if(GC_mprotect_state == GC_MP_DISCARDING) {
|
3916 |
|
|
/* Lie to the thread for now. No sense UNPROTECT()ing the memory
|
3917 |
|
|
when we're just going to PROTECT() it again later. The thread
|
3918 |
|
|
will just fault again once it resumes */
|
3919 |
|
|
} else {
|
3920 |
|
|
/* Shouldn't happen, I don't think */
|
3921 |
|
|
GC_printf0("KERN_PROTECTION_FAILURE while world is stopped\n");
|
3922 |
|
|
return FWD();
|
3923 |
|
|
}
|
3924 |
|
|
return KERN_SUCCESS;
|
3925 |
|
|
}
|
3926 |
|
|
#undef FWD
|
3927 |
|
|
|
3928 |
|
|
/* These should never be called, but just in case... */
|
3929 |
|
|
kern_return_t catch_exception_raise_state(mach_port_name_t exception_port,
|
3930 |
|
|
int exception, exception_data_t code, mach_msg_type_number_t codeCnt,
|
3931 |
|
|
int flavor, thread_state_t old_state, int old_stateCnt,
|
3932 |
|
|
thread_state_t new_state, int new_stateCnt)
|
3933 |
|
|
{
|
3934 |
|
|
ABORT("catch_exception_raise_state");
|
3935 |
|
|
return(KERN_INVALID_ARGUMENT);
|
3936 |
|
|
}
|
3937 |
|
|
kern_return_t catch_exception_raise_state_identity(
|
3938 |
|
|
mach_port_name_t exception_port, mach_port_t thread, mach_port_t task,
|
3939 |
|
|
int exception, exception_data_t code, mach_msg_type_number_t codeCnt,
|
3940 |
|
|
int flavor, thread_state_t old_state, int old_stateCnt,
|
3941 |
|
|
thread_state_t new_state, int new_stateCnt)
|
3942 |
|
|
{
|
3943 |
|
|
ABORT("catch_exception_raise_state_identity");
|
3944 |
|
|
return(KERN_INVALID_ARGUMENT);
|
3945 |
|
|
}
|
3946 |
|
|
|
3947 |
|
|
|
3948 |
|
|
#endif /* DARWIN && MPROTECT_VDB */
|
3949 |
|
|
|
3950 |
|
|
# ifndef HAVE_INCREMENTAL_PROTECTION_NEEDS
|
3951 |
|
|
int GC_incremental_protection_needs()
|
3952 |
|
|
{
|
3953 |
|
|
return GC_PROTECTS_NONE;
|
3954 |
|
|
}
|
3955 |
|
|
# endif /* !HAVE_INCREMENTAL_PROTECTION_NEEDS */
|
3956 |
|
|
|
3957 |
|
|
/*
|
3958 |
|
|
* Call stack save code for debugging.
|
3959 |
|
|
* Should probably be in mach_dep.c, but that requires reorganization.
|
3960 |
|
|
*/
|
3961 |
|
|
|
3962 |
|
|
/* I suspect the following works for most X86 *nix variants, so */
|
3963 |
|
|
/* long as the frame pointer is explicitly stored. In the case of gcc, */
|
3964 |
|
|
/* compiler flags (e.g. -fomit-frame-pointer) determine whether it is. */
|
3965 |
|
|
#if defined(I386) && defined(LINUX) && defined(SAVE_CALL_CHAIN)
|
3966 |
|
|
# include <features.h>
|
3967 |
|
|
|
3968 |
|
|
struct frame {
|
3969 |
|
|
struct frame *fr_savfp;
|
3970 |
|
|
long fr_savpc;
|
3971 |
|
|
long fr_arg[NARGS]; /* All the arguments go here. */
|
3972 |
|
|
};
|
3973 |
|
|
#endif
|
3974 |
|
|
|
3975 |
|
|
#if defined(SPARC)
|
3976 |
|
|
# if defined(LINUX)
|
3977 |
|
|
# include <features.h>
|
3978 |
|
|
|
3979 |
|
|
struct frame {
|
3980 |
|
|
long fr_local[8];
|
3981 |
|
|
long fr_arg[6];
|
3982 |
|
|
struct frame *fr_savfp;
|
3983 |
|
|
long fr_savpc;
|
3984 |
|
|
# ifndef __arch64__
|
3985 |
|
|
char *fr_stret;
|
3986 |
|
|
# endif
|
3987 |
|
|
long fr_argd[6];
|
3988 |
|
|
long fr_argx[0];
|
3989 |
|
|
};
|
3990 |
|
|
# else
|
3991 |
|
|
# if defined(SUNOS4)
|
3992 |
|
|
# include <machine/frame.h>
|
3993 |
|
|
# else
|
3994 |
|
|
# if defined (DRSNX)
|
3995 |
|
|
# include <sys/sparc/frame.h>
|
3996 |
|
|
# else
|
3997 |
|
|
# if defined(OPENBSD)
|
3998 |
|
|
# include <frame.h>
|
3999 |
|
|
# else
|
4000 |
|
|
# if defined(FREEBSD) || defined(NETBSD)
|
4001 |
|
|
# include <machine/frame.h>
|
4002 |
|
|
# else
|
4003 |
|
|
# include <sys/frame.h>
|
4004 |
|
|
# endif
|
4005 |
|
|
# endif
|
4006 |
|
|
# endif
|
4007 |
|
|
# endif
|
4008 |
|
|
# endif
|
4009 |
|
|
# if NARGS > 6
|
4010 |
|
|
--> We only know how to get the first 6 arguments
|
4011 |
|
|
# endif
|
4012 |
|
|
#endif /* SPARC */
|
4013 |
|
|
|
4014 |
|
|
#ifdef NEED_CALLINFO
|
4015 |
|
|
/* Fill in the pc and argument information for up to NFRAMES of my */
|
4016 |
|
|
/* callers. Ignore my frame and my caller's frame. */
|
4017 |
|
|
|
4018 |
|
|
#ifdef LINUX
|
4019 |
|
|
# include <unistd.h>
|
4020 |
|
|
#endif
|
4021 |
|
|
|
4022 |
|
|
#endif /* NEED_CALLINFO */
|
4023 |
|
|
|
4024 |
|
|
#if defined(GC_HAVE_BUILTIN_BACKTRACE)
|
4025 |
|
|
# include <execinfo.h>
|
4026 |
|
|
#endif
|
4027 |
|
|
|
4028 |
|
|
#ifdef SAVE_CALL_CHAIN
|
4029 |
|
|
|
4030 |
|
|
#if NARGS == 0 && NFRAMES % 2 == 0 /* No padding */ \
|
4031 |
|
|
&& defined(GC_HAVE_BUILTIN_BACKTRACE)
|
4032 |
|
|
|
4033 |
|
|
#ifdef REDIRECT_MALLOC
|
4034 |
|
|
/* Deal with possible malloc calls in backtrace by omitting */
|
4035 |
|
|
/* the infinitely recursing backtrace. */
|
4036 |
|
|
# ifdef THREADS
|
4037 |
|
|
__thread /* If your compiler doesn't understand this */
|
4038 |
|
|
/* you could use something like pthread_getspecific. */
|
4039 |
|
|
# endif
|
4040 |
|
|
GC_bool GC_in_save_callers = FALSE;
|
4041 |
|
|
#endif
|
4042 |
|
|
|
4043 |
|
|
void GC_save_callers (info)
|
4044 |
|
|
struct callinfo info[NFRAMES];
|
4045 |
|
|
{
|
4046 |
|
|
void * tmp_info[NFRAMES + 1];
|
4047 |
|
|
int npcs, i;
|
4048 |
|
|
# define IGNORE_FRAMES 1
|
4049 |
|
|
|
4050 |
|
|
/* We retrieve NFRAMES+1 pc values, but discard the first, since it */
|
4051 |
|
|
/* points to our own frame. */
|
4052 |
|
|
# ifdef REDIRECT_MALLOC
|
4053 |
|
|
if (GC_in_save_callers) {
|
4054 |
|
|
info[0].ci_pc = (word)(&GC_save_callers);
|
4055 |
|
|
for (i = 1; i < NFRAMES; ++i) info[i].ci_pc = 0;
|
4056 |
|
|
return;
|
4057 |
|
|
}
|
4058 |
|
|
GC_in_save_callers = TRUE;
|
4059 |
|
|
# endif
|
4060 |
|
|
GC_ASSERT(sizeof(struct callinfo) == sizeof(void *));
|
4061 |
|
|
npcs = backtrace((void **)tmp_info, NFRAMES + IGNORE_FRAMES);
|
4062 |
|
|
BCOPY(tmp_info+IGNORE_FRAMES, info, (npcs - IGNORE_FRAMES) * sizeof(void *));
|
4063 |
|
|
for (i = npcs - IGNORE_FRAMES; i < NFRAMES; ++i) info[i].ci_pc = 0;
|
4064 |
|
|
# ifdef REDIRECT_MALLOC
|
4065 |
|
|
GC_in_save_callers = FALSE;
|
4066 |
|
|
# endif
|
4067 |
|
|
}
|
4068 |
|
|
|
4069 |
|
|
#else /* No builtin backtrace; do it ourselves */
|
4070 |
|
|
|
4071 |
|
|
#if (defined(OPENBSD) || defined(NETBSD) || defined(FREEBSD)) && defined(SPARC)
|
4072 |
|
|
# define FR_SAVFP fr_fp
|
4073 |
|
|
# define FR_SAVPC fr_pc
|
4074 |
|
|
#else
|
4075 |
|
|
# define FR_SAVFP fr_savfp
|
4076 |
|
|
# define FR_SAVPC fr_savpc
|
4077 |
|
|
#endif
|
4078 |
|
|
|
4079 |
|
|
#if defined(SPARC) && (defined(__arch64__) || defined(__sparcv9))
|
4080 |
|
|
# define BIAS 2047
|
4081 |
|
|
#else
|
4082 |
|
|
# define BIAS 0
|
4083 |
|
|
#endif
|
4084 |
|
|
|
4085 |
|
|
void GC_save_callers (info)
|
4086 |
|
|
struct callinfo info[NFRAMES];
|
4087 |
|
|
{
|
4088 |
|
|
struct frame *frame;
|
4089 |
|
|
struct frame *fp;
|
4090 |
|
|
int nframes = 0;
|
4091 |
|
|
# ifdef I386
|
4092 |
|
|
/* We assume this is turned on only with gcc as the compiler. */
|
4093 |
|
|
asm("movl %%ebp,%0" : "=r"(frame));
|
4094 |
|
|
fp = frame;
|
4095 |
|
|
# else
|
4096 |
|
|
frame = (struct frame *) GC_save_regs_in_stack ();
|
4097 |
|
|
fp = (struct frame *)((long) frame -> FR_SAVFP + BIAS);
|
4098 |
|
|
#endif
|
4099 |
|
|
|
4100 |
|
|
for (; (!(fp HOTTER_THAN frame) && !(GC_stackbottom HOTTER_THAN (ptr_t)fp)
|
4101 |
|
|
&& (nframes < NFRAMES));
|
4102 |
|
|
fp = (struct frame *)((long) fp -> FR_SAVFP + BIAS), nframes++) {
|
4103 |
|
|
register int i;
|
4104 |
|
|
|
4105 |
|
|
info[nframes].ci_pc = fp->FR_SAVPC;
|
4106 |
|
|
# if NARGS > 0
|
4107 |
|
|
for (i = 0; i < NARGS; i++) {
|
4108 |
|
|
info[nframes].ci_arg[i] = ~(fp->fr_arg[i]);
|
4109 |
|
|
}
|
4110 |
|
|
# endif /* NARGS > 0 */
|
4111 |
|
|
}
|
4112 |
|
|
if (nframes < NFRAMES) info[nframes].ci_pc = 0;
|
4113 |
|
|
}
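/* The walk stops when the saved frame pointer leaves the range between the */
/* current frame and GC_stackbottom, or when NFRAMES entries have been      */
/* recorded.  BIAS compensates for the 2047 byte stack bias that 64-bit     */
/* SPARC applies to %fp.                                                    */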
|
4114 |
|
|
|
4115 |
|
|
#endif /* No builtin backtrace */
|
4116 |
|
|
|
4117 |
|
|
#endif /* SAVE_CALL_CHAIN */
|
4118 |
|
|
|
4119 |
|
|
#ifdef NEED_CALLINFO
|
4120 |
|
|
|
4121 |
|
|
/* Print info to stderr. We do NOT hold the allocation lock */
|
4122 |
|
|
void GC_print_callers (info)
|
4123 |
|
|
struct callinfo info[NFRAMES];
|
4124 |
|
|
{
|
4125 |
|
|
register int i;
|
4126 |
|
|
static int reentry_count = 0;
|
4127 |
|
|
GC_bool stop = FALSE;
|
4128 |
|
|
|
4129 |
|
|
/* FIXME: This should probably use a different lock, so that we */
|
4130 |
|
|
/* become callable with or without the allocation lock. */
|
4131 |
|
|
LOCK();
|
4132 |
|
|
++reentry_count;
|
4133 |
|
|
UNLOCK();
|
4134 |
|
|
|
4135 |
|
|
# if NFRAMES == 1
|
4136 |
|
|
GC_err_printf0("\tCaller at allocation:\n");
|
4137 |
|
|
# else
|
4138 |
|
|
GC_err_printf0("\tCall chain at allocation:\n");
|
4139 |
|
|
# endif
|
4140 |
|
|
for (i = 0; i < NFRAMES && !stop ; i++) {
|
4141 |
|
|
if (info[i].ci_pc == 0) break;
|
4142 |
|
|
# if NARGS > 0
|
4143 |
|
|
{
|
4144 |
|
|
int j;
|
4145 |
|
|
|
4146 |
|
|
GC_err_printf0("\t\targs: ");
|
4147 |
|
|
for (j = 0; j < NARGS; j++) {
|
4148 |
|
|
if (j != 0) GC_err_printf0(", ");
|
4149 |
|
|
GC_err_printf2("%d (0x%X)", ~(info[i].ci_arg[j]),
|
4150 |
|
|
~(info[i].ci_arg[j]));
|
4151 |
|
|
}
|
4152 |
|
|
GC_err_printf0("\n");
|
4153 |
|
|
}
|
4154 |
|
|
# endif
|
4155 |
|
|
if (reentry_count > 1) {
|
4156 |
|
|
/* We were called during an allocation during */
|
4157 |
|
|
/* a previous GC_print_callers call; punt. */
|
4158 |
|
|
GC_err_printf1("\t\t##PC##= 0x%lx\n", info[i].ci_pc);
|
4159 |
|
|
continue;
|
4160 |
|
|
}
|
4161 |
|
|
{
|
4162 |
|
|
# ifdef LINUX
|
4163 |
|
|
FILE *pipe;
|
4164 |
|
|
# endif
|
4165 |
|
|
# if defined(GC_HAVE_BUILTIN_BACKTRACE) \
|
4166 |
|
|
&& !defined(GC_BACKTRACE_SYMBOLS_BROKEN)
|
4167 |
|
|
char **sym_name =
|
4168 |
|
|
backtrace_symbols((void **)(&(info[i].ci_pc)), 1);
|
4169 |
|
|
char *name = sym_name[0];
|
4170 |
|
|
# else
|
4171 |
|
|
char buf[40];
|
4172 |
|
|
char *name = buf;
|
4173 |
|
|
sprintf(buf, "##PC##= 0x%lx", info[i].ci_pc);
|
4174 |
|
|
# endif
|
4175 |
|
|
# if defined(LINUX) && !defined(SMALL_CONFIG)
|
4176 |
|
|
/* Try for a line number. */
|
4177 |
|
|
{
|
4178 |
|
|
# define EXE_SZ 100
|
4179 |
|
|
static char exe_name[EXE_SZ];
|
4180 |
|
|
# define CMD_SZ 200
|
4181 |
|
|
char cmd_buf[CMD_SZ];
|
4182 |
|
|
# define RESULT_SZ 200
|
4183 |
|
|
static char result_buf[RESULT_SZ];
|
4184 |
|
|
size_t result_len;
|
4185 |
|
|
char *old_preload;
|
4186 |
|
|
# define PRELOAD_SZ 200
|
4187 |
|
|
char preload_buf[PRELOAD_SZ];
|
4188 |
|
|
static GC_bool found_exe_name = FALSE;
|
4189 |
|
|
static GC_bool will_fail = FALSE;
|
4190 |
|
|
int ret_code;
|
4191 |
|
|
/* Try to get it via a hairy and expensive scheme. */
|
4192 |
|
|
/* First we get the name of the executable: */
|
4193 |
|
|
if (will_fail) goto out;
|
4194 |
|
|
if (!found_exe_name) {
|
4195 |
|
|
ret_code = readlink("/proc/self/exe", exe_name, EXE_SZ);
|
4196 |
|
|
if (ret_code < 0 || ret_code >= EXE_SZ
|
4197 |
|
|
|| exe_name[0] != '/') {
|
4198 |
|
|
will_fail = TRUE; /* Don't try again. */
|
4199 |
|
|
goto out;
|
4200 |
|
|
}
|
4201 |
|
|
exe_name[ret_code] = '\0';
|
4202 |
|
|
found_exe_name = TRUE;
|
4203 |
|
|
}
|
4204 |
|
|
/* Then we use popen to start addr2line -e <exe> <addr> */
|
4205 |
|
|
/* There are faster ways to do this, but hopefully this */
|
4206 |
|
|
/* isn't time critical. */
|
4207 |
|
|
sprintf(cmd_buf, "/usr/bin/addr2line -f -e %s 0x%lx", exe_name,
|
4208 |
|
|
(unsigned long)info[i].ci_pc);
|
4209 |
|
|
old_preload = getenv ("LD_PRELOAD");
|
4210 |
|
|
if (0 != old_preload) {
|
4211 |
|
|
if (strlen (old_preload) >= PRELOAD_SZ) {
|
4212 |
|
|
will_fail = TRUE;
|
4213 |
|
|
goto out;
|
4214 |
|
|
}
|
4215 |
|
|
strcpy (preload_buf, old_preload);
|
4216 |
|
|
unsetenv ("LD_PRELOAD");
|
4217 |
|
|
}
|
4218 |
|
|
pipe = popen(cmd_buf, "r");
|
4219 |
|
|
if (0 != old_preload
|
4220 |
|
|
&& 0 != setenv ("LD_PRELOAD", preload_buf, 0)) {
|
4221 |
|
|
WARN("Failed to reset LD_PRELOAD\n", 0);
|
4222 |
|
|
}
|
4223 |
|
|
if (pipe == NULL
|
4224 |
|
|
|| (result_len = fread(result_buf, 1, RESULT_SZ - 1, pipe))
|
4225 |
|
|
== 0) {
|
4226 |
|
|
if (pipe != NULL) pclose(pipe);
|
4227 |
|
|
will_fail = TRUE;
|
4228 |
|
|
goto out;
|
4229 |
|
|
}
|
4230 |
|
|
if (result_buf[result_len - 1] == '\n') --result_len;
|
4231 |
|
|
result_buf[result_len] = 0;
|
4232 |
|
|
if (result_buf[0] == '?'
|
4233 |
|
|
|| result_buf[result_len-2] == ':'
|
4234 |
|
|
&& result_buf[result_len-1] == '0') {
|
4235 |
|
|
pclose(pipe);
|
4236 |
|
|
goto out;
|
4237 |
|
|
}
|
4238 |
|
|
/* Get rid of embedded newline, if any. Test for "main" */
|
4239 |
|
|
{
|
4240 |
|
|
char * nl = strchr(result_buf, '\n');
|
4241 |
|
|
if (nl != NULL && nl < result_buf + result_len) {
|
4242 |
|
|
*nl = ':';
|
4243 |
|
|
}
|
4244 |
|
|
if (strncmp(result_buf, "main", nl - result_buf) == 0) {
|
4245 |
|
|
stop = TRUE;
|
4246 |
|
|
}
|
4247 |
|
|
}
|
4248 |
|
|
if (result_len < RESULT_SZ - 25) {
|
4249 |
|
|
/* Add in hex address */
|
4250 |
|
|
sprintf(result_buf + result_len, " [0x%lx]",
|
4251 |
|
|
(unsigned long)info[i].ci_pc);
|
4252 |
|
|
}
|
4253 |
|
|
name = result_buf;
|
4254 |
|
|
pclose(pipe);
|
4255 |
|
|
out:;
|
4256 |
|
|
}
|
4257 |
|
|
# endif /* LINUX */
|
4258 |
|
|
GC_err_printf1("\t\t%s\n", name);
|
4259 |
|
|
# if defined(GC_HAVE_BUILTIN_BACKTRACE) \
|
4260 |
|
|
&& !defined(GC_BACKTRACE_SYMBOLS_BROKEN)
|
4261 |
|
|
free(sym_name); /* May call GC_free; that's OK */
|
4262 |
|
|
# endif
|
4263 |
|
|
}
|
4264 |
|
|
}
|
4265 |
|
|
LOCK();
|
4266 |
|
|
--reentry_count;
|
4267 |
|
|
UNLOCK();
|
4268 |
|
|
}
|
4269 |
|
|
|
4270 |
|
|
#endif /* NEED_CALLINFO */
|
4271 |
|
|
|
4272 |
|
|
|
4273 |
|
|
|
4274 |
|
|
#if defined(LINUX) && defined(__ELF__) && !defined(SMALL_CONFIG)
|
4275 |
|
|
|
4276 |
|
|
/* Dump /proc/self/maps to GC_stderr, to enable looking up names for
|
4277 |
|
|
addresses in FIND_LEAK output. */
|
4278 |
|
|
|
4279 |
|
|
static word dump_maps(char *maps)
|
4280 |
|
|
{
|
4281 |
|
|
GC_err_write(maps, strlen(maps));
|
4282 |
|
|
return 1;
|
4283 |
|
|
}
|
4284 |
|
|
|
4285 |
|
|
void GC_print_address_map()
|
4286 |
|
|
{
|
4287 |
|
|
GC_err_printf0("---------- Begin address map ----------\n");
|
4288 |
|
|
GC_apply_to_maps(dump_maps);
|
4289 |
|
|
GC_err_printf0("---------- End address map ----------\n");
|
4290 |
|
|
}
|
4291 |
|
|
|
4292 |
|
|
#endif
|
4293 |
|
|
|
4294 |
|
|
|