/*
 * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1998 by Fergus Henderson.  All rights reserved.
 * Copyright (c) 2000-2004 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

/*
 * Support code for LinuxThreads, the clone()-based kernel
 * thread package for Linux which is included in libc6.
 *
 * This code relies on implementation details of LinuxThreads
 * (i.e. properties not guaranteed by the Pthread standard),
 * though this version now does less of that than the other Pthreads
 * support code.
 *
 * Note that there is a lot of code duplication between linux_threads.c
 * and thread support for some of the other Posix platforms; any changes
 * made here may need to be reflected there too.
 */
/* DG/UX ix86 support <takis@xfree86.org> */
/*
 * Linux_threads.c now also includes some code to support HPUX and
 * OSF1 (Compaq Tru64 Unix, really).  The OSF1 support is based on
 * Eric Benson's patch.
 *
 * Eric also suggested an alternate basis for a lock implementation in
 * his code:
 * + #elif defined(OSF1)
 * +   unsigned long GC_allocate_lock = 0;
 * +   msemaphore GC_allocate_semaphore;
 * + # define GC_TRY_LOCK() \
 * +   ((msem_lock(&GC_allocate_semaphore, MSEM_IF_NOWAIT) == 0) \
 * +    ? (GC_allocate_lock = 1) \
 * +    : 0)
 * + # define GC_LOCK_TAKEN GC_allocate_lock
 */

/*#define DEBUG_THREADS 1*/
/*#define GC_ASSERTIONS*/

#include "gc_config.h"

#ifdef GC_PTHREAD_SYM_VERSION
#define _GNU_SOURCE
#include <dlfcn.h>
#endif

# include "gc.h"
# include "private/pthread_support.h"

# if defined(GC_PTHREADS) && !defined(GC_SOLARIS_THREADS) \
     && !defined(GC_WIN32_THREADS)

# if defined(GC_HPUX_THREADS) && !defined(USE_PTHREAD_SPECIFIC) \
     && !defined(USE_COMPILER_TLS)
#   ifdef __GNUC__
#     define USE_PTHREAD_SPECIFIC
      /* Empirically, as of gcc 3.3, USE_COMPILER_TLS doesn't work. */
#   else
#     define USE_COMPILER_TLS
#   endif
# endif

# if defined USE_HPUX_TLS
    --> Macro replaced by USE_COMPILER_TLS
# endif

# if (defined(GC_DGUX386_THREADS) || defined(GC_OSF1_THREADS) || \
      defined(GC_DARWIN_THREADS) || defined(GC_AIX_THREADS)) \
     && !defined(USE_PTHREAD_SPECIFIC)
#   define USE_PTHREAD_SPECIFIC
# endif

# if defined(GC_DGUX386_THREADS) && !defined(_POSIX4A_DRAFT10_SOURCE)
#   define _POSIX4A_DRAFT10_SOURCE 1
# endif

# if defined(GC_DGUX386_THREADS) && !defined(_USING_POSIX4A_DRAFT10)
#   define _USING_POSIX4A_DRAFT10 1
# endif

# ifdef THREAD_LOCAL_ALLOC
#   if !defined(USE_PTHREAD_SPECIFIC) && !defined(USE_COMPILER_TLS)
#     include "private/specific.h"
#   endif
#   if defined(USE_PTHREAD_SPECIFIC)
#     define GC_getspecific pthread_getspecific
#     define GC_setspecific pthread_setspecific
#     define GC_key_create pthread_key_create
      typedef pthread_key_t GC_key_t;
#   endif
#   if defined(USE_COMPILER_TLS)
#     define GC_getspecific(x) (x)
#     define GC_setspecific(key, v) ((key) = (v), 0)
#     define GC_key_create(key, d) 0
      typedef void * GC_key_t;
#   endif
# endif
# include <stdlib.h>
# include <pthread.h>
# include <sched.h>
# include <time.h>
# include <errno.h>
# include <unistd.h>
# include <sys/mman.h>
# include <sys/time.h>
# include <sys/types.h>
# include <sys/stat.h>
# include <fcntl.h>
# include <signal.h>

#if defined(GC_DARWIN_THREADS)
# include "private/darwin_semaphore.h"
#else
# include <semaphore.h>
#endif /* !GC_DARWIN_THREADS */

#if defined(GC_DARWIN_THREADS) || defined(GC_FREEBSD_THREADS)
# include <sys/sysctl.h>
#endif /* GC_DARWIN_THREADS */

#if defined(GC_DGUX386_THREADS)
# include <sys/dg_sys_info.h>
# include <sys/_int_psem.h>
  /* sem_t is an uint in DG/UX */
  typedef unsigned int sem_t;
#endif /* GC_DGUX386_THREADS */

#ifndef __GNUC__
#   define __inline__
#endif

#ifdef GC_USE_LD_WRAP
#   define WRAP_FUNC(f) __wrap_##f
#   define REAL_FUNC(f) __real_##f
#else
#   define WRAP_FUNC(f) GC_##f
#   if !defined(GC_DGUX386_THREADS)
#     define REAL_FUNC(f) f
#   else /* GC_DGUX386_THREADS */
#     define REAL_FUNC(f) __d10_##f
#   endif /* GC_DGUX386_THREADS */
#   undef pthread_create
#   if !defined(GC_DARWIN_THREADS)
#     undef pthread_sigmask
#   endif
#   undef pthread_join
#   undef pthread_detach
#   if defined(GC_OSF1_THREADS) && defined(_PTHREAD_USE_MANGLED_NAMES_) \
       && !defined(_PTHREAD_USE_PTDNAM_)
      /* Restore the original mangled names on Tru64 UNIX. */
#     define pthread_create __pthread_create
#     define pthread_join __pthread_join
#     define pthread_detach __pthread_detach
#   endif
#endif
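/* Note: the __wrap_/__real_ names above follow the GNU ld "--wrap"    */
/* convention: when a symbol f is wrapped, undefined references to f   */
/* resolve to __wrap_f, and __real_f refers to the original f.  A      */
/* build using GC_USE_LD_WRAP would therefore pass linker flags along  */
/* the lines of (illustrative, not taken from this file):              */
/*   -Wl,--wrap,pthread_create -Wl,--wrap,pthread_join ...             */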

void GC_thr_init();

static GC_bool parallel_initialized = FALSE;

void GC_init_parallel();

# if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)

/* We don't really support thread-local allocation with DBG_HDRS_ALL */

#ifdef USE_COMPILER_TLS
  __thread
#endif
GC_key_t GC_thread_key;

static GC_bool keys_initialized;

/* Recover the contents of the freelist array fl into the global one gfl. */
/* Note that the indexing scheme differs, in that gfl has finer size      */
/* resolution, even if not all entries are used.                          */
/* We hold the allocator lock.                                            */
static void return_freelists(ptr_t *fl, ptr_t *gfl)
{
    int i;
    ptr_t q, *qptr;
    size_t nwords;

    for (i = 1; i < NFREELISTS; ++i) {
        nwords = i * (GRANULARITY/sizeof(word));
        qptr = fl + i;
        q = *qptr;
        if ((word)q >= HBLKSIZE) {
            if (gfl[nwords] == 0) {
                gfl[nwords] = q;
            } else {
                /* Concatenate: */
                for (; (word)q >= HBLKSIZE; qptr = &(obj_link(q)), q = *qptr);
                GC_ASSERT(0 == q);
                *qptr = gfl[nwords];
                gfl[nwords] = fl[i];
            }
        }
        /* Clear fl[i], since the thread structure may hang around. */
        /* Do it in a way that is likely to trap if we access it.   */
        fl[i] = (ptr_t)HBLKSIZE;
    }
}
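/* A note on the encoding these thread-local free lists rely on (it is */
/* implicit in the tests above and below): an entry whose value is     */
/* >= HBLKSIZE is a real free-list pointer; smaller values are not     */
/* pointers at all.  Entries start out as 1 and then act as small      */
/* allocation counters (see the DIRECT_GRANULES tests below) until     */
/* enough requests of that size have been seen to justify asking       */
/* GC_generic_malloc_many for a real list.                             */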

/* We statically allocate a single "size 0" object.  It is linked to */
/* itself, and is thus repeatedly reused for all size 0 allocation   */
/* requests.  (Size 0 gcj allocation requests are incorrect, and     */
/* we arrange for those to fault asap.)                              */
static ptr_t size_zero_object = (ptr_t)(&size_zero_object);

/* Each thread structure must be initialized.  */
/* This call must be made from the new thread. */
/* Caller holds allocation lock.               */
void GC_init_thread_local(GC_thread p)
{
    int i;

    if (!keys_initialized) {
        if (0 != GC_key_create(&GC_thread_key, 0)) {
            ABORT("Failed to create key for local allocator");
        }
        keys_initialized = TRUE;
    }
    if (0 != GC_setspecific(GC_thread_key, p)) {
        ABORT("Failed to set thread specific allocation pointers");
    }
    for (i = 1; i < NFREELISTS; ++i) {
        p -> ptrfree_freelists[i] = (ptr_t)1;
        p -> normal_freelists[i] = (ptr_t)1;
#       ifdef GC_GCJ_SUPPORT
          p -> gcj_freelists[i] = (ptr_t)1;
#       endif
    }
    /* Set up the size 0 free lists. */
    p -> ptrfree_freelists[0] = (ptr_t)(&size_zero_object);
    p -> normal_freelists[0] = (ptr_t)(&size_zero_object);
#   ifdef GC_GCJ_SUPPORT
        p -> gcj_freelists[0] = (ptr_t)(-1);
#   endif
}

#ifdef GC_GCJ_SUPPORT
  extern ptr_t * GC_gcjobjfreelist;
#endif

/* We hold the allocator lock. */
void GC_destroy_thread_local(GC_thread p)
{
    /* We currently only do this from the thread itself or from */
    /* the fork handler for a child process.                    */
#   ifndef HANDLE_FORK
      GC_ASSERT(GC_getspecific(GC_thread_key) == (void *)p);
#   endif
    return_freelists(p -> ptrfree_freelists, GC_aobjfreelist);
    return_freelists(p -> normal_freelists, GC_objfreelist);
#   ifdef GC_GCJ_SUPPORT
        return_freelists(p -> gcj_freelists, GC_gcjobjfreelist);
#   endif
}

extern GC_PTR GC_generic_malloc_many();

GC_PTR GC_local_malloc(size_t bytes)
{
    if (EXPECT(!SMALL_ENOUGH(bytes), 0)) {
        return(GC_malloc(bytes));
    } else {
        int index = INDEX_FROM_BYTES(bytes);
        ptr_t * my_fl;
        ptr_t my_entry;
#       if defined(REDIRECT_MALLOC) && !defined(USE_PTHREAD_SPECIFIC)
        GC_key_t k = GC_thread_key;
#       endif
        void * tsd;

#       if defined(REDIRECT_MALLOC) && !defined(USE_PTHREAD_SPECIFIC)
            if (EXPECT(0 == k, 0)) {
                /* This can happen if we get called when the world is   */
                /* being initialized.  Whether we can actually complete */
                /* the initialization then is unclear.                  */
                GC_init_parallel();
                k = GC_thread_key;
            }
#       endif
        tsd = GC_getspecific(GC_thread_key);
#       ifdef GC_ASSERTIONS
          LOCK();
          GC_ASSERT(tsd == (void *)GC_lookup_thread(pthread_self()));
          UNLOCK();
#       endif
        my_fl = ((GC_thread)tsd) -> normal_freelists + index;
        my_entry = *my_fl;
        if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
            ptr_t next = obj_link(my_entry);
            GC_PTR result = (GC_PTR)my_entry;
            *my_fl = next;
            obj_link(my_entry) = 0;
            PREFETCH_FOR_WRITE(next);
            return result;
        } else if ((word)my_entry - 1 < DIRECT_GRANULES) {
            *my_fl = my_entry + index + 1;
            return GC_malloc(bytes);
        } else {
            GC_generic_malloc_many(BYTES_FROM_INDEX(index), NORMAL, my_fl);
            if (*my_fl == 0) return GC_oom_fn(bytes);
            return GC_local_malloc(bytes);
        }
    }
}
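/* Illustration only: a sketch of the fast path from a client's        */
/* perspective, assuming THREAD_LOCAL_ALLOC and direct calls to the    */
/* local allocators (clients normally reach them through allocation    */
/* macros instead; GC_local_malloc_atomic is defined just below).      */
#if 0
void * example_alloc_pair(void)
{
    /* Both calls are usually served from this thread's free lists,   */
    /* without taking the global allocation lock.                     */
    void * a = GC_local_malloc(sizeof(double));
    void * b = GC_local_malloc_atomic(64);      /* pointer-free data */
    return (a != 0 && b != 0) ? a : 0;
}
#endif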

GC_PTR GC_local_malloc_atomic(size_t bytes)
{
    if (EXPECT(!SMALL_ENOUGH(bytes), 0)) {
        return(GC_malloc_atomic(bytes));
    } else {
        int index = INDEX_FROM_BYTES(bytes);
        ptr_t * my_fl = ((GC_thread)GC_getspecific(GC_thread_key))
                        -> ptrfree_freelists + index;
        ptr_t my_entry = *my_fl;

        if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
            GC_PTR result = (GC_PTR)my_entry;
            *my_fl = obj_link(my_entry);
            return result;
        } else if ((word)my_entry - 1 < DIRECT_GRANULES) {
            *my_fl = my_entry + index + 1;
            return GC_malloc_atomic(bytes);
        } else {
            GC_generic_malloc_many(BYTES_FROM_INDEX(index), PTRFREE, my_fl);
            /* *my_fl is updated while the collector is excluded;  */
            /* the free list is always visible to the collector as */
            /* such.                                               */
            if (*my_fl == 0) return GC_oom_fn(bytes);
            return GC_local_malloc_atomic(bytes);
        }
    }
}

#ifdef GC_GCJ_SUPPORT

#include "include/gc_gcj.h"

#ifdef GC_ASSERTIONS
  extern GC_bool GC_gcj_malloc_initialized;
#endif

extern int GC_gcj_kind;

GC_PTR GC_local_gcj_malloc(size_t bytes,
                           void * ptr_to_struct_containing_descr)
{
    GC_ASSERT(GC_gcj_malloc_initialized);
    if (EXPECT(!SMALL_ENOUGH(bytes), 0)) {
        return GC_gcj_malloc(bytes, ptr_to_struct_containing_descr);
    } else {
        int index = INDEX_FROM_BYTES(bytes);
        ptr_t * my_fl = ((GC_thread)GC_getspecific(GC_thread_key))
                        -> gcj_freelists + index;
        ptr_t my_entry = *my_fl;
        if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
            GC_PTR result = (GC_PTR)my_entry;
            GC_ASSERT(!GC_incremental);
            /* We assert that any concurrent marker will stop us.    */
            /* Thus it is impossible for a mark procedure to see the */
            /* allocation of the next object, but to see this object */
            /* still containing a free list pointer.  Otherwise the  */
            /* marker might find a random "mark descriptor".         */
            *(volatile ptr_t *)my_fl = obj_link(my_entry);
            /* We must update the freelist before we store the pointer. */
            /* Otherwise a GC at this point would see a corrupted       */
            /* free list.                                               */
            /* A memory barrier is probably never needed, since the     */
            /* action of stopping this thread will cause prior writes   */
            /* to complete.                                             */
            GC_ASSERT(((void * volatile *)result)[1] == 0);
            *(void * volatile *)result = ptr_to_struct_containing_descr;
            return result;
        } else if ((word)my_entry - 1 < DIRECT_GRANULES) {
            if (!GC_incremental) *my_fl = my_entry + index + 1;
                /* In the incremental case, we always have to take this */
                /* path.  Thus we leave the counter alone.              */
            return GC_gcj_malloc(bytes, ptr_to_struct_containing_descr);
        } else {
            GC_generic_malloc_many(BYTES_FROM_INDEX(index), GC_gcj_kind, my_fl);
            if (*my_fl == 0) return GC_oom_fn(bytes);
            return GC_local_gcj_malloc(bytes, ptr_to_struct_containing_descr);
        }
    }
}

#endif /* GC_GCJ_SUPPORT */

# else  /* !THREAD_LOCAL_ALLOC && !DBG_HDRS_ALL */

#   define GC_destroy_thread_local(t)

# endif /* !THREAD_LOCAL_ALLOC */

#if 0
/*
To make sure that we're using LinuxThreads and not some other thread
package, we generate a dummy reference to `pthread_kill_other_threads_np'
(was `__pthread_initial_thread_bos' but that disappeared),
which is a symbol defined in LinuxThreads, but (hopefully) not in other
thread packages.

We no longer do this, since this code is now portable enough that it might
actually work for something else.
*/
void (*dummy_var_to_force_linux_threads)() = pthread_kill_other_threads_np;
#endif /* 0 */

long GC_nprocs = 1;     /* Number of processors.  We may not have     */
                        /* access to all of them, but this is as good */
                        /* a guess as any ...                         */

#ifdef PARALLEL_MARK

# ifndef MAX_MARKERS
#   define MAX_MARKERS 16
# endif

static ptr_t marker_sp[MAX_MARKERS] = {0};

void * GC_mark_thread(void * id)
{
  word my_mark_no = 0;

  marker_sp[(word)id] = GC_approx_sp();
  for (;; ++my_mark_no) {
    /* GC_mark_no is passed only to allow GC_help_marker to terminate   */
    /* promptly.  This is important if it were called from the signal   */
    /* handler or from the GC lock acquisition code.  Under Linux, it's */
    /* not safe to call it from a signal handler, since it uses mutexes */
    /* and condition variables.  Since it is called only here, the      */
    /* argument is unnecessary.                                         */
    if (my_mark_no < GC_mark_no || my_mark_no > GC_mark_no + 2) {
        /* resynchronize if we get far off, e.g. because GC_mark_no */
        /* wrapped.                                                 */
        my_mark_no = GC_mark_no;
    }
#   ifdef DEBUG_THREADS
        GC_printf1("Starting mark helper for mark number %ld\n", my_mark_no);
#   endif
    GC_help_marker(my_mark_no);
  }
}

extern long GC_markers;         /* Number of mark threads we would */
                                /* like to have.  Includes the     */
                                /* initiating thread.              */

pthread_t GC_mark_threads[MAX_MARKERS];

#define PTHREAD_CREATE REAL_FUNC(pthread_create)

static void start_mark_threads()
{
    unsigned i;
    pthread_attr_t attr;

    if (GC_markers > MAX_MARKERS) {
        WARN("Limiting number of mark threads\n", 0);
        GC_markers = MAX_MARKERS;
    }
    if (0 != pthread_attr_init(&attr)) ABORT("pthread_attr_init failed");

    if (0 != pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED))
        ABORT("pthread_attr_setdetachstate failed");

#   if defined(HPUX) || defined(GC_DGUX386_THREADS)
      /* Default stack size is usually too small: fix it. */
      /* Otherwise marker threads or GC may run out of    */
      /* space.                                           */
#     define MIN_STACK_SIZE (8*HBLKSIZE*sizeof(word))
      {
        size_t old_size;
        int code;

        if (pthread_attr_getstacksize(&attr, &old_size) != 0)
          ABORT("pthread_attr_getstacksize failed\n");
        if (old_size < MIN_STACK_SIZE) {
          if (pthread_attr_setstacksize(&attr, MIN_STACK_SIZE) != 0)
            ABORT("pthread_attr_setstacksize failed\n");
        }
      }
#   endif /* HPUX || GC_DGUX386_THREADS */
#   ifdef CONDPRINT
      if (GC_print_stats) {
        GC_printf1("Starting %ld marker threads\n", GC_markers - 1);
      }
#   endif
    for (i = 0; i < GC_markers - 1; ++i) {
      if (0 != PTHREAD_CREATE(GC_mark_threads + i, &attr,
                              GC_mark_thread, (void *)(word)i)) {
        WARN("Marker thread creation failed, errno = %ld.\n", errno);
      }
    }
}

#else  /* !PARALLEL_MARK */

static __inline__ void start_mark_threads()
{
}

#endif /* !PARALLEL_MARK */

GC_bool GC_thr_initialized = FALSE;

volatile GC_thread GC_threads[THREAD_TABLE_SZ];

void GC_push_thread_structures GC_PROTO((void))
{
    GC_push_all((ptr_t)(GC_threads), (ptr_t)(GC_threads)+sizeof(GC_threads));
#   if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
      GC_push_all((ptr_t)(&GC_thread_key),
          (ptr_t)(&GC_thread_key)+sizeof(&GC_thread_key));
#   endif
}

#ifdef THREAD_LOCAL_ALLOC
/* We must explicitly mark ptrfree and gcj free lists, since the free  */
/* list links wouldn't otherwise be found.  We also set them in the    */
/* normal free lists, since that involves touching less memory than if */
/* we scanned them normally.                                           */
void GC_mark_thread_local_free_lists(void)
{
    int i, j;
    GC_thread p;
    ptr_t q;

    for (i = 0; i < THREAD_TABLE_SZ; ++i) {
      for (p = GC_threads[i]; 0 != p; p = p -> next) {
        for (j = 1; j < NFREELISTS; ++j) {
          q = p -> ptrfree_freelists[j];
          if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
          q = p -> normal_freelists[j];
          if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
#         ifdef GC_GCJ_SUPPORT
            q = p -> gcj_freelists[j];
            if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
#         endif /* GC_GCJ_SUPPORT */
        }
      }
    }
}
#endif /* THREAD_LOCAL_ALLOC */

static struct GC_Thread_Rep first_thread;

/* Add a thread to GC_threads.  We assume it wasn't already there. */
/* Caller holds allocation lock.                                   */
GC_thread GC_new_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    GC_thread result;
    static GC_bool first_thread_used = FALSE;

    if (!first_thread_used) {
        result = &first_thread;
        first_thread_used = TRUE;
    } else {
        result = (struct GC_Thread_Rep *)
                 GC_INTERNAL_MALLOC(sizeof(struct GC_Thread_Rep), NORMAL);
    }
    if (result == 0) return(0);
    result -> id = id;
    result -> next = GC_threads[hv];
    GC_threads[hv] = result;
    GC_ASSERT(result -> flags == 0 && result -> thread_blocked == 0);
    return(result);
}

/* Delete a thread from GC_threads.  We assume it is there. */
/* (The code intentionally traps if it wasn't.)             */
/* Caller holds allocation lock.                            */
void GC_delete_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    while (!pthread_equal(p -> id, id)) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }

    if (p != &first_thread)
      GC_INTERNAL_FREE(p);
}

/* If a thread has been joined, but we have not yet        */
/* been notified, then there may be more than one thread   */
/* in the table with the same pthread id.                  */
/* This is OK, but we need a way to delete a specific one. */
void GC_delete_gc_thread(pthread_t id, GC_thread gc_id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    while (p != gc_id) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
    GC_INTERNAL_FREE(p);
}

/* Return a GC_thread corresponding to a given pthread_t. */
/* Returns 0 if it's not there.                           */
/* Caller holds allocation lock or otherwise inhibits     */
/* updates.                                               */
/* If there is more than one thread with the given id we  */
/* return the most recent one.                            */
GC_thread GC_lookup_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];

    while (p != 0 && !pthread_equal(p -> id, id)) p = p -> next;
    return(p);
}
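/* A brief sketch of the data structure maintained above: GC_threads  */
/* is a fixed-size hash table of THREAD_TABLE_SZ buckets, each a      */
/* singly-linked chain of GC_Thread_Rep entries keyed by pthread_t.   */
/* New entries are pushed at the head of their chain, which is why    */
/* GC_lookup_thread's front-to-back scan finds the most recent entry  */
/* for a recycled id first.                                           */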

#ifdef HANDLE_FORK
/* Remove all entries from the GC_threads table, except the     */
/* one for the current thread.  We need to do this in the child */
/* process after a fork(), since only the current thread        */
/* survives in the child.                                       */
void GC_remove_all_threads_but_me(void)
{
    pthread_t self = pthread_self();
    int hv;
    GC_thread p, next, me;

    for (hv = 0; hv < THREAD_TABLE_SZ; ++hv) {
      me = 0;
      for (p = GC_threads[hv]; 0 != p; p = next) {
        next = p -> next;
        if (p -> id == self) {
          me = p;
          p -> next = 0;
        } else {
#         ifdef THREAD_LOCAL_ALLOC
            if (!(p -> flags & FINISHED)) {
              GC_destroy_thread_local(p);
            }
#         endif /* THREAD_LOCAL_ALLOC */
          if (p != &first_thread) GC_INTERNAL_FREE(p);
        }
      }
      GC_threads[hv] = me;
    }
}
#endif /* HANDLE_FORK */

#ifdef USE_PROC_FOR_LIBRARIES
int GC_segment_is_thread_stack(ptr_t lo, ptr_t hi)
{
    int i;
    GC_thread p;

#   ifdef PARALLEL_MARK
      for (i = 0; i < GC_markers; ++i) {
        if (marker_sp[i] > lo && marker_sp[i] < hi) return 1;
      }
#   endif
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (0 != p -> stack_end) {
#         ifdef STACK_GROWS_UP
            if (p -> stack_end >= lo && p -> stack_end < hi) return 1;
#         else /* STACK_GROWS_DOWN */
            if (p -> stack_end > lo && p -> stack_end <= hi) return 1;
#         endif
        }
      }
    }
    return 0;
}
#endif /* USE_PROC_FOR_LIBRARIES */

#ifdef GC_LINUX_THREADS
/* Return the number of processors, or <= 0 if it can't be determined. */
int GC_get_nprocs()
{
    /* Should be "return sysconf(_SC_NPROCESSORS_ONLN);" but that */
    /* appears to be buggy in many cases.                         */
    /* We look for lines "cpu<n>" in /proc/stat.                  */
#   define STAT_BUF_SIZE 4096
#   define STAT_READ read
        /* If read is wrapped, this may need to be redefined to call */
        /* the real one.                                             */
    char stat_buf[STAT_BUF_SIZE];
    int f;
    word result = 1;
        /* Some old kernels only have a single "cpu nnnn ..." */
        /* entry in /proc/stat.  We identify those as         */
        /* uniprocessors.                                     */
    size_t i, len = 0;

    f = open("/proc/stat", O_RDONLY);
    if (f < 0 || (len = STAT_READ(f, stat_buf, STAT_BUF_SIZE)) < 100) {
        WARN("Couldn't read /proc/stat\n", 0);
        return -1;
    }
    for (i = 0; i < len - 100; ++i) {
        if (stat_buf[i] == '\n' && stat_buf[i+1] == 'c'
            && stat_buf[i+2] == 'p' && stat_buf[i+3] == 'u') {
            int cpu_no = atoi(stat_buf + i + 4);
            if (cpu_no >= result) result = cpu_no + 1;
        }
    }
    close(f);
    return result;
}
#endif /* GC_LINUX_THREADS */
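/* Illustration: /proc/stat typically begins with lines like          */
/*     cpu  84282 747 20805 ...                                       */
/*     cpu0 42134 358 10394 ...                                       */
/*     cpu1 42148 389 10411 ...                                       */
/* (the field values here are made up).  The scan above counts the    */
/* numbered "cpu<n>" lines; a lone bare "cpu" line, as produced by    */
/* very old kernels, leaves result at 1.                              */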

/* We hold the GC lock.  Wait until an in-progress GC has finished.  */
/* Repeatedly RELEASES GC LOCK in order to wait.                     */
/* If wait_for_all is true, then we exit with the GC lock held and   */
/* no collection in progress; otherwise we just wait for the current */
/* GC to finish.                                                     */
extern GC_bool GC_collection_in_progress();
void GC_wait_for_gc_completion(GC_bool wait_for_all)
{
    if (GC_incremental && GC_collection_in_progress()) {
        int old_gc_no = GC_gc_no;

        /* Make sure that no part of our stack is still on the mark */
        /* stack, since it's about to be unmapped.                  */
        while (GC_incremental && GC_collection_in_progress()
               && (wait_for_all || old_gc_no == GC_gc_no)) {
            ENTER_GC();
            GC_in_thread_creation = TRUE;
            GC_collect_a_little_inner(1);
            GC_in_thread_creation = FALSE;
            EXIT_GC();
            UNLOCK();
            sched_yield();
            LOCK();
        }
    }
}

#ifdef HANDLE_FORK
/* Procedures called before and after a fork.  The goal here is to make */
/* it safe to call GC_malloc() in a forked child.  It's unclear that is */
/* attainable, since the single UNIX spec seems to imply that one       */
/* should only call async-signal-safe functions, and we probably can't  */
/* quite guarantee that.  But we give it our best shot.  (That same     */
/* spec also implies that it's not safe to call the system malloc       */
/* between fork() and exec().  Thus we're doing no worse than it.)      */

/* Called before a fork() */
void GC_fork_prepare_proc(void)
{
    /* Acquire all relevant locks, so that after releasing the locks */
    /* the child will see a consistent state in which monitor        */
    /* invariants hold.  Unfortunately, we can't acquire libc locks  */
    /* we might need, and there seems to be no guarantee that libc   */
    /* must install a suitable fork handler.                         */
    /* Wait for an ongoing GC to finish, since we can't finish it in */
    /* the (one remaining thread in) the child.                      */
      LOCK();
#     if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
        GC_wait_for_reclaim();
#     endif
      GC_wait_for_gc_completion(TRUE);
#     if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
        GC_acquire_mark_lock();
#     endif
}

/* Called in parent after a fork() */
void GC_fork_parent_proc(void)
{
#   if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
      GC_release_mark_lock();
#   endif
    UNLOCK();
}

/* Called in child after a fork() */
void GC_fork_child_proc(void)
{
    /* Clean up the thread table, so that just our thread is left. */
#   if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
      GC_release_mark_lock();
#   endif
    GC_remove_all_threads_but_me();
#   ifdef PARALLEL_MARK
      /* Turn off parallel marking in the child, since we are probably  */
      /* just going to exec, and we would have to restart mark threads. */
        GC_markers = 1;
        GC_parallel = FALSE;
#   endif /* PARALLEL_MARK */
    UNLOCK();
}
#endif /* HANDLE_FORK */
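/* Note: these three handlers are registered via pthread_atfork() in  */
/* GC_thr_init() below.  The parent and child handlers release, in    */
/* reverse order, exactly the locks the prepare handler acquired, so  */
/* both processes resume with a consistent, unlocked GC state.        */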

#if defined(GC_DGUX386_THREADS)
/* Return the number of processors, or <= 0 if it can't be determined. */
int GC_get_nprocs()
{
    /* <takis@XFree86.Org> */
    int numCpus;
    struct dg_sys_info_pm_info pm_sysinfo;
    int status = 0;

    status = dg_sys_info((long int *) &pm_sysinfo,
        DG_SYS_INFO_PM_INFO_TYPE, DG_SYS_INFO_PM_CURRENT_VERSION);
    if (status < 0)
       /* set -1 for error */
       numCpus = -1;
    else
      /* Active CPUs */
      numCpus = pm_sysinfo.idle_vp_count;

#   ifdef DEBUG_THREADS
      GC_printf1("Number of active CPUs in this system: %d\n", numCpus);
#   endif
    return(numCpus);
}
#endif /* GC_DGUX386_THREADS */

/* We hold the allocation lock. */
void GC_thr_init()
{
#   ifndef GC_DARWIN_THREADS
      int dummy;
#   endif
    GC_thread t;

    if (GC_thr_initialized) return;
    GC_thr_initialized = TRUE;

#   ifdef HANDLE_FORK
      /* Prepare for a possible fork. */
        pthread_atfork(GC_fork_prepare_proc, GC_fork_parent_proc,
                       GC_fork_child_proc);
#   endif /* HANDLE_FORK */
    /* Add the initial thread, so we can stop it. */
      t = GC_new_thread(pthread_self());
#     ifdef GC_DARWIN_THREADS
        t -> stop_info.mach_thread = mach_thread_self();
#     else
        t -> stop_info.stack_ptr = (ptr_t)(&dummy);
#     endif
      t -> flags = DETACHED | MAIN_THREAD;

    GC_stop_init();

    /* Set GC_nprocs. */
      {
        char * nprocs_string = GETENV("GC_NPROCS");
        GC_nprocs = -1;
        if (nprocs_string != NULL) GC_nprocs = atoi(nprocs_string);
      }
    if (GC_nprocs <= 0) {
#     if defined(GC_HPUX_THREADS)
        GC_nprocs = pthread_num_processors_np();
#     endif
#     if defined(GC_OSF1_THREADS) || defined(GC_AIX_THREADS) \
         || defined(GC_SOLARIS_PTHREADS)
        GC_nprocs = sysconf(_SC_NPROCESSORS_ONLN);
        if (GC_nprocs <= 0) GC_nprocs = 1;
#     endif
#     if defined(GC_IRIX_THREADS)
        GC_nprocs = sysconf(_SC_NPROC_ONLN);
        if (GC_nprocs <= 0) GC_nprocs = 1;
#     endif
#     if defined(GC_DARWIN_THREADS) || defined(GC_FREEBSD_THREADS)
        int ncpus = 1;
        size_t len = sizeof(ncpus);
        sysctl((int[2]) {CTL_HW, HW_NCPU}, 2, &ncpus, &len, NULL, 0);
        GC_nprocs = ncpus;
#     endif
#     if defined(GC_LINUX_THREADS) || defined(GC_DGUX386_THREADS)
        GC_nprocs = GC_get_nprocs();
#     endif
    }
    if (GC_nprocs <= 0) {
      WARN("GC_get_nprocs() returned %ld\n", GC_nprocs);
      GC_nprocs = 2;
#     ifdef PARALLEL_MARK
        GC_markers = 1;
#     endif
    } else {
#     ifdef PARALLEL_MARK
        {
          char * markers_string = GETENV("GC_MARKERS");
          if (markers_string != NULL) {
            GC_markers = atoi(markers_string);
          } else {
            GC_markers = GC_nprocs;
          }
        }
#     endif
    }
#   ifdef PARALLEL_MARK
#     ifdef CONDPRINT
        if (GC_print_stats) {
          GC_printf2("Number of processors = %ld, "
                     "number of marker threads = %ld\n",
                     GC_nprocs, GC_markers);
        }
#     endif
      if (GC_markers == 1) {
        GC_parallel = FALSE;
#       ifdef CONDPRINT
          if (GC_print_stats) {
            GC_printf0("Single marker thread, turning off parallel marking\n");
          }
#       endif
      } else {
        GC_parallel = TRUE;
        /* Disable true incremental collection, but generational is OK. */
        GC_time_limit = GC_TIME_UNLIMITED;
      }
      /* If we are using a parallel marker, actually start helper threads. */
      if (GC_parallel) start_mark_threads();
#   endif
}
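/* Usage note (illustrative): the two environment variables read     */
/* above let a client override the defaults at startup, e.g.         */
/*     GC_NPROCS=4 GC_MARKERS=4 ./myprog                              */
/* makes the collector assume 4 processors and run 4 marker threads  */
/* (including the initiating thread).                                 */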

/* Perform all initializations, including those that */
/* may require allocation.                           */
/* Called without allocation lock.                   */
/* Must be called before a second thread is created. */
void GC_init_parallel()
{
    if (parallel_initialized) return;
    parallel_initialized = TRUE;

    /* GC_init() calls us back, so set flag first. */
    if (!GC_is_initialized) GC_init();
    /* Initialize thread local free lists if used. */
#   if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
      LOCK();
      GC_init_thread_local(GC_lookup_thread(pthread_self()));
      UNLOCK();
#   endif
}

#if !defined(GC_DARWIN_THREADS)
int WRAP_FUNC(pthread_sigmask)(int how, const sigset_t *set, sigset_t *oset)
{
    sigset_t fudged_set;

    if (set != NULL && (how == SIG_BLOCK || how == SIG_SETMASK)) {
        fudged_set = *set;
        sigdelset(&fudged_set, SIG_SUSPEND);
        set = &fudged_set;
    }
    return(REAL_FUNC(pthread_sigmask)(how, set, oset));
}
#endif /* !GC_DARWIN_THREADS */
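/* The sigdelset() above keeps client code from blocking SIG_SUSPEND, */
/* the signal this collector uses to stop threads; if a thread could  */
/* mask it, the world could not be stopped reliably.                  */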

/* Wrappers for functions that are likely to block for an appreciable */
/* length of time.  Must be called in pairs, if at all.                */
/* Nothing much beyond the system call itself should be executed      */
/* between these.                                                     */

void GC_start_blocking(void) {
#   define SP_SLOP 128
    GC_thread me;
    LOCK();
    me = GC_lookup_thread(pthread_self());
    GC_ASSERT(!(me -> thread_blocked));
#   ifdef SPARC
        me -> stop_info.stack_ptr = (ptr_t)GC_save_regs_in_stack();
#   else
#   ifndef GC_DARWIN_THREADS
        me -> stop_info.stack_ptr = (ptr_t)GC_approx_sp();
#   endif
#   endif
#   ifdef IA64
        me -> backing_store_ptr = (ptr_t)GC_save_regs_in_stack() + SP_SLOP;
#   endif
    /* Add some slop to the stack pointer, since the wrapped call may */
    /* end up pushing more callee-save registers.                     */
#   ifndef GC_DARWIN_THREADS
#   ifdef STACK_GROWS_UP
        me -> stop_info.stack_ptr += SP_SLOP;
#   else
        me -> stop_info.stack_ptr -= SP_SLOP;
#   endif
#   endif
    me -> thread_blocked = TRUE;
    UNLOCK();
}

void GC_end_blocking(void) {
    GC_thread me;
    LOCK();   /* This will block if the world is stopped. */
    me = GC_lookup_thread(pthread_self());
    GC_ASSERT(me -> thread_blocked);
    me -> thread_blocked = FALSE;
    UNLOCK();
}

#if defined(GC_DGUX386_THREADS)
#define __d10_sleep sleep
#endif /* GC_DGUX386_THREADS */

/* A wrapper for the standard C sleep function */
int WRAP_FUNC(sleep) (unsigned int seconds)
{
    int result;

    GC_start_blocking();
    result = REAL_FUNC(sleep)(seconds);
    GC_end_blocking();
    return result;
}
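/* A sketch (not compiled) of how another blocking call could be      */
/* wrapped with the same pairing discipline as sleep() above; "read"  */
/* is not actually wrapped by this file.                              */
#if 0
ssize_t GC_read_example(int fd, void *buf, size_t count)
{
    ssize_t result;

    GC_start_blocking();        /* record a conservative stack pointer */
    result = read(fd, buf, count);
    GC_end_blocking();          /* blocks here if the world is stopped */
    return result;
}
#endif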

struct start_info {
    void *(*start_routine)(void *);
    void *arg;
    word flags;
    sem_t registered;           /* 1 ==> in our thread table, but */
                                /* parent hasn't yet noticed.     */
};

/* Called at thread exit.                             */
/* Never called for main thread.  That's OK, since it */
/* results in at most a tiny one-time leak.  And      */
/* linuxthreads doesn't reclaim the main thread's     */
/* resources or id anyway.                            */
void GC_thread_exit_proc(void *arg)
{
    GC_thread me;

    LOCK();
    me = GC_lookup_thread(pthread_self());
    GC_destroy_thread_local(me);
    if (me -> flags & DETACHED) {
        GC_delete_thread(pthread_self());
    } else {
        me -> flags |= FINISHED;
    }
#   if defined(THREAD_LOCAL_ALLOC) && !defined(USE_PTHREAD_SPECIFIC) \
       && !defined(USE_COMPILER_TLS) && !defined(DBG_HDRS_ALL)
      GC_remove_specific(GC_thread_key);
#   endif
    /* The following may run the GC from "nonexistent" thread. */
    GC_wait_for_gc_completion(FALSE);
    UNLOCK();
}

int WRAP_FUNC(pthread_join)(pthread_t thread, void **retval)
{
    int result;
    GC_thread thread_gc_id;

    LOCK();
    thread_gc_id = GC_lookup_thread(thread);
    /* This is guaranteed to be the intended one, since the thread id */
    /* can't have been recycled by pthreads.                          */
    UNLOCK();
    result = REAL_FUNC(pthread_join)(thread, retval);
# if defined (GC_FREEBSD_THREADS)
    /* On FreeBSD, the wrapped pthread_join() sometimes returns (what
       appears to be) a spurious EINTR which caused the test and real code
       to gratuitously fail.  Having looked at system pthread library source
       code, I see how this return code may be generated.  In one path of
       code, pthread_join() just returns the errno setting of the thread
       being joined.  This does not match the POSIX specification or the
       local man pages; thus I have taken the liberty to catch this one
       spurious return value, properly conditionalized on
       GC_FREEBSD_THREADS. */
    if (result == EINTR) result = 0;
# endif
    if (result == 0) {
        LOCK();
        /* Here the pthread thread id may have been recycled. */
        GC_delete_gc_thread(thread, thread_gc_id);
        UNLOCK();
    }
    return result;
}

int
WRAP_FUNC(pthread_detach)(pthread_t thread)
{
    int result;
    GC_thread thread_gc_id;

    LOCK();
    thread_gc_id = GC_lookup_thread(thread);
    UNLOCK();
    result = REAL_FUNC(pthread_detach)(thread);
    if (result == 0) {
      LOCK();
      thread_gc_id -> flags |= DETACHED;
      /* Here the pthread thread id may have been recycled. */
      if (thread_gc_id -> flags & FINISHED) {
        GC_delete_gc_thread(thread, thread_gc_id);
      }
      UNLOCK();
    }
    return result;
}

GC_bool GC_in_thread_creation = FALSE;

GC_PTR GC_get_thread_stack_base()
{
# ifdef HAVE_PTHREAD_GETATTR_NP
  pthread_t my_pthread;
  pthread_attr_t attr;
  ptr_t stack_addr;
  size_t stack_size;

  my_pthread = pthread_self();
  if (pthread_getattr_np (my_pthread, &attr) != 0)
    {
#     ifdef DEBUG_THREADS
        GC_printf0("Can not determine stack base for attached thread");
#     endif
      return 0;
    }
  pthread_attr_getstack (&attr, (void **) &stack_addr, &stack_size);
  pthread_attr_destroy (&attr);

# ifdef DEBUG_THREADS
    GC_printf1("attached thread stack address: 0x%x\n", stack_addr);
# endif

# ifdef STACK_GROWS_DOWN
    return stack_addr + stack_size;
# else
    return stack_addr;
# endif

# else
#   ifdef DEBUG_THREADS
      GC_printf0("Can not determine stack base for attached thread");
#   endif
    return 0;
# endif
}
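/* Note: pthread_attr_getstack() reports the lowest addressable byte  */
/* of the stack together with its size, so for the common downward-   */
/* growing stack the base (highest address) is stack_addr +           */
/* stack_size, as computed above.                                     */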

void GC_register_my_thread()
{
  GC_thread me;
  pthread_t my_pthread;

  my_pthread = pthread_self();
# ifdef DEBUG_THREADS
    GC_printf1("Attaching thread 0x%lx\n", my_pthread);
    GC_printf1("pid = %ld\n", (long) getpid());
# endif

  /* Check to ensure this thread isn't attached already. */
  LOCK();
  me = GC_lookup_thread (my_pthread);
  UNLOCK();
  if (me != 0)
    {
#     ifdef DEBUG_THREADS
        GC_printf1("Attempt to re-attach known thread 0x%lx\n", my_pthread);
#     endif
      return;
    }

  LOCK();
  GC_in_thread_creation = TRUE;
  me = GC_new_thread(my_pthread);
  GC_in_thread_creation = FALSE;

  me -> flags |= DETACHED;

#ifdef GC_DARWIN_THREADS
  me -> stop_info.mach_thread = mach_thread_self();
#else
  me -> stack_end = GC_get_thread_stack_base();
  if (me -> stack_end == 0)
    GC_abort("Can not determine stack base for attached thread");

# ifdef STACK_GROWS_DOWN
    me -> stop_info.stack_ptr = me -> stack_end - 0x10;
# else
    me -> stop_info.stack_ptr = me -> stack_end + 0x10;
# endif
#endif

# ifdef IA64
    me -> backing_store_end = (ptr_t)
                (GC_save_regs_in_stack() & ~(GC_page_size - 1));
    /* This is also < 100% convincing.  We should also read this */
    /* from /proc, but the hook to do so isn't there yet.        */
# endif /* IA64 */

# if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
    GC_init_thread_local(me);
# endif
  UNLOCK();
}

void GC_unregister_my_thread()
{
  pthread_t my_pthread;

  my_pthread = pthread_self();

# ifdef DEBUG_THREADS
    GC_printf1("Detaching thread 0x%lx\n", my_pthread);
# endif

  GC_thread_exit_proc (0);
}
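/* A sketch (not compiled) of attaching a thread that was created     */
/* behind the collector's back, e.g. by a foreign library, using the  */
/* two entry points above:                                            */
#if 0
void * foreign_thread_body(void * arg)
{
    GC_register_my_thread();    /* make our stack visible to the GC */
    /* ... code that may allocate from and be scanned by the GC ... */
    GC_unregister_my_thread();  /* must precede thread exit */
    return 0;
}
#endif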

void * GC_start_routine(void * arg)
{
    int dummy;
    struct start_info * si = arg;
    void * result;
    GC_thread me;
    pthread_t my_pthread;
    void *(*start)(void *);
    void *start_arg;

    my_pthread = pthread_self();
#   ifdef DEBUG_THREADS
        GC_printf1("Starting thread 0x%lx\n", my_pthread);
        GC_printf1("pid = %ld\n", (long) getpid());
        GC_printf1("sp = 0x%lx\n", (long) &arg);
#   endif
    LOCK();
    GC_in_thread_creation = TRUE;
    me = GC_new_thread(my_pthread);
    GC_in_thread_creation = FALSE;
#ifdef GC_DARWIN_THREADS
    me -> stop_info.mach_thread = mach_thread_self();
#else
    me -> stop_info.stack_ptr = 0;
#endif
    me -> flags = si -> flags;
    /* me -> stack_end = GC_linux_stack_base(); -- currently (11/99)    */
    /* doesn't work because the stack base in /proc/self/stat is the    */
    /* one for the main thread.  There is a strong argument that that's */
    /* a kernel bug, but a pervasive one.                               */
#   ifdef STACK_GROWS_DOWN
      me -> stack_end = (ptr_t)(((word)(&dummy) + (GC_page_size - 1))
                                & ~(GC_page_size - 1));
#     ifndef GC_DARWIN_THREADS
        me -> stop_info.stack_ptr = me -> stack_end - 0x10;
#     endif
        /* Needs to be plausible, since an asynchronous stack mark */
        /* should not crash.                                       */
#   else
      me -> stack_end = (ptr_t)((word)(&dummy) & ~(GC_page_size - 1));
      me -> stop_info.stack_ptr = me -> stack_end + 0x10;
#   endif
    /* This is dubious, since we may be more than a page into the stack, */
    /* and hence skip some of it, though it's not clear that matters.    */
#   ifdef IA64
      me -> backing_store_end = (ptr_t)
                (GC_save_regs_in_stack() & ~(GC_page_size - 1));
      /* This is also < 100% convincing.  We should also read this */
      /* from /proc, but the hook to do so isn't there yet.        */
#   endif /* IA64 */
    UNLOCK();
    start = si -> start_routine;
#   ifdef DEBUG_THREADS
        GC_printf1("start_routine = 0x%lx\n", start);
#   endif
    start_arg = si -> arg;
    sem_post(&(si -> registered));      /* Last action on si.  */
                                        /* OK to deallocate.   */
    pthread_cleanup_push(GC_thread_exit_proc, 0);
#   if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
        LOCK();
        GC_init_thread_local(me);
        UNLOCK();
#   endif
    result = (*start)(start_arg);
#   if DEBUG_THREADS
        GC_printf1("Finishing thread 0x%x\n", pthread_self());
#   endif
    me -> status = result;
    pthread_cleanup_pop(1);
    /* Cleanup acquires lock, ensuring that we can't exit while a    */
    /* collection that thinks we're alive is trying to stop us.      */
    return(result);
}

int
WRAP_FUNC(pthread_create)(pthread_t *new_thread,
                          const pthread_attr_t *attr,
                          void *(*start_routine)(void *), void *arg)
{
    int result;
    int detachstate;
    word my_flags = 0;
    struct start_info * si;
        /* This is otherwise saved only in an area mmapped by the thread */
        /* library, which isn't visible to the collector.                */

    /* We resist the temptation to muck with the stack size here,      */
    /* even if the default is unreasonably small.  That's the client's */
    /* responsibility.                                                 */

    LOCK();
    si = (struct start_info *)GC_INTERNAL_MALLOC(sizeof(struct start_info),
                                                 NORMAL);
    UNLOCK();
    if (!parallel_initialized) GC_init_parallel();
    if (0 == si) return(ENOMEM);
    sem_init(&(si -> registered), 0, 0);
    si -> start_routine = start_routine;
    si -> arg = arg;
    LOCK();
    if (!GC_thr_initialized) GC_thr_init();
#   ifdef GC_ASSERTIONS
      {
        size_t stack_size;
        if (NULL == attr) {
           pthread_attr_t my_attr;
           pthread_attr_init(&my_attr);
           pthread_attr_getstacksize(&my_attr, &stack_size);
        } else {
           pthread_attr_getstacksize(attr, &stack_size);
        }
#       ifdef PARALLEL_MARK
          GC_ASSERT(stack_size >= (8*HBLKSIZE*sizeof(word)));
#       else
          /* FreeBSD-5.3/Alpha: default pthread stack is 64K, */
          /* HBLKSIZE=8192, sizeof(word)=8                    */
          GC_ASSERT(stack_size >= 65536);
#       endif
        /* Our threads may need to do some work for the GC. */
        /* Ridiculously small threads won't work, and they  */
        /* probably wouldn't work anyway.                   */
      }
#   endif
    if (NULL == attr) {
        detachstate = PTHREAD_CREATE_JOINABLE;
    } else {
        pthread_attr_getdetachstate(attr, &detachstate);
    }
    if (PTHREAD_CREATE_DETACHED == detachstate) my_flags |= DETACHED;
    si -> flags = my_flags;
    UNLOCK();
#   ifdef DEBUG_THREADS
        GC_printf1("About to start new thread from thread 0x%X\n",
                   pthread_self());
#   endif

    result = REAL_FUNC(pthread_create)(new_thread, attr, GC_start_routine, si);

#   ifdef DEBUG_THREADS
        GC_printf1("Started thread 0x%X\n", *new_thread);
#   endif
    /* Wait until child has been added to the thread table.           */
    /* This also ensures that we hold onto si until the child is done */
    /* with it.  Thus it doesn't matter whether it is otherwise       */
    /* visible to the collector.                                      */
    if (0 == result) {
        while (0 != sem_wait(&(si -> registered))) {
            if (EINTR != errno) ABORT("sem_wait failed");
        }
    }
    sem_destroy(&(si -> registered));
    LOCK();
    GC_INTERNAL_FREE(si);
    UNLOCK();

    return(result);
}

#ifdef GENERIC_COMPARE_AND_SWAP
  pthread_mutex_t GC_compare_and_swap_lock = PTHREAD_MUTEX_INITIALIZER;

  GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                  GC_word old, GC_word new_val)
  {
    GC_bool result;
    pthread_mutex_lock(&GC_compare_and_swap_lock);
    if (*addr == old) {
      *addr = new_val;
      result = TRUE;
    } else {
      result = FALSE;
    }
    pthread_mutex_unlock(&GC_compare_and_swap_lock);
    return result;
  }

  GC_word GC_atomic_add(volatile GC_word *addr, GC_word how_much)
  {
    GC_word old;
    pthread_mutex_lock(&GC_compare_and_swap_lock);
    old = *addr;
    *addr = old + how_much;
    pthread_mutex_unlock(&GC_compare_and_swap_lock);
    return old;
  }

#endif /* GENERIC_COMPARE_AND_SWAP */
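/* For illustration only: on compilers with GCC-style atomic builtins, */
/* the lock-based fallback above is semantically equivalent to the     */
/* following (this file predates those builtins and does not use them):*/
#if 0
GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                GC_word old, GC_word new_val)
{
    return __sync_bool_compare_and_swap(addr, old, new_val);
}
#endif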
/* Spend a few cycles in a way that can't introduce contention with */
/* other threads.                                                   */
void GC_pause()
{
    int i;
#   if !defined(__GNUC__) || defined(__INTEL_COMPILER)
      volatile word dummy = 0;
#   endif

    for (i = 0; i < 10; ++i) {
#     if defined(__GNUC__) && !defined(__INTEL_COMPILER)
        __asm__ __volatile__ (" " : : : "memory");
#     else
        /* Something that's unlikely to be optimized away. */
        GC_noop(++dummy);
#     endif
    }
}
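/* The empty asm above acts as a compiler barrier: the "memory"       */
/* clobber keeps gcc from caching memory values across it or from     */
/* optimizing the delay loop away, without emitting any instructions. */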

#define SPIN_MAX 128    /* Maximum number of calls to GC_pause before */
                        /* giving up.                                 */

VOLATILE GC_bool GC_collecting = 0;
                        /* A hint that we're in the collector and */
                        /* holding the allocation lock for an     */
                        /* extended period.                       */

#if !defined(USE_SPIN_LOCK) || defined(PARALLEL_MARK)
/* If we don't want to use the below spinlock implementation, either  */
/* because we don't have a GC_test_and_set implementation, or because */
/* we don't want to risk sleeping, we can still try spinning on       */
/* pthread_mutex_trylock for a while.  This appears to be very        */
/* beneficial in many cases.                                          */
/* I suspect that under high contention this is nearly always better  */
/* than the spin lock.  But it's a bit slower on a uniprocessor.      */
/* Hence we still default to the spin lock.                           */
/* This is also used to acquire the mark lock for the parallel        */
/* marker.                                                            */

/* Here we use a strict exponential backoff scheme.  I don't know */
/* whether that's better or worse than the above.  We eventually  */
/* yield by calling pthread_mutex_lock(); it never makes sense to */
/* explicitly sleep.                                              */

#define LOCK_STATS
#ifdef LOCK_STATS
  unsigned long GC_spin_count = 0;
  unsigned long GC_block_count = 0;
  unsigned long GC_unlocked_count = 0;
#endif
1477 |
|
|
void GC_generic_lock(pthread_mutex_t * lock)
|
1478 |
|
|
{
|
1479 |
|
|
#ifndef NO_PTHREAD_TRYLOCK
|
1480 |
|
|
unsigned pause_length = 1;
|
1481 |
|
|
unsigned i;
|
1482 |
|
|
|
1483 |
|
|
if (0 == pthread_mutex_trylock(lock)) {
|
1484 |
|
|
# ifdef LOCK_STATS
|
1485 |
|
|
++GC_unlocked_count;
|
1486 |
|
|
# endif
|
1487 |
|
|
return;
|
1488 |
|
|
}
|
1489 |
|
|
for (; pause_length <= SPIN_MAX; pause_length <<= 1) {
|
1490 |
|
|
for (i = 0; i < pause_length; ++i) {
|
1491 |
|
|
GC_pause();
|
1492 |
|
|
}
|
1493 |
|
|
switch(pthread_mutex_trylock(lock)) {
|
1494 |
|
|
case 0:
|
1495 |
|
|
# ifdef LOCK_STATS
|
1496 |
|
|
++GC_spin_count;
|
1497 |
|
|
# endif
|
1498 |
|
|
return;
|
1499 |
|
|
case EBUSY:
|
1500 |
|
|
break;
|
1501 |
|
|
default:
|
1502 |
|
|
ABORT("Unexpected error from pthread_mutex_trylock");
|
1503 |
|
|
}
|
1504 |
|
|
}
|
1505 |
|
|
#endif /* !NO_PTHREAD_TRYLOCK */
|
1506 |
|
|
# ifdef LOCK_STATS
|
1507 |
|
|
++GC_block_count;
|
1508 |
|
|
# endif
|
1509 |
|
|
pthread_mutex_lock(lock);
|
1510 |
|
|
}
|
1511 |
|
|
|
1512 |
|
|
#endif /* !USE_SPIN_LOCK || PARALLEL_MARK */
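
/* With LOCK_STATS defined, the three counters above classify every    */
/* GC_generic_lock call: acquired immediately, acquired while          */
/* spinning, or forced to block.  A hypothetical report (for a         */
/* debugger or exit hook; not part of the collector) might look like:  */
#if 0
#include <stdio.h>

static void report_lock_stats(void)
{
    fprintf(stderr,
            "locks: %lu uncontended, %lu after spinning, %lu blocked\n",
            GC_unlocked_count, GC_spin_count, GC_block_count);
    /* A large GC_block_count relative to the others suggests that     */
    /* spinning is not paying off on this workload.                    */
}
#endif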

#if defined(USE_SPIN_LOCK)

/* Reasonably fast spin locks.  Basically the same implementation      */
/* as STL alloc.h.  This isn't really the right way to do this,        */
/* but until the POSIX scheduling mess gets straightened out ...       */

volatile unsigned int GC_allocate_lock = 0;

void GC_lock()
{
#   define low_spin_max 30         /* spin cycles if we suspect uniprocessor */
#   define high_spin_max SPIN_MAX  /* spin cycles for multiprocessor         */
    static unsigned spin_max = low_spin_max;
    unsigned my_spin_max;
    static unsigned last_spins = 0;
    unsigned my_last_spins;
    int i;

    if (!GC_test_and_set(&GC_allocate_lock)) {
        return;
    }
    my_spin_max = spin_max;
    my_last_spins = last_spins;
    for (i = 0; i < my_spin_max; i++) {
        if (GC_collecting || GC_nprocs == 1) goto yield;
        if (i < my_last_spins/2 || GC_allocate_lock) {
            GC_pause();
            continue;
        }
        if (!GC_test_and_set(&GC_allocate_lock)) {
            /*
             * Got it!
             * Spinning worked.  Thus we're probably not being scheduled
             * against the other process with which we were contending.
             * Thus it makes sense to spin longer the next time.
             */
            last_spins = i;
            spin_max = high_spin_max;
            return;
        }
    }
    /* We are probably being scheduled against the other process.  Sleep. */
    spin_max = low_spin_max;
yield:
    for (i = 0;; ++i) {
        if (!GC_test_and_set(&GC_allocate_lock)) {
            return;
        }
#       define SLEEP_THRESHOLD 12
        /* Under Linux very short sleeps tend to wait until             */
        /* the current time quantum expires.  On old Linux              */
        /* kernels nanosleep(<= 2ms) just spins.                        */
        /* (Under 2.4, this happens only for real-time                  */
        /* processes.)  We want to minimize both behaviors              */
        /* here.                                                        */
        if (i < SLEEP_THRESHOLD) {
            sched_yield();
        } else {
            struct timespec ts;

            if (i > 24) i = 24;
                /* Don't wait for more than about 15 msecs, even        */
                /* under extreme contention.                            */
            ts.tv_sec = 0;
            ts.tv_nsec = 1 << i;
            nanosleep(&ts, 0);
        }
    }
}
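
/* Backoff arithmetic for the yield loop above: after SLEEP_THRESHOLD  */
/* (12) rounds we sleep 1 << i nanoseconds, doubling each iteration:   */
/* 2^12 ns = 4 us, 2^16 ns = 65 us, 2^20 ns = 1 ms, and the cap at     */
/* i == 24 gives 2^24 ns = 16.8 ms, i.e. the "about 15 msecs" noted    */
/* in the comment (the kernel typically rounds short sleeps up to its  */
/* timer granularity anyway).                                          */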

#else /* !USE_SPIN_LOCK */

void GC_lock()
{
#ifndef NO_PTHREAD_TRYLOCK
    if (1 == GC_nprocs || GC_collecting) {
        pthread_mutex_lock(&GC_allocate_ml);
    } else {
        GC_generic_lock(&GC_allocate_ml);
    }
#else /* NO_PTHREAD_TRYLOCK */
    pthread_mutex_lock(&GC_allocate_ml);
#endif /* !NO_PTHREAD_TRYLOCK */
}

#endif /* !USE_SPIN_LOCK */
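
/* Rationale for the branch above: GC_collecting means the collector   */
/* expects to hold the allocation lock for an extended period, and on  */
/* a uniprocessor spinning can never help, so in both cases we block   */
/* in pthread_mutex_lock right away instead of spinning via            */
/* GC_generic_lock.                                                    */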

#if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)

#ifdef GC_ASSERTIONS
  pthread_t GC_mark_lock_holder = NO_THREAD;
#endif

#if 0
  /* Ugly workaround for a linux threads bug in the final versions     */
  /* of glibc2.1.  Pthread_mutex_trylock sets the mutex owner          */
  /* field even when it fails to acquire the mutex.  This causes       */
  /* pthread_cond_wait to die.  Remove for glibc2.2.                   */
  /* According to the man page, we should use                          */
  /* PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP, but that isn't actually  */
  /* defined.                                                          */
  static pthread_mutex_t mark_mutex =
        {0, 0, 0, PTHREAD_MUTEX_ERRORCHECK_NP, {0, 0}};
#else
  static pthread_mutex_t mark_mutex = PTHREAD_MUTEX_INITIALIZER;
#endif

static pthread_cond_t builder_cv = PTHREAD_COND_INITIALIZER;

void GC_acquire_mark_lock()
{
/*
    if (pthread_mutex_lock(&mark_mutex) != 0) {
        ABORT("pthread_mutex_lock failed");
    }
*/
    GC_generic_lock(&mark_mutex);
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = pthread_self();
#   endif
}

void GC_release_mark_lock()
{
    GC_ASSERT(GC_mark_lock_holder == pthread_self());
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = NO_THREAD;
#   endif
    if (pthread_mutex_unlock(&mark_mutex) != 0) {
        ABORT("pthread_mutex_unlock failed");
    }
}

/* The collector must wait for freelist builders for two reasons:      */
/* 1) Mark bits may still be getting examined without the lock.        */
/* 2) Partial free lists referenced only by locals may not be scanned  */
/*    correctly, e.g. if they contain "pointer-free" objects, since    */
/*    the free-list link may be ignored.                               */
void GC_wait_builder()
{
    GC_ASSERT(GC_mark_lock_holder == pthread_self());
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = NO_THREAD;
#   endif
    if (pthread_cond_wait(&builder_cv, &mark_mutex) != 0) {
        ABORT("pthread_cond_wait failed");
    }
    GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = pthread_self();
#   endif
}

void GC_wait_for_reclaim()
{
    GC_acquire_mark_lock();
    while (GC_fl_builder_count > 0) {
        GC_wait_builder();
    }
    GC_release_mark_lock();
}

void GC_notify_all_builder()
{
    GC_ASSERT(GC_mark_lock_holder == pthread_self());
    if (pthread_cond_broadcast(&builder_cv) != 0) {
        ABORT("pthread_cond_broadcast failed");
    }
}

#endif /* PARALLEL_MARK || THREAD_LOCAL_ALLOC */
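
/* Sketch of the builder handshake implied above (illustration only;   */
/* the real counter updates live elsewhere in the collector): a        */
/* freelist builder registers itself under the mark lock, and the      */
/* last builder to finish wakes any waiter in GC_wait_for_reclaim.     */
#if 0
static void build_free_lists(void)
{
    GC_acquire_mark_lock();
    ++GC_fl_builder_count;              /* announce the builder         */
    GC_release_mark_lock();

    /* ... build free lists without holding the mark lock ... */

    GC_acquire_mark_lock();
    if (0 == --GC_fl_builder_count) {
        GC_notify_all_builder();        /* last one out wakes waiters   */
    }
    GC_release_mark_lock();
}
#endif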

#ifdef PARALLEL_MARK

static pthread_cond_t mark_cv = PTHREAD_COND_INITIALIZER;

void GC_wait_marker()
{
    GC_ASSERT(GC_mark_lock_holder == pthread_self());
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = NO_THREAD;
#   endif
    if (pthread_cond_wait(&mark_cv, &mark_mutex) != 0) {
        ABORT("pthread_cond_wait failed");
    }
    GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = pthread_self();
#   endif
}

void GC_notify_all_marker()
{
    if (pthread_cond_broadcast(&mark_cv) != 0) {
        ABORT("pthread_cond_broadcast failed");
    }
}

#endif /* PARALLEL_MARK */
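
/* Usage note (illustration only; the real driver loop lives in the    */
/* parallel mark code): GC_wait_marker must be called with the mark    */
/* lock held.  pthread_cond_wait releases mark_mutex while blocked     */
/* and reacquires it before returning, which is why the lock-holder    */
/* assertion is cleared and restored around the wait.  A hypothetical  */
/* caller follows the standard condition-variable shape:               */
#if 0
    GC_acquire_mark_lock();
    while (!work_available) {   /* hypothetical predicate; rechecked    */
        GC_wait_marker();       /* drops and reacquires mark_mutex      */
    }                           /* after every wakeup                   */
    /* ... consume mark work; producers call GC_notify_all_marker() ... */
    GC_release_mark_lock();
#endif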

# endif /* GC_LINUX_THREADS and friends */