1 |
721 |
jeremybenn |
#include "private/pthread_support.h"
|
2 |
|
|
|
3 |
|
|
#if defined(GC_PTHREADS) && !defined(GC_SOLARIS_THREADS) \
|
4 |
|
|
&& !defined(GC_WIN32_THREADS) && !defined(GC_DARWIN_THREADS)
|
5 |
|
|
|
6 |
|
|
#include <signal.h>
|
7 |
|
|
#include <semaphore.h>
|
8 |
|
|
#include <errno.h>
|
9 |
|
|
#include <unistd.h>
|
10 |
|
|
#include <sys/time.h>
|
11 |
|
|
#ifndef HPUX
|
12 |
|
|
# include <sys/select.h>
|
13 |
|
|
/* Doesn't exist on HP/UX 11.11. */
|
14 |
|
|
#endif
|
15 |
|
|
|
16 |
|
|
void suspend_self();
|
17 |
|
|
|
18 |
|
|
#if DEBUG_THREADS
|
19 |
|
|
|
20 |
|
|
/* Determine NSIG (one greater than the largest signal number) for the  */
/* signal-mask dump below; platforms expose it under different names.   */
#ifndef NSIG
# if defined(MAXSIG)
#  define NSIG (MAXSIG+1)
# elif defined(_NSIG)
#  define NSIG _NSIG
# elif defined(__SIGRTMAX)
#  define NSIG (__SIGRTMAX+1)
# else
   /* Use a real preprocessor diagnostic instead of the old            */
   /* "--> please fix it" deliberate syntax error.                     */
#  error "Cannot determine NSIG for this platform; please define it."
# endif
#endif
|
31 |
|
|
|
32 |
|
|
void GC_print_sig_mask()
|
33 |
|
|
{
|
34 |
|
|
sigset_t blocked;
|
35 |
|
|
int i;
|
36 |
|
|
|
37 |
|
|
if (pthread_sigmask(SIG_BLOCK, NULL, &blocked) != 0)
|
38 |
|
|
ABORT("pthread_sigmask");
|
39 |
|
|
GC_printf0("Blocked: ");
|
40 |
|
|
for (i = 1; i < NSIG; i++) {
|
41 |
|
|
if (sigismember(&blocked, i)) { GC_printf1("%ld ",(long) i); }
|
42 |
|
|
}
|
43 |
|
|
GC_printf0("\n");
|
44 |
|
|
}
|
45 |
|
|
|
46 |
|
|
#endif
|
47 |
|
|
|
48 |
|
|
/* Remove the signals that we want to allow in thread stopping */
|
49 |
|
|
/* handler from a set. */
|
50 |
|
|
/* Remove the signals that we want to allow in thread stopping 	*/
/* handler from a set.  Called on a filled set before it becomes the	*/
/* suspend handler's sa_mask or the sigsuspend() wait mask.		*/
void GC_remove_allowed_signals(sigset_t *set)
{
#   ifdef NO_SIGNALS
      /* Keep the usual termination signals deliverable even while	*/
      /* threads sit in the suspend handler.				*/
      if (sigdelset(set, SIGINT) != 0
	  || sigdelset(set, SIGQUIT) != 0
	  || sigdelset(set, SIGABRT) != 0
	  || sigdelset(set, SIGTERM) != 0) {
        ABORT("sigdelset() failed");
      }
#   endif

#   ifdef MPROTECT_VDB
      /* Handlers write to the thread structure, which is in the heap,	*/
      /* and hence can trigger a protection fault.			*/
      if (sigdelset(set, SIGSEGV) != 0
#	  ifdef SIGBUS
	    || sigdelset(set, SIGBUS) != 0
#	  endif
	  ) {
        ABORT("sigdelset() failed");
      }
#   endif
}
|
73 |
|
|
|
74 |
|
|
/* Mask used by sigsuspend() inside the suspend handler; built in	*/
/* GC_stop_init and excludes SIG_THR_RESTART.				*/
static sigset_t suspend_handler_mask;

volatile sig_atomic_t GC_stop_count;
			/* Incremented at the beginning of GC_stop_world. */

volatile sig_atomic_t GC_world_is_stopped = FALSE;
			/* FALSE ==> it is safe for threads to restart, i.e. */
			/* they will see another suspend signal before they  */
			/* are expected to stop (unless they have voluntarily */
			/* stopped).					     */
|
84 |
|
|
|
85 |
|
|
void GC_brief_async_signal_safe_sleep()
|
86 |
|
|
{
|
87 |
|
|
struct timeval tv;
|
88 |
|
|
tv.tv_sec = 0;
|
89 |
|
|
tv.tv_usec = 1000 * TIME_LIMIT / 2;
|
90 |
|
|
select(0, 0, 0, 0, &tv);
|
91 |
|
|
}
|
92 |
|
|
|
93 |
|
|
/* Whether GC_stop_world resends suspend signals after a timeout	*/
/* (presumably OSF1 can lose them -- see the retry loop there).		*/
/* Also overridable at runtime via GC_RETRY_SIGNALS /			*/
/* GC_NO_RETRY_SIGNALS in GC_stop_init.					*/
#ifdef GC_OSF1_THREADS
  GC_bool GC_retry_signals = TRUE;
#else
  GC_bool GC_retry_signals = FALSE;
#endif
|
98 |
|
|
|
99 |
|
|
/*
|
100 |
|
|
* We use signals to stop threads during GC.
|
101 |
|
|
*
|
102 |
|
|
* Suspended threads wait in signal handler for SIG_THR_RESTART.
|
103 |
|
|
* That's more portable than semaphores or condition variables.
|
104 |
|
|
* (We do use sem_post from a signal handler, but that should be portable.)
|
105 |
|
|
*
|
106 |
|
|
* The thread suspension signal SIG_SUSPEND is now defined in gc_priv.h.
|
107 |
|
|
* Note that we can't just stop a thread; we need it to save its stack
|
108 |
|
|
* pointer(s) and acknowledge.
|
109 |
|
|
*/
|
110 |
|
|
|
111 |
|
|
/* Choose the restart signal when the build did not specify one:	*/
/* a realtime signal on HP/UX and OSF1, SIGXCPU elsewhere.		*/
#ifndef SIG_THR_RESTART
#  if defined(GC_HPUX_THREADS) || defined(GC_OSF1_THREADS)
#    ifdef _SIGRTMIN
#      define SIG_THR_RESTART _SIGRTMIN + 5
#    else
#      define SIG_THR_RESTART SIGRTMIN + 5
#    endif
#  else
#   define SIG_THR_RESTART SIGXCPU
#  endif
#endif
|
122 |
|
|
|
123 |
|
|
/* Posted by the suspend handler once a thread has recorded its stack	*/
/* pointer(s); GC_stop_world waits on it once per signalled thread.	*/
sem_t GC_suspend_ack_sem;

void GC_suspend_handler_inner(ptr_t sig_arg);
|
126 |
|
|
|
127 |
|
|
#if defined(IA64) || defined(HP_PA) || defined(M68K)
extern void GC_with_callee_saves_pushed();

/* Handler for SIG_SUSPEND.  On these targets the callee-save		*/
/* registers are not guaranteed to be in the signal frame, so they	*/
/* are pushed explicitly before running the real handler body.		*/
void GC_suspend_handler(int sig)
{
  GC_thread me = GC_lookup_thread (pthread_self());
  if (me -> flags & SUSPENDED)
    /* This was a client-requested suspension: park until resumed.	*/
    suspend_self();
  else {
    /* Preserve errno: the handler body may clobber it, and the		*/
    /* interrupted code must not observe that.				*/
    int old_errno = errno;
    GC_with_callee_saves_pushed(GC_suspend_handler_inner, (ptr_t)(word)sig);
    errno = old_errno;
  }
}

#else
/* We believe that in all other cases the full context is already */
/* in the signal handler frame.				  */
void GC_suspend_handler(int sig)
{
  GC_thread me = GC_lookup_thread(pthread_self());
  if (me -> flags & SUSPENDED)
    /* Client-requested suspension: park until resumed.			*/
    suspend_self();
  else {
    /* Preserve errno across the handler body.				*/
    int old_errno = errno;
    GC_suspend_handler_inner((ptr_t)(word)sig);
    errno = old_errno;
  }
}
#endif
|
157 |
|
|
|
158 |
|
|
/* Body of the suspend handler, run with the full register context	*/
/* captured.  Records this thread's stack pointer(s), acknowledges the	*/
/* stop via GC_suspend_ack_sem, then parks until the world restarts.	*/
/* Runs in signal context: only async-signal-safe calls are allowed.	*/
void GC_suspend_handler_inner(ptr_t sig_arg)
{
    int sig = (int)(word)sig_arg;
    int dummy;	/* Its address serves as an approximation of our sp.	*/
    pthread_t my_thread = pthread_self();
    GC_thread me;
#   ifdef PARALLEL_MARK
	word my_mark_no = GC_mark_no;
	/* Marker can't proceed until we acknowledge.  Thus this is	  */
	/* guaranteed to be the mark_no corresponding to our		  */
	/* suspension, i.e. the marker can't have incremented it yet.	  */
#   endif
    word my_stop_count = GC_stop_count;

    if (sig != SIG_SUSPEND) ABORT("Bad signal in suspend_handler");

#if DEBUG_THREADS
    GC_printf1("Suspending 0x%lx\n", my_thread);
#endif

    me = GC_lookup_thread(my_thread);
    /* The lookup here is safe, since I'm doing this on behalf  */
    /* of a thread which holds the allocation lock in order	*/
    /* to stop the world.  Thus concurrent modification of the	*/
    /* data structure is impossible.				*/
    if (me -> stop_info.last_stop_count == my_stop_count) {
	/* Duplicate signal.  OK if we are retrying.	*/
	if (!GC_retry_signals) {
	    WARN("Duplicate suspend signal in thread %lx\n",
		 pthread_self());
	}
	return;
    }
#   ifdef SPARC
	/* Flush register windows so the stack holds everything.	*/
	me -> stop_info.stack_ptr = (ptr_t)GC_save_regs_in_stack();
#   else
	me -> stop_info.stack_ptr = (ptr_t)(&dummy);
#   endif
#   ifdef IA64
	me -> backing_store_ptr = (ptr_t)GC_save_regs_in_stack();
#   endif

    /* Tell the thread that wants to stop the world that this	*/
    /* thread has been stopped.  Note that sem_post() is	*/
    /* the only async-signal-safe primitive in LinuxThreads.	*/
    sem_post(&GC_suspend_ack_sem);
    me -> stop_info.last_stop_count = my_stop_count;

    /* Wait until that thread tells us to restart by sending	*/
    /* this thread a SIG_THR_RESTART signal.			*/
    /* SIG_THR_RESTART should be masked at this point.  Thus there	*/
    /* is no race.						*/
    /* We do not continue until we receive a SIG_THR_RESTART,	*/
    /* but we do not take that as authoritative.  (We may be	*/
    /* accidentally restarted by one of the user signals we	*/
    /* don't block.)  After we receive the signal, we use a	*/
    /* primitive and expensive mechanism to wait until it's	*/
    /* really safe to proceed.  Under normal circumstances,	*/
    /* this code should not be executed.			*/
    sigsuspend(&suspend_handler_mask);        /* Wait for signal */
    while (GC_world_is_stopped && GC_stop_count == my_stop_count) {
        GC_brief_async_signal_safe_sleep();
#       if DEBUG_THREADS
	  GC_err_printf0("Sleeping in signal handler");
#       endif
    }
    /* If the RESTART signal gets lost, we can still lose.  That should be */
    /* less likely than losing the SUSPEND signal, since we don't do much */
    /* between the sem_post and sigsuspend.				   */
    /* We'd need more handshaking to work around that.			   */
    /* Simply dropping the sigsuspend call should be safe, but is unlikely */
    /* to be efficient.							   */

#if DEBUG_THREADS
    GC_printf1("Continuing 0x%lx\n", my_thread);
#endif
}
|
235 |
|
|
|
236 |
|
|
void GC_restart_handler(int sig)
|
237 |
|
|
{
|
238 |
|
|
pthread_t my_thread = pthread_self();
|
239 |
|
|
|
240 |
|
|
if (sig != SIG_THR_RESTART) ABORT("Bad signal in suspend_handler");
|
241 |
|
|
|
242 |
|
|
/*
|
243 |
|
|
** Note: even if we don't do anything useful here,
|
244 |
|
|
** it would still be necessary to have a signal handler,
|
245 |
|
|
** rather than ignoring the signals, otherwise
|
246 |
|
|
** the signals will not be delivered at all, and
|
247 |
|
|
** will thus not interrupt the sigsuspend() above.
|
248 |
|
|
*/
|
249 |
|
|
|
250 |
|
|
#if DEBUG_THREADS
|
251 |
|
|
GC_printf1("In GC_restart_handler for 0x%lx\n", pthread_self());
|
252 |
|
|
#endif
|
253 |
|
|
}
|
254 |
|
|
|
255 |
|
|
/* Expand x only on IA64, where the register backing store must be	*/
/* scanned in addition to the ordinary stack.				*/
# ifdef IA64
#   define IF_IA64(x) x
# else
#   define IF_IA64(x)
# endif
|
260 |
|
|
/* We hold allocation lock. Should do exactly the right thing if the */
|
261 |
|
|
/* world is stopped. Should not fail if it isn't. */
|
262 |
|
|
void GC_push_all_stacks()
{
    GC_bool found_me = FALSE;
    int i;
    GC_thread p;
    ptr_t lo, hi;
    /* On IA64, we also need to scan the register backing store. */
    IF_IA64(ptr_t bs_lo; ptr_t bs_hi;)
    pthread_t me = pthread_self();
    
    if (!GC_thr_initialized) GC_thr_init();
    #if DEBUG_THREADS
        GC_printf1("Pushing stacks from thread 0x%lx\n", (unsigned long) me);
    #endif
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> flags & FINISHED) continue;
        if (pthread_equal(p -> id, me)) {
	    /* Our own sp wasn't recorded by a suspend handler:		*/
	    /* take it directly.					*/
#	    ifdef SPARC
	        lo = (ptr_t)GC_save_regs_in_stack();
#	    else
 	        lo = GC_approx_sp();
#           endif
	    found_me = TRUE;
	    IF_IA64(bs_hi = (ptr_t)GC_save_regs_in_stack();)
	} else {
	    /* Recorded by the suspend handler while stopping.		*/
	    lo = p -> stop_info.stack_ptr;
	    IF_IA64(bs_hi = p -> backing_store_ptr;)
	}
        if ((p -> flags & MAIN_THREAD) == 0) {
	    hi = p -> stack_end;
	    IF_IA64(bs_lo = p -> backing_store_end);
        } else {
            /* The original stack. */
            hi = GC_stackbottom;
	    IF_IA64(bs_lo = BACKING_STORE_BASE;)
        }
        #if DEBUG_THREADS
        GC_printf3("Stack for thread 0x%lx = [%lx,%lx)\n",
    	    (unsigned long) p -> id,
	    (unsigned long) lo, (unsigned long) hi);
        #endif
	if (0 == lo) ABORT("GC_push_all_stacks: sp not set!\n");
#       ifdef STACK_GROWS_UP
	  /* We got them backwards! */
          GC_push_all_stack(hi, lo);
#       else
          GC_push_all_stack(lo, hi);
#	endif
#	ifdef IA64
#         if DEBUG_THREADS
            GC_printf3("Reg stack for thread 0x%lx = [%lx,%lx)\n",
    	        (unsigned long) p -> id,
	        (unsigned long) bs_lo, (unsigned long) bs_hi);
#	  endif
          if (pthread_equal(p -> id, me)) {
	    /* NOTE(review): own backing store is pushed eagerly,	*/
	    /* other threads' conservatively -- presumably because	*/
	    /* ours may still change; confirm against GC_push_all_eager. */
	    GC_push_all_eager(bs_lo, bs_hi);
	  } else {
	    GC_push_all_stack(bs_lo, bs_hi);
	  }
#	endif
      }
    }
    if (!found_me && !GC_in_thread_creation)
      ABORT("Collecting from unknown thread.");
}
|
328 |
|
|
|
329 |
|
|
/* There seems to be a very rare thread stopping problem. To help us */
|
330 |
|
|
/* debug that, we save the ids of the stopping thread. */
|
331 |
|
|
pthread_t GC_stopping_thread;	/* Last thread to initiate a stop; debugging only. */
int GC_stopping_pid;		/* Its pid; debugging only. */
|
333 |
|
|
|
334 |
|
|
/* We hold the allocation lock. Suspend all threads that might */
|
335 |
|
|
/* still be running. Return the number of suspend signals that */
|
336 |
|
|
/* were sent. */
|
337 |
|
|
int GC_suspend_all()
|
338 |
|
|
{
|
339 |
|
|
int n_live_threads = 0;
|
340 |
|
|
int i;
|
341 |
|
|
GC_thread p;
|
342 |
|
|
int result;
|
343 |
|
|
pthread_t my_thread = pthread_self();
|
344 |
|
|
|
345 |
|
|
GC_stopping_thread = my_thread; /* debugging only. */
|
346 |
|
|
GC_stopping_pid = getpid(); /* debugging only. */
|
347 |
|
|
for (i = 0; i < THREAD_TABLE_SZ; i++) {
|
348 |
|
|
for (p = GC_threads[i]; p != 0; p = p -> next) {
|
349 |
|
|
if (p -> id != my_thread) {
|
350 |
|
|
if (p -> flags & FINISHED) continue;
|
351 |
|
|
if (p -> stop_info.last_stop_count == GC_stop_count) continue;
|
352 |
|
|
if (p -> thread_blocked) /* Will wait */ continue;
|
353 |
|
|
n_live_threads++;
|
354 |
|
|
#if DEBUG_THREADS
|
355 |
|
|
GC_printf1("Sending suspend signal to 0x%lx\n", p -> id);
|
356 |
|
|
#endif
|
357 |
|
|
|
358 |
|
|
result = pthread_kill(p -> id, SIG_SUSPEND);
|
359 |
|
|
switch(result) {
|
360 |
|
|
case ESRCH:
|
361 |
|
|
/* Not really there anymore. Possible? */
|
362 |
|
|
n_live_threads--;
|
363 |
|
|
break;
|
364 |
|
|
case 0:
|
365 |
|
|
break;
|
366 |
|
|
default:
|
367 |
|
|
ABORT("pthread_kill failed");
|
368 |
|
|
}
|
369 |
|
|
}
|
370 |
|
|
}
|
371 |
|
|
}
|
372 |
|
|
return n_live_threads;
|
373 |
|
|
}
|
374 |
|
|
|
375 |
|
|
/* Caller holds allocation lock. */
|
376 |
|
|
/* Stop every other thread: bump GC_stop_count, signal all live		*/
/* threads via GC_suspend_all, optionally resend lost signals, then	*/
/* wait for one semaphore acknowledgement per signalled thread.		*/
void GC_stop_world()
{
    int i;
    int n_live_threads;
    int code;

    #if DEBUG_THREADS
    GC_printf1("Stopping the world from 0x%lx\n", pthread_self());
    #endif
       
    /* Make sure all free list construction has stopped before we start. */
    /* No new construction can start, since free list construction is	*/
    /* required to acquire and release the GC lock before it starts,	*/
    /* and we have the lock.						*/
#   ifdef PARALLEL_MARK
      GC_acquire_mark_lock();
      GC_ASSERT(GC_fl_builder_count == 0);
      /* We should have previously waited for it to become zero. */
#   endif /* PARALLEL_MARK */
    ++GC_stop_count;
    GC_world_is_stopped = TRUE;
    n_live_threads = GC_suspend_all();

    if (GC_retry_signals) {
	unsigned long wait_usecs = 0;  /* Total wait since retry.	*/
#	define WAIT_UNIT 3000
#	define RETRY_INTERVAL 100000
	for (;;) {
	    int ack_count;

	    sem_getvalue(&GC_suspend_ack_sem, &ack_count);
	    if (ack_count == n_live_threads) break;
	    if (wait_usecs > RETRY_INTERVAL) {
		/* Assume some suspend signals were lost; resend to	*/
		/* every thread that hasn't acknowledged this stop.	*/
		int newly_sent = GC_suspend_all();

#		ifdef CONDPRINT
		  if (GC_print_stats) {
		    GC_printf1("Resent %ld signals after timeout\n",
			       newly_sent);
		  }
#		endif
		sem_getvalue(&GC_suspend_ack_sem, &ack_count);
		if (newly_sent < n_live_threads - ack_count) {
		    WARN("Lost some threads during GC_stop_world?!\n",0);
		    /* Adjust the target so the sem_wait loop below	*/
		    /* doesn't block forever on missing threads.	*/
		    n_live_threads = ack_count + newly_sent;
		}
		wait_usecs = 0;
	    }
	    usleep(WAIT_UNIT);
	    wait_usecs += WAIT_UNIT;
	}
    }
    /* Consume one acknowledgement per signalled thread, restarting	*/
    /* sem_wait when it is interrupted by a signal (EINTR).		*/
    for (i = 0; i < n_live_threads; i++) {
	  while (0 != (code = sem_wait(&GC_suspend_ack_sem))) {
	      if (errno != EINTR) {
	         GC_err_printf1("Sem_wait returned %ld\n", (unsigned long)code);
	         ABORT("sem_wait for handler failed");
	      }
	  }
    }
#   ifdef PARALLEL_MARK
      GC_release_mark_lock();
#   endif
    #if DEBUG_THREADS
      GC_printf1("World stopped from 0x%lx\n", pthread_self());
    #endif
    GC_stopping_thread = 0;  /* debugging only */
}
|
444 |
|
|
|
445 |
|
|
void suspend_self() {
|
446 |
|
|
GC_thread me = GC_lookup_thread(pthread_self());
|
447 |
|
|
if (me == NULL)
|
448 |
|
|
ABORT("attempting to suspend unknown thread");
|
449 |
|
|
|
450 |
|
|
me -> flags |= SUSPENDED;
|
451 |
|
|
GC_start_blocking();
|
452 |
|
|
while (me -> flags & SUSPENDED)
|
453 |
|
|
GC_brief_async_signal_safe_sleep();
|
454 |
|
|
GC_end_blocking();
|
455 |
|
|
}
|
456 |
|
|
|
457 |
|
|
void GC_suspend_thread(pthread_t thread) {
|
458 |
|
|
if (thread == pthread_self())
|
459 |
|
|
suspend_self();
|
460 |
|
|
else {
|
461 |
|
|
int result;
|
462 |
|
|
GC_thread t = GC_lookup_thread(thread);
|
463 |
|
|
if (t == NULL)
|
464 |
|
|
ABORT("attempting to suspend unknown thread");
|
465 |
|
|
|
466 |
|
|
t -> flags |= SUSPENDED;
|
467 |
|
|
result = pthread_kill (t -> id, SIG_SUSPEND);
|
468 |
|
|
switch (result) {
|
469 |
|
|
case ESRCH:
|
470 |
|
|
case 0:
|
471 |
|
|
break;
|
472 |
|
|
default:
|
473 |
|
|
ABORT("pthread_kill failed");
|
474 |
|
|
}
|
475 |
|
|
}
|
476 |
|
|
}
|
477 |
|
|
|
478 |
|
|
void GC_resume_thread(pthread_t thread) {
|
479 |
|
|
GC_thread t = GC_lookup_thread(thread);
|
480 |
|
|
if (t == NULL)
|
481 |
|
|
ABORT("attempting to resume unknown thread");
|
482 |
|
|
|
483 |
|
|
t -> flags &= ~SUSPENDED;
|
484 |
|
|
}
|
485 |
|
|
|
486 |
|
|
int GC_is_thread_suspended(pthread_t thread) {
|
487 |
|
|
GC_thread t = GC_lookup_thread(thread);
|
488 |
|
|
if (t == NULL)
|
489 |
|
|
ABORT("querying suspension state of unknown thread");
|
490 |
|
|
|
491 |
|
|
return (t -> flags & SUSPENDED);
|
492 |
|
|
}
|
493 |
|
|
|
494 |
|
|
/* Caller holds allocation lock, and has held it continuously since */
|
495 |
|
|
/* the world stopped. */
|
496 |
|
|
void GC_start_world()
|
497 |
|
|
{
|
498 |
|
|
pthread_t my_thread = pthread_self();
|
499 |
|
|
register int i;
|
500 |
|
|
register GC_thread p;
|
501 |
|
|
register int n_live_threads = 0;
|
502 |
|
|
register int result;
|
503 |
|
|
|
504 |
|
|
# if DEBUG_THREADS
|
505 |
|
|
GC_printf0("World starting\n");
|
506 |
|
|
# endif
|
507 |
|
|
|
508 |
|
|
GC_world_is_stopped = FALSE;
|
509 |
|
|
for (i = 0; i < THREAD_TABLE_SZ; i++) {
|
510 |
|
|
for (p = GC_threads[i]; p != 0; p = p -> next) {
|
511 |
|
|
if (p -> id != my_thread) {
|
512 |
|
|
if (p -> flags & FINISHED) continue;
|
513 |
|
|
if (p -> thread_blocked) continue;
|
514 |
|
|
n_live_threads++;
|
515 |
|
|
#if DEBUG_THREADS
|
516 |
|
|
GC_printf1("Sending restart signal to 0x%lx\n", p -> id);
|
517 |
|
|
#endif
|
518 |
|
|
result = pthread_kill(p -> id, SIG_THR_RESTART);
|
519 |
|
|
switch(result) {
|
520 |
|
|
case ESRCH:
|
521 |
|
|
/* Not really there anymore. Possible? */
|
522 |
|
|
n_live_threads--;
|
523 |
|
|
break;
|
524 |
|
|
case 0:
|
525 |
|
|
break;
|
526 |
|
|
default:
|
527 |
|
|
ABORT("pthread_kill failed");
|
528 |
|
|
}
|
529 |
|
|
}
|
530 |
|
|
}
|
531 |
|
|
}
|
532 |
|
|
#if DEBUG_THREADS
|
533 |
|
|
GC_printf0("World started\n");
|
534 |
|
|
#endif
|
535 |
|
|
}
|
536 |
|
|
|
537 |
|
|
/* One-time setup of the stop-the-world machinery: the acknowledgement	*/
/* semaphore, the SIG_SUSPEND and SIG_THR_RESTART handlers, the		*/
/* sigsuspend() mask, and the GC_retry_signals environment overrides.	*/
void GC_stop_init() {
    struct sigaction act;
    
    if (sem_init(&GC_suspend_ack_sem, 0, 0) != 0)
        ABORT("sem_init failed");

    act.sa_flags = SA_RESTART;
    /* Block (almost) everything while the handlers run.		*/
    if (sigfillset(&act.sa_mask) != 0) {
    	ABORT("sigfillset() failed");
    }
    GC_remove_allowed_signals(&act.sa_mask);
    /* SIG_THR_RESTART is set in the resulting mask.		*/
    /* It is unmasked by the handler when necessary. 		*/
    act.sa_handler = GC_suspend_handler;
    if (sigaction(SIG_SUSPEND, &act, NULL) != 0) {
    	ABORT("Cannot set SIG_SUSPEND handler");
    }

    act.sa_handler = GC_restart_handler;
    if (sigaction(SIG_THR_RESTART, &act, NULL) != 0) {
    	ABORT("Cannot set SIG_THR_RESTART handler");
    }

    /* Initialize suspend_handler_mask.  It excludes SIG_THR_RESTART.	*/
      if (sigfillset(&suspend_handler_mask) != 0) ABORT("sigfillset() failed");
      GC_remove_allowed_signals(&suspend_handler_mask);
      if (sigdelset(&suspend_handler_mask, SIG_THR_RESTART) != 0)
	  ABORT("sigdelset() failed");

    /* Check for GC_RETRY_SIGNALS.	*/
      if (0 != GETENV("GC_RETRY_SIGNALS")) {
	  GC_retry_signals = TRUE;
      }
      if (0 != GETENV("GC_NO_RETRY_SIGNALS")) {
	  GC_retry_signals = FALSE;
      }
#     ifdef CONDPRINT
          if (GC_print_stats && GC_retry_signals) {
              GC_printf0("Will retry suspend signal if necessary.\n");
	  }
#     endif
}
|
579 |
|
|
|
580 |
|
|
#endif
|