/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/* Boehm, November 17, 1995 12:13 pm PST */
# include "private/gc_priv.h"
# include <stdio.h>
# include <setjmp.h>
# if defined(OS2) || defined(CX_UX)
#   define _setjmp(b) setjmp(b)
#   define _longjmp(b,v) longjmp(b,v)
# endif
# ifdef AMIGA
#   ifndef __GNUC__
#     include <dos.h>
#   else
#     include <machine/reg.h>
#   endif
# endif

#if defined(RS6000) || defined(POWERPC)
# include <ucontext.h>
#endif

#if defined(__MWERKS__) && !defined(POWERPC)

asm static void PushMacRegisters()
{
    sub.w   #4,sp                   // reserve space for one parameter.
    move.l  a2,(sp)
    jsr     GC_push_one
    move.l  a3,(sp)
    jsr     GC_push_one
    move.l  a4,(sp)
    jsr     GC_push_one
#   if !__option(a6frames)
    // <pcb> perhaps a6 should be pushed if stack frames are not being used.
    move.l  a6,(sp)
    jsr     GC_push_one
#   endif
    // skip a5 (globals), a6 (frame pointer), and a7 (stack pointer)
    move.l  d2,(sp)
    jsr     GC_push_one
    move.l  d3,(sp)
    jsr     GC_push_one
    move.l  d4,(sp)
    jsr     GC_push_one
    move.l  d5,(sp)
    jsr     GC_push_one
    move.l  d6,(sp)
    jsr     GC_push_one
    move.l  d7,(sp)
    jsr     GC_push_one
    add.w   #4,sp                   // fix stack.
    rts
}

#endif /* __MWERKS__ */

# if defined(SPARC) || defined(IA64)
    /* Value returned from register flushing routine; either sp (SPARC) */
    /* or ar.bsp (IA64)                                                  */
    word GC_save_regs_ret_val;
# endif

/* Routine to mark from registers that are preserved by the C compiler. */
/* This must be ported to every new architecture.  There is a generic   */
/* version at the end, that is likely, but not guaranteed to work       */
/* on your architecture.  Run the test_setjmp program to see whether    */
/* there is any chance it will work. */
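
/* For reference: the generic version referred to above (see            */
/* GC_with_callee_saves_pushed below) spills the callee-saved registers */
/* into a stack-allocated jmp_buf with setjmp(), so that the usual      */
/* stack scan then sees their contents.  A minimal sketch of the idea,  */
/* for illustration only (not compiled):                                */
#if 0
  {
    jmp_buf regs;               /* lives in this stack frame            */

    (void) _setjmp(regs);       /* copies callee-saved registers        */
                                /* into regs, i.e. onto the stack       */
    /* ... a scan of the stack now also covers the saved registers ... */
  }
#endif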

#if !defined(USE_GENERIC_PUSH_REGS) && !defined(USE_ASM_PUSH_REGS)
#undef HAVE_PUSH_REGS
void GC_push_regs()
{
#   ifdef RT
      register long TMP_SP; /* must be bound to r11 */
#   endif

#   ifdef VAX
      /* VAX - generic code below does not work under 4.2 */
      /* r1 through r5 are caller save, and therefore */
      /* on the stack or dead. */
      asm("pushl r11");     asm("calls $1,_GC_push_one");
      asm("pushl r10");     asm("calls $1,_GC_push_one");
      asm("pushl r9");      asm("calls $1,_GC_push_one");
      asm("pushl r8");      asm("calls $1,_GC_push_one");
      asm("pushl r7");      asm("calls $1,_GC_push_one");
      asm("pushl r6");      asm("calls $1,_GC_push_one");
#     define HAVE_PUSH_REGS
#   endif
#   if defined(M68K) && (defined(SUNOS4) || defined(NEXT))
      /* M68K SUNOS - could be replaced by generic code */
      /* a0, a1 and d1 are caller save */
      /* and therefore are on stack or dead. */

      asm("subqw #0x4,sp");     /* allocate word on top of stack */

      asm("movl a2,sp@");       asm("jbsr _GC_push_one");
      asm("movl a3,sp@");       asm("jbsr _GC_push_one");
      asm("movl a4,sp@");       asm("jbsr _GC_push_one");
      asm("movl a5,sp@");       asm("jbsr _GC_push_one");
      /* Skip frame pointer and stack pointer */
      asm("movl d1,sp@");       asm("jbsr _GC_push_one");
      asm("movl d2,sp@");       asm("jbsr _GC_push_one");
      asm("movl d3,sp@");       asm("jbsr _GC_push_one");
      asm("movl d4,sp@");       asm("jbsr _GC_push_one");
      asm("movl d5,sp@");       asm("jbsr _GC_push_one");
      asm("movl d6,sp@");       asm("jbsr _GC_push_one");
      asm("movl d7,sp@");       asm("jbsr _GC_push_one");

      asm("addqw #0x4,sp");     /* put stack back where it was */
#     define HAVE_PUSH_REGS
#   endif

#   if defined(M68K) && defined(HP)
      /* M68K HP - could be replaced by generic code */
      /* a0, a1 and d1 are caller save. */

      asm("subq.w &0x4,%sp");   /* allocate word on top of stack */

      asm("mov.l %a2,(%sp)");   asm("jsr _GC_push_one");
      asm("mov.l %a3,(%sp)");   asm("jsr _GC_push_one");
      asm("mov.l %a4,(%sp)");   asm("jsr _GC_push_one");
      asm("mov.l %a5,(%sp)");   asm("jsr _GC_push_one");
      /* Skip frame pointer and stack pointer */
      asm("mov.l %d1,(%sp)");   asm("jsr _GC_push_one");
      asm("mov.l %d2,(%sp)");   asm("jsr _GC_push_one");
      asm("mov.l %d3,(%sp)");   asm("jsr _GC_push_one");
      asm("mov.l %d4,(%sp)");   asm("jsr _GC_push_one");
      asm("mov.l %d5,(%sp)");   asm("jsr _GC_push_one");
      asm("mov.l %d6,(%sp)");   asm("jsr _GC_push_one");
      asm("mov.l %d7,(%sp)");   asm("jsr _GC_push_one");

      asm("addq.w &0x4,%sp");   /* put stack back where it was */
#     define HAVE_PUSH_REGS
#   endif /* M68K HP */

#   if defined(M68K) && defined(AMIGA)
      /* AMIGA - could be replaced by generic code */
      /* a0, a1, d0 and d1 are caller save */

#     ifdef __GNUC__
        asm("subq.w &0x4,%sp");   /* allocate word on top of stack */

        asm("mov.l %a2,(%sp)");   asm("jsr _GC_push_one");
        asm("mov.l %a3,(%sp)");   asm("jsr _GC_push_one");
        asm("mov.l %a4,(%sp)");   asm("jsr _GC_push_one");
        asm("mov.l %a5,(%sp)");   asm("jsr _GC_push_one");
        asm("mov.l %a6,(%sp)");   asm("jsr _GC_push_one");
        /* Skip frame pointer and stack pointer */
        asm("mov.l %d2,(%sp)");   asm("jsr _GC_push_one");
        asm("mov.l %d3,(%sp)");   asm("jsr _GC_push_one");
        asm("mov.l %d4,(%sp)");   asm("jsr _GC_push_one");
        asm("mov.l %d5,(%sp)");   asm("jsr _GC_push_one");
        asm("mov.l %d6,(%sp)");   asm("jsr _GC_push_one");
        asm("mov.l %d7,(%sp)");   asm("jsr _GC_push_one");

        asm("addq.w &0x4,%sp");   /* put stack back where it was */
#       define HAVE_PUSH_REGS
#     else /* !__GNUC__ */
        GC_push_one(getreg(REG_A2));
        GC_push_one(getreg(REG_A3));
#       ifndef __SASC
          /* Can probably be changed to #if 0 -Kjetil M. (a4=globals) */
          GC_push_one(getreg(REG_A4));
#       endif
        GC_push_one(getreg(REG_A5));
        GC_push_one(getreg(REG_A6));
        /* Skip stack pointer */
        GC_push_one(getreg(REG_D2));
        GC_push_one(getreg(REG_D3));
        GC_push_one(getreg(REG_D4));
        GC_push_one(getreg(REG_D5));
        GC_push_one(getreg(REG_D6));
        GC_push_one(getreg(REG_D7));
#       define HAVE_PUSH_REGS
#     endif /* !__GNUC__ */
#   endif /* AMIGA */

#   if defined(M68K) && defined(MACOS)
#     if defined(THINK_C)
#       define PushMacReg(reg) \
                move.l  reg,(sp) \
                jsr     GC_push_one
        asm {
                sub.w   #4,sp          ; reserve space for one parameter.
                PushMacReg(a2);
                PushMacReg(a3);
                PushMacReg(a4);
                ; skip a5 (globals), a6 (frame pointer), and a7 (stack pointer)
                PushMacReg(d2);
                PushMacReg(d3);
                PushMacReg(d4);
                PushMacReg(d5);
                PushMacReg(d6);
                PushMacReg(d7);
                add.w   #4,sp          ; fix stack.
        }
#       define HAVE_PUSH_REGS
#       undef PushMacReg
#     endif /* THINK_C */
#     if defined(__MWERKS__)
        PushMacRegisters();
#       define HAVE_PUSH_REGS
#     endif /* __MWERKS__ */
#   endif /* MACOS */

#   if defined(I386) &&!defined(OS2) &&!defined(SVR4) \
        && (defined(__MINGW32__) || !defined(MSWIN32)) \
        && !defined(SCO) && !defined(SCO_ELF) \
        && !(defined(LINUX) && defined(__ELF__)) \
        && !(defined(FREEBSD) && defined(__ELF__)) \
        && !(defined(NETBSD) && defined(__ELF__)) \
        && !(defined(OPENBSD) && defined(__ELF__)) \
        && !(defined(BEOS) && defined(__ELF__)) \
        && !defined(DOS4GW) && !defined(HURD)
      /* I386 code; the generic code does not appear to work. */
      /* It does appear to work under OS2, and asms don't. */
      /* This is used for some 386 UNIX variants and for CYGWIN32. */
      asm("pushl %eax");  asm("call _GC_push_one"); asm("addl $4,%esp");
      asm("pushl %ecx");  asm("call _GC_push_one"); asm("addl $4,%esp");
      asm("pushl %edx");  asm("call _GC_push_one"); asm("addl $4,%esp");
      asm("pushl %ebp");  asm("call _GC_push_one"); asm("addl $4,%esp");
      asm("pushl %esi");  asm("call _GC_push_one"); asm("addl $4,%esp");
      asm("pushl %edi");  asm("call _GC_push_one"); asm("addl $4,%esp");
      asm("pushl %ebx");  asm("call _GC_push_one"); asm("addl $4,%esp");
#     define HAVE_PUSH_REGS
#   endif

#   if ( defined(I386) && defined(LINUX) && defined(__ELF__) ) \
        || ( defined(I386) && defined(FREEBSD) && defined(__ELF__) ) \
        || ( defined(I386) && defined(NETBSD) && defined(__ELF__) ) \
        || ( defined(I386) && defined(OPENBSD) && defined(__ELF__) ) \
        || ( defined(I386) && defined(HURD) && defined(__ELF__) ) \
        || ( defined(I386) && defined(DGUX) )

      /* This is modified for Linux with ELF (Note: _ELF_ only) */
      /* This section handles FreeBSD with ELF. */
      /* Eax is caller-save and dead here.  Other caller-save        */
      /* registers could also be skipped.  We assume there are no    */
      /* pointers in MMX registers, etc.                             */
      /* We combine instructions in a single asm to prevent gcc from */
      /* inserting code in the middle.                               */
      asm("pushl %ecx; call GC_push_one; addl $4,%esp");
      asm("pushl %edx; call GC_push_one; addl $4,%esp");
      asm("pushl %ebp; call GC_push_one; addl $4,%esp");
      asm("pushl %esi; call GC_push_one; addl $4,%esp");
      asm("pushl %edi; call GC_push_one; addl $4,%esp");
      asm("pushl %ebx; call GC_push_one; addl $4,%esp");
#     define HAVE_PUSH_REGS
#   endif

#   if ( defined(I386) && defined(BEOS) && defined(__ELF__) )
      /* As far as I can understand from                      */
      /* http://www.beunited.org/articles/jbq/nasm.shtml,     */
      /* only ebp, esi, edi and ebx are not scratch.  How MMX */
      /* etc. registers should be treated, I have no idea.    */
      asm("pushl %ebp; call GC_push_one; addl $4,%esp");
      asm("pushl %esi; call GC_push_one; addl $4,%esp");
      asm("pushl %edi; call GC_push_one; addl $4,%esp");
      asm("pushl %ebx; call GC_push_one; addl $4,%esp");
#     define HAVE_PUSH_REGS
#   endif

#   if defined(I386) && defined(MSWIN32) && !defined(__MINGW32__) \
        && !defined(USE_GENERIC)
      /* I386 code, Microsoft variant */
      __asm  push eax
      __asm  call GC_push_one
      __asm  add esp,4
      __asm  push ebx
      __asm  call GC_push_one
      __asm  add esp,4
      __asm  push ecx
      __asm  call GC_push_one
      __asm  add esp,4
      __asm  push edx
      __asm  call GC_push_one
      __asm  add esp,4
      __asm  push ebp
      __asm  call GC_push_one
      __asm  add esp,4
      __asm  push esi
      __asm  call GC_push_one
      __asm  add esp,4
      __asm  push edi
      __asm  call GC_push_one
      __asm  add esp,4
#     define HAVE_PUSH_REGS
#   endif

#   if defined(I386) && (defined(SVR4) || defined(SCO) || defined(SCO_ELF))
      /* I386 code, SVR4 variant, generic code does not appear to work */
      asm("pushl %eax");  asm("call GC_push_one"); asm("addl $4,%esp");
      asm("pushl %ebx");  asm("call GC_push_one"); asm("addl $4,%esp");
      asm("pushl %ecx");  asm("call GC_push_one"); asm("addl $4,%esp");
      asm("pushl %edx");  asm("call GC_push_one"); asm("addl $4,%esp");
      asm("pushl %ebp");  asm("call GC_push_one"); asm("addl $4,%esp");
      asm("pushl %esi");  asm("call GC_push_one"); asm("addl $4,%esp");
      asm("pushl %edi");  asm("call GC_push_one"); asm("addl $4,%esp");
#     define HAVE_PUSH_REGS
#   endif

#   ifdef NS32K
      asm ("movd r3, tos"); asm ("bsr ?_GC_push_one"); asm ("adjspb $-4");
      asm ("movd r4, tos"); asm ("bsr ?_GC_push_one"); asm ("adjspb $-4");
      asm ("movd r5, tos"); asm ("bsr ?_GC_push_one"); asm ("adjspb $-4");
      asm ("movd r6, tos"); asm ("bsr ?_GC_push_one"); asm ("adjspb $-4");
      asm ("movd r7, tos"); asm ("bsr ?_GC_push_one"); asm ("adjspb $-4");
#     define HAVE_PUSH_REGS
#   endif

#   if defined(SPARC)
      GC_save_regs_ret_val = GC_save_regs_in_stack();
#     define HAVE_PUSH_REGS
#   endif

#   ifdef RT
      GC_push_one(TMP_SP);    /* GC_push_one from r11 */

      asm("cas r11, r6, r0");   GC_push_one(TMP_SP);  /* r6 */
      asm("cas r11, r7, r0");   GC_push_one(TMP_SP);  /* through */
      asm("cas r11, r8, r0");   GC_push_one(TMP_SP);  /* r10 */
      asm("cas r11, r9, r0");   GC_push_one(TMP_SP);
      asm("cas r11, r10, r0");  GC_push_one(TMP_SP);

      asm("cas r11, r12, r0");  GC_push_one(TMP_SP);  /* r12 */
      asm("cas r11, r13, r0");  GC_push_one(TMP_SP);  /* through */
      asm("cas r11, r14, r0");  GC_push_one(TMP_SP);  /* r15 */
      asm("cas r11, r15, r0");  GC_push_one(TMP_SP);
#     define HAVE_PUSH_REGS
#   endif

#   if defined(M68K) && defined(SYSV)
      /* Once again similar to SUN and HP, though setjmp appears to work.
         --Parag
       */
#     ifdef __GNUC__
        asm("subqw #0x4,%sp");    /* allocate word on top of stack */

        asm("movl %a2,%sp@");     asm("jbsr GC_push_one");
        asm("movl %a3,%sp@");     asm("jbsr GC_push_one");
        asm("movl %a4,%sp@");     asm("jbsr GC_push_one");
        asm("movl %a5,%sp@");     asm("jbsr GC_push_one");
        /* Skip frame pointer and stack pointer */
        asm("movl %d1,%sp@");     asm("jbsr GC_push_one");
        asm("movl %d2,%sp@");     asm("jbsr GC_push_one");
        asm("movl %d3,%sp@");     asm("jbsr GC_push_one");
        asm("movl %d4,%sp@");     asm("jbsr GC_push_one");
        asm("movl %d5,%sp@");     asm("jbsr GC_push_one");
        asm("movl %d6,%sp@");     asm("jbsr GC_push_one");
        asm("movl %d7,%sp@");     asm("jbsr GC_push_one");

        asm("addqw #0x4,%sp");    /* put stack back where it was */
#       define HAVE_PUSH_REGS
#     else /* !__GNUC__ */
        asm("subq.w &0x4,%sp");   /* allocate word on top of stack */

        asm("mov.l %a2,(%sp)");   asm("jsr GC_push_one");
        asm("mov.l %a3,(%sp)");   asm("jsr GC_push_one");
        asm("mov.l %a4,(%sp)");   asm("jsr GC_push_one");
        asm("mov.l %a5,(%sp)");   asm("jsr GC_push_one");
        /* Skip frame pointer and stack pointer */
        asm("mov.l %d1,(%sp)");   asm("jsr GC_push_one");
        asm("mov.l %d2,(%sp)");   asm("jsr GC_push_one");
        asm("mov.l %d3,(%sp)");   asm("jsr GC_push_one");
        asm("mov.l %d4,(%sp)");   asm("jsr GC_push_one");
        asm("mov.l %d5,(%sp)");   asm("jsr GC_push_one");
        asm("mov.l %d6,(%sp)");   asm("jsr GC_push_one");
        asm("mov.l %d7,(%sp)");   asm("jsr GC_push_one");

        asm("addq.w &0x4,%sp");   /* put stack back where it was */
#       define HAVE_PUSH_REGS
#     endif /* !__GNUC__ */
#   endif /* M68K/SYSV */

#   if defined(PJ)
      {
        register int * sp asm ("optop");
        extern int *__libc_stack_end;

        GC_push_all_stack (sp, __libc_stack_end);
#       define HAVE_PUSH_REGS
        /* Isn't this redundant with the code to push the stack? */
      }
#   endif

    /* other machines... */
#   if !defined(HAVE_PUSH_REGS)
      --> We just generated an empty GC_push_regs, which
      --> is almost certainly broken.  Try defining
      --> USE_GENERIC_PUSH_REGS instead.
#   endif
}
#endif /* !USE_GENERIC_PUSH_REGS && !USE_ASM_PUSH_REGS */

void GC_with_callee_saves_pushed(fn, arg)
void (*fn)();
ptr_t arg;
{
    word dummy;

#   if defined(USE_GENERIC_PUSH_REGS)
#     ifdef HAVE_BUILTIN_UNWIND_INIT
        /* This was suggested by Richard Henderson as the way to */
        /* force callee-save registers and register windows onto */
        /* the stack.                                             */
        __builtin_unwind_init();
#     else /* !HAVE_BUILTIN_UNWIND_INIT */
#       if defined(RS6000) || defined(POWERPC)
          /* FIXME: RS6000 means AIX. */
          /* This should probably be used in all Posix/non-gcc */
          /* settings.  We defer that change to minimize risk. */
          ucontext_t ctxt;
          getcontext(&ctxt);
#       else
          /* Generic code                          */
          /* The idea is due to Parag Patel at HP. */
          /* We're not sure whether he would like  */
          /* to be acknowledged for it or not.     */
          jmp_buf regs;
          register word * i = (word *) regs;
          register ptr_t lim = (ptr_t)(regs) + (sizeof regs);

          /* Setjmp doesn't always clear all of the buffer. */
          /* That tends to preserve garbage.  Clear it.     */
          for (; (char *)i < lim; i++) {
              *i = 0;
          }
#         if defined(MSWIN32) || defined(MSWINCE) \
             || defined(UTS4) || defined(LINUX) || defined(EWS4800)
            (void) setjmp(regs);
#         else
            (void) _setjmp(regs);
            /* We don't want to mess with signals.  According to */
            /* SUSV3, setjmp() may or may not save signal mask.  */
            /* _setjmp won't, but is less portable.              */
#         endif
#       endif /* !AIX ... */
#     endif /* !HAVE_BUILTIN_UNWIND_INIT */
#   else
#     if defined(PTHREADS) && !defined(MSWIN32) /* !USE_GENERIC_PUSH_REGS */
        /* We may still need this to save thread contexts. */
        ucontext_t ctxt;
        getcontext(&ctxt);
#     else /* Shouldn't be needed */
        ABORT("Unexpected call to GC_with_callee_saves_pushed");
#     endif
#   endif
#   if (defined(SPARC) && !defined(HAVE_BUILTIN_UNWIND_INIT)) \
        || defined(IA64)
      /* On a register window machine, we need to save register */
      /* contents on the stack for this to work.  The setjmp    */
      /* is probably not needed on SPARC, since pointers are    */
      /* only stored in windowed or scratch registers.  It is   */
      /* needed on IA64, since some non-windowed registers are  */
      /* preserved.                                             */
      {
        GC_save_regs_ret_val = GC_save_regs_in_stack();
        /* On IA64 gcc, could use __builtin_ia64_bsp() and     */
        /* __builtin_ia64_flushrs().  The latter will be done  */
        /* implicitly by __builtin_unwind_init() for gcc 3.0.1 */
        /* and later.                                           */
      }
#   endif
    fn(arg);
    /* Strongly discourage the compiler from treating the above */
    /* as a tail-call, since that would pop the register        */
    /* contents before we get a chance to look at them.         */
    GC_noop1((word)(&dummy));
}

#if defined(USE_GENERIC_PUSH_REGS)
void GC_generic_push_regs(cold_gc_frame)
ptr_t cold_gc_frame;
{
    GC_with_callee_saves_pushed(GC_push_current_stack, cold_gc_frame);
}
#endif /* USE_GENERIC_PUSH_REGS */

/* On register window machines, we need a way to force registers into */
/* the stack.  Return sp.                                              */
# ifdef SPARC
    asm(" .seg \"text\"");
#   if defined(SVR4) || defined(NETBSD) || defined(FREEBSD)
      asm(" .globl GC_save_regs_in_stack");
      asm("GC_save_regs_in_stack:");
      asm(" .type GC_save_regs_in_stack,#function");
#   else
      asm(" .globl _GC_save_regs_in_stack");
      asm("_GC_save_regs_in_stack:");
#   endif
#   if defined(__arch64__) || defined(__sparcv9)
      asm(" save %sp,-128,%sp");
      asm(" flushw");
      asm(" ret");
      asm(" restore %sp,2047+128,%o0");
#   else
      asm(" ta 0x3 ! ST_FLUSH_WINDOWS");
      asm(" retl");
      asm(" mov %sp,%o0");
#   endif
#   ifdef SVR4
      asm(" .GC_save_regs_in_stack_end:");
      asm(" .size GC_save_regs_in_stack,.GC_save_regs_in_stack_end-GC_save_regs_in_stack");
#   endif
#   ifdef LINT
      word GC_save_regs_in_stack() { return(0 /* sp really */); }
#   endif
# endif

/* On IA64, we also need to flush register windows.  But they end   */
/* up on the other side of the stack segment.                       */
/* Returns the backing store pointer for the register stack.        */
/* We now implement this as a separate assembly file, since inline  */
/* assembly code here doesn't work with either the Intel or HP      */
/* compilers.                                                        */
# if 0
#   ifdef LINUX
      asm(" .text");
      asm(" .psr abi64");
      asm(" .psr lsb");
      asm(" .lsb");
      asm("");
      asm(" .text");
      asm(" .align 16");
      asm(" .global GC_save_regs_in_stack");
      asm(" .proc GC_save_regs_in_stack");
      asm("GC_save_regs_in_stack:");
      asm(" .body");
      asm(" flushrs");
      asm(" ;;");
      asm(" mov r8=ar.bsp");
      asm(" br.ret.sptk.few rp");
      asm(" .endp GC_save_regs_in_stack");
#   endif /* LINUX */
#   if 0 /* Other alternatives that don't work on HP/UX */
      word GC_save_regs_in_stack() {
#     if USE_BUILTINS
        __builtin_ia64_flushrs();
        return __builtin_ia64_bsp();
#     else
#       ifdef HPUX
          _asm(" flushrs");
          _asm(" ;;");
          _asm(" mov r8=ar.bsp");
          _asm(" br.ret.sptk.few rp");
#       else
          asm(" flushrs");
          asm(" ;;");
          asm(" mov r8=ar.bsp");
          asm(" br.ret.sptk.few rp");
#       endif
#     endif
      }
#   endif
# endif

/* GC_clear_stack_inner(arg, limit) clears stack area up to limit and */
/* returns arg.  Stack clearing is crucial on SPARC, so we supply     */
/* an assembly version that's more careful.  Assumes limit is hotter  */
/* than sp, and limit is 8 byte aligned. */
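/* For reference, the assembly below is roughly equivalent to the     */
/* following C loop; an illustrative sketch only, not compiled here   */
/* (the real code clears 8 bytes at a time and also moves sp out of   */
/* the way so that traps keep working; GC_approx_sp() is used below   */
/* just to stand in for the current stack pointer):                   */
#if 0
  ptr_t GC_clear_stack_inner(ptr_t arg, word limit)
  {
      word *p = (word *)GC_approx_sp();   /* start near the current sp */

      while ((word)p > limit) {
          *--p = 0;                       /* clear down towards limit  */
      }
      return(arg);
  }
#endif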
#if defined(ASM_CLEAR_CODE)
#ifndef SPARC
    --> fix it
#endif
# ifdef SUNOS4
  asm(".globl _GC_clear_stack_inner");
  asm("_GC_clear_stack_inner:");
# else
  asm(".globl GC_clear_stack_inner");
  asm("GC_clear_stack_inner:");
asm(".type GC_save_regs_in_stack,#function");
# endif
#if defined(__arch64__) || defined(__sparcv9)
  asm("mov %sp,%o2");           /* Save sp                     */
  asm("add %sp,2047-8,%o3");    /* p = sp+bias-8               */
  asm("add %o1,-2047-192,%sp"); /* Move sp out of the way,     */
                                /* so that traps still work.   */
                                /* Includes some extra words   */
                                /* so we can be sloppy below.  */
  asm("loop:");
  asm("stx %g0,[%o3]");         /* *(long *)p = 0              */
  asm("cmp %o3,%o1");
  asm("bgu,pt %xcc, loop");     /* if (p > limit) goto loop    */
  asm("add %o3,-8,%o3");        /* p -= 8 (delay slot)         */
  asm("retl");
  asm("mov %o2,%sp");           /* Restore sp., delay slot     */
#else
  asm("mov %sp,%o2");           /* Save sp                     */
  asm("add %sp,-8,%o3");        /* p = sp-8                    */
  asm("clr %g1");               /* [g0,g1] = 0                 */
  asm("add %o1,-0x60,%sp");     /* Move sp out of the way,     */
                                /* so that traps still work.   */
                                /* Includes some extra words   */
                                /* so we can be sloppy below.  */
  asm("loop:");
  asm("std %g0,[%o3]");         /* *(long long *)p = 0         */
  asm("cmp %o3,%o1");
  asm("bgu loop");              /* if (p > limit) goto loop    */
  asm("add %o3,-8,%o3");        /* p -= 8 (delay slot)         */
  asm("retl");
  asm("mov %o2,%sp");           /* Restore sp., delay slot     */
#endif /* old SPARC */
  /* First argument = %o0 = return value */
# ifdef SVR4
  asm(" .GC_clear_stack_inner_end:");
  asm(" .size GC_clear_stack_inner,.GC_clear_stack_inner_end-GC_clear_stack_inner");
# endif

# ifdef LINT
  /*ARGSUSED*/
  ptr_t GC_clear_stack_inner(arg, limit)
  ptr_t arg; word limit;
  { return(arg); }
# endif
#endif