1 |
1275 |
phoenix |
/*
|
2 |
|
|
* ia64/kernel/entry.S
|
3 |
|
|
*
|
4 |
|
|
* Kernel entry points.
|
5 |
|
|
*
|
6 |
|
|
* Copyright (C) 2002-2003
|
7 |
|
|
* Suresh Siddha
|
8 |
|
|
* Fenghua Yu
|
9 |
|
|
* Copyright (C) 1998-2002 Hewlett-Packard Co
|
10 |
|
|
* David Mosberger-Tang
|
11 |
|
|
* Copyright (C) 1999 VA Linux Systems
|
12 |
|
|
* Copyright (C) 1999 Walt Drummond
|
13 |
|
|
* Copyright (C) 1999 Asit Mallick
|
14 |
|
|
* Copyright (C) 1999 Don Dugger
|
15 |
|
|
*/
|
16 |
|
|
/*
|
17 |
|
|
 * ia64_switch_to now places correct virtual mapping in TR2 for
|
18 |
|
|
* kernel stack. This allows us to handle interrupts without changing
|
19 |
|
|
* to physical mode.
|
20 |
|
|
*
|
21 |
|
|
* Jonathan Nicklin
|
22 |
|
|
* Patrick O'Rourke
|
23 |
|
|
* 11/07/2000
|
24 |
|
|
 */
|
25 |
|
|
/*
|
26 |
|
|
* Global (preserved) predicate usage on syscall entry/exit path:
|
27 |
|
|
*
|
28 |
|
|
* pKern: See entry.h.
|
29 |
|
|
* pUser: See entry.h.
|
30 |
|
|
* pSys: See entry.h.
|
31 |
|
|
* pNonSys: !pSys
|
32 |
|
|
*/
|
33 |
|
|
|
34 |
|
|
#include
|
35 |
|
|
|
36 |
|
|
#include
|
37 |
|
|
#include
|
38 |
|
|
#include
|
39 |
|
|
#include
|
40 |
|
|
#include
|
41 |
|
|
#include
|
42 |
|
|
#include
|
43 |
|
|
#include
|
44 |
|
|
|
45 |
|
|
#include "minstate.h"
|
46 |
|
|
|
47 |
|
|
/*
|
48 |
|
|
* execve() is special because in case of success, we need to
|
49 |
|
|
* setup a null register window frame.
|
50 |
|
|
*/
|
51 |
|
|
ENTRY(ia64_execve)
|
52 |
|
|
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(3)
|
53 |
|
|
alloc loc1=ar.pfs,3,2,4,0
|
54 |
|
|
/* Leave from kernel and restore all pt_regs to correspending registers. This is special
|
55 |
|
|
* because ia32 application needs scratch registers after return from execve.
|
56 |
|
|
*/
|
57 |
|
|
movl loc0=ia64_ret_from_execve_syscall
|
58 |
|
|
.body
|
59 |
|
|
mov out0=in0 // filename
|
60 |
|
|
;; // stop bit between alloc and call
|
61 |
|
|
mov out1=in1 // argv
|
62 |
|
|
mov out2=in2 // envp
|
63 |
|
|
add out3=16,sp // regs
|
64 |
|
|
br.call.sptk.many rp=sys_execve
|
65 |
|
|
.ret0: cmp4.ge p6,p7=r8,r0
|
66 |
|
|
mov ar.pfs=loc1 // restore ar.pfs
|
67 |
|
|
sxt4 r8=r8 // return 64-bit result
|
68 |
|
|
;;
|
69 |
|
|
stf.spill [sp]=f0
|
70 |
|
|
(p6) cmp.ne pKern,pUser=r0,r0 // a successful execve() lands us in user-mode...
|
71 |
|
|
mov rp=loc0
|
72 |
|
|
(p6) mov ar.pfs=r0 // clear ar.pfs on success
|
73 |
|
|
(p7) br.ret.sptk.many rp
|
74 |
|
|
|
75 |
|
|
/*
|
76 |
|
|
* In theory, we'd have to zap this state only to prevent leaking of
|
77 |
|
|
* security sensitive state (e.g., if current->mm->dumpable is zero). However,
|
78 |
|
|
* this executes in less than 20 cycles even on Itanium, so it's not worth
|
79 |
|
|
* optimizing for...).
|
80 |
|
|
*/
|
81 |
|
|
mov ar.unat=0; mov ar.lc=0;
|
82 |
|
|
mov r4=0; mov f2=f0; mov b1=r0
|
83 |
|
|
mov r5=0; mov f3=f0; mov b2=r0
|
84 |
|
|
mov r6=0; mov f4=f0; mov b3=r0
|
85 |
|
|
mov r7=0; mov f5=f0; mov b4=r0
|
86 |
|
|
ldf.fill f12=[sp]; mov f13=f0; mov b5=r0
|
87 |
|
|
ldf.fill f14=[sp]; ldf.fill f15=[sp]; mov f16=f0
|
88 |
|
|
ldf.fill f17=[sp]; ldf.fill f18=[sp]; mov f19=f0
|
89 |
|
|
ldf.fill f20=[sp]; ldf.fill f21=[sp]; mov f22=f0
|
90 |
|
|
ldf.fill f23=[sp]; ldf.fill f24=[sp]; mov f25=f0
|
91 |
|
|
ldf.fill f26=[sp]; ldf.fill f27=[sp]; mov f28=f0
|
92 |
|
|
ldf.fill f29=[sp]; ldf.fill f30=[sp]; mov f31=f0
|
93 |
|
|
br.ret.sptk.many rp
|
94 |
|
|
END(ia64_execve)
|
95 |
|
|
|
96 |
|
|
GLOBAL_ENTRY(sys_clone2)
|
97 |
|
|
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
|
98 |
|
|
alloc r16=ar.pfs,3,2,4,0
|
99 |
|
|
DO_SAVE_SWITCH_STACK
|
100 |
|
|
mov loc0=rp
|
101 |
|
|
mov loc1=r16 // save ar.pfs across do_fork
|
102 |
|
|
.body
|
103 |
|
|
mov out1=in1
|
104 |
|
|
mov out3=in2
|
105 |
|
|
adds out2=IA64_SWITCH_STACK_SIZE+16,sp // out2 = ®s
|
106 |
|
|
mov out0=in0 // out0 = clone_flags
|
107 |
|
|
br.call.sptk.many rp=do_fork
|
108 |
|
|
.ret1: .restore sp
|
109 |
|
|
adds sp=IA64_SWITCH_STACK_SIZE,sp // pop the switch stack
|
110 |
|
|
mov ar.pfs=loc1
|
111 |
|
|
mov rp=loc0
|
112 |
|
|
br.ret.sptk.many rp
|
113 |
|
|
END(sys_clone2)
|
114 |
|
|
|
115 |
|
|
GLOBAL_ENTRY(sys_clone)
|
116 |
|
|
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
|
117 |
|
|
alloc r16=ar.pfs,2,2,4,0
|
118 |
|
|
DO_SAVE_SWITCH_STACK
|
119 |
|
|
mov loc0=rp
|
120 |
|
|
mov loc1=r16 // save ar.pfs across do_fork
|
121 |
|
|
.body
|
122 |
|
|
mov out1=in1
|
123 |
|
|
mov out3=16 // stacksize (compensates for 16-byte scratch area)
|
124 |
|
|
adds out2=IA64_SWITCH_STACK_SIZE+16,sp // out2 = ®s
|
125 |
|
|
mov out0=in0 // out0 = clone_flags
|
126 |
|
|
br.call.sptk.many rp=do_fork
|
127 |
|
|
.ret2: .restore sp
|
128 |
|
|
adds sp=IA64_SWITCH_STACK_SIZE,sp // pop the switch stack
|
129 |
|
|
mov ar.pfs=loc1
|
130 |
|
|
mov rp=loc0
|
131 |
|
|
br.ret.sptk.many rp
|
132 |
|
|
END(sys_clone)
|
133 |
|
|
|
134 |
|
|
/*
|
135 |
|
|
* prev_task <- ia64_switch_to(struct task_struct *next)
|
136 |
|
|
*/
|
137 |
|
|
GLOBAL_ENTRY(ia64_switch_to)
|
138 |
|
|
.prologue
|
139 |
|
|
alloc r16=ar.pfs,1,0,0,0
|
140 |
|
|
DO_SAVE_SWITCH_STACK
|
141 |
|
|
.body
|
142 |
|
|
|
143 |
|
|
adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13
|
144 |
|
|
mov r27=IA64_KR(CURRENT_STACK)
|
145 |
|
|
dep r20=0,in0,61,3 // physical address of "current"
|
146 |
|
|
;;
|
147 |
|
|
st8 [r22]=sp // save kernel stack pointer of old task
|
148 |
|
|
shr.u r26=r20,IA64_GRANULE_SHIFT
|
149 |
|
|
shr.u r17=r20,KERNEL_TR_PAGE_SHIFT
|
150 |
|
|
;;
|
151 |
|
|
cmp.ne p6,p7=KERNEL_TR_PAGE_NUM,r17
|
152 |
|
|
adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
|
153 |
|
|
;;
|
154 |
|
|
/*
|
155 |
|
|
* If we've already mapped this task's page, we can skip doing it again.
|
156 |
|
|
*/
|
157 |
|
|
(p6) cmp.eq p7,p6=r26,r27
|
158 |
|
|
(p6) br.cond.dpnt .map
|
159 |
|
|
;;
|
160 |
|
|
.done:
|
161 |
|
|
(p6) ssm psr.ic // if we we had to map, renable the psr.ic bit FIRST!!!
|
162 |
|
|
;;
|
163 |
|
|
(p6) srlz.d
|
164 |
|
|
ld8 sp=[r21] // load kernel stack pointer of new task
|
165 |
|
|
mov IA64_KR(CURRENT)=r20 // update "current" application register
|
166 |
|
|
mov r8=r13 // return pointer to previously running task
|
167 |
|
|
mov r13=in0 // set "current" pointer
|
168 |
|
|
;;
|
169 |
|
|
ssm psr.i // renable psr.i AFTER the ic bit is serialized
|
170 |
|
|
DO_LOAD_SWITCH_STACK
|
171 |
|
|
|
172 |
|
|
#ifdef CONFIG_SMP
|
173 |
|
|
sync.i // ensure "fc"s done by this CPU are visible on other CPUs
|
174 |
|
|
#endif
|
175 |
|
|
br.ret.sptk.many rp // boogie on out in new context
|
176 |
|
|
|
177 |
|
|
.map:
|
178 |
|
|
rsm psr.i | psr.ic
|
179 |
|
|
movl r25=PAGE_KERNEL
|
180 |
|
|
;;
|
181 |
|
|
srlz.d
|
182 |
|
|
or r23=r25,r20 // construct PA | page properties
|
183 |
|
|
mov r25=IA64_GRANULE_SHIFT<<2
|
184 |
|
|
;;
|
185 |
|
|
mov cr.itir=r25
|
186 |
|
|
mov cr.ifa=in0 // VA of next task...
|
187 |
|
|
;;
|
188 |
|
|
mov r25=IA64_TR_CURRENT_STACK
|
189 |
|
|
mov IA64_KR(CURRENT_STACK)=r26 // remember last page we mapped...
|
190 |
|
|
;;
|
191 |
|
|
itr.d dtr[r25]=r23 // wire in new mapping...
|
192 |
|
|
br.cond.sptk .done
|
193 |
|
|
END(ia64_switch_to)
|
194 |
|
|
|
195 |
|
|
/*
|
196 |
|
|
* Note that interrupts are enabled during save_switch_stack and
|
197 |
|
|
* load_switch_stack. This means that we may get an interrupt with
|
198 |
|
|
* "sp" pointing to the new kernel stack while ar.bspstore is still
|
199 |
|
|
* pointing to the old kernel backing store area. Since ar.rsc,
|
200 |
|
|
* ar.rnat, ar.bsp, and ar.bspstore are all preserved by interrupts,
|
201 |
|
|
* this is not a problem. Also, we don't need to specify unwind
|
202 |
|
|
* information for preserved registers that are not modified in
|
203 |
|
|
* save_switch_stack as the right unwind information is already
|
204 |
|
|
* specified at the call-site of save_switch_stack.
|
205 |
|
|
*/
|
206 |
|
|
|
207 |
|
|
/*
|
208 |
|
|
* save_switch_stack:
|
209 |
|
|
* - r16 holds ar.pfs
|
210 |
|
|
* - b7 holds address to return to
|
211 |
|
|
* - rp (b0) holds return address to save
|
212 |
|
|
*/
|
213 |
|
|
GLOBAL_ENTRY(save_switch_stack)
|
214 |
|
|
.prologue
|
215 |
|
|
.altrp b7
|
216 |
|
|
flushrs // flush dirty regs to backing store (must be first in insn group)
|
217 |
|
|
.save @priunat,r17
|
218 |
|
|
mov r17=ar.unat // preserve caller's
|
219 |
|
|
.body
|
220 |
|
|
adds r3=80,sp
|
221 |
|
|
;;
|
222 |
|
|
lfetch.fault.excl.nt1 [r3],128
|
223 |
|
|
mov ar.rsc=0 // put RSE in mode: enforced lazy, little endian, pl 0
|
224 |
|
|
adds r2=16+128,sp
|
225 |
|
|
;;
|
226 |
|
|
lfetch.fault.excl.nt1 [r2],128
|
227 |
|
|
lfetch.fault.excl.nt1 [r3],128
|
228 |
|
|
adds r14=SW(R4)+16,sp
|
229 |
|
|
;;
|
230 |
|
|
lfetch.fault.excl [r2]
|
231 |
|
|
lfetch.fault.excl [r3]
|
232 |
|
|
adds r15=SW(R5)+16,sp
|
233 |
|
|
;;
|
234 |
|
|
mov r18=ar.fpsr // preserve fpsr
|
235 |
|
|
mov r19=ar.rnat
|
236 |
|
|
add r2=SW(F2)+16,sp // r2 = &sw->f2
|
237 |
|
|
.mem.offset 0,0; st8.spill [r14]=r4,16 // spill r4
|
238 |
|
|
.mem.offset 8,0; st8.spill [r15]=r5,16 // spill r5
|
239 |
|
|
add r3=SW(F3)+16,sp // r3 = &sw->f3
|
240 |
|
|
;;
|
241 |
|
|
stf.spill [r2]=f2,32
|
242 |
|
|
stf.spill [r3]=f3,32
|
243 |
|
|
mov r21=b0
|
244 |
|
|
.mem.offset 0,0; st8.spill [r14]=r6,16 // spill r6
|
245 |
|
|
.mem.offset 8,0; st8.spill [r15]=r7,16 // spill r7
|
246 |
|
|
mov r22=b1
|
247 |
|
|
;;
|
248 |
|
|
// since we're done with the spills, read and save ar.unat:
|
249 |
|
|
mov r29=ar.unat // M-unit
|
250 |
|
|
mov r20=ar.bspstore // M-unit
|
251 |
|
|
mov r23=b2
|
252 |
|
|
stf.spill [r2]=f4,32
|
253 |
|
|
stf.spill [r3]=f5,32
|
254 |
|
|
mov r24=b3
|
255 |
|
|
;;
|
256 |
|
|
st8 [r14]=r21,16 // save b0
|
257 |
|
|
st8 [r15]=r22,16 // save b1
|
258 |
|
|
mov r25=b4
|
259 |
|
|
mov r26=b5
|
260 |
|
|
;;
|
261 |
|
|
st8 [r14]=r23,16 // save b2
|
262 |
|
|
st8 [r15]=r24,16 // save b3
|
263 |
|
|
mov r21=ar.lc // I-unit
|
264 |
|
|
stf.spill [r2]=f12,32
|
265 |
|
|
stf.spill [r3]=f13,32
|
266 |
|
|
;;
|
267 |
|
|
st8 [r14]=r25,16 // save b4
|
268 |
|
|
st8 [r15]=r26,16 // save b5
|
269 |
|
|
stf.spill [r2]=f14,32
|
270 |
|
|
stf.spill [r3]=f15,32
|
271 |
|
|
;;
|
272 |
|
|
st8 [r14]=r16 // save ar.pfs
|
273 |
|
|
st8 [r15]=r21 // save ar.lc
|
274 |
|
|
stf.spill [r2]=f16,32
|
275 |
|
|
stf.spill [r3]=f17,32
|
276 |
|
|
;;
|
277 |
|
|
stf.spill [r2]=f18,32
|
278 |
|
|
stf.spill [r3]=f19,32
|
279 |
|
|
;;
|
280 |
|
|
stf.spill [r2]=f20,32
|
281 |
|
|
stf.spill [r3]=f21,32
|
282 |
|
|
;;
|
283 |
|
|
stf.spill [r2]=f22,32
|
284 |
|
|
stf.spill [r3]=f23,32
|
285 |
|
|
;;
|
286 |
|
|
stf.spill [r2]=f24,32
|
287 |
|
|
stf.spill [r3]=f25,32
|
288 |
|
|
add r14=SW(CALLER_UNAT)+16,sp
|
289 |
|
|
;;
|
290 |
|
|
stf.spill [r2]=f26,32
|
291 |
|
|
stf.spill [r3]=f27,32
|
292 |
|
|
add r15=SW(AR_FPSR)+16,sp
|
293 |
|
|
;;
|
294 |
|
|
stf.spill [r2]=f28,32
|
295 |
|
|
stf.spill [r3]=f29,32
|
296 |
|
|
st8 [r14]=r17 // save caller_unat
|
297 |
|
|
st8 [r15]=r18 // save fpsr
|
298 |
|
|
mov r21=pr
|
299 |
|
|
;;
|
300 |
|
|
stf.spill [r2]=f30,(SW(AR_UNAT)-SW(F30))
|
301 |
|
|
stf.spill [r3]=f31,(SW(AR_RNAT)-SW(F31))
|
302 |
|
|
;;
|
303 |
|
|
st8 [r2]=r29,16 // save ar.unat
|
304 |
|
|
st8 [r3]=r19,16 // save ar.rnat
|
305 |
|
|
;;
|
306 |
|
|
st8 [r2]=r20 // save ar.bspstore
|
307 |
|
|
st8 [r3]=r21 // save predicate registers
|
308 |
|
|
mov ar.rsc=3 // put RSE back into eager mode, pl 0
|
309 |
|
|
br.cond.sptk.many b7
|
310 |
|
|
END(save_switch_stack)
|
311 |
|
|
|
312 |
|
|
/*
|
313 |
|
|
* load_switch_stack:
|
314 |
|
|
* - "invala" MUST be done at call site (normally in DO_LOAD_SWITCH_STACK)
|
315 |
|
|
* - b7 holds address to return to
|
316 |
|
|
* - must not touch r8-r11
|
317 |
|
|
*/
|
318 |
|
|
ENTRY(load_switch_stack)
|
319 |
|
|
.prologue
|
320 |
|
|
.altrp b7
|
321 |
|
|
|
322 |
|
|
.body
|
323 |
|
|
lfetch.fault.nt1 [sp]
|
324 |
|
|
adds r2=SW(AR_BSPSTORE)+16,sp
|
325 |
|
|
adds r3=SW(AR_UNAT)+16,sp
|
326 |
|
|
mov ar.rsc=0 // put RSE into enforced lazy mode
|
327 |
|
|
adds r14=SW(CALLER_UNAT)+16,sp
|
328 |
|
|
adds r15=SW(AR_FPSR)+16,sp
|
329 |
|
|
;;
|
330 |
|
|
ld8 r27=[r2],(SW(B0)-SW(AR_BSPSTORE)) // bspstore
|
331 |
|
|
ld8 r29=[r3],(SW(B1)-SW(AR_UNAT)) // unat
|
332 |
|
|
;;
|
333 |
|
|
ld8 r21=[r2],16 // restore b0
|
334 |
|
|
ld8 r22=[r3],16 // restore b1
|
335 |
|
|
;;
|
336 |
|
|
ld8 r23=[r2],16 // restore b2
|
337 |
|
|
ld8 r24=[r3],16 // restore b3
|
338 |
|
|
;;
|
339 |
|
|
ld8 r25=[r2],16 // restore b4
|
340 |
|
|
ld8 r26=[r3],16 // restore b5
|
341 |
|
|
;;
|
342 |
|
|
ld8 r16=[r2],(SW(PR)-SW(AR_PFS)) // ar.pfs
|
343 |
|
|
ld8 r17=[r3],(SW(AR_RNAT)-SW(AR_LC)) // ar.lc
|
344 |
|
|
;;
|
345 |
|
|
ld8 r28=[r2] // restore pr
|
346 |
|
|
ld8 r30=[r3] // restore rnat
|
347 |
|
|
;;
|
348 |
|
|
ld8 r18=[r14],16 // restore caller's unat
|
349 |
|
|
ld8 r19=[r15],24 // restore fpsr
|
350 |
|
|
;;
|
351 |
|
|
ldf.fill f2=[r14],32
|
352 |
|
|
ldf.fill f3=[r15],32
|
353 |
|
|
;;
|
354 |
|
|
ldf.fill f4=[r14],32
|
355 |
|
|
ldf.fill f5=[r15],32
|
356 |
|
|
;;
|
357 |
|
|
ldf.fill f12=[r14],32
|
358 |
|
|
ldf.fill f13=[r15],32
|
359 |
|
|
;;
|
360 |
|
|
ldf.fill f14=[r14],32
|
361 |
|
|
ldf.fill f15=[r15],32
|
362 |
|
|
;;
|
363 |
|
|
ldf.fill f16=[r14],32
|
364 |
|
|
ldf.fill f17=[r15],32
|
365 |
|
|
;;
|
366 |
|
|
ldf.fill f18=[r14],32
|
367 |
|
|
ldf.fill f19=[r15],32
|
368 |
|
|
mov b0=r21
|
369 |
|
|
;;
|
370 |
|
|
ldf.fill f20=[r14],32
|
371 |
|
|
ldf.fill f21=[r15],32
|
372 |
|
|
mov b1=r22
|
373 |
|
|
;;
|
374 |
|
|
ldf.fill f22=[r14],32
|
375 |
|
|
ldf.fill f23=[r15],32
|
376 |
|
|
mov b2=r23
|
377 |
|
|
;;
|
378 |
|
|
mov ar.bspstore=r27
|
379 |
|
|
mov ar.unat=r29 // establish unat holding the NaT bits for r4-r7
|
380 |
|
|
mov b3=r24
|
381 |
|
|
;;
|
382 |
|
|
ldf.fill f24=[r14],32
|
383 |
|
|
ldf.fill f25=[r15],32
|
384 |
|
|
mov b4=r25
|
385 |
|
|
;;
|
386 |
|
|
ldf.fill f26=[r14],32
|
387 |
|
|
ldf.fill f27=[r15],32
|
388 |
|
|
mov b5=r26
|
389 |
|
|
;;
|
390 |
|
|
ldf.fill f28=[r14],32
|
391 |
|
|
ldf.fill f29=[r15],32
|
392 |
|
|
mov ar.pfs=r16
|
393 |
|
|
;;
|
394 |
|
|
ldf.fill f30=[r14],32
|
395 |
|
|
ldf.fill f31=[r15],24
|
396 |
|
|
mov ar.lc=r17
|
397 |
|
|
;;
|
398 |
|
|
ld8.fill r4=[r14],16
|
399 |
|
|
ld8.fill r5=[r15],16
|
400 |
|
|
mov pr=r28,-1
|
401 |
|
|
;;
|
402 |
|
|
ld8.fill r6=[r14],16
|
403 |
|
|
ld8.fill r7=[r15],16
|
404 |
|
|
|
405 |
|
|
mov ar.unat=r18 // restore caller's unat
|
406 |
|
|
mov ar.rnat=r30 // must restore after bspstore but before rsc!
|
407 |
|
|
mov ar.fpsr=r19 // restore fpsr
|
408 |
|
|
mov ar.rsc=3 // put RSE back into eager mode, pl 0
|
409 |
|
|
br.cond.sptk.many b7
|
410 |
|
|
END(load_switch_stack)
|
411 |
|
|
|
412 |
|
|
GLOBAL_ENTRY(__ia64_syscall)
|
413 |
|
|
.regstk 6,0,0,0
|
414 |
|
|
mov r15=in5 // put syscall number in place
|
415 |
|
|
break __BREAK_SYSCALL
|
416 |
|
|
movl r2=errno
|
417 |
|
|
cmp.eq p6,p7=-1,r10
|
418 |
|
|
;;
|
419 |
|
|
(p6) st4 [r2]=r8
|
420 |
|
|
(p6) mov r8=-1
|
421 |
|
|
br.ret.sptk.many rp
|
422 |
|
|
END(__ia64_syscall)
|
423 |
|
|
|
424 |
|
|
/*
|
425 |
|
|
* We invoke syscall_trace through this intermediate function to
|
426 |
|
|
* ensure that the syscall input arguments are not clobbered. We
|
427 |
|
|
* also use it to preserve b6, which contains the syscall entry point.
|
428 |
|
|
*/
|
429 |
|
|
GLOBAL_ENTRY(invoke_syscall_trace)
|
430 |
|
|
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
|
431 |
|
|
alloc loc1=ar.pfs,8,3,0,0
|
432 |
|
|
mov loc0=rp
|
433 |
|
|
.body
|
434 |
|
|
mov loc2=b6
|
435 |
|
|
;;
|
436 |
|
|
br.call.sptk.many rp=syscall_trace
|
437 |
|
|
.ret3: mov rp=loc0
|
438 |
|
|
mov ar.pfs=loc1
|
439 |
|
|
mov b6=loc2
|
440 |
|
|
br.ret.sptk.many rp
|
441 |
|
|
END(invoke_syscall_trace)
|
442 |
|
|
|
443 |
|
|
/*
|
444 |
|
|
* Invoke a system call, but do some tracing before and after the call.
|
445 |
|
|
* We MUST preserve the current register frame throughout this routine
|
446 |
|
|
* because some system calls (such as ia64_execve) directly
|
447 |
|
|
* manipulate ar.pfs.
|
448 |
|
|
*
|
449 |
|
|
* Input:
|
450 |
|
|
* r15 = syscall number
|
451 |
|
|
* b6 = syscall entry point
|
452 |
|
|
*/
|
453 |
|
|
.global ia64_strace_leave_kernel
|
454 |
|
|
|
455 |
|
|
GLOBAL_ENTRY(ia64_trace_syscall)
|
456 |
|
|
PT_REGS_UNWIND_INFO(0)
|
457 |
|
|
{ /*
|
458 |
|
|
* Some versions of gas generate bad unwind info if the first instruction of a
|
459 |
|
|
* procedure doesn't go into the first slot of a bundle. This is a workaround.
|
460 |
|
|
*/
|
461 |
|
|
nop.m 0
|
462 |
|
|
nop.i 0
|
463 |
|
|
br.call.sptk.many rp=invoke_syscall_trace // give parent a chance to catch syscall args
|
464 |
|
|
}
|
465 |
|
|
.ret6: br.call.sptk.many rp=b6 // do the syscall
|
466 |
|
|
strace_check_retval:
|
467 |
|
|
cmp.lt p6,p0=r8,r0 // syscall failed?
|
468 |
|
|
adds r2=PT(R8)+16,sp // r2 = &pt_regs.r8
|
469 |
|
|
adds r3=PT(R10)+16,sp // r3 = &pt_regs.r10
|
470 |
|
|
mov r10=0
|
471 |
|
|
(p6) br.cond.sptk strace_error // syscall failed ->
|
472 |
|
|
;; // avoid RAW on r10
|
473 |
|
|
strace_save_retval:
|
474 |
|
|
.mem.offset 0,0; st8.spill [r2]=r8 // store return value in slot for r8
|
475 |
|
|
.mem.offset 8,0; st8.spill [r3]=r10 // clear error indication in slot for r10
|
476 |
|
|
ia64_strace_leave_kernel:
|
477 |
|
|
br.call.sptk.many rp=invoke_syscall_trace // give parent a chance to catch return value
|
478 |
|
|
.rety: br.cond.sptk ia64_leave_syscall
|
479 |
|
|
|
480 |
|
|
strace_error:
|
481 |
|
|
ld8 r3=[r2] // load pt_regs.r8
|
482 |
|
|
sub r9=0,r8 // negate return value to get errno value
|
483 |
|
|
;;
|
484 |
|
|
cmp.ne p6,p0=r3,r0 // is pt_regs.r8!=0?
|
485 |
|
|
adds r3=16,r2 // r3=&pt_regs.r10
|
486 |
|
|
;;
|
487 |
|
|
(p6) mov r10=-1
|
488 |
|
|
(p6) mov r8=r9
|
489 |
|
|
br.cond.sptk strace_save_retval
|
490 |
|
|
END(ia64_trace_syscall)
|
491 |
|
|
|
492 |
|
|
GLOBAL_ENTRY(ia64_ret_from_clone)
|
493 |
|
|
PT_REGS_UNWIND_INFO(0)
|
494 |
|
|
{ /*
|
495 |
|
|
* Some versions of gas generate bad unwind info if the first instruction of a
|
496 |
|
|
* procedure doesn't go into the first slot of a bundle. This is a workaround.
|
497 |
|
|
*/
|
498 |
|
|
nop.m 0
|
499 |
|
|
nop.i 0
|
500 |
|
|
/*
|
501 |
|
|
* We need to call schedule_tail() to complete the scheduling process.
|
502 |
|
|
* Called by ia64_switch_to after do_fork()->copy_thread(). r8 contains the
|
503 |
|
|
* address of the previously executing task.
|
504 |
|
|
*/
|
505 |
|
|
br.call.sptk.many rp=ia64_invoke_schedule_tail
|
506 |
|
|
}
|
507 |
|
|
.ret8:
|
508 |
|
|
adds r2=IA64_TASK_PTRACE_OFFSET,r13
|
509 |
|
|
;;
|
510 |
|
|
ld8 r2=[r2]
|
511 |
|
|
;;
|
512 |
|
|
mov r8=0
|
513 |
|
|
tbit.nz p6,p0=r2,PT_TRACESYS_BIT
|
514 |
|
|
(p6) br.cond.spnt strace_check_retval
|
515 |
|
|
;; // added stop bits to prevent r8 dependency
|
516 |
|
|
END(ia64_ret_from_clone)
|
517 |
|
|
// fall through
|
518 |
|
|
GLOBAL_ENTRY(ia64_ret_from_syscall)
|
519 |
|
|
PT_REGS_UNWIND_INFO(0)
|
520 |
|
|
cmp.ge p6,p7=r8,r0 // syscall executed successfully?
|
521 |
|
|
adds r2=PT(R8)+16,sp // r2 = &pt_regs.r8
|
522 |
|
|
adds r3=PT(R10)+16,sp // r3 = &pt_regs.r10
|
523 |
|
|
;;
|
524 |
|
|
.mem.offset 0,0
|
525 |
|
|
(p6) st8.spill [r2]=r8 // store return value in slot for r8 and set unat bit
|
526 |
|
|
.mem.offset 8,0
|
527 |
|
|
(p6) st8.spill [r3]=r0 // clear error indication in slot for r10 and set unat bit
|
528 |
|
|
(p7) br.cond.spnt handle_syscall_error // handle potential syscall failure
|
529 |
|
|
END(ia64_ret_from_syscall)
|
530 |
|
|
// fall through
|
531 |
|
|
/*
|
532 |
|
|
* ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't
|
533 |
|
|
* need to switch to bank 0 and doesn't restore the scratch registers.
|
534 |
|
|
* To avoid leaking kernel bits, the scratch registers are set to
|
535 |
|
|
* the following known-to-be-safe values:
|
536 |
|
|
*
|
537 |
|
|
* r1: restored (global pointer)
|
538 |
|
|
* r2: cleared
|
539 |
|
|
* r3: cleared
|
540 |
|
|
* r8-r11: restored (syscall return value(s))
|
541 |
|
|
* r12: restored (user-level stack pointer)
|
542 |
|
|
* r13: restored (user-level thread pointer)
|
543 |
|
|
* r14: cleared
|
544 |
|
|
* r15: restored (syscall #)
|
545 |
|
|
* r16-r19: cleared
|
546 |
|
|
* r20: user-level ar.fpsr
|
547 |
|
|
* r21: user-level b0
|
548 |
|
|
* r22: cleared
|
549 |
|
|
* r23: user-level ar.bspstore
|
550 |
|
|
* r24: user-level ar.rnat
|
551 |
|
|
* r25: user-level ar.unat
|
552 |
|
|
* r26: user-level ar.pfs
|
553 |
|
|
* r27: user-level ar.rsc
|
554 |
|
|
* r28: user-level ip
|
555 |
|
|
* r29: user-level psr
|
556 |
|
|
* r30: user-level cfm
|
557 |
|
|
* r31: user-level pr
|
558 |
|
|
* f6-f11: cleared
|
559 |
|
|
* pr: restored (user-level pr)
|
560 |
|
|
* b0: restored (user-level rp)
|
561 |
|
|
* b6: cleared
|
562 |
|
|
* b7: cleared
|
563 |
|
|
* ar.unat: restored (user-level ar.unat)
|
564 |
|
|
* ar.pfs: restored (user-level ar.pfs)
|
565 |
|
|
* ar.rsc: restored (user-level ar.rsc)
|
566 |
|
|
* ar.rnat: restored (user-level ar.rnat)
|
567 |
|
|
* ar.bspstore: restored (user-level ar.bspstore)
|
568 |
|
|
* ar.fpsr: restored (user-level ar.fpsr)
|
569 |
|
|
* ar.ccv: cleared
|
570 |
|
|
* ar.csd: cleared
|
571 |
|
|
* ar.ssd: cleared
|
572 |
|
|
*/
|
573 |
|
|
GLOBAL_ENTRY(ia64_leave_syscall)
|
574 |
|
|
PT_REGS_UNWIND_INFO(0)
|
575 |
|
|
lfetch.fault [sp]
|
576 |
|
|
movl r14=.restart1
|
577 |
|
|
;;
|
578 |
|
|
mov.ret.sptk rp=r14,.restart1
|
579 |
|
|
cmp.eq pLvSys,p0=r0,r0 // pLvSys=1: leave from syscall
|
580 |
|
|
.restart1:
|
581 |
|
|
// need_resched and signals atomic test
|
582 |
|
|
(pUser) rsm psr.i
|
583 |
|
|
adds r17=IA64_TASK_NEED_RESCHED_OFFSET,r13
|
584 |
|
|
adds r18=IA64_TASK_SIGPENDING_OFFSET,r13
|
585 |
|
|
#ifdef CONFIG_PERFMON
|
586 |
|
|
adds r19=IA64_TASK_PFM_OVFL_BLOCK_RESET_OFFSET,r13
|
587 |
|
|
#endif
|
588 |
|
|
;;
|
589 |
|
|
#ifdef CONFIG_PERFMON
|
590 |
|
|
(pUser) ld8 r19=[r19] // load current->thread.pfm_ovfl_block_reset
|
591 |
|
|
#endif
|
592 |
|
|
(pUser) ld8 r17=[r17] // load current->need_resched
|
593 |
|
|
(pUser) ld4 r18=[r18] // load current->sigpending
|
594 |
|
|
;;
|
595 |
|
|
#ifdef CONFIG_PERFMON
|
596 |
|
|
(pUser) cmp.ne.unc p9,p0=r19,r0 // current->thread.pfm_ovfl_block_reset != 0?
|
597 |
|
|
#endif
|
598 |
|
|
(pUser) cmp.ne.unc p7,p0=r17,r0 // current->need_resched != 0?
|
599 |
|
|
(pUser) cmp.ne.unc p8,p0=r18,r0 // current->sigpending != 0?
|
600 |
|
|
;;
|
601 |
|
|
#ifdef CONFIG_PERFMON
|
602 |
|
|
(p9) br.call.spnt.many b7=pfm_ovfl_block_reset
|
603 |
|
|
#endif
|
604 |
|
|
#if __GNUC__ < 3
|
605 |
|
|
(p7) br.call.spnt.many b7=invoke_schedule
|
606 |
|
|
#else
|
607 |
|
|
(p7) br.call.spnt.many b7=schedule
|
608 |
|
|
#endif
|
609 |
|
|
(p8) br.call.spnt.many rp=handle_signal_delivery // check & deliver pending signals (once)
|
610 |
|
|
|
611 |
|
|
mov ar.csd=r0
|
612 |
|
|
mov ar.ssd=r0
|
613 |
|
|
adds r16=PT(LOADRS)+16,r12
|
614 |
|
|
adds r17=PT(AR_BSPSTORE)+16, r12
|
615 |
|
|
mov f6=f0 // clear f6
|
616 |
|
|
;;
|
617 |
|
|
ld8 r19=[r16],PT(R8)-PT(LOADRS) // load ar.rsc value for "loadrs"
|
618 |
|
|
ld8 r23=[r17],PT(R9)-PT(AR_BSPSTORE) // load ar.bspstore (may be garbage)
|
619 |
|
|
mov r22=r0 // clear r22
|
620 |
|
|
;;
|
621 |
|
|
// start restoring the state saved on the kernel stack (struct pt_regs):
|
622 |
|
|
ld8.fill r8=[r16],16
|
623 |
|
|
ld8.fill r9=[r17],16
|
624 |
|
|
mov f7=f0 // clear f7
|
625 |
|
|
;;
|
626 |
|
|
ld8.fill r10=[r16],16
|
627 |
|
|
ld8.fill r11=[r17],16
|
628 |
|
|
mov f8=f0 // clear f8
|
629 |
|
|
;;
|
630 |
|
|
ld8 r29=[r16],16 // load cr.ipsr
|
631 |
|
|
ld8 r28=[r17],16 // load cr.iip
|
632 |
|
|
mov b7=r0 // clear b7
|
633 |
|
|
;;
|
634 |
|
|
ld8 r30=[r16],16 // load cr.ifs
|
635 |
|
|
ld8 r25=[r17],16 // load ar.unat
|
636 |
|
|
cmp.eq p9,p0=r0,r0 // set p9 to indicate that we should restore cr.ifs
|
637 |
|
|
;;
|
638 |
|
|
rsm psr.i | psr.ic // initiate turning off of interrupt and interruption collection
|
639 |
|
|
invala // invalidate ALAT
|
640 |
|
|
mov f9=f0 // clear f9
|
641 |
|
|
;;
|
642 |
|
|
ld8 r26=[r16],16 // load ar.pfs
|
643 |
|
|
ld8 r27=[r17],PT(PR)-PT(AR_RSC)// load ar.rsc
|
644 |
|
|
mov f10=f0 // clear f10
|
645 |
|
|
;;
|
646 |
|
|
ld8 r24=[r16],PT(B0)-PT(AR_RNAT)// load ar.rnat (may be garbage)
|
647 |
|
|
ld8 r31=[r17],PT(R1)-PT(PR) // load predicates
|
648 |
|
|
mov f11=f0 // clear f11
|
649 |
|
|
;;
|
650 |
|
|
ld8 r21=[r16],PT(R12)-PT(B0)// load b0
|
651 |
|
|
ld8.fill r1=[r17],16 // load r1
|
652 |
|
|
mov r3=r0 // clear r3
|
653 |
|
|
;;
|
654 |
|
|
ld8.fill r12=[r16],16
|
655 |
|
|
ld8.fill r13=[r17],16
|
656 |
|
|
mov r2=r0 // clear r2
|
657 |
|
|
;;
|
658 |
|
|
ld8 r20=[r16] // ar.fpsr
|
659 |
|
|
ld8.fill r15=[r17] // load r15
|
660 |
|
|
adds r18=16,r16
|
661 |
|
|
;;
|
662 |
|
|
mov r16=ar.bsp // get existing backing store pointer
|
663 |
|
|
movl r17=PERCPU_ADDR+IA64_CPU_PHYS_STACKED_SIZE_P8_OFFSET
|
664 |
|
|
srlz.i // ensure interruption collection is off
|
665 |
|
|
mov ar.ccv=r0 // clear ar.ccv
|
666 |
|
|
mov b6=r0 // clear b6
|
667 |
|
|
;;
|
668 |
|
|
ld4 r17=[r17] // r17 = cpu_data->phys_stacked_size_p8
|
669 |
|
|
mov r14=r0 // clear r14
|
670 |
|
|
(pKern) br.cond.dpnt skip_rbs_switch
|
671 |
|
|
/*
|
672 |
|
|
* Restore user backing store.
|
673 |
|
|
*
|
674 |
|
|
* NOTE: alloc, loadrs, and cover can't be predicated.
|
675 |
|
|
*/
|
676 |
|
|
cover // add current frame into dirty partition
|
677 |
|
|
shr.u r18=r19,16 // get byte size of existing "dirty" partition
|
678 |
|
|
;;
|
679 |
|
|
mov r19=ar.bsp // get new backing store pointer
|
680 |
|
|
sub r16=r16,r18 // krbs = old bsp - size of dirty partition
|
681 |
|
|
cmp.ne p9,p0=r0,r0 // clear p9 to skip restore of cr.ifs
|
682 |
|
|
;;
|
683 |
|
|
sub r19=r19,r16 // calculate total byte size of dirty partition
|
684 |
|
|
add r18=64,r18 // don't force in0-in7 into memory...
|
685 |
|
|
;;
|
686 |
|
|
shl r19=r19,16 // shift size of dirty partition into loadrs position
|
687 |
|
|
br.few dont_preserve_current_frame
|
688 |
|
|
;;
|
689 |
|
|
END(ia64_leave_syscall)
|
690 |
|
|
|
691 |
|
|
GLOBAL_ENTRY(ia64_ret_from_execve_syscall)
|
692 |
|
|
PT_REGS_UNWIND_INFO(0)
|
693 |
|
|
cmp.ge p6,p7=r8,r0 // syscall executed successfully?
|
694 |
|
|
adds r2=PT(R8)+16,sp // r2 = &pt_regs.r8
|
695 |
|
|
adds r3=PT(R10)+16,sp // r3 = &pt_regs.r10
|
696 |
|
|
;;
|
697 |
|
|
.mem.offset 0,0
|
698 |
|
|
(p6) st8.spill [r2]=r8 // store return value in slot for r8 and set unat bit
|
699 |
|
|
.mem.offset 8,0
|
700 |
|
|
(p6) st8.spill [r3]=r0 // clear error indication in slot for r10 and set unat bit
|
701 |
|
|
(p7) br.cond.spnt handle_syscall_error // handle potential syscall failure
|
702 |
|
|
END(ia64_ret_from_execve_syscall)
|
703 |
|
|
// fall through
|
704 |
|
|
GLOBAL_ENTRY(ia64_leave_kernel)
|
705 |
|
|
PT_REGS_UNWIND_INFO(0)
|
706 |
|
|
lfetch.fault [sp]
|
707 |
|
|
movl r14=.restart
|
708 |
|
|
;;
|
709 |
|
|
mov.ret.sptk rp=r14,.restart
|
710 |
|
|
cmp.eq p0,pLvSys=r0,r0 // pLvSys=0: leave from kernel
|
711 |
|
|
.restart:
|
712 |
|
|
// need_resched and signals atomic test
|
713 |
|
|
(pUser) rsm psr.i
|
714 |
|
|
adds r17=IA64_TASK_NEED_RESCHED_OFFSET,r13
|
715 |
|
|
adds r18=IA64_TASK_SIGPENDING_OFFSET,r13
|
716 |
|
|
#ifdef CONFIG_PERFMON
|
717 |
|
|
adds r19=IA64_TASK_PFM_OVFL_BLOCK_RESET_OFFSET,r13
|
718 |
|
|
#endif
|
719 |
|
|
;;
|
720 |
|
|
#ifdef CONFIG_PERFMON
|
721 |
|
|
(pUser) ld8 r19=[r19] // load current->thread.pfm_ovfl_block_reset
|
722 |
|
|
#endif
|
723 |
|
|
(pUser) ld8 r17=[r17] // load current->need_resched
|
724 |
|
|
(pUser) ld4 r18=[r18] // load current->sigpending
|
725 |
|
|
;;
|
726 |
|
|
#ifdef CONFIG_PERFMON
|
727 |
|
|
(pUser) cmp.ne.unc p9,p0=r19,r0 // current->thread.pfm_ovfl_block_reset != 0?
|
728 |
|
|
#endif
|
729 |
|
|
(pUser) cmp.ne.unc p7,p0=r17,r0 // current->need_resched != 0?
|
730 |
|
|
(pUser) cmp.ne.unc p8,p0=r18,r0 // current->sigpending != 0?
|
731 |
|
|
;;
|
732 |
|
|
#ifdef CONFIG_PERFMON
|
733 |
|
|
(p9) br.call.spnt.many b7=pfm_ovfl_block_reset
|
734 |
|
|
#endif
|
735 |
|
|
#if __GNUC__ < 3
|
736 |
|
|
(p7) br.call.spnt.many b7=invoke_schedule
|
737 |
|
|
#else
|
738 |
|
|
(p7) br.call.spnt.many b7=schedule
|
739 |
|
|
#endif
|
740 |
|
|
(p8) br.call.spnt.many rp=handle_signal_delivery // check & deliver pending signals (once)
|
741 |
|
|
|
742 |
|
|
adds r20=PT(CR_IPSR)+16,r12
|
743 |
|
|
adds r21=PT(PR)+16,r12
|
744 |
|
|
;;
|
745 |
|
|
lfetch.fault.excl [r20]
|
746 |
|
|
lfetch.fault.excl [r21]
|
747 |
|
|
adds r2=PT(B6)+16,r12
|
748 |
|
|
adds r3=PT(R16)+16,r12
|
749 |
|
|
mov r29=PT(R24)-PT(B6)
|
750 |
|
|
mov r30=PT(B7)-PT(R24)
|
751 |
|
|
;;
|
752 |
|
|
// start restoring the state saved on the kernel stack (struct pt_regs):
|
753 |
|
|
ld8 r28=[r2],r29 // b6
|
754 |
|
|
ld8.fill r16=[r3],128
|
755 |
|
|
mov r31=PT(AR_CSD)-PT(AR_CCV)
|
756 |
|
|
;;
|
757 |
|
|
ld8.fill r24=[r2],r30
|
758 |
|
|
ld8 r15=[r3],r31
|
759 |
|
|
;;
|
760 |
|
|
ld8 r29=[r2],16 // b7
|
761 |
|
|
ld8 r30=[r3],16 // ar.csd
|
762 |
|
|
;;
|
763 |
|
|
ld8 r31=[r2],16 // ar.ssd
|
764 |
|
|
ld8.fill r8=[r3],16
|
765 |
|
|
;;
|
766 |
|
|
ld8.fill r9=[r2],16
|
767 |
|
|
ld8.fill r10=[r3],PT(R17)-PT(R10)
|
768 |
|
|
;;
|
769 |
|
|
ld8.fill r11=[r2],PT(R18)-PT(R11)
|
770 |
|
|
ld8.fill r17=[r3],16
|
771 |
|
|
;;
|
772 |
|
|
ld8.fill r18=[r2],16
|
773 |
|
|
ld8.fill r19=[r3],16
|
774 |
|
|
;;
|
775 |
|
|
ld8.fill r20=[r2],16
|
776 |
|
|
ld8.fill r21=[r3],16
|
777 |
|
|
mov ar.csd=r30
|
778 |
|
|
mov ar.ssd=r31
|
779 |
|
|
;;
|
780 |
|
|
rsm psr.i | psr.ic // initiate turning off of interrupt and interruption collection
|
781 |
|
|
invala // invalidate ALAT
|
782 |
|
|
;;
|
783 |
|
|
ld8.fill r22=[r2],24
|
784 |
|
|
ld8.fill r23=[r3],24
|
785 |
|
|
mov b6=r28
|
786 |
|
|
;;
|
787 |
|
|
ld8.fill r25=[r2],16
|
788 |
|
|
ld8.fill r26=[r3],16
|
789 |
|
|
mov b7=r29
|
790 |
|
|
;;
|
791 |
|
|
ld8.fill r27=[r2],16
|
792 |
|
|
ld8.fill r28=[r3],16
|
793 |
|
|
;;
|
794 |
|
|
ld8.fill r29=[r2],16
|
795 |
|
|
ld8.fill r30=[r3],24
|
796 |
|
|
;;
|
797 |
|
|
ld8.fill r31=[r2],32
|
798 |
|
|
ldf.fill f6=[r3],32
|
799 |
|
|
;;
|
800 |
|
|
ldf.fill f7=[r2],32
|
801 |
|
|
ldf.fill f8=[r3],32
|
802 |
|
|
;;
|
803 |
|
|
srlz.i // ensure interruption collection is off
|
804 |
|
|
mov ar.ccv=r15
|
805 |
|
|
;;
|
806 |
|
|
ldf.fill f9=[r2],32
|
807 |
|
|
ldf.fill f10=[r3],32
|
808 |
|
|
bsw.0 // switch back to bank 0
|
809 |
|
|
;;
|
810 |
|
|
ldf.fill f11=[r2]
|
811 |
|
|
adds r16=PT(CR_IPSR)+16,r12
|
812 |
|
|
adds r17=PT(CR_IIP)+16,r12
|
813 |
|
|
;;
|
814 |
|
|
ld8 r29=[r16],16 // load cr.ipsr
|
815 |
|
|
ld8 r28=[r17],16 // load cr.iip
|
816 |
|
|
;;
|
817 |
|
|
ld8 r30=[r16],16 // load cr.ifs
|
818 |
|
|
ld8 r25=[r17],16 // load ar.unat
|
819 |
|
|
cmp.eq p9,p0=r0,r0 // set p9 to indicate that we should restore cr.ifs
|
820 |
|
|
;;
|
821 |
|
|
ld8 r26=[r16],16 // load ar.pfs
|
822 |
|
|
ld8 r27=[r17],16 // load ar.rsc
|
823 |
|
|
;;
|
824 |
|
|
ld8 r24=[r16],16 // load ar.rnat (may be garbage)
|
825 |
|
|
ld8 r23=[r17],16 // load ar.bspstore (may be garbage)
|
826 |
|
|
;;
|
827 |
|
|
ld8 r31=[r16],16 // load predicates
|
828 |
|
|
ld8 r21=[r17],16 // load b0
|
829 |
|
|
;;
|
830 |
|
|
ld8 r19=[r16],16 // load ar.rsc value for "loadrs"
|
831 |
|
|
ld8.fill r1=[r17],16 // load r1
|
832 |
|
|
;;
|
833 |
|
|
ld8.fill r12=[r16],16
|
834 |
|
|
ld8.fill r13=[r17],16
|
835 |
|
|
;;
|
836 |
|
|
ld8 r20=[r16],16
|
837 |
|
|
ld8.fill r15=[r17],16
|
838 |
|
|
;;
|
839 |
|
|
ld8.fill r14=[r16]
|
840 |
|
|
ld8.fill r2=[r17],16
|
841 |
|
|
adds r18=16,r16
|
842 |
|
|
;;
|
843 |
|
|
mov r16=ar.bsp // get existing backing store pointer
|
844 |
|
|
movl r17=PERCPU_ADDR+IA64_CPU_PHYS_STACKED_SIZE_P8_OFFSET
|
845 |
|
|
;;
|
846 |
|
|
ld8.fill r3=[r18]
|
847 |
|
|
ld4 r17=[r17] // r17 = cpu_data->phys_stacked_size_p8
|
848 |
|
|
shr.u r18=r19,16 // get byte size of existing "dirty" partition
|
849 |
|
|
(pKern) br.cond.dpnt skip_rbs_switch
|
850 |
|
|
/*
|
851 |
|
|
* Restore user backing store.
|
852 |
|
|
*
|
853 |
|
|
* NOTE: alloc, loadrs, and cover can't be predicated.
|
854 |
|
|
*/
|
855 |
|
|
(pNonSys) br.cond.dpnt dont_preserve_current_frame
|
856 |
|
|
cover // add current frame into dirty partition and set cr.ifs
|
857 |
|
|
;;
|
858 |
|
|
mov r19=ar.bsp // get new backing store pointer
|
859 |
|
|
sub r16=r16,r18 // krbs = old bsp - size of dirty partition
|
860 |
|
|
cmp.ne p9,p0=r0,r0 // clear p9 to skip restore of cr.ifs
|
861 |
|
|
;;
|
862 |
|
|
sub r19=r19,r16 // calculate total byte size of dirty partition
|
863 |
|
|
add r18=64,r18 // don't force in0-in7 into memory...
|
864 |
|
|
;;
|
865 |
|
|
shl r19=r19,16 // shift size of dirty partition into loadrs position
|
866 |
|
|
;;
|
867 |
|
|
dont_preserve_current_frame:
|
868 |
|
|
/*
|
869 |
|
|
* To prevent leaking bits between the kernel and user-space,
|
870 |
|
|
* we must clear the stacked registers in the "invalid" partition here.
|
871 |
|
|
* Not pretty, but at least it's fast (3.34 registers/cycle on Itanium,
|
872 |
|
|
* 5 registers/cycle on McKinley).
|
873 |
|
|
*/
|
874 |
|
|
# define pRecurse p6
|
875 |
|
|
# define pReturn p7
|
876 |
|
|
#ifdef CONFIG_ITANIUM
|
877 |
|
|
# define Nregs 10
|
878 |
|
|
#else
|
879 |
|
|
# define Nregs 14
|
880 |
|
|
#endif
|
881 |
|
|
alloc loc0=ar.pfs,2,Nregs-2,2,0
|
882 |
|
|
shr.u loc1=r18,9 // RNaTslots <= floor(dirtySize / (64*8))
|
883 |
|
|
sub r17=r17,r18 // r17 = (physStackedSize + 8) - dirtySize
|
884 |
|
|
;;
|
885 |
|
|
mov ar.rsc=r19 // load ar.rsc to be used for "loadrs"
|
886 |
|
|
shladd in0=loc1,3,r17
|
887 |
|
|
mov in1=0
|
888 |
|
|
;;
|
889 |
|
|
rse_clear_invalid:
|
890 |
|
|
#ifdef CONFIG_ITANIUM
|
891 |
|
|
// cycle 0
|
892 |
|
|
{ .mii
|
893 |
|
|
alloc loc0=ar.pfs,2,Nregs-2,2,0
|
894 |
|
|
cmp.lt pRecurse,p0=Nregs*8,in0 // if more than Nregs regs left to clear, (re)curse
|
895 |
|
|
add out0=-Nregs*8,in0
|
896 |
|
|
}{ .mfb
|
897 |
|
|
add out1=1,in1 // increment recursion count
|
898 |
|
|
nop.f 0
|
899 |
|
|
nop.b 0 // can't do br.call here because of alloc (WAW on CFM)
|
900 |
|
|
;;
|
901 |
|
|
}{ .mfi // cycle 1
|
902 |
|
|
mov loc1=0
|
903 |
|
|
nop.f 0
|
904 |
|
|
mov loc2=0
|
905 |
|
|
}{ .mib
|
906 |
|
|
mov loc3=0
|
907 |
|
|
mov loc4=0
|
908 |
|
|
(pRecurse) br.call.sptk.many b0=rse_clear_invalid
|
909 |
|
|
|
910 |
|
|
}{ .mfi // cycle 2
|
911 |
|
|
mov loc5=0
|
912 |
|
|
nop.f 0
|
913 |
|
|
cmp.ne pReturn,p0=r0,in1 // if recursion count != 0, we need to do a br.ret
|
914 |
|
|
}{ .mib
|
915 |
|
|
mov loc6=0
|
916 |
|
|
mov loc7=0
|
917 |
|
|
(pReturn) br.ret.sptk.many b0
|
918 |
|
|
}
|
919 |
|
|
#else /* !CONFIG_ITANIUM */
|
920 |
|
|
alloc loc0=ar.pfs,2,Nregs-2,2,0
|
921 |
|
|
cmp.lt pRecurse,p0=Nregs*8,in0 // if more than Nregs regs left to clear, (re)curse
|
922 |
|
|
add out0=-Nregs*8,in0
|
923 |
|
|
add out1=1,in1 // increment recursion count
|
924 |
|
|
mov loc1=0
|
925 |
|
|
mov loc2=0
|
926 |
|
|
;;
|
927 |
|
|
mov loc3=0
|
928 |
|
|
mov loc4=0
|
929 |
|
|
mov loc5=0
|
930 |
|
|
mov loc6=0
|
931 |
|
|
mov loc7=0
|
932 |
|
|
(pRecurse) br.call.sptk.many b0=rse_clear_invalid
|
933 |
|
|
;;
|
934 |
|
|
mov loc8=0
|
935 |
|
|
mov loc9=0
|
936 |
|
|
cmp.ne pReturn,p0=r0,in1 // if recursion count != 0, we need to do a br.ret
|
937 |
|
|
mov loc10=0
|
938 |
|
|
mov loc11=0
|
939 |
|
|
(pReturn) br.ret.sptk.many b0
|
940 |
|
|
#endif /* !CONFIG_ITANIUM */
|
941 |
|
|
# undef pRecurse
|
942 |
|
|
# undef pReturn
|
943 |
|
|
;;
|
944 |
|
|
alloc r17=ar.pfs,0,0,0,0 // drop current register frame
|
945 |
|
|
;;
|
946 |
|
|
loadrs
|
947 |
|
|
;;
|
948 |
|
|
skip_rbs_switch:
|
949 |
|
|
(pLvSys)mov r19=r0 // clear r19 for leave_syscall, no-op otherwise
|
950 |
|
|
mov b0=r21
|
951 |
|
|
mov ar.pfs=r26
|
952 |
|
|
(pUser) mov ar.bspstore=r23
|
953 |
|
|
(p9) mov cr.ifs=r30
|
954 |
|
|
(pLvSys)mov r16=r0 // clear r16 for leave_syscall, no-op otherwise
|
955 |
|
|
mov cr.ipsr=r29
|
956 |
|
|
mov ar.fpsr=r20
|
957 |
|
|
(pLvSys)mov r17=r0 // clear r17 for leave_syscall, no-op otherwise
|
958 |
|
|
mov cr.iip=r28
|
959 |
|
|
;;
|
960 |
|
|
(pUser) mov ar.rnat=r24 // must happen with RSE in lazy mode
|
961 |
|
|
(pLvSys)mov r18=r0 // clear r18 for leave_syscall, no-op otherwise
|
962 |
|
|
mov ar.rsc=r27
|
963 |
|
|
mov ar.unat=r25
|
964 |
|
|
mov pr=r31,-1
|
965 |
|
|
rfi
|
966 |
|
|
END(ia64_leave_kernel)
|
967 |
|
|
|
968 |
|
|
ENTRY(handle_syscall_error)
	/*
	 * Fix up the syscall return values before leaving the kernel.
	 *
	 * Some system calls (e.g., ptrace, mmap) can return arbitrary
	 * values which could lead us to mistake a negative return
	 * value as a failed syscall.  Those syscall must deposit
	 * a non-zero value in pt_regs.r8 to indicate an error.
	 * If pt_regs.r8 is zero, we assume that the call completed
	 * successfully.
	 *
	 * On entry: r2 = &pt_regs.r8, r8 = raw (negative) return value.
	 * On success (pt_regs.r8 == 0): pt_regs.r8 = r8, pt_regs.r10 = 0.
	 * On error: pt_regs.r8 = -r8 (errno), pt_regs.r10 = -1.
	 * Falls through to ia64_leave_syscall in both cases.
	 */
	PT_REGS_UNWIND_INFO(0)
	ld8 r3=[r2]				// load pt_regs.r8 (the error flag deposited by the syscall)
	sub r9=0,r8				// negate return value to get errno
	;;
	mov r10=-1				// return -1 in pt_regs.r10 to indicate error
	cmp.eq p6,p7=r3,r0			// is pt_regs.r8==0?  p6 = success, p7 = error
	adds r3=16,r2				// r3=&pt_regs.r10
	;;
(p6)	mov r9=r8				// success: keep the raw return value
(p6)	mov r10=0				// success: r10 = 0 (no error)
	;;
	// st8.spill (rather than plain st8) also records the NaT bit in ar.unat
.mem.offset 0,0; st8.spill [r2]=r9	// store errno in pt_regs.r8 and set unat bit
.mem.offset 8,0; st8.spill [r3]=r10	// store error indication in pt_regs.r10 and set unat bit
	br.cond.sptk ia64_leave_syscall
END(handle_syscall_error)
|
992 |
|
|
|
993 |
|
|
/*
|
994 |
|
|
* Invoke schedule_tail(task) while preserving in0-in7, which may be needed
|
995 |
|
|
* in case a system call gets restarted.
|
996 |
|
|
*/
|
997 |
|
|
GLOBAL_ENTRY(ia64_invoke_schedule_tail)
	/*
	 * Invoke schedule_tail(task) while preserving in0-in7, which may be
	 * needed in case a system call gets restarted.  Declaring 8 input
	 * registers in the alloc keeps the syscall arguments alive across
	 * the call.
	 *
	 * In: r8 = address of the previous task (passed as out0).
	 */
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
	alloc loc1=ar.pfs,8,2,1,0		// 8 ins (preserved args), 2 locals, 1 out
	mov loc0=rp				// save return address across the call
	mov out0=r8				// Address of previous task
	;;
	br.call.sptk.many rp=schedule_tail
.ret11:	mov ar.pfs=loc1				// restore caller's frame marker
	mov rp=loc0				// restore return address
	br.ret.sptk.many rp
END(ia64_invoke_schedule_tail)
|
1008 |
|
|
|
1009 |
|
|
#if __GNUC__ < 3
|
1010 |
|
|
|
1011 |
|
|
/*
|
1012 |
|
|
* Invoke schedule() while preserving in0-in7, which may be needed
|
1013 |
|
|
* in case a system call gets restarted. Note that declaring schedule()
|
1014 |
|
|
* with asmlinkage() is NOT enough because that will only preserve as many
|
1015 |
|
|
* registers as there are formal arguments.
|
1016 |
|
|
*
|
1017 |
|
|
* XXX fix me: with gcc 3.0, we won't need this anymore because syscall_linkage
|
1018 |
|
|
* renders all eight input registers (in0-in7) as "untouchable".
|
1019 |
|
|
*/
|
1020 |
|
|
ENTRY(invoke_schedule)
	/*
	 * Invoke schedule() while preserving in0-in7, which may be needed
	 * in case a system call gets restarted.  (Only built for gcc < 3;
	 * see the #if guard and the XXX note above this routine.)
	 * Allocating 8 input registers keeps the syscall args alive.
	 */
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
	alloc loc1=ar.pfs,8,2,0,0		// 8 ins (preserved args), 2 locals, no outs
	mov loc0=rp				// save return address across the call
	;;
	.body
	br.call.sptk.many rp=schedule
.ret14:	mov ar.pfs=loc1				// restore caller's frame marker
	mov rp=loc0				// restore return address
	br.ret.sptk.many rp
END(invoke_schedule)
|
1031 |
|
|
|
1032 |
|
|
#endif /* __GNUC__ < 3 */
|
1033 |
|
|
|
1034 |
|
|
/*
|
1035 |
|
|
* Setup stack and call ia64_do_signal. Note that pSys and pNonSys need to
|
1036 |
|
|
* be set up by the caller. We declare 8 input registers so the system call
|
1037 |
|
|
* args get preserved, in case we need to restart a system call.
|
1038 |
|
|
*/
|
1039 |
|
|
ENTRY(handle_signal_delivery)
	/*
	 * Setup a sigscratch area on the stack and call ia64_do_signal().
	 * pSys/pNonSys must be set up by the caller to indicate whether we
	 * entered the kernel via a syscall.  We declare 8 input registers
	 * so the system call args get preserved, in case we need to restart
	 * a system call.
	 */
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
	alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart!
	mov r9=ar.unat				// save user NaT-collection bits
	mov loc0=rp				// save return address
	mov out0=0				// there is no "oldset"
	adds out1=8,sp				// out1=&sigscratch->ar_pfs
(pSys)	mov out2=1				// out2==1 => we're in a syscall
	;;
(pNonSys) mov out2=0				// out2==0 => not a syscall
	.fframe 16
	.spillpsp ar.unat, 16			// (note that offset is relative to psp+0x10!)
	st8 [sp]=r9,-16				// allocate space for ar.unat and save it
	st8 [out1]=loc1,-8			// save ar.pfs, out1=&sigscratch
	.body
	br.call.sptk.many rp=ia64_do_signal
.ret15:	.restore sp
	adds sp=16,sp				// pop scratch stack space
	;;
	ld8 r9=[sp]				// load new unat from sw->caller_unat
	mov rp=loc0				// restore return address
	;;
	mov ar.unat=r9				// restore (possibly updated) ar.unat
	mov ar.pfs=loc1				// restore caller's frame marker
	br.ret.sptk.many rp
END(handle_signal_delivery)
|
1065 |
|
|
|
1066 |
|
|
GLOBAL_ENTRY(sys_rt_sigsuspend)
	/*
	 * rt_sigsuspend(mask, sigsetsize): build a sigscratch area on the
	 * stack and hand off to ia64_rt_sigsuspend().  Like
	 * handle_signal_delivery above, all eight input registers are
	 * declared so the syscall args survive a possible restart.
	 *
	 * In: in0 = mask, in1 = sigsetsize.
	 */
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
	alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart!
	mov r9=ar.unat				// save user NaT-collection bits
	mov loc0=rp				// save return address
	mov out0=in0				// mask
	mov out1=in1				// sigsetsize
	adds out2=8,sp				// out2=&sigscratch->ar_pfs
	;;
	.fframe 16
	.spillpsp ar.unat, 16			// (note that offset is relative to psp+0x10!)
	st8 [sp]=r9,-16				// allocate space for ar.unat and save it
	st8 [out2]=loc1,-8			// save ar.pfs, out2=&sigscratch
	.body
	br.call.sptk.many rp=ia64_rt_sigsuspend
.ret17:	.restore sp
	adds sp=16,sp				// pop scratch stack space
	;;
	ld8 r9=[sp]				// load new unat from sw->caller_unat
	mov rp=loc0				// restore return address
	;;
	mov ar.unat=r9				// restore (possibly updated) ar.unat
	mov ar.pfs=loc1				// restore caller's frame marker
	br.ret.sptk.many rp
END(sys_rt_sigsuspend)
|
1091 |
|
|
|
1092 |
|
|
ENTRY(sys_rt_sigreturn)
	/*
	 * Return from a signal handler: spill the live FP scratch registers
	 * into pt_regs, call ia64_rt_sigreturn() to restore the interrupted
	 * context, then exit through ia64_leave_kernel.
	 */
	PT_REGS_UNWIND_INFO(0)
	alloc r2=ar.pfs,0,0,1,0			// 1 out (out0 = &sigscratch below)
	.prologue
	PT_REGS_SAVES(16)
	adds sp=-16,sp				// scratch space for sigscratch handoff
	.body
	cmp.eq pNonSys,pSys=r0,r0		// sigreturn isn't a normal syscall...
	;;
	/* After signal handler, live registers f6-f11 are restored to the previous
	 * executing context values for synchronized signals(from exceptions); or they
	 * are cleared to 0 for asynchronized signals(from syscalls). These live registers
	 * will be put into pt_regs to return back to user space.
	 */
	adds r16=PT(F6)+32,sp			// r16 = &pt_regs.f6
	adds r17=PT(F7)+32,sp			// r17 = &pt_regs.f7
	;;
	stf.spill [r16]=f6,32
	stf.spill [r17]=f7,32
	;;
	stf.spill [r16]=f8,32
	stf.spill [r17]=f9,32
	;;
	stf.spill [r16]=f10
	stf.spill [r17]=f11
	adds out0=16,sp				// out0 = &sigscratch
	br.call.sptk.many rp=ia64_rt_sigreturn
.ret19:	.restore sp 0
	adds sp=16,sp				// pop scratch stack space
	;;
	ld8 r9=[sp]				// load new ar.unat
	mov.sptk b7=r8,ia64_leave_kernel	// b7 = ia64_leave_kernel (r8 gets its address)
	;;
	mov ar.unat=r9
	br.many b7				// exit via ia64_leave_kernel
END(sys_rt_sigreturn)
|
1128 |
|
|
|
1129 |
|
|
GLOBAL_ENTRY(ia64_prepare_handle_unaligned)
	/*
	 * Save a full switch_stack, call the C unaligned-access handler,
	 * then restore the switch_stack and return (to ia64_leave_kernel).
	 * The pt_regs stack frame was already set up in the ivt.
	 */
	.prologue
	/*
	 * r16 = fake ar.pfs, we simply need to make sure privilege is still 0
	 */
	mov r16=r0
	DO_SAVE_SWITCH_STACK
	br.call.sptk.many rp=ia64_handle_unaligned // stack frame setup in ivt
.ret21:	.body
	DO_LOAD_SWITCH_STACK
	br.cond.sptk.many rp			// goes to ia64_leave_kernel
END(ia64_prepare_handle_unaligned)
|
1141 |
|
|
|
1142 |
|
|
//
|
1143 |
|
|
// unw_init_running(void (*callback)(info, arg), void *arg)
|
1144 |
|
|
//
|
1145 |
|
|
# define EXTRA_FRAME_SIZE ((UNW_FRAME_INFO_SIZE+15)&~15)
|
1146 |
|
|
|
1147 |
|
|
GLOBAL_ENTRY(unw_init_running)
	/*
	 * unw_init_running(void (*callback)(info, arg), void *arg)
	 *
	 * Save a switch_stack plus an unw_frame_info-sized scratch area
	 * (EXTRA_FRAME_SIZE) on the stack, initialize the frame info with
	 * unw_init_frame_info(), then invoke the callback (a function
	 * descriptor: in0 points to {entry, gp}) with (&info, arg).
	 */
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
	alloc loc1=ar.pfs,2,3,3,0		// 2 ins, 3 locals, 3 outs
	;;
	ld8 loc2=[in0],8			// loc2 = callback entry point; in0 -> &gp
	mov loc0=rp				// save return address
	mov r16=loc1				// r16 = ar.pfs for DO_SAVE_SWITCH_STACK
	DO_SAVE_SWITCH_STACK
	.body

	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
	.fframe IA64_SWITCH_STACK_SIZE+EXTRA_FRAME_SIZE
	SWITCH_STACK_SAVES(EXTRA_FRAME_SIZE)
	adds sp=-EXTRA_FRAME_SIZE,sp		// carve out space for the unw_frame_info
	.body
	;;
	adds out0=16,sp				// &info
	mov out1=r13				// current
	adds out2=16+EXTRA_FRAME_SIZE,sp	// &switch_stack
	br.call.sptk.many rp=unw_init_frame_info
1:	adds out0=16,sp				// &info
	mov b6=loc2				// b6 = callback entry point
	mov loc2=gp				// save gp across indirect function call
	;;
	ld8 gp=[in0]				// load the callback's gp from its descriptor
	mov out1=in1				// arg
	br.call.sptk.many rp=b6			// invoke the callback function
1:	mov gp=loc2				// restore gp

	// For now, we don't allow changing registers from within
	// unw_init_running; if we ever want to allow that, we'd
	// have to do a load_switch_stack here:
	.restore sp
	adds sp=IA64_SWITCH_STACK_SIZE+EXTRA_FRAME_SIZE,sp

	mov ar.pfs=loc1				// restore caller's frame marker
	mov rp=loc0				// restore return address
	br.ret.sptk.many rp
END(unw_init_running)
|
1186 |
|
|
|
1187 |
|
|
.rodata
|
1188 |
|
|
.align 8
|
1189 |
|
|
.globl sys_call_table
|
1190 |
|
|
sys_call_table:
|
1191 |
|
|
data8 sys_ni_syscall // This must be sys_ni_syscall! See ivt.S.
|
1192 |
|
|
data8 sys_exit // 1025
|
1193 |
|
|
data8 sys_read
|
1194 |
|
|
data8 sys_write
|
1195 |
|
|
data8 sys_open
|
1196 |
|
|
data8 sys_close
|
1197 |
|
|
data8 sys_creat // 1030
|
1198 |
|
|
data8 sys_link
|
1199 |
|
|
data8 sys_unlink
|
1200 |
|
|
data8 ia64_execve
|
1201 |
|
|
data8 sys_chdir
|
1202 |
|
|
data8 sys_fchdir // 1035
|
1203 |
|
|
data8 sys_utimes
|
1204 |
|
|
data8 sys_mknod
|
1205 |
|
|
data8 sys_chmod
|
1206 |
|
|
data8 sys_chown
|
1207 |
|
|
data8 sys_lseek // 1040
|
1208 |
|
|
data8 sys_getpid
|
1209 |
|
|
data8 sys_getppid
|
1210 |
|
|
data8 sys_mount
|
1211 |
|
|
data8 sys_umount
|
1212 |
|
|
data8 sys_setuid // 1045
|
1213 |
|
|
data8 sys_getuid
|
1214 |
|
|
data8 sys_geteuid
|
1215 |
|
|
data8 sys_ptrace
|
1216 |
|
|
data8 sys_access
|
1217 |
|
|
data8 sys_sync // 1050
|
1218 |
|
|
data8 sys_fsync
|
1219 |
|
|
data8 sys_fdatasync
|
1220 |
|
|
data8 sys_kill
|
1221 |
|
|
data8 sys_rename
|
1222 |
|
|
data8 sys_mkdir // 1055
|
1223 |
|
|
data8 sys_rmdir
|
1224 |
|
|
data8 sys_dup
|
1225 |
|
|
data8 sys_pipe
|
1226 |
|
|
data8 sys_times
|
1227 |
|
|
data8 ia64_brk // 1060
|
1228 |
|
|
data8 sys_setgid
|
1229 |
|
|
data8 sys_getgid
|
1230 |
|
|
data8 sys_getegid
|
1231 |
|
|
data8 sys_acct
|
1232 |
|
|
data8 sys_ioctl // 1065
|
1233 |
|
|
data8 sys_fcntl
|
1234 |
|
|
data8 sys_umask
|
1235 |
|
|
data8 sys_chroot
|
1236 |
|
|
data8 sys_ustat
|
1237 |
|
|
data8 sys_dup2 // 1070
|
1238 |
|
|
data8 sys_setreuid
|
1239 |
|
|
data8 sys_setregid
|
1240 |
|
|
data8 sys_getresuid
|
1241 |
|
|
data8 sys_setresuid
|
1242 |
|
|
data8 sys_getresgid // 1075
|
1243 |
|
|
data8 sys_setresgid
|
1244 |
|
|
data8 sys_getgroups
|
1245 |
|
|
data8 sys_setgroups
|
1246 |
|
|
data8 sys_getpgid
|
1247 |
|
|
data8 sys_setpgid // 1080
|
1248 |
|
|
data8 sys_setsid
|
1249 |
|
|
data8 sys_getsid
|
1250 |
|
|
data8 sys_sethostname
|
1251 |
|
|
data8 sys_setrlimit
|
1252 |
|
|
data8 sys_getrlimit // 1085
|
1253 |
|
|
data8 sys_getrusage
|
1254 |
|
|
data8 sys_gettimeofday
|
1255 |
|
|
data8 sys_settimeofday
|
1256 |
|
|
data8 sys_select
|
1257 |
|
|
data8 sys_poll // 1090
|
1258 |
|
|
data8 sys_symlink
|
1259 |
|
|
data8 sys_readlink
|
1260 |
|
|
data8 sys_uselib
|
1261 |
|
|
data8 sys_swapon
|
1262 |
|
|
data8 sys_swapoff // 1095
|
1263 |
|
|
data8 sys_reboot
|
1264 |
|
|
data8 sys_truncate
|
1265 |
|
|
data8 sys_ftruncate
|
1266 |
|
|
data8 sys_fchmod
|
1267 |
|
|
data8 sys_fchown // 1100
|
1268 |
|
|
data8 ia64_getpriority
|
1269 |
|
|
data8 sys_setpriority
|
1270 |
|
|
data8 sys_statfs
|
1271 |
|
|
data8 sys_fstatfs
|
1272 |
|
|
data8 sys_gettid // 1105
|
1273 |
|
|
data8 sys_semget
|
1274 |
|
|
data8 sys_semop
|
1275 |
|
|
data8 sys_semctl
|
1276 |
|
|
data8 sys_msgget
|
1277 |
|
|
data8 sys_msgsnd // 1110
|
1278 |
|
|
data8 sys_msgrcv
|
1279 |
|
|
data8 sys_msgctl
|
1280 |
|
|
data8 sys_shmget
|
1281 |
|
|
data8 ia64_shmat
|
1282 |
|
|
data8 sys_shmdt // 1115
|
1283 |
|
|
data8 sys_shmctl
|
1284 |
|
|
data8 sys_syslog
|
1285 |
|
|
data8 sys_setitimer
|
1286 |
|
|
data8 sys_getitimer
|
1287 |
|
|
data8 ia64_oldstat // 1120
|
1288 |
|
|
data8 ia64_oldlstat
|
1289 |
|
|
data8 ia64_oldfstat
|
1290 |
|
|
data8 sys_vhangup
|
1291 |
|
|
data8 sys_lchown
|
1292 |
|
|
data8 sys_vm86 // 1125
|
1293 |
|
|
data8 sys_wait4
|
1294 |
|
|
data8 sys_sysinfo
|
1295 |
|
|
data8 sys_clone
|
1296 |
|
|
data8 sys_setdomainname
|
1297 |
|
|
data8 sys_newuname // 1130
|
1298 |
|
|
data8 sys_adjtimex
|
1299 |
|
|
data8 ia64_create_module
|
1300 |
|
|
data8 sys_init_module
|
1301 |
|
|
data8 sys_delete_module
|
1302 |
|
|
data8 sys_get_kernel_syms // 1135
|
1303 |
|
|
data8 sys_query_module
|
1304 |
|
|
data8 sys_quotactl
|
1305 |
|
|
data8 sys_bdflush
|
1306 |
|
|
data8 sys_sysfs
|
1307 |
|
|
data8 sys_personality // 1140
|
1308 |
|
|
data8 ia64_ni_syscall // sys_afs_syscall
|
1309 |
|
|
data8 sys_setfsuid
|
1310 |
|
|
data8 sys_setfsgid
|
1311 |
|
|
data8 sys_getdents
|
1312 |
|
|
data8 sys_flock // 1145
|
1313 |
|
|
data8 sys_readv
|
1314 |
|
|
data8 sys_writev
|
1315 |
|
|
data8 sys_pread
|
1316 |
|
|
data8 sys_pwrite
|
1317 |
|
|
data8 sys_sysctl // 1150
|
1318 |
|
|
data8 sys_mmap
|
1319 |
|
|
data8 sys_munmap
|
1320 |
|
|
data8 sys_mlock
|
1321 |
|
|
data8 sys_mlockall
|
1322 |
|
|
data8 sys_mprotect // 1155
|
1323 |
|
|
data8 ia64_mremap
|
1324 |
|
|
data8 sys_msync
|
1325 |
|
|
data8 sys_munlock
|
1326 |
|
|
data8 sys_munlockall
|
1327 |
|
|
data8 sys_sched_getparam // 1160
|
1328 |
|
|
data8 sys_sched_setparam
|
1329 |
|
|
data8 sys_sched_getscheduler
|
1330 |
|
|
data8 sys_sched_setscheduler
|
1331 |
|
|
data8 sys_sched_yield
|
1332 |
|
|
data8 sys_sched_get_priority_max // 1165
|
1333 |
|
|
data8 sys_sched_get_priority_min
|
1334 |
|
|
data8 sys_sched_rr_get_interval
|
1335 |
|
|
data8 sys_nanosleep
|
1336 |
|
|
data8 sys_nfsservctl
|
1337 |
|
|
data8 sys_prctl // 1170
|
1338 |
|
|
data8 sys_getpagesize
|
1339 |
|
|
data8 sys_mmap2
|
1340 |
|
|
data8 sys_pciconfig_read
|
1341 |
|
|
data8 sys_pciconfig_write
|
1342 |
|
|
data8 sys_perfmonctl // 1175
|
1343 |
|
|
data8 sys_sigaltstack
|
1344 |
|
|
data8 sys_rt_sigaction
|
1345 |
|
|
data8 sys_rt_sigpending
|
1346 |
|
|
data8 sys_rt_sigprocmask
|
1347 |
|
|
data8 sys_rt_sigqueueinfo // 1180
|
1348 |
|
|
data8 sys_rt_sigreturn
|
1349 |
|
|
data8 sys_rt_sigsuspend
|
1350 |
|
|
data8 sys_rt_sigtimedwait
|
1351 |
|
|
data8 sys_getcwd
|
1352 |
|
|
data8 sys_capget // 1185
|
1353 |
|
|
data8 sys_capset
|
1354 |
|
|
data8 sys_sendfile
|
1355 |
|
|
data8 sys_ni_syscall // sys_getpmsg (STREAMS)
|
1356 |
|
|
data8 sys_ni_syscall // sys_putpmsg (STREAMS)
|
1357 |
|
|
data8 sys_socket // 1190
|
1358 |
|
|
data8 sys_bind
|
1359 |
|
|
data8 sys_connect
|
1360 |
|
|
data8 sys_listen
|
1361 |
|
|
data8 sys_accept
|
1362 |
|
|
data8 sys_getsockname // 1195
|
1363 |
|
|
data8 sys_getpeername
|
1364 |
|
|
data8 sys_socketpair
|
1365 |
|
|
data8 sys_send
|
1366 |
|
|
data8 sys_sendto
|
1367 |
|
|
data8 sys_recv // 1200
|
1368 |
|
|
data8 sys_recvfrom
|
1369 |
|
|
data8 sys_shutdown
|
1370 |
|
|
data8 sys_setsockopt
|
1371 |
|
|
data8 sys_getsockopt
|
1372 |
|
|
data8 sys_sendmsg // 1205
|
1373 |
|
|
data8 sys_recvmsg
|
1374 |
|
|
data8 sys_pivot_root
|
1375 |
|
|
data8 sys_mincore
|
1376 |
|
|
data8 sys_madvise
|
1377 |
|
|
data8 sys_newstat // 1210
|
1378 |
|
|
data8 sys_newlstat
|
1379 |
|
|
data8 sys_newfstat
|
1380 |
|
|
data8 sys_clone2
|
1381 |
|
|
data8 sys_getdents64
|
1382 |
|
|
data8 sys_getunwind // 1215
|
1383 |
|
|
data8 sys_readahead
|
1384 |
|
|
data8 sys_setxattr
|
1385 |
|
|
data8 sys_lsetxattr
|
1386 |
|
|
data8 sys_fsetxattr
|
1387 |
|
|
data8 sys_getxattr // 1220
|
1388 |
|
|
data8 sys_lgetxattr
|
1389 |
|
|
data8 sys_fgetxattr
|
1390 |
|
|
data8 sys_listxattr
|
1391 |
|
|
data8 sys_llistxattr
|
1392 |
|
|
data8 sys_flistxattr // 1225
|
1393 |
|
|
data8 sys_removexattr
|
1394 |
|
|
data8 sys_lremovexattr
|
1395 |
|
|
data8 sys_fremovexattr
|
1396 |
|
|
data8 sys_tkill
|
1397 |
|
|
data8 ia64_ni_syscall // 1230
|
1398 |
|
|
data8 ia64_ni_syscall
|
1399 |
|
|
data8 ia64_ni_syscall
|
1400 |
|
|
data8 ia64_ni_syscall
|
1401 |
|
|
data8 ia64_ni_syscall
|
1402 |
|
|
data8 ia64_ni_syscall // 1235
|
1403 |
|
|
data8 ia64_ni_syscall
|
1404 |
|
|
data8 ia64_ni_syscall
|
1405 |
|
|
data8 ia64_ni_syscall
|
1406 |
|
|
data8 ia64_ni_syscall
|
1407 |
|
|
data8 ia64_ni_syscall // 1240
|
1408 |
|
|
data8 ia64_ni_syscall
|
1409 |
|
|
data8 ia64_ni_syscall
|
1410 |
|
|
data8 ia64_ni_syscall
|
1411 |
|
|
data8 ia64_ni_syscall
|
1412 |
|
|
data8 ia64_ni_syscall // 1245
|
1413 |
|
|
data8 ia64_ni_syscall
|
1414 |
|
|
data8 ia64_ni_syscall
|
1415 |
|
|
data8 ia64_ni_syscall
|
1416 |
|
|
data8 ia64_ni_syscall
|
1417 |
|
|
data8 ia64_ni_syscall // 1250
|
1418 |
|
|
data8 ia64_ni_syscall
|
1419 |
|
|
data8 ia64_ni_syscall
|
1420 |
|
|
data8 ia64_ni_syscall
|
1421 |
|
|
data8 ia64_ni_syscall
|
1422 |
|
|
data8 ia64_ni_syscall // 1255
|
1423 |
|
|
data8 ia64_ni_syscall
|
1424 |
|
|
data8 ia64_ni_syscall
|
1425 |
|
|
data8 ia64_ni_syscall
|
1426 |
|
|
data8 ia64_ni_syscall
|
1427 |
|
|
data8 ia64_ni_syscall // 1260
|
1428 |
|
|
data8 ia64_ni_syscall
|
1429 |
|
|
data8 ia64_ni_syscall
|
1430 |
|
|
data8 ia64_ni_syscall
|
1431 |
|
|
data8 ia64_ni_syscall
|
1432 |
|
|
data8 ia64_ni_syscall // 1265
|
1433 |
|
|
data8 ia64_ni_syscall
|
1434 |
|
|
data8 ia64_ni_syscall
|
1435 |
|
|
data8 ia64_ni_syscall
|
1436 |
|
|
data8 ia64_ni_syscall
|
1437 |
|
|
data8 ia64_ni_syscall // 1270
|
1438 |
|
|
data8 ia64_ni_syscall
|
1439 |
|
|
data8 ia64_ni_syscall
|
1440 |
|
|
data8 ia64_ni_syscall
|
1441 |
|
|
data8 ia64_ni_syscall
|
1442 |
|
|
data8 ia64_ni_syscall // 1275
|
1443 |
|
|
data8 ia64_ni_syscall
|
1444 |
|
|
data8 ia64_ni_syscall
|
1445 |
|
|
data8 ia64_ni_syscall
|