OpenCores
URL https://opencores.org/ocsvn/test_project/test_project/trunk

Subversion Repositories test_project

[/] [test_project/] [trunk/] [linux_sd_driver/] [arch/] [s390/] [kernel/] [entry64.S] - Blame information for rev 63

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 63 marcus.erl
/*
2
 *  arch/s390/kernel/entry64.S
3
 *    S390 low-level entry points.
4
 *
5
 *    Copyright (C) IBM Corp. 1999,2006
6
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
7
 *               Hartmut Penner (hp@de.ibm.com),
8
 *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
9
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
10
 */
11
 
12
#include <linux/sys.h>
13
#include <linux/linkage.h>
14
#include <asm/cache.h>
15
#include <asm/lowcore.h>
16
#include <asm/errno.h>
17
#include <asm/ptrace.h>
18
#include <asm/thread_info.h>
19
#include <asm/asm-offsets.h>
20
#include <asm/unistd.h>
21
#include <asm/page.h>
22
 
23
/*
24
 * Stack layout for the system_call stack entry.
25
 * The first few entries are identical to the user_regs_struct.
26
 */
27
# Offsets of the saved pt_regs fields relative to %r15 (kernel stack
# pointer).  STACK_FRAME_OVERHEAD and the __PT_* values are generated
# offsets (asm-offsets); each gpr slot is 8 bytes wide (64-bit).
SP_PTREGS    =  STACK_FRAME_OVERHEAD
28
SP_ARGS      =  STACK_FRAME_OVERHEAD + __PT_ARGS
29
SP_PSW       =  STACK_FRAME_OVERHEAD + __PT_PSW
30
SP_R0        =  STACK_FRAME_OVERHEAD + __PT_GPRS
31
SP_R1        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 8
32
SP_R2        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 16
33
SP_R3        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 24
34
SP_R4        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 32
35
SP_R5        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 40
36
SP_R6        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 48
37
SP_R7        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 56
38
SP_R8        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 64
39
SP_R9        =  STACK_FRAME_OVERHEAD + __PT_GPRS + 72
40
SP_R10       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 80
41
SP_R11       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 88
42
SP_R12       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 96
43
SP_R13       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 104
44
SP_R14       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 112
45
SP_R15       =  STACK_FRAME_OVERHEAD + __PT_GPRS + 120
46
SP_ORIG_R2   =  STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2
47
SP_ILC       =  STACK_FRAME_OVERHEAD + __PT_ILC
48
SP_TRAP      =  STACK_FRAME_OVERHEAD + __PT_TRAP
49
SP_SIZE      =  STACK_FRAME_OVERHEAD + __PT_SIZE
50
 
51
# Kernel stack size = PAGE_SIZE << THREAD_ORDER (a power of two).
STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
52
STACK_SIZE  = 1 << STACK_SHIFT
53
 
54
# TIF bit masks checked on the exit paths: work pending after a system
# call (SVC) and after an interrupt, respectively.
_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NEED_RESCHED | \
55
                 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_SINGLE_STEP )
56
_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NEED_RESCHED | \
57
                 _TIF_MCCK_PENDING)
58
 
59
# %r13 holds the address of system_call (literal pool base, set by
# SAVE_ALL_BASE); BASED turns a label into a %r13-relative operand.
#define BASED(name) name-system_call(%r13)
60
 
61
# Tracing / lockdep / cpu-time-accounting helper macros.  Each expands
# to nothing when its config option is off.
#ifdef CONFIG_TRACE_IRQFLAGS
62
        .macro  TRACE_IRQS_ON
63
         brasl  %r14,trace_hardirqs_on
64
        .endm
65
 
66
        .macro  TRACE_IRQS_OFF
67
         brasl  %r14,trace_hardirqs_off
68
        .endm
69
 
70
        # Report to the irq tracer whichever irq state is recorded in
        # the PSW saved on the stack.
        .macro TRACE_IRQS_CHECK
71
        tm      SP_PSW(%r15),0x03       # irqs enabled?
72
        jz      0f
73
        brasl   %r14,trace_hardirqs_on
74
        j       1f
75
0:      brasl   %r14,trace_hardirqs_off
76
1:
77
        .endm
78
#else
79
#define TRACE_IRQS_ON
80
#define TRACE_IRQS_OFF
81
#define TRACE_IRQS_CHECK
82
#endif
83
 
84
#ifdef CONFIG_LOCKDEP
85
        # Notify lockdep only when actually leaving to user space.
        .macro  LOCKDEP_SYS_EXIT
86
        tm      SP_PSW+1(%r15),0x01     # returning to user ?
87
        jz      0f
88
        brasl   %r14,lockdep_sys_exit
89
0:
90
        .endm
91
#else
92
#define LOCKDEP_SYS_EXIT
93
#endif
94
 
95
        # Store the CPU timer at the given lowcore offset (vtime only).
        .macro  STORE_TIMER lc_offset
96
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
97
        stpt    \lc_offset
98
#endif
99
        .endm
100
 
101
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
102
        # Accumulate elapsed cpu time: \lc_sum += \lc_from - \lc_to.
        # Clobbers %r10.
        .macro  UPDATE_VTIME lc_from,lc_to,lc_sum
103
        lg      %r10,\lc_from
104
        slg     %r10,\lc_to
105
        alg     %r10,\lc_sum
106
        stg     %r10,\lc_sum
107
        .endm
108
#endif
109
 
110
/*
111
 * Register usage in interrupt handlers:
112
 *    R9  - pointer to current task structure
113
 *    R13 - pointer to literal pool
114
 *    R14 - return register for function calls
115
 *    R15 - kernel stack pointer
116
 */
117
 
118
        # Save %r12-%r15 to \savearea in lowcore and load the literal
        # pool base (address of system_call) into %r13.
        .macro  SAVE_ALL_BASE savearea
119
        stmg    %r12,%r15,\savearea
120
        larl    %r13,system_call
121
        .endm
122
 
123
        # SVC entry always comes from problem state, so switch to the
        # kernel stack unconditionally.  %r12 -> old PSW in lowcore.
        .macro  SAVE_ALL_SVC psworg,savearea
124
        la      %r12,\psworg
125
        lg      %r15,__LC_KERNEL_STACK  # problem state -> load ksp
126
        .endm
127
 
128
        # Synchronous entry (program check): load the kernel stack when
        # coming from user mode; otherwise stay on the current stack,
        # optionally checking it against the stack guard.
        .macro  SAVE_ALL_SYNC psworg,savearea
129
        la      %r12,\psworg
130
        tm      \psworg+1,0x01          # test problem state bit
131
        jz      2f                      # skip stack setup save
132
        lg      %r15,__LC_KERNEL_STACK  # problem state -> load ksp
133
#ifdef CONFIG_CHECK_STACK
134
        j       3f
135
2:      tml     %r15,STACK_SIZE - CONFIG_STACK_GUARD
136
        jz      stack_overflow
137
3:
138
#endif
139
2:
140
        .endm
141
 
142
        # Asynchronous entry (I/O, external): like SAVE_ALL_SYNC, but
        # also fixes up interrupted critical-section code (via
        # cleanup_critical) and switches to the separate async stack
        # unless we are already on it.
        .macro  SAVE_ALL_ASYNC psworg,savearea
143
        la      %r12,\psworg
144
        tm      \psworg+1,0x01          # test problem state bit
145
        jnz     1f                      # from user -> load kernel stack
146
        clc     \psworg+8(8),BASED(.Lcritical_end)
147
        jhe     0f
148
        clc     \psworg+8(8),BASED(.Lcritical_start)
149
        jl      0f
150
        brasl   %r14,cleanup_critical
151
        tm      1(%r12),0x01            # retest problem state after cleanup
152
        jnz     1f
153
0:      lg      %r14,__LC_ASYNC_STACK   # are we already on the async. stack ?
154
        slgr    %r14,%r15
155
        srag    %r14,%r14,STACK_SHIFT
156
        jz      2f
157
1:      lg      %r15,__LC_ASYNC_STACK   # load async stack
158
#ifdef CONFIG_CHECK_STACK
159
        j       3f
160
2:      tml     %r15,STACK_SIZE - CONFIG_STACK_GUARD
161
        jz      stack_overflow
162
3:
163
#endif
164
2:
165
        .endm
166
 
167
        # Build a pt_regs frame on %r15: saved PSW, gprs 0-15, the
        # original %r2 (syscall arg/restart value), the ilc, and a
        # terminating zero backchain.
        .macro  CREATE_STACK_FRAME psworg,savearea
168
        aghi    %r15,-SP_SIZE           # make room for registers & psw
169
        mvc     SP_PSW(16,%r15),0(%r12) # move user PSW to stack
170
        la      %r12,\psworg
171
        stg     %r2,SP_ORIG_R2(%r15)    # store original content of gpr 2
172
        icm     %r12,12,__LC_SVC_ILC
173
        stmg    %r0,%r11,SP_R0(%r15)    # store gprs %r0-%r11 to kernel stack
174
        st      %r12,SP_ILC(%r15)
175
        mvc     SP_R12(32,%r15),\savearea # move %r12-%r15 to stack
176
        la      %r12,0
177
        stg     %r12,__SF_BACKCHAIN(%r15)
178
        .endm
179
 
180
        # Return to the interrupted context: copy the saved PSW back to
        # lowcore, reload gprs 0-15 and lpswe.  \sync=0 additionally
        # clears the wait-state bit in the PSW.
        .macro  RESTORE_ALL psworg,sync
181
        mvc     \psworg(16),SP_PSW(%r15) # move user PSW to lowcore
182
        .if !\sync
183
        ni      \psworg+1,0xfd          # clear wait state bit
184
        .endif
185
        lmg     %r0,%r15,SP_R0(%r15)    # load gprs 0-15 of user
186
        STORE_TIMER __LC_EXIT_TIMER
187
        lpswe   \psworg                 # back to caller
188
        .endm
189
 
190
/*
191
 * Scheduler resume function, called by switch_to
192
 *  gpr2 = (task_struct *) prev
193
 *  gpr3 = (task_struct *) next
194
 * Returns:
195
 *  gpr2 = prev
196
 */
197
        .globl  __switch_to
198
__switch_to:
199
        # Load next's PER control registers (%c9-%c11) only when next
        # uses PER and the values actually differ from the current ones.
        tm      __THREAD_per+4(%r3),0xe8 # is the new process using per ?
200
        jz      __switch_to_noper               # if not we're fine
201
        stctg   %c9,%c11,__SF_EMPTY(%r15)# We are using per stuff
202
        clc     __THREAD_per(24,%r3),__SF_EMPTY(%r15)
203
        je      __switch_to_noper            # we got away without bashing TLB's
204
        lctlg   %c9,%c11,__THREAD_per(%r3)      # Nope we didn't
205
__switch_to_noper:
206
        # A pending machine check follows the cpu, not the task: move
        # the TIF bit from prev's thread_info to next's.
        lg      %r4,__THREAD_info(%r2)              # get thread_info of prev
207
        tm      __TI_flags+7(%r4),_TIF_MCCK_PENDING # machine check pending?
208
        jz      __switch_to_no_mcck
209
        ni      __TI_flags+7(%r4),255-_TIF_MCCK_PENDING # clear flag in prev
210
        lg      %r4,__THREAD_info(%r3)              # get thread_info of next
211
        oi      __TI_flags+7(%r4),_TIF_MCCK_PENDING # set it in next
212
__switch_to_no_mcck:
213
        stmg    %r6,%r15,__SF_GPRS(%r15)# store __switch_to registers of prev task
214
        stg     %r15,__THREAD_ksp(%r2)  # store kernel stack to prev->tss.ksp
215
        lg      %r15,__THREAD_ksp(%r3)  # load kernel stack from next->tss.ksp
216
        lmg     %r6,%r15,__SF_GPRS(%r15)# load __switch_to registers of next task
217
        stg     %r3,__LC_CURRENT        # __LC_CURRENT = current task struct
218
        lctl    %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
219
        lg      %r3,__THREAD_info(%r3)  # load thread_info from task struct
220
        stg     %r3,__LC_THREAD_INFO
221
        aghi    %r3,STACK_SIZE
222
        stg     %r3,__LC_KERNEL_STACK   # __LC_KERNEL_STACK = new kernel stack
223
        br      %r14
224
 
225
# Code between __critical_start and __critical_end is the region that
# cleanup_critical knows how to fix up when an async interrupt or a
# machine check lands inside it (see SAVE_ALL_ASYNC / mcck_int_handler).
__critical_start:
226
/*
227
 * SVC interrupt handler routine. System calls are synchronous events and
228
 * are executed with interrupts enabled.
229
 */
230
 
231
        .globl  system_call
232
system_call:
233
        STORE_TIMER __LC_SYNC_ENTER_TIMER
234
sysc_saveall:
235
        SAVE_ALL_BASE __LC_SAVE_AREA
236
        SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
237
        CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
238
        llgh    %r7,__LC_SVC_INT_CODE   # get svc number from lowcore
239
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
240
sysc_vtime:
241
        UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
242
sysc_stime:
243
        UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
244
sysc_update:
245
        mvc     __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
246
#endif
247
sysc_do_svc:
248
        # From here on: %r7 = svc number << 2, a byte index into the
        # syscall table (entries are loaded with lgf, i.e. 4 bytes).
        lg      %r9,__LC_THREAD_INFO    # load pointer to thread_info struct
249
        slag    %r7,%r7,2       # *4 and test for svc 0
250
        jnz     sysc_nr_ok
251
        # svc 0: system call number in %r1
252
        cl      %r1,BASED(.Lnr_syscalls)
253
        jnl     sysc_nr_ok
254
        lgfr    %r7,%r1         # clear high word in r1
255
        slag    %r7,%r7,2       # svc 0: system call number in %r1
256
sysc_nr_ok:
257
        mvc     SP_ARGS(8,%r15),SP_R7(%r15)
258
sysc_do_restart:
259
        larl    %r10,sys_call_table
260
#ifdef CONFIG_COMPAT
261
        tm      __TI_flags+5(%r9),(_TIF_31BIT>>16)  # running in 31 bit mode ?
262
        jno     sysc_noemu
263
        larl    %r10,sys_call_table_emu  # use 31 bit emulation system calls
264
sysc_noemu:
265
#endif
266
        tm      __TI_flags+7(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
267
        lgf     %r8,0(%r7,%r10) # load address of system call routine
268
        jnz     sysc_tracesys
269
        basr    %r14,%r8        # call sys_xxxx
270
        stg     %r2,SP_R2(%r15) # store return value (change R2 on stack)
271
 
272
sysc_return:
273
        tm      SP_PSW+1(%r15),0x01     # returning to user ?
274
        jno     sysc_restore
275
        tm      __TI_flags+7(%r9),_TIF_WORK_SVC
276
        jnz     sysc_work       # there is work to do (signals etc.)
277
sysc_restore:
278
#ifdef CONFIG_TRACE_IRQFLAGS
279
        larl    %r1,sysc_restore_trace_psw
280
        lpswe   0(%r1)
281
sysc_restore_trace:
282
        TRACE_IRQS_CHECK
283
        LOCKDEP_SYS_EXIT
284
#endif
285
sysc_leave:
286
        RESTORE_ALL __LC_RETURN_PSW,1
287
sysc_done:
288
 
289
#ifdef CONFIG_TRACE_IRQFLAGS
290
        .align  8
291
        .globl sysc_restore_trace_psw
292
sysc_restore_trace_psw:
293
        .quad   0, sysc_restore_trace
294
#endif
295
 
296
#
297
# recheck if there is more work to do
298
#
299
sysc_work_loop:
300
        tm      __TI_flags+7(%r9),_TIF_WORK_SVC
301
        jz      sysc_restore      # there is no work to do
302
#
303
# One of the work bits is on. Find out which one.
304
#
305
sysc_work:
306
        tm      __TI_flags+7(%r9),_TIF_MCCK_PENDING
307
        jo      sysc_mcck_pending
308
        tm      __TI_flags+7(%r9),_TIF_NEED_RESCHED
309
        jo      sysc_reschedule
310
        tm      __TI_flags+7(%r9),(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)
311
        jnz     sysc_sigpending
312
        tm      __TI_flags+7(%r9),_TIF_RESTART_SVC
313
        jo      sysc_restart
314
        tm      __TI_flags+7(%r9),_TIF_SINGLE_STEP
315
        jo      sysc_singlestep
316
        j       sysc_restore
317
sysc_work_done:
318
 
319
#
320
# _TIF_NEED_RESCHED is set, call schedule
321
#
322
sysc_reschedule:
323
        larl    %r14,sysc_work_loop
324
        jg      schedule        # return point is sysc_work_loop (%r14)
325
 
326
#
327
# _TIF_MCCK_PENDING is set, call handler
328
#
329
sysc_mcck_pending:
330
        larl    %r14,sysc_work_loop
331
        jg      s390_handle_mcck        # TIF bit will be cleared by handler
332
 
333
#
334
# _TIF_SIGPENDING or _TIF_RESTORE_SIGMASK is set, call do_signal
335
#
336
sysc_sigpending:
337
        ni      __TI_flags+7(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
338
        la      %r2,SP_PTREGS(%r15)     # load pt_regs
339
        brasl   %r14,do_signal          # call do_signal
340
        tm      __TI_flags+7(%r9),_TIF_RESTART_SVC
341
        jo      sysc_restart
342
        tm      __TI_flags+7(%r9),_TIF_SINGLE_STEP
343
        jo      sysc_singlestep
344
        j       sysc_work_loop
345
 
346
#
347
# _TIF_RESTART_SVC is set, set up registers and restart svc
348
#
349
sysc_restart:
350
        ni      __TI_flags+7(%r9),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC
351
        lg      %r7,SP_R2(%r15)         # load new svc number
352
        slag    %r7,%r7,2               # *4
353
        mvc     SP_R2(8,%r15),SP_ORIG_R2(%r15) # restore first argument
354
        lmg     %r2,%r6,SP_R2(%r15)     # load svc arguments
355
        j       sysc_do_restart         # restart svc
356
 
357
#
358
# _TIF_SINGLE_STEP is set, call do_single_step
359
#
360
sysc_singlestep:
361
        ni      __TI_flags+7(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
362
        lhi     %r0,__LC_PGM_OLD_PSW
363
        sth     %r0,SP_TRAP(%r15)       # set trap indication to pgm check
364
        la      %r2,SP_PTREGS(%r15)     # address of register-save area
365
        larl    %r14,sysc_return        # load adr. of system return
366
        jg      do_single_step          # branch to do_sigtrap
367
 
368
#
369
# call syscall_trace before and after system call
370
# special linkage: %r12 contains the return address for trace_svc
371
#
372
sysc_tracesys:
373
        la      %r2,SP_PTREGS(%r15)     # load pt_regs
374
        la      %r3,0
375
        srl     %r7,2
376
        stg     %r7,SP_R2(%r15)
377
        brasl   %r14,syscall_trace
378
        # The tracer may have rewritten the syscall number in SP_R2;
        # only dispatch it if it is still within NR_syscalls.
        lghi    %r0,NR_syscalls
379
        clg     %r0,SP_R2(%r15)
380
        jnh     sysc_tracenogo
381
        lg      %r7,SP_R2(%r15)         # strace might have changed the
382
        sll     %r7,2                   # system call
383
        lgf     %r8,0(%r7,%r10)
384
sysc_tracego:
385
        lmg     %r3,%r6,SP_R3(%r15)
386
        lg      %r2,SP_ORIG_R2(%r15)
387
        basr    %r14,%r8                # call sys_xxx
388
        stg     %r2,SP_R2(%r15)         # store return value
389
sysc_tracenogo:
390
        tm      __TI_flags+7(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
391
        jz      sysc_return
392
        la      %r2,SP_PTREGS(%r15)     # load pt_regs
393
        la      %r3,1
394
        larl    %r14,sysc_return        # return point is sysc_return
395
        jg      syscall_trace
396
 
397
#
398
# a new process exits the kernel with ret_from_fork
399
#
400
        .globl  ret_from_fork
401
ret_from_fork:
402
        # Reload the literal pool base from the SVC new PSW address
        # part (presumably the address of system_call — see BASED).
        lg      %r13,__LC_SVC_NEW_PSW+8
403
        lg      %r9,__LC_THREAD_INFO    # load pointer to thread_info struct
404
        tm      SP_PSW+1(%r15),0x01     # forking a kernel thread ?
405
        jo      0f
406
        stg     %r15,SP_R15(%r15)       # store stack pointer for new kthread
407
0:      brasl   %r14,schedule_tail
408
        TRACE_IRQS_ON
409
        stosm   24(%r15),0x03           # reenable interrupts
410
        j       sysc_return
411
 
412
#
413
# kernel_execve function needs to deal with pt_regs that is not
414
# at the usual place
415
#
416
        .globl  kernel_execve
417
kernel_execve:
418
        # Build a private stack frame with a zeroed pt_regs (%r12) and
        # pass it to do_execve as the 4th argument (%r5).
        stmg    %r12,%r15,96(%r15)
419
        lgr     %r14,%r15
420
        aghi    %r15,-SP_SIZE
421
        stg     %r14,__SF_BACKCHAIN(%r15)
422
        la      %r12,SP_PTREGS(%r15)
423
        xc      0(__PT_SIZE,%r12),0(%r12)
424
        lgr     %r5,%r12
425
        brasl   %r14,do_execve
426
        ltgfr   %r2,%r2
427
        je      0f
428
        # do_execve failed: unwind the frame and return the error code.
        aghi    %r15,SP_SIZE
429
        lmg     %r12,%r15,96(%r15)
430
        br      %r14
431
        # execve succeeded.
432
0:      stnsm   __SF_EMPTY(%r15),0xfc   # disable interrupts
433
        lg      %r15,__LC_KERNEL_STACK  # load ksp
434
        aghi    %r15,-SP_SIZE           # make room for registers & psw
435
        lg      %r13,__LC_SVC_NEW_PSW+8
436
        lg      %r9,__LC_THREAD_INFO
437
        mvc     SP_PTREGS(__PT_SIZE,%r15),0(%r12)       # copy pt_regs
438
        xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
439
        stosm   __SF_EMPTY(%r15),0x03   # reenable interrupts
440
        brasl   %r14,execve_tail
441
        j       sysc_return
442
 
443
/*
444
 * Program check handler routine
445
 */
446
 
447
        .globl  pgm_check_handler
448
pgm_check_handler:
449
/*
450
 * First we need to check for a special case:
451
 * Single stepping an instruction that disables the PER event mask will
452
 * cause a PER event AFTER the mask has been set. Example: SVC or LPSW.
453
 * For a single stepped SVC the program check handler gets control after
454
 * the SVC new PSW has been loaded. But we want to execute the SVC first and
455
 * then handle the PER event. Therefore we update the SVC old PSW to point
456
 * to the pgm_check_handler and branch to the SVC handler after we checked
457
 * if we have to load the kernel stack register.
458
 * For every other possible cause for PER event without the PER mask set
459
 * we just ignore the PER event (FIXME: is there anything we have to do
460
 * for LPSW?).
461
 */
462
        STORE_TIMER __LC_SYNC_ENTER_TIMER
463
        SAVE_ALL_BASE __LC_SAVE_AREA
464
        tm      __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
465
        jnz     pgm_per                  # got per exception -> special case
466
        SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
467
        CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
468
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
469
        tm      SP_PSW+1(%r15),0x01     # interrupting from user ?
470
        jz      pgm_no_vtime
471
        UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
472
        UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
473
        mvc     __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
474
pgm_no_vtime:
475
#endif
476
        lg      %r9,__LC_THREAD_INFO    # load pointer to thread_info struct
477
        TRACE_IRQS_OFF
478
        lgf     %r3,__LC_PGM_ILC        # load program interruption code
479
        lghi    %r8,0x7f
480
        ngr     %r8,%r3
481
pgm_do_call:
482
        # Dispatch via pgm_check_table: %r8 = interruption code & 0x7f,
        # scaled by 8 (table entries are 8-byte pointers).  Returns to
        # sysc_return via %r14.
        sll     %r8,3
483
        larl    %r1,pgm_check_table
484
        lg      %r1,0(%r8,%r1)          # load address of handler routine
485
        la      %r2,SP_PTREGS(%r15)     # address of register-save area
486
        larl    %r14,sysc_return
487
        br      %r1                     # branch to interrupt-handler
488
 
489
#
490
# handle per exception
491
#
492
pgm_per:
493
        tm      __LC_PGM_OLD_PSW,0x40   # test if per event recording is on
494
        jnz     pgm_per_std             # ok, normal per event from user space
495
# ok its one of the special cases, now we need to find out which one
496
        clc     __LC_PGM_OLD_PSW(16),__LC_SVC_NEW_PSW
497
        je      pgm_svcper
498
# no interesting special case, ignore PER event
499
        lmg     %r12,%r15,__LC_SAVE_AREA
500
        lpswe   __LC_PGM_OLD_PSW
501
 
502
#
503
# Normal per exception
504
#
505
pgm_per_std:
506
        SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
507
        CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
508
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
509
        tm      SP_PSW+1(%r15),0x01     # interrupting from user ?
510
        jz      pgm_no_vtime2
511
        UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
512
        UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
513
        mvc     __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
514
pgm_no_vtime2:
515
#endif
516
        lg      %r9,__LC_THREAD_INFO    # load pointer to thread_info struct
517
        TRACE_IRQS_OFF
518
        lg      %r1,__TI_task(%r9)
519
        tm      SP_PSW+1(%r15),0x01     # kernel per event ?
520
        jz      kernel_per
521
        # Record the PER event details in the task's thread struct and
        # flag the task for single-step handling on exit.
        mvc     __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
522
        mvc     __THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS
523
        mvc     __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
524
        oi      __TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
525
        lgf     %r3,__LC_PGM_ILC        # load program interruption code
526
        lghi    %r8,0x7f
527
        ngr     %r8,%r3                 # clear per-event-bit and ilc
528
        je      sysc_return
529
        j       pgm_do_call
530
 
531
#
532
# it was a single stepped SVC that is causing all the trouble
533
#
534
pgm_svcper:
535
        SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
536
        CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
537
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
538
        UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
539
        UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
540
        mvc     __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
541
#endif
542
        llgh    %r7,__LC_SVC_INT_CODE   # get svc number from lowcore
543
        lg      %r9,__LC_THREAD_INFO    # load pointer to thread_info struct
544
        lg      %r1,__TI_task(%r9)
545
        mvc     __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
546
        mvc     __THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS
547
        mvc     __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
548
        oi      __TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
549
        TRACE_IRQS_ON
550
        stosm   __SF_EMPTY(%r15),0x03   # reenable interrupts
551
        j       sysc_do_svc
552
 
553
#
554
# per was called from kernel, must be kprobes
555
#
556
kernel_per:
557
        lhi     %r0,__LC_PGM_OLD_PSW
558
        sth     %r0,SP_TRAP(%r15)       # set trap indication to pgm check
559
        la      %r2,SP_PTREGS(%r15)     # address of register-save area
560
        larl    %r14,sysc_restore       # load adr. of system ret, no work
561
        jg      do_single_step          # branch to do_single_step
562
 
563
/*
564
 * IO interrupt handler routine
565
 */
566
        .globl io_int_handler
567
io_int_handler:
568
        STORE_TIMER __LC_ASYNC_ENTER_TIMER
569
        stck    __LC_INT_CLOCK
570
        SAVE_ALL_BASE __LC_SAVE_AREA+32
571
        SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+32
572
        CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+32
573
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
574
        tm      SP_PSW+1(%r15),0x01     # interrupting from user ?
575
        jz      io_no_vtime
576
        UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
577
        UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
578
        mvc     __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
579
io_no_vtime:
580
#endif
581
        lg      %r9,__LC_THREAD_INFO    # load pointer to thread_info struct
582
        TRACE_IRQS_OFF
583
        la      %r2,SP_PTREGS(%r15)     # address of register-save area
584
        brasl   %r14,do_IRQ             # call standard irq handler
585
io_return:
586
        # Common interrupt exit, shared with ext_int_handler.
        tm      SP_PSW+1(%r15),0x01     # returning to user ?
587
#ifdef CONFIG_PREEMPT
588
        jno     io_preempt              # no -> check for preemptive scheduling
589
#else
590
        jno     io_restore              # no-> skip resched & signal
591
#endif
592
        tm      __TI_flags+7(%r9),_TIF_WORK_INT
593
        jnz     io_work                 # there is work to do (signals etc.)
594
io_restore:
595
#ifdef CONFIG_TRACE_IRQFLAGS
596
        larl    %r1,io_restore_trace_psw
597
        lpswe   0(%r1)
598
io_restore_trace:
599
        TRACE_IRQS_CHECK
600
        LOCKDEP_SYS_EXIT
601
#endif
602
io_leave:
603
        RESTORE_ALL __LC_RETURN_PSW,0
604
io_done:
605
 
606
#ifdef CONFIG_TRACE_IRQFLAGS
607
        .align  8
608
        .globl io_restore_trace_psw
609
io_restore_trace_psw:
610
        .quad   0, io_restore_trace
611
#endif
612
 
613
#ifdef CONFIG_PREEMPT
614
io_preempt:
615
        # Preemption while in kernel: only when preempt_count is zero.
        icm     %r0,15,__TI_precount(%r9)
616
        jnz     io_restore
617
        # switch to kernel stack
618
        lg      %r1,SP_R15(%r15)
619
        aghi    %r1,-SP_SIZE
620
        mvc     SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
621
        xc      __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
622
        lgr     %r15,%r1
623
io_resume_loop:
624
        tm      __TI_flags+7(%r9),_TIF_NEED_RESCHED
625
        jno     io_restore
626
        larl    %r14,io_resume_loop
627
        jg      preempt_schedule_irq
628
#endif
629
 
630
#
631
# switch to kernel stack, then check TIF bits
632
#
633
io_work:
634
        lg      %r1,__LC_KERNEL_STACK
635
        aghi    %r1,-SP_SIZE
636
        mvc     SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
637
        xc      __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
638
        lgr     %r15,%r1
639
#
640
# One of the work bits is on. Find out which one.
641
# Checked are: _TIF_SIGPENDING, _TIF_RESTORE_SIGPENDING, _TIF_NEED_RESCHED
642
#              and _TIF_MCCK_PENDING
643
#
644
io_work_loop:
645
        tm      __TI_flags+7(%r9),_TIF_MCCK_PENDING
646
        jo      io_mcck_pending
647
        tm      __TI_flags+7(%r9),_TIF_NEED_RESCHED
648
        jo      io_reschedule
649
        tm      __TI_flags+7(%r9),(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)
650
        jnz     io_sigpending
651
        j       io_restore
652
io_work_done:
653
 
654
#
655
# _TIF_MCCK_PENDING is set, call handler
656
#
657
io_mcck_pending:
658
        brasl   %r14,s390_handle_mcck   # TIF bit will be cleared by handler
659
        j       io_work_loop
660
 
661
#
662
# _TIF_NEED_RESCHED is set, call schedule
663
#
664
io_reschedule:
665
        TRACE_IRQS_ON
666
        stosm   __SF_EMPTY(%r15),0x03   # reenable interrupts
667
        brasl   %r14,schedule           # call scheduler
668
        stnsm   __SF_EMPTY(%r15),0xfc   # disable I/O and ext. interrupts
669
        TRACE_IRQS_OFF
670
        tm      __TI_flags+7(%r9),_TIF_WORK_INT
671
        jz      io_restore              # there is no work to do
672
        j       io_work_loop
673
 
674
#
675
# _TIF_SIGPENDING or _TIF_RESTORE_SIGMASK is set, call do_signal
676
#
677
io_sigpending:
678
        TRACE_IRQS_ON
679
        stosm   __SF_EMPTY(%r15),0x03   # reenable interrupts
680
        la      %r2,SP_PTREGS(%r15)     # load pt_regs
681
        brasl   %r14,do_signal          # call do_signal
682
        stnsm   __SF_EMPTY(%r15),0xfc   # disable I/O and ext. interrupts
683
        TRACE_IRQS_OFF
684
        j       io_work_loop
685
 
686
/*
687
 * External interrupt handler routine
688
 */
689
        .globl  ext_int_handler
690
ext_int_handler:
691
        STORE_TIMER __LC_ASYNC_ENTER_TIMER
692
        stck    __LC_INT_CLOCK
693
        SAVE_ALL_BASE __LC_SAVE_AREA+32
694
        SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32
695
        CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32
696
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
697
        tm      SP_PSW+1(%r15),0x01     # interrupting from user ?
698
        jz      ext_no_vtime
699
        UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
700
        UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
701
        mvc     __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
702
ext_no_vtime:
703
#endif
704
        lg      %r9,__LC_THREAD_INFO    # load pointer to thread_info struct
705
        TRACE_IRQS_OFF
706
        la      %r2,SP_PTREGS(%r15)     # address of register-save area
707
        llgh    %r3,__LC_EXT_INT_CODE   # get interruption code
708
        brasl   %r14,do_extint
709
        j       io_return               # shared interrupt exit path
710
 
711
__critical_end:
712
 
713
/*
714
 * Machine check handler routines
715
 */
716
        .globl mcck_int_handler
717
mcck_int_handler:
718
        # Registers may be corrupt: revalidate gprs and the cpu timer
        # from the machine-check save areas at the top of the prefix
        # page (addressed as 4095(%r1) to stay within displacements).
        la      %r1,4095                # revalidate r1
719
        spt     __LC_CPU_TIMER_SAVE_AREA-4095(%r1)      # revalidate cpu timer
720
        lmg     %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
721
        SAVE_ALL_BASE __LC_SAVE_AREA+64
722
        la      %r12,__LC_MCK_OLD_PSW
723
        tm      __LC_MCCK_CODE,0x80     # system damage?
724
        jo      mcck_int_main           # yes -> rest of mcck code invalid
725
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
726
        # If the stored cpu timer is invalid, pick the newest of the
        # known-good lowcore timer values as a best-effort substitute.
        la      %r14,4095
727
        mvc     __LC_SAVE_AREA+104(8),__LC_ASYNC_ENTER_TIMER
728
        mvc     __LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA-4095(%r14)
729
        tm      __LC_MCCK_CODE+5,0x02   # stored cpu timer value valid?
730
        jo      1f
731
        la      %r14,__LC_SYNC_ENTER_TIMER
732
        clc     0(8,%r14),__LC_ASYNC_ENTER_TIMER
733
        jl      0f
734
        la      %r14,__LC_ASYNC_ENTER_TIMER
735
0:      clc     0(8,%r14),__LC_EXIT_TIMER
736
        jl      0f
737
        la      %r14,__LC_EXIT_TIMER
738
0:      clc     0(8,%r14),__LC_LAST_UPDATE_TIMER
739
        jl      0f
740
        la      %r14,__LC_LAST_UPDATE_TIMER
741
0:      spt     0(%r14)
742
        mvc     __LC_ASYNC_ENTER_TIMER(8),0(%r14)
743
1:
744
#endif
745
        tm      __LC_MCCK_CODE+2,0x09   # mwp + ia of old psw valid?
746
        jno     mcck_int_main           # no -> skip cleanup critical
747
        tm      __LC_MCK_OLD_PSW+1,0x01 # test problem state bit
748
        jnz     mcck_int_main           # from user -> load kernel stack
749
        clc     __LC_MCK_OLD_PSW+8(8),BASED(.Lcritical_end)
750
        jhe     mcck_int_main
751
        clc     __LC_MCK_OLD_PSW+8(8),BASED(.Lcritical_start)
752
        jl      mcck_int_main
753
        brasl   %r14,cleanup_critical
754
mcck_int_main:
755
        lg      %r14,__LC_PANIC_STACK   # are we already on the panic stack?
756
        slgr    %r14,%r15
757
        srag    %r14,%r14,PAGE_SHIFT
758
        jz      0f
759
        lg      %r15,__LC_PANIC_STACK   # load panic stack
760
0:      CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+64
761
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
762
        tm      __LC_MCCK_CODE+2,0x08   # mwp of old psw valid?
763
        jno     mcck_no_vtime           # no -> no timer update
764
        tm      SP_PSW+1(%r15),0x01     # interrupting from user ?
765
        jz      mcck_no_vtime
766
        UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
767
        UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
768
        mvc     __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
769
mcck_no_vtime:
770
#endif
771
        lg      %r9,__LC_THREAD_INFO    # load pointer to thread_info struct
772
        la      %r2,SP_PTREGS(%r15)     # load pt_regs
773
        brasl   %r14,s390_do_machine_check
774
        tm      SP_PSW+1(%r15),0x01     # returning to user ?
775
        jno     mcck_return
776
        lg      %r1,__LC_KERNEL_STACK   # switch to kernel stack
777
        aghi    %r1,-SP_SIZE
778
        mvc     SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
779
        xc      __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
780
        lgr     %r15,%r1
781
        stosm   __SF_EMPTY(%r15),0x04   # turn dat on
782
        tm      __TI_flags+7(%r9),_TIF_MCCK_PENDING
783
        jno     mcck_return
784
        TRACE_IRQS_OFF
785
        brasl   %r14,s390_handle_mcck
786
        TRACE_IRQS_ON
787
mcck_return:
788
        mvc     __LC_RETURN_MCCK_PSW(16),SP_PSW(%r15) # move return PSW
789
        ni      __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
790
        lmg     %r0,%r15,SP_R0(%r15)    # load gprs 0-15
791
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
792
        mvc     __LC_ASYNC_ENTER_TIMER(8),__LC_SAVE_AREA+104
793
        tm      __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
794
        jno     0f
795
        stpt    __LC_EXIT_TIMER
796
0:
797
#endif
798
        lpswe   __LC_RETURN_MCCK_PSW    # back to caller
799
 
800
/*
801
 * Restart interruption handler, kick starter for additional CPUs
802
 */
803
#ifdef CONFIG_SMP
804
#ifndef CONFIG_HOTPLUG_CPU
805
        .section .init.text,"ax"
806
#endif
807
        .globl restart_int_handler
808
restart_int_handler:
809
        lg      %r15,__LC_SAVE_AREA+120 # load ksp
810
        lghi    %r10,__LC_CREGS_SAVE_AREA
811
        lctlg   %c0,%c15,0(%r10) # get new ctl regs
812
        lghi    %r10,__LC_AREGS_SAVE_AREA
813
        lam     %a0,%a15,0(%r10)        # load all access registers
814
        lmg     %r6,%r15,__SF_GPRS(%r15) # load registers from clone
815
        stosm   __SF_EMPTY(%r15),0x04   # now we can turn dat on
816
        jg      start_secondary         # enter C code, does not return
817
#ifndef CONFIG_HOTPLUG_CPU
818
        .previous
819
#endif
820
#else
821
/*
822
 * If we do not run with SMP enabled, let the new CPU crash ...
823
 */
824
        .globl restart_int_handler
825
restart_int_handler:
826
        basr    %r1,0                   # get base address in %r1
827
restart_base:
828
        lpswe   restart_crash-restart_base(%r1) # load disabled wait PSW
829
        .align 8
830
restart_crash:
831
        .long  0x000a0000,0x00000000,0x00000000,0x00000000 # disabled wait PSW
832
restart_go:
833
#endif
834
 
835
#ifdef CONFIG_CHECK_STACK
836
/*
837
 * The synchronous or the asynchronous stack overflowed. We are dead.
838
 * No need to properly save the registers, we are going to panic anyway.
839
 * Setup a pt_regs so that show_trace can provide a good call trace.
840
 */
841
stack_overflow:
842
        lg      %r15,__LC_PANIC_STACK   # change to panic stack
843
        aghi    %r15,-SP_SIZE           # allocate a stack frame
844
        mvc     SP_PSW(16,%r15),0(%r12) # move user PSW to stack
845
        stmg    %r0,%r11,SP_R0(%r15)    # store gprs %r0-%r11 to kernel stack
846
        la      %r1,__LC_SAVE_AREA      # %r12 holds the old PSW address:
847
        chi     %r12,__LC_SVC_OLD_PSW   # svc and program interruptions keep
848
        je      0f                      # their gprs at __LC_SAVE_AREA,
849
        chi     %r12,__LC_PGM_OLD_PSW   # async ones at __LC_SAVE_AREA+32
850
        je      0f
851
        la      %r1,__LC_SAVE_AREA+32
852
0:      mvc     SP_R12(32,%r15),0(%r1)  # move %r12-%r15 to stack
853
        xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear back chain
854
        la      %r2,SP_PTREGS(%r15)     # load pt_regs
855
        jg      kernel_stack_overflow   # tail-call the C panic routine
856
#endif
857
 
858
#
# Critical section ranges: each entry is a pair of addresses delimiting
# a code range [start, end).  cleanup_critical below compares the
# interrupted PSW address against these ranges.
#
cleanup_table_system_call:
859
        .quad   system_call, sysc_do_svc
860
cleanup_table_sysc_return:
861
        .quad   sysc_return, sysc_leave
862
cleanup_table_sysc_leave:
863
        .quad   sysc_leave, sysc_done
864
cleanup_table_sysc_work_loop:
865
        .quad   sysc_work_loop, sysc_work_done
866
cleanup_table_io_return:
867
        .quad   io_return, io_leave
868
cleanup_table_io_leave:
869
        .quad   io_leave, io_done
870
cleanup_table_io_work_loop:
871
        .quad   io_work_loop, io_work_done
872
 
873
#
# cleanup_critical: %r12 points to the interrupting old PSW, so 8(%r12)
# is the interrupted instruction address.  If that address lies inside
# one of the critical ranges above, branch to the matching fixup
# routine; otherwise just return to the caller via %r14.
#
cleanup_critical:
874
        clc     8(8,%r12),BASED(cleanup_table_system_call) # below range start?
875
        jl      0f
876
        clc     8(8,%r12),BASED(cleanup_table_system_call+8) # below range end?
877
        jl      cleanup_system_call
878
0:
879
        clc     8(8,%r12),BASED(cleanup_table_sysc_return)
880
        jl      0f
881
        clc     8(8,%r12),BASED(cleanup_table_sysc_return+8)
882
        jl      cleanup_sysc_return
883
0:
884
        clc     8(8,%r12),BASED(cleanup_table_sysc_leave)
885
        jl      0f
886
        clc     8(8,%r12),BASED(cleanup_table_sysc_leave+8)
887
        jl      cleanup_sysc_leave
888
0:
889
        clc     8(8,%r12),BASED(cleanup_table_sysc_work_loop)
890
        jl      0f
891
        clc     8(8,%r12),BASED(cleanup_table_sysc_work_loop+8)
892
        jl      cleanup_sysc_return     # work loop restarts at sysc_return
893
0:
894
        clc     8(8,%r12),BASED(cleanup_table_io_return)
895
        jl      0f
896
        clc     8(8,%r12),BASED(cleanup_table_io_return+8)
897
        jl      cleanup_io_return
898
0:
899
        clc     8(8,%r12),BASED(cleanup_table_io_leave)
900
        jl      0f
901
        clc     8(8,%r12),BASED(cleanup_table_io_leave+8)
902
        jl      cleanup_io_leave
903
0:
904
        clc     8(8,%r12),BASED(cleanup_table_io_work_loop)
905
        jl      0f
906
        clc     8(8,%r12),BASED(cleanup_table_io_work_loop+8)
907
        jl      cleanup_io_return       # work loop restarts at io_work_loop
908
0:
909
        br      %r14                    # not in a critical range
910
 
911
#
# Fixup for an interruption inside system_call..sysc_do_svc: redo the
# entry steps (register save, stack frame, timer updates) that had not
# completed yet, then set the return PSW address to sysc_do_svc.  The
# insn table below marks how far the entry code had progressed.
#
cleanup_system_call:
912
        mvc     __LC_RETURN_PSW(16),0(%r12)     # start from the old PSW
913
        cghi    %r12,__LC_MCK_OLD_PSW   # pick the matching gpr save area:
914
        je      0f
915
        la      %r12,__LC_SAVE_AREA+32  # async interruption
916
        j       1f
917
0:      la      %r12,__LC_SAVE_AREA+64  # machine check
918
1:
919
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
920
        clc     __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+8)
921
        jh      0f                      # past system_call entry?
922
        mvc     __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
923
0:      clc     __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+16)
924
        jhe     cleanup_vtime           # register save done, redo timers only
925
#endif
926
        clc     __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn)
927
        jh      0f                      # past sysc_saveall?
928
        mvc     __LC_SAVE_AREA(32),0(%r12) # no: redo gpr save into save area
929
0:      stg     %r13,8(%r12)
930
        stg     %r12,__LC_SAVE_AREA+96  # argh - stash %r12 across the macros
931
        SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
932
        CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
933
        lg      %r12,__LC_SAVE_AREA+96  # argh - recover stashed %r12
934
        stg     %r15,24(%r12)           # new stack pointer into save area
935
        llgh    %r7,__LC_SVC_INT_CODE   # reload svc interruption code
936
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
937
cleanup_vtime:
938
        clc     __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24)
939
        jhe     cleanup_stime           # vtime update already done?
940
        UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
941
cleanup_stime:
942
        clc     __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+32)
943
        jh      cleanup_update          # stime update already done?
944
        UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
945
cleanup_update:
946
        mvc     __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
947
#endif
948
        mvc     __LC_RETURN_PSW+8(8),BASED(cleanup_table_system_call+8) # resume at sysc_do_svc
949
        la      %r12,__LC_RETURN_PSW    # hand the fixed-up PSW to the caller
950
        br      %r14
951
cleanup_system_call_insn:
952
# Instruction addresses delimiting the individual system call entry steps.
        .quad   sysc_saveall
953
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
954
        .quad   system_call
955
        .quad   sysc_vtime
956
        .quad   sysc_stime
957
        .quad   sysc_update
958
#endif
959
 
960
#
# Fixup for an interruption inside sysc_return..sysc_leave: keep the
# old PSW flags but restart the interrupted code at sysc_return.
#
cleanup_sysc_return:
961
        mvc     __LC_RETURN_PSW(8),0(%r12)      # flag half of the old PSW
962
        mvc     __LC_RETURN_PSW+8(8),BASED(cleanup_table_sysc_return) # address = sysc_return
963
        la      %r12,__LC_RETURN_PSW    # hand the fixed-up PSW to the caller
964
        br      %r14
965
 
966
#
# Fixup for an interruption inside sysc_leave..sysc_done: unless the
# exit was already complete, redo the final register restore so the
# return PSW and the lowcore save areas are consistent again.
#
cleanup_sysc_leave:
967
        clc     8(8,%r12),BASED(cleanup_sysc_leave_insn) # at the last insn?
968
        je      2f                      # yes -> nothing left to do
969
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
970
        mvc     __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
971
        clc     8(8,%r12),BASED(cleanup_sysc_leave_insn+8) # past the stpt?
972
        je      2f
973
#endif
974
        mvc     __LC_RETURN_PSW(16),SP_PSW(%r15) # return PSW from pt_regs
975
        cghi    %r12,__LC_MCK_OLD_PSW   # pick save area by interruption type
976
        jne     0f
977
        mvc     __LC_SAVE_AREA+64(32),SP_R12(%r15) # mcck: gprs 12-15 to +64
978
        j       1f
979
0:      mvc     __LC_SAVE_AREA+32(32),SP_R12(%r15) # async: gprs 12-15 to +32
980
1:      lmg     %r0,%r11,SP_R0(%r15)    # reload gprs 0-11
981
        lg      %r15,SP_R15(%r15)       # reload saved stack pointer
982
2:      la      %r12,__LC_RETURN_PSW    # hand the fixed-up PSW to the caller
983
        br      %r14
984
cleanup_sysc_leave_insn:
985
        .quad   sysc_done - 4           # last instruction before sysc_done
986
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
987
        .quad   sysc_done - 8           # instruction before that (the stpt)
988
#endif
989
 
990
#
# Fixup for an interruption inside io_return..io_leave (also used for
# io_work_loop): keep the old PSW flags but restart at io_work_loop.
#
cleanup_io_return:
991
        mvc     __LC_RETURN_PSW(8),0(%r12)      # flag half of the old PSW
992
        mvc     __LC_RETURN_PSW+8(8),BASED(cleanup_table_io_work_loop) # address = io_work_loop
993
        la      %r12,__LC_RETURN_PSW    # hand the fixed-up PSW to the caller
994
        br      %r14
995
 
996
#
# Fixup for an interruption inside io_leave..io_done: unless the exit
# was already complete, redo the final register restore so the return
# PSW and the lowcore save areas are consistent again.
#
cleanup_io_leave:
997
        clc     8(8,%r12),BASED(cleanup_io_leave_insn) # at the last insn?
998
        je      2f                      # yes -> nothing left to do
999
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
1000
        mvc     __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
1001
        clc     8(8,%r12),BASED(cleanup_io_leave_insn+8) # past the stpt?
1002
        je      2f
1003
#endif
1004
        mvc     __LC_RETURN_PSW(16),SP_PSW(%r15) # return PSW from pt_regs
1005
        cghi    %r12,__LC_MCK_OLD_PSW   # pick save area by interruption type
1006
        jne     0f
1007
        mvc     __LC_SAVE_AREA+64(32),SP_R12(%r15) # mcck: gprs 12-15 to +64
1008
        j       1f
1009
0:      mvc     __LC_SAVE_AREA+32(32),SP_R12(%r15) # async: gprs 12-15 to +32
1010
1:      lmg     %r0,%r11,SP_R0(%r15)    # reload gprs 0-11
1011
        lg      %r15,SP_R15(%r15)       # reload saved stack pointer
1012
2:      la      %r12,__LC_RETURN_PSW    # hand the fixed-up PSW to the caller
1013
        br      %r14
1014
cleanup_io_leave_insn:
1015
        .quad   io_done - 4             # last instruction before io_done
1016
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
1017
        .quad   io_done - 8             # instruction before that (the stpt)
1018
#endif
1019
 
1020
/*
1021
 * Integer constants
1022
 */
1023
                .align  4
1024
.Lconst:
1025
.Lnr_syscalls:  .long   NR_syscalls     # number of system calls
1026
# NOTE(review): 0x130..0x170 look like lowcore old-PSW offsets - confirm
# against the lowcore layout before relying on that.
.L0x0130:       .short  0x130
1027
.L0x0140:       .short  0x140
1028
.L0x0150:       .short  0x150
1029
.L0x0160:       .short  0x160
1030
.L0x0170:       .short  0x170
1031
.Lcritical_start:
1032
                .quad   __critical_start        # start of critical section
1033
.Lcritical_end:
1034
                .quad   __critical_end          # end of critical section
1035
 
1036
                .section .rodata, "a"
1037
#
# System call table: one 32-bit (.long) entry per syscall; SYSCALL()
# selects the 64-bit (esame) entry point from each syscalls.S line.
#
#define SYSCALL(esa,esame,emu)  .long esame
1038
sys_call_table:
1039
#include "syscalls.S"
1040
#undef SYSCALL
1041

1042
#ifdef CONFIG_COMPAT
1043

1044
# 31-bit compat table: select the emulation (emu) entry points instead.
#define SYSCALL(esa,esame,emu)  .long emu
1045
sys_call_table_emu:
1046
#include "syscalls.S"
1047
#undef SYSCALL
1048
#endif

powered by: WebSVN 2.1.0

© copyright 1999-2025 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.