OpenCores
URL https://opencores.org/ocsvn/or1k_soc_on_altera_embedded_dev_kit/or1k_soc_on_altera_embedded_dev_kit/trunk

Subversion Repositories or1k_soc_on_altera_embedded_dev_kit

[/] [or1k_soc_on_altera_embedded_dev_kit/] [trunk/] [linux-2.6/] [linux-2.6.24/] [arch/] [sparc/] [kernel/] [entry.S] - Blame information for rev 3

Details | Compare with Previous | View Log

Line No. Rev Author Line
/* arch/sparc/kernel/entry.S:  Sparc trap low-level entry points.
 *
 * Copyright (C) 1995, 2007 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1996 Eddie C. Dost   (ecd@skynet.be)
 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
 * Copyright (C) 1996-1999 Jakub Jelinek   (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1997 Anton Blanchard (anton@progsoc.uts.edu.au)
 */
9
 
10
#include <linux/errno.h>

#include <asm/head.h>
#include <asm/asi.h>
#include <asm/smp.h>
#include <asm/kgdb.h>
#include <asm/contregs.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/psr.h>
#include <asm/vaddrs.h>
#include <asm/memreg.h>
#include <asm/page.h>
#ifdef CONFIG_SUN4
#include <asm/pgtsun4.h>
#else
#include <asm/pgtsun4c.h>
#endif
#include <asm/winmacro.h>
#include <asm/signal.h>
#include <asm/obio.h>
#include <asm/mxcc.h>
#include <asm/thread_info.h>
#include <asm/param.h>
#include <asm/unistd.h>

#include <asm/asmmacro.h>
37
 
38
#define curptr      g6
39
 
40
/* These are just handy. */
41
#define _SV     save    %sp, -STACKFRAME_SZ, %sp
42
#define _RS     restore
43
 
44
#define FLUSH_ALL_KERNEL_WINDOWS \
45
        _SV; _SV; _SV; _SV; _SV; _SV; _SV; \
46
        _RS; _RS; _RS; _RS; _RS; _RS; _RS;
47
 
48
/* First, KGDB low level things.  This is a rewrite
 * of the routines found in the sparc-stub.c asm() statement
 * from the gdb distribution.  This is also dual-purpose
 * as a software trap for userlevel programs.
 */
        .data
        .align  4

in_trap_handler:
        .word   0

        .text
        .align  4

#if 0 /* kgdb is dropped from 2.5.33 */
! This function is called when any SPARC trap (except window overflow or
! underflow) occurs.  It makes sure that the invalid register window is still
! available before jumping into C code.  It will also restore the world if you
! return from handle_exception.

        .globl  trap_low
trap_low:
        rd      %wim, %l3
        SAVE_ALL

        sethi   %hi(in_trap_handler), %l4
        ld      [%lo(in_trap_handler) + %l4], %l5
        inc     %l5
        st      %l5, [%lo(in_trap_handler) + %l4]

        /* Make sure kgdb sees the same state we just saved. */
        LOAD_PT_GLOBALS(sp)
        LOAD_PT_INS(sp)
        ld      [%sp + STACKFRAME_SZ + PT_Y], %l4
        ld      [%sp + STACKFRAME_SZ + PT_WIM], %l3
        ld      [%sp + STACKFRAME_SZ + PT_PSR], %l0
        ld      [%sp + STACKFRAME_SZ + PT_PC], %l1
        ld      [%sp + STACKFRAME_SZ + PT_NPC], %l2
        rd      %tbr, %l5       /* Never changes... */

        /* Make kgdb exception frame. */
        sub     %sp,(16+1+6+1+72)*4,%sp ! Make room for input & locals
                                        ! + hidden arg + arg spill
                                        ! + doubleword alignment
                                        ! + registers[72] local var
        SAVE_KGDB_GLOBALS(sp)
        SAVE_KGDB_INS(sp)
        SAVE_KGDB_SREGS(sp, l4, l0, l3, l5, l1, l2)

        /* We are increasing PIL, so two writes. */
        or      %l0, PSR_PIL, %l0
        wr      %l0, 0, %psr
        WRITE_PAUSE
        wr      %l0, PSR_ET, %psr
        WRITE_PAUSE

        call    handle_exception
         add    %sp, STACKFRAME_SZ, %o0 ! Pass address of registers

        /* Load new kgdb register set. */
        LOAD_KGDB_GLOBALS(sp)
        LOAD_KGDB_INS(sp)
        LOAD_KGDB_SREGS(sp, l4, l0, l3, l5, l1, l2)
        wr      %l4, 0x0, %y

        sethi   %hi(in_trap_handler), %l4
        ld      [%lo(in_trap_handler) + %l4], %l5
        dec     %l5
        st      %l5, [%lo(in_trap_handler) + %l4]

        add     %sp,(16+1+6+1+72)*4,%sp ! Undo the kgdb trap frame.

        /* Now take what kgdb did and place it into the pt_regs
         * frame which SparcLinux RESTORE_ALL understands.
         */
        STORE_PT_INS(sp)
        STORE_PT_GLOBALS(sp)
        STORE_PT_YREG(sp, g2)
        STORE_PT_PRIV(sp, l0, l1, l2)

        RESTORE_ALL
#endif
130
 
131
#if defined(CONFIG_BLK_DEV_FD) || defined(CONFIG_BLK_DEV_FD_MODULE)
        .text
        .align  4
        .globl  floppy_hardint
floppy_hardint:
        /*
         * This code cannot touch registers %l0 %l1 and %l2
         * because SAVE_ALL depends on their values. It depends
         * on %l3 also, but we regenerate it before a call.
         * Other registers are:
         * %l3 -- base address of fdc registers
         * %l4 -- pdma_vaddr
         * %l5 -- scratch for ld/st address
         * %l6 -- pdma_size
         * %l7 -- scratch [floppy byte, ld/st address, aux. data]
         */

        /* Do we have work to do? */
        sethi   %hi(doing_pdma), %l7
        ld      [%l7 + %lo(doing_pdma)], %l7
        cmp     %l7, 0
        be      floppy_dosoftint
         nop

        /* Load fdc register base */
        sethi   %hi(fdc_status), %l3
        ld      [%l3 + %lo(fdc_status)], %l3

        /* Setup register addresses */
        sethi   %hi(pdma_vaddr), %l5    ! transfer buffer
        ld      [%l5 + %lo(pdma_vaddr)], %l4
        sethi   %hi(pdma_size), %l5     ! bytes to go
        ld      [%l5 + %lo(pdma_size)], %l6
next_byte:
        ldub    [%l3], %l7

        andcc   %l7, 0x80, %g0          ! Does fifo still have data
        bz      floppy_fifo_emptied     ! fifo has been emptied...
         andcc  %l7, 0x20, %g0          ! in non-dma mode still?
        bz      floppy_overrun          ! nope, overrun
         andcc  %l7, 0x40, %g0          ! 0=write 1=read
        bz      floppy_write
         sub    %l6, 0x1, %l6

        /* Ok, actually read this byte */
        ldub    [%l3 + 1], %l7
        orcc    %g0, %l6, %g0
        stb     %l7, [%l4]
        bne     next_byte
         add    %l4, 0x1, %l4

        b       floppy_tdone
         nop

floppy_write:
        /* Ok, actually write this byte */
        ldub    [%l4], %l7
        orcc    %g0, %l6, %g0
        stb     %l7, [%l3 + 1]
        bne     next_byte
         add    %l4, 0x1, %l4

        /* fall through... */
floppy_tdone:
        sethi   %hi(pdma_vaddr), %l5
        st      %l4, [%l5 + %lo(pdma_vaddr)]
        sethi   %hi(pdma_size), %l5
        st      %l6, [%l5 + %lo(pdma_size)]
        /* Flip terminal count pin */
        set     auxio_register, %l7
        ld      [%l7], %l7

        set     sparc_cpu_model, %l5
        ld      [%l5], %l5
        subcc   %l5, 1, %g0             /* enum { sun4c = 1 }; */
        be      1f
         ldub   [%l7], %l5

        or      %l5, 0xc2, %l5
        stb     %l5, [%l7]
        andn    %l5, 0x02, %l5
        b       2f
         nop

1:
        or      %l5, 0xf4, %l5
        stb     %l5, [%l7]
        andn    %l5, 0x04, %l5

2:
        /* Kill some time so the bits set */
        WRITE_PAUSE
        WRITE_PAUSE

        stb     %l5, [%l7]

        /* Prevent recursion */
        sethi   %hi(doing_pdma), %l7
        b       floppy_dosoftint
         st     %g0, [%l7 + %lo(doing_pdma)]

        /* We emptied the FIFO, but we haven't read everything
         * as of yet.  Store the current transfer address and
         * bytes left to read so we can continue when the next
         * fast IRQ comes in.
         */
floppy_fifo_emptied:
        sethi   %hi(pdma_vaddr), %l5
        st      %l4, [%l5 + %lo(pdma_vaddr)]
        sethi   %hi(pdma_size), %l7
        st      %l6, [%l7 + %lo(pdma_size)]

        /* Restore condition codes */
        wr      %l0, 0x0, %psr
        WRITE_PAUSE

        jmp     %l1
        rett    %l2

floppy_overrun:
        sethi   %hi(pdma_vaddr), %l5
        st      %l4, [%l5 + %lo(pdma_vaddr)]
        sethi   %hi(pdma_size), %l5
        st      %l6, [%l5 + %lo(pdma_size)]
        /* Prevent recursion */
        sethi   %hi(doing_pdma), %l7
        st      %g0, [%l7 + %lo(doing_pdma)]

        /* fall through... */
floppy_dosoftint:
        rd      %wim, %l3
        SAVE_ALL

        /* Set all IRQs off. */
        or      %l0, PSR_PIL, %l4
        wr      %l4, 0x0, %psr
        WRITE_PAUSE
        wr      %l4, PSR_ET, %psr
        WRITE_PAUSE

        mov     11, %o0                 ! floppy irq level (unused anyway)
        mov     %g0, %o1                ! devid is not used in fast interrupts
        call    sparc_floppy_irq
         add    %sp, STACKFRAME_SZ, %o2 ! struct pt_regs *regs

        RESTORE_ALL

#endif /* (CONFIG_BLK_DEV_FD) */
279
 
280
        /* Bad trap handler: dispatch any otherwise-unhandled trap to C. */
        .globl  bad_trap_handler
bad_trap_handler:
        SAVE_ALL

        wr      %l0, PSR_ET, %psr       ! re-enable traps
        WRITE_PAUSE

        add     %sp, STACKFRAME_SZ, %o0 ! pt_regs
        call    do_hw_interrupt
         mov    %l7, %o1                ! trap number

        RESTORE_ALL
293
 
294
/* For now all IRQ's not registered get sent here. handler_irq() will
 * see if a routine is registered to handle this interrupt and if not
 * it will say so on the console.
 */

        .align  4
        .globl  real_irq_entry, patch_handler_irq
real_irq_entry:
        SAVE_ALL

#ifdef CONFIG_SMP
        .globl  patchme_maybe_smp_msg

        cmp     %l7, 12
patchme_maybe_smp_msg:
        bgu     maybe_smp4m_msg
         nop
#endif

real_irq_continue:
        or      %l0, PSR_PIL, %g2
        wr      %g2, 0x0, %psr
        WRITE_PAUSE
        wr      %g2, PSR_ET, %psr
        WRITE_PAUSE
        mov     %l7, %o0                ! irq level
patch_handler_irq:
        call    handler_irq
         add    %sp, STACKFRAME_SZ, %o1 ! pt_regs ptr
        or      %l0, PSR_PIL, %g2       ! restore PIL after handler_irq
        wr      %g2, PSR_ET, %psr       ! keep ET up
        WRITE_PAUSE

        RESTORE_ALL
328
 
329
#ifdef CONFIG_SMP
        /* SMP per-cpu ticker interrupts are handled specially. */
smp4m_ticker:
        bne     real_irq_continue+4
         or     %l0, PSR_PIL, %g2
        wr      %g2, 0x0, %psr
        WRITE_PAUSE
        wr      %g2, PSR_ET, %psr
        WRITE_PAUSE
        call    smp4m_percpu_timer_interrupt
         add    %sp, STACKFRAME_SZ, %o0
        wr      %l0, PSR_ET, %psr
        WRITE_PAUSE
        RESTORE_ALL

        /* Here is where we check for possible SMP IPI passed to us
         * on some level other than 15 which is the NMI and only used
         * for cross calls.  That has a separate entry point below.
         */
maybe_smp4m_msg:
        GET_PROCESSOR4M_ID(o3)
        set     sun4m_interrupts, %l5
        ld      [%l5], %o5
        sethi   %hi(0x40000000), %o2
        sll     %o3, 12, %o3
        ld      [%o5 + %o3], %o1
        andcc   %o1, %o2, %g0
        be,a    smp4m_ticker
         cmp    %l7, 14
        st      %o2, [%o5 + 0x4]
        WRITE_PAUSE
        ld      [%o5], %g0
        WRITE_PAUSE
        or      %l0, PSR_PIL, %l4
        wr      %l4, 0x0, %psr
        WRITE_PAUSE
        wr      %l4, PSR_ET, %psr
        WRITE_PAUSE
        call    smp_reschedule_irq
         nop

        RESTORE_ALL

        .align  4
        .globl  linux_trap_ipi15_sun4m
linux_trap_ipi15_sun4m:
        SAVE_ALL
        sethi   %hi(0x80000000), %o2
        GET_PROCESSOR4M_ID(o0)
        set     sun4m_interrupts, %l5
        ld      [%l5], %o5
        sll     %o0, 12, %o0
        add     %o5, %o0, %o5
        ld      [%o5], %o3
        andcc   %o3, %o2, %g0
        be      1f                      ! Must be an NMI async memory error
         st     %o2, [%o5 + 4]
        WRITE_PAUSE
        ld      [%o5], %g0
        WRITE_PAUSE
        or      %l0, PSR_PIL, %l4
        wr      %l4, 0x0, %psr
        WRITE_PAUSE
        wr      %l4, PSR_ET, %psr
        WRITE_PAUSE
        call    smp4m_cross_call_irq
         nop
        b       ret_trap_lockless_ipi
         clr    %l6
1:
        /* NMI async memory error handling. */
        sethi   %hi(0x80000000), %l4
        sethi   %hi(0x4000), %o3
        sub     %o5, %o0, %o5
        add     %o5, %o3, %l5
        st      %l4, [%l5 + 0xc]
        WRITE_PAUSE
        ld      [%l5], %g0
        WRITE_PAUSE
        or      %l0, PSR_PIL, %l4
        wr      %l4, 0x0, %psr
        WRITE_PAUSE
        wr      %l4, PSR_ET, %psr
        WRITE_PAUSE
        call    sun4m_nmi
         nop
        st      %l4, [%l5 + 0x8]
        WRITE_PAUSE
        ld      [%l5], %g0
        WRITE_PAUSE
        RESTORE_ALL

        .globl  smp4d_ticker
        /* SMP per-cpu ticker interrupts are handled specially. */
smp4d_ticker:
        SAVE_ALL
        or      %l0, PSR_PIL, %g2
        sethi   %hi(CC_ICLR), %o0
        sethi   %hi(1 << 14), %o1
        or      %o0, %lo(CC_ICLR), %o0
        stha    %o1, [%o0] ASI_M_MXCC   /* Clear PIL 14 in MXCC's ICLR */
        wr      %g2, 0x0, %psr
        WRITE_PAUSE
        wr      %g2, PSR_ET, %psr
        WRITE_PAUSE
        call    smp4d_percpu_timer_interrupt
         add    %sp, STACKFRAME_SZ, %o0
        wr      %l0, PSR_ET, %psr
        WRITE_PAUSE
        RESTORE_ALL

        .align  4
        .globl  linux_trap_ipi15_sun4d
linux_trap_ipi15_sun4d:
        SAVE_ALL
        sethi   %hi(CC_BASE), %o4
        sethi   %hi(MXCC_ERR_ME|MXCC_ERR_PEW|MXCC_ERR_ASE|MXCC_ERR_PEE), %o2
        or      %o4, (CC_EREG - CC_BASE), %o0
        ldda    [%o0] ASI_M_MXCC, %o0
        andcc   %o0, %o2, %g0
        bne     1f
         sethi  %hi(BB_STAT2), %o2
        lduba   [%o2] ASI_M_CTL, %o2
        andcc   %o2, BB_STAT2_MASK, %g0
        bne     2f
         or     %o4, (CC_ICLR - CC_BASE), %o0
        sethi   %hi(1 << 15), %o1
        stha    %o1, [%o0] ASI_M_MXCC   /* Clear PIL 15 in MXCC's ICLR */
        or      %l0, PSR_PIL, %l4
        wr      %l4, 0x0, %psr
        WRITE_PAUSE
        wr      %l4, PSR_ET, %psr
        WRITE_PAUSE
        call    smp4d_cross_call_irq
         nop
        b       ret_trap_lockless_ipi
         clr    %l6

1:      /* MXCC error */
2:      /* BB error */
        /* Disable PIL 15 */
        set     CC_IMSK, %l4
        lduha   [%l4] ASI_M_MXCC, %l5
        sethi   %hi(1 << 15), %l7
        or      %l5, %l7, %l5
        stha    %l5, [%l4] ASI_M_MXCC
        /* FIXME */
1:      b,a     1b

#endif /* CONFIG_SMP */
479
 
480
        /* This routine handles illegal instructions and privileged
         * instruction attempts from user code.
         */
        .align  4
        .globl  bad_instruction
bad_instruction:
        sethi   %hi(0xc1f80000), %l4    ! opcode mask for "flush"
        ld      [%l1], %l5              ! fetch the faulting instruction
        sethi   %hi(0x81d80000), %l7    ! "flush" opcode pattern
        and     %l5, %l4, %l5
        cmp     %l5, %l7
        be      1f
        SAVE_ALL

        wr      %l0, PSR_ET, %psr               ! re-enable traps
        WRITE_PAUSE

        add     %sp, STACKFRAME_SZ, %o0 ! pt_regs
        mov     %l1, %o1                ! %pc
        mov     %l2, %o2                ! %npc
        call    do_illegal_instruction
         mov    %l0, %o3                ! %psr

        RESTORE_ALL

1:      /* unimplemented flush - just skip */
        jmpl    %l2, %g0
         rett   %l2 + 4
508
 
509
        /* Privileged-instruction trap from user mode. */
        .align  4
        .globl  priv_instruction
priv_instruction:
        SAVE_ALL

        wr      %l0, PSR_ET, %psr       ! re-enable traps
        WRITE_PAUSE

        add     %sp, STACKFRAME_SZ, %o0 ! pt_regs
        mov     %l1, %o1                ! %pc
        mov     %l2, %o2                ! %npc
        call    do_priv_instruction
         mov    %l0, %o3                ! %psr

        RESTORE_ALL
524
 
525
        /* This routine handles unaligned data accesses. */
        .align  4
        .globl  mna_handler
mna_handler:
        andcc   %l0, PSR_PS, %g0        ! trap taken from kernel mode?
        be      mna_fromuser
         nop

        SAVE_ALL

        wr      %l0, PSR_ET, %psr       ! re-enable traps
        WRITE_PAUSE

        ld      [%l1], %o1              ! faulting instruction
        call    kernel_unaligned_trap
         add    %sp, STACKFRAME_SZ, %o0 ! pt_regs

        RESTORE_ALL

mna_fromuser:
        SAVE_ALL

        wr      %l0, PSR_ET, %psr               ! re-enable traps
        WRITE_PAUSE

        ld      [%l1], %o1              ! faulting instruction
        call    user_unaligned_trap
         add    %sp, STACKFRAME_SZ, %o0 ! pt_regs

        RESTORE_ALL
555
 
556
        /* This routine handles floating point disabled traps. */
        .align  4
        .globl  fpd_trap_handler
fpd_trap_handler:
        SAVE_ALL

        wr      %l0, PSR_ET, %psr               ! re-enable traps
        WRITE_PAUSE

        add     %sp, STACKFRAME_SZ, %o0 ! pt_regs
        mov     %l1, %o1                ! %pc
        mov     %l2, %o2                ! %npc
        call    do_fpd_trap
         mov    %l0, %o3                ! %psr

        RESTORE_ALL
572
 
573
        /* This routine handles Floating Point Exceptions.
         * An FPE taken inside fpsave()/fpsave_magic is redirected to the
         * in-kernel catch points; anything else goes to do_fpe_trap().
         */
        .align  4
        .globl  fpe_trap_handler
fpe_trap_handler:
        set     fpsave_magic, %l5
        cmp     %l1, %l5
        be      1f
         sethi  %hi(fpsave), %l5
        or      %l5, %lo(fpsave), %l5
        cmp     %l1, %l5
        bne     2f
         sethi  %hi(fpsave_catch2), %l5
        or      %l5, %lo(fpsave_catch2), %l5
        wr      %l0, 0x0, %psr
        WRITE_PAUSE
        jmp     %l5
         rett   %l5 + 4
1:
        sethi   %hi(fpsave_catch), %l5
        or      %l5, %lo(fpsave_catch), %l5
        wr      %l0, 0x0, %psr
        WRITE_PAUSE
        jmp     %l5
         rett   %l5 + 4

2:
        SAVE_ALL

        wr      %l0, PSR_ET, %psr               ! re-enable traps
        WRITE_PAUSE

        add     %sp, STACKFRAME_SZ, %o0 ! pt_regs
        mov     %l1, %o1                ! %pc
        mov     %l2, %o2                ! %npc
        call    do_fpe_trap
         mov    %l0, %o3                ! %psr

        RESTORE_ALL
611
 
612
        /* This routine handles Tag Overflow Exceptions. */
        .align  4
        .globl  do_tag_overflow
do_tag_overflow:
        SAVE_ALL

        wr      %l0, PSR_ET, %psr               ! re-enable traps
        WRITE_PAUSE

        add     %sp, STACKFRAME_SZ, %o0 ! pt_regs
        mov     %l1, %o1                ! %pc
        mov     %l2, %o2                ! %npc
        call    handle_tag_overflow
         mov    %l0, %o3                ! %psr

        RESTORE_ALL
628
 
629
        /* This routine handles Watchpoint Exceptions. */
        .align  4
        .globl  do_watchpoint
do_watchpoint:
        SAVE_ALL

        wr      %l0, PSR_ET, %psr               ! re-enable traps
        WRITE_PAUSE

        add     %sp, STACKFRAME_SZ, %o0 ! pt_regs
        mov     %l1, %o1                ! %pc
        mov     %l2, %o2                ! %npc
        call    handle_watchpoint
         mov    %l0, %o3                ! %psr

        RESTORE_ALL
645
 
646
        /* This routine handles Register Access Exceptions. */
        .align  4
        .globl  do_reg_access
do_reg_access:
        SAVE_ALL

        wr      %l0, PSR_ET, %psr               ! re-enable traps
        WRITE_PAUSE

        add     %sp, STACKFRAME_SZ, %o0 ! pt_regs
        mov     %l1, %o1                ! %pc
        mov     %l2, %o2                ! %npc
        call    handle_reg_access
         mov    %l0, %o3                ! %psr

        RESTORE_ALL
662
 
663
        /* This routine handles Co-Processor Disabled Exceptions. */
        .align  4
        .globl  do_cp_disabled
do_cp_disabled:
        SAVE_ALL

        wr      %l0, PSR_ET, %psr               ! re-enable traps
        WRITE_PAUSE

        add     %sp, STACKFRAME_SZ, %o0 ! pt_regs
        mov     %l1, %o1                ! %pc
        mov     %l2, %o2                ! %npc
        call    handle_cp_disabled
         mov    %l0, %o3                ! %psr

        RESTORE_ALL
679
 
680
        /* This routine handles Co-Processor Exceptions. */
        .align  4
        .globl  do_cp_exception
do_cp_exception:
        SAVE_ALL

        wr      %l0, PSR_ET, %psr               ! re-enable traps
        WRITE_PAUSE

        add     %sp, STACKFRAME_SZ, %o0 ! pt_regs
        mov     %l1, %o1                ! %pc
        mov     %l2, %o2                ! %npc
        call    handle_cp_exception
         mov    %l0, %o3                ! %psr

        RESTORE_ALL
696
 
697
        /* This routine handles Hardware Divide By Zero Exceptions. */
        .align  4
        .globl  do_hw_divzero
do_hw_divzero:
        SAVE_ALL

        wr      %l0, PSR_ET, %psr               ! re-enable traps
        WRITE_PAUSE

        add     %sp, STACKFRAME_SZ, %o0 ! pt_regs
        mov     %l1, %o1                ! %pc
        mov     %l2, %o2                ! %npc
        call    handle_hw_divzero
         mov    %l0, %o3                ! %psr

        RESTORE_ALL
713
 
714
        /* Flush-windows software trap.  User traps flush via C;
         * kernel traps spill every window in-line.
         */
        .align  4
        .globl  do_flush_windows
do_flush_windows:
        SAVE_ALL

        wr      %l0, PSR_ET, %psr
        WRITE_PAUSE

        andcc   %l0, PSR_PS, %g0        ! trap taken from kernel mode?
        bne     dfw_kernel
         nop

        call    flush_user_windows
         nop

        /* Advance over the trap instruction. */
        ld      [%sp + STACKFRAME_SZ + PT_NPC], %l1
        add     %l1, 0x4, %l2
        st      %l1, [%sp + STACKFRAME_SZ + PT_PC]
        st      %l2, [%sp + STACKFRAME_SZ + PT_NPC]

        RESTORE_ALL

        .globl  flush_patch_one

        /* We get these for debugging routines using __builtin_return_address() */
dfw_kernel:
flush_patch_one:
        FLUSH_ALL_KERNEL_WINDOWS

        /* Advance over the trap instruction. */
        ld      [%sp + STACKFRAME_SZ + PT_NPC], %l1
        add     %l1, 0x4, %l2
        st      %l1, [%sp + STACKFRAME_SZ + PT_PC]
        st      %l2, [%sp + STACKFRAME_SZ + PT_NPC]

        RESTORE_ALL
751
 
752
        /* The getcc software trap.  The user wants the condition codes from
         * the %psr in register %g1.
         */

        .align  4
        .globl  getcc_trap_handler
getcc_trap_handler:
        srl     %l0, 20, %g1    ! give user
        and     %g1, 0xf, %g1   ! only ICC bits in %psr
        jmp     %l2             ! advance over trap instruction
        rett    %l2 + 0x4       ! like this...
763
 
764
        /* The setcc software trap.  The user has condition codes in %g1
         * that it would like placed in the %psr.  Be careful not to flip
         * any unintentional bits!
         */

        .align  4
        .globl  setcc_trap_handler
setcc_trap_handler:
        sll     %g1, 0x14, %l4
        set     PSR_ICC, %l5
        andn    %l0, %l5, %l0   ! clear ICC bits in %psr
        and     %l4, %l5, %l4   ! clear non-ICC bits in user value
        or      %l4, %l0, %l4   ! or them in... mix mix mix

        wr      %l4, 0x0, %psr  ! set new %psr
        WRITE_PAUSE             ! TI scumbags...

        jmp     %l2             ! advance over trap instruction
        rett    %l2 + 0x4       ! like this...
783
 
784
        .align  4
        .globl  linux_trap_nmi_sun4c
linux_trap_nmi_sun4c:
        SAVE_ALL

        /* Ugh, we need to clear the IRQ line.  This is now
         * a very sun4c specific trap handler...
         */
        sethi   %hi(interrupt_enable), %l5
        ld      [%l5 + %lo(interrupt_enable)], %l5
        ldub    [%l5], %l6
        andn    %l6, INTS_ENAB, %l6
        stb     %l6, [%l5]

        /* Now it is safe to re-enable traps without recursion. */
        or      %l0, PSR_PIL, %l0
        wr      %l0, PSR_ET, %psr
        WRITE_PAUSE

        /* Now call the c-code with the pt_regs frame ptr and the
         * memory error registers as arguments.  The ordering chosen
         * here is due to unlatching semantics.
         */
        sethi   %hi(AC_SYNC_ERR), %o0
        add     %o0, 0x4, %o0
        lda     [%o0] ASI_CONTROL, %o2  ! sync vaddr
        sub     %o0, 0x4, %o0
        lda     [%o0] ASI_CONTROL, %o1  ! sync error
        add     %o0, 0xc, %o0
        lda     [%o0] ASI_CONTROL, %o4  ! async vaddr
        sub     %o0, 0x4, %o0
        lda     [%o0] ASI_CONTROL, %o3  ! async error
        call    sparc_lvl15_nmi
         add    %sp, STACKFRAME_SZ, %o0

        RESTORE_ALL
820
 
821
        /* Boot-time patch instruction templates: the MMU setup code copies
         * the variant matching the detected hardware over the generic
         * patch sites referenced by the .globl declarations below.
         */
        .align  4
        .globl  invalid_segment_patch1_ff
        .globl  invalid_segment_patch2_ff
invalid_segment_patch1_ff:      cmp     %l4, 0xff
invalid_segment_patch2_ff:      mov     0xff, %l3

        .align  4
        .globl  invalid_segment_patch1_1ff
        .globl  invalid_segment_patch2_1ff
invalid_segment_patch1_1ff:     cmp     %l4, 0x1ff
invalid_segment_patch2_1ff:     mov     0x1ff, %l3

        .align  4
        .globl  num_context_patch1_16, num_context_patch2_16
num_context_patch1_16:          mov     0x10, %l7
num_context_patch2_16:          mov     0x10, %l7

        .align  4
        .globl  vac_linesize_patch_32
vac_linesize_patch_32:          subcc   %l7, 32, %l7

        .align  4
        .globl  vac_hwflush_patch1_on, vac_hwflush_patch2_on

/*
 * Ugly, but we cant use hardware flushing on the sun4 and we'd require
 * two instructions (Anton)
 */
#ifdef CONFIG_SUN4
vac_hwflush_patch1_on:          nop
#else
vac_hwflush_patch1_on:          addcc   %l7, -PAGE_SIZE, %l7
#endif

vac_hwflush_patch2_on:          sta     %g0, [%l3 + %l7] ASI_HWFLUSHSEG

        .globl  invalid_segment_patch1, invalid_segment_patch2
        .globl  num_context_patch1
        .globl  vac_linesize_patch, vac_hwflush_patch1
        .globl  vac_hwflush_patch2
861
 
862
        .align  4
863
        .globl  sun4c_fault
864
 
865
! %l0 = %psr
866
! %l1 = %pc
867
! %l2 = %npc
868
! %l3 = %wim
869
! %l7 = 1 for textfault
870
! We want error in %l5, vaddr in %l6
871
sun4c_fault:
872
#ifdef CONFIG_SUN4
873
        sethi   %hi(sun4c_memerr_reg), %l4
874
        ld      [%l4+%lo(sun4c_memerr_reg)], %l4  ! memerr ctrl reg addr
875
        ld      [%l4], %l6              ! memerr ctrl reg
876
        ld      [%l4 + 4], %l5          ! memerr vaddr reg
877
        andcc   %l6, 0x80, %g0          ! check for error type
878
        st      %g0, [%l4 + 4]          ! clear the error
879
        be      0f                      ! normal error
880
         sethi  %hi(AC_BUS_ERROR), %l4  ! bus err reg addr
881
 
882
        call    prom_halt       ! something weird happened
883
                                        ! what exactly did happen?
884
                                        ! what should we do here?
885
 
886
0:      or      %l4, %lo(AC_BUS_ERROR), %l4     ! bus err reg addr
887
        lduba   [%l4] ASI_CONTROL, %l6  ! bus err reg
888
 
889
        cmp    %l7, 1                   ! text fault?
890
        be      1f                      ! yes
891
         nop
892
 
893
        ld     [%l1], %l4               ! load instruction that caused fault
894
        srl     %l4, 21, %l4
895
        andcc   %l4, 1, %g0             ! store instruction?
896
 
897
        be      1f                      ! no
898
         sethi  %hi(SUN4C_SYNC_BADWRITE), %l4 ! yep
899
                                        ! %lo(SUN4C_SYNC_BADWRITE) = 0
900
        or      %l4, %l6, %l6           ! set write bit to emulate sun4c
901
1:
902
#else
903
        sethi   %hi(AC_SYNC_ERR), %l4
904
        add     %l4, 0x4, %l6                   ! AC_SYNC_VA in %l6
905
        lda     [%l6] ASI_CONTROL, %l5          ! Address
906
        lda     [%l4] ASI_CONTROL, %l6          ! Error, retained for a bit
907
#endif
908
 
909
        andn    %l5, 0xfff, %l5                 ! Encode all info into l7
910
        srl     %l6, 14, %l4
911
 
912
        and     %l4, 2, %l4
913
        or      %l5, %l4, %l4
914
 
915
        or      %l4, %l7, %l7                   ! l7 = [addr,write,txtfault]
916
 
917
        andcc   %l0, PSR_PS, %g0
918
        be      sun4c_fault_fromuser
919
         andcc  %l7, 1, %g0                     ! Text fault?
920
 
921
        be      1f
922
         sethi  %hi(KERNBASE), %l4
923
 
924
        mov     %l1, %l5                        ! PC
925
 
926
1:
927
        cmp     %l5, %l4
928
        blu     sun4c_fault_fromuser
929
         sethi  %hi(~((1 << SUN4C_REAL_PGDIR_SHIFT) - 1)), %l4
930
 
931
        /* If the kernel references a bum kernel pointer, or a pte which
932
         * points to a non existant page in ram, we will run this code
933
         * _forever_ and lock up the machine!!!!! So we must check for
934
         * this condition, the AC_SYNC_ERR bits are what we must examine.
935
         * Also a parity error would make this happen as well.  So we just
936
         * check that we are in fact servicing a tlb miss and not some
937
         * other type of fault for the kernel.
938
         */
939
        andcc   %l6, 0x80, %g0
940
        be      sun4c_fault_fromuser
941
         and    %l5, %l4, %l5
942
 
943
        /* Test for NULL pte_t * in vmalloc area. */
944
        sethi   %hi(VMALLOC_START), %l4
945
        cmp     %l5, %l4
946
        blu,a   invalid_segment_patch1
947
         lduXa  [%l5] ASI_SEGMAP, %l4
948
 
949
        sethi   %hi(swapper_pg_dir), %l4
950
        srl     %l5, SUN4C_PGDIR_SHIFT, %l6
951
        or      %l4, %lo(swapper_pg_dir), %l4
952
        sll     %l6, 2, %l6
953
        ld      [%l4 + %l6], %l4
954
#ifdef CONFIG_SUN4
955
        sethi   %hi(PAGE_MASK), %l6
956
        andcc   %l4, %l6, %g0
957
#else
958
        andcc   %l4, PAGE_MASK, %g0
959
#endif
960
        be      sun4c_fault_fromuser
961
         lduXa  [%l5] ASI_SEGMAP, %l4
962
 
963
invalid_segment_patch1:
964
        cmp     %l4, 0x7f
965
        bne     1f
966
         sethi  %hi(sun4c_kfree_ring), %l4
967
        or      %l4, %lo(sun4c_kfree_ring), %l4
968
        ld      [%l4 + 0x18], %l3
969
        deccc   %l3                     ! do we have a free entry?
970
        bcs,a   2f                      ! no, unmap one.
971
         sethi  %hi(sun4c_kernel_ring), %l4
972
 
973
        st      %l3, [%l4 + 0x18]       ! sun4c_kfree_ring.num_entries--
974
 
975
        ld      [%l4 + 0x00], %l6       ! entry = sun4c_kfree_ring.ringhd.next
976
        st      %l5, [%l6 + 0x08]       ! entry->vaddr = address
977
 
978
        ld      [%l6 + 0x00], %l3       ! next = entry->next
979
        ld      [%l6 + 0x04], %l7       ! entry->prev
980
 
981
        st      %l7, [%l3 + 0x04]       ! next->prev = entry->prev
982
        st      %l3, [%l7 + 0x00]       ! entry->prev->next = next
983
 
984
        sethi   %hi(sun4c_kernel_ring), %l4
985
        or      %l4, %lo(sun4c_kernel_ring), %l4
986
                                        ! head = &sun4c_kernel_ring.ringhd
987
 
988
        ld      [%l4 + 0x00], %l7       ! head->next
989
 
990
        st      %l4, [%l6 + 0x04]       ! entry->prev = head
991
        st      %l7, [%l6 + 0x00]       ! entry->next = head->next
992
        st      %l6, [%l7 + 0x04]       ! head->next->prev = entry
993
 
994
        st      %l6, [%l4 + 0x00]       ! head->next = entry
995
 
996
        ld      [%l4 + 0x18], %l3
997
        inc     %l3                     ! sun4c_kernel_ring.num_entries++
998
        st      %l3, [%l4 + 0x18]
999
        b       4f
1000
         ld     [%l6 + 0x08], %l5
1001
 
1002
2:
1003
        or      %l4, %lo(sun4c_kernel_ring), %l4
1004
                                        ! head = &sun4c_kernel_ring.ringhd
1005
 
1006
        ld      [%l4 + 0x04], %l6       ! entry = head->prev
1007
 
1008
        ld      [%l6 + 0x08], %l3       ! tmp = entry->vaddr
1009
 
1010
        ! Flush segment from the cache.
1011
#ifdef CONFIG_SUN4
1012
        sethi   %hi((128 * 1024)), %l7
1013
#else
1014
        sethi   %hi((64 * 1024)), %l7
1015
#endif
1016
9:
1017
vac_hwflush_patch1:
1018
vac_linesize_patch:
1019
        subcc   %l7, 16, %l7
1020
        bne     9b
1021
vac_hwflush_patch2:
1022
         sta    %g0, [%l3 + %l7] ASI_FLUSHSEG
1023
 
1024
        st      %l5, [%l6 + 0x08]       ! entry->vaddr = address
1025
 
1026
        ld      [%l6 + 0x00], %l5       ! next = entry->next
1027
        ld      [%l6 + 0x04], %l7       ! entry->prev
1028
 
1029
        st      %l7, [%l5 + 0x04]       ! next->prev = entry->prev
1030
        st      %l5, [%l7 + 0x00]       ! entry->prev->next = next
1031
        st      %l4, [%l6 + 0x04]       ! entry->prev = head
1032
 
1033
        ld      [%l4 + 0x00], %l7       ! head->next
1034
 
1035
        st      %l7, [%l6 + 0x00]       ! entry->next = head->next
1036
        st      %l6, [%l7 + 0x04]       ! head->next->prev = entry
1037
        st      %l6, [%l4 + 0x00]       ! head->next = entry
1038
 
1039
        mov     %l3, %l5                ! address = tmp
1040
 
1041
4:
1042
num_context_patch1:
1043
        mov     0x08, %l7
1044
 
1045
        ld      [%l6 + 0x08], %l4
1046
        ldub    [%l6 + 0x0c], %l3
1047
        or      %l4, %l3, %l4           ! encode new vaddr/pseg into l4
1048
 
1049
        sethi   %hi(AC_CONTEXT), %l3
1050
        lduba   [%l3] ASI_CONTROL, %l6
1051
 
1052
        /* Invalidate old mapping, instantiate new mapping,
1053
         * for each context.  Registers l6/l7 are live across
1054
         * this loop.
1055
         */
1056
3:      deccc   %l7
1057
        sethi   %hi(AC_CONTEXT), %l3
1058
        stba    %l7, [%l3] ASI_CONTROL
1059
invalid_segment_patch2:
1060
        mov     0x7f, %l3
1061
        stXa    %l3, [%l5] ASI_SEGMAP
1062
        andn    %l4, 0x1ff, %l3
1063
        bne     3b
1064
         stXa   %l4, [%l3] ASI_SEGMAP
1065
 
1066
        sethi   %hi(AC_CONTEXT), %l3
1067
        stba    %l6, [%l3] ASI_CONTROL
1068
 
1069
        andn    %l4, 0x1ff, %l5
1070
 
1071
1:
1072
        sethi   %hi(VMALLOC_START), %l4
1073
        cmp     %l5, %l4
1074
 
1075
        bgeu    1f
1076
         mov    1 << (SUN4C_REAL_PGDIR_SHIFT - PAGE_SHIFT), %l7
1077
 
1078
        sethi   %hi(KERNBASE), %l6
1079
 
1080
        sub     %l5, %l6, %l4
1081
        srl     %l4, PAGE_SHIFT, %l4
1082
        sethi   %hi((SUN4C_PAGE_KERNEL & 0xf4000000)), %l3
1083
        or      %l3, %l4, %l3
1084
 
1085
        sethi   %hi(PAGE_SIZE), %l4
1086
 
1087
2:
1088
        sta     %l3, [%l5] ASI_PTE
1089
        deccc   %l7
1090
        inc     %l3
1091
        bne     2b
1092
         add    %l5, %l4, %l5
1093
 
1094
        b       7f
1095
         sethi  %hi(sun4c_kernel_faults), %l4
1096
 
1097
1:
1098
        srl     %l5, SUN4C_PGDIR_SHIFT, %l3
1099
        sethi   %hi(swapper_pg_dir), %l4
1100
        or      %l4, %lo(swapper_pg_dir), %l4
1101
        sll     %l3, 2, %l3
1102
        ld      [%l4 + %l3], %l4
1103
#ifndef CONFIG_SUN4
1104
        and     %l4, PAGE_MASK, %l4
1105
#else
1106
        sethi   %hi(PAGE_MASK), %l6
1107
        and     %l4, %l6, %l4
1108
#endif
1109
 
1110
        srl     %l5, (PAGE_SHIFT - 2), %l6
1111
        and     %l6, ((SUN4C_PTRS_PER_PTE - 1) << 2), %l6
1112
        add     %l6, %l4, %l6
1113
 
1114
        sethi   %hi(PAGE_SIZE), %l4
1115
 
1116
2:
1117
        ld      [%l6], %l3
1118
        deccc   %l7
1119
        sta     %l3, [%l5] ASI_PTE
1120
        add     %l6, 0x4, %l6
1121
        bne     2b
1122
         add    %l5, %l4, %l5
1123
 
1124
        sethi   %hi(sun4c_kernel_faults), %l4
1125
7:
1126
        ld      [%l4 + %lo(sun4c_kernel_faults)], %l3
1127
        inc     %l3
1128
        st      %l3, [%l4 + %lo(sun4c_kernel_faults)]
1129
 
1130
        /* Restore condition codes */
1131
        wr      %l0, 0x0, %psr
1132
        WRITE_PAUSE
1133
        jmp     %l1
1134
         rett   %l2
1135
 
1136
sun4c_fault_fromuser:
	SAVE_ALL
	 nop

	/* %l7 was packed earlier as [fault_addr & ~0xfff | write<<1 | text].
	 * Unpack it into the C argument registers for do_sun4c_fault().
	 */
	mov	%l7, %o1
	mov	%l7, %o2
	and	%o1, 1, %o1		! arg2: non-zero if instruction fault
	mov	%l7, %o3
	and	%o2, 2, %o2		! arg3: non-zero if fault was a write
	andn	%o3, 0xfff, %o3		! arg4: page-aligned fault address

	wr	%l0, PSR_ET, %psr	! re-enable traps before calling C
	WRITE_PAUSE

	call	do_sun4c_fault
	 add	%sp, STACKFRAME_SZ, %o0	! arg1: struct pt_regs *

	RESTORE_ALL
1154
 
1155
        .align  4
1156
        .globl  srmmu_fault
1157
srmmu_fault:
1158
        mov     0x400, %l5
1159
        mov     0x300, %l4
1160
 
1161
        lda     [%l5] ASI_M_MMUREGS, %l6        ! read sfar first
1162
        lda     [%l4] ASI_M_MMUREGS, %l5        ! read sfsr last
1163
 
1164
        andn    %l6, 0xfff, %l6
1165
        srl     %l5, 6, %l5                     ! and encode all info into l7
1166
 
1167
        and     %l5, 2, %l5
1168
        or      %l5, %l6, %l6
1169
 
1170
        or      %l6, %l7, %l7                   ! l7 = [addr,write,txtfault]
1171
 
1172
        SAVE_ALL
1173
 
1174
        mov     %l7, %o1
1175
        mov     %l7, %o2
1176
        and     %o1, 1, %o1             ! arg2 = text_faultp
1177
        mov     %l7, %o3
1178
        and     %o2, 2, %o2             ! arg3 = writep
1179
        andn    %o3, 0xfff, %o3         ! arg4 = faulting address
1180
 
1181
        wr      %l0, PSR_ET, %psr
1182
        WRITE_PAUSE
1183
 
1184
        call    do_sparc_fault
1185
         add    %sp, STACKFRAME_SZ, %o0 ! arg1 = pt_regs ptr
1186
 
1187
        RESTORE_ALL
1188
 
1189
#ifdef CONFIG_SUNOS_EMUL
	/* SunOS syscall zero is the "indirect" syscall:
	 * indir_syscall(scall_num, arg0, arg1, arg2...).
	 * Look up the real handler, shift every argument down one
	 * register slot, and tail-call the handler.
	 */
	.globl	sunos_indir
sunos_indir:
	mov	%o7, %l4		! preserve return address across lookup
	cmp	%o0, NR_SYSCALLS
	blu,a	1f			! valid number -> table lookup
	 sll	%o0, 0x2, %o0		! (annulled unless taken) word offset

	sethi	%hi(sunos_nosys), %l6	! out of range -> ENOSYS handler
	b	2f
	 or	%l6, %lo(sunos_nosys), %l6

1:
	set	sunos_sys_table, %l7
	ld	[%l7 + %o0], %l6	! fetch handler pointer

2:
	mov	%o1, %o0		! shift args: argN+1 -> argN
	mov	%o2, %o1
	mov	%o3, %o2
	mov	%o4, %o3
	mov	%o5, %o4
	call	%l6
	 mov	%l4, %o7		! tail-call: handler returns to our caller
#endif
1218
 
1219
        .align  4
1220
        .globl  sys_nis_syscall
1221
sys_nis_syscall:
1222
        mov     %o7, %l5
1223
        add     %sp, STACKFRAME_SZ, %o0         ! pt_regs *regs arg
1224
        call    c_sys_nis_syscall
1225
         mov    %l5, %o7
1226
 
1227
        .align 4
1228
        .globl  sys_ptrace
1229
sys_ptrace:
1230
        call    do_ptrace
1231
         add    %sp, STACKFRAME_SZ, %o0
1232
 
1233
        ld      [%curptr + TI_FLAGS], %l5
1234
        andcc   %l5, _TIF_SYSCALL_TRACE, %g0
1235
        be      1f
1236
         nop
1237
 
1238
        call    syscall_trace
1239
         nop
1240
 
1241
1:
1242
        RESTORE_ALL
1243
 
1244
        .align  4
1245
        .globl  sys_execve
1246
sys_execve:
1247
        mov     %o7, %l5
1248
        add     %sp, STACKFRAME_SZ, %o0         ! pt_regs *regs arg
1249
        call    sparc_execve
1250
         mov    %l5, %o7
1251
 
1252
        .align  4
1253
        .globl  sys_pipe
1254
sys_pipe:
1255
        mov     %o7, %l5
1256
        add     %sp, STACKFRAME_SZ, %o0         ! pt_regs *regs arg
1257
        call    sparc_pipe
1258
         mov    %l5, %o7
1259
 
1260
        .align  4
1261
        .globl  sys_sigaltstack
1262
sys_sigaltstack:
1263
        mov     %o7, %l5
1264
        mov     %fp, %o2
1265
        call    do_sigaltstack
1266
         mov    %l5, %o7
1267
 
1268
        .align  4
1269
        .globl  sys_sigstack
1270
sys_sigstack:
1271
        mov     %o7, %l5
1272
        mov     %fp, %o2
1273
        call    do_sys_sigstack
1274
         mov    %l5, %o7
1275
 
1276
        .align  4
1277
        .globl  sys_sigreturn
1278
sys_sigreturn:
1279
        call    do_sigreturn
1280
         add    %sp, STACKFRAME_SZ, %o0
1281
 
1282
        ld      [%curptr + TI_FLAGS], %l5
1283
        andcc   %l5, _TIF_SYSCALL_TRACE, %g0
1284
        be      1f
1285
         nop
1286
 
1287
        call    syscall_trace
1288
         nop
1289
 
1290
1:
1291
        /* We don't want to muck with user registers like a
1292
         * normal syscall, just return.
1293
         */
1294
        RESTORE_ALL
1295
 
1296
        .align  4
1297
        .globl  sys_rt_sigreturn
1298
sys_rt_sigreturn:
1299
        call    do_rt_sigreturn
1300
         add    %sp, STACKFRAME_SZ, %o0
1301
 
1302
        ld      [%curptr + TI_FLAGS], %l5
1303
        andcc   %l5, _TIF_SYSCALL_TRACE, %g0
1304
        be      1f
1305
         nop
1306
 
1307
        call    syscall_trace
1308
         nop
1309
 
1310
1:
1311
        /* We are returning to a signal handler. */
1312
        RESTORE_ALL
1313
 
1314
        /* Now that we have a real sys_clone, sys_fork() is
1315
         * implemented in terms of it.  Our _real_ implementation
1316
         * of SunOS vfork() will use sys_vfork().
1317
         *
1318
         * XXX These three should be consolidated into mostly shared
1319
         * XXX code just like on sparc64... -DaveM
1320
         */
1321
        .align  4
1322
        .globl  sys_fork, flush_patch_two
1323
sys_fork:
1324
        mov     %o7, %l5
1325
flush_patch_two:
1326
        FLUSH_ALL_KERNEL_WINDOWS;
1327
        ld      [%curptr + TI_TASK], %o4
1328
        rd      %psr, %g4
1329
        WRITE_PAUSE
1330
        mov     SIGCHLD, %o0                    ! arg0: clone flags
1331
        rd      %wim, %g5
1332
        WRITE_PAUSE
1333
        mov     %fp, %o1                        ! arg1: usp
1334
        std     %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
1335
        add     %sp, STACKFRAME_SZ, %o2         ! arg2: pt_regs ptr
1336
        mov     0, %o3
1337
        call    sparc_do_fork
1338
         mov    %l5, %o7
1339
 
1340
        /* Whee, kernel threads! */
1341
        .globl  sys_clone, flush_patch_three
1342
sys_clone:
1343
        mov     %o7, %l5
1344
flush_patch_three:
1345
        FLUSH_ALL_KERNEL_WINDOWS;
1346
        ld      [%curptr + TI_TASK], %o4
1347
        rd      %psr, %g4
1348
        WRITE_PAUSE
1349
 
1350
        /* arg0,1: flags,usp  -- loaded already */
1351
        cmp     %o1, 0x0                        ! Is new_usp NULL?
1352
        rd      %wim, %g5
1353
        WRITE_PAUSE
1354
        be,a    1f
1355
         mov    %fp, %o1                        ! yes, use callers usp
1356
        andn    %o1, 7, %o1                     ! no, align to 8 bytes
1357
1:
1358
        std     %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
1359
        add     %sp, STACKFRAME_SZ, %o2         ! arg2: pt_regs ptr
1360
        mov     0, %o3
1361
        call    sparc_do_fork
1362
         mov    %l5, %o7
1363
 
1364
        /* Whee, real vfork! */
1365
        .globl  sys_vfork, flush_patch_four
1366
sys_vfork:
1367
flush_patch_four:
1368
        FLUSH_ALL_KERNEL_WINDOWS;
1369
        ld      [%curptr + TI_TASK], %o4
1370
        rd      %psr, %g4
1371
        WRITE_PAUSE
1372
        rd      %wim, %g5
1373
        WRITE_PAUSE
1374
        std     %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
1375
        sethi   %hi(0x4000 | 0x0100 | SIGCHLD), %o0
1376
        mov     %fp, %o1
1377
        or      %o0, %lo(0x4000 | 0x0100 | SIGCHLD), %o0
1378
        sethi   %hi(sparc_do_fork), %l1
1379
        mov     0, %o3
1380
        jmpl    %l1 + %lo(sparc_do_fork), %g0
1381
         add    %sp, STACKFRAME_SZ, %o2
1382
 
1383
        .align  4
1384
linux_sparc_ni_syscall:
1385
        sethi   %hi(sys_ni_syscall), %l7
1386
        b       syscall_is_too_hard
1387
         or     %l7, %lo(sys_ni_syscall), %l7
1388
 
1389
linux_fast_syscall:
	/* Low bit of the table entry marked this syscall "fast":
	 * no SAVE_ALL, jump straight at the handler with the first
	 * four user args copied into the out registers.
	 */
	andn	%l7, 3, %l7		! strip flag bits from handler address
	mov	%i0, %o0
	mov	%i1, %o1
	mov	%i2, %o2
	jmpl	%l7 + %g0, %g0
	 mov	%i3, %o3
1396
 
1397
linux_syscall_trace:
	/* Syscall-entry tracing: tell the tracer, then reload the
	 * (possibly tracer-modified) arguments and rejoin the slow
	 * path at its call site (label 2 in syscall_is_too_hard).
	 */
	call	syscall_trace
	 nop
	mov	%i0, %o0
	mov	%i1, %o1
	mov	%i2, %o2
	mov	%i3, %o3
	b	2f
	 mov	%i4, %o4
1406
 
1407
        .globl  ret_from_fork
1408
ret_from_fork:
1409
        call    schedule_tail
1410
         mov    %g3, %o0
1411
        b       ret_sys_call
1412
         ld     [%sp + STACKFRAME_SZ + PT_I0], %o0
1413
 
1414
        /* Linux native and SunOS system calls enter here... */
1415
        .align  4
1416
        .globl  linux_sparc_syscall
1417
linux_sparc_syscall:
1418
        /* Direct access to user regs, must faster. */
1419
        cmp     %g1, NR_SYSCALLS
1420
        bgeu    linux_sparc_ni_syscall
1421
         sll    %g1, 2, %l4
1422
        ld      [%l7 + %l4], %l7
1423
        andcc   %l7, 1, %g0
1424
        bne     linux_fast_syscall
1425
         /* Just do first insn from SAVE_ALL in the delay slot */
1426
 
1427
        .globl  syscall_is_too_hard
1428
syscall_is_too_hard:
1429
        SAVE_ALL_HEAD
1430
         rd     %wim, %l3
1431
 
1432
        wr      %l0, PSR_ET, %psr
1433
        mov     %i0, %o0
1434
        mov     %i1, %o1
1435
        mov     %i2, %o2
1436
 
1437
        ld      [%curptr + TI_FLAGS], %l5
1438
        mov     %i3, %o3
1439
        andcc   %l5, _TIF_SYSCALL_TRACE, %g0
1440
        mov     %i4, %o4
1441
        bne     linux_syscall_trace
1442
         mov    %i0, %l5
1443
2:
1444
        call    %l7
1445
         mov    %i5, %o5
1446
 
1447
        st      %o0, [%sp + STACKFRAME_SZ + PT_I0]
1448
 
1449
        .globl  ret_sys_call
1450
ret_sys_call:
1451
        ld      [%curptr + TI_FLAGS], %l6
1452
        cmp     %o0, -ERESTART_RESTARTBLOCK
1453
        ld      [%sp + STACKFRAME_SZ + PT_PSR], %g3
1454
        set     PSR_C, %g2
1455
        bgeu    1f
1456
         andcc  %l6, _TIF_SYSCALL_TRACE, %g0
1457
 
1458
        /* System call success, clear Carry condition code. */
1459
        andn    %g3, %g2, %g3
1460
        clr     %l6
1461
        st      %g3, [%sp + STACKFRAME_SZ + PT_PSR]
1462
        bne     linux_syscall_trace2
1463
         ld     [%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */
1464
        add     %l1, 0x4, %l2                   /* npc = npc+4 */
1465
        st      %l1, [%sp + STACKFRAME_SZ + PT_PC]
1466
        b       ret_trap_entry
1467
         st     %l2, [%sp + STACKFRAME_SZ + PT_NPC]
1468
1:
1469
        /* System call failure, set Carry condition code.
1470
         * Also, get abs(errno) to return to the process.
1471
         */
1472
        sub     %g0, %o0, %o0
1473
        or      %g3, %g2, %g3
1474
        st      %o0, [%sp + STACKFRAME_SZ + PT_I0]
1475
        mov     1, %l6
1476
        st      %g3, [%sp + STACKFRAME_SZ + PT_PSR]
1477
        bne     linux_syscall_trace2
1478
         ld     [%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */
1479
        add     %l1, 0x4, %l2                   /* npc = npc+4 */
1480
        st      %l1, [%sp + STACKFRAME_SZ + PT_PC]
1481
        b       ret_trap_entry
1482
         st     %l2, [%sp + STACKFRAME_SZ + PT_NPC]
1483
 
1484
linux_syscall_trace2:
1485
        call    syscall_trace
1486
         add    %l1, 0x4, %l2                   /* npc = npc+4 */
1487
        st      %l1, [%sp + STACKFRAME_SZ + PT_PC]
1488
        b       ret_trap_entry
1489
         st     %l2, [%sp + STACKFRAME_SZ + PT_NPC]
1490
 
1491
 
1492
        /*
1493
         * Solaris system calls and indirect system calls enter here.
1494
         *
1495
         * I have named the solaris indirect syscalls like that because
1496
         * it seems like Solaris has some fast path syscalls that can
1497
         * be handled as indirect system calls. - mig
1498
         */
1499
 
1500
linux_syscall_for_solaris:
1501
        sethi   %hi(sys_call_table), %l7
1502
        b       linux_sparc_syscall
1503
         or     %l7, %lo(sys_call_table), %l7
1504
 
1505
        .align  4
1506
        .globl  solaris_syscall
1507
solaris_syscall:
1508
        cmp     %g1,59
1509
        be      linux_syscall_for_solaris
1510
         cmp    %g1,2
1511
        be      linux_syscall_for_solaris
1512
         cmp    %g1,42
1513
        be      linux_syscall_for_solaris
1514
         cmp    %g1,119
1515
        be,a    linux_syscall_for_solaris
1516
         mov    2, %g1
1517
1:
1518
        SAVE_ALL_HEAD
1519
         rd     %wim, %l3
1520
 
1521
        wr      %l0, PSR_ET, %psr
1522
        nop
1523
        nop
1524
        mov     %i0, %l5
1525
 
1526
        call    do_solaris_syscall
1527
         add    %sp, STACKFRAME_SZ, %o0
1528
 
1529
        st      %o0, [%sp + STACKFRAME_SZ + PT_I0]
1530
        set     PSR_C, %g2
1531
        cmp     %o0, -ERESTART_RESTARTBLOCK
1532
        bgeu    1f
1533
         ld     [%sp + STACKFRAME_SZ + PT_PSR], %g3
1534
 
1535
        /* System call success, clear Carry condition code. */
1536
        andn    %g3, %g2, %g3
1537
        clr     %l6
1538
        b       2f
1539
         st     %g3, [%sp + STACKFRAME_SZ + PT_PSR]
1540
 
1541
1:
1542
        /* System call failure, set Carry condition code.
1543
         * Also, get abs(errno) to return to the process.
1544
         */
1545
        sub     %g0, %o0, %o0
1546
        mov     1, %l6
1547
        st      %o0, [%sp + STACKFRAME_SZ + PT_I0]
1548
        or      %g3, %g2, %g3
1549
        st      %g3, [%sp + STACKFRAME_SZ + PT_PSR]
1550
 
1551
        /* Advance the pc and npc over the trap instruction.
1552
         * If the npc is unaligned (has a 1 in the lower byte), it means
1553
         * the kernel does not want us to play magic (ie, skipping over
1554
         * traps).  Mainly when the Solaris code wants to set some PC and
1555
         * nPC (setcontext).
1556
         */
1557
2:
1558
        ld      [%sp + STACKFRAME_SZ + PT_NPC], %l1     /* pc  = npc   */
1559
        andcc   %l1, 1, %g0
1560
        bne     1f
1561
         add    %l1, 0x4, %l2                   /* npc = npc+4 */
1562
        st      %l1, [%sp + STACKFRAME_SZ + PT_PC]
1563
        b       ret_trap_entry
1564
         st     %l2, [%sp + STACKFRAME_SZ + PT_NPC]
1565
 
1566
        /* kernel knows what it is doing, fixup npc and continue */
1567
1:
1568
        sub     %l1, 1, %l1
1569
        b       ret_trap_entry
1570
         st     %l1, [%sp + STACKFRAME_SZ + PT_NPC]
1571
 
1572
#ifndef CONFIG_SUNOS_EMUL
	/* SunOS emulation compiled out: every SunOS syscall lands in
	 * the C stub, which presumably raises the appropriate error.
	 */
	.align	4
	.globl	sunos_syscall
sunos_syscall:
	SAVE_ALL_HEAD
	 rd	%wim, %l3
	wr	%l0, PSR_ET, %psr	! traps on; nops cover the write delay
	nop
	nop
	mov	%i0, %l5
	call	do_sunos_syscall
	 add	%sp, STACKFRAME_SZ, %o0	! arg1: struct pt_regs *
#endif
1585
 
1586
        /* {net, open}bsd system calls enter here... */
1587
        .align  4
1588
        .globl  bsd_syscall
1589
bsd_syscall:
1590
        /* Direct access to user regs, must faster. */
1591
        cmp     %g1, NR_SYSCALLS
1592
        blu,a   1f
1593
         sll    %g1, 2, %l4
1594
 
1595
        set     sys_ni_syscall, %l7
1596
        b       bsd_is_too_hard
1597
         nop
1598
 
1599
1:
1600
        ld      [%l7 + %l4], %l7
1601
 
1602
        .globl  bsd_is_too_hard
1603
bsd_is_too_hard:
1604
        rd      %wim, %l3
1605
        SAVE_ALL
1606
 
1607
        wr      %l0, PSR_ET, %psr
1608
        WRITE_PAUSE
1609
 
1610
2:
1611
        mov     %i0, %o0
1612
        mov     %i1, %o1
1613
        mov     %i2, %o2
1614
        mov     %i0, %l5
1615
        mov     %i3, %o3
1616
        mov     %i4, %o4
1617
        call    %l7
1618
         mov    %i5, %o5
1619
 
1620
        st      %o0, [%sp + STACKFRAME_SZ + PT_I0]
1621
        set     PSR_C, %g2
1622
        cmp     %o0, -ERESTART_RESTARTBLOCK
1623
        bgeu    1f
1624
         ld     [%sp + STACKFRAME_SZ + PT_PSR], %g3
1625
 
1626
        /* System call success, clear Carry condition code. */
1627
        andn    %g3, %g2, %g3
1628
        clr     %l6
1629
        b       2f
1630
         st     %g3, [%sp + STACKFRAME_SZ + PT_PSR]
1631
 
1632
1:
1633
        /* System call failure, set Carry condition code.
1634
         * Also, get abs(errno) to return to the process.
1635
         */
1636
        sub     %g0, %o0, %o0
1637
#if 0 /* XXX todo XXX */
1638
        sethi   %hi(bsd_xlatb_rorl), %o3
1639
        or      %o3, %lo(bsd_xlatb_rorl), %o3
1640
        sll     %o0, 2, %o0
1641
        ld      [%o3 + %o0], %o0
1642
#endif
1643
        mov     1, %l6
1644
        st      %o0, [%sp + STACKFRAME_SZ + PT_I0]
1645
        or      %g3, %g2, %g3
1646
        st      %g3, [%sp + STACKFRAME_SZ + PT_PSR]
1647
 
1648
        /* Advance the pc and npc over the trap instruction. */
1649
2:
1650
        ld      [%sp + STACKFRAME_SZ + PT_NPC], %l1     /* pc  = npc   */
1651
        add     %l1, 0x4, %l2                   /* npc = npc+4 */
1652
        st      %l1, [%sp + STACKFRAME_SZ + PT_PC]
1653
        b       ret_trap_entry
1654
         st     %l2, [%sp + STACKFRAME_SZ + PT_NPC]
1655
 
1656
/* Saving and restoring the FPU state is best done from lowlevel code.
 *
 * void fpsave(unsigned long *fpregs, unsigned long *fsr,
 *             void *fpqueue, unsigned long *fpqdepth)
 */

	.globl	fpsave
fpsave:
	st	%fsr, [%o1]	! may trap if the FPU is in a bogon state
	ld	[%o1], %g1
	set	0x2000, %g4	! FSR queue-not-empty bit
	andcc	%g1, %g4, %g0
	be	2f
	 mov	0, %g2		! queue depth starts at zero

	/* Drain the floating-point deferred-trap queue, 8 bytes per
	 * entry, counting entries in %g2 until FSR says it is empty.
	 */
1:
	std	%fq, [%o2]
fpsave_magic:
	st	%fsr, [%o1]
	ld	[%o1], %g3
	andcc	%g3, %g4, %g0
	add	%g2, 1, %g2
	bne	1b
	 add	%o2, 8, %o2

2:
	st	%g2, [%o3]	! report queue depth to caller

	/* Dump all 32 FP registers as 16 doubles. */
	std	%f0, [%o0 + 0x00]
	std	%f2, [%o0 + 0x08]
	std	%f4, [%o0 + 0x10]
	std	%f6, [%o0 + 0x18]
	std	%f8, [%o0 + 0x20]
	std	%f10, [%o0 + 0x28]
	std	%f12, [%o0 + 0x30]
	std	%f14, [%o0 + 0x38]
	std	%f16, [%o0 + 0x40]
	std	%f18, [%o0 + 0x48]
	std	%f20, [%o0 + 0x50]
	std	%f22, [%o0 + 0x58]
	std	%f24, [%o0 + 0x60]
	std	%f26, [%o0 + 0x68]
	std	%f28, [%o0 + 0x70]
	retl
	 std	%f30, [%o0 + 0x78]

	/* Thanks for Theo Deraadt and the authors of the Sprite/netbsd/openbsd
	 * code for pointing out this possible deadlock, while we save state
	 * above we could trap on the fsr store so our low level fpu trap
	 * code has to know how to deal with this.
	 */
fpsave_catch:
	b	fpsave_magic + 4	! re-enter after the trapping store
	 st	%fsr, [%o1]

fpsave_catch2:
	b	fpsave + 4		! re-enter after the trapping store
	 st	%fsr, [%o1]
1715
 
1716
        /* void fpload(unsigned long *fpregs, unsigned long *fsr); */
1717
 
1718
        .globl  fpload
1719
fpload:
1720
        ldd     [%o0 + 0x00], %f0
1721
        ldd     [%o0 + 0x08], %f2
1722
        ldd     [%o0 + 0x10], %f4
1723
        ldd     [%o0 + 0x18], %f6
1724
        ldd     [%o0 + 0x20], %f8
1725
        ldd     [%o0 + 0x28], %f10
1726
        ldd     [%o0 + 0x30], %f12
1727
        ldd     [%o0 + 0x38], %f14
1728
        ldd     [%o0 + 0x40], %f16
1729
        ldd     [%o0 + 0x48], %f18
1730
        ldd     [%o0 + 0x50], %f20
1731
        ldd     [%o0 + 0x58], %f22
1732
        ldd     [%o0 + 0x60], %f24
1733
        ldd     [%o0 + 0x68], %f26
1734
        ldd     [%o0 + 0x70], %f28
1735
        ldd     [%o0 + 0x78], %f30
1736
        ld      [%o1], %fsr
1737
        retl
1738
         nop
1739
 
1740
        /* __ndelay and __udelay take two arguments:
1741
         * 0 - nsecs or usecs to delay
1742
         * 1 - per_cpu udelay_val (loops per jiffy)
1743
         *
1744
         * Note that ndelay gives HZ times higher resolution but has a 10ms
1745
         * limit.  udelay can handle up to 1s.
1746
         */
1747
        .globl  __ndelay
1748
__ndelay:
1749
        save    %sp, -STACKFRAME_SZ, %sp
1750
        mov     %i0, %o0
1751
        call    .umul                   ! round multiplier up so large ns ok
1752
         mov    0x1ae, %o1              ! 2**32 / (1 000 000 000 / HZ)
1753
        call    .umul
1754
         mov    %i1, %o1                ! udelay_val
1755
        ba      delay_continue
1756
         mov    %o1, %o0                ! >>32 later for better resolution
1757
 
1758
        .globl  __udelay
1759
__udelay:
1760
        save    %sp, -STACKFRAME_SZ, %sp
1761
        mov     %i0, %o0
1762
        sethi   %hi(0x10c7), %o1        ! round multiplier up so large us ok
1763
        call    .umul
1764
         or     %o1, %lo(0x10c7), %o1   ! 2**32 / 1 000 000
1765
        call    .umul
1766
         mov    %i1, %o1                ! udelay_val
1767
        sethi   %hi(0x028f4b62), %l0    ! Add in rounding constant * 2**32,
1768
        or      %g0, %lo(0x028f4b62), %l0
1769
        addcc   %o0, %l0, %o0           ! 2**32 * 0.009 999
1770
        bcs,a   3f
1771
         add    %o1, 0x01, %o1
1772
3:
1773
        call    .umul
1774
         mov    HZ, %o0                 ! >>32 earlier for wider range
1775
 
1776
delay_continue:
1777
        cmp     %o0, 0x0
1778
1:
1779
        bne     1b
1780
         subcc  %o0, 1, %o0
1781
 
1782
        ret
1783
        restore
1784
 
1785
        /* Handle a software breakpoint */
1786
        /* We have to inform parent that child has stopped */
1787
        .align 4
1788
        .globl breakpoint_trap
1789
breakpoint_trap:
1790
        rd      %wim,%l3
1791
        SAVE_ALL
1792
        wr      %l0, PSR_ET, %psr
1793
        WRITE_PAUSE
1794
 
1795
        st      %i0, [%sp + STACKFRAME_SZ + PT_G0] ! for restarting syscalls
1796
        call    sparc_breakpoint
1797
         add    %sp, STACKFRAME_SZ, %o0
1798
 
1799
        RESTORE_ALL
1800
 
1801
        .align  4
1802
        .globl  __handle_exception, flush_patch_exception
1803
__handle_exception:
1804
flush_patch_exception:
1805
        FLUSH_ALL_KERNEL_WINDOWS;
1806
        ldd     [%o0], %o6
1807
        jmpl    %o7 + 0xc, %g0                  ! see asm-sparc/processor.h
1808
         mov    1, %g1                          ! signal EFAULT condition
1809
 
1810
        .align  4
1811
        .globl  kill_user_windows, kuw_patch1_7win
1812
        .globl  kuw_patch1
1813
kuw_patch1_7win:        sll     %o3, 6, %o3
1814
 
1815
        /* No matter how much overhead this routine has in the worst
1816
         * case scenerio, it is several times better than taking the
1817
         * traps with the old method of just doing flush_user_windows().
1818
         */
1819
kill_user_windows:
1820
        ld      [%g6 + TI_UWINMASK], %o0        ! get current umask
1821
        orcc    %g0, %o0, %g0                   ! if no bits set, we are done
1822
        be      3f                              ! nothing to do
1823
         rd     %psr, %o5                       ! must clear interrupts
1824
        or      %o5, PSR_PIL, %o4               ! or else that could change
1825
        wr      %o4, 0x0, %psr                  ! the uwinmask state
1826
        WRITE_PAUSE                             ! burn them cycles
1827
1:
1828
        ld      [%g6 + TI_UWINMASK], %o0        ! get consistent state
1829
        orcc    %g0, %o0, %g0                   ! did an interrupt come in?
1830
        be      4f                              ! yep, we are done
1831
         rd     %wim, %o3                       ! get current wim
1832
        srl     %o3, 1, %o4                     ! simulate a save
1833
kuw_patch1:
1834
        sll     %o3, 7, %o3                     ! compute next wim
1835
        or      %o4, %o3, %o3                   ! result
1836
        andncc  %o0, %o3, %o0                   ! clean this bit in umask
1837
        bne     kuw_patch1                      ! not done yet
1838
         srl    %o3, 1, %o4                     ! begin another save simulation
1839
        wr      %o3, 0x0, %wim                  ! set the new wim
1840
        st      %g0, [%g6 + TI_UWINMASK]        ! clear uwinmask
1841
4:
1842
        wr      %o5, 0x0, %psr                  ! re-enable interrupts
1843
        WRITE_PAUSE                             ! burn baby burn
1844
3:
1845
        retl                                    ! return
1846
         st     %g0, [%g6 + TI_W_SAVED]         ! no windows saved
1847
 
1848
        .align  4
1849
        .globl  restore_current
1850
restore_current:
1851
        LOAD_CURRENT(g6, o0)
1852
        retl
1853
         nop
1854
 
1855
#ifdef CONFIG_PCI
/* NOTE(review): the header name was lost in extraction; the PCI_SYS_INT_*
 * register offsets used below come from the PCIC header - confirm path.
 */
#include <asm/pcic.h>

	.align	4
	.globl	linux_trap_ipi15_pcic
linux_trap_ipi15_pcic:
	rd	%wim, %l3
	SAVE_ALL

	/*
	 * First deactivate NMI
	 * or we cannot drop ET, cannot get window spill traps.
	 * The busy loop is necessary because the PIO error
	 * sometimes does not go away quickly and we trap again.
	 */
	sethi	%hi(pcic_regs), %o1
	ld	[%o1 + %lo(pcic_regs)], %o2	! %o2 = PCIC register base

	! Get pending status for printouts later.
	ld	[%o2 + PCI_SYS_INT_PENDING], %o0

	mov	PCI_SYS_INT_PENDING_CLEAR_ALL, %o1
	stb	%o1, [%o2 + PCI_SYS_INT_PENDING_CLEAR]
1:
	ld	[%o2 + PCI_SYS_INT_PENDING], %o1
	andcc	%o1, ((PCI_SYS_INT_PENDING_PIO|PCI_SYS_INT_PENDING_PCI)>>24), %g0
	bne	1b			! spin until PIO/PCI pending clears
	 nop

	or	%l0, PSR_PIL, %l4	! mask interrupts, then enable traps
	wr	%l4, 0x0, %psr
	WRITE_PAUSE
	wr	%l4, PSR_ET, %psr
	WRITE_PAUSE

	call	pcic_nmi
	 add	%sp, STACKFRAME_SZ, %o1	! struct pt_regs *regs
	RESTORE_ALL

	.globl	pcic_nmi_trap_patch
pcic_nmi_trap_patch:
	/* Four instructions copied over the level-15 trap vector slot. */
	sethi	%hi(linux_trap_ipi15_pcic), %l3
	jmpl	%l3 + %lo(linux_trap_ipi15_pcic), %g0
	 rd	%psr, %l0
	.word	0
#endif /* CONFIG_PCI */
1902
 
1903
/* End of entry.S */

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.