OpenCores
URL https://opencores.org/ocsvn/openrisc/openrisc/trunk

Subversion Repositories openrisc

[/] [openrisc/] [trunk/] [rtos/] [rtems/] [c/] [src/] [exec/] [score/] [cpu/] [sparc/] [cpu_asm.S] - Blame information for rev 773

Go to most recent revision | Details | Compare with Previous | View Log

Line No. Rev Author Line
1 30 unneback
/*  cpu_asm.s
2
 *
3
 *  This file contains the basic algorithms for all assembly code used
4
 *  in an specific CPU port of RTEMS.  These algorithms must be implemented
5
 *  in assembly language.
6
 *
7
 *  COPYRIGHT (c) 1989-1999.
8
 *  On-Line Applications Research Corporation (OAR).
9
 *
10
 *  The license and distribution terms for this file may be
11
 *  found in the file LICENSE in this distribution or at
12
 *  http://www.OARcorp.com/rtems/license.html.
13
 *
14
 *  Ported to ERC32 implementation of the SPARC by On-Line Applications
15
 *  Research Corporation (OAR) under contract to the European Space
16
 *  Agency (ESA).
17
 *
18
 *  ERC32 modifications of respective RTEMS file: COPYRIGHT (c) 1995.
19
 *  European Space Agency.
20
 *
21
 *  $Id: cpu_asm.S,v 1.2 2001-09-27 11:59:30 chris Exp $
22
 */
23
 
24
#include <asm.h>
25
 
26
#if (SPARC_HAS_FPU == 1)
27
 
28
/*
29
 *  void _CPU_Context_save_fp(
30
 *    void **fp_context_ptr
31
 *  )
32
 *
33
 *  This routine is responsible for saving the FP context
34
 *  at *fp_context_ptr.  If the point to load the FP context
35
 *  from is changed then the pointer is modified by this routine.
36
 *
37
 *  NOTE: See the README in this directory for information on the
38
 *        management of the "EF" bit in the PSR.
39
 */
40
 
41
        .align 4
42
        PUBLIC(_CPU_Context_save_fp)
43
SYM(_CPU_Context_save_fp):
44
        save    %sp, -CPU_MINIMUM_STACK_FRAME_SIZE, %sp
45
 
46
        /*
47
         *  The following enables the floating point unit.
48
         */
49
 
50
        mov     %psr, %l0
51
        sethi   %hi(SPARC_PSR_EF_MASK), %l1
52
        or      %l1, %lo(SPARC_PSR_EF_MASK), %l1
53
        or      %l0, %l1, %l0
54
        mov     %l0, %psr                  ! **** ENABLE FLOAT ACCESS ****
55
 
56
        ld      [%i0], %l0
57
        std     %f0, [%l0 + FO_F1_OFFSET]
58
        std     %f2, [%l0 + F2_F3_OFFSET]
59
        std     %f4, [%l0 + F4_F5_OFFSET]
60
        std     %f6, [%l0 + F6_F7_OFFSET]
61
        std     %f8, [%l0 + F8_F9_OFFSET]
62
        std     %f10, [%l0 + F1O_F11_OFFSET]
63
        std     %f12, [%l0 + F12_F13_OFFSET]
64
        std     %f14, [%l0 + F14_F15_OFFSET]
65
        std     %f16, [%l0 + F16_F17_OFFSET]
66
        std     %f18, [%l0 + F18_F19_OFFSET]
67
        std     %f20, [%l0 + F2O_F21_OFFSET]
68
        std     %f22, [%l0 + F22_F23_OFFSET]
69
        std     %f24, [%l0 + F24_F25_OFFSET]
70
        std     %f26, [%l0 + F26_F27_OFFSET]
71
        std     %f28, [%l0 + F28_F29_OFFSET]
72
        std     %f30, [%l0 + F3O_F31_OFFSET]
73
        st      %fsr, [%l0 + FSR_OFFSET]
74
        ret
75
        restore
76
 
77
/*
 *  void _CPU_Context_restore_fp(
 *    void **fp_context_ptr
 *  )
 *
 *  This routine is responsible for restoring the FP context
 *  at *fp_context_ptr.  If the point to load the FP context
 *  from is changed then the pointer is modified by this routine.
 *
 *  NOTE: See the README in this directory for information on the
 *        management of the "EF" bit in the PSR.
 *
 *  NOTE(review): the "O"-for-"0" offset names mirror those used in
 *        _CPU_Context_save_fp and the CPU-dependent header.
 */

        .align 4
        PUBLIC(_CPU_Context_restore_fp)
SYM(_CPU_Context_restore_fp):
        save    %sp, -CPU_MINIMUM_STACK_FRAME_SIZE , %sp

        /*
         *  The following enables the floating point unit (sets PSR.EF)
         *  so the FP register loads below do not trap.
         */

        mov     %psr, %l0
        sethi   %hi(SPARC_PSR_EF_MASK), %l1
        or      %l1, %lo(SPARC_PSR_EF_MASK), %l1
        or      %l0, %l1, %l0
        mov     %l0, %psr                  ! **** ENABLE FLOAT ACCESS ****

        ld      [%i0], %l0                 ! l0 = *fp_context_ptr (FP save area)
        ldd     [%l0 + FO_F1_OFFSET], %f0  ! restore f0-f31 as double-word pairs
        ldd     [%l0 + F2_F3_OFFSET], %f2
        ldd     [%l0 + F4_F5_OFFSET], %f4
        ldd     [%l0 + F6_F7_OFFSET], %f6
        ldd     [%l0 + F8_F9_OFFSET], %f8
        ldd     [%l0 + F1O_F11_OFFSET], %f10
        ldd     [%l0 + F12_F13_OFFSET], %f12
        ldd     [%l0 + F14_F15_OFFSET], %f14
        ldd     [%l0 + F16_F17_OFFSET], %f16
        ldd     [%l0 + F18_F19_OFFSET], %f18
        ldd     [%l0 + F2O_F21_OFFSET], %f20
        ldd     [%l0 + F22_F23_OFFSET], %f22
        ldd     [%l0 + F24_F25_OFFSET], %f24
        ldd     [%l0 + F26_F27_OFFSET], %f26
        ldd     [%l0 + F28_F29_OFFSET], %f28
        ldd     [%l0 + F3O_F31_OFFSET], %f30
        ld      [%l0 + FSR_OFFSET], %fsr   ! restore the FP status register
        ret
        restore                            ! delay slot: pop register window
125
 
126
#endif /* SPARC_HAS_FPU */
127
 
128
/*
 *  void _CPU_Context_switch(
 *    Context_Control  *run,
 *    Context_Control  *heir
 *  )
 *
 *  This routine performs a normal non-FP context switch:
 *  the executing task's integer state is saved into *run (o0),
 *  then execution falls through into _CPU_Context_restore_heir
 *  to load the heir task's state from *heir (o1).
 */

        .align 4
        PUBLIC(_CPU_Context_switch)
SYM(_CPU_Context_switch):
        ! skip g0 (hardwired to zero)
        st      %g1, [%o0 + G1_OFFSET]       ! save the global registers
        std     %g2, [%o0 + G2_OFFSET]
        std     %g4, [%o0 + G4_OFFSET]
        std     %g6, [%o0 + G6_OFFSET]

        std     %l0, [%o0 + L0_OFFSET]       ! save the local registers
        std     %l2, [%o0 + L2_OFFSET]
        std     %l4, [%o0 + L4_OFFSET]
        std     %l6, [%o0 + L6_OFFSET]

        std     %i0, [%o0 + I0_OFFSET]       ! save the input registers
        std     %i2, [%o0 + I2_OFFSET]
        std     %i4, [%o0 + I4_OFFSET]
        std     %i6, [%o0 + I6_FP_OFFSET]    ! i6 is the frame pointer

        std     %o0, [%o0 + O0_OFFSET]       ! save the output registers
        std     %o2, [%o0 + O2_OFFSET]
        std     %o4, [%o0 + O4_OFFSET]
        std     %o6, [%o0 + O6_SP_OFFSET]    ! o6 is the stack pointer

        rd      %psr, %o2
        st      %o2, [%o0 + PSR_OFFSET]      ! save status register

        /*
         *  This is entered from _CPU_Context_restore with:
         *    o1 = context to restore
         *    o2 = psr
         */

        PUBLIC(_CPU_Context_restore_heir)
SYM(_CPU_Context_restore_heir):
        /*
         *  Flush all windows with valid contents except the current one.
         *  In examining the set register windows, one may logically divide
         *  the windows into sets (some of which may be empty) based on their
         *  current status:
         *
         *    + current (i.e. in use),
         *    + used (i.e. a restore would not trap)
         *    + invalid (i.e. 1 in corresponding bit in WIM)
         *    + unused
         *
         *  Either the used or unused set of windows may be empty.
         *
         *  NOTE: We assume only one bit is set in the WIM at a time.
         *
         *  Given a CWP of 5 and a WIM of 0x1, the registers are divided
         *  into sets as follows:
         *
         *    + 0   - invalid
         *    + 1-4 - unused
         *    + 5   - current
         *    + 6-7 - used
         *
         *  In this case, we only would save the used windows -- 6 and 7.
         *
         *   Traps are disabled for the same logical period as in a
         *     flush all windows trap handler.
         *
         *    Register Usage while saving the windows:
         *      g1 = current PSR
         *      g2 = current wim
         *      g3 = CWP
         *      g4 = wim scratch
         *      g5 = scratch
         */

        ld      [%o1 + PSR_OFFSET], %g1       ! g1 = saved psr of the heir

        and     %o2, SPARC_PSR_CWP_MASK, %g3  ! g3 = CWP of the running task
                                              ! g1 = heir psr w/o ET and CWP
        andn    %g1, SPARC_PSR_ET_MASK | SPARC_PSR_CWP_MASK, %g1
        or      %g1, %g3, %g1                 ! g1 = heirs psr w/ current CWP
        mov     %g1, %psr                     ! restore status register and
                                              ! **** DISABLE TRAPS ****
        mov     %wim, %g2                     ! g2 = wim
        mov     1, %g4
        sll     %g4, %g3, %g4                 ! g4 = WIM mask for CW invalid

save_frame_loop:
        sll     %g4, 1, %g5                   ! rotate the "wim" left 1
        srl     %g4, SPARC_NUMBER_OF_REGISTER_WINDOWS - 1, %g4
        or      %g4, %g5, %g4                 ! g4 = wim if we do one restore

        /*
         *  If a restore would not underflow, then continue.
         */

        andcc   %g4, %g2, %g0                 ! would one more restore hit the
                                              !   invalid (WIM) window?
        bnz     done_flushing                 ! yes -- all used windows flushed
        nop                                   ! delay slot

        restore                               ! back one window

        /*
         *  Now save the window just as if we overflowed to it.
         */

        std     %l0, [%sp + CPU_STACK_FRAME_L0_OFFSET]
        std     %l2, [%sp + CPU_STACK_FRAME_L2_OFFSET]
        std     %l4, [%sp + CPU_STACK_FRAME_L4_OFFSET]
        std     %l6, [%sp + CPU_STACK_FRAME_L6_OFFSET]

        std     %i0, [%sp + CPU_STACK_FRAME_I0_OFFSET]
        std     %i2, [%sp + CPU_STACK_FRAME_I2_OFFSET]
        std     %i4, [%sp + CPU_STACK_FRAME_I4_OFFSET]
        std     %i6, [%sp + CPU_STACK_FRAME_I6_FP_OFFSET]

        ba      save_frame_loop
        nop                                   ! delay slot

done_flushing:

        add     %g3, 1, %g3                   ! calculate desired WIM:
        and     %g3, SPARC_NUMBER_OF_REGISTER_WINDOWS - 1, %g3
                                              !   (CWP + 1) mod NWINDOWS
        mov     1, %g4
        sll     %g4, %g3, %g4                 ! g4 = new WIM
        mov     %g4, %wim

        or      %g1, SPARC_PSR_ET_MASK, %g1
        mov     %g1, %psr                     ! **** ENABLE TRAPS ****
                                              !   and restore CWP
        nop                                   ! PSR write delay slots
        nop
        nop

        ! skip g0 (hardwired to zero)
        ld      [%o1 + G1_OFFSET], %g1        ! restore the global registers
        ldd     [%o1 + G2_OFFSET], %g2
        ldd     [%o1 + G4_OFFSET], %g4
        ldd     [%o1 + G6_OFFSET], %g6

        ldd     [%o1 + L0_OFFSET], %l0        ! restore the local registers
        ldd     [%o1 + L2_OFFSET], %l2
        ldd     [%o1 + L4_OFFSET], %l4
        ldd     [%o1 + L6_OFFSET], %l6

        ldd     [%o1 + I0_OFFSET], %i0        ! restore the input registers
        ldd     [%o1 + I2_OFFSET], %i2
        ldd     [%o1 + I4_OFFSET], %i4
        ldd     [%o1 + I6_FP_OFFSET], %i6

        ldd     [%o1 + O2_OFFSET], %o2        ! restore the output registers
        ldd     [%o1 + O4_OFFSET], %o4
        ldd     [%o1 + O6_SP_OFFSET], %o6
        ! do o0/o1 last to avoid destroying heir context pointer
        ldd     [%o1 + O0_OFFSET], %o0        ! overwrite heir pointer

        jmp     %o7 + 8                       ! return
        nop                                   ! delay slot
291
 
292
/*
 *  void _CPU_Context_restore(
 *    Context_Control *new_context
 *  )
 *
 *  This routine is generally used only to perform restart self.
 *  It loads the current PSR and tail-branches into
 *  _CPU_Context_restore_heir with the arguments it expects
 *  (o1 = context to restore, o2 = psr).
 *
 *  NOTE: It is unnecessary to reload some registers.
 */

        .align 4
        PUBLIC(_CPU_Context_restore)
SYM(_CPU_Context_restore):
        save    %sp, -CPU_MINIMUM_STACK_FRAME_SIZE, %sp
        rd      %psr, %o2                     ! o2 = current psr
        ba      SYM(_CPU_Context_restore_heir)
        mov     %i0, %o1                      ! in the delay slot: o1 = context
309
 
310
/*
 *  void _ISR_Handler()
 *
 *  This routine provides the RTEMS interrupt management.
 *
 *  We enter this handler from the 4 instructions in the trap table with
 *  the following registers assumed to be set as shown:
 *
 *    l0 = PSR
 *    l1 = PC
 *    l2 = nPC
 *    l3 = trap type
 *
 *  NOTE: By an executive defined convention, trap type is between 0 and 255 if
 *        it is an asynchronous trap and 256 and 511 if it is synchronous.
 */

        .align 4
        PUBLIC(_ISR_Handler)
SYM(_ISR_Handler):
        /*
         *  Fix the return address for synchronous traps: return past the
         *  trapping instruction rather than re-executing it.
         */

        andcc   %l3, SPARC_SYNCHRONOUS_TRAP_BIT_MASK, %g0
                                      ! Is this a synchronous trap?
        be,a    win_ovflow            ! No, then skip the adjustment
        nop                           ! DELAY (annulled when not taken)
        mov     %l1, %l6              ! save trapped pc for debug info
        mov     %l2, %l1              ! do not return to the instruction
        add     %l2, 4, %l2           ! indicated

win_ovflow:
        /*
         *  Save the globals this block uses.
         *
         *  These registers are not restored from the locals.  Their contents
         *  are saved directly from the locals into the ISF below.
         */

        mov     %g4, %l4                 ! save the globals this block uses
        mov     %g5, %l5

        /*
         *  When at a "window overflow" trap, (wim == (1 << cwp)).
         *  If we get here like that, then process a window overflow.
         */

        rd      %wim, %g4
        srl     %g4, %l0, %g5            ! g5 = win >> cwp ; shift count and CWP
                                         !   are LS 5 bits ; how convenient :)
        cmp     %g5, 1                   ! Is this an invalid window?
        bne     dont_do_the_window       ! No, then skip all this stuff
        ! we are using the delay slot

        /*
         *  The following is same as a 1 position right rotate of WIM
         */

        srl     %g4, 1, %g5              ! g5 = WIM >> 1
        sll     %g4, SPARC_NUMBER_OF_REGISTER_WINDOWS-1 , %g4
                                         ! g4 = WIM << (Number Windows - 1)
        or      %g4, %g5, %g4            ! g4 = (WIM >> 1) |
                                         !      (WIM << (Number Windows - 1))

        /*
         *  At this point:
         *
         *    g4 = the new WIM
         *    g5 is free
         */

        /*
         *  Since we are tinkering with the register windows, we need to
         *  make sure that all the required information is in global registers.
         */

        save                          ! Save into the window
        wr      %g4, 0, %wim          ! WIM = new WIM
        nop                           ! delay slots after WIM write
        nop
        nop

        /*
         *  Now save the window just as if we overflowed to it.
         */

        std     %l0, [%sp + CPU_STACK_FRAME_L0_OFFSET]
        std     %l2, [%sp + CPU_STACK_FRAME_L2_OFFSET]
        std     %l4, [%sp + CPU_STACK_FRAME_L4_OFFSET]
        std     %l6, [%sp + CPU_STACK_FRAME_L6_OFFSET]

        std     %i0, [%sp + CPU_STACK_FRAME_I0_OFFSET]
        std     %i2, [%sp + CPU_STACK_FRAME_I2_OFFSET]
        std     %i4, [%sp + CPU_STACK_FRAME_I4_OFFSET]
        std     %i6, [%sp + CPU_STACK_FRAME_I6_FP_OFFSET]

        restore                       ! back to the trap window
        nop

dont_do_the_window:
        /*
         *  Global registers %g4 and %g5 are saved directly from %l4 and
         *  %l5 directly into the ISF below.
         */

save_isf:

        /*
         *  Save the state of the interrupted task -- especially the global
         *  registers -- in the Interrupt Stack Frame.  Note that the ISF
         *  includes a regular minimum stack frame which will be used if
         *  needed by register window overflow and underflow handlers.
         *
         *  REGISTERS SAME AS AT _ISR_Handler
         */

        sub     %fp, CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE, %sp
                                               ! make space for ISF

        std     %l0, [%sp + ISF_PSR_OFFSET]    ! save psr, PC
        st      %l2, [%sp + ISF_NPC_OFFSET]    ! save nPC
        st      %g1, [%sp + ISF_G1_OFFSET]     ! save g1
        std     %g2, [%sp + ISF_G2_OFFSET]     ! save g2, g3
        std     %l4, [%sp + ISF_G4_OFFSET]     ! save g4, g5 -- see above
        std     %g6, [%sp + ISF_G6_OFFSET]     ! save g6, g7

        std     %i0, [%sp + ISF_I0_OFFSET]     ! save i0, i1
        std     %i2, [%sp + ISF_I2_OFFSET]     ! save i2, i3
        std     %i4, [%sp + ISF_I4_OFFSET]     ! save i4, i5
        std     %i6, [%sp + ISF_I6_FP_OFFSET]  ! save i6/fp, i7

        rd      %y, %g1
        st      %g1, [%sp + ISF_Y_OFFSET]      ! save y
        st      %l6, [%sp + ISF_TPC_OFFSET]    ! save real trapped pc

        mov     %sp, %o1                       ! 2nd arg to ISR Handler

        /*
         *  Increment ISR nest level and Thread dispatch disable level.
         *
         *  Register usage for this section:
         *
         *    l4 = _Thread_Dispatch_disable_level pointer
         *    l5 = _ISR_Nest_level pointer
         *    l6 = _Thread_Dispatch_disable_level value
         *    l7 = _ISR_Nest_level value
         *
         *  NOTE: It is assumed that l4 - l7 will be preserved until the ISR
         *        nest and thread dispatch disable levels are unnested.
         */

        sethi    %hi(SYM(_Thread_Dispatch_disable_level)), %l4
        ld       [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))], %l6
        sethi    %hi(SYM(_ISR_Nest_level)), %l5
        ld       [%l5 + %lo(SYM(_ISR_Nest_level))], %l7

        add      %l6, 1, %l6
        st       %l6, [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))]

        add      %l7, 1, %l7
        st       %l7, [%l5 + %lo(SYM(_ISR_Nest_level))]

        /*
         *  If ISR nest level was zero (now 1), then switch stack.
         */

        mov      %sp, %fp
        subcc    %l7, 1, %l7             ! outermost interrupt handler?
        bnz      dont_switch_stacks      ! No, then do not switch stacks
                                         ! (sethi below is a harmless delay slot)
        sethi    %hi(SYM(_CPU_Interrupt_stack_high)), %g4
        ld       [%g4 + %lo(SYM(_CPU_Interrupt_stack_high))], %sp

dont_switch_stacks:
        /*
         *  Make sure we have a place on the stack for the window overflow
         *  trap handler to write into.  At this point it is safe to
         *  enable traps again.
         */

        sub      %sp, CPU_MINIMUM_STACK_FRAME_SIZE, %sp

        /*
         *  Check if we have an external interrupt (trap 0x11 - 0x1f). If so,
         *  set the PIL in the %psr to mask off interrupts with lower priority.
         *  The original %psr in %l0 is not modified since it will be restored
         *  when the interrupt handler returns.
         */

/* This is a fix for ERC32 with FPU rev.B or rev.C */

#if defined(FPU_REVB)


        mov      %l0, %g5                ! g5 = candidate psr to install
        and      %l3, 0x0ff, %g4         ! g4 = trap type w/o sync indicator
        subcc    %g4, 0x08, %g0          ! FP exception trap?
        be       fpu_revb
        subcc    %g4, 0x11, %g0          ! delay slot doubles as range check
        bl       dont_fix_pil            ! below external interrupt range
        subcc    %g4, 0x1f, %g0
        bg       dont_fix_pil            ! above external interrupt range
        sll      %g4, 8, %g4             ! move trap number into PIL position
        and      %g4, SPARC_PSR_PIL_MASK, %g4
        andn     %l0, SPARC_PSR_PIL_MASK, %g5
        or       %g4, %g5, %g5           ! g5 = psr with new PIL
        srl      %l0, 12, %g4            ! check PSR.EF
        andcc    %g4, 1, %g0
        be       dont_fix_pil            ! FPU disabled: no workaround needed
        nop
        ba,a     enable_irq


fpu_revb:
        srl      %l0, 12, %g4   ! check if EF is set in %psr
        andcc    %g4, 1, %g0
        be       dont_fix_pil   ! if FPU disabled then continue as normal
        and      %l3, 0xff, %g4
        subcc    %g4, 0x08, %g0
        bne      enable_irq     ! if not a FPU exception then do two fmovs
        set      __sparc_fq, %g4
        st       %fsr, [%g4]    ! if FQ is not empty and FQ[1] = fmovs
        ld       [%g4], %g4     ! then this is bug 3.14
        srl      %g4, 13, %g4   ! extract the FSR qne bit
        andcc    %g4, 1, %g0
        be       dont_fix_pil
        set      __sparc_fq, %g4
        std      %fq, [%g4]     ! drain the FP queue into __sparc_fq
        ld       [%g4+4], %g4   ! g4 = queued instruction word
        set      0x81a00020, %g5 ! opcode of "fmovs %f0, %f0"
        subcc    %g4, %g5, %g0
        bne,a    dont_fix_pil2
        wr       %l0, SPARC_PSR_ET_MASK, %psr ! **** ENABLE TRAPS ****
        ba,a     simple_return

enable_irq:
        or       %g5, SPARC_PSR_PIL_MASK, %g4 ! mask all interrupts
        wr       %g4, SPARC_PSR_ET_MASK, %psr ! **** ENABLE TRAPS ****
        nop; nop; nop                         ! PSR write delay slots
        fmovs    %f0, %f0                     ! two fmovs flush the rev.B/C bug
        ba       dont_fix_pil
        fmovs    %f0, %f0                     ! delay slot

        .data
        .global __sparc_fq
        .align 8
__sparc_fq:                     ! scratch double word for draining the FP queue
        .word 0,0

        .text
/* end of ERC32 FPU rev.B/C fix */

#else

        mov      %l0, %g5                ! g5 = candidate psr to install
        subcc    %g4, 0x11, %g0
        bl       dont_fix_pil            ! below external interrupt range
        subcc    %g4, 0x1f, %g0
        bg       dont_fix_pil            ! above external interrupt range
        sll      %g4, 8, %g4             ! move trap number into PIL position
        ! NOTE(review): this path compares %g4 without first loading it from
        ! %l3 (the trap type); the FPU_REVB path above does
        ! "and %l3, 0x0ff, %g4" first.  Looks like a dropped instruction --
        ! verify against the upstream RTEMS sources.
        and      %g4, SPARC_PSR_PIL_MASK, %g4
        andn     %l0, SPARC_PSR_PIL_MASK, %g5
        or       %g4, %g5, %g5           ! g5 = psr with new PIL
#endif

dont_fix_pil:
        wr       %g5, SPARC_PSR_ET_MASK, %psr ! **** ENABLE TRAPS ****
dont_fix_pil2:

        /*
         *  Vector to user's handler.
         *
         *  NOTE: TBR may no longer have vector number in it since
         *        we just enabled traps.  It is definitely in l3.
         */

        sethi    %hi(SYM(_ISR_Vector_table)), %g4
        or       %g4, %lo(SYM(_ISR_Vector_table)), %g4
        and      %l3, 0xFF, %g5         ! remove synchronous trap indicator
        sll      %g5, 2, %g5            ! g5 = offset into table
        ld       [%g4 + %g5], %g4       ! g4 = _ISR_Vector_table[ vector ]


                                        ! o1 = 2nd arg = address of the ISF
                                        !   WAS LOADED WHEN ISF WAS SAVED!!!
        mov      %l3, %o0               ! o0 = 1st arg = vector number
        call     %g4, 0                 ! invoke the user's handler
        nop                             ! delay slot

        /*
         *  Redisable traps so we can finish up the interrupt processing.
         *  This is a VERY conservative place to do this.
         *
         *  NOTE: %l0 has the PSR which was in place when we took the trap.
         */

        mov      %l0, %psr             ! **** DISABLE TRAPS ****

        /*
         *  Decrement ISR nest level and Thread dispatch disable level.
         *
         *  Register usage for this section:
         *
         *    l4 = _Thread_Dispatch_disable_level pointer
         *    l5 = _ISR_Nest_level pointer
         *    l6 = _Thread_Dispatch_disable_level value
         *    l7 = _ISR_Nest_level value
         *
         *  NOTE: %l7 was already decremented by the outermost-handler
         *        check above, so only the store is needed here.
         */

        sub      %l6, 1, %l6
        st       %l6, [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))]

        st       %l7, [%l5 + %lo(SYM(_ISR_Nest_level))]

        /*
         *  If dispatching is disabled (includes nested interrupt case),
         *  then do a "simple" exit.
         */

        orcc     %l6, %g0, %g0   ! Is dispatching disabled?
        bnz      simple_return   ! Yes, then do a "simple" exit
        nop                      ! delay slot

        /*
         *  If a context switch is necessary, then do fudge stack to
         *  return to the interrupt dispatcher.
         */

        sethi    %hi(SYM(_Context_Switch_necessary)), %l4
        ld       [%l4 + %lo(SYM(_Context_Switch_necessary))], %l5

        orcc     %l5, %g0, %g0   ! Is thread switch necessary?
        bnz      SYM(_ISR_Dispatch) ! yes, then invoke the dispatcher
        nop                      ! delay slot

        /*
         *  Finally, check to see if signals were sent to the currently
         *  executing task.  If so, we need to invoke the interrupt dispatcher.
         */

        sethi    %hi(SYM(_ISR_Signals_to_thread_executing)), %l6
        ld       [%l6 + %lo(SYM(_ISR_Signals_to_thread_executing))], %l7

        orcc     %l7, %g0, %g0   ! Were signals sent to the currently
                                 !   executing thread?
        bz       simple_return   ! no, then do a "simple" exit
                                 ! use the delay slot to clear the signals
                                 !   to the currently executing task flag
        st       %g0, [%l6 + %lo(SYM(_ISR_Signals_to_thread_executing))]


        /*
         *  Invoke interrupt dispatcher.
         */

        PUBLIC(_ISR_Dispatch)
SYM(_ISR_Dispatch):

        /*
         *  The following subtract should get us back on the interrupted
         *  tasks stack and add enough room to invoke the dispatcher.
         *  When we enable traps, we are mostly back in the context
         *  of the task and subsequent interrupts can operate normally.
         */

        sub      %fp, CPU_MINIMUM_STACK_FRAME_SIZE, %sp

        or      %l0, SPARC_PSR_ET_MASK, %l7    ! l7 = PSR with ET=1
        mov     %l7, %psr                      !  **** ENABLE TRAPS ****
        nop                                    ! PSR write delay slots
        nop
        nop

        call    SYM(_Thread_Dispatch), 0
        nop                                    ! delay slot

        /*
         *  The CWP in place at this point may be different from
         *  that which was in effect at the beginning of the ISR if we
         *  have been context switched between the beginning of this invocation
         *  of _ISR_Handler and this point.  Thus the CWP and WIM should
         *  not be changed back to their values at ISR entry time.  Any
         *  changes to the PSR must preserve the CWP.
         */

simple_return:
        ld      [%fp + ISF_Y_OFFSET], %l5      ! restore y
        wr      %l5, 0, %y

        ldd     [%fp + ISF_PSR_OFFSET], %l0    ! restore psr, PC
        ld      [%fp + ISF_NPC_OFFSET], %l2    ! restore nPC
        rd      %psr, %l3
        and     %l3, SPARC_PSR_CWP_MASK, %l3   ! want "current" CWP
        andn    %l0, SPARC_PSR_CWP_MASK, %l0   ! want rest from task
        or      %l3, %l0, %l0                  ! install it later...
        andn    %l0, SPARC_PSR_ET_MASK, %l0    ! ...with traps disabled

        /*
         *  Restore tasks global and out registers
         */

        mov    %fp, %g1                        ! g1 = ISF base; survives restore

                                              ! g1 is restored later
        ldd     [%fp + ISF_G2_OFFSET], %g2    ! restore g2, g3
        ldd     [%fp + ISF_G4_OFFSET], %g4    ! restore g4, g5
        ldd     [%fp + ISF_G6_OFFSET], %g6    ! restore g6, g7

        ldd     [%fp + ISF_I0_OFFSET], %i0    ! restore i0, i1
        ldd     [%fp + ISF_I2_OFFSET], %i2    ! restore i2, i3
        ldd     [%fp + ISF_I4_OFFSET], %i4    ! restore i4, i5
        ldd     [%fp + ISF_I6_FP_OFFSET], %i6 ! restore i6/fp, i7

        /*
         *  Registers:
         *
         *   ALL global registers EXCEPT G1 and the input registers have
         *   already been restored and thus are off limits.
         *
         *   The following is the contents of the local registers:
         *
         *     l0 = original psr
         *     l1 = return address (i.e. PC)
         *     l2 = nPC
         *     l3 = CWP
         */

        /*
         *  if (CWP + 1) is an invalid window then we need to reload it.
         *
         *  WARNING: Traps should now be disabled
         */

        mov     %l0, %psr                  !  **** DISABLE TRAPS ****
        nop                                ! PSR write delay slots
        nop
        nop
        rd      %wim, %l4
        add     %l0, 1, %l6                ! l6 = cwp + 1
        and     %l6, SPARC_PSR_CWP_MASK, %l6 ! do the modulo on it
        srl     %l4, %l6, %l5              ! l5 = win >> cwp + 1 ; shift count
                                           !  and CWP are conveniently LS 5 bits
        cmp     %l5, 1                     ! Is tasks window invalid?
        bne     good_task_window           ! (sll below is a harmless delay slot)

        /*
         *  The following code is the same as a 1 position left rotate of WIM.
         */

        sll     %l4, 1, %l5                ! l5 = WIM << 1
        srl     %l4, SPARC_NUMBER_OF_REGISTER_WINDOWS-1 , %l4
                                           ! l4 = WIM >> (Number Windows - 1)
        or      %l4, %l5, %l4              ! l4 = (WIM << 1) |
                                           !      (WIM >> (Number Windows - 1))

        /*
         *  Now restore the window just as if we underflowed to it.
         */

        wr      %l4, 0, %wim               ! WIM = new WIM
        nop                                ! must delay after writing WIM
        nop
        nop
        restore                            ! now into the tasks window

        ldd     [%g1 + CPU_STACK_FRAME_L0_OFFSET], %l0
        ldd     [%g1 + CPU_STACK_FRAME_L2_OFFSET], %l2
        ldd     [%g1 + CPU_STACK_FRAME_L4_OFFSET], %l4
        ldd     [%g1 + CPU_STACK_FRAME_L6_OFFSET], %l6
        ldd     [%g1 + CPU_STACK_FRAME_I0_OFFSET], %i0
        ldd     [%g1 + CPU_STACK_FRAME_I2_OFFSET], %i2
        ldd     [%g1 + CPU_STACK_FRAME_I4_OFFSET], %i4
        ldd     [%g1 + CPU_STACK_FRAME_I6_FP_OFFSET], %i6
                                           ! reload of sp clobbers ISF
        save                               ! Back to ISR dispatch window

good_task_window:

        mov     %l0, %psr                  !  **** DISABLE TRAPS ****
                                           !  and restore condition codes.
        ld      [%g1 + ISF_G1_OFFSET], %g1 ! restore g1
        jmp     %l1                        ! transfer control and
        rett    %l2                        ! go back to tasks window
794
 
795
/* end of file */

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.