or1k/trunk/rtems-20020807/cpukit/score/cpu/mips/cpu_asm.S (rev 1026)
/*
 *  This file contains the basic algorithms for all assembly code used
 *  in a specific CPU port of RTEMS.  These algorithms must be implemented
 *  in assembly language.
 *
 *  History:
 *    Baseline: no_cpu
 *    1996:     Ported to MIPS64ORION by Craig Lebakken
 *          COPYRIGHT (c) 1996 by Transition Networks Inc.
 *          To anyone who acknowledges that the modifications to this file to
 *          port it to the MIPS64ORION are provided "AS IS" without any
 *          express or implied warranty:
 *             permission to use, copy, modify, and distribute this file
 *             for any purpose is hereby granted without fee, provided that
 *             the above copyright notice and this notice appears in all
 *             copies, and that the name of Transition Networks not be used in
 *             advertising or publicity pertaining to distribution of the
 *             software without specific, written prior permission. Transition
 *             Networks makes no representations about the suitability
 *             of this software for any purpose.
 *    2000: Reworked by Alan Cudmore to become
 *          the baseline of the more general MIPS port.
 *    2001: Joel Sherrill continued this rework,
 *          rewriting as much as possible in C and added the JMR3904 BSP
 *          so testing could be performed on a simulator.
 *    2001: Greg Menke, bench tested ISR
 *          performance, tweaking this code and the isr vectoring routines
 *          to reduce overhead & latencies.  Added optional
 *          instrumentation as well.
 *    2002: Greg Menke, overhauled cpu_asm.S,
 *          cpu.c and cpu.h to manage FP vs int only tasks, interrupt levels
 *          and deferred FP contexts.
 *    2002: Joel Sherrill enhanced the exception processing
 *          by increasing the amount of context saved/restored.
 *
 *  COPYRIGHT (c) 1989-2002.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.OARcorp.com/rtems/license.html.
 *
 *  cpu_asm.S,v 1.27 2002/07/16 22:26:14 joel Exp
 */

#include 
#include "iregdef.h"
#include "idtcpu.h"

#define ASSEMBLY_ONLY
#include 


/* enable debugging shadow writes to misc ram, this is a vestigial
* Mongoose-ism debug tool - but may be handy in the future so we
* left it in...
*/

/* #define INSTRUMENT_ISR_VECTORING */
/* #define INSTRUMENT_EXECUTING_THREAD */



/*  Ifdefs prevent the duplication of code for MIPS ISA Level 3 ( R4xxx )
 *  and MIPS ISA Level 1 (R3xxx).
 */

#if __mips == 3
/* 64 bit register operations */
#define NOP
#define ADD     dadd
#define STREG   sd
#define LDREG   ld
#define MFCO    dmfc0
#define MTCO    dmtc0
#define ADDU    addu
#define ADDIU   addiu
#define R_SZ    8
#define F_SZ    8
#define SZ_INT  8
#define SZ_INT_POW2 3

/* XXX if we don't always want 64 bit register ops, then another ifdef */

#elif __mips == 1
/* 32 bit register operations */
#define NOP     nop
#define ADD     add
#define STREG   sw
#define LDREG   lw
#define MFCO    mfc0
#define MTCO    mtc0
#define ADDU    add
#define ADDIU   addi
#define R_SZ    4
#define F_SZ    4
#define SZ_INT  4
#define SZ_INT_POW2 2
#else
#error "mips assembly: what size registers do I deal with?"
#endif
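
/*
** For example, with the width macros above a single source line such as
**     STREG ra,RA_OFFSET*R_SZ(a0)
** in _CPU_Context_switch below assembles to "sd ra,80(a0)" on an ISA 3
** (64 bit register) build and to "sw ra,40(a0)" on an ISA 1 (32 bit
** register) build, so the context save/restore code is written only once.
*/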


#define ISR_VEC_SIZE    4
#define EXCP_STACK_SIZE (NREGS*R_SZ)


#ifdef __GNUC__
#define ASM_EXTERN(x,size) .extern x,size
#else
#define ASM_EXTERN(x,size)
#endif

/* NOTE: these constants must match the Context_Control structure in cpu.h */
#define S0_OFFSET 0
#define S1_OFFSET 1
#define S2_OFFSET 2
#define S3_OFFSET 3
#define S4_OFFSET 4
#define S5_OFFSET 5
#define S6_OFFSET 6
#define S7_OFFSET 7
#define SP_OFFSET 8
#define FP_OFFSET 9
#define RA_OFFSET 10
#define C0_SR_OFFSET 11
#define C0_EPC_OFFSET 12
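
/*
** These are slot indices, not byte offsets: each slot in Context_Control
** is R_SZ bytes wide, so e.g. the saved SR lives at byte offset
** C0_SR_OFFSET*R_SZ, i.e. 44 on a 32 bit build or 88 on a 64 bit build.
*/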

/* NOTE: these constants must match the Context_Control_fp structure in cpu.h */
#define FP0_OFFSET  0
#define FP1_OFFSET  1
#define FP2_OFFSET  2
#define FP3_OFFSET  3
#define FP4_OFFSET  4
#define FP5_OFFSET  5
#define FP6_OFFSET  6
#define FP7_OFFSET  7
#define FP8_OFFSET  8
#define FP9_OFFSET  9
#define FP10_OFFSET 10
#define FP11_OFFSET 11
#define FP12_OFFSET 12
#define FP13_OFFSET 13
#define FP14_OFFSET 14
#define FP15_OFFSET 15
#define FP16_OFFSET 16
#define FP17_OFFSET 17
#define FP18_OFFSET 18
#define FP19_OFFSET 19
#define FP20_OFFSET 20
#define FP21_OFFSET 21
#define FP22_OFFSET 22
#define FP23_OFFSET 23
#define FP24_OFFSET 24
#define FP25_OFFSET 25
#define FP26_OFFSET 26
#define FP27_OFFSET 27
#define FP28_OFFSET 28
#define FP29_OFFSET 29
#define FP30_OFFSET 30
#define FP31_OFFSET 31


ASM_EXTERN(__exceptionStackFrame, SZ_INT)



/*
 *  _CPU_Context_save_fp_context
 *
 *  This routine is responsible for saving the FP context
 *  at *fp_context_ptr.  If the point to load the FP context
 *  from is changed then the pointer is modified by this routine.
 *
 *  Sometimes a macro implementation of this is in cpu.h which dereferences
 *  the ** and a similarly named routine in this file is passed something
 *  like a (Context_Control_fp *).  The general rule on making this decision
 *  is to avoid writing assembly language.
 */

/* void _CPU_Context_save_fp(
 *   void **fp_context_ptr
 * );
 */

#if ( CPU_HARDWARE_FP == TRUE )
FRAME(_CPU_Context_save_fp,sp,0,ra)
        .set noreorder
        .set noat

        /*
        ** Make sure the FPU is on before we save state.  This code
        ** is here because the FPU context switch might occur when an
        ** integer task is switching out with a FP task switching in.
        */
        MFC0    t0,C0_SR
        li      t2,SR_CU1
        move    t1,t0
        or      t0,t2           /* turn on the fpu */
#if __mips == 3
        li      t2,SR_EXL | SR_IE
#elif __mips == 1
        li      t2,SR_IEC
#endif
        not     t2
        and     t0,t2           /* turn off interrupts */
        MTC0    t0,C0_SR

        ld      a1,(a0)
        move    t0,ra
        jal     _CPU_Context_save_fp_from_exception
        NOP

        /*
        ** Reassert the task's state because we've not saved it yet.
        */
        MTC0    t1,C0_SR
        j       t0
        NOP

        .globl _CPU_Context_save_fp_from_exception
_CPU_Context_save_fp_from_exception:
        swc1 $f0,FP0_OFFSET*F_SZ(a1)
        swc1 $f1,FP1_OFFSET*F_SZ(a1)
        swc1 $f2,FP2_OFFSET*F_SZ(a1)
        swc1 $f3,FP3_OFFSET*F_SZ(a1)
        swc1 $f4,FP4_OFFSET*F_SZ(a1)
        swc1 $f5,FP5_OFFSET*F_SZ(a1)
        swc1 $f6,FP6_OFFSET*F_SZ(a1)
        swc1 $f7,FP7_OFFSET*F_SZ(a1)
        swc1 $f8,FP8_OFFSET*F_SZ(a1)
        swc1 $f9,FP9_OFFSET*F_SZ(a1)
        swc1 $f10,FP10_OFFSET*F_SZ(a1)
        swc1 $f11,FP11_OFFSET*F_SZ(a1)
        swc1 $f12,FP12_OFFSET*F_SZ(a1)
        swc1 $f13,FP13_OFFSET*F_SZ(a1)
        swc1 $f14,FP14_OFFSET*F_SZ(a1)
        swc1 $f15,FP15_OFFSET*F_SZ(a1)
        swc1 $f16,FP16_OFFSET*F_SZ(a1)
        swc1 $f17,FP17_OFFSET*F_SZ(a1)
        swc1 $f18,FP18_OFFSET*F_SZ(a1)
        swc1 $f19,FP19_OFFSET*F_SZ(a1)
        swc1 $f20,FP20_OFFSET*F_SZ(a1)
        swc1 $f21,FP21_OFFSET*F_SZ(a1)
        swc1 $f22,FP22_OFFSET*F_SZ(a1)
        swc1 $f23,FP23_OFFSET*F_SZ(a1)
        swc1 $f24,FP24_OFFSET*F_SZ(a1)
        swc1 $f25,FP25_OFFSET*F_SZ(a1)
        swc1 $f26,FP26_OFFSET*F_SZ(a1)
        swc1 $f27,FP27_OFFSET*F_SZ(a1)
        swc1 $f28,FP28_OFFSET*F_SZ(a1)
        swc1 $f29,FP29_OFFSET*F_SZ(a1)
        swc1 $f30,FP30_OFFSET*F_SZ(a1)
        swc1 $f31,FP31_OFFSET*F_SZ(a1)
        j ra
        NOP
        .set at
ENDFRAME(_CPU_Context_save_fp)
#endif

/*
 *  _CPU_Context_restore_fp_context
 *
 *  This routine is responsible for restoring the FP context
 *  at *fp_context_ptr.  If the point to load the FP context
 *  from is changed then the pointer is modified by this routine.
 *
 *  Sometimes a macro implementation of this is in cpu.h which dereferences
 *  the ** and a similarly named routine in this file is passed something
 *  like a (Context_Control_fp *).  The general rule on making this decision
 *  is to avoid writing assembly language.
 */

/* void _CPU_Context_restore_fp(
 *   void **fp_context_ptr
 * )
 */

#if ( CPU_HARDWARE_FP == TRUE )
FRAME(_CPU_Context_restore_fp,sp,0,ra)
        .set noat
        .set noreorder

        /*
        ** Make sure the FPU is on before we retrieve state.  This code
        ** is here because the FPU context switch might occur when an
        ** integer task is switching out with a FP task switching in.
        */
        MFC0    t0,C0_SR
        li      t2,SR_CU1
        move    t1,t0
        or      t0,t2           /* turn on the fpu */
#if __mips == 3
        li      t2,SR_EXL | SR_IE
#elif __mips == 1
        li      t2,SR_IEC
#endif
        not     t2
        and     t0,t2           /* turn off interrupts */
        MTC0    t0,C0_SR

        ld      a1,(a0)
        move    t0,ra
        jal     _CPU_Context_restore_fp_from_exception
        NOP

        /*
        ** Reassert the old task's state because we've not restored the
        ** new one yet.
        */
        MTC0    t1,C0_SR
        j       t0
        NOP

        .globl _CPU_Context_restore_fp_from_exception
_CPU_Context_restore_fp_from_exception:
        lwc1 $f0,FP0_OFFSET*F_SZ(a1)
        lwc1 $f1,FP1_OFFSET*F_SZ(a1)
        lwc1 $f2,FP2_OFFSET*F_SZ(a1)
        lwc1 $f3,FP3_OFFSET*F_SZ(a1)
        lwc1 $f4,FP4_OFFSET*F_SZ(a1)
        lwc1 $f5,FP5_OFFSET*F_SZ(a1)
        lwc1 $f6,FP6_OFFSET*F_SZ(a1)
        lwc1 $f7,FP7_OFFSET*F_SZ(a1)
        lwc1 $f8,FP8_OFFSET*F_SZ(a1)
        lwc1 $f9,FP9_OFFSET*F_SZ(a1)
        lwc1 $f10,FP10_OFFSET*F_SZ(a1)
        lwc1 $f11,FP11_OFFSET*F_SZ(a1)
        lwc1 $f12,FP12_OFFSET*F_SZ(a1)
        lwc1 $f13,FP13_OFFSET*F_SZ(a1)
        lwc1 $f14,FP14_OFFSET*F_SZ(a1)
        lwc1 $f15,FP15_OFFSET*F_SZ(a1)
        lwc1 $f16,FP16_OFFSET*F_SZ(a1)
        lwc1 $f17,FP17_OFFSET*F_SZ(a1)
        lwc1 $f18,FP18_OFFSET*F_SZ(a1)
        lwc1 $f19,FP19_OFFSET*F_SZ(a1)
        lwc1 $f20,FP20_OFFSET*F_SZ(a1)
        lwc1 $f21,FP21_OFFSET*F_SZ(a1)
        lwc1 $f22,FP22_OFFSET*F_SZ(a1)
        lwc1 $f23,FP23_OFFSET*F_SZ(a1)
        lwc1 $f24,FP24_OFFSET*F_SZ(a1)
        lwc1 $f25,FP25_OFFSET*F_SZ(a1)
        lwc1 $f26,FP26_OFFSET*F_SZ(a1)
        lwc1 $f27,FP27_OFFSET*F_SZ(a1)
        lwc1 $f28,FP28_OFFSET*F_SZ(a1)
        lwc1 $f29,FP29_OFFSET*F_SZ(a1)
        lwc1 $f30,FP30_OFFSET*F_SZ(a1)
        lwc1 $f31,FP31_OFFSET*F_SZ(a1)
        j ra
        NOP
        .set at
ENDFRAME(_CPU_Context_restore_fp)
#endif

/*  _CPU_Context_switch
 *
 *  This routine performs a normal non-FP context switch.
 */

/* void _CPU_Context_switch(
 *   Context_Control  *run,
 *   Context_Control  *heir
 * )
 */

FRAME(_CPU_Context_switch,sp,0,ra)
        .set noreorder

        MFC0    t0,C0_SR
#if __mips == 3
        li      t1,SR_EXL | SR_IE
#elif __mips == 1
        li      t1,SR_IEC
#endif
        STREG   t0,C0_SR_OFFSET*R_SZ(a0)        /* save the task's SR */
        not     t1
        and     t0,t1                           /* mask off interrupts while we context switch */
        MTC0    t0,C0_SR
        NOP

        STREG ra,RA_OFFSET*R_SZ(a0)             /* save current context */
        STREG sp,SP_OFFSET*R_SZ(a0)
        STREG fp,FP_OFFSET*R_SZ(a0)
        STREG s0,S0_OFFSET*R_SZ(a0)
        STREG s1,S1_OFFSET*R_SZ(a0)
        STREG s2,S2_OFFSET*R_SZ(a0)
        STREG s3,S3_OFFSET*R_SZ(a0)
        STREG s4,S4_OFFSET*R_SZ(a0)
        STREG s5,S5_OFFSET*R_SZ(a0)
        STREG s6,S6_OFFSET*R_SZ(a0)
        STREG s7,S7_OFFSET*R_SZ(a0)


        /*
        ** this code grabs the userspace EPC if we're dispatching from
        ** an interrupt frame or fakes an address as the EPC if we're
        ** not.  This is for the gdbstub's benefit so it can know
        ** where each thread is running.
        **
        ** Its value is only set when calling threadDispatch from
        ** the interrupt handler and is cleared immediately when this
        ** routine gets it.
        */

        la      t0,__exceptionStackFrame        /* see if we're coming in from an exception */
        LDREG   t1, (t0)
        NOP
        beqz    t1,1f
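        /*
        ** Note: .set noreorder is in effect, so the STREG below sits in the
        ** beqz delay slot and the flag word is cleared on both paths
        ** (storing zero when it is already zero is harmless).
        */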

        STREG   zero, (t0)                      /* and clear it */
        NOP
        LDREG   t0,R_EPC*R_SZ(t1)               /* get the userspace EPC from the frame */
        b       2f

1:      la    t0,_Thread_Dispatch               /* if ==0, we're switched out */

2:      STREG   t0,C0_EPC_OFFSET*R_SZ(a0)


_CPU_Context_switch_restore:
        LDREG ra,RA_OFFSET*R_SZ(a1)             /* restore context */
        LDREG sp,SP_OFFSET*R_SZ(a1)
        LDREG fp,FP_OFFSET*R_SZ(a1)
        LDREG s0,S0_OFFSET*R_SZ(a1)
        LDREG s1,S1_OFFSET*R_SZ(a1)
        LDREG s2,S2_OFFSET*R_SZ(a1)
        LDREG s3,S3_OFFSET*R_SZ(a1)
        LDREG s4,S4_OFFSET*R_SZ(a1)
        LDREG s5,S5_OFFSET*R_SZ(a1)
        LDREG s6,S6_OFFSET*R_SZ(a1)
        LDREG s7,S7_OFFSET*R_SZ(a1)

        LDREG t0, C0_SR_OFFSET*R_SZ(a1)

//      NOP
//#if __mips == 3
//        andi  t0,SR_EXL
//        bnez  t0,_CPU_Context_1   /* set exception level from restore context */
//        li    t0,~SR_EXL
//        MFC0  t1,C0_SR
//        NOP
//        and   t1,t0
//        MTC0  t1,C0_SR
//
//#elif __mips == 1
//
//        andi  t0,(SR_INTERRUPT_ENABLE_BITS) /* we know 0 disabled */
//        beq   t0,$0,_CPU_Context_1          /* set level from restore context */
//        MFC0  t0,C0_SR
//        NOP
//        or    t0,(SR_INTERRUPT_ENABLE_BITS) /* new_sr = old sr with enabled  */
//        MTC0  t0,C0_SR                      /* set with enabled */
//        NOP


/*
** Incorporate the new task's FP coprocessor state and interrupt mask/enable
** into the status register.  We jump thru the requisite hoops to ensure we
** maintain all other SR bits as global values.
**
** Get the thread's FPU enable, int mask & int enable bits.  Although we keep the
** software int enables on a per-task basis, the rtems_task_create
** Interrupt Level & int level manipulation functions cannot enable/disable them,
** so they are automatically enabled for all tasks.  To turn them off, a thread
** must itself manipulate the SR register.
**
** Although something of a hack on this processor, we treat the SR register
** int enables as the RTEMS interrupt level.  We use the int level
** value as a bitmask, not as any sort of greater than/less than metric.
** Manipulation of a task's interrupt level directly corresponds to manipulation
** of that task's SR bits, as seen in cpu.c.
**
** Note, interrupts are disabled before context is saved, though the thread's
** interrupt enable state is recorded.  The task swapping in will apply its
** specific SR bits, including interrupt enable.  If further task-specific
** SR bits are arranged, it is this code, the cpu.c interrupt level stuff and
** cpu.h task initialization code that will be affected.
*/

        li      t2,SR_CU1
        or      t2,SR_IMASK

        /* int enable bits */
#if __mips == 3
        or      t2,SR_EXL + SR_IE
#elif __mips == 1
        or      t2,SR_IEC + SR_IEP + SR_IEO     /* save current & previous int enable */
#endif
        and     t0,t2           /* keep only the per-task bits */

        MFC0    t1,C0_SR        /* grab the current SR */
        not     t2
        and     t1,t2           /* mask off the old task's bits */
        or      t1,t0           /* or in the new task's bits */
        MTC0    t1,C0_SR        /* and load the new SR */
        NOP

/* _CPU_Context_1: */
        j       ra
        NOP
ENDFRAME(_CPU_Context_switch)


/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
 *
 *  NOTE: May be unnecessary to reload some registers.
 *
 *  void _CPU_Context_restore(
 *    Context_Control *new_context
 *  );
 */

FRAME(_CPU_Context_restore,sp,0,ra)
        .set noreorder
        move    a1,a0
        j       _CPU_Context_switch_restore
        NOP

ENDFRAME(_CPU_Context_restore)


ASM_EXTERN(_ISR_Nest_level, SZ_INT)
ASM_EXTERN(_Thread_Dispatch_disable_level,SZ_INT)
ASM_EXTERN(_Context_Switch_necessary,SZ_INT)
ASM_EXTERN(_ISR_Signals_to_thread_executing,SZ_INT)
ASM_EXTERN(_Thread_Executing,SZ_INT)

.extern _Thread_Dispatch
.extern _ISR_Vector_table




/*  void _DBG_Handler()
 *
 *  This routine services the (at least) MIPS1 debug vector,
 *  only used by the hardware debugging features.  This code,
 *  while optional, is best located here because it's intrinsically
 *  associated with exceptions in general & thus tied pretty
 *  closely to _ISR_Handler.
 *
 */


FRAME(_DBG_Handler,sp,0,ra)
        .set noreorder
        la      k0,_ISR_Handler
        j       k0
        NOP
        .set reorder
ENDFRAME(_DBG_Handler)




/*  void __ISR_Handler()
 *
 *  This routine provides the RTEMS interrupt management.
 *
 *  void _ISR_Handler()
 *
 *
 *  This discussion ignores a lot of the ugly details in a real
 *  implementation such as saving enough registers/state to be
 *  able to do something real.  Keep in mind that the goal is
 *  to invoke a user's ISR handler which is written in C and
 *  uses a certain set of registers.
 *
 *  Also note that the exact order is to a large extent flexible.
 *  Hardware will dictate a sequence for a certain subset of
 *  _ISR_Handler while requirements for setting
 *
 *  At entry to "common" _ISR_Handler, the vector number must be
 *  available.  On some CPUs the hardware puts either the vector
 *  number or the offset into the vector table for this ISR in a
 *  known place.  If the hardware does not give us this information,
 *  then the assembly portion of RTEMS for this port will contain
 *  a set of distinct interrupt entry points which somehow place
 *  the vector number in a known place (which is safe if another
 *  interrupt nests this one) and branches to _ISR_Handler.
 *
 */

FRAME(_ISR_Handler,sp,0,ra)
        .set noreorder

        /* Q: _ISR_Handler, not using IDT/SIM ...save extra regs? */

        /* wastes a lot of stack space for context?? */
        ADDIU    sp,sp,-EXCP_STACK_SIZE
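        /*
        ** The ADDIU above reserves a full exception frame of
        ** EXCP_STACK_SIZE = NREGS * R_SZ bytes on the interrupted stack;
        ** the registers below are stored into it at the R_* slot indices
        ** (presumably supplied by iregdef.h) scaled by R_SZ.
        */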

        STREG ra, R_RA*R_SZ(sp)  /* store ra on the stack */
        STREG v0, R_V0*R_SZ(sp)
        STREG v1, R_V1*R_SZ(sp)
        STREG a0, R_A0*R_SZ(sp)
        STREG a1, R_A1*R_SZ(sp)
        STREG a2, R_A2*R_SZ(sp)
        STREG a3, R_A3*R_SZ(sp)
        STREG t0, R_T0*R_SZ(sp)
        STREG t1, R_T1*R_SZ(sp)
        STREG t2, R_T2*R_SZ(sp)
        STREG t3, R_T3*R_SZ(sp)
        STREG t4, R_T4*R_SZ(sp)
        STREG t5, R_T5*R_SZ(sp)
        STREG t6, R_T6*R_SZ(sp)
        STREG t7, R_T7*R_SZ(sp)
        mflo  t0
        STREG t8, R_T8*R_SZ(sp)
        STREG t0, R_MDLO*R_SZ(sp)
        STREG t9, R_T9*R_SZ(sp)
        mfhi  t0
        STREG gp, R_GP*R_SZ(sp)
        STREG t0, R_MDHI*R_SZ(sp)
        STREG fp, R_FP*R_SZ(sp)

        .set noat
        STREG AT, R_AT*R_SZ(sp)
        .set at

        MFC0     t0,C0_SR
        MFC0     t1,C0_EPC
        STREG    t0,R_SR*R_SZ(sp)
        STREG    t1,R_EPC*R_SZ(sp)


#ifdef INSTRUMENT_EXECUTING_THREAD
        lw t2, _Thread_Executing
        NOP
        sw t2, 0x8001FFF0
#endif

        /* determine if an interrupt generated this exception */

        MFC0     t0,C0_CAUSE
        NOP

        and      t1,t0,CAUSE_EXCMASK
        beq      t1, 0, _ISR_Handler_1
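        /*
        ** CAUSE_EXCMASK isolates the ExcCode field of the Cause register;
        ** a value of 0 means "interrupt", so we branch to the interrupt
        ** path at _ISR_Handler_1, while any other code falls through to
        ** the synchronous exception path below.
        */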

_ISR_Handler_Exception:

        /*
        sw      t0,0x8001FF00
        sw      t1,0x8001FF04
        */

        /*  If we return from the exception, it is assumed nothing
         *  bad is going on and we can continue to run normally.
         *  But we want to save the entire CPU context so exception
         *  handlers can look at it and change it.
         *
         *  NOTE: This is the path the debugger stub will take.
         */

        /* already got t0 = cause in the interrupt test above */
        STREG    t0,R_CAUSE*R_SZ(sp)

        STREG    sp, R_SP*R_SZ(sp)

        STREG    s0,R_S0*R_SZ(sp)     /* save s0 - s7 */
        STREG    s1,R_S1*R_SZ(sp)
        STREG    s2,R_S2*R_SZ(sp)
        STREG    s3,R_S3*R_SZ(sp)
        STREG    s4,R_S4*R_SZ(sp)
        STREG    s5,R_S5*R_SZ(sp)
        STREG    s6,R_S6*R_SZ(sp)
        STREG    s7,R_S7*R_SZ(sp)

        /* CP0 special registers */

#if __mips == 1
        MFC0     t0,C0_TAR
#endif
        MFC0     t1,C0_BADVADDR

#if __mips == 1
        STREG    t0,R_TAR*R_SZ(sp)
#else
        NOP
#endif
        STREG    t1,R_BADVADDR*R_SZ(sp)

#if ( CPU_HARDWARE_FP == TRUE )
        MFC0     t0,C0_SR                 /* FPU is enabled, save state */
        NOP
        srl      t0,t0,16
        andi     t0,t0,(SR_CU1 >> 16)
        beqz     t0, 1f
        NOP

        la       a1,R_F0*R_SZ(sp)
        jal      _CPU_Context_save_fp_from_exception
        NOP
        MFC1     t0,C1_REVISION
        MFC1     t1,C1_STATUS
        STREG    t0,R_FEIR*R_SZ(sp)
        STREG    t1,R_FCSR*R_SZ(sp)

1:
#endif

        move     a0,sp
        jal      mips_vector_exceptions
        NOP


        /*
        ** note, if the exception vector returns, rely on it to have
        ** adjusted EPC so we will return to some correct address.  If
        ** this is not done, we might get stuck in an infinite loop because
        ** we'll return to the instruction where the exception occurred and
        ** it could throw again.
        **
        ** It is expected the only code using the exception processing is
        ** either the gdb stub or some user code which is either going to
        ** panic or do something useful.
        */


/* *********************************************************************
        * compute the address of the instruction we'll return to *

        LDREG   t1, R_CAUSE*R_SZ(sp)
        LDREG   t0, R_EPC*R_SZ(sp)

        * first see if the exception happened in the delay slot *
        li      t3,CAUSE_BD
        AND     t4,t1,t3
        beqz    t4,excnodelay
        NOP

        * it did, now see if the branch occurred or not *
        li      t3,CAUSE_BT
        AND     t4,t1,t3
        beqz    t4,excnobranch
        NOP

        * branch was taken, we resume at the branch target *
        LDREG   t0, R_TAR*R_SZ(sp)
        j       excreturn
        NOP

excnobranch:
        ADDU    t0,R_SZ

excnodelay:
        ADDU    t0,R_SZ

excreturn:
        STREG   t0, R_EPC*R_SZ(sp)
        NOP
********************************************************************* */


 /* if we're returning into mips_break, move to the next instruction */

        LDREG   t0,R_EPC*R_SZ(sp)
        la      t1,mips_break
        xor     t2,t0,t1
        bnez    t2,3f

        addu    t0,R_SZ
        STREG   t0,R_EPC*R_SZ(sp)
        NOP
3:




#if ( CPU_HARDWARE_FP == TRUE )
        MFC0     t0,C0_SR               /* FPU is enabled, restore state */
        NOP
        srl      t0,t0,16
        andi     t0,t0,(SR_CU1 >> 16)
        beqz     t0, 2f
        NOP

        la       a1,R_F0*R_SZ(sp)
        jal      _CPU_Context_restore_fp_from_exception
        NOP
        LDREG    t0,R_FEIR*R_SZ(sp)
        LDREG    t1,R_FCSR*R_SZ(sp)
        MTC1     t0,C1_REVISION
        MTC1     t1,C1_STATUS
2:
#endif
        LDREG    s0,R_S0*R_SZ(sp)    /* restore s0 - s7 */
        LDREG    s1,R_S1*R_SZ(sp)
        LDREG    s2,R_S2*R_SZ(sp)
        LDREG    s3,R_S3*R_SZ(sp)
        LDREG    s4,R_S4*R_SZ(sp)
        LDREG    s5,R_S5*R_SZ(sp)
        LDREG    s6,R_S6*R_SZ(sp)
        LDREG    s7,R_S7*R_SZ(sp)

        /* do NOT restore the sp as this could mess up the world */
        /* do NOT restore the cause as this could mess up the world */

        j        _ISR_Handler_exit
        NOP

_ISR_Handler_1:

        MFC0     t1,C0_SR
        and      t0,CAUSE_IPMASK
        and      t0,t1

        /* external interrupt not enabled, ignore */
        /* but if it's not an exception or an interrupt, */
        /* then where did it come from??? */

        beq      t0,zero,_ISR_Handler_exit




  /*
   *  save some or all context on stack
   *  may need to save some special interrupt information for exit
   *
   *  #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
   *    if ( _ISR_Nest_level == 0 )
   *      switch to software interrupt stack
   *  #endif
   */

  /*
   *  _ISR_Nest_level++;
   */
        LDREG  t0,_ISR_Nest_level
        NOP
        ADD    t0,t0,1
        STREG  t0,_ISR_Nest_level
  /*
   *  _Thread_Dispatch_disable_level++;
   */
        LDREG  t1,_Thread_Dispatch_disable_level
        NOP
        ADD    t1,t1,1
        STREG  t1,_Thread_Dispatch_disable_level

  /*
   *  Call the CPU model or BSP specific routine to decode the
   *  interrupt source and actually vector to device ISR handlers.
   */

#ifdef INSTRUMENT_ISR_VECTORING
        NOP
        li      t1, 1
        sw      t1, 0x8001e000
#endif

        move     a0,sp
        jal      mips_vector_isr_handlers
        NOP

#ifdef INSTRUMENT_ISR_VECTORING
        li      t1, 0
        sw      t1, 0x8001e000
        NOP
#endif

  /*
   *  --_ISR_Nest_level;
   */
        LDREG  t2,_ISR_Nest_level
        NOP
        ADD    t2,t2,-1
        STREG  t2,_ISR_Nest_level
  /*
   *  --_Thread_Dispatch_disable_level;
   */
        LDREG  t1,_Thread_Dispatch_disable_level
        NOP
        ADD    t1,t1,-1
        STREG  t1,_Thread_Dispatch_disable_level
  /*
   *  if ( _Thread_Dispatch_disable_level || _ISR_Nest_level )
   *    goto the label "exit interrupt (simple case)"
   */
        or  t0,t2,t1
        bne t0,zero,_ISR_Handler_exit
        NOP




  /*
   *  #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
   *    restore stack
   *  #endif
   *
   *  if ( !_Context_Switch_necessary && !_ISR_Signals_to_thread_executing )
   *    goto the label "exit interrupt (simple case)"
   */
        LDREG t0,_Context_Switch_necessary
        LDREG t1,_ISR_Signals_to_thread_executing
        NOP
        or    t0,t0,t1
        beq   t0,zero,_ISR_Handler_exit
        NOP



#ifdef INSTRUMENT_EXECUTING_THREAD
        lw      t0,_Thread_Executing
        NOP
        sw      t0,0x8001FFF4
#endif

/*
** Turn on interrupts before entering Thread_Dispatch which
** will run for a while, thus allowing new interrupts to
** be serviced.  Observe the Thread_Dispatch_disable_level interlock
** that prevents recursive entry into Thread_Dispatch.
*/

        MFC0    t0, C0_SR
#if __mips == 3
        li      t1,SR_EXL | SR_IE
#elif __mips == 1
        li      t1,SR_IEC
#endif
        or      t0, t1
        MTC0    t0, C0_SR
        NOP

        /* save off our stack frame so the context switcher can get to it */
        la      t0,__exceptionStackFrame
        STREG   sp,(t0)

        jal     _Thread_Dispatch
        NOP

        /* and make sure it's clear in case we didn't dispatch.  if we did, it's
        ** already cleared */
        la      t0,__exceptionStackFrame
        STREG   zero,(t0)
        NOP

/*
** turn interrupts back off while we restore context so
** a badly timed interrupt won't accidentally mess things up
*/
        MFC0    t0, C0_SR
#if __mips == 3
        li      t1,SR_EXL | SR_IE
#elif __mips == 1
        /* ints off, current & prev kernel mode on (kernel mode enabled is bit clear..argh!) */
        li      t1,SR_IEC | SR_KUP | SR_KUC
#endif
        not     t1
        and     t0, t1

#if __mips == 1
        /* make sure previous int enable is on  because we're returning from an interrupt
        ** which means interrupts have to be enabled
        */
        li      t1,SR_IEP
        or      t0,t1
#endif
        MTC0    t0, C0_SR
        NOP

#ifdef INSTRUMENT_EXECUTING_THREAD
        lw      t0,_Thread_Executing
        NOP
        sw      t0,0x8001FFF8
#endif


  /*
   *  prepare to get out of interrupt
   *  return from interrupt  (maybe to _ISR_Dispatch)
   *
   *  LABEL "exit interrupt (simple case):"
   *  prepare to get out of interrupt
   *  return from interrupt
   */

_ISR_Handler_exit:
/*
** Skip the SR restore because it's a global register. _CPU_Context_switch_restore
** adjusts it according to each task's configuration.  If we didn't dispatch, the
** SR value isn't changed, so all we need to do is return.
**
*/
        /* restore context from stack */

#ifdef INSTRUMENT_EXECUTING_THREAD
        lw      t0,_Thread_Executing
        NOP
        sw      t0, 0x8001FFFC
#endif
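
        /*
        ** The HI/LO restores below are interleaved with the GPR loads so
        ** that each mtlo/mthi consumes a value loaded two instructions
        ** earlier, covering the MIPS1 load delay slot without extra NOPs.
        */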

        LDREG t8, R_MDLO*R_SZ(sp)
        LDREG t0, R_T0*R_SZ(sp)
        mtlo  t8
        LDREG t8, R_MDHI*R_SZ(sp)
        LDREG t1, R_T1*R_SZ(sp)
        mthi  t8
        LDREG t2, R_T2*R_SZ(sp)
        LDREG t3, R_T3*R_SZ(sp)
        LDREG t4, R_T4*R_SZ(sp)
        LDREG t5, R_T5*R_SZ(sp)
        LDREG t6, R_T6*R_SZ(sp)
        LDREG t7, R_T7*R_SZ(sp)
        LDREG t8, R_T8*R_SZ(sp)
        LDREG t9, R_T9*R_SZ(sp)
        LDREG gp, R_GP*R_SZ(sp)
        LDREG fp, R_FP*R_SZ(sp)
        LDREG ra, R_RA*R_SZ(sp)
        LDREG a0, R_A0*R_SZ(sp)
        LDREG a1, R_A1*R_SZ(sp)
        LDREG a2, R_A2*R_SZ(sp)
        LDREG a3, R_A3*R_SZ(sp)
        LDREG v1, R_V1*R_SZ(sp)
        LDREG v0, R_V0*R_SZ(sp)

        LDREG     k1, R_EPC*R_SZ(sp)

        .set noat
        LDREG     AT, R_AT*R_SZ(sp)
        .set at

        ADDIU     sp,sp,EXCP_STACK_SIZE
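        /*
        ** Return to the interrupted code: jump to the saved EPC (now in k1)
        ** with rfe in the jump's delay slot; on MIPS1 rfe pops the KU/IE
        ** status stack, restoring the pre-exception interrupt/mode state.
        */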
        j         k1
        rfe
        NOP

       .set    reorder
ENDFRAME(_ISR_Handler)




FRAME(mips_break,sp,0,ra)
        .set noreorder
        break   0x0     /* this statement must be first in this function, assumed so by mips-stub.c */
        NOP
        j       ra
        NOP
       .set    reorder
ENDFRAME(mips_break)
