/*  cpu_asm.c  ===> cpu_asm.S or cpu_asm.s
 *
 *  This file contains the basic algorithms for all assembly code used
 *  in a specific CPU port of RTEMS.  These algorithms must be implemented
 *  in assembly language.
 *
 *  NOTE:  This is supposed to be a .S or .s file NOT a C file.
 *
 *  COPYRIGHT (c) 1989-1999.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.OARcorp.com/rtems/license.html.
 *
 *  This file was adapted from the no_bsp board library of the RTEMS distribution.
 *  The body has been modified for the Bender Or1k implementation by
 *  Chris Ziomkowski. <chris@asics.ws>
 */

/*
 *  This is supposed to be an assembly file.  This means that system.h
 *  and cpu.h should not be included in a "real" cpu_asm file.  An
 *  implementation in assembly should include "cpu_asm.h".
 */

#include <rtems/system.h>
#include <rtems/score/cpu.h>
/* #include "cpu_asm.h" */

/*
 *  _CPU_Context_save_fp_context
 *
 *  This routine is responsible for saving the FP context
 *  at *fp_context_ptr.  If the pointer to load the FP context
 *  from is changed then the pointer is modified by this routine.
 *
 *  Sometimes a macro implementation of this is in cpu.h which dereferences
 *  the ** and a similarly named routine in this file is passed something
 *  like a (Context_Control_fp *).  The general rule on making this decision
 *  is to avoid writing assembly language.
 *
 *  Or1k Specific Information:
 *
 *  This implementation of RTEMS considers the concept of
 *  "fast context switching", as defined in the or1k architecture
 *  specification. Whether or not this makes a significant
 *  impact on speed is dubious; however, it is not a significant
 *  impediment to include it. It probably wastes a few cycles on
 *  every floating point context switch.
 *
 *  This implementation will currently not work on a processor where
 *  the integer unit and floating point unit are not the same size. I
 *  am waiting on an architecture change to make this feasible. It
 *  should work fine on 64 bit architectures, except for the fact that
 *  the variables are declared as 32 bits. This shouldn't really make
 *  a difference, as the fact that they must be registers should force
 *  them into a 64 bit word anyway.
 *
 *  The decision as to whether to do 32 or 64 bit saves is performed
 *  at run time based on the configuration of the CPUCFGR register. This
 *  takes a performance hit of a few cycles, but this should be a very
 *  small percentage of the total number of cycles necessary to do the
 *  save, and doesn't require special code for 32 or 64 bit versions.
 *
 *  ADDITIONAL INFORMATION:
 *
 *  It has been unanimously agreed that floating point will not be
 *  included in the initial releases of the Or1k chips, and that
 *  significant changes to the floating point architecture may
 *  occur before any such release will ever be implemented. The code
 *  below is therefore never called and never used.
 */
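
/*
 *  Illustrative sketch only (never compiled in, not part of the original
 *  port):  the run-time decision the assembly below makes, expressed in
 *  C.  The SPR number (CPUCFGR = 0x02), the OF32S/OV64S/OF64S mask
 *  (0x380) and the OF32S-only value (0x080) are taken from the assembly;
 *  the helper name is hypothetical.
 */
#if 0
static int _Or1k_fp_save_width( void )
{
  register unsigned32 cpucfgr;

  /* CPUCFGR is SPR 0x02, read the same way the routine below reads it */
  asm volatile ( "l.mfspr  %0,r0,0x02" : "=r" (cpucfgr) );

  if ( ( cpucfgr & 0x380 ) == 0 )
    return 0;      /* no FPU/vector unit configured: nothing to save        */
  if ( ( cpucfgr & 0x380 ) == 0x080 )
    return 4;      /* OF32S only: single precision, 4 bytes per VFR (l.sw)  */
  return 8;        /* OV64S/OF64S: double precision, 8 bytes per VFR (l.sd) */
}
#endif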

void _CPU_Context_save_fp(
  void **fp_context_ptr
)
{
  register unsigned32 temp;
  register unsigned32 address = (unsigned32)(*fp_context_ptr);
  register unsigned32 xfer;
  register unsigned32 loop;

  /* %0 is a temporary register which is used for several
     values throughout the code. %3 contains the address
     to save the context, and is modified during the course
     of the context save. %1 is a second dummy register
     which is used during transfer of the floating point
     value to memory. %2 is an end of loop marker which
     is compared against the pointer %3. */

  asm volatile ("l.mfspr  %0,r0,0x02    \n\t"  /* CPUCFGR */
               "l.andi   %0,%0,0x380   \n\t"  /* OF32S or OV64S or OF64S */
               "l.sfnei  %0,0x0        \n\t"
               "l.bf     _L_nofps      \n\t"  /* exit if no floating point  */
               "l.sfeqi  %0,0x080      \n\t"  /* (DELAY) single precision?  */
               "l.mfspr  %0,r0,0x11    \n\t"  /* Load Status Register       */
               "l.srli   %0,%0,58      \n\t"  /* Move CID into low byte*32  */
               "l.bnf    _L_spfp_loops \n\t"  /* Branch on single precision */
               "l.addi   %2,%0,0x20    \n"    /* Terminating condition      */
               /**** Double Precision Floating Point Section ****/
               "_L_dpfp_loops:         \n\t"
               "l.mfspr  %1,%0,0x600   \n\t"  /* Load VFRx                  */
               "l.sd     0(%3),%1      \n\t"  /* Save VFRx                  */
               "l.addi   %0,%0,0x01    \n\t"  /* Increment counter          */
               "l.sfeq   %0,%2         \n\t"  /* Branch if incomplete       */
               "l.bf     _L_dpfp_loops \n\t"
               "l.addi   %3,%3,0x08    \n\t"  /* (DELAY) update pointer     */
               "l.bnf    _L_nofps      \n\t"  /* exit                       */
               "l.nop                  \n"
               /**** Single Precision Floating Point Section ****/
               "_L_spfp_loops:         \n\t"
               "l.mfspr  %1,%0,0x600   \n\t"  /* Load VFRx                  */
               "l.sw     0(%3),%1      \n\t"  /* Save VFRx                  */
               "l.addi   %0,%0,0x01    \n\t"  /* Increment counter          */
               "l.sfeq   %0,%2         \n\t"  /* Branch if incomplete       */
               "l.bf     _L_spfp_loops \n\t"
               "l.addi   %3,%3,0x04    \n"    /* (DELAY) update pointer     */
               "_L_nofps:              \n\t"  /* End of context save        */
                : "=&r" (temp), "=r" (xfer), "=&r" (loop), "+r" (address));
}

/*
 *  _CPU_Context_restore_fp_context
 *
 *  This routine is responsible for restoring the FP context
 *  at *fp_context_ptr.  If the pointer to load the FP context
 *  from is changed then the pointer is modified by this routine.
 *
 */

void _CPU_Context_restore_fp(
  void **fp_context_ptr
)
{
  register unsigned32 temp;
  register unsigned32 address = (unsigned32)(*fp_context_ptr);
  register unsigned32 xfer;
  register unsigned32 loop;

  /* The reverse of _CPU_Context_save_fp */
  /* %0 is a temporary register which is used for several
     values throughout the code. %1 contains the address
     to restore the context from, and is modified during the course
     of the context restore. %2 is a second dummy register
     which is used during transfer of the floating point
     value from memory. %3 is an end of loop marker which
     is compared against the pointer %1. */

  asm volatile ("l.mfspr  %0,r0,0x02    \n\t"  /* CPUCFGR */
               "l.andi   %0,%0,0x380   \n\t"  /* OF32S or OV64S or OF64S */
               "l.sfnei  %0,0x0        \n\t"
               "l.bf     _L_nofpr      \n\t"  /* exit if no floating point  */
               "l.sfeqi  %0,0x080      \n\t"  /* (DELAY) single precision?  */
               "l.mfspr  %0,r0,0x11    \n\t"  /* Load Status Register       */
               "l.srli   %0,%0,58      \n\t"  /* Move CID into low byte*32  */
               "l.bnf    _L_spfp_loopr \n\t"  /* Branch on single precision */
               "l.addi   %3,%0,0x20    \n"    /* Terminating condition      */
               /**** Double Precision Floating Point Section ****/
               "_L_dpfp_loopr:         \n\t"
               "l.mfspr  %2,%0,0x600   \n\t"  /* Load VFRx                  */
               "l.sd     0(%1),%2      \n\t"  /* Save VFRx                  */
               "l.addi   %0,%0,0x01    \n\t"  /* Increment counter          */
               "l.sfeq   %0,%3         \n\t"  /* Branch if incomplete       */
               "l.bf     _L_dpfp_loopr \n\t"
               "l.addi   %1,%1,0x08    \n\t"  /* (DELAY) update pointer     */
               "l.bnf    _L_nofpr      \n\t"  /* exit                       */
               "l.nop                  \n"
               /**** Single Precision Floating Point Section ****/
               "_L_spfp_loopr:         \n\t"
               "l.mfspr  %2,%0,0x600   \n\t"  /* Load VFRx                  */
               "l.sw     0(%1),%2      \n\t"  /* Save VFRx                  */
               "l.addi   %0,%0,0x01    \n\t"  /* Increment counter          */
               "l.sfeq   %0,%3         \n\t"  /* Branch if incomplete       */
               "l.bf     _L_spfp_loopr \n\t"
               "l.addi   %1,%1,0x04    \n"    /* (DELAY) update pointer     */
               "_L_nofpr:              \n\t"  /* End of context restore     */
               : "=&r" (temp), "+r" (address), "=r" (xfer), "=&r" (loop));
}

/*  _CPU_Context_switch
 *
 *  This routine performs a normal non-FP context switch.
 *
 *  Or1k Specific Information:
 *
 *  XXX document implementation including references if appropriate
 */

void _CPU_Context_switch(
  Context_Control  *run,
  Context_Control  *heir
)
{
  register unsigned32 temp1 = 0;
  register unsigned32 temp2 = 0;

  /* This function is really tricky. When this function is called,
     we should save our state as we need it, and then grab the
     new state from the pointer. We then do a longjump to this
     code, replacing the current stack pointer with the new
     environment. This function never returns. Instead, at some
     later time, another caller will invoke context switch with
     our pointer in the heir variable, and they will longjump
     to us. We will then continue. Let's see how this works... */

  /* Technically, we could probably not worry about saving r3
     and r4, since these are parameters guaranteed to be saved
     by the calling function. We could also probably get away
     without saving r11, as that is filled in by the return
     statement. But as a first cut I'm in favor of just saving
     everything.... */

  /* We could be more efficient and use compile time directives
     for 32 or 64 bit, but this will allow the code to run on
     everything without modification. Feel free to comment the
     relevant sections out if you don't need them. */

  /* We should probably write this whole routine in assembly
     so that we can have separate entry points for self restore
     or context switch. You can't jump to local labels from
     inline assembly across function calls, and I don't feel
     like embedding all the .global directives here...it really
     screws up the debugger. Oh well, what's 2 more instructions
     and a branch really cost... */

  /* One thing which we should do is check for 32 or 64 bit models
     first, and then do one branch to the appropriate code section.
     Currently, we check the architecture bit in CPUCFGR twice. Once
     during the load section and again during restore. That is inefficient,
     and considering this code is huge anyway, saving the few bytes
     simply doesn't make any practical sense. FIX THIS LATER. */

  /* Note that this routine assumes software context switches are
     done with the same CID. In other words, it will not manage
     the CIDs and assign a new one as necessary. If you tell it
     to restore a context at CID 2, and the current one is at CID
     4, it will do what it is told. It will overwrite the registers
     for context ID 2, meaning they are irretrievably lost. I hope
     you saved them earlier.... */

  /* Note that you can have a context jump anywhere you want, although
     by default we will jump to the _L_restore label. If you then modify
     the location in the Context_Control structure, it will continue
     wherever you told it to go. Note, however, that you had better
     also have cleaned up the stack and frame pointers, because
     they are probably still set with the values obtained from
     entering this function... */
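
  /* Illustrative sketch only (never compiled in):  the 32 bit layout of
     Context_Control that the offsets in the assembly below imply.  The
     real definition lives in <rtems/score/cpu.h>; the field names here
     are made up for illustration.  In the 64 bit path the GPRs occupy
     8 bytes each (offsets 16..256) and the resume PC sits at offset
     264 instead of 140. */
#if 0
  typedef struct {               /* hypothetical mirror of Context_Control */
    unsigned32 sr;               /*   0: status register                   */
    unsigned32 epcr;             /*   4: exception PC                      */
    unsigned32 eear;             /*   8: exception effective address       */
    unsigned32 esr;              /*  12: exception status register         */
    unsigned32 gpr[31];          /*  16: r1 .. r31, 4 bytes each           */
    unsigned32 pc;               /* 140: where the context resumes         */
  } or1k_context_layout_sketch;  /*      (by default the _L_restore label) */
#endif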

  asm volatile ("l.sfeqi  %3,0x0        \n\t"  /* Is this a self restore? */
               "l.bf     _L_restore    \n\t"  /* Yes it is...go there */
               "l.nop                  \n\t"

               "l.lwz    %0,0(%3)      \n\t"  /* Prefetch new context */
               "l.mfspr  %2,r0,0x11    \n\t"  /* Status Register */
               "l.sw     0(%1),%2      \n\t"  /* Save it */
               "l.srli   %2,%2,28      \n\t"  /* Move CID into low byte */
               "l.mfspr  %0,%2,0x20    \n\t"  /* Offset from EPCR */
               "l.sw     4(%1),%0      \n\t"  /* Store it */
               "l.mfspr  %0,%2,0x30    \n\t"  /* Offset from EEAR */
               "l.sw     8(%1),%0      \n\t"  /* Store it */
               "l.mfspr  %0,%2,0x40    \n\t"  /* Offset from ESR */
               "l.sw     12(%1),%0     \n\t"  /* Store it */
               "l.mfspr  %0,r0,0x02    \n\t"  /* CPUCFGR */
               "l.andi   %0,%0,0x40    \n\t"  /* OB64S */
               "l.sfnei  %0,0x0        \n\t"
               "l.bf     _L_64bit      \n\t"  /* 64 bit architecture */
               "l.movhi  %0,hi(_L_restore)\n\t"

               /****  32 bit implementation  ****/
               "l.ori    %0,%0,lo(_L_restore)\n\t"
               "l.sw     140(%1),%0    \n\t"   /* Save the PC */
               "l.lwz    %0,140(%3)    \n\t"   /* New PC. Expect cache miss */
               "l.sw     16(%1),r1     \n\t"
               "l.sw     20(%1),r2     \n\t"
               "l.sw     24(%1),r3     \n\t"
               "l.sw     28(%1),r4     \n\t"
               "l.sw     32(%1),r5     \n\t"
               "l.sw     36(%1),r6     \n\t"
               "l.sw     40(%1),r7     \n\t"
               "l.sw     44(%1),r8     \n\t"
               "l.sw     48(%1),r9     \n\t"
               "l.sw     52(%1),r10    \n\t"
               "l.sw     56(%1),r11    \n\t"
               "l.sw     60(%1),r12    \n\t"
               "l.sw     64(%1),r13    \n\t"
               "l.sw     68(%1),r14    \n\t"
               "l.sw     72(%1),r15    \n\t"
               "l.sw     76(%1),r16    \n\t"
               "l.sw     80(%1),r17    \n\t"
               "l.sw     84(%1),r18    \n\t"
               "l.sw     88(%1),r19    \n\t"
               "l.sw     92(%1),r20    \n\t"
               "l.sw     96(%1),r21    \n\t"
               "l.sw     100(%1),r22   \n\t"
               "l.sw     104(%1),r23   \n\t"
               "l.sw     108(%1),r24   \n\t"
               "l.sw     112(%1),r25   \n\t"
               "l.sw     116(%1),r26   \n\t"
               "l.sw     120(%1),r27   \n\t"
               "l.sw     124(%1),r28   \n\t"
               "l.sw     128(%1),r29   \n\t"
               "l.sw     132(%1),r30   \n\t"
               "l.jr     %0            \n\t"  /* Go there */
               "l.sw     136(%1),r31   \n"    /* Store the last reg */

               /**** 64 bit implementation ****/
               "_L_64bit:              \n\t"
               "l.ori    %0,%0,lo(_L_restore)\n\t"
               "l.sw     264(%1),%0    \n\t"
               "l.sd     16(%1),r1     \n\t"
               "l.sd     24(%1),r2     \n\t"
               "l.sd     32(%1),r3     \n\t"
               "l.sd     40(%1),r4     \n\t"
               "l.sd     48(%1),r5     \n\t"
               "l.sd     56(%1),r6     \n\t"
               "l.sd     64(%1),r7     \n\t"
               "l.sd     72(%1),r8     \n\t"
               "l.sd     80(%1),r9     \n\t"
               "l.sd     88(%1),r10    \n\t"
               "l.sd     96(%1),r11    \n\t"
               "l.sd     104(%1),r12   \n\t"
               "l.sd     112(%1),r13   \n\t"
               "l.sd     120(%1),r14   \n\t"
               "l.sd     128(%1),r15   \n\t"
               "l.sd     136(%1),r16   \n\t"
               "l.sd     144(%1),r17   \n\t"
               "l.sd     152(%1),r18   \n\t"
               "l.sd     160(%1),r19   \n\t"
               "l.sd     168(%1),r20   \n\t"
               "l.sd     176(%1),r21   \n\t"
               "l.sd     184(%1),r22   \n\t"
               "l.sd     192(%1),r23   \n\t"
               "l.sd     200(%1),r24   \n\t"
               "l.sd     208(%1),r25   \n\t"
               "l.sd     216(%1),r26   \n\t"
               "l.sd     224(%1),r27   \n\t"
               "l.sd     232(%1),r28   \n\t"
               "l.sd     240(%1),r29   \n\t"
               "l.sd     248(%1),r30   \n\t"
               "l.jr     %0            \n\t"  /* Go to the new PC */
               "l.sd     256(%1),r31   \n"    /* Store the last reg */

               /**** The restoration routine. ****/

               /* Note that when we return from this function,
                  we will actually be returning to a different
                  context than when we left. The debugger might
                  have conniptions over this, but we'll have to
                  reengineer that later. The stack and status
                  registers will all be changed; however, we
                  will not touch the global interrupt mask. */

               /* Also note, when doing any restore, the most
                  important registers are r1, r2, and r9. These
                  will be accessed immediately upon exiting the
                  routine, and so we want to make sure we load
                  them as early as possible in case they are
                  not in cache. */

               "_L_restore:            \n\t"  /* Restore "heir" */
               "l.mfspr  %2,r0,0x11    \n\t"  /* Status Register */
               "l.movhi  %0,0x07FF     \n\t"  /* ~SR mask  */
               "l.ori    %0,%0,0xD1FF  \n\t"
               "l.and    %2,%0,%2      \n\t"  /* save the global bits */
               "l.movhi  %0,0xF800     \n\t"  /* SR mask  */
               "l.ori    %0,%0,0x2E00  \n\t"
               "l.lwz    %1,0(%3)      \n\t"  /* Get the previous SR */
               "l.and    %0,%1,%0      \n\t"  /* Mask out the global bits */
               "l.or     %2,%2,%0      \n\t"  /* Combine local/global */
               "l.mtspr  r0,%2,0x11    \n\t"  /* Restore the status register */

               "l.mfspr  %0,r0,0x02    \n\t"  /* CPUCFGR */
               "l.andi   %0,%0,0x40    \n\t"  /* OB64S */
               "l.sfnei  %0,0x0        \n\t"  /* Save the 64 bit flag */

               "l.srli   %2,%2,28      \n\t"  /* Move CID into low byte */
               "l.lwz    %0,4(%3)      \n\t"
               "l.mtspr  %2,%0,0x20    \n\t"  /* Offset from EPCR */
               "l.lwz    %0,8(%3)      \n\t"
               "l.mtspr  %2,%0,0x30    \n\t"  /* Offset from EEAR */
               "l.lwz    %0,12(%3)     \n\t"

               "l.bf     _L_r64bit     \n\t"  /* 64 bit architecture */
               "l.mtspr  %2,%0,0x40    \n\t"  /* Offset from ESR (DELAY) */

               /**** 32 bit restore ****/
               "l.lwz   r1,16(%3)      \n\t"
               "l.lwz   r2,20(%3)      \n\t"
               "l.lwz   r9,48(%3)      \n\t"
               "l.lwz   r3,24(%3)      \n\t"
               "l.lwz   r4,28(%3)      \n\t"
               "l.lwz   r5,32(%3)      \n\t"
               "l.lwz   r6,36(%3)      \n\t"
               "l.lwz   r7,40(%3)      \n\t"
               "l.lwz   r8,44(%3)      \n\t"
               "l.lwz   r10,52(%3)     \n\t"
               "l.lwz   r11,56(%3)     \n\t"
               "l.lwz   r12,60(%3)     \n\t"
               "l.lwz   r13,64(%3)     \n\t"
               "l.lwz   r14,68(%3)     \n\t"
               "l.lwz   r15,72(%3)     \n\t"
               "l.lwz   r16,76(%3)     \n\t"
               "l.lwz   r17,80(%3)     \n\t"
               "l.lwz   r18,84(%3)     \n\t"
               "l.lwz   r19,88(%3)     \n\t"
               "l.lwz   r20,92(%3)     \n\t"
               "l.lwz   r21,96(%3)     \n\t"
               "l.lwz   r22,100(%3)    \n\t"
               "l.lwz   r23,104(%3)    \n\t"
               "l.lwz   r24,108(%3)    \n\t"
               "l.lwz   r25,112(%3)    \n\t"
               "l.lwz   r26,116(%3)    \n\t"
               "l.lwz   r27,120(%3)    \n\t"
               "l.lwz   r28,124(%3)    \n\t"
               "l.lwz   r29,128(%3)    \n\t"
               "l.lwz   r30,132(%3)    \n\t"
               "l.j     _L_return      \n\t"
               "l.lwz   r31,136(%3)    \n"

               /****  64 bit restore ****/
               "_L_r64bit:             \n\t"
               "l.ld    r1,16(%3)      \n\t"
               "l.ld    r2,24(%3)      \n\t"
               "l.ld    r9,80(%3)      \n\t"
               "l.ld    r3,32(%3)      \n\t"
               "l.ld    r4,40(%3)      \n\t"
               "l.ld    r5,48(%3)      \n\t"
               "l.ld    r6,56(%3)      \n\t"
               "l.ld    r7,64(%3)      \n\t"
               "l.ld    r8,72(%3)      \n\t"
               "l.ld    r10,88(%3)     \n\t"
               "l.ld    r11,96(%3)     \n\t"
               "l.ld    r12,104(%3)    \n\t"
               "l.ld    r13,112(%3)    \n\t"
               "l.ld    r14,120(%3)    \n\t"
               "l.ld    r15,128(%3)    \n\t"
               "l.ld    r16,136(%3)    \n\t"
               "l.ld    r17,144(%3)    \n\t"
               "l.ld    r18,152(%3)    \n\t"
               "l.ld    r19,160(%3)    \n\t"
               "l.ld    r20,168(%3)    \n\t"
               "l.ld    r21,176(%3)    \n\t"
               "l.ld    r22,184(%3)    \n\t"
               "l.ld    r23,192(%3)    \n\t"
               "l.ld    r24,200(%3)    \n\t"
               "l.ld    r25,208(%3)    \n\t"
               "l.ld    r26,216(%3)    \n\t"
               "l.ld    r27,224(%3)    \n\t"
               "l.ld    r28,232(%3)    \n\t"
               "l.ld    r29,240(%3)    \n\t"
               "l.ld    r30,248(%3)    \n\t"
               "l.ld    r31,256(%3)    \n"

               "_L_return:             \n\t"  /* End of routine */

                : "=&r" (temp1), "+r" (run), "=&r" (temp2)
                : "r" (heir));

  /* Note that some registers were used for parameter passing and
     temporary registers (temp1 and temp2). These values were
     saved and restored across context calls, but the values that
     the caller needs should have been stored on the stack. The
     C code should now restore these from the stack, since r1 and
     r2 have been restored, and return to the location specified
     by r9. Then, all should be happy in the world. */
}

/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
 *
 *  NOTE: May be unnecessary to reload some registers.
 *
 *  Or1k Specific Information:
 *
 *  In our implementation, this simply redirects to _CPU_Context_switch.
 */

void _CPU_Context_restore(
  Context_Control  *run
)
{
  _CPU_Context_switch(run,NULL);
}


/*  void __ISR_Handler()
 *
 *  This routine provides the RTEMS interrupt management.
 *
 *  Or1k Specific Information:
 *
 *  Based on the Or1k interrupt architecture described in chapter 16
 *  and the exception architecture described in chapter 9.
 */

void _ISR_Handler(unsigned32 vector,unsigned32 ProgramCounter,
                  unsigned32 EffectiveAddress,unsigned32 StatusRegister)
{
   /*
    *  This discussion ignores a lot of the ugly details in a real
    *  implementation such as saving enough registers/state to be
    *  able to do something real.  Keep in mind that the goal is
    *  to invoke a user's ISR handler which is written in C and
    *  uses a certain set of registers.
    *
    *  Also note that the exact order is to a large extent flexible.
    *  Hardware will dictate a sequence for a certain subset of
    *  _ISR_Handler while requirements for setting
    */

  /*
   *  At entry to "common" _ISR_Handler, the vector number must be
   *  available.  On some CPUs the hardware puts either the vector
   *  number or the offset into the vector table for this ISR in a
   *  known place.  If the hardware does not give us this information,
   *  then the assembly portion of RTEMS for this port will contain
   *  a set of distinct interrupt entry points which somehow place
   *  the vector number in a known place (which is safe if another
   *  interrupt nests this one) and branches to _ISR_Handler.
   *
   *  save some or all context on stack
   *  may need to save some special interrupt information for exit
   *
   *  #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
   *    if ( _ISR_Nest_level == 0 )
   *      switch to software interrupt stack
   *  #endif
   *
   *  _ISR_Nest_level++;
   *
   *  _Thread_Dispatch_disable_level++;
   *
   *  (*_ISR_Vector_table[ vector ])( vector );
   *
   *  --_ISR_Nest_level;
   *
   *  if ( _ISR_Nest_level )
   *    goto the label "exit interrupt (simple case)"
   *
   *  #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
   *    restore stack
   *  #endif
   *
   *  if ( !_Context_Switch_necessary )
   *    goto the label "exit interrupt (simple case)"
   *
   *  if ( !_ISR_Signals_to_thread_executing )
   *    _ISR_Signals_to_thread_executing = FALSE;
   *    goto the label "exit interrupt (simple case)"
   *
   *  call _Thread_Dispatch() or prepare to return to _ISR_Dispatch
   *
   *  prepare to get out of interrupt
   *  return from interrupt  (maybe to _ISR_Dispatch)
   *
   *  LABEL "exit interrupt (simple case)":
   *  prepare to get out of interrupt
   *  return from interrupt
   */
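
  /* Illustrative sketch only (never compiled in):  a condensed C rendering
     of the dispatch core of the pseudo-code above, using the names that
     appear in it.  The interrupt-stack switching and the
     _ISR_Signals_to_thread_executing handling are left out here; see the
     pseudo-code for those details.  On the Or1k this path is never taken,
     since exceptions are dispatched from the BSP startup code (see the
     note below). */
#if 0
  _ISR_Nest_level++;
  _Thread_Dispatch_disable_level++;

  (*_ISR_Vector_table[ vector ])( vector );   /* invoke the user's handler */

  --_ISR_Nest_level;

  if ( _ISR_Nest_level == 0 && _Context_Switch_necessary )
    _Thread_Dispatch();       /* otherwise simply return from the interrupt */
#endif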

  /* In the Or1k architecture, exceptions are handled in the
     startup code of the board support package. Thus, this
     routine is never called. Or1k exception routines are called
     with the following prototype:

     function(int vector#, int PC, int Address, int StatusRegister);

     These parameters are snapshots of the system when the exception
     was encountered. If virtual memory is active, things like the
     PC and Address may have little meaning, as they are referenced
     in physical space, not the virtual space of the process.
  */
}