Line 11... | Line 11...
  *
  * The license and distribution terms for this file may be
  * found in the file LICENSE in this distribution or at
  * http://www.OARcorp.com/rtems/license.html.
  *
  * This file adapted from no_bsp board library of the RTEMS distribution.
  * The body has been modified for the Bender Or1k implementation by
  * Chris Ziomkowski. <chris@asics.ws>
  */
 
 /*
  * This is supposed to be an assembly file. This means that system.h
  * and cpu.h should not be included in a "real" cpu_asm file. An
Line 77... | Line 80...
   register unsigned32 address = (unsigned32)(*fp_context_ptr);
   register unsigned32 xfer;
   register unsigned32 loop;
 
   /* %0 is a temporary register which is used for several
-     values throughout the code. %1 contains the address
+     values throughout the code. %3 contains the address
      to save the context, and is modified during the course
-     of the context save. %2 is a second dummy register
+     of the context save. %1 is a second dummy register
      which is used during transfer of the floating point
-     value to memory. %3 is an end of loop marker which
-     is compared against the pointer %1. */
+     value to memory. %2 is an end of loop marker which
+     is compared against the pointer %3. */
 
-  volatile asm("l.mfspr %0,r0,0x02 \n\t" /* CPUCFGR */
+  asm volatile ("l.mfspr %0,r0,0x02 \n\t" /* CPUCFGR */
     "l.andi %0,%0,0x380 \n\t" /* OF32S or OV64S or OF64S */
     "l.sfnei %0,0x0 \n\t"
-    "l.bf L_nofp \n\t" /* exit if no floating point */
-    "l.sfeq %0,0x080 \n\t" /* (DELAY) single precision? */
+    "l.bf _L_nofps \n\t" /* exit if no floating point */
+    "l.sfeqi %0,0x080 \n\t" /* (DELAY) single precision? */
     "l.mfspr %0,r0,0x11 \n\t" /* Load Status Register */
     "l.srli %0,%0,58 \n\t" /* Move CID into low byte*32 */
-    "l.bt L_spfp \n\t" /* Branch on single precision */
-    "l.addi %3,%0,0x20 \n" /* Terminating condition */
+    "l.bnf _L_spfp_loops \n\t" /* Branch on single precision */
+    "l.addi %2,%0,0x20 \n" /* Terminating condition */
     /**** Double Precision Floating Point Section ****/
-    "L_dpfp_loop \n\t"
-    "l.mfspr %2,%0,0x600 \n\t" /* Load VFRx */
-    "l.sd 0(%1),%2 \n\t" /* Save VFRx */
+    "_L_dpfp_loops: \n\t"
+    "l.mfspr %1,%0,0x600 \n\t" /* Load VFRx */
+    "l.sd 0(%3),%1 \n\t" /* Save VFRx */
     "l.addi %0,%0,0x01 \n\t" /* Increment counter */
-    "l.sfeq %0,%3 \n\t" /* Branch if incomplete */
-    "l.bf L_dpfp_loop \n\t"
-    "l.addi %1,%1,0x08 \n\t" /* (DELAY) update pointer */
-    "l.bt L_nofp \n\t" /* exit */
+    "l.sfeq %0,%2 \n\t" /* Branch if incomplete */
+    "l.bf _L_dpfp_loops \n\t"
+    "l.addi %3,%3,0x08 \n\t" /* (DELAY) update pointer */
+    "l.bnf _L_nofps \n\t" /* exit */
     "l.nop \n"
     /**** Single Precision Floating Point Section ****/
-    "L_spfp_loop \n\t"
-    "l.mfspr %2,%0,0x600 \n\t" /* Load VFRx */
-    "l.sw 0(%1),%2 \n\t" /* Save VFRx */
+    "_L_spfp_loops: \n\t"
+    "l.mfspr %1,%0,0x600 \n\t" /* Load VFRx */
+    "l.sw 0(%3),%1 \n\t" /* Save VFRx */
     "l.addi %0,%0,0x01 \n\t" /* Increment counter */
-    "l.sfeq %0,%3 \n\t" /* Branch if incomplete */
-    "l.bf L_spfp_loop \n\t"
-    "l.addi %1,%1,0x04 \n" /* (DELAY) update pointer */
-    "L_nofp \n\t" /* End of context save */
-    : /* No outputs */
-    : "=&r" (temp), "=r" (xfer), "=&r" (loop), "+r" (address));
+    "l.sfeq %0,%2 \n\t" /* Branch if incomplete */
+    "l.bf _L_spfp_loops \n\t"
+    "l.addi %3,%3,0x04 \n" /* (DELAY) update pointer */
+    "_L_nofps: \n\t" /* End of context save */
+    : "=&r" (temp), "+r" (address), "=r" (xfer), "=&r" (loop));
 }
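The operand-list fix at the end of this hunk is the substantive change: every register the template writes has to be declared as an output, "=&r" for early-clobber scratch registers and "+r" for the pointer the code advances, otherwise GCC is free to keep other live values in those registers. Below is a minimal sketch of the same constraint pattern, assuming an or1k GCC toolchain with branch delay slots; the function is illustrative only and is not part of cpu_asm.c.

/* Fill 'count' (>= 1) words at p with 'value'.  Illustrative only. */
static inline void or1k_fill_words(unsigned32 *p, unsigned32 count,
                                   unsigned32 value)
{
  asm volatile ("1:                   \n\t"
                "l.sw    0(%0),%2     \n\t"   /* *p = value              */
                "l.addi  %1,%1,-1     \n\t"   /* --count                 */
                "l.sfnei %1,0x0       \n\t"   /* flag = (count != 0)     */
                "l.bf    1b           \n\t"   /* loop while flag set     */
                "l.addi  %0,%0,0x04   \n\t"   /* (DELAY) advance pointer */
                : "+r" (p), "+r" (count)      /* read and written        */
                : "r" (value)                 /* read only               */
                : "cc", "memory");
}

Without the "+r" on p and count, the compiler would assume both still hold their original values after the asm, which is exactly the kind of silent corruption the reordered operand list above avoids.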
 
 /*
  * _CPU_Context_restore_fp_context
  *
Line 144... | Line 146...
      of the context save. %2 is a second dummy register
      which is used during transfer of the floating point
      value to memory. %3 is an end of loop marker which
      is compared against the pointer %1. */
 
-  volatile asm("l.mfspr %0,r0,0x02 \n\t" /* CPUCFGR */
+  asm volatile ("l.mfspr %0,r0,0x02 \n\t" /* CPUCFGR */
     "l.andi %0,%0,0x380 \n\t" /* OF32S or OV64S or OF64S */
     "l.sfnei %0,0x0 \n\t"
-    "l.bf L_nofp \n\t" /* exit if no floating point */
-    "l.sfeq %0,0x080 \n\t" /* (DELAY) single precision? */
+    "l.bf _L_nofpr \n\t" /* exit if no floating point */
+    "l.sfeqi %0,0x080 \n\t" /* (DELAY) single precision? */
     "l.mfspr %0,r0,0x11 \n\t" /* Load Status Register */
     "l.srli %0,%0,58 \n\t" /* Move CID into low byte*32 */
-    "l.bt L_spfp \n\t" /* Branch on single precision */
+    "l.bnf _L_spfp_loopr \n\t" /* Branch on single precision */
     "l.addi %3,%0,0x20 \n" /* Terminating condition */
     /**** Double Precision Floating Point Section ****/
-    "L_dpfp_loop \n\t"
+    "_L_dpfp_loopr: \n\t"
     "l.mfspr %2,%0,0x600 \n\t" /* Load VFRx */
     "l.sd 0(%1),%2 \n\t" /* Save VFRx */
     "l.addi %0,%0,0x01 \n\t" /* Increment counter */
     "l.sfeq %0,%3 \n\t" /* Branch if incomplete */
-    "l.bf L_dpfp_loop \n\t"
+    "l.bf _L_dpfp_loopr \n\t"
     "l.addi %1,%1,0x08 \n\t" /* (DELAY) update pointer */
-    "l.bt L_nofp \n\t" /* exit */
+    "l.bnf _L_nofpr \n\t" /* exit */
     "l.nop \n"
     /**** Single Precision Floating Point Section ****/
-    "L_spfp_loop \n\t"
+    "_L_spfp_loopr: \n\t"
     "l.mfspr %2,%0,0x600 \n\t" /* Load VFRx */
     "l.sw 0(%1),%2 \n\t" /* Save VFRx */
     "l.addi %0,%0,0x01 \n\t" /* Increment counter */
     "l.sfeq %0,%3 \n\t" /* Branch if incomplete */
-    "l.bf L_spfp_loop \n\t"
+    "l.bf _L_spfp_loopr \n\t"
     "l.addi %1,%1,0x04 \n" /* (DELAY) update pointer */
-    "L_nofp \n\t" /* End of context save */
-    : /* No outputs */
+    "_L_nofpr: \n\t" /* End of context save */
     : "=&r" (temp), "+r" (address), "=r" (xfer), "=&r" (loop));
 }
 
 /* _CPU_Context_switch
  *
Line 190... | Line 191...
 void _CPU_Context_switch(
   Context_Control *run,
   Context_Control *heir
 )
 {
-  register unsigned32 temp1;
-  register unsigned32 temp2;
+  register unsigned32 temp1 = 0;
+  register unsigned32 temp2 = 0;
 
   /* This function is really tricky. When this function is called,
      we should save our state as we need it, and then grab the
      new state from the pointer. We then do a longjump to this
      code, replacing the current stack pointer with the new
Line 245... | Line 246...
      wherever you told it to go. Note, however, that you had better
      also have cleaned up the stack and frame pointers, because
      they are probably still set with the values obtained from
      entering this function... */
 
-  volatile asm("l.sfei %1,0x0 \n\t" /* Is this a self restore? */
-    "l.bt L_restore \n\t" /* Yes it is...go there */
-    "l.lwz %0,0(%2) \n\t" /* Prefetch new context */
-
-    "l.mfspr %3,r0,0x11 \n\t" /* Status Register */
-    "l.sw 0(%1),%3 \n\t" /* Save it */
-    "l.srli %3,%3,28 \n\t" /* Move CID into low byte */
-    "l.mfspr %0,%3,0x20 \n\t" /* Offset from EPCR */
+  asm volatile ("l.sfeqi %3,0x0 \n\t" /* Is this a self restore? */
+    "l.bf _L_restore \n\t" /* Yes it is...go there */
+    "l.nop \n\t"
+
+    "l.lwz %0,0(%3) \n\t" /* Prefetch new context */
+    "l.mfspr %2,r0,0x11 \n\t" /* Status Register */
+    "l.sw 0(%1),%2 \n\t" /* Save it */
+    "l.srli %2,%2,28 \n\t" /* Move CID into low byte */
+    "l.mfspr %0,%2,0x20 \n\t" /* Offset from EPCR */
     "l.sw 4(%1),%0 \n\t" /* Store it */
-    "l.mfspr %0,%3,0x30 \n\t" /* Offset from EEAR */
+    "l.mfspr %0,%2,0x30 \n\t" /* Offset from EEAR */
     "l.sw 8(%1),%0 \n\t" /* Store it */
-    "l.mfspr %0,%3,0x40 \n\t" /* Offset from ESR */
+    "l.mfspr %0,%2,0x40 \n\t" /* Offset from ESR */
     "l.sw 12(%1),%0 \n\t" /* Store it */
     "l.mfspr %0,r0,0x02 \n\t" /* CPUCFGR */
     "l.andi %0,%0,0x40 \n\t" /* OB64S */
     "l.sfnei %0,0x0 \n\t"
-    "l.bt L_64bit \n\t" /* 64 bit architecture */
-    "l.movhi %0,(L_restore >> 16)\n\t"
+    "l.bf _L_64bit \n\t" /* 64 bit architecture */
+    "l.movhi %0,hi(_L_restore)\n\t"
 
     /**** 32 bit implementation ****/
-    "l.addi %0,%0,(L_restore & 0xFFFF)\n\t"
+    "l.ori %0,%0,lo(_L_restore)\n\t"
     "l.sw 140(%1),%0 \n\t" /* Save the PC */
-    "l.lwz %0,140(%2) \n\t" /* New PC. Expect cache miss */
+    "l.lwz %0,140(%3) \n\t" /* New PC. Expect cache miss */
     "l.sw 16(%1),r1 \n\t"
     "l.sw 20(%1),r2 \n\t"
     "l.sw 24(%1),r3 \n\t"
     "l.sw 28(%1),r4 \n\t"
     "l.sw 32(%1),r5 \n\t"
Line 302... | Line 304...
     "l.sw 132(%1),r30 \n\t"
     "l.jr %0 \n\t" /* Go there */
     "l.sw 136(%1),r31 \n" /* Store the last reg */
 
     /**** 64 bit implementation ****/
-    "L_64bit \n\t"
-    "l.addi %0,%0,(L_restore & 0xFFFF)\n\t"
+    "_L_64bit: \n\t"
+    "l.ori %0,%0,lo(_L_restore)\n\t"
     "l.sw 264(%1),%0 \n\t"
     "l.sd 16(%1),r1 \n\t"
     "l.sd 24(%1),r2 \n\t"
     "l.sd 32(%1),r3 \n\t"
     "l.sd 40(%1),r4 \n\t"
Line 355... | Line 357...
     will be accessed immediately upon exiting the
     routine, and so we want to make sure we load
     them as early as possible in case they are
     not in cache */
 
-    "L_restore \n\t" /* Restore "heir" */
-    "l.mfspr %3,r0,0x11 \n\t" /* Status Register */
-    "l.addi %0,r0,0xD1FF \n\t"
+    "_L_restore: \n\t" /* Restore "heir" */
+    "l.mfspr %2,r0,0x11 \n\t" /* Status Register */
     "l.movhi %0,0x07FF \n\t" /* ~SR mask */
-    "l.and %3,%0,%3 \n\t" /* save the global bits */
-    "l.addi %0,r0,0x2E00 \n\t"
+    "l.ori %0,%0,0xD1FF \n\t"
+    "l.and %2,%0,%2 \n\t" /* save the global bits */
     "l.movhi %0,0xF800 \n\t" /* SR mask */
-    "l.lwz %1,0(%2) \n\t" /* Get the previous SR */
+    "l.ori %0,%0,0x2E00 \n\t"
+    "l.lwz %1,0(%3) \n\t" /* Get the previous SR */
     "l.and %0,%1,%0 \n\t" /* Mask out the global bits */
-    "l.or %3,%3,%0 \n\t" /* Combine local/global */
-    "l.mtspr r0,%3,0x11 \n\t" /* Restore the status register */
+    "l.or %2,%2,%0 \n\t" /* Combine local/global */
+    "l.mtspr r0,%2,0x11 \n\t" /* Restore the status register */
 
     "l.mfspr %0,r0,0x02 \n\t" /* CPUCFGR */
     "l.andi %0,%0,0x40 \n\t" /* OB64S */
     "l.sfnei %0,0x0 \n\t" /* Save the 64 bit flag */
 
-    "l.srli %3,%3,28 \n\t" /* Move CID into low byte */
-    "l.lwz %0,4(%2) \n\t"
-    "l.mtspr %3,%0,0x20 \n\t" /* Offset from EPCR */
-    "l.lwz %0,8(%2) \n\t"
-    "l.mtspr %3,%0,0x30 \n\t" /* Offset from EEAR */
-    "l.lwz %0,12(%2) \n\t"
+    "l.srli %2,%2,28 \n\t" /* Move CID into low byte */
+    "l.lwz %0,4(%3) \n\t"
+    "l.mtspr %2,%0,0x20 \n\t" /* Offset from EPCR */
+    "l.lwz %0,8(%3) \n\t"
+    "l.mtspr %2,%0,0x30 \n\t" /* Offset from EEAR */
+    "l.lwz %0,12(%3) \n\t"
 
-    "l.bt L_r64bit \n\t" /* 64 bit architecture */
-    "l.mtspr %3,%0,0x30 \n\t" /* Offset from EEAR (DELAY) */
+    "l.bf _L_r64bit \n\t" /* 64 bit architecture */
+    "l.mtspr %2,%0,0x30 \n\t" /* Offset from EEAR (DELAY) */
 
     /**** 32 bit restore ****/
-    "l.lwz r1,16(%2) \n\t"
-    "l.lwz r2,20(%2) \n\t"
-    "l.lwz r9,48(%2) \n\t"
-    "l.lwz r3,24(%2) \n\t"
-    "l.lwz r4,28(%2) \n\t"
-    "l.lwz r5,32(%2) \n\t"
-    "l.lwz r6,36(%2) \n\t"
-    "l.lwz r7,40(%2) \n\t"
-    "l.lwz r8,44(%2) \n\t"
-    "l.lwz r10,52(%2) \n\t"
-    "l.lwz r11,56(%2) \n\t"
-    "l.lwz r12,60(%2) \n\t"
-    "l.lwz r13,64(%2) \n\t"
-    "l.lwz r14,68(%2) \n\t"
-    "l.lwz r15,72(%2) \n\t"
-    "l.lwz r16,76(%2) \n\t"
-    "l.lwz r17,80(%2) \n\t"
-    "l.lwz r18,84(%2) \n\t"
-    "l.lwz r19,88(%2) \n\t"
-    "l.lwz r20,92(%2) \n\t"
-    "l.lwz r21,96(%2) \n\t"
-    "l.lwz r22,100(%2) \n\t"
-    "l.lwz r23,104(%2) \n\t"
-    "l.lwz r24,108(%2) \n\t"
-    "l.lwz r25,112(%2) \n\t"
-    "l.lwz r26,116(%2) \n\t"
-    "l.lwz r27,120(%2) \n\t"
-    "l.lwz r28,124(%2) \n\t"
-    "l.lwz r29,128(%2) \n\t"
-    "l.lwz r30,132(%2) \n\t"
-    "l.j L_return \n\t"
-    "l.lwz r31,136(%2) \n\t"
+    "l.lwz r1,16(%3) \n\t"
+    "l.lwz r2,20(%3) \n\t"
+    "l.lwz r9,48(%3) \n\t"
+    "l.lwz r3,24(%3) \n\t"
+    "l.lwz r4,28(%3) \n\t"
+    "l.lwz r5,32(%3) \n\t"
+    "l.lwz r6,36(%3) \n\t"
+    "l.lwz r7,40(%3) \n\t"
+    "l.lwz r8,44(%3) \n\t"
+    "l.lwz r10,52(%3) \n\t"
+    "l.lwz r11,56(%3) \n\t"
+    "l.lwz r12,60(%3) \n\t"
+    "l.lwz r13,64(%3) \n\t"
+    "l.lwz r14,68(%3) \n\t"
+    "l.lwz r15,72(%3) \n\t"
+    "l.lwz r16,76(%3) \n\t"
+    "l.lwz r17,80(%3) \n\t"
+    "l.lwz r18,84(%3) \n\t"
+    "l.lwz r19,88(%3) \n\t"
+    "l.lwz r20,92(%3) \n\t"
+    "l.lwz r21,96(%3) \n\t"
+    "l.lwz r22,100(%3) \n\t"
+    "l.lwz r23,104(%3) \n\t"
+    "l.lwz r24,108(%3) \n\t"
+    "l.lwz r25,112(%3) \n\t"
+    "l.lwz r26,116(%3) \n\t"
+    "l.lwz r27,120(%3) \n\t"
+    "l.lwz r28,124(%3) \n\t"
+    "l.lwz r29,128(%3) \n\t"
+    "l.lwz r30,132(%3) \n\t"
+    "l.j _L_return \n\t"
+    "l.lwz r31,136(%3) \n"
 
     /**** 64 bit restore ****/
-    "l.ld r1,16(%2) \n\t"
-    "l.ld r2,24(%2) \n\t"
-    "l.ld r9,80(%2) \n\t"
-    "l.ld r3,32(%2) \n\t"
-    "l.ld r4,40(%2) \n\t"
-    "l.ld r5,48(%2) \n\t"
-    "l.ld r6,56(%2) \n\t"
-    "l.ld r7,64(%2) \n\t"
-    "l.ld r8,72(%2) \n\t"
-    "l.ld r10,88(%2) \n\t"
-    "l.ld r11,96(%2) \n\t"
-    "l.ld r12,104(%2) \n\t"
-    "l.ld r13,112(%2) \n\t"
-    "l.ld r14,120(%2) \n\t"
-    "l.ld r15,128(%2) \n\t"
-    "l.ld r16,136(%2) \n\t"
-    "l.ld r17,144(%2) \n\t"
-    "l.ld r18,152(%2) \n\t"
-    "l.ld r19,160(%2) \n\t"
-    "l.ld r20,168(%2) \n\t"
-    "l.ld r21,176(%2) \n\t"
-    "l.ld r22,184(%2) \n\t"
-    "l.ld r23,192(%2) \n\t"
-    "l.ld r24,200(%2) \n\t"
-    "l.ld r25,208(%2) \n\t"
-    "l.ld r26,216(%2) \n\t"
-    "l.ld r27,224(%2) \n\t"
-    "l.ld r28,232(%2) \n\t"
-    "l.ld r29,240(%2) \n\t"
-    "l.ld r30,248(%2) \n\t"
-    "l.ld r31,256(%2) \n"
+    "_L_r64bit: \n\t"
+    "l.ld r1,16(%3) \n\t"
+    "l.ld r2,24(%3) \n\t"
+    "l.ld r9,80(%3) \n\t"
+    "l.ld r3,32(%3) \n\t"
+    "l.ld r4,40(%3) \n\t"
+    "l.ld r5,48(%3) \n\t"
+    "l.ld r6,56(%3) \n\t"
+    "l.ld r7,64(%3) \n\t"
+    "l.ld r8,72(%3) \n\t"
+    "l.ld r10,88(%3) \n\t"
+    "l.ld r11,96(%3) \n\t"
+    "l.ld r12,104(%3) \n\t"
+    "l.ld r13,112(%3) \n\t"
+    "l.ld r14,120(%3) \n\t"
+    "l.ld r15,128(%3) \n\t"
+    "l.ld r16,136(%3) \n\t"
+    "l.ld r17,144(%3) \n\t"
+    "l.ld r18,152(%3) \n\t"
+    "l.ld r19,160(%3) \n\t"
+    "l.ld r20,168(%3) \n\t"
+    "l.ld r21,176(%3) \n\t"
+    "l.ld r22,184(%3) \n\t"
+    "l.ld r23,192(%3) \n\t"
+    "l.ld r24,200(%3) \n\t"
+    "l.ld r25,208(%3) \n\t"
+    "l.ld r26,216(%3) \n\t"
+    "l.ld r27,224(%3) \n\t"
+    "l.ld r28,232(%3) \n\t"
+    "l.ld r29,240(%3) \n\t"
+    "l.ld r30,248(%3) \n\t"
+    "l.ld r31,256(%3) \n"
 
-    "L_return \n\t" /* End of routine */
+    "_L_return: \n\t" /* End of routine */
 
-    : /* No outputs */
-    : "=&r" (temp1), "+r" (run), "r" (heir), "=&r" (temp2));
+    : "=&r" (temp1), "+r" (run), "=&r" (temp2)
+    : "r" (heir));
 
   /* Note that some registers were used for parameter passing and
      temporary registers (temp1 and temp2). These values were
      saved and restored across context calls, but the values that
      the caller needs should have been stored on the stack. The
Line 475... | Line 478...
  * Or1k Specific Information:
  *
  * In our implementation, this simply redirects to switch context
  */
 
-#define _CPU_Context_restore(x) _CPU_Context_switch(NULL,x)
+void _CPU_Context_restore(
+  Context_Control *run
+)
+{
+  _CPU_Context_switch(run,NULL);
+}
 
 
 /* void __ISR_Handler()
  *
  * This routine provides the RTEMS interrupt management.
  *
- * NO_CPU Specific Information:
+ * Or1k Specific Information:
  *
- * XXX document implementation including references if appropriate
+ * Based on the Or1k interrupt architecture described in chapter 16
+ * and the exception architecture described in chapter 9
  */
 
-void _ISR_Handler()
+void _ISR_Handler(unsigned32 vector,unsigned32 ProgramCounter,
+                  unsigned32 EffectiveAddress,unsigned32 StatusRegister)
 {
   /*
    * This discussion ignores a lot of the ugly details in a real
    * implementation such as saving enough registers/state to be
    * able to do something real. Keep in mind that the goal is
Line 550... | Line 560...
    *
    * LABEL "exit interrupt (simple case):
    * prepare to get out of interrupt
    * return from interrupt
    */
 
+  /* In the Or1k architecture, exceptions are handled in the
+     startup code of the board support package. Thus, this
+     routine is never called. Or1k exception routines are called
+     with the following prototype:
+
+     function(int vector#, int PC, int Address, int StatusRegister);
+
+     These parameters are snapshots of the system when the exception
+     was encountered. If virtual memory is active, things like the
+     PC and Address may have little meaning, as they are referenced
+     in physical space, not the virtual space of the process.
+  */
 }
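Given the prototype spelled out in the comment above, a BSP-level exception handler would look roughly like the sketch below. The handler name and its printk-based body are illustrative only, nothing this file defines, and printk is assumed to come from the RTEMS BSP I/O support.

#include <rtems/bspIo.h>   /* printk, assumed available in the BSP */

/* Hypothetical handler using the documented argument layout. */
void or1k_bus_error_handler(unsigned32 vector, unsigned32 pc,
                            unsigned32 address, unsigned32 sr)
{
  /* All four arguments are snapshots taken when the exception fired. */
  printk("exception %d at PC 0x%08x, EA 0x%08x, SR 0x%08x\n",
         (int) vector, pc, address, sr);
}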
 
\ No newline at end of file