/*
 * The vectors page. Includes all exception handlers.
 *
 * Copyright (C) 2007 Bahadir Balban
 */

#include INC_ARCH(asm.h)
#include INC_ARCH(asm-macros.S)

.section .data.vectors
__vector_vaddr:

BEGIN_PROC(arm_high_vector)
        b       arm_reset_exception
        b       arm_undef_exception_reentrant
        b       arm_swi_exception
        b       arm_prefetch_abort_exception_reentrant
        b       arm_data_abort_exception_reentrant
        nop
        b       arm_irq_exception_reentrant_with_schedule
        b       arm_fiq_exception
END_PROC(arm_high_vector)

.balign 4

/*
 * vect_reset
 *
 * Upon Entry:
 * - All registers are undefined and insignificant.
 * - FIQ/IRQs are disabled.
 * - PC:        0x00000000
 *
 * PURPOSE:
 * The CPU always starts executing from this vector
 * upon a HW reset. It may also be used as a SW reset.
 */
BEGIN_PROC(arm_reset_exception)
END_PROC(arm_reset_exception)


#if defined(CONFIG_SUBARCH_V5)
        .macro disable_irqs rx
                mrs     \rx, cpsr_fc
                orr     \rx, #ARM_IRQ_BIT
                msr     cpsr_fc, \rx
        .endm
        .macro enable_irqs rx
                mrs     \rx, cpsr_fc
                bic     \rx, #ARM_IRQ_BIT
                msr     cpsr_fc, \rx
        .endm
#endif

#if defined (CONFIG_SUBARCH_V7) || defined(CONFIG_SUBARCH_V6)
        .macro disable_irqs rx
                cpsid   ia
        .endm
        .macro enable_irqs rx
                cpsie   ia
        .endm
#endif

#if defined (CONFIG_SUBARCH_V7)
        .macro clear_exclusive
                clrex
        .endm
#else
        .macro clear_exclusive
        .endm
#endif

        /* Only works in SVC MODE. Know what you are doing! */
        .macro get_current rx
                bic     \rx, sp, #0xFF0
                bic     \rx, \rx, #0xF
        .endm
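
        /*
         * Illustrative sketch (assumption: each task's ktcb sits at the
         * base of a 4KB-aligned kernel stack region). Clearing the low
         * twelve bits of SP_SVC (0xFF0 | 0xF == 0xFFF) rounds down to
         * that base, e.g.:
         *
         *      @ SP_SVC = 0x80ACF8  ->  r0 = 0x80A000 (the ktcb)
         *      get_current r0
         */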

        /* Saves the address of the system call argument registers pushed
         * to the stack into the current task's ktcb. */
        .macro  ktcb_ref_saved_regs regs_addr, ktcb, regs_off
                get_current \ktcb
                ldr     \regs_off, =syscall_regs_offset
                ldr     \regs_off, [\regs_off]
                str     \regs_addr, [\ktcb, \regs_off]
        .endm
        /* Determines from the SPSR whether irqs should be enabled during
         * abort handling. If the abort occurred in userspace, irqs may be
         * enabled. If it came from kernel mode, irqs may be enabled only
         * if they were already enabled before the abort. */
        .macro  can_abort_enable_irqs temp1, r_spsr
                and \temp1, \r_spsr, #ARM_MODE_MASK
                cmp \temp1, #ARM_MODE_USR        @ User mode indicates irqs can be enabled.
                beq 1f                      @ Z flag set, which indicates "can enable"
                and \temp1, \r_spsr, #ARM_IRQ_BIT @ A clear irq bit indicates irqs were enabled
                cmp \temp1, #0              @ before the abort and can be safely enabled.
        1:                                  @ Z flag must be set for "can enable" here.
        .endm
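
        /*
         * Typical use, mirroring the abort handlers below: Z set on
         * return means irqs may be enabled for the rest of the handler.
         *
         *      can_abort_enable_irqs r0, r3    @ r3 holds the aborted mode's spsr
         *      bne     1f                      @ Z clear: leave irqs disabled
         *      enable_irqs r3
         * 1:
         */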

        /* Pushes the user sp and lr to the stack, updates the stack pointer */
        .macro push_user_sp_lr sp
                @ stack state: (Low) |..|..|->(Original)| (High)
                stmfd   \sp, {sp, lr}^  @ Push USR banked regs to stack.
                nop                     @ Need a NOP after push/popping user registers.
                @ stack state: (Low) |SP_USR|LR_USR|->(Original)| (High)
                sub     \sp, \sp, #8    @ Adjust SP, since stack ops on banked regs have no writeback.
                @ stack state: (Low) |->SP_USR|LR_USR|(Original)| (High)
        .endm

        .macro is_psr_usr rx
                and     \rx, \rx, #ARM_MODE_MASK
                cmp     \rx, #ARM_MODE_USR
        .endm

/* These all read the same unified FSR and FAR registers */
#if defined (CONFIG_SUBARCH_V5)
        .macro  cp15_read_ifsr rx
                mrc     p15, 0, \rx, c5, c0, 0  @ Read FSR (tells why the fault occurred)
        .endm
        .macro cp15_read_ifar rx
                mrc     p15, 0, \rx, c6, c0, 0  @ Read FAR (contains the faulted address)
        .endm
        .macro  cp15_read_dfsr rx
                mrc     p15, 0, \rx, c5, c0, 0  @ Read FSR (tells why the fault occurred)
        .endm
        .macro cp15_read_dfar rx
                mrc     p15, 0, \rx, c6, c0, 0  @ Read FAR (contains the faulted data address)
        .endm
#endif

/* These read the distinct IFSR, IFAR, DFSR and DFAR registers */
#if defined (CONFIG_SUBARCH_V6) || defined (CONFIG_SUBARCH_V7)
        .macro  cp15_read_ifsr rx
                mrc     p15, 0, \rx, c5, c0, 1  @ Read IFSR (tells why the fault occurred)
        .endm
        .macro cp15_read_ifar rx
                mrc     p15, 0, \rx, c6, c0, 2  @ Read IFAR (contains the faulted instruction address)
        .endm
        .macro  cp15_read_dfsr rx
                mrc     p15, 0, \rx, c5, c0, 0  @ Read DFSR (tells why the fault occurred)
        .endm
        .macro cp15_read_dfar rx
                mrc     p15, 0, \rx, c6, c0, 0  @ Read DFAR (contains the faulted data address)
        .endm
#endif
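
        /*
         * Typical use, as in the abort paths below: fault status into r1
         * and the faulting address into r2, before calling the C handler.
         *
         *      cp15_read_dfsr r1       @ why the fault occurred
         *      cp15_read_dfar r2       @ which address faulted
         */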

#define UNDEF_R0        0
#define UNDEF_SPSR      -4
#define UNDEF_R14       -8

/*
 * vect_undef
 *
 * Upon Entry:
 * - R14:       Address of the next instruction after the undefined instruction
 * - PC:        0x00000004
 * - IRQs are disabled (CPSR[7] = 1)
 *
 * PURPOSE:
 * A co-processor instruction not supported by the core can be
 * emulated here. Unrecognised/invalid instructions are also handled.
 */
BEGIN_PROC(arm_undef_exception_reentrant)
        clear_exclusive
        str     lr, [sp, #UNDEF_R14]    @ Store undef address
        mrs     lr, spsr                @ Get SPSR
        str     lr, [sp, #UNDEF_SPSR]   @ Store SPSR
        str     r0, [sp, #UNDEF_R0]     @ Store r0
        @ NOTE: Could increase the undef nest count here.
        mov     r0, sp                  @ Keep the current sp in R0
        mrs     lr, cpsr                @ Change to SVC mode.
        bic     lr, #ARM_MODE_MASK
        orr     lr, lr, #ARM_MODE_SVC
        msr     cpsr_fc, r14
        @ FIXME: Ensure 8-byte stack here.
        str     lr, [sp, #-8]!  @ Save lr_svc 2 words down from interrupted SP_SVC
        @ Transfer undef state to SVC
        ldr     lr, [r0, #UNDEF_R14]
        str     lr, [sp, #4]
        @ Stack state:  |LR_SVC<-|LR_UNDEF|{original SP_SVC}|
        ldr     lr, [r0, #UNDEF_SPSR]
        ldr     r0, [r0, #UNDEF_R0]
        stmfd   sp!, {r0-r3,r12,lr}
        @ Stack state:  |R0<-|R1|R2|R3|R12|UNDEF_SPSR|LR_SVC|LR_UNDEF|{original SP_SVC}|
        push_user_sp_lr sp      @ NOTE: These must be pushed to avoid trashing them if preempted
        @ Stack state: |SP_USR<-|LR_USR|R0|R1|R2|R3|R12|UNDEF_SPSR|LR_SVC|LR_UNDEF|{original SP_SVC}|

        @ All undef state saved. Can safely enable irqs here, if need be.
        ldr     r3, [sp, #28]           @ Load UNDEF_SPSR
        can_abort_enable_irqs r0, r3    @ Judge whether irqs can be enabled from the previous state.
        bne     1f                      @ Branch based on the irq judgement above.
        enable_irqs r3
1:
        /* Now check in what mode the exception occurred, and return that
         * mode's LR in r2. Also populate the r0, r1, r2 parameters for
         * undefined_instr_handler.
         */
        ldr     r1, [sp, #28]           @ Load UNDEF_SPSR
        is_psr_usr r0                   @ Test if UNDEF_SPSR was user mode. (r0 still holds
                                        @ a mode-derived value left by can_abort_enable_irqs.)
        ldrne   r2, [sp, #32]           @ Exception occurred in kernel, load LR_SVC
        ldreq   r2, [sp, #4]            @ Exception occurred in user, load LR_USR
        ldr     r0, [sp, #36]           @ Load LR_UNDEF saved previously.
        mov     lr, pc
        ldr     pc, =undefined_instr_handler    @ Jump to function outside this page.
        disable_irqs r0                 @ Disable irqs to avoid corrupting spsr.
                                        @ (i.e. an interrupt could overwrite spsr with the current psr)
        ldmfd   sp, {sp, lr}^           @ Restore user sp and lr, which might have been corrupted on preemption
        nop                             @ User reg mod requires a nop
        add     sp, sp, #8              @ Update SP.
        ldmfd   sp!, {r0-r3,r12,lr}     @ Restore previous context. (note, lr has spsr)
        msr     spsr_cxsf, r14          @ Restore spsr register from lr.
        @ Stack state: |LR_SVC<-|LR_PREV(UNDEF)|{original SP_SVC}|
        ldmfd   sp!, {r14, pc}^         @ Return, restoring cpsr. Note r14 gets r14_svc,
                                        @ and pc gets lr_undef, saved at #4 and #8 offsets
                                        @ down from where the svc stack had left off.
END_PROC(arm_undef_exception_reentrant)

/*
 * vect_swi
 *
 * Upon Entry:
 * - R14:       Address of the next instruction after the SWI
 * - PC:        0x00000008
 * - R0-R12:    Depending on the system call, some of them contain
 *              indicators of what the exception means.
 * - IRQs are disabled (CPSR[7] = 1)
 * - The SWI instruction's bits [7:0] may contain an SWI indicator
 *
 * PURPOSE:
 * Used for trapping into a debugger or OS kernel via system calls.
 * The argument registers R0-R12 and bits [7:0] of the causing SWI
 * instruction contain hints of what to do with this exception. What
 * R0-R12 contain depends on what userspace has put in them. Note this
 * is the only exception that userspace can generate, and thus it has
 * control over what it puts into r0-rx.
 *
 * RECAP:
 * Normally across a function call, only r0-r3 are used for passing parameters.
 * Why r0-r3 only, and not r4, r5...? See the APCS (ARM procedure call standard).
 * Short answer: r4-r12 must be preserved across procedures, but r0-r3 can be
 * trashed because they're set aside for argument passing. Arguments beyond the
 * fourth go on the stack. Note the APCS is a *suggestion* rather than an
 * enforcement. So if a userspace stub library is created that, say, preserves
 * and uses r0-r9 for a system call, and the system call handler (this) knows
 * about it, it is a perfectly valid setup. In fact this is what we do here; we
 * don't strictly use r0-r3. Depending on the system call, the set of input
 * registers (and output registers used to return results from the system call)
 * may be redefined. These are documented for each system call in the reference
 * manual.
 * Another caveat of our SWI usage: we determine the correct system call from
 * the address offset of the SWI instruction within the system call vector,
 * rather than from bits [7:0] of the SWI.
 */
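
/*
 * Illustrative sketch (hypothetical, not part of this file): a userspace
 * stub of the kind described above, which preserves r4-r8, loads the
 * extended argument registers, and reaches the SWI in the system call
 * vector with a BL, so that LR_usr holds the stub's return address.
 * The symbol name below is made up for the example.
 *
 *      my_syscall_stub:
 *              stmfd   sp!, {r4-r8, lr}        @ callee-saved regs the convention extends
 *              @ ... load r0-r8 with system call arguments ...
 *              bl      syscall_swi_page_offset @ BL to the SWI in the call vector
 *              ldmfd   sp!, {r4-r8, pc}        @ r0 has the return value
 */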
BEGIN_PROC(arm_swi_exception)
        clear_exclusive
        sub     lr, lr, #4      @ Get the address of the swi instruction the user executed.
        stmfd   sp, {r0-r12,sp,lr}^ @ Push arguments, LR_USR and SP_USR to stack.
        nop

        @ Future optimisation 1:
        @ For all syscalls we need not push any more than r8, but we push up to
        @ r12 because upon a fork, a child's easiest way to restore user
        @ registers is to pop them from the stack during return_from_syscall. In
        @ the future, the fork function could return back to here, save all
        @ context into the child from the actual registers instead of reading
        @ from the stack, and then return.

        @ Future optimisation 2:
        @ SP_USR MUST be pushed here, otherwise a kernel preemption could
        @ cause the user mode of another process to overwrite SP_USR. The reason
        @ we save it here is that the preemption path does not currently save it
        @ if it is a kernel preemption. The user SP could also be of use here, as
        @ the user might have pushed data to its stack to be used by system
        @ calls. But we don't plan to pass data to the kernel in this way, so
        @ saving of SP_USR could be moved to the preemption path as an
        @ optimisation.

        /*
         * LR_usr is important here, because the user application uses a BL
         * to jump to the system call SWI, so LR_usr contains the return
         * address, i.e. the next instruction after the *jumping* instruction to
         * the system call SWI (not the one after the swi itself, which is in
         * LR_svc).
         */

        sub     sp, sp, #60     @ stmfd on user registers can't writeback the SP. We do it manually.
        mrs     r0, spsr_fc     @ The psr also needs saving in case this context is interrupted.
        stmfd   sp!, {r0}
        enable_irqs r0
        mov     r0, sp          @ Current SP has the pointer to all saved context.
        ktcb_ref_saved_regs r0, r1, r2 @ Save the syscall context pointer in the ktcb
        mov     r1, lr          @ Pass the swi instruction address in LR as arg1
        mov     lr, pc
        ldr     pc, =syscall

.global return_from_syscall;    @ Newly created threads use this path to return,
return_from_syscall:            @ if they duplicated another thread's address space.
        disable_irqs r1         @ Not disabling irqs at this point would let SP_USR and
                                @ the spsr get corrupted, causing havoc.
        ldmfd   sp!, {r1}
        msr     spsr, r1
        add     sp, sp, #4      @ Skip r0's location, since r0 already has the return result.
                                @ Note we're obliged to preserve at least r3-r8 because they're MRs.
        ldmfd   sp!, {r1-r12}   @ Restore r1-r12 pushed to stack earlier. r0 already has the return result.
        ldmfd   sp, {sp}^       @ Restore the user stack pointer, which might have been corrupted on preemption
        nop
        add     sp, sp, #4      @ Update sp.
        ldmfd   sp!, {lr}       @ Load the userspace return address
        movs    pc, lr
END_PROC(arm_swi_exception)

/* Minimal abort state saved on the data abort stack right after the abort vector enters: */
#define ABT_R0          0
#define ABT_SPSR        -4
#define ABT_R14         -8

/* Minimal prefetch abort state saved on the abort stack upon entry (same layout). */
#define ABT_R0          0
#define ABT_SPSR        -4
#define ABT_R14         -8

/*
 * vect_pabt
 *
 * Upon Entry:
 * - R14_abt:   Address of the next instruction after the aborted instruction
 * - R14_usr:   Address of the return instruction in the last function call**
 * - PC:        0x0000000c
 * - IRQs are disabled (CPSR[7] = 1)
 *
 * PURPOSE:
 * Used for handling instructions that caused *memory aborts* during
 * the *prefetching* of the instruction. The instruction is also marked
 * as invalid by the core. This handles the cause of the memory abort.
 *
 * (One reason a memory abort would occur is when we were entering
 * a new page region that contained executable code and was not
 * present in memory, or whose physical-to-virtual translation was not
 * present in the page tables. See other causes for memory aborts.)
 *
 * ** In case the abort occurred in userspace. This is useful if the abort
 * was due to a null/invalid function pointer call. Since R14_abt
 * includes the aborting instruction itself, R14_usr gives the clue to
 * where this call came from.
 */
BEGIN_PROC(arm_prefetch_abort_exception_reentrant)
        clear_exclusive
        sub     lr, lr, #4              @ lr-4 points at the aborted instruction
        str     lr, [r13, #ABT_R14]     @ Store the abort address.
        mrs     lr, spsr                @ Get SPSR
        str     lr, [r13, #ABT_SPSR]    @ Store SPSR
        str     r0, [r13, #ABT_R0]      @ Store R0 to use as a temp register.
        mov     r0, r13                 @ SP to R0
        mrs     lr, cpsr                @ Change to SVC mode.
        bic     lr, #ARM_MODE_MASK
        orr     lr, lr, #ARM_MODE_SVC
        msr     cpsr_fc, r14
        @ FIXME: Ensure 8-byte stack here.
        str     lr, [sp, #-8]!  @ NOTE: Switched mode! Save LR_SVC 2 words down from SP_SVC.
transfer_pabt_state_to_svc:     @ Move data saved on the PABT stack to the SVC stack.
        ldr     lr, [r0, #ABT_R14]
        str     lr, [sp, #4]
        @ Stack state: |LR_SVC<-|LR_PABT|{original SP_SVC}|
        ldr     lr, [r0, #ABT_SPSR]
        ldr     r0, [r0, #ABT_R0]
        stmfd   sp!, {r0-r3,r12,lr}
        @ Stack state:  |R0<-|R1|R2|R3|R12|PABT_SPSR|LR_SVC|LR_PABT|{original SP_SVC}|
        push_user_sp_lr sp      @ NOTE: These must be pushed to avoid trashing them if preempted
        @ Stack state:  |SP_USR<-|LR_USR|R0|R1|R2|R3|R12|PABT_SPSR|LR_SVC|LR_PABT|{original SP_SVC}|
read_pabt_state:
        cp15_read_ifsr r1       @ Reads FSR on ARMv5, IFSR on ARMv6-v7. Fault status information
        cp15_read_ifar r2       @ Reads FAR on ARMv5, IFAR on ARMv6-v7. Fault address information
        @ All abort state and (FAR/FSR) saved. Can safely enable irqs here, if need be.
        ldr     r3, [sp, #28]           @ Load PABT_SPSR
        can_abort_enable_irqs r0, r3    @ Judge whether irqs can be enabled from the previous state.
        bne     1f                      @ Branch based on the irq judgement above.
        enable_irqs r3
1:
        ldr     r3, [sp, #28]           @ Load PABT_SPSR to r3, the spsr of the aborted mode
        ldr     r0, [sp, #36]           @ Load LR_PABT - 4 saved previously. (Address that aborted)
        mov     lr, pc
        ldr     pc, =prefetch_abort_handler @ Jump to function outside this page.
        disable_irqs r0                 @ Disable irqs to avoid corrupting spsr.
                                        @ (i.e. an interrupt could overwrite spsr with the current psr)
        ldmfd   sp, {sp, lr}^           @ Restore user sp and lr, which might have been corrupted on preemption
        nop                             @ User reg mod requires a nop
        add     sp, sp, #8              @ Update SP.
        ldmfd   sp!, {r0-r3,r12,lr}     @ Restore previous context. (note, lr has spsr)
        msr     spsr_cxsf, r14          @ Restore spsr register from lr.
        @ Stack state: |LR_SVC<-|LR_PREV(PABT)|{original SP_SVC}|
        ldmfd   r13!, {r14, pc}^        @ Return, restoring cpsr. Note r14 gets r14_svc,
                                        @ and pc gets lr_pabt, saved at #4 and #8 offsets
                                        @ down from where the svc stack had left off.
END_PROC(arm_prefetch_abort_exception_reentrant)

/*
 * vect_dabt
 *
 * Upon Entry:
 * - R14_abt:   Address of the next instruction after the aborted instruction
 * - PC:        0x00000010
 * - IRQs are disabled (CPSR[7] = 1)
 *
 * PURPOSE:
 * Used for handling instructions that caused *memory aborts* during
 * the *execution* of the current instruction. This may happen if the
 * instruction accessed a memory address (e.g. LDR/STR) that is not
 * defined as part of the currently executing process (aka an illegal
 * access). Another possibility is that the address is within the address
 * space of the process, but it is not mapped, i.e. it does not have a
 * physical-to-virtual translation entry in the page tables.
 */
BEGIN_PROC(arm_data_abort_exception)
        sub     lr, lr, #8      @ lr-8 points at the aborted instruction
        mrc     p15, 0, r2, c5, c0, 0 @ Read FSR
        mrc     p15, 0, r1, c6, c0, 0 @ Read FAR
        mov     r0, lr          @ Get the data abort address
        mov     r5, lr          @ Save it in r5 in case r0 gets trashed
        mov     lr, pc          @ Save the return address
        ldr     pc, =data_abort_handler @ Jump to function outside this page.
1:
        b       1b
END_PROC(arm_data_abort_exception)

/*
 * The method of saving abort state to the svc stack is identical to that of
 * the reentrant irq vector. Naturally, restoring of the previous state
 * is also identical.
 */
BEGIN_PROC(arm_data_abort_exception_reentrant)
        clear_exclusive
        sub     lr, lr, #8              @ Get the abort address
        str     lr, [r13, #ABT_R14]     @ Store the abort address
        mrs     lr, spsr                @ Get SPSR
        str     lr, [r13, #ABT_SPSR]    @ Store SPSR
        str     r0, [r13, #ABT_R0]      @ Store r0
        @ NOTE: Could increase the data abort nest count here.
        mov     r0, r13                 @ Keep the current sp in R0
        mrs     lr, cpsr                @ Change to SVC mode.
        bic     lr, #ARM_MODE_MASK
        orr     lr, lr, #ARM_MODE_SVC
        msr     cpsr_fc, r14
        @ FIXME: Ensure 8-byte stack here.
        str     lr, [sp, #-8]!  @ Save lr_svc 2 words down from interrupted SP_SVC
transfer_dabt_state_to_svc:
        ldr     lr, [r0, #ABT_R14]
        str     lr, [sp, #4]
        @ Stack state:  |LR_SVC<-|LR_DABT|{original SP_SVC}|
        ldr     lr, [r0, #ABT_SPSR]
        ldr     r0, [r0, #ABT_R0]
        stmfd   sp!, {r0-r3,r12,lr}
        @ Stack state:  |R0<-|R1|R2|R3|R12|DABT_SPSR|LR_SVC|LR_DABT|{original SP_SVC}|
        push_user_sp_lr sp
        @ Stack state:  |SP_USR<-|LR_USR|R0|R1|R2|R3|R12|DABT_SPSR|LR_SVC|LR_DABT|{original SP_SVC}|
read_dabt_state:
        cp15_read_dfsr r1       @ Read DFSR (tells why the fault occurred)
        cp15_read_dfar r2       @ Read DFAR (contains the faulted data address)
        @ All abort state and (FAR/FSR) saved. Can safely enable irqs here, if need be.
        ldr     r3, [sp, #28]           @ Load DABT_SPSR
        can_abort_enable_irqs r0, r3    @ Judge whether irqs can be enabled from the previous state.
        bne     1f                      @ Branch based on the irq judgement above.
        enable_irqs r3
1:
        ldr     r0, [sp, #36]           @ Load LR_DABT saved previously.
        mov     lr, pc
        ldr     pc, =data_abort_handler @ Jump to function outside this page.
        disable_irqs r0                 @ Disable irqs to avoid corrupting spsr.
        ldmfd   sp, {sp, lr}^           @ Restore user sp and lr, which might have been corrupted on preemption
        nop                             @ User reg mod requires a nop
        add     sp, sp, #8              @ Update SP.
        ldmfd   sp!, {r0-r3,r12,lr}     @ Restore previous context. (note, lr has spsr)
        msr     spsr_cxsf, r14          @ Restore spsr register from lr.
        @ Stack state: |LR_SVC<-|LR_PREV(DABT)|{original SP_SVC}|
        ldmfd   r13!, {r14, pc}^        @ Return, restoring cpsr. Note r14 gets r14_svc,
                                        @ and pc gets lr_dabt, saved at #4 and #8 offsets
                                        @ down from where the svc stack had left off.
END_PROC(arm_data_abort_exception_reentrant)

/*
 * vect_irq
 *
 * Upon Entry:
 * - R14:       Address of the next instruction after the interrupted instruction.
 * - PC:        0x00000018
 * - IRQs are disabled (CPSR[7] = 1)
 * - A vectored interrupt controller would also provide where to jump in
 *   order to handle the interrupt, or an irq controller in general would
 *   provide registers that indicate what kind of interrupt has occurred.
 *
 * PURPOSE:
 * Used for handling IRQs. IRQs have lower priority compared to other
 * types of exceptions.
 */

/* The most basic handler, where neither context switching nor re-entry can occur. */
BEGIN_PROC(arm_irq_exception_basic)
        sub     lr, lr, #4
        stmfd   sp!, {r0-r3,lr}
        mov     lr, pc
        ldr     pc, =do_irq
        ldmfd   sp!, {r0-r3, pc}^
END_PROC(arm_irq_exception_basic)

/* Minimal IRQ state saved on the irq stack right after the irq vector enters: */
#define IRQ_R0          0
#define IRQ_SPSR        -4
#define IRQ_R14         -8

/* A reentrant handler that uses the svc mode stack to prevent banked lr_irq corruption. */
BEGIN_PROC(arm_irq_exception_reentrant)
        sub     lr, lr, #4
@ Save minimal state to the irq stack:
        str     r14, [r13, #IRQ_R14]    @ Save lr_irq
        mrs     r14, spsr               @ Copy spsr
        str     r14, [r13, #IRQ_SPSR]   @ Save spsr on the irq stack
        str     r0, [r13, #IRQ_R0]      @ Save r0.
        mov     r0, r13                 @ Use r0 to keep banked sp_irq when the mode is switched.
        mrs     r14, cpsr               @ Get current psr (irq)
        bic     r14, #ARM_MODE_MASK     @ Clear mode part from psr
        orr     r14, r14, #ARM_MODE_SVC @ Write SVC mode bits.
        msr     cpsr_fc, r14            @ Change to SVC mode.
        str     r14, [r13, #-8]!        @ Save lr_svc 2 words down from where the svc stack left off.
@ Transfer the minimal irq state saved to the svc stack:
        ldr     r14, [r0, #IRQ_R14]     @ Load lr_irq to lr using r0, which contains sp_irq.
        str     r14, [r13, #4]          @ Save lr_irq 1 word down from where the svc stack left off.
        ldr     r14, [r0, #IRQ_SPSR]    @ Load irq spsr.
        ldr     r0, [r0, #IRQ_R0]       @ Restore r0.
        stmfd   sp!, {r0-r3,r12,lr}     @ Save all the rest of the irq context to the svc stack.
        bl      do_irq                  @ Read irq number etc. Free to re-enable irqs here.
        ldmfd   sp!, {r0-r3,r12,lr}     @ Restore previous context. (note, lr has spsr)
        msr     spsr_cxsf, lr           @ Restore spsr register from lr.
        ldmfd   r13!, {r14, pc}^        @ Return, restoring cpsr. Note r14 gets r14_svc,
                                        @ and pc gets lr_irq, saved at #4 and #8 offsets
                                        @ down from where the svc stack had left off.
END_PROC(arm_irq_exception_reentrant)

        .macro  need_resched rx, ry
        get_current \rx
        ldr     \ry, =need_resched_offset
        ldr     \ry, [\ry]
        ldr     \ry, [\rx, \ry]
        cmp     \ry, #1
        .endm

/*
 * Keeps the PSR of the last preempted process. This helps to tell
 * what mode the process was in when it was preempted.
 */
.global preempted_psr;
preempted_psr:
.word   0
.word   0
.word   0
.word   0

/* Keeps track of how many nests of irqs have happened. */
.global current_irq_nest_count;
current_irq_nest_count:
.word   0
.word   0
.word   0
.word   0

#if defined (CONFIG_SMP)
        @ \adr receives the address of the per-cpu variable
        .macro per_cpu adr, temp, varname
                get_cpuid \temp
                ldr \adr, =\varname
                add \adr, \adr, \temp, lsl #2
        .endm
#else
        .macro per_cpu adr, temp, varname
                ldr \adr, =\varname
        .endm
#endif
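
        /*
         * Example expansion (illustrative) under CONFIG_SMP:
         *
         *      per_cpu r0, r1, current_irq_nest_count
         *
         * becomes roughly:
         *
         *      get_cpuid r1
         *      ldr     r0, =current_irq_nest_count
         *      add     r0, r0, r1, lsl #2      @ one 4-byte slot per cpu
         */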

/*
 * FIXME: current_irq_nest_count also counts for any preempt_disable() calls.
 * However, this nesting check assumes all nests come from real irqs.
 * We should make this check count just the real ones.
 */
#define IRQ_NESTING_MAX                 32
        .macro  inc_irq_cnt_with_overnest_check rx, ry
        per_cpu \rx, \ry, current_irq_nest_count @ Get the per-cpu address of the variable
        ldr     \ry, [\rx]
        add     \ry, \ry, #1                    @ No need for an atomic inc since irqs are disabled.
        str     \ry, [\rx]
        cmp     \ry, #IRQ_NESTING_MAX           @ Check no more than max nests, and die miserably if so.
        ldrge   pc, =irq_overnest_error
        .endm

        @ This decrement need not be atomic, because if you are *decrementing* this,
        @ then preemption is already *disabled*. Ruling out preemption, the only race
        @ could be against irqs. If an irq preempts it during the decrement and modifies
        @ the count, the irq is still responsible for changing it back to the value it
        @ had when we read it, before the irq returns. So effectively anything that runs
        @ during the decrement does not affect the value of the count.
        .macro  dec_irq_nest_cnt rx, ry
        per_cpu \ry, \rx, current_irq_nest_count
        ldr     \rx, [\ry]
        sub     \rx, \rx, #1
        str     \rx, [\ry]
        .endm
        .macro in_process_context rx, ry
        per_cpu \rx, \ry, current_irq_nest_count
        ldr     \rx, [\rx]
        cmp     \rx, #0
        .endm
        /* If we interrupted a process (as opposed to another irq), saves the spsr value to preempted_psr */
        .macro cmp_and_save_process_psr rx, ry
        in_process_context \rx, \ry             @ If the nest count is 0, a running process was preempted.
        bne 9999f                               @ Branch ahead if not a process
        per_cpu \rx, \ry, preempted_psr         @ Get the per-cpu preempted psr
        mrs     \ry, SPSR                       @ Re-read spsr since the register was trashed
        str     \ry, [\rx]                      @ Store it in the per-cpu preempted psr
        9999:
        .endm

        /*
         * Clear irq bits on a register.
         *
         * On ARMv5, only the I bit is cleared; on ARMv6-v7,
         * the A bit is also cleared.
         */
        .macro clr_irq_bits_on_reg rx
                bic     \rx, #ARM_IRQ_BIT
#if defined (CONFIG_SUBARCH_V6) || defined (CONFIG_SUBARCH_V7)
                bic     \rx, #ARM_A_BIT
#endif
        .endm

#define CONTEXT_PSR             0
#define CONTEXT_R0              4
#define CONTEXT_R1              8
#define CONTEXT_R2              12
#define CONTEXT_R3              16
#define CONTEXT_R4              20
#define CONTEXT_R5              24
#define CONTEXT_R6              28
#define CONTEXT_R7              32
#define CONTEXT_R8              36
#define CONTEXT_R9              40
#define CONTEXT_R10             44
#define CONTEXT_R11             48
#define CONTEXT_R12             52
#define CONTEXT_R13             56
#define CONTEXT_R14             60
#define CONTEXT_PC              64
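
/*
 * Illustrative picture of the per-task context frame implied by the
 * offsets above (one 4-byte word each, PSR at the lowest address):
 *
 *   +0 PSR | +4 R0 | +8 R1 | ... | +56 R13(SP) | +60 R14(LR) | +64 PC
 */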

/*
 * TODO: Optimization:
 * We could use SRS/RFE on the irq exception _only_, but we are not
 * yet aware of its implications. Only the irq handler could do it,
 * because RFE enables interrupts unconditionally.
 */
BEGIN_PROC(arm_irq_exception_reentrant_with_schedule)
        clear_exclusive
        sub     lr, lr, #4
        str     lr, [r13, #IRQ_R14]     @ Save lr_irq
        mrs     r14, spsr               @ Copy spsr
        str     r14, [r13, #IRQ_SPSR]   @ Save spsr on the irq stack
        str     r0, [r13, #IRQ_R0]      @ Save r0.
        cmp_and_save_process_psr r0, r14 @ R14 should have the spsr here.
        inc_irq_cnt_with_overnest_check r0, r14
        mov     r0, r13                 @ Use r0 to keep banked sp_irq when the mode is switched.
        mrs     r14, cpsr               @ Get current psr (irq)
        bic     r14, #ARM_MODE_MASK     @ Clear mode part from psr
        orr     r14, r14, #ARM_MODE_SVC @ Write SVC mode bits.
        msr     cpsr_fc, r14            @ Change to SVC mode.
        @ FIXME: Ensure an 8-byte aligned stack here! Make sure to restore the original state later!
        str     r14, [r13, #-8]!        @ Save lr_svc 2 words down from where the svc stack left off. SP updated.
@ Transfer the minimal irq state to the svc stack:
        ldr     r14, [r0, #IRQ_R14]     @ Load lr_irq to lr using r0, which contains sp_irq.
        str     r14, [r13, #4]          @ Save lr_irq 1 word down from where the svc stack left off.
        ldr     r14, [r0, #IRQ_SPSR]    @ Load irq spsr.
        ldr     r0, [r0, #IRQ_R0]       @ Restore r0.
        stmfd   sp!, {r0-r3,r12,lr}     @ Save all the rest of the irq context to the svc stack.
        mov     lr, pc
        ldr     pc, =do_irq             @ Read irq number etc. Free to re-enable irqs here.
        @ stack state: (Low) |r0|r1|r2|r3|r12|SPSR|LR_SVC|LR_IRQ| (High)

/*
 * Decision point for taking the preemption path
 */
#if !defined(CONFIG_PREEMPT_DISABLE)
        per_cpu r0, r1, current_irq_nest_count
        ldr     r0, [r0]
        cmp     r0, #1                  @ Expect 1 as the lowest, since each irq increases the preempt count by 1.
        bgt     return_to_prev_context  @ if (irq_nest > 1) return_to_prev_context();
        need_resched r0, r1             @ Sets Z if the current task needs rescheduling.
        beq     preemption_path         @ if (irq_nest == 1 && need_resched) schedule();
        @ Fall through: if (irq_nest == 1 && !need_resched) return_to_prev_context();
#endif

/*
 * Return to previous context path
 */
return_to_prev_context:
        dec_irq_nest_cnt r0, r1
        disable_irqs r0                 @ Disable irqs to avoid corrupting spsr.
        ldmfd   sp!, {r0-r3,r12,lr}     @ Restore previous context. (note, lr has spsr)
        msr     spsr_cxsf, r14          @ Restore spsr register from lr.
        @ stack state: (Low) |LR_SVC<-|LR_PREV(IRQ)|{original SP_SVC}| (High)
        ldmfd   r13!, {r14, pc}^        @ Return, restoring cpsr. Note r14 gets r14_svc,
                                        @ and pc gets lr_irq, saved at #4 and #8 offsets
                                        @ down from where the svc stack had left off.

/*
 * Preemption path
 */
#if !defined(CONFIG_PREEMPT_DISABLE)
preemption_path:
        disable_irqs r0                 @ Interrupts could corrupt the stack state.
        get_current r0                  @ Get the interrupted process
        @ stack state: (Low) |->r0|r1|r2|r3|r12|SPSR|LR_SVC|LR_IRQ| (High)
save_interrupted_context:
        add     sp, sp, #4
        @ stack state: (Low) |r0|->r1|r2|r3|r12|SPSR|LR_SVC|LR_IRQ| (High)
        ldmfd   sp!, {r1-r3, r12, lr}
        @ stack state: (Low) |r0|..|..|..|..|..|->LR_SVC|LR_IRQ| (High)
        str     lr, [r0, #CONTEXT_PSR]
        is_psr_usr lr
        add     r0, r0, #CONTEXT_R1     @ Points at the register save location for #CONTEXT_R1
        stmia   r0!, {r1-r12}
        ldmfd   sp!, {r1-r2}            @ At this point SP_SVC is at its original svc location.
        @ stack state: (Low) |r0|..|..|..|..|..|..|..|->(Original)| (High)
        @ register state: r0 = (register save loc for #CONTEXT_R13), r1 = LR_SVC, r2 = LR_IRQ
        beq     save_usr_context
save_svc_context:
        stmib   r0, {r1-r2}             @ Save LR_SVC and LR_RETURN in advancing locations.
        str     sp, [r0]                @ The current sp is where sp_svc left off, and r0 is at the #CONTEXT_SP loc.
        sub     r0, r0, #CONTEXT_R13    @ Go back to the first word from the SP position.
        ldr     r1, [sp, #-32]          @ Load r0 from the stack
        str     r1, [r0, #CONTEXT_R0]   @ Save r0
        b       prepare_schedule        @ All registers saved.
save_usr_context:
        sub     r0, r0, #CONTEXT_R13
        str     r2, [r0, #CONTEXT_PC]   @ Save the program counter
        @ LR_SVC needs restoring because it won't be pushed to the context frame. SP_SVC is already up-to-date.
        mov     lr, r1
        stmfd   sp, {sp, lr}^   @ Push USR banked regs to stack.
        @ stack state: (Low) |r0|..|..|..|..|..|SP_USR|LR_USR|->(Original)| (High)
        nop                     @ Need a NOP after twiddling with usr registers.
        sub     sp, sp, #8      @ Adjust SP, since stack ops on banked regs have no writeback.
        @ stack state: (Low) |r0|..|..|..|..|..|->SP_USR|LR_USR|(Original)| (High)
        ldmfd   sp!, {r1-r2}    @ Pop USR banked regs.
        @ stack state: (Low) |r0|..|..|..|..|..|..|..|->(Original)| (High)
        str     r1, [r0, #CONTEXT_R13]  @ Save SP_USR to the context frame.
        str     r2, [r0, #CONTEXT_R14]  @ Save LR_USR to the context frame.
        ldr     r1, [sp, #-32]
        str     r1, [r0, #CONTEXT_R0]
        @ stack state: (Low) |..|..|..|..|..|..|..|..|->(Original)| (High)
prepare_schedule:
        mov lr, pc
        ldr pc, =schedule
1:
        b       1b      /* To catch it if schedule returns in irq mode */
#endif /* End of !CONFIG_PREEMPT_DISABLE */

END_PROC(arm_irq_exception_reentrant_with_schedule)

/*
 * Context switch implementation.
 *
 * Upon entry:
 *
 * - r0 = current ktcb ptr, r1 = next ktcb ptr. r2 and r3 are insignificant.
 * - The current mode is always SVC, but the call may be coming from interrupt
 *   or process context.
 *
 * PURPOSE: Handles all paths from the irq exception, the thread_switch system
 * call, and sleeping in the kernel.
 *
 * NOTES:
 * - If coming from an interrupt, the interrupted context has already been copied
 *   to the current ktcb in the irq handler, before coming here. The interrupted
 *   context can be SVC or USR.
 * - If coming from a process context, the current process context needs saving here.
 * - From irq contexts, preemption is disabled, i.e. the preemption count is 1,
 *   because irqs naturally increase the preemption count. From process context the
 *   preemption count is 0. Process context disables preemption during schedule(),
 *   but re-enables it before calling switch_to(). Irq and process contexts are
 *   distinguished by the preemption count. Furthermore, irqs are also disabled
 *   shortly before calling switch_to() from both contexts. This happens at points
 *   where the stack state would be irrecoverable if an irq occurred.
 */
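
/*
 * Illustrative entry state (a sketch; the values and the current_ktcb /
 * next_ktcb symbols are hypothetical):
 *
 *      @ r0 = current ktcb pointer (e.g. 0x80A000)
 *      @ r1 = next ktcb pointer    (e.g. 0x80C000)
 *      @ irqs disabled; mode = SVC
 *      mov     lr, pc
 *      ldr     pc, =arch_context_switch
 */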
BEGIN_PROC(arch_context_switch)
        clear_exclusive
        in_process_context r2, r3       @ Note this depends on the preempt count being 0.
        beq     save_process_context    @ A voluntary switch needs explicit saving of the current state.
        dec_irq_nest_cnt r2, r3         @ Soon leaving irq context, so reduce the preempt count here.
        b       load_next_context       @ The interrupted context was already saved by the irq handler.
save_process_context:           @ Voluntary process schedules enter here:
        mrs     r2, cpsr_fc
        str     r2, [r0]
        stmib   r0, {r0-r14}    @ Voluntary scheduling is always in SVC mode, so using svc regs.
        str     r14, [r0, #CONTEXT_PC]  @ Store R14 as the saved PC. R14 has the return address for switch_to().
load_next_context:
        @ stack state: (Low) |..|..|..|..|..|..|..|..|..|->(Original)| (High)
        mov     sp, r1
        ldr     r0, [sp, #CONTEXT_PSR]  @ Load r0 with the saved PSR
        clr_irq_bits_on_reg r0          @ Enable irqs on the will-be-restored context.
        msr     spsr_fcxs, r0           @ Restore spsr from r0.
        is_psr_usr r0
        bne load_next_context_svc       @ Loading a user context is different from svc.
load_next_context_usr:
        ldmib   sp, {r0-r14}^           @ Load all, including banked user regs.
        ldr     lr, [sp, #CONTEXT_PC]   @ Load the value of PC to r14
        orr     sp, sp, #0xFF0
        orr     sp, sp, #0x8            @ 8-byte aligned.
        movs    pc, lr                  @ Jump to user, changing modes.
load_next_context_svc:
        ldmib   sp, {r0-r15}^           @ Switch to the svc context and jump, loading R13 and R14 from the stack.
                                        @ This is OK since the jump is to the current context.
END_PROC(arch_context_switch)


/*
 * vect_fiq
 *
 * Upon Entry:
 * - R14:       Address of the next instruction after the interrupted instruction.
 * - PC:        0x00000014
 * - FIQs are disabled (CPSR[6] = 1)
 * - IRQs are disabled (CPSR[7] = 1)
 * - As with IRQs, the interrupt controller provides registers that indicate
 *   what kind of interrupt has occurred.
 *
 * PURPOSE:
 * Handling of high-priority interrupts. FIQs have the highest priority after
 * the reset and data abort exceptions. They're mainly used for achieving
 * low-latency interrupts, e.g. for DMA.
 */
BEGIN_PROC(arm_fiq_exception)
END_PROC(arm_fiq_exception)

/* * * * * * * * * * * * * * * * * * * * * * * *
 * External functions with absolute addresses  *
 * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * NOTE: Notes on relative and absolute symbols in this file:
 *
 * Note that branches (B and BL) are *RELATIVE* on ARM, so no special action
 * is needed to access symbols within this file, even though this page
 * (in virtual memory) is relocated to another address at run-time (high or
 * low vectors) - an address other than the one it is linked at, at
 * compile-time.
 *
 * To access external symbols from this file (e.g. calling some function in the
 * kernel) one needs to use the `LDR pc, =external_symbol' pseudo-instruction
 * (note the "=") and use absolute addressing. This automatically generates an
 * inline data word within the current module and indirectly loads the value in
 * that word to resolve the undefined reference. All other methods (LDR and B
 * instructions, or the ADR pseudo-instruction) generate relative addresses, and
 * they will complain about external symbols because a relative offset cannot be
 * calculated for an unknown distance. In conclusion, relative branches are
 * useful for accessing symbols on this page, but they mean nothing outside this
 * page, because the page is relocated at run-time. So, wherever you make a
 * *relative* access outside this page, it would be *relative* to where this
 * page is at that moment.
 */
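
/*
 * In short, a sketch of the two cases:
 *
 *      b       1f                      @ OK: relative, target is on this page
 *      ldr     pc, =do_irq             @ OK: absolute, target is off this page
 *      b       do_irq                  @ WRONG here: a relative offset to an
 *                                      @ off-page symbol breaks once the page
 *                                      @ is relocated
 */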

/* * * * * * * * * * * * * * * * *
 * Stacks for Exception Vectors  *
 * * * * * * * * * * * * * * * * */
.global __stacks_end;
.global __abt_stack_high;
.global __irq_stack_high;
.global __fiq_stack_high;
.global __und_stack_high;

/*
 * These are also linked at high vectors, just as any other symbol
 * on this page.
 */
.balign 4
.equ __abt_stack_high, (__abt_stack - __vector_vaddr + 0xFFFF0000);
.equ __irq_stack_high, (__irq_stack - __vector_vaddr + 0xFFFF0000);
.equ __fiq_stack_high, (__fiq_stack - __vector_vaddr + 0xFFFF0000);
.equ __und_stack_high, (__und_stack - __vector_vaddr + 0xFFFF0000);
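
/*
 * Worked example (offsets illustrative): if __abt_stack lies 0x120 bytes
 * past __vector_vaddr in the linked image, then
 * __abt_stack_high = 0xFFFF0000 + 0x120 = 0xFFFF0120, i.e. the same
 * offset within the relocated high-vectors page.
 */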

/*
 * NOTE: This could be cache line aligned.
 * (use a macro, e.g. ____arm_asm_cache_aligned)
 */
.balign 4

/* 16 bytes each per-cpu, up to 8 cpus */
__stacks_end:   .space 128
__abt_stack:    .space 128
__irq_stack:    .space 128
__fiq_stack:    .space 128
__und_stack:    .space 128
