OpenCores Subversion repository: or1k_soc_on_altera_embedded_dev_kit
URL: https://opencores.org/ocsvn/or1k_soc_on_altera_embedded_dev_kit/or1k_soc_on_altera_embedded_dev_kit/trunk

File: trunk/linux-2.6/linux-2.6.24/arch/powerpc/kernel/head_64.S (rev 3)
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *
 *  Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
 *    Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
 *
 *  This file contains the low-level support and setup for the
 *  PowerPC-64 platform, including trap and interrupt dispatch.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/bug.h>
#include <asm/cputable.h>
#include <asm/setup.h>
#include <asm/hvcall.h>
#include <asm/iseries/lpar_map.h>
#include <asm/thread_info.h>
#include <asm/firmware.h>
#include <asm/page_64.h>
#include <asm/exception.h>

#define DO_SOFT_DISABLE

/*
 * We lay out physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x2fff : pSeries Interrupt prologs
 * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
 * 0x6000 - 0x6fff : Initial (CPU0) segment table
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x8000 -        : Early init and support code
 */

/*
 *   SPRG Usage
 *
 *   Register   Definition
 *
 *   SPRG0      reserved for hypervisor
 *   SPRG1      temp - used to save gpr
 *   SPRG2      temp - used to save gpr
 *   SPRG3      virt addr of paca
 */

/*
 * Entering into this code we make the following assumptions:
 *  For pSeries:
 *   1. The MMU is off & open firmware is running in real mode.
 *   2. The kernel is entered at __start
 *
 *  For iSeries:
 *   1. The MMU is on (as it always is for iSeries)
 *   2. The kernel is entered at system_reset_iSeries
 */

        .text
        .globl  _stext
_stext:
_GLOBAL(__start)
        /* NOP this out unconditionally */
BEGIN_FTR_SECTION
        b       .__start_initialization_multiplatform
END_FTR_SECTION(0, 1)

        /* Catch branch to 0 in real mode */
        trap

        /* Secondary processors spin on this value until it goes to 1. */
        .globl  __secondary_hold_spinloop
__secondary_hold_spinloop:
        .llong  0x0

        /* Secondary processors write this value with their cpu # */
        /* after they enter the spin loop immediately below.      */
        .globl  __secondary_hold_acknowledge
__secondary_hold_acknowledge:
        .llong  0x0

#ifdef CONFIG_PPC_ISERIES
        /*
         * At offset 0x20, there is a pointer to iSeries LPAR data.
         * This is required by the hypervisor
         */
        . = 0x20
        .llong hvReleaseData-KERNELBASE
#endif /* CONFIG_PPC_ISERIES */

        . = 0x60
/*
 * The following code is used to hold secondary processors
 * in a spin loop after they have entered the kernel, but
 * before the bulk of the kernel has been relocated.  This code
 * is relocated to physical address 0x60 before prom_init is run.
 * All of it must fit below the first exception vector at 0x100.
 */
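/*
 * Release handshake, as implemented below: each secondary stores its
 * physical cpu id to __secondary_hold_acknowledge, then spins reading
 * __secondary_hold_spinloop until the master sets it to 1, at which
 * point it branches to .generic_secondary_smp_init.
 */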
_GLOBAL(__secondary_hold)
        mfmsr   r24
        ori     r24,r24,MSR_RI
        mtmsrd  r24                     /* RI on */

        /* Grab our physical cpu number */
        mr      r24,r3

        /* Tell the master cpu we're here */
        /* Relocation is off & we are located at an address less */
        /* than 0x100, so only need to grab low order offset.    */
        std     r24,__secondary_hold_acknowledge@l(0)
        sync

        /* All secondary cpus wait here until told to start. */
100:    ld      r4,__secondary_hold_spinloop@l(0)
        cmpdi   0,r4,1
        bne     100b

#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
        LOAD_REG_IMMEDIATE(r4, .generic_secondary_smp_init)
        mtctr   r4
        mr      r3,r24
        bctr
#else
        BUG_OPCODE
#endif

/* This value is used to mark exception frames on the stack. */
        .section ".toc","aw"
exception_marker:
        .tc     ID_72656773_68657265[TC],0x7265677368657265
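        /* 0x7265677368657265 is the ASCII string "regshere": an
         * eyecatcher marking saved-register frames in memory dumps. */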
        .text

/*
 * This is the start of the interrupt handlers for pSeries
 * This code runs with relocation off.
 */
        . = 0x100
        .globl __start_interrupts
__start_interrupts:

        STD_EXCEPTION_PSERIES(0x100, system_reset)

        . = 0x200
_machine_check_pSeries:
        HMT_MEDIUM
        mtspr   SPRN_SPRG1,r13          /* save r13 */
        EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)

        . = 0x300
        .globl data_access_pSeries
data_access_pSeries:
        HMT_MEDIUM
        mtspr   SPRN_SPRG1,r13
BEGIN_FTR_SECTION
        mtspr   SPRN_SPRG2,r12
        mfspr   r13,SPRN_DAR
        mfspr   r12,SPRN_DSISR
        srdi    r13,r13,60
        rlwimi  r13,r12,16,0x20
        mfcr    r12
        cmpwi   r13,0x2c
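        /* 0x2c = DAR nibble 0xc (kernel linear mapping) combined with
         * DSISR_NOSEGMENT (0x00200000, rotated in above as 0x20):
         * an STE miss on a bolted kernel segment. */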
        beq     do_stab_bolted_pSeries
        mtcrf   0x80,r12
        mfspr   r12,SPRN_SPRG2
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
        EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)

        . = 0x380
        .globl data_access_slb_pSeries
data_access_slb_pSeries:
        HMT_MEDIUM
        mtspr   SPRN_SPRG1,r13
        mfspr   r13,SPRN_SPRG3          /* get paca address into r13 */
        std     r3,PACA_EXSLB+EX_R3(r13)
        mfspr   r3,SPRN_DAR
        std     r9,PACA_EXSLB+EX_R9(r13)        /* save r9 - r12 */
        mfcr    r9
#ifdef __DISABLED__
        /* Keep that around for when we re-implement dynamic VSIDs */
        cmpdi   r3,0
        bge     slb_miss_user_pseries
#endif /* __DISABLED__ */
        std     r10,PACA_EXSLB+EX_R10(r13)
        std     r11,PACA_EXSLB+EX_R11(r13)
        std     r12,PACA_EXSLB+EX_R12(r13)
        mfspr   r10,SPRN_SPRG1
        std     r10,PACA_EXSLB+EX_R13(r13)
        mfspr   r12,SPRN_SRR1           /* and SRR1 */
        b       .slb_miss_realmode      /* Rel. branch works in real mode */

        STD_EXCEPTION_PSERIES(0x400, instruction_access)

        . = 0x480
        .globl instruction_access_slb_pSeries
instruction_access_slb_pSeries:
        HMT_MEDIUM
        mtspr   SPRN_SPRG1,r13
        mfspr   r13,SPRN_SPRG3          /* get paca address into r13 */
        std     r3,PACA_EXSLB+EX_R3(r13)
        mfspr   r3,SPRN_SRR0            /* SRR0 is faulting address */
        std     r9,PACA_EXSLB+EX_R9(r13)        /* save r9 - r12 */
        mfcr    r9
#ifdef __DISABLED__
        /* Keep that around for when we re-implement dynamic VSIDs */
        cmpdi   r3,0
        bge     slb_miss_user_pseries
#endif /* __DISABLED__ */
        std     r10,PACA_EXSLB+EX_R10(r13)
        std     r11,PACA_EXSLB+EX_R11(r13)
        std     r12,PACA_EXSLB+EX_R12(r13)
        mfspr   r10,SPRN_SPRG1
        std     r10,PACA_EXSLB+EX_R13(r13)
        mfspr   r12,SPRN_SRR1           /* and SRR1 */
        b       .slb_miss_realmode      /* Rel. branch works in real mode */

        MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt)
        STD_EXCEPTION_PSERIES(0x600, alignment)
        STD_EXCEPTION_PSERIES(0x700, program_check)
        STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
        MASKABLE_EXCEPTION_PSERIES(0x900, decrementer)
        STD_EXCEPTION_PSERIES(0xa00, trap_0a)
        STD_EXCEPTION_PSERIES(0xb00, trap_0b)

        . = 0xc00
        .globl  system_call_pSeries
system_call_pSeries:
        HMT_MEDIUM
        mr      r9,r13
        mfmsr   r10
        mfspr   r13,SPRN_SPRG3
        mfspr   r11,SPRN_SRR0
        clrrdi  r12,r13,32
        oris    r12,r12,system_call_common@h
        ori     r12,r12,system_call_common@l
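        /* r13 holds the paca vaddr; clearing its low 32 bits yields the
         * kernel region base, onto which the low 32 bits of
         * system_call_common's address are OR'd to form the handler's
         * virtual address for the rfid below (this relies on the kernel
         * image living in the first 4GB of the linear mapping). */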
        mtspr   SPRN_SRR0,r12
        ori     r10,r10,MSR_IR|MSR_DR|MSR_RI
        mfspr   r12,SPRN_SRR1
        mtspr   SPRN_SRR1,r10
        rfid
        b       .       /* prevent speculative execution */

        STD_EXCEPTION_PSERIES(0xd00, single_step)
        STD_EXCEPTION_PSERIES(0xe00, trap_0e)

        /* We need to deal with the Altivec unavailable exception
         * here, which is at 0xf20 and thus in the middle of the
         * prolog code of the performance monitor one. A little
         * trickery is thus necessary.
         */
        . = 0xf00
        b       performance_monitor_pSeries

        STD_EXCEPTION_PSERIES(0xf20, altivec_unavailable)

#ifdef CONFIG_CBE_RAS
        HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error)
#endif /* CONFIG_CBE_RAS */
        STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
#ifdef CONFIG_CBE_RAS
        HSTD_EXCEPTION_PSERIES(0x1600, cbe_maintenance)
#endif /* CONFIG_CBE_RAS */
        STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
#ifdef CONFIG_CBE_RAS
        HSTD_EXCEPTION_PSERIES(0x1800, cbe_thermal)
#endif /* CONFIG_CBE_RAS */

        . = 0x3000

/*** pSeries interrupt support ***/

        /* moved from 0xf00 */
        STD_EXCEPTION_PSERIES(., performance_monitor)

/*
 * An interrupt came in while soft-disabled; clear EE in SRR1,
 * clear paca->hard_enabled and return.
 */
masked_interrupt:
        stb     r10,PACAHARDIRQEN(r13)
        mtcrf   0x80,r9
        ld      r9,PACA_EXGEN+EX_R9(r13)
        mfspr   r10,SPRN_SRR1
        rldicl  r10,r10,48,1            /* clear MSR_EE */
        rotldi  r10,r10,16
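        /* The rldicl/rotldi pair adds up to a full 64-bit rotation:
         * the first rotate parks MSR_EE (bit 48) at the most
         * significant bit, where the rldicl mask clears it; the
         * rotldi 16 then restores every other bit to its place. */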
        mtspr   SPRN_SRR1,r10
        ld      r10,PACA_EXGEN+EX_R10(r13)
        mfspr   r13,SPRN_SPRG1
        rfid
        b       .

        .align  7
do_stab_bolted_pSeries:
        mtcrf   0x80,r12
        mfspr   r12,SPRN_SPRG2
        EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)

/*
 * We have some room here; we use it to put the pSeries SLB miss user
 * trampoline code, so it's reasonably far away from
 * slb_miss_user_common to avoid problems with rfid.
 *
 * This is used when the SLB miss handler has to go virtual, which
 * doesn't happen for now but will once we re-implement dynamic VSIDs
 * for shared page tables.
 */
#ifdef __DISABLED__
slb_miss_user_pseries:
        std     r10,PACA_EXGEN+EX_R10(r13)
        std     r11,PACA_EXGEN+EX_R11(r13)
        std     r12,PACA_EXGEN+EX_R12(r13)
        mfspr   r10,SPRG1
        ld      r11,PACA_EXSLB+EX_R9(r13)
        ld      r12,PACA_EXSLB+EX_R3(r13)
        std     r10,PACA_EXGEN+EX_R13(r13)
        std     r11,PACA_EXGEN+EX_R9(r13)
        std     r12,PACA_EXGEN+EX_R3(r13)
        clrrdi  r12,r13,32
        mfmsr   r10
        mfspr   r11,SRR0                        /* save SRR0 */
        ori     r12,r12,slb_miss_user_common@l  /* virt addr of handler */
        ori     r10,r10,MSR_IR|MSR_DR|MSR_RI
        mtspr   SRR0,r12
        mfspr   r12,SRR1                        /* and SRR1 */
        mtspr   SRR1,r10
        rfid
        b       .                               /* prevent spec. execution */
#endif /* __DISABLED__ */

#ifdef CONFIG_PPC_PSERIES
/*
 * Vectors for the FWNMI option.  Share common code.
 */
        .globl system_reset_fwnmi
        .align 7
system_reset_fwnmi:
        HMT_MEDIUM
        mtspr   SPRN_SPRG1,r13          /* save r13 */
        EXCEPTION_PROLOG_PSERIES_FORCE_64BIT(PACA_EXGEN, system_reset_common)

        .globl machine_check_fwnmi
        .align 7
machine_check_fwnmi:
        HMT_MEDIUM
        mtspr   SPRN_SPRG1,r13          /* save r13 */
        EXCEPTION_PROLOG_PSERIES_FORCE_64BIT(PACA_EXMC, machine_check_common)

#endif /* CONFIG_PPC_PSERIES */

/*** Common interrupt handlers ***/

        STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)

        /*
         * Machine check is different because we use a different
         * save area: PACA_EXMC instead of PACA_EXGEN.
         */
        .align  7
        .globl machine_check_common
machine_check_common:
        EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
        FINISH_NAP
        DISABLE_INTS
        bl      .save_nvgprs
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      .machine_check_exception
        b       .ret_from_except

        STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
        STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
        STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
        STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
        STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
        STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception)
        STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
#ifdef CONFIG_ALTIVEC
        STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
#else
        STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
#endif
#ifdef CONFIG_CBE_RAS
        STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
        STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
        STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
#endif /* CONFIG_CBE_RAS */

/*
 * Here we have detected that the kernel stack pointer is bad.
 * r9 contains the saved CR, r13 points to the paca,
 * r10 contains the (bad) kernel stack pointer,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * We switch to using an emergency stack, save the registers there,
 * and call kernel_bad_stack(), which panics.
 */
bad_stack:
        ld      r1,PACAEMERGSP(r13)
        subi    r1,r1,64+INT_FRAME_SIZE
        std     r9,_CCR(r1)
        std     r10,GPR1(r1)
        std     r11,_NIP(r1)
        std     r12,_MSR(r1)
        mfspr   r11,SPRN_DAR
        mfspr   r12,SPRN_DSISR
        std     r11,_DAR(r1)
        std     r12,_DSISR(r1)
        mflr    r10
        mfctr   r11
        mfxer   r12
        std     r10,_LINK(r1)
        std     r11,_CTR(r1)
        std     r12,_XER(r1)
        SAVE_GPR(0,r1)
        SAVE_GPR(2,r1)
        SAVE_4GPRS(3,r1)
        SAVE_2GPRS(7,r1)
        SAVE_10GPRS(12,r1)
        SAVE_10GPRS(22,r1)
        lhz     r12,PACA_TRAP_SAVE(r13)
        std     r12,_TRAP(r1)
        addi    r11,r1,INT_FRAME_SIZE
        std     r11,0(r1)
        li      r12,0
        std     r12,0(r11)
        ld      r2,PACATOC(r13)
1:      addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      .kernel_bad_stack
        b       1b

/*
 * Return from an exception with minimal checks.
 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
 * If interrupts have been enabled, or anything has been
 * done that might have changed the scheduling status of
 * any task or sent any task a signal, you should use
 * ret_from_except or ret_from_except_lite instead of this.
 */
fast_exc_return_irq:                    /* restores irq state too */
        ld      r3,SOFTE(r1)
        ld      r12,_MSR(r1)
        stb     r3,PACASOFTIRQEN(r13)   /* restore paca->soft_enabled */
        rldicl  r4,r12,49,63            /* get MSR_EE to LSB */
        stb     r4,PACAHARDIRQEN(r13)   /* restore paca->hard_enabled */
        b       1f

        .globl  fast_exception_return
fast_exception_return:
        ld      r12,_MSR(r1)
1:      ld      r11,_NIP(r1)
        andi.   r3,r12,MSR_RI           /* check if RI is set */
        beq-    unrecov_fer

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        andi.   r3,r12,MSR_PR
        beq     2f
        ACCOUNT_CPU_USER_EXIT(r3, r4)
2:
#endif

        ld      r3,_CCR(r1)
        ld      r4,_LINK(r1)
        ld      r5,_CTR(r1)
        ld      r6,_XER(r1)
        mtcr    r3
        mtlr    r4
        mtctr   r5
        mtxer   r6
        REST_GPR(0, r1)
        REST_8GPRS(2, r1)

        mfmsr   r10
        rldicl  r10,r10,48,1            /* clear EE */
        rldicr  r10,r10,16,61           /* clear RI (LE is 0 already) */
        mtmsrd  r10,1

        mtspr   SPRN_SRR1,r12
        mtspr   SPRN_SRR0,r11
        REST_4GPRS(10, r1)
        ld      r1,GPR1(r1)
        rfid
        b       .       /* prevent speculative execution */

unrecov_fer:
        bl      .save_nvgprs
1:      addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      .unrecoverable_exception
        b       1b

/*
 * Here r13 points to the paca, r9 contains the saved CR,
 * SRR0 and SRR1 are saved in r11 and r12,
 * r9 - r13 are saved in paca->exgen.
 */
        .align  7
        .globl data_access_common
data_access_common:
        mfspr   r10,SPRN_DAR
        std     r10,PACA_EXGEN+EX_DAR(r13)
        mfspr   r10,SPRN_DSISR
        stw     r10,PACA_EXGEN+EX_DSISR(r13)
        EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
        ld      r3,PACA_EXGEN+EX_DAR(r13)
        lwz     r4,PACA_EXGEN+EX_DSISR(r13)
        li      r5,0x300
        b       .do_hash_page           /* Try to handle as hpte fault */

        .align  7
        .globl instruction_access_common
instruction_access_common:
        EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
        ld      r3,_NIP(r1)
        andis.  r4,r12,0x5820
        li      r5,0x400
        b       .do_hash_page           /* Try to handle as hpte fault */

/*
 * Here is the common SLB miss user handler. It is used when going to
 * virtual mode for SLB misses; it is currently unused.
 */
#ifdef __DISABLED__
        .align  7
        .globl  slb_miss_user_common
slb_miss_user_common:
        mflr    r10
        std     r3,PACA_EXGEN+EX_DAR(r13)
        stw     r9,PACA_EXGEN+EX_CCR(r13)
        std     r10,PACA_EXGEN+EX_LR(r13)
        std     r11,PACA_EXGEN+EX_SRR0(r13)
        bl      .slb_allocate_user

        ld      r10,PACA_EXGEN+EX_LR(r13)
        ld      r3,PACA_EXGEN+EX_R3(r13)
        lwz     r9,PACA_EXGEN+EX_CCR(r13)
        ld      r11,PACA_EXGEN+EX_SRR0(r13)
        mtlr    r10
        beq-    slb_miss_fault

        andi.   r10,r12,MSR_RI          /* check for unrecoverable exception */
        beq-    unrecov_user_slb
        mfmsr   r10

.machine push
.machine "power4"
        mtcrf   0x80,r9
.machine pop

        clrrdi  r10,r10,2               /* clear RI before setting SRR0/1 */
        mtmsrd  r10,1

        mtspr   SRR0,r11
        mtspr   SRR1,r12

        ld      r9,PACA_EXGEN+EX_R9(r13)
        ld      r10,PACA_EXGEN+EX_R10(r13)
        ld      r11,PACA_EXGEN+EX_R11(r13)
        ld      r12,PACA_EXGEN+EX_R12(r13)
        ld      r13,PACA_EXGEN+EX_R13(r13)
        rfid
        b       .

slb_miss_fault:
        EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
        ld      r4,PACA_EXGEN+EX_DAR(r13)
        li      r5,0
        std     r4,_DAR(r1)
        std     r5,_DSISR(r1)
        b       handle_page_fault

unrecov_user_slb:
        EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
        DISABLE_INTS
        bl      .save_nvgprs
1:      addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      .unrecoverable_exception
        b       1b

#endif /* __DISABLED__ */


/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r12 contains the saved SRR1, SRR0 is still ready for return
 * r3 has the faulting address
 * r9 - r13 are saved in paca->exslb.
 * r3 is saved in paca->slb_r3
 * We assume we aren't going to take any exceptions during this procedure.
 */
_GLOBAL(slb_miss_realmode)
        mflr    r10

        stw     r9,PACA_EXSLB+EX_CCR(r13)       /* save CR in exc. frame */
        std     r10,PACA_EXSLB+EX_LR(r13)       /* save LR */

        bl      .slb_allocate_realmode

        /* All done -- return from exception. */

        ld      r10,PACA_EXSLB+EX_LR(r13)
        ld      r3,PACA_EXSLB+EX_R3(r13)
        lwz     r9,PACA_EXSLB+EX_CCR(r13)       /* get saved CR */
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
        ld      r11,PACALPPACAPTR(r13)
        ld      r11,LPPACASRR0(r11)             /* get SRR0 value */
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */

        mtlr    r10

        andi.   r10,r12,MSR_RI  /* check for unrecoverable exception */
        beq-    unrecov_slb

.machine        push
.machine        "power4"
        mtcrf   0x80,r9
        mtcrf   0x01,r9         /* slb_allocate uses cr0 and cr7 */
.machine        pop

#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
        mtspr   SPRN_SRR0,r11
        mtspr   SPRN_SRR1,r12
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */
        ld      r9,PACA_EXSLB+EX_R9(r13)
        ld      r10,PACA_EXSLB+EX_R10(r13)
        ld      r11,PACA_EXSLB+EX_R11(r13)
        ld      r12,PACA_EXSLB+EX_R12(r13)
        ld      r13,PACA_EXSLB+EX_R13(r13)
        rfid
        b       .       /* prevent speculative execution */

unrecov_slb:
        EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
        DISABLE_INTS
        bl      .save_nvgprs
1:      addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      .unrecoverable_exception
        b       1b

        .align  7
        .globl hardware_interrupt_common
        .globl hardware_interrupt_entry
hardware_interrupt_common:
        EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
        FINISH_NAP
hardware_interrupt_entry:
        DISABLE_INTS
BEGIN_FTR_SECTION
        bl      .ppc64_runlatch_on
END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      .do_IRQ
        b       .ret_from_except_lite

#ifdef CONFIG_PPC_970_NAP
power4_fixup_nap:
        andc    r9,r9,r10
        std     r9,TI_LOCAL_FLAGS(r11)
        ld      r10,_LINK(r1)           /* make idle task do the */
        std     r10,_NIP(r1)            /* equivalent of a blr */
        blr
#endif

        .align  7
        .globl alignment_common
alignment_common:
        mfspr   r10,SPRN_DAR
        std     r10,PACA_EXGEN+EX_DAR(r13)
        mfspr   r10,SPRN_DSISR
        stw     r10,PACA_EXGEN+EX_DSISR(r13)
        EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
        ld      r3,PACA_EXGEN+EX_DAR(r13)
        lwz     r4,PACA_EXGEN+EX_DSISR(r13)
        std     r3,_DAR(r1)
        std     r4,_DSISR(r1)
        bl      .save_nvgprs
        addi    r3,r1,STACK_FRAME_OVERHEAD
        ENABLE_INTS
        bl      .alignment_exception
        b       .ret_from_except

        .align  7
        .globl program_check_common
program_check_common:
        EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
        bl      .save_nvgprs
        addi    r3,r1,STACK_FRAME_OVERHEAD
        ENABLE_INTS
        bl      .program_check_exception
        b       .ret_from_except

        .align  7
        .globl fp_unavailable_common
fp_unavailable_common:
        EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
        bne     1f                      /* if from user, just load it up */
        bl      .save_nvgprs
        addi    r3,r1,STACK_FRAME_OVERHEAD
        ENABLE_INTS
        bl      .kernel_fp_unavailable_exception
        BUG_OPCODE
1:      b       .load_up_fpu

        .align  7
        .globl altivec_unavailable_common
altivec_unavailable_common:
        EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
        bne     .load_up_altivec        /* if from user, just load it up */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
        bl      .save_nvgprs
        addi    r3,r1,STACK_FRAME_OVERHEAD
        ENABLE_INTS
        bl      .altivec_unavailable_exception
        b       .ret_from_except

#ifdef CONFIG_ALTIVEC
/*
 * load_up_altivec(unused, unused, tsk)
 * Disable VMX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 * On SMP we know the VMX is free, since we give it up every
 * switch (ie, no lazy save of the vector registers).
 * On entry: r13 == 'current' && last_task_used_altivec != 'current'
 */
_STATIC(load_up_altivec)
        mfmsr   r5                      /* grab the current MSR */
        oris    r5,r5,MSR_VEC@h
        mtmsrd  r5                      /* enable use of VMX now */
        isync

/*
 * For SMP, we don't do lazy VMX switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_altivec in switch_to.
 * VRSAVE isn't dealt with here, that is done in the normal context
 * switch code. Note that we could rely on the vrsave value to
 * eventually avoid saving all of the VREGs here...
 */
#ifndef CONFIG_SMP
        ld      r3,last_task_used_altivec@got(r2)
        ld      r4,0(r3)
        cmpdi   0,r4,0
        beq     1f
        /* Save VMX state to last_task_used_altivec's THREAD struct */
        addi    r4,r4,THREAD
        SAVE_32VRS(0,r5,r4)
        mfvscr  vr0
        li      r10,THREAD_VSCR
        stvx    vr0,r10,r4
        /* Disable VMX for last_task_used_altivec */
        ld      r5,PT_REGS(r4)
        ld      r4,_MSR-STACK_FRAME_OVERHEAD(r5)
        lis     r6,MSR_VEC@h
        andc    r4,r4,r6
        std     r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
        /* Hack: if we get an altivec unavailable trap with VRSAVE
         * set to all zeros, we assume this is a broken application
         * that fails to set it properly, and thus we switch it to
         * all 1's
         */
        mfspr   r4,SPRN_VRSAVE
        cmpdi   0,r4,0
        bne+    1f
        li      r4,-1
        mtspr   SPRN_VRSAVE,r4
1:
        /* enable use of VMX after return */
        ld      r4,PACACURRENT(r13)
        addi    r5,r4,THREAD            /* Get THREAD */
        oris    r12,r12,MSR_VEC@h
        std     r12,_MSR(r1)
        li      r4,1
        li      r10,THREAD_VSCR
        stw     r4,THREAD_USED_VR(r5)
        lvx     vr0,r10,r5
        mtvscr  vr0
        REST_32VRS(0,r4,r5)
#ifndef CONFIG_SMP
        /* Update last_task_used_altivec to 'current' */
        subi    r4,r5,THREAD            /* Back to 'current' */
        std     r4,0(r3)
#endif /* CONFIG_SMP */
        /* restore registers and return */
        b       fast_exception_return
#endif /* CONFIG_ALTIVEC */

/*
 * Hash table stuff
 */
        .align  7
_GLOBAL(do_hash_page)
        std     r3,_DAR(r1)
        std     r4,_DSISR(r1)

        andis.  r0,r4,0xa450            /* weird error? */
        bne-    handle_page_fault       /* if not, try to insert a HPTE */
BEGIN_FTR_SECTION
        andis.  r0,r4,0x0020            /* Is it a segment table fault? */
        bne-    do_ste_alloc            /* If so handle it */
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)

        /*
         * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
         * accessing a userspace segment (even from the kernel). We assume
         * kernel addresses always have the high bit set.
         */
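        /*
         * Worked example: any access to a userspace address (high bit
         * clear) gives ~high_bit = 1, so _PAGE_USER is set regardless
         * of MSR_PR; a kernel-mode access (MSR_PR clear) to a kernel
         * address (high bit set) gives 0 | ~1 = 0, leaving _PAGE_USER
         * clear.
         */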
        rlwinm  r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */
        rotldi  r0,r3,15                /* Move high bit into MSR_PR posn */
        orc     r0,r12,r0               /* MSR_PR | ~high_bit */
        rlwimi  r4,r0,32-13,30,30       /* becomes _PAGE_USER access bit */
        ori     r4,r4,1                 /* add _PAGE_PRESENT */
        rlwimi  r4,r5,22+2,31-2,31-2    /* Set _PAGE_EXEC if trap is 0x400 */

        /*
         * On iSeries, we soft-disable interrupts here, then
         * hard-enable interrupts so that the hash_page code can spin on
         * the hash_table_lock without problems on a shared processor.
         */
        DISABLE_INTS

        /*
         * r3 contains the faulting address
         * r4 contains the required access permissions
         * r5 contains the trap number
         *
         * at return r3 = 0 for success
         */
        bl      .hash_page              /* build HPTE if possible */
        cmpdi   r3,0                    /* see if hash_page succeeded */

#ifdef DO_SOFT_DISABLE
BEGIN_FW_FTR_SECTION
        /*
         * If we had interrupts soft-enabled at the point where the
         * DSI/ISI occurred, and an interrupt came in during hash_page,
         * handle it now.
         * We jump to ret_from_except_lite rather than fast_exception_return
         * because ret_from_except_lite will check for and handle pending
         * interrupts if necessary.
         */
        beq     13f
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif
BEGIN_FW_FTR_SECTION
        /*
         * Here we have interrupts hard-disabled, so it is sufficient
         * to restore paca->{soft,hard}_enable and get out.
         */
        beq     fast_exc_return_irq     /* Return from exception on success */
END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)

        /* For a hash failure, we don't bother re-enabling interrupts */
        ble-    12f

        /*
         * hash_page couldn't handle it, set soft interrupt enable back
         * to what it was before the trap.  Note that .local_irq_restore
         * handles any interrupts pending at this point.
         */
        ld      r3,SOFTE(r1)
        bl      .local_irq_restore
        b       11f

/* Here we have a page fault that hash_page can't handle. */
handle_page_fault:
        ENABLE_INTS
11:     ld      r4,_DAR(r1)
        ld      r5,_DSISR(r1)
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      .do_page_fault
        cmpdi   r3,0
        beq+    13f
        bl      .save_nvgprs
        mr      r5,r3
        addi    r3,r1,STACK_FRAME_OVERHEAD
        lwz     r4,_DAR(r1)
        bl      .bad_page_fault
        b       .ret_from_except

13:     b       .ret_from_except_lite

/* We have a page fault that hash_page could handle but HV refused
 * the PTE insertion
 */
12:     bl      .save_nvgprs
        addi    r3,r1,STACK_FRAME_OVERHEAD
        ld      r4,_DAR(r1)
        bl      .low_hash_fault
        b       .ret_from_except

        /* here we have a segment miss */
do_ste_alloc:
        bl      .ste_allocate           /* try to insert stab entry */
        cmpdi   r3,0
        bne-    handle_page_fault
        b       fast_exception_return

/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * r9 - r13 are saved in paca->exslb.
 * We assume we aren't going to take any exceptions during this procedure.
 * We assume (DAR >> 60) == 0xc.
 */
        .align  7
_GLOBAL(do_stab_bolted)
        stw     r9,PACA_EXSLB+EX_CCR(r13)       /* save CR in exc. frame */
        std     r11,PACA_EXSLB+EX_SRR0(r13)     /* save SRR0 in exc. frame */

        /* Hash to the primary group */
        ld      r10,PACASTABVIRT(r13)
        mfspr   r11,SPRN_DAR
        srdi    r11,r11,28
        rldimi  r10,r11,7,52    /* r10 = first ste of the group */

        /* Calculate VSID */
        /* This is a kernel address, so protovsid = ESID */
        ASM_VSID_SCRAMBLE(r11, r9, 256M)
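        /* ASM_VSID_SCRAMBLE (see asm/mmu-hash64.h) computes
         * vsid = (protovsid * VSID_MULTIPLIER) % VSID_MODULUS,
         * a multiplicative hash that spreads consecutive ESIDs
         * across the VSID space. */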
        rldic   r9,r11,12,16    /* r9 = vsid << 12 */

        /* Search the primary group for a free entry */
1:      ld      r11,0(r10)      /* Test valid bit of the current ste    */
        andi.   r11,r11,0x80
        beq     2f
        addi    r10,r10,16
        andi.   r11,r10,0x70
        bne     1b

        /* Stick to only searching the primary group for now.           */
        /* At least for now, we use a very simple random castout scheme */
        /* Use the TB as a random number; OR in 0x10 to avoid entry 0   */
        mftb    r11
        rldic   r11,r11,4,57    /* r11 = (r11 << 4) & 0x70 */
        ori     r11,r11,0x10

        /* r10 currently points to an ste one past the group of interest */
        /* make it point to the randomly selected entry                 */
        subi    r10,r10,128
        or      r10,r10,r11     /* r10 is the entry to invalidate       */

        isync                   /* mark the entry invalid               */
        ld      r11,0(r10)
        rldicl  r11,r11,56,1    /* clear the valid bit */
        rotldi  r11,r11,8
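        /* Same rotate-mask-rotate idiom as in masked_interrupt above:
         * rotl 56 parks the valid bit (0x80) at the most-significant
         * bit, the mask clears it, and rotl 8 completes the full
         * 64-bit rotation. */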
        std     r11,0(r10)
        sync

        clrrdi  r11,r11,28      /* Get the esid part of the ste         */
        slbie   r11

2:      std     r9,8(r10)       /* Store the vsid part of the ste       */
        eieio

        mfspr   r11,SPRN_DAR    /* Get the new esid                     */
        clrrdi  r11,r11,28      /* Permits a full 32b of ESID           */
        ori     r11,r11,0x90    /* Turn on valid and kp                 */
        std     r11,0(r10)      /* Put new entry back into the stab     */

        sync

        /* All done -- return from exception. */
        lwz     r9,PACA_EXSLB+EX_CCR(r13)       /* get saved CR */
        ld      r11,PACA_EXSLB+EX_SRR0(r13)     /* get saved SRR0 */

        andi.   r10,r12,MSR_RI
        beq-    unrecov_slb

        mtcrf   0x80,r9                 /* restore CR */

        mfmsr   r10
        clrrdi  r10,r10,2
        mtmsrd  r10,1

        mtspr   SPRN_SRR0,r11
        mtspr   SPRN_SRR1,r12
        ld      r9,PACA_EXSLB+EX_R9(r13)
        ld      r10,PACA_EXSLB+EX_R10(r13)
        ld      r11,PACA_EXSLB+EX_R11(r13)
        ld      r12,PACA_EXSLB+EX_R12(r13)
        ld      r13,PACA_EXSLB+EX_R13(r13)
        rfid
        b       .       /* prevent speculative execution */

/*
 * Space for CPU0's segment table.
 *
 * On iSeries, the hypervisor must fill in at least one entry before
 * we get control (with relocate on).  The address is given to the hv
 * as a page number (see xLparMap below), so this must be at a
 * fixed address (the linker can't compute (u64)&initial_stab >>
 * PAGE_SHIFT).
 */
        . = STAB0_OFFSET        /* 0x6000 */
        .globl initial_stab
initial_stab:
        .space  4096

#ifdef CONFIG_PPC_PSERIES
/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 */
        . = 0x7000
        .globl fwnmi_data_area
fwnmi_data_area:
#endif /* CONFIG_PPC_PSERIES */

        /* iSeries does not use the FWNMI stuff, so it is safe to put
         * this here, even if we later allow kernels that will boot on
         * both pSeries and iSeries */
#ifdef CONFIG_PPC_ISERIES
        . = LPARMAP_PHYS
        .globl xLparMap
xLparMap:
        .quad   HvEsidsToMap            /* xNumberEsids */
        .quad   HvRangesToMap           /* xNumberRanges */
        .quad   STAB0_PAGE              /* xSegmentTableOffs */
        .zero   40                      /* xRsvd */
        /* xEsids (HvEsidsToMap entries of 2 quads) */
        .quad   PAGE_OFFSET_ESID        /* xKernelEsid */
        .quad   PAGE_OFFSET_VSID        /* xKernelVsid */
        .quad   VMALLOC_START_ESID      /* xKernelEsid */
        .quad   VMALLOC_START_VSID      /* xKernelVsid */
        /* xRanges (HvRangesToMap entries of 3 quads) */
        .quad   HvPagesToMap            /* xPages */
        .quad   0                       /* xOffset */
        .quad   PAGE_OFFSET_VSID << (SID_SHIFT - HW_PAGE_SHIFT) /* xVPN */

#endif /* CONFIG_PPC_ISERIES */

#ifdef CONFIG_PPC_PSERIES
        . = 0x8000
#endif /* CONFIG_PPC_PSERIES */

/*
 * On pSeries and most other platforms, secondary processors spin
 * in the following code.
 * At entry, r3 = this processor's number (physical cpu id)
 */
_GLOBAL(generic_secondary_smp_init)
        mr      r24,r3

        /* turn on 64-bit mode */
        bl      .enable_64b_mode

        /* Set up a paca value for this processor. Since we have the
         * physical cpu id in r24, we need to search the pacas to find
         * which logical id maps to our physical one.
         */
        LOAD_REG_IMMEDIATE(r13, paca)   /* Get base vaddr of paca array  */
        li      r5,0                    /* logical cpu id                */
1:      lhz     r6,PACAHWCPUID(r13)     /* Load HW procid from paca      */
        cmpw    r6,r24                  /* Compare to our id             */
        beq     2f
        addi    r13,r13,PACA_SIZE       /* Loop to next PACA on miss     */
        addi    r5,r5,1
        cmpwi   r5,NR_CPUS
        blt     1b

        mr      r3,r24                  /* not found, copy phys to r3    */
        b       .kexec_wait             /* next kernel might do better   */

2:      mtspr   SPRN_SPRG3,r13          /* Save vaddr of paca in SPRG3   */
        /* From now on, r24 is expected to be logical cpuid */
        mr      r24,r5
3:      HMT_LOW
        lbz     r23,PACAPROCSTART(r13)  /* Test if this processor should */
                                        /* start.                        */
        sync

#ifndef CONFIG_SMP
        b       3b                      /* Never go on non-SMP           */
#else
        cmpwi   0,r23,0
        beq     3b                      /* Loop until told to go         */

        /* See if we need to call a cpu state restore handler */
        LOAD_REG_IMMEDIATE(r23, cur_cpu_spec)
        ld      r23,0(r23)
        ld      r23,CPU_SPEC_RESTORE(r23)
        cmpdi   0,r23,0
        beq     4f
        ld      r23,0(r23)
        mtctr   r23
        bctrl

4:      /* Create a temp kernel stack for use before relocation is on.  */
        ld      r1,PACAEMERGSP(r13)
        subi    r1,r1,STACK_FRAME_OVERHEAD

        b       __secondary_start
#endif

_STATIC(__mmu_off)
        mfmsr   r3
        andi.   r0,r3,MSR_IR|MSR_DR
        beqlr
        andc    r3,r3,r0
        mtspr   SPRN_SRR0,r4
        mtspr   SPRN_SRR1,r3
        sync
        rfid
        b       .       /* prevent speculative execution */


/*
 * Here is our main kernel entry point. We currently support two kinds
 * of entry, depending on the value of r5.
 *
 *   r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content
 *                 in r3...r7
 *
 *   r5 == NULL -> kexec style entry. r3 is a physical pointer to the
 *                 DT block, r4 is a physical pointer to the kernel itself
 *
 */
_GLOBAL(__start_initialization_multiplatform)
        /*
         * Are we booted from a PROM OF-type client interface?
         */
        cmpldi  cr0,r5,0
        beq     1f
        b       .__boot_from_prom               /* yes -> prom */
1:
        /* Save parameters */
        mr      r31,r3
        mr      r30,r4

        /* Make sure we are running in 64-bit mode */
        bl      .enable_64b_mode

        /* Set up some critical 970 SPRs before switching the MMU off */
        mfspr   r0,SPRN_PVR
        srwi    r0,r0,16
        cmpwi   r0,0x39         /* 970 */
        beq     1f
        cmpwi   r0,0x3c         /* 970FX */
        beq     1f
        cmpwi   r0,0x44         /* 970MP */
        beq     1f
        cmpwi   r0,0x45         /* 970GX */
        bne     2f
1:      bl      .__cpu_preinit_ppc970
2:

        /* Switch off the MMU if it is not already off */
        LOAD_REG_IMMEDIATE(r4, .__after_prom_start - KERNELBASE)
        add     r4,r4,r30
        bl      .__mmu_off
        b       .__after_prom_start

_INIT_STATIC(__boot_from_prom)
        /* Save parameters */
        mr      r31,r3
        mr      r30,r4
        mr      r29,r5
        mr      r28,r6
        mr      r27,r7

        /*
         * Align the stack to a 16-byte boundary.
         * Depending on the size and layout of the ELF sections in the
         * initial boot binary, the stack pointer will be unaligned on
         * PowerMac.
         */
        rldicr  r1,r1,0,59

        /* Make sure we are running in 64-bit mode */
        bl      .enable_64b_mode

        /* put a relocation offset into r3 */
        bl      .reloc_offset

        LOAD_REG_IMMEDIATE(r2,__toc_start)
        addi    r2,r2,0x4000
        addi    r2,r2,0x4000
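        /* The TOC pointer conventionally sits 0x8000 beyond __toc_start
         * so that the whole signed 16-bit offset range is usable; it
         * takes two addis because addi's immediate is limited to
         * signed 16 bits (at most 0x7fff). */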

        /* Relocate the TOC from a virt addr to a real addr */
        add     r2,r2,r3

        /* Restore parameters */
        mr      r3,r31
        mr      r4,r30
        mr      r5,r29
        mr      r6,r28
        mr      r7,r27

        /* Do all of the interaction with OF client interface */
        bl      .prom_init
        /* We never return */
        trap

_STATIC(__after_prom_start)

/*
 * We need to run with __start at physical address PHYSICAL_START.
 * This will leave some code in the first 256B of
 * real memory, which is reserved for software use.
 * The remainder of the first page is loaded with the fixed
 * interrupt vectors.  The next two pages are filled with
 * unknown exception placeholders.
 *
 * Note: This process overwrites the OF exception vectors.
 *      r26 == relocation offset
 *      r27 == KERNELBASE
 */
        bl      .reloc_offset
        mr      r26,r3
        LOAD_REG_IMMEDIATE(r27, KERNELBASE)

        LOAD_REG_IMMEDIATE(r3, PHYSICAL_START)  /* target addr */

        // XXX FIXME: Use phys returned by OF (r30)
        add     r4,r27,r26              /* source addr                   */
                                        /* current address of _start     */
                                        /*   i.e. where we are running   */
                                        /*      the source addr          */

        cmpdi   r4,0                    /* In some cases the loader may  */
        bne     1f
        b       .start_here_multiplatform /* have already put us at zero */
                                        /* so we can skip the copy.      */
1:      LOAD_REG_IMMEDIATE(r5,copy_to_here) /* # bytes of memory to copy */
        sub     r5,r5,r27

        li      r6,0x100                /* Start offset, the first 0x100 */
                                        /* bytes were copied earlier.    */

        bl      .copy_and_flush         /* copy the first n bytes        */
                                        /* this includes the code being  */
                                        /* executed here.                */

        LOAD_REG_IMMEDIATE(r0, 4f)      /* Jump to the copy of this code */
        mtctr   r0                      /* that we just made/relocated   */
        bctr

4:      LOAD_REG_IMMEDIATE(r5,klimit)
        add     r5,r5,r26
        ld      r5,0(r5)                /* get the value of klimit */
        sub     r5,r5,r27
        bl      .copy_and_flush         /* copy the rest */
        b       .start_here_multiplatform

/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 *
 * Note: this routine *only* clobbers r0, r6 and lr
 */
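/*
 * Roughly equivalent C for the loop below (illustrative sketch only,
 * ignoring the initial -8 bias applied to r5 and r6):
 *
 *      while (r6 < r5) {
 *              for (i = 0; i < 8; i++) {       // one 64-byte "line"
 *                      r6 += 8;
 *                      *(u64 *)(r3 + r6) = *(u64 *)(r4 + r6);
 *              }
 *              dcbst(r3 + r6);                 // push line to memory
 *              sync();
 *              icbi(r3 + r6);                  // drop stale icache line
 *      }
 */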
_GLOBAL(copy_and_flush)
        addi    r5,r5,-8
        addi    r6,r6,-8
4:      li      r0,8                    /* Use the smallest common      */
                                        /* denominator cache line       */
                                        /* size.  This results in       */
                                        /* extra cache line flushes     */
                                        /* but operation is correct.    */
                                        /* Can't get cache line size    */
                                        /* from NACA as it is being     */
                                        /* moved too.                   */

        mtctr   r0                      /* put # words/line in ctr      */
3:      addi    r6,r6,8                 /* copy a cache line            */
        ldx     r0,r6,r4
        stdx    r0,r6,r3
        bdnz    3b
        dcbst   r6,r3                   /* write it to memory           */
        sync
        icbi    r6,r3                   /* flush the icache line        */
        cmpld   0,r6,r5
        blt     4b
        sync
        addi    r5,r5,8
        addi    r6,r6,8
        blr

.align 8
copy_to_here:

#ifdef CONFIG_SMP
#ifdef CONFIG_PPC_PMAC
/*
 * On PowerMac, secondary processors start from the reset vector, which
 * is temporarily turned into a call to one of the functions below.
 */
        .section ".text";
        .align 2 ;

        .globl  __secondary_start_pmac_0
__secondary_start_pmac_0:
        /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
        li      r24,0
        b       1f
        li      r24,1
        b       1f
        li      r24,2
        b       1f
        li      r24,3
1:

_GLOBAL(pmac_secondary_start)
        /* turn on 64-bit mode */
        bl      .enable_64b_mode

        /* Copy some CPU settings from CPU 0 */
        bl      .__restore_cpu_ppc970

        /* pSeries does that early, though I don't think we really need it */
        mfmsr   r3
        ori     r3,r3,MSR_RI
        mtmsrd  r3                      /* RI on */

        /* Set up a paca value for this processor. */
        LOAD_REG_IMMEDIATE(r4, paca)    /* Get base vaddr of paca array */
        mulli   r13,r24,PACA_SIZE       /* Calculate vaddr of right paca */
        add     r13,r13,r4              /* for this processor.          */
        mtspr   SPRN_SPRG3,r13          /* Save vaddr of paca in SPRG3 */

        /* Create a temp kernel stack for use before relocation is on.  */
        ld      r1,PACAEMERGSP(r13)
        subi    r1,r1,STACK_FRAME_OVERHEAD

        b       __secondary_start

#endif /* CONFIG_PPC_PMAC */

/*
 * This function is called after the master CPU has released the
 * secondary processors.  The execution environment is relocation off.
 * The paca for this processor has the following fields initialized at
 * this point:
 *   1. Processor number
 *   2. Segment table pointer (virtual address)
 * On entry the following are set:
 *   r1 = stack pointer.  vaddr for iSeries, raddr (temp stack) for pSeries
 *   r24   = cpu# (in Linux terms)
 *   r13   = paca virtual address
 *   SPRG3 = paca virtual address
 */
        .globl  __secondary_start
__secondary_start:
        /* Set thread priority to MEDIUM */
        HMT_MEDIUM

        /* Load TOC */
        ld      r2,PACATOC(r13)

        /* Do early setup for that CPU (stab, slb, hash table pointer) */
        bl      .early_setup_secondary

        /* Initialize the kernel stack.  Just a repeat for iSeries.      */
        LOAD_REG_ADDR(r3, current_set)
        sldi    r28,r24,3               /* get current_set[cpu#]         */
        ldx     r1,r3,r28
        addi    r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
        std     r1,PACAKSAVE(r13)

        /* Clear backchain so we get nice backtraces */
        li      r7,0
        mtlr    r7

        /* enable MMU and jump to start_secondary */
        LOAD_REG_ADDR(r3, .start_secondary_prolog)
        LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
        ori     r4,r4,MSR_EE
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif
BEGIN_FW_FTR_SECTION
        stb     r7,PACASOFTIRQEN(r13)
        stb     r7,PACAHARDIRQEN(r13)
END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)

        mtspr   SPRN_SRR0,r3
        mtspr   SPRN_SRR1,r4
        rfid
        b       .       /* prevent speculative execution */

/*
 * Running with relocation on at this point.  All we want to do is
 * zero the stack back-chain pointer before going into C code.
 */
_GLOBAL(start_secondary_prolog)
        li      r3,0
        std     r3,0(r1)                /* Zero the stack frame pointer */
        bl      .start_secondary
        b       .
#endif

/*
 * This subroutine clobbers r11 and r12
 */
_GLOBAL(enable_64b_mode)
        mfmsr   r11                     /* grab the current MSR */
        li      r12,1
        rldicr  r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
        or      r11,r11,r12
        li      r12,1
        rldicr  r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
        or      r11,r11,r12
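        /* Each li/rldicr pair builds the constant 1 << MSR_xx_LG
         * without needing a 64-bit immediate: MSR_SF selects 64-bit
         * mode; MSR_ISF (believed to be the "interrupts taken in
         * 64-bit mode" bit on 970-class CPUs) does the same for
         * exceptions. */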
        mtmsrd  r11
        isync
        blr

/*
 * This is where the main kernel code starts.
 */
_INIT_STATIC(start_here_multiplatform)
        /* get a new offset, now that the kernel has moved. */
        bl      .reloc_offset
        mr      r26,r3

        /* Clear out the BSS. It may have been done already in
         * prom_init, but that's irrelevant since prom_init will soon
         * be detached from the kernel completely. Besides, we need
         * to clear it now for kexec-style entry.
         */
        LOAD_REG_IMMEDIATE(r11,__bss_stop)
        LOAD_REG_IMMEDIATE(r8,__bss_start)
        sub     r11,r11,r8              /* bss size                     */
        addi    r11,r11,7               /* round up to an even double word */
        rldicl. r11,r11,61,3            /* shift right by 3             */
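        /* r11 = (size + 7) / 8, the number of doublewords to clear;
         * the record form (rldicl.) sets cr0 so that an empty BSS
         * skips the loop via the beq below. */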
        beq     4f
        addi    r8,r8,-8
        li      r0,0
        mtctr   r11                     /* zero this many doublewords   */
3:      stdu    r0,8(r8)
        bdnz    3b
4:

        mfmsr   r6
        ori     r6,r6,MSR_RI
        mtmsrd  r6                      /* RI on */

        /* The following gets the stack and TOC set up with the regs */
        /* pointing to the real addr of the kernel stack.  This is   */
        /* all done to support the C function call below which sets  */
        /* up the htab.  This is done because we have relocated the  */
        /* kernel but are still running in real mode. */

        LOAD_REG_IMMEDIATE(r3,init_thread_union)
        add     r3,r3,r26

        /* set up a stack pointer (physical address) */
        addi    r1,r3,THREAD_SIZE
        li      r0,0
        stdu    r0,-STACK_FRAME_OVERHEAD(r1)

        /* set up the TOC (physical address) */
        LOAD_REG_IMMEDIATE(r2,__toc_start)
        addi    r2,r2,0x4000
        addi    r2,r2,0x4000
        add     r2,r2,r26

        /* Do very early kernel initializations, including initial hash table,
         * stab and slb setup before we turn on relocation.     */

        /* Restore parameters passed from prom_init/kexec */
        mr      r3,r31
        bl      .early_setup

        LOAD_REG_IMMEDIATE(r3, .start_here_common)
        LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)
        mtspr   SPRN_SRR0,r3
        mtspr   SPRN_SRR1,r4
        rfid
        b       .       /* prevent speculative execution */

        /* This is where all platforms converge execution */
_INIT_GLOBAL(start_here_common)
        /* relocation is on at this point */

        /* The following code sets up the SP and TOC now that we are */
        /* running with translation enabled. */

        LOAD_REG_IMMEDIATE(r3,init_thread_union)

        /* set up the stack */
        addi    r1,r3,THREAD_SIZE
        li      r0,0
        stdu    r0,-STACK_FRAME_OVERHEAD(r1)

        /* ptr to current */
        LOAD_REG_IMMEDIATE(r4, init_task)
        std     r4,PACACURRENT(r13)

        /* Load the TOC */
        ld      r2,PACATOC(r13)
        std     r1,PACAKSAVE(r13)

        bl      .setup_system

        /* Load up the kernel context */
5:
        li      r5,0
        stb     r5,PACASOFTIRQEN(r13)   /* Soft Disabled */
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
        mfmsr   r5
        ori     r5,r5,MSR_EE            /* Hard Enabled */
        mtmsrd  r5
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif
BEGIN_FW_FTR_SECTION
        stb     r5,PACAHARDIRQEN(r13)
END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)

        bl      .start_kernel

        /* Not reached */
        BUG_OPCODE

/*
 * We put a few things here that have to be page-aligned.
 * This stuff goes at the beginning of the bss, which is page-aligned.
 */
        .section ".bss"

        .align  PAGE_SHIFT

        .globl  empty_zero_page
empty_zero_page:
        .space  PAGE_SIZE

        .globl  swapper_pg_dir
swapper_pg_dir:
        .space  PGD_TABLE_SIZE
