OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

[/] [or1k/] [trunk/] [linux/] [linux-2.4/] [arch/] [sparc64/] [kernel/] [head.S] - Blame information for rev 1765

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 1275 phoenix
/* $Id: head.S,v 1.1.1.1 2004-04-15 01:34:33 phoenix Exp $
2
 * head.S: Initial boot code for the Sparc64 port of Linux.
3
 *
4
 * Copyright (C) 1996,1997 David S. Miller (davem@caip.rutgers.edu)
5
 * Copyright (C) 1996 David Sitsky (David.Sitsky@anu.edu.au)
6
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
7
 * Copyright (C) 1997 Miguel de Icaza (miguel@nuclecu.unam.mx)
8
 */
9
 
10
#include <linux/config.h>
#include <linux/version.h>
#include <linux/errno.h>
#include <asm/asm_offsets.h>
#include <asm/asi.h>
#include <asm/pstate.h>
#include <asm/ptrace.h>
#include <asm/spitfire.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/errno.h>
#include <asm/signal.h>
#include <asm/processor.h>
#include <asm/lsu.h>
#include <asm/dcr.h>
#include <asm/dcu.h>
#include <asm/head.h>
#include <asm/ttable.h>
/* NOTE(review): the header names above were lost in web extraction (the
 * angle-bracketed arguments were stripped as if they were HTML tags); they
 * are reconstructed here and must be verified against the original
 * linux-2.4 kernel tree before this listing is used as source.
 */
28
 
29
/* This section from _start to sparc64_boot_end should fit into
30
 * 0x0000.0000.0040.4000 to 0x0000.0000.0040.8000 and will be sharing space
31
 * with bootup_user_stack, which is from 0x0000.0000.0040.4000 to
32
 * 0x0000.0000.0040.6000 and empty_bad_page, which is from
33
 * 0x0000.0000.0040.6000 to 0x0000.0000.0040.8000.
34
 */
35
 
36
        .text
37
        .globl  start, _start, stext, _stext
38
_start:
39
start:
40
_stext:
41
stext:
42
bootup_user_stack:
43
! 0x0000000000404000
44
        b       sparc64_boot
45
         flushw                                 /* Flush register file.      */
46
 
47
/* This stuff has to be in sync with SILO and other potential boot loaders
48
 * Fields should be kept upward compatible and whenever any change is made,
49
 * HdrS version should be incremented.
50
 */
51
        .global root_flags, ram_flags, root_dev
52
        .global sparc_ramdisk_image, sparc_ramdisk_size
53
        .globl  silo_args
54
 
55
        .ascii  "HdrS"
56
        .word   LINUX_VERSION_CODE
57
 
58
        /* History:
59
         *
60
         * 0x0300 : Supports being located at other than 0x4000
61
         * 0x0202 : Supports kernel params string
62
         * 0x0201 : Supports reboot_command
63
         */
64
        .half   0x0300          /* HdrS version */
65
 
66
root_flags:
67
        .half   1
68
root_dev:
69
        .half   0
70
ram_flags:
71
        .half   0
72
sparc_ramdisk_image:
73
        .word   0
74
sparc_ramdisk_size:
75
        .word   0
76
        .xword  reboot_command
77
        .xword  bootstr_info
78
        .word   _end
79
 
80
        /* We must be careful, 32-bit OpenBOOT will get confused if it
81
         * tries to save away a register window to a 64-bit kernel
82
         * stack address.  Flush all windows, disable interrupts,
83
         * remap if necessary, jump onto kernel trap table, then kernel
84
         * stack, or else we die.
85
         *
86
         * PROM entry point is on %o4
87
         */
88
sparc64_boot:
89
        BRANCH_IF_CHEETAH_BASE(g1,g5,cheetah_boot)
90
        BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g5,cheetah_plus_boot)
91
        ba,pt   %xcc, spitfire_boot
92
         nop
93
 
94
cheetah_plus_boot:
95
        /* Preserve OBP chosen DCU and DCR register settings.  */
96
        ba,pt   %xcc, cheetah_generic_boot
97
         nop
98
 
99
cheetah_boot:
100
        mov     DCR_BPE | DCR_RPE | DCR_SI | DCR_IFPOE | DCR_MS, %g1
101
        wr      %g1, %asr18
102
 
103
        sethi   %uhi(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g5
104
        or      %g5, %ulo(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g5
105
        sllx    %g5, 32, %g5
106
        or      %g5, DCU_DM | DCU_IM | DCU_DC | DCU_IC, %g5
107
        stxa    %g5, [%g0] ASI_DCU_CONTROL_REG
108
        membar  #Sync
109
 
110
cheetah_generic_boot:
111
        mov     TSB_EXTENSION_P, %g3
112
        stxa    %g0, [%g3] ASI_DMMU
113
        stxa    %g0, [%g3] ASI_IMMU
114
        membar  #Sync
115
 
116
        mov     TSB_EXTENSION_S, %g3
117
        stxa    %g0, [%g3] ASI_DMMU
118
        membar  #Sync
119
 
120
        mov     TSB_EXTENSION_N, %g3
121
        stxa    %g0, [%g3] ASI_DMMU
122
        stxa    %g0, [%g3] ASI_IMMU
123
        membar  #Sync
124
 
125
        wrpr    %g0, (PSTATE_PRIV|PSTATE_PEF|PSTATE_IE), %pstate
126
        wr      %g0, 0, %fprs
127
 
128
        /* Just like for Spitfire, we probe itlb-2 for a mapping which
129
         * matches our current %pc.  We take the physical address in
130
         * that mapping and use it to make our own.
131
         */
132
 
133
        /* %g5 holds the tlb data */
134
        sethi   %uhi(_PAGE_VALID | _PAGE_SZ4MB), %g5
135
        sllx    %g5, 32, %g5
136
        or      %g5, (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W | _PAGE_G), %g5
137
 
138
        /* Put PADDR tlb data mask into %g3. */
139
        sethi   %uhi(_PAGE_PADDR), %g3
140
        or      %g3, %ulo(_PAGE_PADDR), %g3
141
        sllx    %g3, 32, %g3
142
        sethi   %hi(_PAGE_PADDR), %g7
143
        or      %g7, %lo(_PAGE_PADDR), %g7
144
        or      %g3, %g7, %g3
145
 
146
        set     2 << 16, %l0            /* TLB entry walker. */
147
        set     0x1fff, %l2             /* Page mask. */
148
        rd      %pc, %l3
149
        andn    %l3, %l2, %g2           /* vaddr comparator */
150
 
151
1:      ldxa    [%l0] ASI_ITLB_TAG_READ, %g1
152
        membar  #Sync
153
        andn    %g1, %l2, %g1
154
        cmp     %g1, %g2
155
        be,pn   %xcc, cheetah_got_tlbentry
156
         nop
157
        and     %l0, (127 << 3), %g1
158
        cmp     %g1, (127 << 3)
159
        blu,pt  %xcc, 1b
160
         add    %l0, (1 << 3), %l0
161
 
162
        /* Search the small TLB.  OBP never maps us like that but
163
         * newer SILO can.
164
         */
165
        clr     %l0
166
 
167
1:      ldxa    [%l0] ASI_ITLB_TAG_READ, %g1
168
        membar  #Sync
169
        andn    %g1, %l2, %g1
170
        cmp     %g1, %g2
171
        be,pn   %xcc, cheetah_got_tlbentry
172
         nop
173
        cmp     %l0, (15 << 3)
174
        blu,pt  %xcc, 1b
175
         add    %l0, (1 << 3), %l0
176
 
177
        /* BUG() if we get here... */
178
        ta      0x5
179
 
180
cheetah_got_tlbentry:
181
        ldxa    [%l0] ASI_ITLB_DATA_ACCESS, %g0
182
        ldxa    [%l0] ASI_ITLB_DATA_ACCESS, %g1
183
        membar  #Sync
184
        and     %g1, %g3, %g1
185
        set     0x5fff, %l0
186
        andn    %g1, %l0, %g1
187
        or      %g5, %g1, %g5
188
 
189
        /* Clear out any KERNBASE area entries. */
190
        set     2 << 16, %l0
191
        sethi   %hi(KERNBASE), %g3
192
        sethi   %hi(KERNBASE<<1), %g7
193
        mov     TLB_TAG_ACCESS, %l7
194
 
195
        /* First, check ITLB */
196
1:      ldxa    [%l0] ASI_ITLB_TAG_READ, %g1
197
        membar  #Sync
198
        andn    %g1, %l2, %g1
199
        cmp     %g1, %g3
200
        blu,pn  %xcc, 2f
201
         cmp    %g1, %g7
202
        bgeu,pn %xcc, 2f
203
         nop
204
        stxa    %g0, [%l7] ASI_IMMU
205
        membar  #Sync
206
        stxa    %g0, [%l0] ASI_ITLB_DATA_ACCESS
207
        membar  #Sync
208
 
209
2:      and     %l0, (127 << 3), %g1
210
        cmp     %g1, (127 << 3)
211
        blu,pt  %xcc, 1b
212
         add    %l0, (1 << 3), %l0
213
 
214
        /* Next, check DTLB */
215
        set     2 << 16, %l0
216
1:      ldxa    [%l0] ASI_DTLB_TAG_READ, %g1
217
        membar  #Sync
218
        andn    %g1, %l2, %g1
219
        cmp     %g1, %g3
220
        blu,pn  %xcc, 2f
221
         cmp    %g1, %g7
222
        bgeu,pn %xcc, 2f
223
         nop
224
        stxa    %g0, [%l7] ASI_DMMU
225
        membar  #Sync
226
        stxa    %g0, [%l0] ASI_DTLB_DATA_ACCESS
227
        membar  #Sync
228
 
229
2:      and     %l0, (511 << 3), %g1
230
        cmp     %g1, (511 << 3)
231
        blu,pt  %xcc, 1b
232
         add    %l0, (1 << 3), %l0
233
 
234
        /* On Cheetah+, have to check second DTLB.  */
235
        BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,l0,2f)
236
        ba,pt   %xcc, 9f
237
         nop
238
 
239
2:      set     3 << 16, %l0
240
1:      ldxa    [%l0] ASI_DTLB_TAG_READ, %g1
241
        membar  #Sync
242
        andn    %g1, %l2, %g1
243
        cmp     %g1, %g3
244
        blu,pn  %xcc, 2f
245
         cmp    %g1, %g7
246
        bgeu,pn %xcc, 2f
247
         nop
248
        stxa    %g0, [%l7] ASI_DMMU
249
        membar  #Sync
250
        stxa    %g0, [%l0] ASI_DTLB_DATA_ACCESS
251
        membar  #Sync
252
 
253
2:      and     %l0, (511 << 3), %g1
254
        cmp     %g1, (511 << 3)
255
        blu,pt  %xcc, 1b
256
         add    %l0, (1 << 3), %l0
257
 
258
9:
259
 
260
        /* Now lock the TTE we created into ITLB-0 and DTLB-0,
261
         * entry 15 (and maybe 14 too).
262
         */
263
        sethi   %hi(KERNBASE), %g3
264
        set     (0 << 16) | (15 << 3), %g7
265
        stxa    %g3, [%l7] ASI_DMMU
266
        membar  #Sync
267
        stxa    %g5, [%g7] ASI_DTLB_DATA_ACCESS
268
        membar  #Sync
269
        stxa    %g3, [%l7] ASI_IMMU
270
        membar  #Sync
271
        stxa    %g5, [%g7] ASI_ITLB_DATA_ACCESS
272
        membar  #Sync
273
        flush   %g3
274
        membar  #Sync
275
        sethi   %hi(_end), %g3                  /* Check for bigkernel case */
276
        or      %g3, %lo(_end), %g3
277
        srl     %g3, 23, %g3                    /* Check if _end > 8M */
278
        brz,pt  %g3, 1f
279
         sethi  %hi(KERNBASE), %g3              /* Restore for fixup code below */
280
        sethi   %hi(0x400000), %g3
281
        or      %g3, %lo(0x400000), %g3
282
        add     %g5, %g3, %g5                   /* New tte data */
283
        andn    %g5, (_PAGE_G), %g5
284
        sethi   %hi(KERNBASE+0x400000), %g3
285
        or      %g3, %lo(KERNBASE+0x400000), %g3
286
        set     (0 << 16) | (14 << 3), %g7
287
        stxa    %g3, [%l7] ASI_DMMU
288
        membar  #Sync
289
        stxa    %g5, [%g7] ASI_DTLB_DATA_ACCESS
290
        membar  #Sync
291
        stxa    %g3, [%l7] ASI_IMMU
292
        membar  #Sync
293
        stxa    %g5, [%g7] ASI_ITLB_DATA_ACCESS
294
        membar  #Sync
295
        flush   %g3
296
        membar  #Sync
297
        sethi   %hi(KERNBASE), %g3              /* Restore for fixup code below */
298
        ba,pt   %xcc, 1f
299
         nop
300
 
301
1:      set     sun4u_init, %g2
302
        jmpl    %g2 + %g0, %g0
303
         nop
304
 
305
spitfire_boot:
306
        /* Typically PROM has already enabled both MMU's and both on-chip
307
         * caches, but we do it here anyway just to be paranoid.
308
         */
309
        mov     (LSU_CONTROL_IC|LSU_CONTROL_DC|LSU_CONTROL_IM|LSU_CONTROL_DM), %g1
310
        stxa    %g1, [%g0] ASI_LSU_CONTROL
311
        membar  #Sync
312
 
313
        /*
314
         * Make sure we are in privileged mode, have address masking,
315
         * using the ordinary globals and have enabled floating
316
         * point.
317
         *
318
         * Again, typically PROM has left %pil at 13 or similar, and
319
         * (PSTATE_PRIV | PSTATE_PEF | PSTATE_IE) in %pstate.
320
         */
321
        wrpr    %g0, (PSTATE_PRIV|PSTATE_PEF|PSTATE_IE), %pstate
322
        wr      %g0, 0, %fprs
323
 
324
spitfire_create_mappings:
325
        /* %g5 holds the tlb data */
326
        sethi   %uhi(_PAGE_VALID | _PAGE_SZ4MB), %g5
327
        sllx    %g5, 32, %g5
328
        or      %g5, (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W | _PAGE_G), %g5
329
 
330
        /* Base of physical memory cannot reliably be assumed to be
331
         * at 0x0!  Figure out where it happens to be. -DaveM
332
         */
333
 
334
        /* Put PADDR tlb data mask into %g3. */
335
        sethi   %uhi(_PAGE_PADDR_SF), %g3
336
        or      %g3, %ulo(_PAGE_PADDR_SF), %g3
337
        sllx    %g3, 32, %g3
338
        sethi   %hi(_PAGE_PADDR_SF), %g7
339
        or      %g7, %lo(_PAGE_PADDR_SF), %g7
340
        or      %g3, %g7, %g3
341
 
342
        /* Walk through entire ITLB, looking for entry which maps
343
         * our %pc currently, stick PADDR from there into %g5 tlb data.
344
         */
345
        clr     %l0                     /* TLB entry walker. */
346
        set     0x1fff, %l2             /* Page mask. */
347
        rd      %pc, %l3
348
        andn    %l3, %l2, %g2           /* vaddr comparator */
349
1:
350
        /* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */
351
        ldxa    [%l0] ASI_ITLB_TAG_READ, %g1
352
        nop
353
        nop
354
        nop
355
        andn    %g1, %l2, %g1           /* Get vaddr */
356
        cmp     %g1, %g2
357
        be,a,pn %xcc, spitfire_got_tlbentry
358
         ldxa   [%l0] ASI_ITLB_DATA_ACCESS, %g1
359
        cmp     %l0, (63 << 3)
360
        blu,pt  %xcc, 1b
361
         add    %l0, (1 << 3), %l0
362
 
363
        /* BUG() if we get here... */
364
        ta      0x5
365
 
366
spitfire_got_tlbentry:
367
        /* Nops here again, perhaps Cheetah/Blackbird are better behaved... */
368
        nop
369
        nop
370
        nop
371
        and     %g1, %g3, %g1           /* Mask to just get paddr bits.       */
372
        set     0x5fff, %l3             /* Mask offset to get phys base.      */
373
        andn    %g1, %l3, %g1
374
 
375
        /* NOTE: We hold on to %g1 paddr base as we need it below to lock
376
         * NOTE: the PROM cif code into the TLB.
377
         */
378
 
379
        or      %g5, %g1, %g5           /* Or it into TAG being built.        */
380
 
381
        clr     %l0                     /* TLB entry walker. */
382
        sethi   %hi(KERNBASE), %g3      /* 4M lower limit */
383
        sethi   %hi(KERNBASE<<1), %g7   /* 8M upper limit */
384
        mov     TLB_TAG_ACCESS, %l7
385
1:
386
        /* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */
387
        ldxa    [%l0] ASI_ITLB_TAG_READ, %g1
388
        nop
389
        nop
390
        nop
391
        andn    %g1, %l2, %g1           /* Get vaddr */
392
        cmp     %g1, %g3
393
        blu,pn  %xcc, 2f
394
         cmp    %g1, %g7
395
        bgeu,pn %xcc, 2f
396
         nop
397
        stxa    %g0, [%l7] ASI_IMMU
398
        stxa    %g0, [%l0] ASI_ITLB_DATA_ACCESS
399
        membar  #Sync
400
2:
401
        cmp     %l0, (63 << 3)
402
        blu,pt  %xcc, 1b
403
         add    %l0, (1 << 3), %l0
404
 
405
        nop; nop; nop
406
 
407
        clr     %l0                     /* TLB entry walker. */
408
1:
409
        /* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */
410
        ldxa    [%l0] ASI_DTLB_TAG_READ, %g1
411
        nop
412
        nop
413
        nop
414
        andn    %g1, %l2, %g1           /* Get vaddr */
415
        cmp     %g1, %g3
416
        blu,pn  %xcc, 2f
417
         cmp    %g1, %g7
418
        bgeu,pn %xcc, 2f
419
         nop
420
        stxa    %g0, [%l7] ASI_DMMU
421
        stxa    %g0, [%l0] ASI_DTLB_DATA_ACCESS
422
        membar  #Sync
423
2:
424
        cmp     %l0, (63 << 3)
425
        blu,pt  %xcc, 1b
426
         add    %l0, (1 << 3), %l0
427
 
428
        nop; nop; nop
429
 
430
 
431
        /* PROM never puts any TLB entries into the MMU with the lock bit
432
         * set.  So we gladly use tlb entry 63 for KERNBASE. And maybe 62 too.
433
         */
434
 
435
        sethi   %hi(KERNBASE), %g3
436
        mov     (63 << 3), %g7
437
        stxa    %g3, [%l7] ASI_DMMU             /* KERNBASE into TLB TAG        */
438
        stxa    %g5, [%g7] ASI_DTLB_DATA_ACCESS /* TTE into TLB DATA            */
439
        membar  #Sync
440
        stxa    %g3, [%l7] ASI_IMMU             /* KERNBASE into TLB TAG        */
441
        stxa    %g5, [%g7] ASI_ITLB_DATA_ACCESS /* TTE into TLB DATA            */
442
        membar  #Sync
443
        flush   %g3
444
        membar  #Sync
445
        sethi   %hi(_end), %g3                  /* Check for bigkernel case */
446
        or      %g3, %lo(_end), %g3
447
        srl     %g3, 23, %g3                    /* Check if _end > 8M */
448
        brz,pt  %g3, 2f
449
         sethi  %hi(KERNBASE), %g3              /* Restore for fixup code below */
450
        sethi   %hi(0x400000), %g3
451
        or      %g3, %lo(0x400000), %g3
452
        add     %g5, %g3, %g5                   /* New tte data */
453
        andn    %g5, (_PAGE_G), %g5
454
        sethi   %hi(KERNBASE+0x400000), %g3
455
        or      %g3, %lo(KERNBASE+0x400000), %g3
456
        mov     (62 << 3), %g7
457
        stxa    %g3, [%l7] ASI_DMMU
458
        stxa    %g5, [%g7] ASI_DTLB_DATA_ACCESS
459
        membar  #Sync
460
        stxa    %g3, [%l7] ASI_IMMU
461
        stxa    %g5, [%g7] ASI_ITLB_DATA_ACCESS
462
        membar  #Sync
463
        flush   %g3
464
        membar  #Sync
465
        sethi   %hi(KERNBASE), %g3              /* Restore for fixup code below */
466
2:      ba,pt   %xcc, 1f
467
         nop
468
1:
469
        set     sun4u_init, %g2
470
        jmpl    %g2 + %g0, %g0
471
         nop
472
 
473
sun4u_init:
474
        /* Set ctx 0 */
475
        mov     PRIMARY_CONTEXT, %g7
476
        stxa    %g0, [%g7] ASI_DMMU
477
        membar  #Sync
478
 
479
        mov     SECONDARY_CONTEXT, %g7
480
        stxa    %g0, [%g7] ASI_DMMU
481
        membar  #Sync
482
 
483
        sethi   %uhi(PAGE_OFFSET), %g4
484
        sllx    %g4, 32, %g4
485
 
486
        /* We are now safely (we hope) in Nucleus context (0), rewrite
487
         * the KERNBASE TTE's so they no longer have the global bit set.
488
         * Don't forget to setup TAG_ACCESS first 8-)
489
         */
490
        mov     TLB_TAG_ACCESS, %g2
491
        stxa    %g3, [%g2] ASI_IMMU
492
        stxa    %g3, [%g2] ASI_DMMU
493
        membar  #Sync
494
 
495
        BRANCH_IF_ANY_CHEETAH(g1,g5,cheetah_tlb_fixup)
496
 
497
        ba,pt   %xcc, spitfire_tlb_fixup
498
         nop
499
 
500
cheetah_tlb_fixup:
501
        set     (0 << 16) | (15 << 3), %g7
502
        ldxa    [%g7] ASI_ITLB_DATA_ACCESS, %g0
503
        ldxa    [%g7] ASI_ITLB_DATA_ACCESS, %g1
504
        andn    %g1, (_PAGE_G), %g1
505
        stxa    %g1, [%g7] ASI_ITLB_DATA_ACCESS
506
        membar  #Sync
507
 
508
        ldxa    [%g7] ASI_DTLB_DATA_ACCESS, %g0
509
        ldxa    [%g7] ASI_DTLB_DATA_ACCESS, %g1
510
        andn    %g1, (_PAGE_G), %g1
511
        stxa    %g1, [%g7] ASI_DTLB_DATA_ACCESS
512
        membar  #Sync
513
 
514
        /* Kill instruction prefetch queues. */
515
        flush   %g3
516
        membar  #Sync
517
 
518
        mov     2, %g2          /* Set TLB type to cheetah+. */
519
        BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g5,g7,1f)
520
 
521
        mov     1, %g2          /* Set TLB type to cheetah. */
522
 
523
1:      sethi   %hi(tlb_type), %g5
524
        stw     %g2, [%g5 + %lo(tlb_type)]
525
 
526
        /* Patch copy/page operations to cheetah optimized versions. */
527
        call    cheetah_patch_copyops
528
         nop
529
        call    cheetah_patch_pgcopyops
530
         nop
531
        call    cheetah_patch_cachetlbops
532
         nop
533
 
534
        ba,pt   %xcc, tlb_fixup_done
535
         nop
536
 
537
spitfire_tlb_fixup:
538
        mov     (63 << 3), %g7
539
        ldxa    [%g7] ASI_ITLB_DATA_ACCESS, %g1
540
        andn    %g1, (_PAGE_G), %g1
541
        stxa    %g1, [%g7] ASI_ITLB_DATA_ACCESS
542
        membar  #Sync
543
 
544
        ldxa    [%g7] ASI_DTLB_DATA_ACCESS, %g1
545
        andn    %g1, (_PAGE_G), %g1
546
        stxa    %g1, [%g7] ASI_DTLB_DATA_ACCESS
547
        membar  #Sync
548
 
549
        /* Kill instruction prefetch queues. */
550
        flush   %g3
551
        membar  #Sync
552
 
553
        /* Set TLB type to spitfire. */
554
        mov     0, %g2
555
        sethi   %hi(tlb_type), %g5
556
        stw     %g2, [%g5 + %lo(tlb_type)]
557
 
558
tlb_fixup_done:
559
        sethi   %hi(init_task_union), %g6
560
        or      %g6, %lo(init_task_union), %g6
561
        mov     %sp, %l6
562
        mov     %o4, %l7
563
 
564
#if 0   /* We don't do it like this anymore, but for historical hack value
565
         * I leave this snippet here to show how crazy we can be sometimes. 8-)
566
         */
567
 
568
        /* Setup "Linux Current Register", thanks Sun 8-) */
569
        wr      %g0, 0x1, %pcr
570
 
571
        /* Blackbird errata workaround.  See commentary in
572
         * smp.c:smp_percpu_timer_interrupt() for more
573
         * information.
574
         */
575
        ba,pt   %xcc, 99f
576
         nop
577
        .align  64
578
99:     wr      %g6, %g0, %pic
579
        rd      %pic, %g0
580
#endif
581
 
582
        wr      %g0, ASI_P, %asi
583
        mov     1, %g5
584
        sllx    %g5, THREAD_SHIFT, %g5
585
        sub     %g5, (STACKFRAME_SZ + STACK_BIAS), %g5
586
        add     %g6, %g5, %sp
587
        mov     0, %fp
588
 
589
        wrpr    %g0, 0, %wstate
590
        wrpr    %g0, 0x0, %tl
591
 
592
        /* Clear the bss */
593
        sethi   %hi(__bss_start), %o0
594
        or      %o0, %lo(__bss_start), %o0
595
        sethi   %hi(_end), %o1
596
        or      %o1, %lo(_end), %o1
597
        call    __bzero
598
         sub    %o1, %o0, %o1
599
 
600
        mov     %l6, %o1                        ! OpenPROM stack
601
        call    prom_init
602
         mov    %l7, %o0                        ! OpenPROM cif handler
603
 
604
        /* Off we go.... */
605
        call    start_kernel
606
         nop
607
        /* Not reached... */
608
 
609
/* IMPORTANT NOTE: Whenever making changes here, check
610
 * trampoline.S as well. -jj */
611
        .globl  setup_tba
612
setup_tba:      /* i0 = is_starfire */
613
        save    %sp, -160, %sp
614
 
615
        rdpr    %tba, %g7
616
        sethi   %hi(prom_tba), %o1
617
        or      %o1, %lo(prom_tba), %o1
618
        stx     %g7, [%o1]
619
 
620
        /* Setup "Linux" globals 8-) */
621
        rdpr    %pstate, %o1
622
        mov     %g6, %o2
623
        wrpr    %o1, (PSTATE_AG|PSTATE_IE), %pstate
624
        sethi   %hi(sparc64_ttable_tl0), %g5
625
        wrpr    %g5, %tba
626
        mov     %o2, %g6
627
 
628
        /* Set up MMU globals */
629
        wrpr    %o1, (PSTATE_MG|PSTATE_IE), %pstate
630
 
631
        /* Set fixed globals used by dTLB miss handler. */
632
#define KERN_HIGHBITS           ((_PAGE_VALID|_PAGE_SZ4MB)^0xfffff80000000000)
633
#define KERN_LOWBITS            (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W)
634
 
635
        mov     TSB_REG, %g1
636
        stxa    %g0, [%g1] ASI_DMMU
637
        membar  #Sync
638
        mov     TLB_SFSR, %g1
639
        sethi   %uhi(KERN_HIGHBITS), %g2
640
        or      %g2, %ulo(KERN_HIGHBITS), %g2
641
        sllx    %g2, 32, %g2
642
        or      %g2, KERN_LOWBITS, %g2
643
 
644
        BRANCH_IF_ANY_CHEETAH(g3,g7,cheetah_vpte_base)
645
        ba,pt   %xcc, spitfire_vpte_base
646
         nop
647
 
648
cheetah_vpte_base:
649
        sethi           %uhi(VPTE_BASE_CHEETAH), %g3
650
        or              %g3, %ulo(VPTE_BASE_CHEETAH), %g3
651
        ba,pt           %xcc, 2f
652
         sllx           %g3, 32, %g3
653
 
654
spitfire_vpte_base:
655
        sethi           %uhi(VPTE_BASE_SPITFIRE), %g3
656
        or              %g3, %ulo(VPTE_BASE_SPITFIRE), %g3
657
        sllx            %g3, 32, %g3
658
 
659
2:
660
        clr     %g7
661
#undef KERN_HIGHBITS
662
#undef KERN_LOWBITS
663
 
664
        /* Setup Interrupt globals */
665
        wrpr    %o1, (PSTATE_IG|PSTATE_IE), %pstate
666
#ifndef CONFIG_SMP
667
        sethi   %hi(__up_workvec), %g5
668
        or      %g5, %lo(__up_workvec), %g6
669
#else
670
        /* By definition of where we are, this is boot_cpu. */
671
        brz,pt  %i0, not_starfire
672
         sethi  %hi(0x1fff4000), %g1
673
        or      %g1, %lo(0x1fff4000), %g1
674
        sllx    %g1, 12, %g1
675
        or      %g1, 0xd0, %g1
676
        lduwa   [%g1] ASI_PHYS_BYPASS_EC_E, %g1
677
        b,pt    %xcc, set_worklist
678
         nop
679
 
680
not_starfire:
681
        BRANCH_IF_JALAPENO(g1,g5,is_jalapeno)
682
        BRANCH_IF_ANY_CHEETAH(g1,g5,is_cheetah)
683
 
684
        ba,pt   %xcc, not_cheetah
685
         nop
686
 
687
is_jalapeno:
688
        ldxa            [%g0] ASI_JBUS_CONFIG, %g1
689
        srlx            %g1, 17, %g1
690
        ba,pt           %xcc, set_worklist
691
         and            %g1, 0x1f, %g1          ! 5bit JBUS ID
692
 
693
is_cheetah:
694
        ldxa            [%g0] ASI_SAFARI_CONFIG, %g1
695
        srlx            %g1, 17, %g1
696
        ba,pt           %xcc, set_worklist
697
         and            %g1, 0x3ff, %g1         ! 10bit Safari Agent ID
698
 
699
not_cheetah:
700
        ldxa    [%g0] ASI_UPA_CONFIG, %g1
701
        srlx    %g1, 17, %g1
702
        and     %g1, 0x1f, %g1
703
 
704
        /* In theory this is: &(cpu_data[boot_cpu_id].irq_worklists[0]) */
705
set_worklist:
706
        sethi   %hi(cpu_data), %g5
707
        or      %g5, %lo(cpu_data), %g5
708
        sllx    %g1, 7, %g1
709
        add     %g5, %g1, %g5
710
        add     %g5, 64, %g6
711
#endif
712
 
713
        /* Kill PROM timer */
714
        sethi   %hi(0x80000000), %g1
715
        sllx    %g1, 32, %g1
716
        wr      %g1, 0, %tick_cmpr
717
 
718
        BRANCH_IF_ANY_CHEETAH(g1,g5,1f)
719
 
720
        ba,pt   %xcc, 2f
721
         nop
722
 
723
        /* Disable STICK_INT interrupts. */
724
1:
725
        sethi   %hi(0x80000000), %g1
726
        sllx    %g1, 32, %g1
727
        wr      %g1, %asr25
728
 
729
        /* Ok, we're done setting up all the state our trap mechanisms need,
730
         * now get back into normal globals and let the PROM know what is up.
731
         */
732
2:
733
        wrpr    %g0, %g0, %wstate
734
        wrpr    %o1, PSTATE_IE, %pstate
735
 
736
        sethi   %hi(sparc64_ttable_tl0), %g5
737
        call    prom_set_trap_table
738
         mov    %g5, %o0
739
 
740
        rdpr    %pstate, %o1
741
        or      %o1, PSTATE_IE, %o1
742
        wrpr    %o1, 0, %pstate
743
 
744
        ret
745
         restore
746
 
747
/*
748
 * The following skips make sure the trap table in ttable.S is aligned
749
 * on a 32K boundary as required by the v9 specs for TBA register.
750
 */
751
sparc64_boot_end:
752
        .skip   0x2000 + _start - sparc64_boot_end
753
bootup_user_stack_end:
754
        .skip   0x2000
755
 
756
#ifdef CONFIG_SBUS
757
/* This is just a hack to fool make depend config.h discovering
758
   strategy: As the .S files below need config.h, but
759
   make depend does not find it for them, we include config.h
760
   in head.S */
761
#endif
762
 
763
! 0x0000000000408000
764
 
765
#include "ttable.S"
766
#include "systbls.S"
767
 
768
        .align  1024
769
        .globl  swapper_pg_dir
770
swapper_pg_dir:
771
        .word   0
772
 
773
#include "etrap.S"
774
#include "rtrap.S"
775
#include "winfixup.S"
776
#include "entry.S"
777
 
778
        /* This is just anal retentiveness on my part... */
779
        .align  16384
780
 
781
        .data
782
        .align  8
783
        .globl  prom_tba, tlb_type
784
prom_tba:       .xword  0
785
tlb_type:       .word   0        /* Must NOT end up in BSS */
786
        .section        ".fixup",#alloc,#execinstr
787
        .globl  __ret_efault
788
__ret_efault:
789
        ret
790
         restore %g0, -EFAULT, %o0
791
 

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.