OpenCores
URL https://opencores.org/ocsvn/or1k_soc_on_altera_embedded_dev_kit/or1k_soc_on_altera_embedded_dev_kit/trunk

Subversion Repositories or1k_soc_on_altera_embedded_dev_kit

[/] [or1k_soc_on_altera_embedded_dev_kit/] [trunk/] [linux-2.6/] [linux-2.6.24/] [arch/] [arm/] [mm/] [proc-xscale.S] - Blame information for rev 3

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 3 xianfeng
/*
 *  linux/arch/arm/mm/proc-xscale.S
 *
 *  Author:     Nicolas Pitre
 *  Created:    November 2000
 *  Copyright:  (C) 2000, 2001 MontaVista Software Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * MMU functions for the Intel XScale CPUs
 *
 * 2001 Aug 21:
 *      some contributions by Brett Gaines
 *      Copyright 2001 by Intel Corp.
 *
 * 2001 Sep 08:
 *      Completely revisited, many important fixes
 *      Nicolas Pitre
 */
22
 
23
/*
 * NOTE(review): the include targets were lost in extraction (bare
 * "#include" lines); restored from linux-2.6.24 proc-xscale.S — verify
 * against the original tree.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/elf.h>
#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
32
 
33
/*
 * This is the maximum size of an area which will be flushed.  If the area
 * is larger than this, then we flush the whole cache.
 */
#define MAX_AREA_SIZE   32768

/*
 * the cache line size of the I and D cache
 */
#define CACHELINESIZE   32

/*
 * the size of the data cache
 */
#define CACHESIZE       32768

/*
 * Virtual address used to allocate the cache when flushed
 *
 * This must be an address range which is _never_ used.  It should
 * apparently have a mapping in the corresponding page table for
 * compatibility with future CPUs that _could_ require it.  For instance we
 * don't care.
 *
 * This must be aligned on a 2*CACHESIZE boundary.  The code selects one of
 * the 2 areas in alternance each time the clean_d_cache macro is used.
 * Without this the XScale core exhibits cache eviction problems and no one
 * knows why.
 *
 * Reminder: the vector table is located at 0xffff0000-0xffff0fff.
 */
#define CLEAN_ADDR      0xfffe0000
65
 
66
/*
 * This macro is used to wait for a CP15 write and is needed
 * when we have to ensure that the last operation to the co-pro
 * was completed before continuing with operation.
 * \rd is a scratch register (clobbered).
 */
        .macro  cpwait, rd
        mrc     p15, 0, \rd, c2, c0, 0          @ arbitrary read of cp15
        mov     \rd, \rd                        @ wait for completion
        sub     pc, pc, #4                      @ flush instruction pipeline
        .endm
76
 
77
        @ Combined cpwait + return: the LSR #32 shift makes the result 0,
        @ so pc = \lr, but only after the mrc has drained the coprocessor.
        .macro  cpwait_ret, lr, rd
        mrc     p15, 0, \rd, c2, c0, 0          @ arbitrary read of cp15
        sub     pc, \lr, \rd, LSR #32           @ wait for completion and
                                                @ flush instruction pipeline
        .endm
82
 
83
/*
 * This macro cleans the entire dcache using line allocate.
 * The main loop has been unrolled to reduce loop overhead.
 * rd and rs are two scratch registers.
 *
 * The allocation address alternates between the two CACHESIZE-sized
 * halves of the CLEAN_ADDR window (see comment at CLEAN_ADDR); the
 * current base is kept in the clean_addr word.
 */
        .macro  clean_d_cache, rd, rs
        ldr     \rs, =clean_addr
        ldr     \rd, [\rs]
        eor     \rd, \rd, #CACHESIZE            @ flip to the other half
        str     \rd, [\rs]
        add     \rs, \rd, #CACHESIZE            @ \rs = end of area
1:      mcr     p15, 0, \rd, c7, c2, 5          @ allocate D cache line
        add     \rd, \rd, #CACHELINESIZE
        mcr     p15, 0, \rd, c7, c2, 5          @ allocate D cache line
        add     \rd, \rd, #CACHELINESIZE
        mcr     p15, 0, \rd, c7, c2, 5          @ allocate D cache line
        add     \rd, \rd, #CACHELINESIZE
        mcr     p15, 0, \rd, c7, c2, 5          @ allocate D cache line
        add     \rd, \rd, #CACHELINESIZE
        teq     \rd, \rs
        bne     1b
        .endm
105
 
106
        .data
@ Current base address used by clean_d_cache; toggled between the two
@ halves of the CLEAN_ADDR window on every whole-cache clean.
clean_addr:     .word   CLEAN_ADDR

        .text
110
 
111
/*
 * cpu_xscale_proc_init()
 *
 * Nothing too exciting at the moment
 */
ENTRY(cpu_xscale_proc_init)
        mov     pc, lr
118
 
119
/*
 * cpu_xscale_proc_fin()
 *
 * Prepare for CPU shutdown: mask interrupts, clean the caches and
 * then disable the I cache, D cache and BTB via the control register.
 */
ENTRY(cpu_xscale_proc_fin)
        str     lr, [sp, #-4]!
        mov     r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
        msr     cpsr_c, r0                      @ disable IRQ/FIQ
        bl      xscale_flush_kern_cache_all     @ clean caches
        mrc     p15, 0, r0, c1, c0, 0           @ ctrl register
        bic     r0, r0, #0x1800                 @ ...IZ...........
        bic     r0, r0, #0x0006                 @ .............CA.
        mcr     p15, 0, r0, c1, c0, 0           @ disable caches
        ldr     pc, [sp], #4
132
 
133
/*
 * cpu_xscale_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 *
 * Beware PXA270 erratum E7.
 */
        .align  5
ENTRY(cpu_xscale_reset)
        mov     r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
        msr     cpsr_c, r1                      @ reset CPSR
        mcr     p15, 0, r1, c10, c4, 1          @ unlock I-TLB
        mcr     p15, 0, r1, c8, c5, 0           @ invalidate I-TLB
        mrc     p15, 0, r1, c1, c0, 0           @ ctrl register
        bic     r1, r1, #0x0086                 @ ........B....CA.
        bic     r1, r1, #0x3900                 @ ..VIZ..S........
        sub     pc, pc, #4                      @ flush pipeline
        @ *** cache line aligned ***
        mcr     p15, 0, r1, c1, c0, 0           @ ctrl register
        bic     r1, r1, #0x0001                 @ ...............M
        mcr     p15, 0, ip, c7, c7, 0           @ invalidate I,D caches & BTB
        mcr     p15, 0, r1, c1, c0, 0           @ ctrl register
        @ CAUTION: MMU turned off from this point. We count on the pipeline
        @ already containing those two last instructions to survive.
        mcr     p15, 0, ip, c8, c7, 0           @ invalidate I & D TLBs
        mov     pc, r0
163
 
164
/*
 * cpu_xscale_do_idle()
 *
 * Cause the processor to idle
 *
 * For now we do nothing but go to idle mode for every case
 *
 * XScale supports clock switching, but using idle mode support
 * allows external hardware to react to system state changes.
 */
        .align  5

ENTRY(cpu_xscale_do_idle)
        mov     r0, #1
        mcr     p14, 0, r0, c7, c0, 0           @ Go to IDLE
        mov     pc, lr
180
 
181
/* ================================= CACHE ================================ */
182
 
183
/*
 *      flush_user_cache_all()
 *
 *      Invalidate all cache entries in a particular address
 *      space.
 */
ENTRY(xscale_flush_user_cache_all)
        /* FALLTHROUGH */

/*
 *      flush_kern_cache_all()
 *
 *      Clean and invalidate the entire cache.
 */
ENTRY(xscale_flush_kern_cache_all)
        mov     r2, #VM_EXEC
        mov     ip, #0
__flush_whole_cache:
        clean_d_cache r0, r1
        tst     r2, #VM_EXEC
        mcrne   p15, 0, ip, c7, c5, 0           @ Invalidate I cache & BTB
        mcrne   p15, 0, ip, c7, c10, 4          @ Drain Write (& Fill) Buffer
        mov     pc, lr
206
 
207
/*
 *      flush_user_cache_range(start, end, vm_flags)
 *
 *      Invalidate a range of cache entries in the specified
 *      address space.
 *
 *      - start - start address (may not be aligned)
 *      - end   - end address (exclusive, may not be aligned)
 *      - vma   - vma_area_struct describing address space
 */
        .align  5
ENTRY(xscale_flush_user_cache_range)
        mov     ip, #0
        sub     r3, r1, r0                      @ calculate total size
        cmp     r3, #MAX_AREA_SIZE              @ too big? flush it all
        bhs     __flush_whole_cache

1:      tst     r2, #VM_EXEC
        mcrne   p15, 0, r0, c7, c5, 1           @ Invalidate I cache line
        mcr     p15, 0, r0, c7, c10, 1          @ Clean D cache line
        mcr     p15, 0, r0, c7, c6, 1           @ Invalidate D cache line
        add     r0, r0, #CACHELINESIZE
        cmp     r0, r1
        blo     1b
        tst     r2, #VM_EXEC
        mcrne   p15, 0, ip, c7, c5, 6           @ Invalidate BTB
        mcrne   p15, 0, ip, c7, c10, 4          @ Drain Write (& Fill) Buffer
        mov     pc, lr
235
 
236
/*
 *      coherent_kern_range(start, end)
 *
 *      Ensure coherency between the Icache and the Dcache in the
 *      region described by start.  If you have non-snooping
 *      Harvard caches, you need to implement this function.
 *
 *      - start  - virtual start address
 *      - end    - virtual end address
 *
 *      Note: single I-cache line invalidation isn't used here since
 *      it also trashes the mini I-cache used by JTAG debuggers.
 */
ENTRY(xscale_coherent_kern_range)
        bic     r0, r0, #CACHELINESIZE - 1
1:      mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
        add     r0, r0, #CACHELINESIZE
        cmp     r0, r1
        blo     1b
        mov     r0, #0
        mcr     p15, 0, r0, c7, c5, 0           @ Invalidate I cache & BTB
        mcr     p15, 0, r0, c7, c10, 4          @ Drain Write (& Fill) Buffer
        mov     pc, lr
259
 
260
/*
 *      coherent_user_range(start, end)
 *
 *      Ensure coherency between the Icache and the Dcache in the
 *      region described by start.  If you have non-snooping
 *      Harvard caches, you need to implement this function.
 *
 *      - start  - virtual start address
 *      - end    - virtual end address
 */
ENTRY(xscale_coherent_user_range)
        bic     r0, r0, #CACHELINESIZE - 1
1:      mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
        mcr     p15, 0, r0, c7, c5, 1           @ Invalidate I cache entry
        add     r0, r0, #CACHELINESIZE
        cmp     r0, r1
        blo     1b
        mov     r0, #0
        mcr     p15, 0, r0, c7, c5, 6           @ Invalidate BTB
        mcr     p15, 0, r0, c7, c10, 4          @ Drain Write (& Fill) Buffer
        mov     pc, lr
281
 
282
/*
 *      flush_kern_dcache_page(void *page)
 *
 *      Ensure no D cache aliasing occurs, either with itself or
 *      the I cache
 *
 *      - addr  - page aligned address
 */
ENTRY(xscale_flush_kern_dcache_page)
        add     r1, r0, #PAGE_SZ
1:      mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
        mcr     p15, 0, r0, c7, c6, 1           @ invalidate D entry
        add     r0, r0, #CACHELINESIZE
        cmp     r0, r1
        blo     1b
        mov     r0, #0
        mcr     p15, 0, r0, c7, c5, 0           @ Invalidate I cache & BTB
        mcr     p15, 0, r0, c7, c10, 4          @ Drain Write (& Fill) Buffer
        mov     pc, lr
301
 
302
/*
 *      dma_inv_range(start, end)
 *
 *      Invalidate (discard) the specified virtual address range.
 *      May not write back any entries.  If 'start' or 'end'
 *      are not cache line aligned, those lines must be written
 *      back.
 *
 *      - start  - virtual start address
 *      - end    - virtual end address
 */
ENTRY(xscale_dma_inv_range)
        tst     r0, #CACHELINESIZE - 1
        bic     r0, r0, #CACHELINESIZE - 1
        mcrne   p15, 0, r0, c7, c10, 1          @ clean D entry (partial head)
        tst     r1, #CACHELINESIZE - 1
        mcrne   p15, 0, r1, c7, c10, 1          @ clean D entry (partial tail)
1:      mcr     p15, 0, r0, c7, c6, 1           @ invalidate D entry
        add     r0, r0, #CACHELINESIZE
        cmp     r0, r1
        blo     1b
        mcr     p15, 0, r0, c7, c10, 4          @ Drain Write (& Fill) Buffer
        mov     pc, lr
325
 
326
/*
 *      dma_clean_range(start, end)
 *
 *      Clean the specified virtual address range.
 *
 *      - start  - virtual start address
 *      - end    - virtual end address
 */
ENTRY(xscale_dma_clean_range)
        bic     r0, r0, #CACHELINESIZE - 1
1:      mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
        add     r0, r0, #CACHELINESIZE
        cmp     r0, r1
        blo     1b
        mcr     p15, 0, r0, c7, c10, 4          @ Drain Write (& Fill) Buffer
        mov     pc, lr
342
 
343
/*
 *      dma_flush_range(start, end)
 *
 *      Clean and invalidate the specified virtual address range.
 *
 *      - start  - virtual start address
 *      - end    - virtual end address
 */
ENTRY(xscale_dma_flush_range)
        bic     r0, r0, #CACHELINESIZE - 1
1:      mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
        mcr     p15, 0, r0, c7, c6, 1           @ invalidate D entry
        add     r0, r0, #CACHELINESIZE
        cmp     r0, r1
        blo     1b
        mcr     p15, 0, r0, c7, c10, 4          @ Drain Write (& Fill) Buffer
        mov     pc, lr
360
 
361
@ struct cpu_cache_fns for standard XScale cores — entry order must match
@ the cpu_cache_fns layout in asm/cacheflush.h.
ENTRY(xscale_cache_fns)
        .long   xscale_flush_kern_cache_all
        .long   xscale_flush_user_cache_all
        .long   xscale_flush_user_cache_range
        .long   xscale_coherent_kern_range
        .long   xscale_coherent_user_range
        .long   xscale_flush_kern_dcache_page
        .long   xscale_dma_inv_range
        .long   xscale_dma_clean_range
        .long   xscale_dma_flush_range
371
 
372
/*
 * On stepping A0/A1 of the 80200, invalidating D-cache by line doesn't
 * clear the dirty bits, which means that if we invalidate a dirty line,
 * the dirty data can still be written back to external memory later on.
 *
 * The recommended workaround is to always do a clean D-cache line before
 * doing an invalidate D-cache line, so on the affected processors,
 * dma_inv_range() is implemented as dma_flush_range().
 *
 * See erratum #25 of "Intel 80200 Processor Specification Update",
 * revision January 22, 2003, available at:
 *     http://www.intel.com/design/iio/specupdt/273415.htm
 */
ENTRY(xscale_80200_A0_A1_cache_fns)
        .long   xscale_flush_kern_cache_all
        .long   xscale_flush_user_cache_all
        .long   xscale_flush_user_cache_range
        .long   xscale_coherent_kern_range
        .long   xscale_coherent_user_range
        .long   xscale_flush_kern_dcache_page
        .long   xscale_dma_flush_range          @ dma_inv_range slot; see erratum above
        .long   xscale_dma_clean_range
        .long   xscale_dma_flush_range
395
 
396
/*
 * cpu_xscale_dcache_clean_area(addr, size)
 *
 * Clean (write back) the D cache over [addr, addr + size).
 * r0 = start address, r1 = size in bytes.
 */
ENTRY(cpu_xscale_dcache_clean_area)
1:      mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
        add     r0, r0, #CACHELINESIZE
        subs    r1, r1, #CACHELINESIZE
        bhi     1b
        mov     pc, lr
402
 
403
/* =============================== PageTable ============================== */
404
 
405
#define PTE_CACHE_WRITE_ALLOCATE 0

/*
 * cpu_xscale_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
        .align  5
ENTRY(cpu_xscale_switch_mm)
        clean_d_cache r1, r2
        mcr     p15, 0, ip, c7, c5, 0           @ Invalidate I cache & BTB
        mcr     p15, 0, ip, c7, c10, 4          @ Drain Write (& Fill) Buffer
        mcr     p15, 0, r0, c2, c0, 0           @ load page table pointer
        mcr     p15, 0, ip, c8, c7, 0           @ invalidate I & D TLBs
        cpwait_ret lr, ip
422
 
423
/*
 * cpu_xscale_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 *
 * Errata 40: must set memory to write-through for user read-only pages.
 */
        .align  5
ENTRY(cpu_xscale_set_pte_ext)
        str     r1, [r0], #-2048                @ linux version

        bic     r2, r1, #0xff0
        orr     r2, r2, #PTE_TYPE_EXT           @ extended page

        eor     r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY

        tst     r3, #L_PTE_USER                 @ User?
        orrne   r2, r2, #PTE_EXT_AP_URO_SRW     @ yes -> user r/o, system r/w

        tst     r3, #L_PTE_WRITE | L_PTE_DIRTY  @ Write and Dirty?
        orreq   r2, r2, #PTE_EXT_AP_UNO_SRW     @ yes -> user n/a, system r/w
                                                @ combined with user -> user r/w

        @
        @ Handle the X bit.  We want to set this bit for the minicache
        @ (U = E = B = W = 0, C = 1) or when write allocate is enabled,
        @ and we have a writeable, cacheable region.  If we ignore the
        @ U and E bits, we can allow user space to use the minicache as
        @ well.
        @
        @  X = (C & ~W & ~B) | (C & W & B & write_allocate)
        @
        eor     ip, r1, #L_PTE_CACHEABLE
        tst     ip, #L_PTE_CACHEABLE | L_PTE_WRITE | L_PTE_BUFFERABLE
#if PTE_CACHE_WRITE_ALLOCATE
        eorne   ip, r1, #L_PTE_CACHEABLE | L_PTE_WRITE | L_PTE_BUFFERABLE
        tstne   ip, #L_PTE_CACHEABLE | L_PTE_WRITE | L_PTE_BUFFERABLE
#endif
        orreq   r2, r2, #PTE_EXT_TEX(1)

        @
        @ Erratum 40: The B bit must be cleared for a user read-only
        @ cacheable page.
        @
        @  B = B & ~(U & C & ~W)
        @
        and     ip, r1, #L_PTE_USER | L_PTE_WRITE | L_PTE_CACHEABLE
        teq     ip, #L_PTE_USER | L_PTE_CACHEABLE
        biceq   r2, r2, #PTE_BUFFERABLE

        tst     r3, #L_PTE_PRESENT | L_PTE_YOUNG        @ Present and Young?
        movne   r2, #0                          @ no -> fault

        str     r2, [r0]                        @ hardware version
        mov     ip, #0
        mcr     p15, 0, r0, c7, c10, 1          @ Clean D cache line
        mcr     p15, 0, ip, c7, c10, 4          @ Drain Write (& Fill) Buffer
        mov     pc, lr
481
 
482
 
483
        .ltorg
484
 
485
        .align
486
 
487
        __INIT

/*
 * __xscale_setup
 *
 * Initialise the processor at boot: invalidate caches/BTB and TLBs,
 * grant CP6 (IOP3xx/Bulverde) and CP13 access, and compute the control
 * register value (returned in r0) from xscale_crval.
 * Clobbers: r0, r5, r6.
 */
        .type   __xscale_setup, #function
__xscale_setup:
        mcr     p15, 0, ip, c7, c7, 0           @ invalidate I, D caches & BTB
        mcr     p15, 0, ip, c7, c10, 4          @ Drain Write (& Fill) Buffer
        mcr     p15, 0, ip, c8, c7, 0           @ invalidate I, D TLBs
        mov     r0, #1 << 6                     @ cp6 for IOP3xx and Bulverde
        orr     r0, r0, #1 << 13                @ Its undefined whether this
        mcr     p15, 0, r0, c15, c1, 0          @ affects USR or SVC modes

        adr     r5, xscale_crval
        ldmia   r5, {r5, r6}
        mrc     p15, 0, r0, c1, c0, 0           @ get control register
        bic     r0, r0, r5
        orr     r0, r0, r6
        mov     pc, lr
        .size   __xscale_setup, . - __xscale_setup
505
 
506
        /*
         *  R
         * .RVI ZFRS BLDP WCAM
         * ..11 1.01 .... .101
         *
         */
        .type   xscale_crval, #object
xscale_crval:
        crval   clear=0x00003b07, mmuset=0x00003905, ucset=0x00001900
515
 
516
        __INITDATA

/*
 * Purpose : Function pointers used to access above functions - all calls
 *           come through these
 */
        .type   xscale_processor_functions, #object
ENTRY(xscale_processor_functions)
        .word   v5t_early_abort
        .word   cpu_xscale_proc_init
        .word   cpu_xscale_proc_fin
        .word   cpu_xscale_reset
        .word   cpu_xscale_do_idle
        .word   cpu_xscale_dcache_clean_area
        .word   cpu_xscale_switch_mm
        .word   cpu_xscale_set_pte_ext
        .size   xscale_processor_functions, . - xscale_processor_functions
534
 
535
        .section ".rodata"

        .type   cpu_arch_name, #object
cpu_arch_name:
        .asciz  "armv5te"
        .size   cpu_arch_name, . - cpu_arch_name

        .type   cpu_elf_name, #object
cpu_elf_name:
        .asciz  "v5"
        .size   cpu_elf_name, . - cpu_elf_name

        .type   cpu_80200_A0_A1_name, #object
cpu_80200_A0_A1_name:
        .asciz  "XScale-80200 A0/A1"
        .size   cpu_80200_A0_A1_name, . - cpu_80200_A0_A1_name

        .type   cpu_80200_name, #object
cpu_80200_name:
        .asciz  "XScale-80200"
        .size   cpu_80200_name, . - cpu_80200_name

        .type   cpu_80219_name, #object
cpu_80219_name:
        .asciz  "XScale-80219"
        .size   cpu_80219_name, . - cpu_80219_name

        .type   cpu_8032x_name, #object
cpu_8032x_name:
        .asciz  "XScale-IOP8032x Family"
        .size   cpu_8032x_name, . - cpu_8032x_name

        .type   cpu_8033x_name, #object
cpu_8033x_name:
        .asciz  "XScale-IOP8033x Family"
        .size   cpu_8033x_name, . - cpu_8033x_name

        .type   cpu_pxa250_name, #object
cpu_pxa250_name:
        .asciz  "XScale-PXA250"
        .size   cpu_pxa250_name, . - cpu_pxa250_name

        .type   cpu_pxa210_name, #object
cpu_pxa210_name:
        .asciz  "XScale-PXA210"
        .size   cpu_pxa210_name, . - cpu_pxa210_name

        .type   cpu_ixp42x_name, #object
cpu_ixp42x_name:
        .asciz  "XScale-IXP42x Family"
        .size   cpu_ixp42x_name, . - cpu_ixp42x_name

        .type   cpu_ixp43x_name, #object
cpu_ixp43x_name:
        .asciz  "XScale-IXP43x Family"
        .size   cpu_ixp43x_name, . - cpu_ixp43x_name

        .type   cpu_ixp46x_name, #object
cpu_ixp46x_name:
        .asciz  "XScale-IXP46x Family"
        .size   cpu_ixp46x_name, . - cpu_ixp46x_name

        .type   cpu_ixp2400_name, #object
cpu_ixp2400_name:
        .asciz  "XScale-IXP2400"
        .size   cpu_ixp2400_name, . - cpu_ixp2400_name

        .type   cpu_ixp2800_name, #object
cpu_ixp2800_name:
        .asciz  "XScale-IXP2800"
        .size   cpu_ixp2800_name, . - cpu_ixp2800_name

        .type   cpu_pxa255_name, #object
cpu_pxa255_name:
        .asciz  "XScale-PXA255"
        .size   cpu_pxa255_name, . - cpu_pxa255_name

        .type   cpu_pxa270_name, #object
cpu_pxa270_name:
        .asciz  "XScale-PXA270"
        .size   cpu_pxa270_name, . - cpu_pxa270_name
616
 
617
        .align
618
 
619
        .section ".proc.info.init", #alloc, #execinstr
620
 
621
        .type   __80200_A0_A1_proc_info,#object
622
__80200_A0_A1_proc_info:
623
        .long   0x69052000
624
        .long   0xfffffffe
625
        .long   PMD_TYPE_SECT | \
626
                PMD_SECT_BUFFERABLE | \
627
                PMD_SECT_CACHEABLE | \
628
                PMD_SECT_AP_WRITE | \
629
                PMD_SECT_AP_READ
630
        .long   PMD_TYPE_SECT | \
631
                PMD_SECT_AP_WRITE | \
632
                PMD_SECT_AP_READ
633
        b       __xscale_setup
634
        .long   cpu_arch_name
635
        .long   cpu_elf_name
636
        .long   HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
637
        .long   cpu_80200_name
638
        .long   xscale_processor_functions
639
        .long   v4wbi_tlb_fns
640
        .long   xscale_mc_user_fns
641
        .long   xscale_80200_A0_A1_cache_fns
642
        .size   __80200_A0_A1_proc_info, . - __80200_A0_A1_proc_info
643
 
644
        .type   __80200_proc_info,#object
645
__80200_proc_info:
646
        .long   0x69052000
647
        .long   0xfffffff0
648
        .long   PMD_TYPE_SECT | \
649
                PMD_SECT_BUFFERABLE | \
650
                PMD_SECT_CACHEABLE | \
651
                PMD_SECT_AP_WRITE | \
652
                PMD_SECT_AP_READ
653
        .long   PMD_TYPE_SECT | \
654
                PMD_SECT_AP_WRITE | \
655
                PMD_SECT_AP_READ
656
        b       __xscale_setup
657
        .long   cpu_arch_name
658
        .long   cpu_elf_name
659
        .long   HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
660
        .long   cpu_80200_name
661
        .long   xscale_processor_functions
662
        .long   v4wbi_tlb_fns
663
        .long   xscale_mc_user_fns
664
        .long   xscale_cache_fns
665
        .size   __80200_proc_info, . - __80200_proc_info
666
 
667
        .type   __80219_proc_info,#object
668
__80219_proc_info:
669
        .long   0x69052e20
670
        .long   0xffffffe0
671
        .long   PMD_TYPE_SECT | \
672
                PMD_SECT_BUFFERABLE | \
673
                PMD_SECT_CACHEABLE | \
674
                PMD_SECT_AP_WRITE | \
675
                PMD_SECT_AP_READ
676
        .long   PMD_TYPE_SECT | \
677
                PMD_SECT_AP_WRITE | \
678
                PMD_SECT_AP_READ
679
        b       __xscale_setup
680
        .long   cpu_arch_name
681
        .long   cpu_elf_name
682
        .long   HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
683
        .long   cpu_80219_name
684
        .long   xscale_processor_functions
685
        .long   v4wbi_tlb_fns
686
        .long   xscale_mc_user_fns
687
        .long   xscale_cache_fns
688
        .size   __80219_proc_info, . - __80219_proc_info
689
 
690
        .type   __8032x_proc_info,#object
691
__8032x_proc_info:
692
        .long   0x69052420
693
        .long   0xfffff7e0
694
        .long   PMD_TYPE_SECT | \
695
                PMD_SECT_BUFFERABLE | \
696
                PMD_SECT_CACHEABLE | \
697
                PMD_SECT_AP_WRITE | \
698
                PMD_SECT_AP_READ
699
        .long   PMD_TYPE_SECT | \
700
                PMD_SECT_AP_WRITE | \
701
                PMD_SECT_AP_READ
702
        b       __xscale_setup
703
        .long   cpu_arch_name
704
        .long   cpu_elf_name
705
        .long   HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
706
        .long   cpu_8032x_name
707
        .long   xscale_processor_functions
708
        .long   v4wbi_tlb_fns
709
        .long   xscale_mc_user_fns
710
        .long   xscale_cache_fns
711
        .size   __8032x_proc_info, . - __8032x_proc_info
712
 
713
        .type   __8033x_proc_info,#object
714
__8033x_proc_info:
715
        .long   0x69054010
716
        .long   0xfffffd30
717
        .long   PMD_TYPE_SECT | \
718
                PMD_SECT_BUFFERABLE | \
719
                PMD_SECT_CACHEABLE | \
720
                PMD_SECT_AP_WRITE | \
721
                PMD_SECT_AP_READ
722
        .long   PMD_TYPE_SECT | \
723
                PMD_SECT_AP_WRITE | \
724
                PMD_SECT_AP_READ
725
        b       __xscale_setup
726
        .long   cpu_arch_name
727
        .long   cpu_elf_name
728
        .long   HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
729
        .long   cpu_8033x_name
730
        .long   xscale_processor_functions
731
        .long   v4wbi_tlb_fns
732
        .long   xscale_mc_user_fns
733
        .long   xscale_cache_fns
734
        .size   __8033x_proc_info, . - __8033x_proc_info
735
 
736
        .type   __pxa250_proc_info,#object
737
__pxa250_proc_info:
738
        .long   0x69052100
739
        .long   0xfffff7f0
740
        .long   PMD_TYPE_SECT | \
741
                PMD_SECT_BUFFERABLE | \
742
                PMD_SECT_CACHEABLE | \
743
                PMD_SECT_AP_WRITE | \
744
                PMD_SECT_AP_READ
745
        .long   PMD_TYPE_SECT | \
746
                PMD_SECT_AP_WRITE | \
747
                PMD_SECT_AP_READ
748
        b       __xscale_setup
749
        .long   cpu_arch_name
750
        .long   cpu_elf_name
751
        .long   HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
752
        .long   cpu_pxa250_name
753
        .long   xscale_processor_functions
754
        .long   v4wbi_tlb_fns
755
        .long   xscale_mc_user_fns
756
        .long   xscale_cache_fns
757
        .size   __pxa250_proc_info, . - __pxa250_proc_info
758
 
759
        .type   __pxa210_proc_info,#object
760
__pxa210_proc_info:
761
        .long   0x69052120
762
        .long   0xfffff3f0
763
        .long   PMD_TYPE_SECT | \
764
                PMD_SECT_BUFFERABLE | \
765
                PMD_SECT_CACHEABLE | \
766
                PMD_SECT_AP_WRITE | \
767
                PMD_SECT_AP_READ
768
        .long   PMD_TYPE_SECT | \
769
                PMD_SECT_AP_WRITE | \
770
                PMD_SECT_AP_READ
771
        b       __xscale_setup
772
        .long   cpu_arch_name
773
        .long   cpu_elf_name
774
        .long   HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
775
        .long   cpu_pxa210_name
776
        .long   xscale_processor_functions
777
        .long   v4wbi_tlb_fns
778
        .long   xscale_mc_user_fns
779
        .long   xscale_cache_fns
780
        .size   __pxa210_proc_info, . - __pxa210_proc_info
781
 
782
        .type   __ixp2400_proc_info, #object
783
__ixp2400_proc_info:
784
        .long   0x69054190
785
        .long   0xfffffff0
786
        .long   PMD_TYPE_SECT | \
787
                PMD_SECT_BUFFERABLE | \
788
                PMD_SECT_CACHEABLE | \
789
                PMD_SECT_AP_WRITE | \
790
                PMD_SECT_AP_READ
791
        .long   PMD_TYPE_SECT | \
792
                PMD_SECT_AP_WRITE | \
793
                PMD_SECT_AP_READ
794
        b       __xscale_setup
795
        .long   cpu_arch_name
796
        .long   cpu_elf_name
797
        .long   HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
798
        .long   cpu_ixp2400_name
799
        .long   xscale_processor_functions
800
        .long   v4wbi_tlb_fns
801
        .long   xscale_mc_user_fns
802
        .long   xscale_cache_fns
803
        .size   __ixp2400_proc_info, . - __ixp2400_proc_info
804
 
805
        .type   __ixp2800_proc_info, #object
806
__ixp2800_proc_info:
807
        .long   0x690541a0
808
        .long   0xfffffff0
809
        .long   PMD_TYPE_SECT | \
810
                PMD_SECT_BUFFERABLE | \
811
                PMD_SECT_CACHEABLE | \
812
                PMD_SECT_AP_WRITE | \
813
                PMD_SECT_AP_READ
814
        .long   PMD_TYPE_SECT | \
815
                PMD_SECT_AP_WRITE | \
816
                PMD_SECT_AP_READ
817
        b       __xscale_setup
818
        .long   cpu_arch_name
819
        .long   cpu_elf_name
820
        .long   HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
821
        .long   cpu_ixp2800_name
822
        .long   xscale_processor_functions
823
        .long   v4wbi_tlb_fns
824
        .long   xscale_mc_user_fns
825
        .long   xscale_cache_fns
826
        .size   __ixp2800_proc_info, . - __ixp2800_proc_info
827
 
828
        @ CPU match entry for the Intel IXP42x family.
        @ Same proc_info_list shape as the sibling entries; only the
        @ id/mask and CPU-name string differ.
        .type   __ixp42x_proc_info, #object
__ixp42x_proc_info:
        .long   0x690541c0                      @ CPU id value
        .long   0xffffffc0                      @ CPU id mask
        .long   PMD_TYPE_SECT | \
                PMD_SECT_BUFFERABLE | \
                PMD_SECT_CACHEABLE | \
                PMD_SECT_AP_WRITE | \
                PMD_SECT_AP_READ                @ section flags, cacheable mappings
        .long   PMD_TYPE_SECT | \
                PMD_SECT_AP_WRITE | \
                PMD_SECT_AP_READ                @ section flags, uncached (I/O) mappings
        b       __xscale_setup                  @ per-CPU initialisation hook
        .long   cpu_arch_name
        .long   cpu_elf_name
        .long   HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
        .long   cpu_ixp42x_name                 @ human-readable CPU name
        .long   xscale_processor_functions
        .long   v4wbi_tlb_fns
        .long   xscale_mc_user_fns
        .long   xscale_cache_fns
        .size   __ixp42x_proc_info, . - __ixp42x_proc_info
851
        @ CPU match entry for the Intel IXP43x family.
        .type   __ixp43x_proc_info, #object
__ixp43x_proc_info:
        .long   0x69054040                      @ CPU id value
        .long   0xfffffff0                      @ CPU id mask
        .long   PMD_TYPE_SECT | \
                PMD_SECT_BUFFERABLE | \
                PMD_SECT_CACHEABLE | \
                PMD_SECT_AP_WRITE | \
                PMD_SECT_AP_READ                @ section flags, cacheable mappings
        .long   PMD_TYPE_SECT | \
                PMD_SECT_AP_WRITE | \
                PMD_SECT_AP_READ                @ section flags, uncached (I/O) mappings
        b       __xscale_setup                  @ per-CPU initialisation hook
        .long   cpu_arch_name
        .long   cpu_elf_name
        .long   HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
        .long   cpu_ixp43x_name                 @ human-readable CPU name
        .long   xscale_processor_functions
        .long   v4wbi_tlb_fns
        .long   xscale_mc_user_fns
        .long   xscale_cache_fns
        .size   __ixp43x_proc_info, . - __ixp43x_proc_info
874
        @ CPU match entry for the Intel IXP46x family.
        .type   __ixp46x_proc_info, #object
__ixp46x_proc_info:
        .long   0x69054200                      @ CPU id value
        .long   0xffffff00                      @ CPU id mask
        .long   PMD_TYPE_SECT | \
                PMD_SECT_BUFFERABLE | \
                PMD_SECT_CACHEABLE | \
                PMD_SECT_AP_WRITE | \
                PMD_SECT_AP_READ                @ section flags, cacheable mappings
        .long   PMD_TYPE_SECT | \
                PMD_SECT_AP_WRITE | \
                PMD_SECT_AP_READ                @ section flags, uncached (I/O) mappings
        b       __xscale_setup                  @ per-CPU initialisation hook
        .long   cpu_arch_name
        .long   cpu_elf_name
        .long   HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
        .long   cpu_ixp46x_name                 @ human-readable CPU name
        .long   xscale_processor_functions
        .long   v4wbi_tlb_fns
        .long   xscale_mc_user_fns
        .long   xscale_cache_fns
        .size   __ixp46x_proc_info, . - __ixp46x_proc_info
897
        @ CPU match entry for the Intel PXA255/PXA26x application processors.
        .type   __pxa255_proc_info,#object
__pxa255_proc_info:
        .long   0x69052d00                      @ CPU id value
        .long   0xfffffff0                      @ CPU id mask
        .long   PMD_TYPE_SECT | \
                PMD_SECT_BUFFERABLE | \
                PMD_SECT_CACHEABLE | \
                PMD_SECT_AP_WRITE | \
                PMD_SECT_AP_READ                @ section flags, cacheable mappings
        .long   PMD_TYPE_SECT | \
                PMD_SECT_AP_WRITE | \
                PMD_SECT_AP_READ                @ section flags, uncached (I/O) mappings
        b       __xscale_setup                  @ per-CPU initialisation hook
        .long   cpu_arch_name
        .long   cpu_elf_name
        .long   HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
        .long   cpu_pxa255_name                 @ human-readable CPU name
        .long   xscale_processor_functions
        .long   v4wbi_tlb_fns
        .long   xscale_mc_user_fns
        .long   xscale_cache_fns
        .size   __pxa255_proc_info, . - __pxa255_proc_info
920
        @ CPU match entry for the Intel PXA270 (Bulverde) application processor.
        .type   __pxa270_proc_info,#object
__pxa270_proc_info:
        .long   0x69054110                      @ CPU id value
        .long   0xfffffff0                      @ CPU id mask
        .long   PMD_TYPE_SECT | \
                PMD_SECT_BUFFERABLE | \
                PMD_SECT_CACHEABLE | \
                PMD_SECT_AP_WRITE | \
                PMD_SECT_AP_READ                @ section flags, cacheable mappings
        .long   PMD_TYPE_SECT | \
                PMD_SECT_AP_WRITE | \
                PMD_SECT_AP_READ                @ section flags, uncached (I/O) mappings
        b       __xscale_setup                  @ per-CPU initialisation hook
        .long   cpu_arch_name
        .long   cpu_elf_name
        .long   HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
        .long   cpu_pxa270_name                 @ human-readable CPU name
        .long   xscale_processor_functions
        .long   v4wbi_tlb_fns
        .long   xscale_mc_user_fns
        .long   xscale_cache_fns
        .size   __pxa270_proc_info, . - __pxa270_proc_info
 

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.