arch/arm/mm/proc-arm940.S (linux-2.6.24), rev 3, from the OpenCores or1k_soc_on_altera_embedded_dev_kit repository (trunk):
https://opencores.org/ocsvn/or1k_soc_on_altera_embedded_dev_kit/or1k_soc_on_altera_embedded_dev_kit/trunk
/*
 *  linux/arch/arm/mm/arm940.S: utility functions for ARM940T
 *
 *  Copyright (C) 2004-2006 Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/elf.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>

/* ARM940T has a 4KB DCache comprising 256 lines of 4 words */
#define CACHE_DLINESIZE 16
#define CACHE_DSEGMENTS 4
#define CACHE_DENTRIES  64
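
/*
 * Geometry note: 4 segments x 64 entries x 16-byte lines = 4KB, matching
 * the comment above.  The clean/flush loops below drive the CP15 c7
 * index-based operations with the segment number in bits [5:4]
 * ("(CACHE_DSEGMENTS - 1) << 4") and the entry number in bits [31:26]
 * ("(CACHE_DENTRIES - 1) << 26"), so one pass over both loops visits
 * every line of the D-cache.
 */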

        .text
/*
 * cpu_arm940_proc_init()
 * cpu_arm940_switch_mm()
 *
 * These are not required.
 */
ENTRY(cpu_arm940_proc_init)
ENTRY(cpu_arm940_switch_mm)
        mov     pc, lr

/*
 * cpu_arm940_proc_fin()
 */
ENTRY(cpu_arm940_proc_fin)
        stmfd   sp!, {lr}
        mov     ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
        msr     cpsr_c, ip
        bl      arm940_flush_kern_cache_all
        mrc     p15, 0, r0, c1, c0, 0           @ ctrl register
        bic     r0, r0, #0x00001000             @ i-cache
        bic     r0, r0, #0x00000004             @ d-cache
        mcr     p15, 0, r0, c1, c0, 0           @ disable caches
        ldmfd   sp!, {pc}

/*
 * cpu_arm940_reset(loc)
 * Params  : r0 = address to jump to
 * Notes   : This sets up everything for a reset
 */
ENTRY(cpu_arm940_reset)
        mov     ip, #0
        mcr     p15, 0, ip, c7, c5, 0           @ flush I cache
        mcr     p15, 0, ip, c7, c6, 0           @ flush D cache
        mcr     p15, 0, ip, c7, c10, 4          @ drain WB
        mrc     p15, 0, ip, c1, c0, 0           @ ctrl register
        bic     ip, ip, #0x00000005             @ .............c.p
        bic     ip, ip, #0x00001000             @ i-cache
        mcr     p15, 0, ip, c1, c0, 0           @ ctrl register
        mov     pc, r0
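
/*
 * Note: the two BIC masks above clear control-register bits 0 and 2
 * (MPU enable and D-cache enable) and bit 12 (I-cache enable), the same
 * bits that __arm940_setup proposes to set below, so the CPU jumps to
 * the reset location with the caches and MPU turned off.
 */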

/*
 * cpu_arm940_do_idle()
 */
        .align  5
ENTRY(cpu_arm940_do_idle)
        mcr     p15, 0, r0, c7, c0, 4           @ Wait for interrupt
        mov     pc, lr

/*
 *      flush_user_cache_all()
 */
ENTRY(arm940_flush_user_cache_all)
        /* FALLTHROUGH */

/*
 *      flush_kern_cache_all()
 *
 *      Clean and invalidate the entire cache.
 */
ENTRY(arm940_flush_kern_cache_all)
        mov     r2, #VM_EXEC
        /* FALLTHROUGH */

/*
 *      flush_user_cache_range(start, end, flags)
 *
 *      There is no efficient way to flush a range of cache entries
 *      in the specified address range. Thus, flushes all.
 *
 *      - start - start address (inclusive)
 *      - end   - end address (exclusive)
 *      - flags - vm_flags describing address space
 */
ENTRY(arm940_flush_user_cache_range)
        mov     ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
        mcr     p15, 0, ip, c7, c6, 0           @ flush D cache
#else
        mov     r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
1:      orr     r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:      mcr     p15, 0, r3, c7, c14, 2          @ clean/flush D index
        subs    r3, r3, #1 << 26
        bcs     2b                              @ entries 63 to 0
        subs    r1, r1, #1 << 4
        bcs     1b                              @ segments 3 to 0
#endif
        tst     r2, #VM_EXEC
        mcrne   p15, 0, ip, c7, c5, 0           @ invalidate I cache
        mcrne   p15, 0, ip, c7, c10, 4          @ drain WB
        mov     pc, lr
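
/*
 * Both flush_user_cache_all and flush_kern_cache_all fall through into
 * the loop above.  The kernel variant loads r2 with VM_EXEC first, so
 * the "tst r2, #VM_EXEC" always succeeds and the I-cache invalidate and
 * write-buffer drain are performed; for a user range they only run when
 * the vm_flags passed in r2 mark the region as executable.
 */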

/*
 *      coherent_kern_range(start, end)
 *
 *      Ensure coherency between the Icache and the Dcache in the
 *      region described by start, end.  If you have non-snooping
 *      Harvard caches, you need to implement this function.
 *
 *      - start - virtual start address
 *      - end   - virtual end address
 */
ENTRY(arm940_coherent_kern_range)
        /* FALLTHROUGH */

/*
 *      coherent_user_range(start, end)
 *
 *      Ensure coherency between the Icache and the Dcache in the
 *      region described by start, end.  If you have non-snooping
 *      Harvard caches, you need to implement this function.
 *
 *      - start - virtual start address
 *      - end   - virtual end address
 */
ENTRY(arm940_coherent_user_range)
        /* FALLTHROUGH */

/*
 *      flush_kern_dcache_page(void *page)
 *
 *      Ensure no D cache aliasing occurs, either with itself or
 *      the I cache
 *
 *      - addr  - page aligned address
 */
ENTRY(arm940_flush_kern_dcache_page)
        mov     ip, #0
        mov     r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
1:      orr     r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:      mcr     p15, 0, r3, c7, c14, 2          @ clean/flush D index
        subs    r3, r3, #1 << 26
        bcs     2b                              @ entries 63 to 0
        subs    r1, r1, #1 << 4
        bcs     1b                              @ segments 3 to 0
        mcr     p15, 0, ip, c7, c5, 0           @ invalidate I cache
        mcr     p15, 0, ip, c7, c10, 4          @ drain WB
        mov     pc, lr

/*
 *      dma_inv_range(start, end)
 *
 *      There is no efficient way to invalidate a specified virtual
 *      address range. Thus, invalidates all.
 *
 *      - start - virtual start address
 *      - end   - virtual end address
 */
ENTRY(arm940_dma_inv_range)
        mov     ip, #0
        mov     r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
1:      orr     r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:      mcr     p15, 0, r3, c7, c6, 2           @ flush D entry
        subs    r3, r3, #1 << 26
        bcs     2b                              @ entries 63 to 0
        subs    r1, r1, #1 << 4
        bcs     1b                              @ segments 3 to 0
        mcr     p15, 0, ip, c7, c10, 4          @ drain WB
        mov     pc, lr

/*
 *      dma_clean_range(start, end)
 *
 *      There is no efficient way to clean a specified virtual
 *      address range. Thus, cleans all.
 *
 *      - start - virtual start address
 *      - end   - virtual end address
 */
ENTRY(arm940_dma_clean_range)
ENTRY(cpu_arm940_dcache_clean_area)
        mov     ip, #0
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
        mov     r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
1:      orr     r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:      mcr     p15, 0, r3, c7, c10, 2          @ clean D entry
        subs    r3, r3, #1 << 26
        bcs     2b                              @ entries 63 to 0
        subs    r1, r1, #1 << 4
        bcs     1b                              @ segments 3 to 0
#endif
        mcr     p15, 0, ip, c7, c10, 4          @ drain WB
        mov     pc, lr
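
/*
 * cpu_arm940_dcache_clean_area shares this code: both simply clean the
 * whole D-cache.  When CONFIG_CPU_DCACHE_WRITETHROUGH is set there is
 * never dirty data to write back, so the loop is compiled out and only
 * the write-buffer drain remains.
 */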

/*
 *      dma_flush_range(start, end)
 *
 *      There is no efficient way to clean and invalidate a specified
 *      virtual address range.
 *
 *      - start - virtual start address
 *      - end   - virtual end address
 */
ENTRY(arm940_dma_flush_range)
        mov     ip, #0
        mov     r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
1:      orr     r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
        mcr     p15, 0, r3, c7, c14, 2          @ clean/flush D entry
#else
        mcr     p15, 0, r3, c7, c10, 2          @ clean D entry
#endif
        subs    r3, r3, #1 << 26
        bcs     2b                              @ entries 63 to 0
        subs    r1, r1, #1 << 4
        bcs     1b                              @ segments 3 to 0
        mcr     p15, 0, ip, c7, c10, 4          @ drain WB
        mov     pc, lr

ENTRY(arm940_cache_fns)
        .long   arm940_flush_kern_cache_all
        .long   arm940_flush_user_cache_all
        .long   arm940_flush_user_cache_range
        .long   arm940_coherent_kern_range
        .long   arm940_coherent_user_range
        .long   arm940_flush_kern_dcache_page
        .long   arm940_dma_inv_range
        .long   arm940_dma_clean_range
        .long   arm940_dma_flush_range
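
/*
 * The entries above must stay in the same order as the members of
 * struct cpu_cache_fns (asm/cacheflush.h); the generic cache glue
 * indirects through this table when more than one cache type is built
 * into the kernel.
 */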

        __INIT

        .type   __arm940_setup, #function
__arm940_setup:
        mov     r0, #0
        mcr     p15, 0, r0, c7, c5, 0           @ invalidate I cache
        mcr     p15, 0, r0, c7, c6, 0           @ invalidate D cache
        mcr     p15, 0, r0, c7, c10, 4          @ drain WB

        mcr     p15, 0, r0, c6, c3, 0           @ disable data area 3~7
        mcr     p15, 0, r0, c6, c4, 0
        mcr     p15, 0, r0, c6, c5, 0
        mcr     p15, 0, r0, c6, c6, 0
        mcr     p15, 0, r0, c6, c7, 0

        mcr     p15, 0, r0, c6, c3, 1           @ disable instruction area 3~7
        mcr     p15, 0, r0, c6, c4, 1
        mcr     p15, 0, r0, c6, c5, 1
        mcr     p15, 0, r0, c6, c6, 1
        mcr     p15, 0, r0, c6, c7, 1

        mov     r0, #0x0000003F                 @ base = 0, size = 4GB
        mcr     p15, 0, r0, c6, c0, 0           @ set area 0, default
        mcr     p15, 0, r0, c6, c0, 1
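
/*
 * Protection-region register layout (c6, c0..c7): bit 0 enables the
 * region, bits [5:1] encode the size as 2^(N+1) bytes (0b01011 = 4KB
 * minimum, 0b11111 = 4GB), and bits [31:12] hold the base address.
 * 0x3F above therefore describes an enabled 4GB region at base 0.
 */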

        ldr     r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM
        ldr     r1, =(CONFIG_DRAM_SIZE >> 12)   @ size of RAM (must be >= 4KB)
        mov     r2, #10                         @ 11 is the minimum (4KB)
1:      add     r2, r2, #1                      @ area size *= 2
        movs    r1, r1, lsr #1                  @ shift out one size bit, set flags
        bne     1b                              @ count not zero r-shift
        orr     r0, r0, r2, lsl #1              @ the area register value
        orr     r0, r0, #1                      @ set enable bit
        mcr     p15, 0, r0, c6, c1, 0           @ set area 1, RAM
        mcr     p15, 0, r0, c6, c1, 1
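
/*
 * Worked example (illustrative only, assuming CONFIG_DRAM_SIZE = 8MB):
 * r1 starts at 0x800 (8MB >> 12); the loop shifts it right 12 times
 * before it reaches zero, leaving r2 = 10 + 12 = 22, and a size field
 * of 22 encodes 2^(22+1) = 8MB.  Sizes that are not a power of two are
 * effectively rounded down to one, so the config values are expected
 * to be powers of two.
 */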

        ldr     r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH
        ldr     r1, =(CONFIG_FLASH_SIZE >> 12)  @ size of FLASH (must be >= 4KB)
        mov     r2, #10                         @ 11 is the minimum (4KB)
1:      add     r2, r2, #1                      @ area size *= 2
        movs    r1, r1, lsr #1                  @ shift out one size bit, set flags
        bne     1b                              @ count not zero r-shift
        orr     r0, r0, r2, lsl #1              @ the area register value
        orr     r0, r0, #1                      @ set enable bit
        mcr     p15, 0, r0, c6, c2, 0           @ set area 2, ROM/FLASH
        mcr     p15, 0, r0, c6, c2, 1

        mov     r0, #0x06
        mcr     p15, 0, r0, c2, c0, 0           @ Region 1&2 cacheable
        mcr     p15, 0, r0, c2, c0, 1
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
        mov     r0, #0x00                       @ disable whole write buffer
#else
        mov     r0, #0x02                       @ Region 1 write buffered
#endif
        mcr     p15, 0, r0, c3, c0, 0
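
/*
 * c2 is the cachable-bits register and c3 the write-buffer control
 * register, one bit per protection region.  0x06 marks regions 1 (RAM)
 * and 2 (ROM/FLASH) cacheable; 0x02 makes only region 1 bufferable,
 * and with a write-through D-cache the write buffer is left disabled
 * entirely, matching the #ifdef above.
 */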

        mov     r0, #0x10000
        sub     r0, r0, #1                      @ r0 = 0xffff
        mcr     p15, 0, r0, c5, c0, 0           @ all read/write access
        mcr     p15, 0, r0, c5, c0, 1

        mrc     p15, 0, r0, c1, c0              @ get control register
        orr     r0, r0, #0x00001000             @ I-cache
        orr     r0, r0, #0x00000005             @ MPU/D-cache

        mov     pc, lr
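
/*
 * __arm940_setup does not enable anything itself: the access-permission
 * registers (c5) were programmed to 0xffff above ("all read/write
 * access"), and the routine returns the proposed control-register value
 * in r0 with the MPU, D-cache and I-cache bits set; the generic ARM
 * start-up code is expected to write that value to CP15 c1.
 */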

        .size   __arm940_setup, . - __arm940_setup

        __INITDATA

/*
 * Purpose : Function pointers used to access above functions - all calls
 *           come through these
 */
        .type   arm940_processor_functions, #object
ENTRY(arm940_processor_functions)
        .word   nommu_early_abort
        .word   cpu_arm940_proc_init
        .word   cpu_arm940_proc_fin
        .word   cpu_arm940_reset
        .word   cpu_arm940_do_idle
        .word   cpu_arm940_dcache_clean_area
        .word   cpu_arm940_switch_mm
        .word   0                @ cpu_*_set_pte
        .size   arm940_processor_functions, . - arm940_processor_functions
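
/*
 * The slot order follows struct processor, the function vector the ARM
 * core code calls through: data abort handler, _proc_init, _proc_fin,
 * reset, _do_idle, dcache_clean_area, switch_mm, set_pte.  The abort
 * slot points at nommu_early_abort and the set_pte slot is 0 because
 * the ARM940T has no MMU.
 */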

        .section ".rodata"

        .type   cpu_arch_name, #object
cpu_arch_name:
        .asciz  "armv4t"
        .size   cpu_arch_name, . - cpu_arch_name

        .type   cpu_elf_name, #object
cpu_elf_name:
        .asciz  "v4"
        .size   cpu_elf_name, . - cpu_elf_name

        .type   cpu_arm940_name, #object
cpu_arm940_name:
        .asciz  "ARM940T"
        .size   cpu_arm940_name, . - cpu_arm940_name

        .align
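
/*
 * The descriptor below lives in .proc.info.init; the boot-time
 * processor lookup (__lookup_processor_type) compares the CPU's main ID
 * register against the value/mask pair (0x41009400 / 0xff00fff0),
 * branches to __arm940_setup for per-CPU initialisation, and picks up
 * the name strings, hwcaps, processor functions and cache functions
 * from the remaining fields.  The zero words fill MMU-related slots,
 * unused on the MMU-less ARM940T.
 */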

        .section ".proc.info.init", #alloc, #execinstr

        .type   __arm940_proc_info,#object
__arm940_proc_info:
        .long   0x41009400
        .long   0xff00fff0
        .long   0
        b       __arm940_setup
        .long   cpu_arch_name
        .long   cpu_elf_name
        .long   HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
        .long   cpu_arm940_name
        .long   arm940_processor_functions
        .long   0
        .long   0
        .long   arm940_cache_fns
        .size   __arm940_proc_info, . - __arm940_proc_info
 
