/* $Id: clear_page.S,v 1.1.1.1 2004-04-15 01:17:15 phoenix Exp $
 *
 * __clear_user_page, __clear_user, clear_page implementation of SuperH
 *
 * Copyright (C) 2001  Kaz Kojima
 * Copyright (C) 2001, 2002  Niibe Yutaka
 *
 */
#include <linux/config.h>       /* CONFIG_SH_CACHE_ASSOC */
#include <linux/linkage.h>      /* ENTRY(), SYMBOL_NAME() */

/*
 * clear_page
 * @to: P1 address
 *
 * void clear_page(void *to)
 */

/*
 * r0 --- scratch
 * r4 --- to
 * r5 --- to + 4096
 */
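/*
 * Rough C equivalent of the loop below (an illustrative sketch only, not
 * part of the build; the assembly clears one 32-byte cache line per pass):
 *
 *	void clear_page(void *to)
 *	{
 *		unsigned long *p = (unsigned long *) to;
 *		unsigned long *end = p + 4096 / sizeof(*p);
 *
 *		while (p < end)
 *			*p++ = 0;
 *	}
 */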
ENTRY(clear_page)
        mov     r4,r5
        mov.w   .Llimit,r0
        add     r0,r5
        mov     #0,r0
        !
1:
#if defined(__sh3__)
        mov.l   r0,@r4
#elif defined(__SH4__)
        movca.l r0,@r4
        mov     r4,r1
#endif
        add     #32,r4
        mov.l   r0,@-r4
        mov.l   r0,@-r4
        mov.l   r0,@-r4
        mov.l   r0,@-r4
        mov.l   r0,@-r4
        mov.l   r0,@-r4
        mov.l   r0,@-r4
#if defined(__SH4__)
        ocbwb   @r1
#endif
        cmp/eq  r5,r4
        bf/s    1b
         add    #28,r4
        !
        rts
         nop
.Llimit:        .word   (4096-28)
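
/*
 * __clear_user (summary of the routine below)
 *
 * r0 --- scratch / return value
 * r4 --- to
 * r5 --- length in bytes
 *
 * Returns 0 on success, or the number of bytes left uncleared when one
 * of the labelled stores faults (see the __ex_table entries after the
 * routine).
 */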
ENTRY(__clear_user)
        !
        mov     #0, r0
        mov     #0xe0, r1       ! 0xffffffe0
        !
        ! r4..(r4+31)&~32          -------- not aligned [ Area 0 ]
        ! (r4+31)&~32..(r4+r5)&~32 -------- aligned     [ Area 1 ]
        ! (r4+r5)&~32..r4+r5       -------- not aligned [ Area 2 ]
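        !
        ! e.g. with hypothetical arguments r4 = 0x10000013, r5 = 100:
        !   Area 0 = 0x10000013..0x10000020 (13 bytes, byte stores)
        !   Area 1 = 0x10000020..0x10000060 (64 bytes, long-word stores)
        !   Area 2 = 0x10000060..0x10000077 (23 bytes, byte stores)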
        !
        ! Clear area 0
        mov     r4, r2
        !
        tst     r1, r5          ! length < 32
        bt      .Larea2         ! skip to remainder
        !
        add     #31, r2
        and     r1, r2
        cmp/eq  r4, r2
        bt      .Larea1
        mov     r2, r3
        sub     r4, r3
        mov     r3, r7
        mov     r4, r2
        !
.L0:    dt      r3
0:      mov.b   r0, @r2
        bf/s    .L0
         add    #1, r2
        !
        sub     r7, r5
        mov     r2, r4
.Larea1:
        mov     r4, r3
        add     r5, r3
        and     r1, r3
        cmp/hi  r2, r3
        bf      .Larea2
        !
        ! Clear area 1
#if defined(__SH4__)
1:      movca.l r0, @r2
#else
1:      mov.l   r0, @r2
#endif
        add     #4, r2
2:      mov.l   r0, @r2
        add     #4, r2
3:      mov.l   r0, @r2
        add     #4, r2
4:      mov.l   r0, @r2
        add     #4, r2
5:      mov.l   r0, @r2
        add     #4, r2
6:      mov.l   r0, @r2
        add     #4, r2
7:      mov.l   r0, @r2
        add     #4, r2
8:      mov.l   r0, @r2
        add     #4, r2
        cmp/hi  r2, r3
        bt/s    1b
         nop
        !
        ! Clear area 2
.Larea2:
        mov     r4, r3
        add     r5, r3
        cmp/hs  r3, r2
        bt/s    .Ldone
         sub    r2, r3
.L2:    dt      r3
9:      mov.b   r0, @r2
        bf/s    .L2
         add    #1, r2
        !
.Ldone: rts
         mov    #0, r0  ! return 0 as normal return

        ! return the number of bytes remaining
.Lbad_clear_user:
        mov     r4, r0
        add     r5, r0
        rts
         sub    r2, r0
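
! Exception table: each entry below pairs one of the stores labelled 0:-9:
! above with .Lbad_clear_user, so a fault while clearing user memory resumes
! there and the number of bytes not yet cleared is returned in r0.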

.section __ex_table,"a"
        .align 2
        .long   0b, .Lbad_clear_user
        .long   1b, .Lbad_clear_user
        .long   2b, .Lbad_clear_user
        .long   3b, .Lbad_clear_user
        .long   4b, .Lbad_clear_user
        .long   5b, .Lbad_clear_user
        .long   6b, .Lbad_clear_user
        .long   7b, .Lbad_clear_user
        .long   8b, .Lbad_clear_user
        .long   9b, .Lbad_clear_user
.previous

#if defined(__SH4__)
/*
 * __clear_user_page
 * @to: P1 address (with same color)
 * @orig_to: P1 address
 *
 * void __clear_user_page(void *to, void *orig_to)
 */

/*
 * r0 --- scratch
 * r4 --- to
 * r5 --- orig_to
 * r6 --- to + 4096
 */
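/*
 * Per-iteration sketch (one 32-byte cache line, assuming the usual SH-4
 * cache-alias handling): ocbi discards the cache line for the plain P1
 * alias at @r5 (orig_to), movca.l plus seven mov.l clear the same-color
 * alias at @r4 (to) without fetching the line from memory, and ocbwb
 * then writes the zeroed line back.
 */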
ENTRY(__clear_user_page)
        mov.w   .L4096,r0
        mov     r4,r6
        add     r0,r6
        mov     #0,r0
        !
1:      ocbi    @r5
        add     #32,r5
        movca.l r0,@r4
        mov     r4,r1
        add     #32,r4
        mov.l   r0,@-r4
        mov.l   r0,@-r4
        mov.l   r0,@-r4
        mov.l   r0,@-r4
        mov.l   r0,@-r4
        mov.l   r0,@-r4
        mov.l   r0,@-r4
        add     #28,r4
        cmp/eq  r6,r4
        bf/s    1b
         ocbwb  @r1
        !
        rts
         nop
.L4096: .word   4096

/*
 * __flush_cache_4096
 *
 * Flush the page at the specified physical address by writing to
 * the memory mapped address array.
 * The offset into the memory mapped cache array selects the `color' of the
 * virtual addresses which will be checked.
 * Lower two bits of phys control the operation (invalidate/write-back).
 *
 * void __flush_cache_4096(unsigned long addr, unsigned long phys,
 *                         unsigned long exec_offset);
 *
 * @addr: address of the memory mapped cache address array
 * @phys: P1 address to be flushed
 * @exec_offset: set to 0x20000000 if the flush needs to be executed from P2
 * (ie from uncached memory), otherwise 0.
 */

/*
 * Updated for the 2-way associative cache option on the SH-4-202 and SH7751R.
 *
 * The current implementation simply adds an additional loop to flush the
 * other way, but this could be improved by merging both loops to handle the
 * flushing of both ways with one iteration.
 *
 * benedict.gaster@superh.com
 */

/*
 * r4 --- addr
 * r5 --- phys
 * r6 --- exec_offset
 */
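/*
 * Notes on the loop below: r2 = 64 << 1 = 128, r6 = 64 and r7 = 96, so each
 * .rept pass writes the address array at offsets 0, 32, 64 and 96 from r0
 * and then advances r0 and r5 by 128; 32 passes cover 4096 bytes.  The jmp
 * through r3 (address of label 2 plus exec_offset) is what lets the loop be
 * executed from P2 (uncached) when exec_offset is 0x20000000.
 */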

ENTRY(__flush_cache_4096)
        mov.l   1f,r3
        add     r6,r3
        mov     r4,r0
        mov     #64,r2
        shll    r2
        mov     #64,r6
        jmp     @r3
         mov    #96,r7
        .align  2
1:      .long   2f
2:
#if defined (CONFIG_SH_CACHE_ASSOC)
        mov     r5, r3
#endif
        .rept   32
        mov.l   r5,@r0
        mov.l   r5,@(32,r0)
        mov.l   r5,@(r0,r6)
        mov.l   r5,@(r0,r7)
        add     r2,r5
        add     r2,r0
        .endr

#if defined (CONFIG_SH_CACHE_ASSOC)
        mov     r4, r0
        mov     #0x40, r1       ! set bit 14 in r0 to imply 2 way.
        shll8   r1
        or      r1, r0
        .rept   32
        mov.l   r3,@r0
        mov.l   r3,@(32,r0)
        mov.l   r3,@(r0,r6)
        mov.l   r3,@(r0,r7)
        add     r2,r3
        add     r2,r0
        .endr
#endif
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        rts
        nop
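
/*
 * __flush_dcache_all
 *
 * Inferred from the loop below: write back and invalidate the operand
 * cache by allocating lines with movca.l from a 16kB-aligned window at
 * empty_zero_page and immediately discarding them with ocbi, four 32-byte
 * lines per pass, 128 passes (16kB per way), with the SR.BL bit set around
 * each group of movca.l/ocbi pairs.
 */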
278
 
279
 
ENTRY(__flush_dcache_all)
        mov.l   2f,r0
        mov.l   3f,r4
        and     r0,r4           ! r4 = (unsigned long)&empty_zero_page[0] & ~0x3fff
        stc     sr,r1           ! save SR
        mov.l   4f,r2
        or      r1,r2
        mov     #32,r3
        shll2   r3

! TODO : make this be dynamically selected based on CPU probing rather than assembled-in

#if defined (CONFIG_SH_CACHE_ASSOC)
        mov     #0x40, r5
        shll8   r5
        add     r4, r5          ! r5 = r4 + 16k
1:
        ldc     r2,sr           ! set BL bit
        movca.l r0,@r4
        movca.l r0,@r5
        ocbi    @r4
        add     #32,r4
        ocbi    @r5
        add     #32,r5
        movca.l r0,@r4
        movca.l r0,@r5
        ocbi    @r4
        add     #32,r4
        ocbi    @r5
        add     #32,r5
        movca.l r0,@r4
        movca.l r0,@r5
        ocbi    @r4
        add     #32,r4
        ocbi    @r5
        add     #32,r5
        movca.l r0,@r4
        movca.l r0,@r5
        ocbi    @r4
        add     #32, r4
        ocbi    @r5
        ldc     r1,sr           ! restore SR
        dt      r3
        bf/s    1b
         add    #32,r5

        rts
         nop
#else
1:
        ldc     r2,sr           ! set BL bit
        movca.l r0,@r4
        ocbi    @r4
        add     #32,r4
        movca.l r0,@r4
        ocbi    @r4
        add     #32,r4
        movca.l r0,@r4
        ocbi    @r4
        add     #32,r4
        movca.l r0,@r4
        ocbi    @r4
        ldc     r1,sr           ! restore SR
        dt      r3
        bf/s    1b
         add    #32,r4

        rts
         nop
#endif /* CONFIG_SH_CACHE_ASSOC */

        .align  2
2:      .long   0xffffc000

3:      .long   SYMBOL_NAME(empty_zero_page)
4:      .long   0x10000000      ! BL bit

/* flush_cache_4096_all(unsigned long addr) */
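/*
 * Same movca.l/ocbi technique as __flush_dcache_all above, but with only 32
 * passes (4kB per way), and addr is OR-ed into the 16kB-aligned
 * empty_zero_page base, so in effect one 4kB slice of the cache is flushed.
 */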
ENTRY(flush_cache_4096_all)
        mov.l   2f,r0
        mov.l   3f,r2
        and     r0,r2
        or      r2,r4           ! r4 = addr | (unsigned long)&empty_zero_page[0] & ~0x3fff
        stc     sr,r1           ! save SR
        mov.l   4f,r2
        or      r1,r2
        mov     #32,r3
! TODO : make this be dynamically selected based on CPU probing rather than assembled-in

#if defined (CONFIG_SH_CACHE_ASSOC)
        mov     #0x40, r5
        shll8   r5
        add     r4, r5          ! r5 = r4 + 16k
1:
        ldc     r2,sr           ! set BL bit
        movca.l r0,@r4
        movca.l r0,@r5
        ocbi    @r4
        add     #32,r4
        ocbi    @r5
        add     #32,r5
        movca.l r0,@r4
        movca.l r0,@r5
        ocbi    @r4
        add     #32,r4
        ocbi    @r5
        add     #32,r5
        movca.l r0,@r4
        movca.l r0,@r5
        ocbi    @r4
        add     #32,r4
        ocbi    @r5
        add     #32,r5
        movca.l r0,@r4
        movca.l r0,@r5
        ocbi    @r4
        add     #32,r4
        ocbi    @r5

        ldc     r1,sr           ! restore SR
        dt      r3
        bf/s    1b
        add     #32,r5

        rts
        nop
#else
1:
        ldc     r2,sr           ! set BL bit
        movca.l r0,@r4
        ocbi    @r4
        add     #32,r4

        movca.l r0,@r4
        ocbi    @r4
        add     #32,r4
        movca.l r0,@r4
        ocbi    @r4
        add     #32,r4
        movca.l r0,@r4
        ocbi    @r4

        ldc     r1,sr           ! restore SR
        dt      r3
        bf/s    1b
        add     #32,r4

        rts
        nop
#endif

        .align  2
2:      .long   0xffffc000
3:      .long   SYMBOL_NAME(empty_zero_page)
4:      .long   0x10000000      ! BL bit

#endif
