OpenCores Subversion repository: or1k
URL: https://opencores.org/ocsvn/or1k/or1k/trunk

or1k/trunk/linux/linux-2.4/include/asm-mips64/r4kcache.h (rev 1765)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Inline assembly cache operations.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997 - 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 2004 Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef __ASM_R4KCACHE_H
#define __ASM_R4KCACHE_H

#include <asm/asm.h>
#include <asm/cacheops.h>

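/*
 * cache_op() emits a single MIPS CACHE instruction.  The immediate "op"
 * encodes both the target cache and the operation to perform (see
 * <asm/cacheops.h>); the "m" constraint lets the compiler build a valid
 * base+offset memory operand for the target line.
 */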
#define cache_op(op,addr)                                               \
        __asm__ __volatile__(                                           \
        "       .set    noreorder                               \n"     \
        "       .set    mips3                                   \n"     \
        "       cache   %0, %1                                  \n"     \
        "       .set    mips0                                   \n"     \
        "       .set    reorder"                                        \
        :                                                               \
        : "i" (op), "m" (*(unsigned char *)(addr)))

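/*
 * The Index_* operations select a cache line purely by the index (and
 * way) bits of the address; the Hit_* operations only take effect if
 * the given address actually hits in the cache.
 */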
static inline void flush_icache_line_indexed(unsigned long addr)
{
        cache_op(Index_Invalidate_I, addr);
}

static inline void flush_dcache_line_indexed(unsigned long addr)
{
        cache_op(Index_Writeback_Inv_D, addr);
}

static inline void flush_scache_line_indexed(unsigned long addr)
{
        cache_op(Index_Writeback_Inv_SD, addr);
}

static inline void flush_icache_line(unsigned long addr)
{
        cache_op(Hit_Invalidate_I, addr);
}

static inline void flush_dcache_line(unsigned long addr)
{
        cache_op(Hit_Writeback_Inv_D, addr);
}

static inline void invalidate_dcache_line(unsigned long addr)
{
        cache_op(Hit_Invalidate_D, addr);
}

static inline void invalidate_scache_line(unsigned long addr)
{
        cache_op(Hit_Invalidate_SD, addr);
}

static inline void flush_scache_line(unsigned long addr)
{
        cache_op(Hit_Writeback_Inv_SD, addr);
}

/*
 * The next two are for badland addresses like signal trampolines.
 */
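/*
 * The __ex_table entry generated below pairs the address of the CACHE
 * instruction at label 1 with the fixup at label 2, so a TLB fault on
 * the cache op is handled by simply skipping the operation.
 */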
static inline void protected_flush_icache_line(unsigned long addr)
{
        __asm__ __volatile__(
                ".set noreorder\n\t"
                ".set mips3\n"
                "1:\tcache %0,(%1)\n"
                "2:\t.set mips0\n\t"
                ".set reorder\n\t"
                ".section\t__ex_table,\"a\"\n\t"
                STR(PTR)"\t1b,2b\n\t"
                ".previous"
                :
                : "i" (Hit_Invalidate_I), "r" (addr));
}

/*
 * R10000 / R12000 hazard - these processors don't support the Hit_Writeback_D
 * cacheop so we use Hit_Writeback_Inv_D which is supported by all R4000-style
 * caches.  We're talking about one cacheline unnecessarily getting invalidated
 * here, so the penalty isn't too severe.
 */
static inline void protected_writeback_dcache_line(unsigned long addr)
{
        __asm__ __volatile__(
                ".set noreorder\n\t"
                ".set mips3\n"
                "1:\tcache %0,(%1)\n"
                "2:\t.set mips0\n\t"
                ".set reorder\n\t"
                ".section\t__ex_table,\"a\"\n\t"
                STR(PTR)"\t1b,2b\n\t"
                ".previous"
                :
                : "i" (Hit_Writeback_Inv_D), "r" (addr));
}

/*
 * This one is RM7000-specific
 */
static inline void invalidate_tcache_page(unsigned long addr)
{
        cache_op(Page_Invalidate_T, addr);
}

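/*
 * cache16_unroll32() issues 32 CACHE operations on consecutive 16-byte
 * cache lines, covering 0x200 bytes starting at "base".
 */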
#define cache16_unroll32(base,op)                                       \
        __asm__ __volatile__(                                           \
        "       .set noreorder                                  \n"     \
        "       .set mips3                                      \n"     \
        "       cache %1, 0x000(%0); cache %1, 0x010(%0)        \n"     \
        "       cache %1, 0x020(%0); cache %1, 0x030(%0)        \n"     \
        "       cache %1, 0x040(%0); cache %1, 0x050(%0)        \n"     \
        "       cache %1, 0x060(%0); cache %1, 0x070(%0)        \n"     \
        "       cache %1, 0x080(%0); cache %1, 0x090(%0)        \n"     \
        "       cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)        \n"     \
        "       cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)        \n"     \
        "       cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)        \n"     \
        "       cache %1, 0x100(%0); cache %1, 0x110(%0)        \n"     \
        "       cache %1, 0x120(%0); cache %1, 0x130(%0)        \n"     \
        "       cache %1, 0x140(%0); cache %1, 0x150(%0)        \n"     \
        "       cache %1, 0x160(%0); cache %1, 0x170(%0)        \n"     \
        "       cache %1, 0x180(%0); cache %1, 0x190(%0)        \n"     \
        "       cache %1, 0x1a0(%0); cache %1, 0x1b0(%0)        \n"     \
        "       cache %1, 0x1c0(%0); cache %1, 0x1d0(%0)        \n"     \
        "       cache %1, 0x1e0(%0); cache %1, 0x1f0(%0)        \n"     \
        "       .set mips0                                      \n"     \
        "       .set reorder                                    \n"     \
                :                                                       \
                : "r" (base),                                           \
                  "i" (op));

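/*
 * The blast_*() routines walk the whole cache through KSEG0: the outer
 * loop steps through the ways (the way-select bits start at waybit) and
 * the inner loop covers one way's worth of indexes, 0x200 bytes per
 * unrolled call.
 */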
static inline void blast_dcache16(void)
{
        unsigned long start = KSEG0;
        unsigned long end = start + current_cpu_data.dcache.waysize;
        unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
        unsigned long ws_end = current_cpu_data.dcache.ways <<
                               current_cpu_data.dcache.waybit;
        unsigned long ws, addr;

        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x200)
                        cache16_unroll32(addr|ws,Index_Writeback_Inv_D);
}

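/*
 * The *_page() variants use Hit_* ops and thus require a valid
 * (translatable) address for the page, while the *_page_indexed()
 * variants use Index_* ops, which treat the address purely as a cache
 * index and so work even without a valid mapping.
 */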
static inline void blast_dcache16_page(unsigned long page)
{
        unsigned long start = page;
        unsigned long end = start + PAGE_SIZE;

        do {
                cache16_unroll32(start,Hit_Writeback_Inv_D);
                start += 0x200;
        } while (start < end);
}

static inline void blast_dcache16_page_indexed(unsigned long page)
{
        unsigned long start = page;
        unsigned long end = start + PAGE_SIZE;
        unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
        unsigned long ws_end = current_cpu_data.dcache.ways <<
                               current_cpu_data.dcache.waybit;
        unsigned long ws, addr;

        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x200)
                        cache16_unroll32(addr|ws,Index_Writeback_Inv_D);
}

static inline void blast_icache16(void)
{
        unsigned long start = KSEG0;
        unsigned long end = start + current_cpu_data.icache.waysize;
        unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
        unsigned long ws_end = current_cpu_data.icache.ways <<
                               current_cpu_data.icache.waybit;
        unsigned long ws, addr;

        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x200)
                        cache16_unroll32(addr|ws,Index_Invalidate_I);
}

static inline void blast_icache16_page(unsigned long page)
{
        unsigned long start = page;
        unsigned long end = start + PAGE_SIZE;

        do {
                cache16_unroll32(start,Hit_Invalidate_I);
                start += 0x200;
        } while (start < end);
}

static inline void blast_icache16_page_indexed(unsigned long page)
{
        unsigned long start = page;
        unsigned long end = start + PAGE_SIZE;
        unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
        unsigned long ws_end = current_cpu_data.icache.ways <<
                               current_cpu_data.icache.waybit;
        unsigned long ws, addr;

        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x200)
                        cache16_unroll32(addr|ws,Index_Invalidate_I);
}

static inline void blast_scache16(void)
{
        unsigned long start = KSEG0;
        unsigned long end = start + current_cpu_data.scache.waysize;
        unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
        unsigned long ws_end = current_cpu_data.scache.ways <<
                               current_cpu_data.scache.waybit;
        unsigned long ws, addr;

        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x200)
                        cache16_unroll32(addr|ws,Index_Writeback_Inv_SD);
}

static inline void blast_scache16_page(unsigned long page)
{
        unsigned long start = page;
        unsigned long end = start + PAGE_SIZE;

        do {
                cache16_unroll32(start,Hit_Writeback_Inv_SD);
                start += 0x200;
        } while (start < end);
}

static inline void blast_scache16_page_indexed(unsigned long page)
{
        unsigned long start = page;
        unsigned long end = start + PAGE_SIZE;
        unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
        unsigned long ws_end = current_cpu_data.scache.ways <<
                               current_cpu_data.scache.waybit;
        unsigned long ws, addr;

        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x200)
                        cache16_unroll32(addr|ws,Index_Writeback_Inv_SD);
}

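/*
 * Same pattern for 32-byte cache lines: 32 CACHE operations covering
 * 0x400 bytes per call.
 */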
#define cache32_unroll32(base,op)                                       \
        __asm__ __volatile__(                                           \
        "       .set noreorder                                  \n"     \
        "       .set mips3                                      \n"     \
        "       cache %1, 0x000(%0); cache %1, 0x020(%0)        \n"     \
        "       cache %1, 0x040(%0); cache %1, 0x060(%0)        \n"     \
        "       cache %1, 0x080(%0); cache %1, 0x0a0(%0)        \n"     \
        "       cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)        \n"     \
        "       cache %1, 0x100(%0); cache %1, 0x120(%0)        \n"     \
        "       cache %1, 0x140(%0); cache %1, 0x160(%0)        \n"     \
        "       cache %1, 0x180(%0); cache %1, 0x1a0(%0)        \n"     \
        "       cache %1, 0x1c0(%0); cache %1, 0x1e0(%0)        \n"     \
        "       cache %1, 0x200(%0); cache %1, 0x220(%0)        \n"     \
        "       cache %1, 0x240(%0); cache %1, 0x260(%0)        \n"     \
        "       cache %1, 0x280(%0); cache %1, 0x2a0(%0)        \n"     \
        "       cache %1, 0x2c0(%0); cache %1, 0x2e0(%0)        \n"     \
        "       cache %1, 0x300(%0); cache %1, 0x320(%0)        \n"     \
        "       cache %1, 0x340(%0); cache %1, 0x360(%0)        \n"     \
        "       cache %1, 0x380(%0); cache %1, 0x3a0(%0)        \n"     \
        "       cache %1, 0x3c0(%0); cache %1, 0x3e0(%0)        \n"     \
        "       .set mips0                                      \n"     \
        "       .set reorder                                    \n"     \
                :                                                       \
                : "r" (base),                                           \
                  "i" (op));

static inline void blast_dcache32(void)
{
        unsigned long start = KSEG0;
        unsigned long end = start + current_cpu_data.dcache.waysize;
        unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
        unsigned long ws_end = current_cpu_data.dcache.ways <<
                               current_cpu_data.dcache.waybit;
        unsigned long ws, addr;

        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x400)
                        cache32_unroll32(addr|ws,Index_Writeback_Inv_D);
}

static inline void blast_dcache32_page(unsigned long page)
{
        unsigned long start = page;
        unsigned long end = start + PAGE_SIZE;

        do {
                cache32_unroll32(start,Hit_Writeback_Inv_D);
                start += 0x400;
        } while (start < end);
}

static inline void blast_dcache32_page_indexed(unsigned long page)
{
        unsigned long start = page;
        unsigned long end = start + PAGE_SIZE;
        unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
        unsigned long ws_end = current_cpu_data.dcache.ways <<
                               current_cpu_data.dcache.waybit;
        unsigned long ws, addr;

        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x400)
                        cache32_unroll32(addr|ws,Index_Writeback_Inv_D);
}

static inline void blast_icache32(void)
{
        unsigned long start = KSEG0;
        unsigned long end = start + current_cpu_data.icache.waysize;
        unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
        unsigned long ws_end = current_cpu_data.icache.ways <<
                               current_cpu_data.icache.waybit;
        unsigned long ws, addr;

        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x400)
                        cache32_unroll32(addr|ws,Index_Invalidate_I);
}

static inline void blast_icache32_page(unsigned long page)
{
        unsigned long start = page;
        unsigned long end = start + PAGE_SIZE;

        do {
                cache32_unroll32(start,Hit_Invalidate_I);
                start += 0x400;
        } while (start < end);
}

static inline void blast_icache32_page_indexed(unsigned long page)
{
        unsigned long start = page;
        unsigned long end = start + PAGE_SIZE;
        unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
        unsigned long ws_end = current_cpu_data.icache.ways <<
                               current_cpu_data.icache.waybit;
        unsigned long ws, addr;

        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x400)
                        cache32_unroll32(addr|ws,Index_Invalidate_I);
}

static inline void blast_scache32(void)
{
        unsigned long start = KSEG0;
        unsigned long end = start + current_cpu_data.scache.waysize;
        unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
        unsigned long ws_end = current_cpu_data.scache.ways <<
                               current_cpu_data.scache.waybit;
        unsigned long ws, addr;

        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x400)
                        cache32_unroll32(addr|ws,Index_Writeback_Inv_SD);
}

static inline void blast_scache32_page(unsigned long page)
{
        unsigned long start = page;
        unsigned long end = start + PAGE_SIZE;

        do {
                cache32_unroll32(start,Hit_Writeback_Inv_SD);
                start += 0x400;
        } while (start < end);
}

static inline void blast_scache32_page_indexed(unsigned long page)
{
        unsigned long start = page;
        unsigned long end = start + PAGE_SIZE;
        unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
        unsigned long ws_end = current_cpu_data.scache.ways <<
                               current_cpu_data.scache.waybit;
        unsigned long ws, addr;

        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x400)
                        cache32_unroll32(addr|ws,Index_Writeback_Inv_SD);
}

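/*
 * 64-byte line variant: 32 CACHE operations covering 0x800 bytes per
 * call.
 */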
#define cache64_unroll32(base,op)                                       \
        __asm__ __volatile__(                                           \
        "       .set noreorder                                  \n"     \
        "       .set mips3                                      \n"     \
        "       cache %1, 0x000(%0); cache %1, 0x040(%0)        \n"     \
        "       cache %1, 0x080(%0); cache %1, 0x0c0(%0)        \n"     \
        "       cache %1, 0x100(%0); cache %1, 0x140(%0)        \n"     \
        "       cache %1, 0x180(%0); cache %1, 0x1c0(%0)        \n"     \
        "       cache %1, 0x200(%0); cache %1, 0x240(%0)        \n"     \
        "       cache %1, 0x280(%0); cache %1, 0x2c0(%0)        \n"     \
        "       cache %1, 0x300(%0); cache %1, 0x340(%0)        \n"     \
        "       cache %1, 0x380(%0); cache %1, 0x3c0(%0)        \n"     \
        "       cache %1, 0x400(%0); cache %1, 0x440(%0)        \n"     \
        "       cache %1, 0x480(%0); cache %1, 0x4c0(%0)        \n"     \
        "       cache %1, 0x500(%0); cache %1, 0x540(%0)        \n"     \
        "       cache %1, 0x580(%0); cache %1, 0x5c0(%0)        \n"     \
        "       cache %1, 0x600(%0); cache %1, 0x640(%0)        \n"     \
        "       cache %1, 0x680(%0); cache %1, 0x6c0(%0)        \n"     \
        "       cache %1, 0x700(%0); cache %1, 0x740(%0)        \n"     \
        "       cache %1, 0x780(%0); cache %1, 0x7c0(%0)        \n"     \
        "       .set mips0                                      \n"     \
        "       .set reorder                                    \n"     \
                :                                                       \
                : "r" (base),                                           \
                  "i" (op));

static inline void blast_icache64(void)
{
        unsigned long start = KSEG0;
        unsigned long end = start + current_cpu_data.icache.waysize;
        unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
        unsigned long ws_end = current_cpu_data.icache.ways <<
                               current_cpu_data.icache.waybit;
        unsigned long ws, addr;

        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x800)
                        cache64_unroll32(addr|ws,Index_Invalidate_I);
}

static inline void blast_icache64_page(unsigned long page)
{
        unsigned long start = page;
        unsigned long end = start + PAGE_SIZE;

        do {
                cache64_unroll32(start,Hit_Invalidate_I);
                start += 0x800;
        } while (start < end);
}

static inline void blast_icache64_page_indexed(unsigned long page)
{
        unsigned long start = page;
        unsigned long end = start + PAGE_SIZE;
        unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
        unsigned long ws_end = current_cpu_data.icache.ways <<
                               current_cpu_data.icache.waybit;
        unsigned long ws, addr;

        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x800)
                        cache64_unroll32(addr|ws,Index_Invalidate_I);
}

static inline void blast_scache64(void)
{
        unsigned long start = KSEG0;
        unsigned long end = start + current_cpu_data.scache.waysize;
        unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
        unsigned long ws_end = current_cpu_data.scache.ways <<
                               current_cpu_data.scache.waybit;
        unsigned long ws, addr;

        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x800)
                        cache64_unroll32(addr|ws,Index_Writeback_Inv_SD);
}

static inline void blast_scache64_page(unsigned long page)
{
        unsigned long start = page;
        unsigned long end = start + PAGE_SIZE;

        do {
                cache64_unroll32(start,Hit_Writeback_Inv_SD);
                start += 0x800;
        } while (start < end);
}

static inline void blast_scache64_page_indexed(unsigned long page)
{
        unsigned long start = page;
        unsigned long end = start + PAGE_SIZE;
        unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
        unsigned long ws_end = current_cpu_data.scache.ways <<
                               current_cpu_data.scache.waybit;
        unsigned long ws, addr;

        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x800)
                        cache64_unroll32(addr|ws,Index_Writeback_Inv_SD);
}

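/*
 * 128-byte line variant: 32 CACHE operations covering 0x1000 bytes, so
 * with 4K pages a single call handles a whole page and the loop in
 * blast_scache128_page() runs exactly once.
 */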
#define cache128_unroll32(base,op)                                      \
        __asm__ __volatile__(                                           \
        "       .set noreorder                                  \n"     \
        "       .set mips3                                      \n"     \
        "       cache %1, 0x000(%0); cache %1, 0x080(%0)        \n"     \
        "       cache %1, 0x100(%0); cache %1, 0x180(%0)        \n"     \
        "       cache %1, 0x200(%0); cache %1, 0x280(%0)        \n"     \
        "       cache %1, 0x300(%0); cache %1, 0x380(%0)        \n"     \
        "       cache %1, 0x400(%0); cache %1, 0x480(%0)        \n"     \
        "       cache %1, 0x500(%0); cache %1, 0x580(%0)        \n"     \
        "       cache %1, 0x600(%0); cache %1, 0x680(%0)        \n"     \
        "       cache %1, 0x700(%0); cache %1, 0x780(%0)        \n"     \
        "       cache %1, 0x800(%0); cache %1, 0x880(%0)        \n"     \
        "       cache %1, 0x900(%0); cache %1, 0x980(%0)        \n"     \
        "       cache %1, 0xa00(%0); cache %1, 0xa80(%0)        \n"     \
        "       cache %1, 0xb00(%0); cache %1, 0xb80(%0)        \n"     \
        "       cache %1, 0xc00(%0); cache %1, 0xc80(%0)        \n"     \
        "       cache %1, 0xd00(%0); cache %1, 0xd80(%0)        \n"     \
        "       cache %1, 0xe00(%0); cache %1, 0xe80(%0)        \n"     \
        "       cache %1, 0xf00(%0); cache %1, 0xf80(%0)        \n"     \
        "       .set mips0                                      \n"     \
        "       .set reorder                                    \n"     \
                :                                                       \
                : "r" (base),                                           \
                  "i" (op));

static inline void blast_scache128(void)
{
        unsigned long start = KSEG0;
        unsigned long end = start + current_cpu_data.scache.waysize;
        unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
        unsigned long ws_end = current_cpu_data.scache.ways <<
                               current_cpu_data.scache.waybit;
        unsigned long ws, addr;

        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x1000)
                        cache128_unroll32(addr|ws,Index_Writeback_Inv_SD);
}

static inline void blast_scache128_page(unsigned long page)
{
        unsigned long start = page;
        unsigned long end = start + PAGE_SIZE;

        do {
                cache128_unroll32(start,Hit_Writeback_Inv_SD);
                start += 0x1000;
        } while (start < end);
}

static inline void blast_scache128_page_indexed(unsigned long page)
{
        unsigned long start = page;
        unsigned long end = start + PAGE_SIZE;
        unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
        unsigned long ws_end = current_cpu_data.scache.ways <<
                               current_cpu_data.scache.waybit;
        unsigned long ws, addr;

        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x1000)
                        cache128_unroll32(addr|ws,Index_Writeback_Inv_SD);
}

#endif /* __ASM_R4KCACHE_H */
