OpenCores
URL https://opencores.org/ocsvn/or1k_soc_on_altera_embedded_dev_kit/or1k_soc_on_altera_embedded_dev_kit/trunk

Subversion Repositories or1k_soc_on_altera_embedded_dev_kit

[/] [or1k_soc_on_altera_embedded_dev_kit/] [trunk/] [linux-2.6/] [linux-2.6.24/] [arch/] [sparc/] [mm/] [sun4c.c] - Blame information for rev 3

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 3 xianfeng
/* sun4c.c: Doing in software what should be done in hardware.
2
 *
3
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
4
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
5
 * Copyright (C) 1996 Andrew Tridgell (Andrew.Tridgell@anu.edu.au)
6
 * Copyright (C) 1997-2000 Anton Blanchard (anton@samba.org)
7
 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
8
 */
9
 
10
#define NR_TASK_BUCKETS 512
11
 
12
#include <linux/kernel.h>
13
#include <linux/mm.h>
14
#include <linux/init.h>
15
#include <linux/bootmem.h>
16
#include <linux/highmem.h>
17
#include <linux/fs.h>
18
#include <linux/seq_file.h>
19
#include <linux/scatterlist.h>
20
 
21
#include <asm/page.h>
22
#include <asm/pgalloc.h>
23
#include <asm/pgtable.h>
24
#include <asm/vaddrs.h>
25
#include <asm/idprom.h>
26
#include <asm/machines.h>
27
#include <asm/memreg.h>
28
#include <asm/processor.h>
29
#include <asm/auxio.h>
30
#include <asm/io.h>
31
#include <asm/oplib.h>
32
#include <asm/openprom.h>
33
#include <asm/mmu_context.h>
34
#include <asm/sun4paddr.h>
35
#include <asm/highmem.h>
36
#include <asm/btfixup.h>
37
#include <asm/cacheflush.h>
38
#include <asm/tlbflush.h>
39
 
40
/* Because of our dynamic kernel TLB miss strategy, and how
41
 * our DVMA mapping allocation works, you _MUST_:
42
 *
43
 * 1) Disable interrupts _and_ not touch any dynamic kernel
44
 *    memory while messing with kernel MMU state.  By
45
 *    dynamic memory I mean any object which is not in
46
 *    the kernel image itself or a thread_union (both of
47
 *    which are locked into the MMU).
48
 * 2) Disable interrupts while messing with user MMU state.
49
 */
50
 
51
extern int num_segmaps, num_contexts;
52
 
53
extern unsigned long page_kernel;
54
 
55
#ifdef CONFIG_SUN4
56
#define SUN4C_VAC_SIZE sun4c_vacinfo.num_bytes
57
#else
58
/* That's it, we prom_halt() on sun4c if the cache size is something other than 65536.
59
 * So let's save some cycles and just use that everywhere except for that bootup
60
 * sanity check.
61
 */
62
#define SUN4C_VAC_SIZE 65536
63
#endif
64
 
65
#define SUN4C_KERNEL_BUCKETS 32
66
 
67
/* Flushing the cache. */
68
struct sun4c_vac_props sun4c_vacinfo;
69
unsigned long sun4c_kernel_faults;
70
 
71
/* Invalidate every sun4c cache line tag. */
72
static void __init sun4c_flush_all(void)
73
{
74
        unsigned long begin, end;
75
 
76
        if (sun4c_vacinfo.on)
77
                panic("SUN4C: AIEEE, trying to invalidate vac while it is on.");
78
 
79
        /* Clear 'valid' bit in all cache line tags */
80
        begin = AC_CACHETAGS;
81
        end = (AC_CACHETAGS + SUN4C_VAC_SIZE);
82
        while (begin < end) {
83
                __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
84
                                     "r" (begin), "i" (ASI_CONTROL));
85
                begin += sun4c_vacinfo.linesize;
86
        }
87
}
88
 
89
static void sun4c_flush_context_hw(void)
90
{
91
        unsigned long end = SUN4C_VAC_SIZE;
92
 
93
        __asm__ __volatile__(
94
                "1:     addcc   %0, -4096, %0\n\t"
95
                "       bne     1b\n\t"
96
                "        sta    %%g0, [%0] %2"
97
        : "=&r" (end)
98
        : "0" (end), "i" (ASI_HWFLUSHCONTEXT)
99
        : "cc");
100
}
101
 
102
/* Must be called minimally with IRQs disabled. */
103
static void sun4c_flush_segment_hw(unsigned long addr)
104
{
105
        if (sun4c_get_segmap(addr) != invalid_segment) {
106
                unsigned long vac_size = SUN4C_VAC_SIZE;
107
 
108
                __asm__ __volatile__(
109
                        "1:     addcc   %0, -4096, %0\n\t"
110
                        "       bne     1b\n\t"
111
                        "        sta    %%g0, [%2 + %0] %3"
112
                        : "=&r" (vac_size)
113
                        : "0" (vac_size), "r" (addr), "i" (ASI_HWFLUSHSEG)
114
                        : "cc");
115
        }
116
}
117
 
118
/* File local boot time fixups. */
119
BTFIXUPDEF_CALL(void, sun4c_flush_page, unsigned long)
120
BTFIXUPDEF_CALL(void, sun4c_flush_segment, unsigned long)
121
BTFIXUPDEF_CALL(void, sun4c_flush_context, void)
122
 
123
#define sun4c_flush_page(addr) BTFIXUP_CALL(sun4c_flush_page)(addr)
124
#define sun4c_flush_segment(addr) BTFIXUP_CALL(sun4c_flush_segment)(addr)
125
#define sun4c_flush_context() BTFIXUP_CALL(sun4c_flush_context)()
126
 
127
/* Must be called minimally with interrupts disabled. */
128
static void sun4c_flush_page_hw(unsigned long addr)
129
{
130
        addr &= PAGE_MASK;
131
        if ((int)sun4c_get_pte(addr) < 0)
132
                __asm__ __volatile__("sta %%g0, [%0] %1"
133
                                     : : "r" (addr), "i" (ASI_HWFLUSHPAGE));
134
}
135
 
136
/* Don't inline the software version as it eats too many cache lines if expanded. */
137
static void sun4c_flush_context_sw(void)
138
{
139
        unsigned long nbytes = SUN4C_VAC_SIZE;
140
        unsigned long lsize = sun4c_vacinfo.linesize;
141
 
142
        __asm__ __volatile__(
143
        "add    %2, %2, %%g1\n\t"
144
        "add    %2, %%g1, %%g2\n\t"
145
        "add    %2, %%g2, %%g3\n\t"
146
        "add    %2, %%g3, %%g4\n\t"
147
        "add    %2, %%g4, %%g5\n\t"
148
        "add    %2, %%g5, %%o4\n\t"
149
        "add    %2, %%o4, %%o5\n"
150
        "1:\n\t"
151
        "subcc  %0, %%o5, %0\n\t"
152
        "sta    %%g0, [%0] %3\n\t"
153
        "sta    %%g0, [%0 + %2] %3\n\t"
154
        "sta    %%g0, [%0 + %%g1] %3\n\t"
155
        "sta    %%g0, [%0 + %%g2] %3\n\t"
156
        "sta    %%g0, [%0 + %%g3] %3\n\t"
157
        "sta    %%g0, [%0 + %%g4] %3\n\t"
158
        "sta    %%g0, [%0 + %%g5] %3\n\t"
159
        "bg     1b\n\t"
160
        " sta   %%g0, [%1 + %%o4] %3\n"
161
        : "=&r" (nbytes)
162
        : "0" (nbytes), "r" (lsize), "i" (ASI_FLUSHCTX)
163
        : "g1", "g2", "g3", "g4", "g5", "o4", "o5", "cc");
164
}
165
 
166
/* Don't inline the software version as it eats too many cache lines if expanded. */
167
static void sun4c_flush_segment_sw(unsigned long addr)
168
{
169
        if (sun4c_get_segmap(addr) != invalid_segment) {
170
                unsigned long nbytes = SUN4C_VAC_SIZE;
171
                unsigned long lsize = sun4c_vacinfo.linesize;
172
 
173
                __asm__ __volatile__(
174
                "add    %2, %2, %%g1\n\t"
175
                "add    %2, %%g1, %%g2\n\t"
176
                "add    %2, %%g2, %%g3\n\t"
177
                "add    %2, %%g3, %%g4\n\t"
178
                "add    %2, %%g4, %%g5\n\t"
179
                "add    %2, %%g5, %%o4\n\t"
180
                "add    %2, %%o4, %%o5\n"
181
                "1:\n\t"
182
                "subcc  %1, %%o5, %1\n\t"
183
                "sta    %%g0, [%0] %6\n\t"
184
                "sta    %%g0, [%0 + %2] %6\n\t"
185
                "sta    %%g0, [%0 + %%g1] %6\n\t"
186
                "sta    %%g0, [%0 + %%g2] %6\n\t"
187
                "sta    %%g0, [%0 + %%g3] %6\n\t"
188
                "sta    %%g0, [%0 + %%g4] %6\n\t"
189
                "sta    %%g0, [%0 + %%g5] %6\n\t"
190
                "sta    %%g0, [%0 + %%o4] %6\n\t"
191
                "bg     1b\n\t"
192
                " add   %0, %%o5, %0\n"
193
                : "=&r" (addr), "=&r" (nbytes), "=&r" (lsize)
194
                : "0" (addr), "1" (nbytes), "2" (lsize),
195
                  "i" (ASI_FLUSHSEG)
196
                : "g1", "g2", "g3", "g4", "g5", "o4", "o5", "cc");
197
        }
198
}
199
 
200
/* Don't inline the software version as it eats too many cache lines if expanded. */
201
static void sun4c_flush_page_sw(unsigned long addr)
202
{
203
        addr &= PAGE_MASK;
204
        if ((sun4c_get_pte(addr) & (_SUN4C_PAGE_NOCACHE | _SUN4C_PAGE_VALID)) ==
205
            _SUN4C_PAGE_VALID) {
206
                unsigned long left = PAGE_SIZE;
207
                unsigned long lsize = sun4c_vacinfo.linesize;
208
 
209
                __asm__ __volatile__(
210
                "add    %2, %2, %%g1\n\t"
211
                "add    %2, %%g1, %%g2\n\t"
212
                "add    %2, %%g2, %%g3\n\t"
213
                "add    %2, %%g3, %%g4\n\t"
214
                "add    %2, %%g4, %%g5\n\t"
215
                "add    %2, %%g5, %%o4\n\t"
216
                "add    %2, %%o4, %%o5\n"
217
                "1:\n\t"
218
                "subcc  %1, %%o5, %1\n\t"
219
                "sta    %%g0, [%0] %6\n\t"
220
                "sta    %%g0, [%0 + %2] %6\n\t"
221
                "sta    %%g0, [%0 + %%g1] %6\n\t"
222
                "sta    %%g0, [%0 + %%g2] %6\n\t"
223
                "sta    %%g0, [%0 + %%g3] %6\n\t"
224
                "sta    %%g0, [%0 + %%g4] %6\n\t"
225
                "sta    %%g0, [%0 + %%g5] %6\n\t"
226
                "sta    %%g0, [%0 + %%o4] %6\n\t"
227
                "bg     1b\n\t"
228
                " add   %0, %%o5, %0\n"
229
                : "=&r" (addr), "=&r" (left), "=&r" (lsize)
230
                : "0" (addr), "1" (left), "2" (lsize),
231
                  "i" (ASI_FLUSHPG)
232
                : "g1", "g2", "g3", "g4", "g5", "o4", "o5", "cc");
233
        }
234
}
235
 
236
/* The sun4c's do have an on chip store buffer.  And the way you
237
 * clear them out isn't so obvious.  The only way I can think of
238
 * to accomplish this is to read the current context register,
239
 * store the same value there, then read an external hardware
240
 * register.
241
 */
242
void sun4c_complete_all_stores(void)
243
{
244
        volatile int _unused;
245
 
246
        _unused = sun4c_get_context();
247
        sun4c_set_context(_unused);
248
#ifdef CONFIG_SUN_AUXIO
249
        _unused = get_auxio();
250
#endif
251
}
252
 
253
/* Bootup utility functions. */
254
static inline void sun4c_init_clean_segmap(unsigned char pseg)
255
{
256
        unsigned long vaddr;
257
 
258
        sun4c_put_segmap(0, pseg);
259
        for (vaddr = 0; vaddr < SUN4C_REAL_PGDIR_SIZE; vaddr += PAGE_SIZE)
260
                sun4c_put_pte(vaddr, 0);
261
        sun4c_put_segmap(0, invalid_segment);
262
}
263
 
264
static inline void sun4c_init_clean_mmu(unsigned long kernel_end)
265
{
266
        unsigned long vaddr;
267
        unsigned char savectx, ctx;
268
 
269
        savectx = sun4c_get_context();
270
        for (ctx = 0; ctx < num_contexts; ctx++) {
271
                sun4c_set_context(ctx);
272
                for (vaddr = 0; vaddr < 0x20000000; vaddr += SUN4C_REAL_PGDIR_SIZE)
273
                        sun4c_put_segmap(vaddr, invalid_segment);
274
                for (vaddr = 0xe0000000; vaddr < KERNBASE; vaddr += SUN4C_REAL_PGDIR_SIZE)
275
                        sun4c_put_segmap(vaddr, invalid_segment);
276
                for (vaddr = kernel_end; vaddr < KADB_DEBUGGER_BEGVM; vaddr += SUN4C_REAL_PGDIR_SIZE)
277
                        sun4c_put_segmap(vaddr, invalid_segment);
278
                for (vaddr = LINUX_OPPROM_ENDVM; vaddr; vaddr += SUN4C_REAL_PGDIR_SIZE)
279
                        sun4c_put_segmap(vaddr, invalid_segment);
280
        }
281
        sun4c_set_context(savectx);
282
}
283
 
284
void __init sun4c_probe_vac(void)
285
{
286
        sun4c_disable_vac();
287
 
288
        if (ARCH_SUN4) {
289
                switch (idprom->id_machtype) {
290
 
291
                case (SM_SUN4|SM_4_110):
292
                        sun4c_vacinfo.type = VAC_NONE;
293
                        sun4c_vacinfo.num_bytes = 0;
294
                        sun4c_vacinfo.linesize = 0;
295
                        sun4c_vacinfo.do_hwflushes = 0;
296
                        prom_printf("No VAC. Get some bucks and buy a real computer.");
297
                        prom_halt();
298
                        break;
299
 
300
                case (SM_SUN4|SM_4_260):
301
                        sun4c_vacinfo.type = VAC_WRITE_BACK;
302
                        sun4c_vacinfo.num_bytes = 128 * 1024;
303
                        sun4c_vacinfo.linesize = 16;
304
                        sun4c_vacinfo.do_hwflushes = 0;
305
                        break;
306
 
307
                case (SM_SUN4|SM_4_330):
308
                        sun4c_vacinfo.type = VAC_WRITE_THROUGH;
309
                        sun4c_vacinfo.num_bytes = 128 * 1024;
310
                        sun4c_vacinfo.linesize = 16;
311
                        sun4c_vacinfo.do_hwflushes = 0;
312
                        break;
313
 
314
                case (SM_SUN4|SM_4_470):
315
                        sun4c_vacinfo.type = VAC_WRITE_BACK;
316
                        sun4c_vacinfo.num_bytes = 128 * 1024;
317
                        sun4c_vacinfo.linesize = 32;
318
                        sun4c_vacinfo.do_hwflushes = 0;
319
                        break;
320
 
321
                default:
322
                        prom_printf("Cannot initialize VAC - weird sun4 model idprom->id_machtype = %d", idprom->id_machtype);
323
                        prom_halt();
324
                };
325
        } else {
326
                sun4c_vacinfo.type = VAC_WRITE_THROUGH;
327
 
328
                if ((idprom->id_machtype == (SM_SUN4C | SM_4C_SS1)) ||
329
                    (idprom->id_machtype == (SM_SUN4C | SM_4C_SS1PLUS))) {
330
                        /* PROM on SS1 lacks this info, to be super safe we
331
                         * hard code it here since this arch is cast in stone.
332
                         */
333
                        sun4c_vacinfo.num_bytes = 65536;
334
                        sun4c_vacinfo.linesize = 16;
335
                } else {
336
                        sun4c_vacinfo.num_bytes =
337
                         prom_getintdefault(prom_root_node, "vac-size", 65536);
338
                        sun4c_vacinfo.linesize =
339
                         prom_getintdefault(prom_root_node, "vac-linesize", 16);
340
                }
341
                sun4c_vacinfo.do_hwflushes =
342
                 prom_getintdefault(prom_root_node, "vac-hwflush", 0);
343
 
344
                if (sun4c_vacinfo.do_hwflushes == 0)
345
                        sun4c_vacinfo.do_hwflushes =
346
                         prom_getintdefault(prom_root_node, "vac_hwflush", 0);
347
 
348
                if (sun4c_vacinfo.num_bytes != 65536) {
349
                        prom_printf("WEIRD Sun4C VAC cache size, "
350
                                    "tell sparclinux@vger.kernel.org");
351
                        prom_halt();
352
                }
353
        }
354
 
355
        sun4c_vacinfo.num_lines =
356
                (sun4c_vacinfo.num_bytes / sun4c_vacinfo.linesize);
357
        switch (sun4c_vacinfo.linesize) {
358
        case 16:
359
                sun4c_vacinfo.log2lsize = 4;
360
                break;
361
        case 32:
362
                sun4c_vacinfo.log2lsize = 5;
363
                break;
364
        default:
365
                prom_printf("probe_vac: Didn't expect vac-linesize of %d, halting\n",
366
                            sun4c_vacinfo.linesize);
367
                prom_halt();
368
        };
369
 
370
        sun4c_flush_all();
371
        sun4c_enable_vac();
372
}
373
 
374
/* Patch instructions for the low level kernel fault handler. */
375
extern unsigned long invalid_segment_patch1, invalid_segment_patch1_ff;
376
extern unsigned long invalid_segment_patch2, invalid_segment_patch2_ff;
377
extern unsigned long invalid_segment_patch1_1ff, invalid_segment_patch2_1ff;
378
extern unsigned long num_context_patch1, num_context_patch1_16;
379
extern unsigned long num_context_patch2_16;
380
extern unsigned long vac_linesize_patch, vac_linesize_patch_32;
381
extern unsigned long vac_hwflush_patch1, vac_hwflush_patch1_on;
382
extern unsigned long vac_hwflush_patch2, vac_hwflush_patch2_on;
383
 
384
#define PATCH_INSN(src, dst) do {       \
385
                daddr = &(dst);         \
386
                iaddr = &(src);         \
387
                *daddr = *iaddr;        \
388
        } while (0)
389
 
390
static void __init patch_kernel_fault_handler(void)
391
{
392
        unsigned long *iaddr, *daddr;
393
 
394
        switch (num_segmaps) {
395
                case 128:
396
                        /* Default, nothing to do. */
397
                        break;
398
                case 256:
399
                        PATCH_INSN(invalid_segment_patch1_ff,
400
                                   invalid_segment_patch1);
401
                        PATCH_INSN(invalid_segment_patch2_ff,
402
                                   invalid_segment_patch2);
403
                        break;
404
                case 512:
405
                        PATCH_INSN(invalid_segment_patch1_1ff,
406
                                   invalid_segment_patch1);
407
                        PATCH_INSN(invalid_segment_patch2_1ff,
408
                                   invalid_segment_patch2);
409
                        break;
410
                default:
411
                        prom_printf("Unhandled number of segmaps: %d\n",
412
                                    num_segmaps);
413
                        prom_halt();
414
        };
415
        switch (num_contexts) {
416
                case 8:
417
                        /* Default, nothing to do. */
418
                        break;
419
                case 16:
420
                        PATCH_INSN(num_context_patch1_16,
421
                                   num_context_patch1);
422
                        break;
423
                default:
424
                        prom_printf("Unhandled number of contexts: %d\n",
425
                                    num_contexts);
426
                        prom_halt();
427
        };
428
 
429
        if (sun4c_vacinfo.do_hwflushes != 0) {
430
                PATCH_INSN(vac_hwflush_patch1_on, vac_hwflush_patch1);
431
                PATCH_INSN(vac_hwflush_patch2_on, vac_hwflush_patch2);
432
        } else {
433
                switch (sun4c_vacinfo.linesize) {
434
                case 16:
435
                        /* Default, nothing to do. */
436
                        break;
437
                case 32:
438
                        PATCH_INSN(vac_linesize_patch_32, vac_linesize_patch);
439
                        break;
440
                default:
441
                        prom_printf("Impossible VAC linesize %d, halting...\n",
442
                                    sun4c_vacinfo.linesize);
443
                        prom_halt();
444
                };
445
        }
446
}
447
 
448
static void __init sun4c_probe_mmu(void)
449
{
450
        if (ARCH_SUN4) {
451
                switch (idprom->id_machtype) {
452
                case (SM_SUN4|SM_4_110):
453
                        prom_printf("No support for 4100 yet\n");
454
                        prom_halt();
455
                        num_segmaps = 256;
456
                        num_contexts = 8;
457
                        break;
458
 
459
                case (SM_SUN4|SM_4_260):
460
                        /* should be 512 segmaps. when it get fixed */
461
                        num_segmaps = 256;
462
                        num_contexts = 16;
463
                        break;
464
 
465
                case (SM_SUN4|SM_4_330):
466
                        num_segmaps = 256;
467
                        num_contexts = 16;
468
                        break;
469
 
470
                case (SM_SUN4|SM_4_470):
471
                        /* should be 1024 segmaps. when it get fixed */
472
                        num_segmaps = 256;
473
                        num_contexts = 64;
474
                        break;
475
                default:
476
                        prom_printf("Invalid SUN4 model\n");
477
                        prom_halt();
478
                };
479
        } else {
480
                if ((idprom->id_machtype == (SM_SUN4C | SM_4C_SS1)) ||
481
                    (idprom->id_machtype == (SM_SUN4C | SM_4C_SS1PLUS))) {
482
                        /* Hardcode these just to be safe, PROM on SS1 does
483
                        * not have this info available in the root node.
484
                        */
485
                        num_segmaps = 128;
486
                        num_contexts = 8;
487
                } else {
488
                        num_segmaps =
489
                            prom_getintdefault(prom_root_node, "mmu-npmg", 128);
490
                        num_contexts =
491
                            prom_getintdefault(prom_root_node, "mmu-nctx", 0x8);
492
                }
493
        }
494
        patch_kernel_fault_handler();
495
}
496
 
497
volatile unsigned long __iomem *sun4c_memerr_reg = NULL;
498
 
499
void __init sun4c_probe_memerr_reg(void)
500
{
501
        int node;
502
        struct linux_prom_registers regs[1];
503
 
504
        if (ARCH_SUN4) {
505
                sun4c_memerr_reg = ioremap(sun4_memreg_physaddr, PAGE_SIZE);
506
        } else {
507
                node = prom_getchild(prom_root_node);
508
                node = prom_searchsiblings(prom_root_node, "memory-error");
509
                if (!node)
510
                        return;
511
                if (prom_getproperty(node, "reg", (char *)regs, sizeof(regs)) <= 0)
512
                        return;
513
                /* hmm I think regs[0].which_io is zero here anyways */
514
                sun4c_memerr_reg = ioremap(regs[0].phys_addr, regs[0].reg_size);
515
        }
516
}
517
 
518
static inline void sun4c_init_ss2_cache_bug(void)
519
{
520
        extern unsigned long start;
521
 
522
        if ((idprom->id_machtype == (SM_SUN4C | SM_4C_SS2)) ||
523
            (idprom->id_machtype == (SM_SUN4C | SM_4C_IPX)) ||
524
            (idprom->id_machtype == (SM_SUN4 | SM_4_330)) ||
525
            (idprom->id_machtype == (SM_SUN4C | SM_4C_ELC))) {
526
                /* Whee.. */
527
                printk("SS2 cache bug detected, uncaching trap table page\n");
528
                sun4c_flush_page((unsigned int) &start);
529
                sun4c_put_pte(((unsigned long) &start),
530
                        (sun4c_get_pte((unsigned long) &start) | _SUN4C_PAGE_NOCACHE));
531
        }
532
}
533
 
534
/* Addr is always aligned on a page boundary for us already. */
535
static int sun4c_map_dma_area(dma_addr_t *pba, unsigned long va,
536
    unsigned long addr, int len)
537
{
538
        unsigned long page, end;
539
 
540
        *pba = addr;
541
 
542
        end = PAGE_ALIGN((addr + len));
543
        while (addr < end) {
544
                page = va;
545
                sun4c_flush_page(page);
546
                page -= PAGE_OFFSET;
547
                page >>= PAGE_SHIFT;
548
                page |= (_SUN4C_PAGE_VALID | _SUN4C_PAGE_DIRTY |
549
                         _SUN4C_PAGE_NOCACHE | _SUN4C_PAGE_PRIV);
550
                sun4c_put_pte(addr, page);
551
                addr += PAGE_SIZE;
552
                va += PAGE_SIZE;
553
        }
554
 
555
        return 0;
556
}
557
 
558
static struct page *sun4c_translate_dvma(unsigned long busa)
559
{
560
        /* Fortunately for us, bus_addr == uncached_virt in sun4c. */
561
        unsigned long pte = sun4c_get_pte(busa);
562
        return pfn_to_page(pte & SUN4C_PFN_MASK);
563
}
564
 
565
static void sun4c_unmap_dma_area(unsigned long busa, int len)
566
{
567
        /* Fortunately for us, bus_addr == uncached_virt in sun4c. */
568
        /* XXX Implement this */
569
}
570
 
571
/* TLB management. */
572
 
573
/* Don't change this struct without changing entry.S. This is used
574
 * in the in-window kernel fault handler, and you don't want to mess
575
 * with that. (See sun4c_fault in entry.S).
576
 */
577
struct sun4c_mmu_entry {
578
        struct sun4c_mmu_entry *next;
579
        struct sun4c_mmu_entry *prev;
580
        unsigned long vaddr;
581
        unsigned char pseg;
582
        unsigned char locked;
583
 
584
        /* For user mappings only, and completely hidden from kernel
585
         * TLB miss code.
586
         */
587
        unsigned char ctx;
588
        struct sun4c_mmu_entry *lru_next;
589
        struct sun4c_mmu_entry *lru_prev;
590
};
591
 
592
static struct sun4c_mmu_entry mmu_entry_pool[SUN4C_MAX_SEGMAPS];
593
 
594
static void __init sun4c_init_mmu_entry_pool(void)
595
{
596
        int i;
597
 
598
        for (i=0; i < SUN4C_MAX_SEGMAPS; i++) {
599
                mmu_entry_pool[i].pseg = i;
600
                mmu_entry_pool[i].next = NULL;
601
                mmu_entry_pool[i].prev = NULL;
602
                mmu_entry_pool[i].vaddr = 0;
603
                mmu_entry_pool[i].locked = 0;
604
                mmu_entry_pool[i].ctx = 0;
605
                mmu_entry_pool[i].lru_next = NULL;
606
                mmu_entry_pool[i].lru_prev = NULL;
607
        }
608
        mmu_entry_pool[invalid_segment].locked = 1;
609
}
610
 
611
static inline void fix_permissions(unsigned long vaddr, unsigned long bits_on,
612
                                   unsigned long bits_off)
613
{
614
        unsigned long start, end;
615
 
616
        end = vaddr + SUN4C_REAL_PGDIR_SIZE;
617
        for (start = vaddr; start < end; start += PAGE_SIZE)
618
                if (sun4c_get_pte(start) & _SUN4C_PAGE_VALID)
619
                        sun4c_put_pte(start, (sun4c_get_pte(start) | bits_on) &
620
                                      ~bits_off);
621
}
622
 
623
static inline void sun4c_init_map_kernelprom(unsigned long kernel_end)
624
{
625
        unsigned long vaddr;
626
        unsigned char pseg, ctx;
627
#ifdef CONFIG_SUN4
628
        /* sun4/110 and 260 have no kadb. */
629
        if ((idprom->id_machtype != (SM_SUN4 | SM_4_260)) &&
630
            (idprom->id_machtype != (SM_SUN4 | SM_4_110))) {
631
#endif
632
        for (vaddr = KADB_DEBUGGER_BEGVM;
633
             vaddr < LINUX_OPPROM_ENDVM;
634
             vaddr += SUN4C_REAL_PGDIR_SIZE) {
635
                pseg = sun4c_get_segmap(vaddr);
636
                if (pseg != invalid_segment) {
637
                        mmu_entry_pool[pseg].locked = 1;
638
                        for (ctx = 0; ctx < num_contexts; ctx++)
639
                                prom_putsegment(ctx, vaddr, pseg);
640
                        fix_permissions(vaddr, _SUN4C_PAGE_PRIV, 0);
641
                }
642
        }
643
#ifdef CONFIG_SUN4
644
        }
645
#endif
646
        for (vaddr = KERNBASE; vaddr < kernel_end; vaddr += SUN4C_REAL_PGDIR_SIZE) {
647
                pseg = sun4c_get_segmap(vaddr);
648
                mmu_entry_pool[pseg].locked = 1;
649
                for (ctx = 0; ctx < num_contexts; ctx++)
650
                        prom_putsegment(ctx, vaddr, pseg);
651
                fix_permissions(vaddr, _SUN4C_PAGE_PRIV, _SUN4C_PAGE_NOCACHE);
652
        }
653
}
654
 
655
static void __init sun4c_init_lock_area(unsigned long start, unsigned long end)
656
{
657
        int i, ctx;
658
 
659
        while (start < end) {
660
                for (i = 0; i < invalid_segment; i++)
661
                        if (!mmu_entry_pool[i].locked)
662
                                break;
663
                mmu_entry_pool[i].locked = 1;
664
                sun4c_init_clean_segmap(i);
665
                for (ctx = 0; ctx < num_contexts; ctx++)
666
                        prom_putsegment(ctx, start, mmu_entry_pool[i].pseg);
667
                start += SUN4C_REAL_PGDIR_SIZE;
668
        }
669
}
670
 
671
/* Don't change this struct without changing entry.S. This is used
672
 * in the in-window kernel fault handler, and you don't want to mess
673
 * with that. (See sun4c_fault in entry.S).
674
 */
675
struct sun4c_mmu_ring {
676
        struct sun4c_mmu_entry ringhd;
677
        int num_entries;
678
};
679
 
680
static struct sun4c_mmu_ring sun4c_context_ring[SUN4C_MAX_CONTEXTS]; /* used user entries */
681
static struct sun4c_mmu_ring sun4c_ufree_ring;       /* free user entries */
682
static struct sun4c_mmu_ring sun4c_ulru_ring;        /* LRU user entries */
683
struct sun4c_mmu_ring sun4c_kernel_ring;      /* used kernel entries */
684
struct sun4c_mmu_ring sun4c_kfree_ring;       /* free kernel entries */
685
 
686
static inline void sun4c_init_rings(void)
687
{
688
        int i;
689
 
690
        for (i = 0; i < SUN4C_MAX_CONTEXTS; i++) {
691
                sun4c_context_ring[i].ringhd.next =
692
                        sun4c_context_ring[i].ringhd.prev =
693
                        &sun4c_context_ring[i].ringhd;
694
                sun4c_context_ring[i].num_entries = 0;
695
        }
696
        sun4c_ufree_ring.ringhd.next = sun4c_ufree_ring.ringhd.prev =
697
                &sun4c_ufree_ring.ringhd;
698
        sun4c_ufree_ring.num_entries = 0;
699
        sun4c_ulru_ring.ringhd.lru_next = sun4c_ulru_ring.ringhd.lru_prev =
700
                &sun4c_ulru_ring.ringhd;
701
        sun4c_ulru_ring.num_entries = 0;
702
        sun4c_kernel_ring.ringhd.next = sun4c_kernel_ring.ringhd.prev =
703
                &sun4c_kernel_ring.ringhd;
704
        sun4c_kernel_ring.num_entries = 0;
705
        sun4c_kfree_ring.ringhd.next = sun4c_kfree_ring.ringhd.prev =
706
                &sun4c_kfree_ring.ringhd;
707
        sun4c_kfree_ring.num_entries = 0;
708
}
709
 
710
static void add_ring(struct sun4c_mmu_ring *ring,
711
                     struct sun4c_mmu_entry *entry)
712
{
713
        struct sun4c_mmu_entry *head = &ring->ringhd;
714
 
715
        entry->prev = head;
716
        (entry->next = head->next)->prev = entry;
717
        head->next = entry;
718
        ring->num_entries++;
719
}
720
 
721
static inline void add_lru(struct sun4c_mmu_entry *entry)
722
{
723
        struct sun4c_mmu_ring *ring = &sun4c_ulru_ring;
724
        struct sun4c_mmu_entry *head = &ring->ringhd;
725
 
726
        entry->lru_next = head;
727
        (entry->lru_prev = head->lru_prev)->lru_next = entry;
728
        head->lru_prev = entry;
729
}
730
 
731
static void add_ring_ordered(struct sun4c_mmu_ring *ring,
732
                             struct sun4c_mmu_entry *entry)
733
{
734
        struct sun4c_mmu_entry *head = &ring->ringhd;
735
        unsigned long addr = entry->vaddr;
736
 
737
        while ((head->next != &ring->ringhd) && (head->next->vaddr < addr))
738
                head = head->next;
739
 
740
        entry->prev = head;
741
        (entry->next = head->next)->prev = entry;
742
        head->next = entry;
743
        ring->num_entries++;
744
 
745
        add_lru(entry);
746
}
747
 
748
static inline void remove_ring(struct sun4c_mmu_ring *ring,
749
                                   struct sun4c_mmu_entry *entry)
750
{
751
        struct sun4c_mmu_entry *next = entry->next;
752
 
753
        (next->prev = entry->prev)->next = next;
754
        ring->num_entries--;
755
}
756
 
757
static void remove_lru(struct sun4c_mmu_entry *entry)
758
{
759
        struct sun4c_mmu_entry *next = entry->lru_next;
760
 
761
        (next->lru_prev = entry->lru_prev)->lru_next = next;
762
}
763
 
764
static void free_user_entry(int ctx, struct sun4c_mmu_entry *entry)
765
{
766
        remove_ring(sun4c_context_ring+ctx, entry);
767
        remove_lru(entry);
768
        add_ring(&sun4c_ufree_ring, entry);
769
}
770
 
771
static void free_kernel_entry(struct sun4c_mmu_entry *entry,
772
                              struct sun4c_mmu_ring *ring)
773
{
774
        remove_ring(ring, entry);
775
        add_ring(&sun4c_kfree_ring, entry);
776
}
777
 
778
static void __init sun4c_init_fill_kernel_ring(int howmany)
779
{
780
        int i;
781
 
782
        while (howmany) {
783
                for (i = 0; i < invalid_segment; i++)
784
                        if (!mmu_entry_pool[i].locked)
785
                                break;
786
                mmu_entry_pool[i].locked = 1;
787
                sun4c_init_clean_segmap(i);
788
                add_ring(&sun4c_kfree_ring, &mmu_entry_pool[i]);
789
                howmany--;
790
        }
791
}
792
 
793
static void __init sun4c_init_fill_user_ring(void)
794
{
795
        int i;
796
 
797
        for (i = 0; i < invalid_segment; i++) {
798
                if (mmu_entry_pool[i].locked)
799
                        continue;
800
                sun4c_init_clean_segmap(i);
801
                add_ring(&sun4c_ufree_ring, &mmu_entry_pool[i]);
802
        }
803
}
804
 
805
static void sun4c_kernel_unmap(struct sun4c_mmu_entry *kentry)
806
{
807
        int savectx, ctx;
808
 
809
        savectx = sun4c_get_context();
810
        for (ctx = 0; ctx < num_contexts; ctx++) {
811
                sun4c_set_context(ctx);
812
                sun4c_put_segmap(kentry->vaddr, invalid_segment);
813
        }
814
        sun4c_set_context(savectx);
815
}
816
 
817
static void sun4c_kernel_map(struct sun4c_mmu_entry *kentry)
818
{
819
        int savectx, ctx;
820
 
821
        savectx = sun4c_get_context();
822
        for (ctx = 0; ctx < num_contexts; ctx++) {
823
                sun4c_set_context(ctx);
824
                sun4c_put_segmap(kentry->vaddr, kentry->pseg);
825
        }
826
        sun4c_set_context(savectx);
827
}
828
 
829
#define sun4c_user_unmap(__entry) \
830
        sun4c_put_segmap((__entry)->vaddr, invalid_segment)
831
 
832
static void sun4c_demap_context(struct sun4c_mmu_ring *crp, unsigned char ctx)
833
{
834
        struct sun4c_mmu_entry *head = &crp->ringhd;
835
        unsigned long flags;
836
 
837
        local_irq_save(flags);
838
        if (head->next != head) {
839
                struct sun4c_mmu_entry *entry = head->next;
840
                int savectx = sun4c_get_context();
841
 
842
                flush_user_windows();
843
                sun4c_set_context(ctx);
844
                sun4c_flush_context();
845
                do {
846
                        struct sun4c_mmu_entry *next = entry->next;
847
 
848
                        sun4c_user_unmap(entry);
849
                        free_user_entry(ctx, entry);
850
 
851
                        entry = next;
852
                } while (entry != head);
853
                sun4c_set_context(savectx);
854
        }
855
        local_irq_restore(flags);
856
}
857
 
858
static int sun4c_user_taken_entries;  /* This is how much we have.             */
859
static int max_user_taken_entries;    /* This limits us and prevents deadlock. */
860
 
861
static struct sun4c_mmu_entry *sun4c_kernel_strategy(void)
862
{
863
        struct sun4c_mmu_entry *this_entry;
864
 
865
        /* If some are free, return first one. */
866
        if (sun4c_kfree_ring.num_entries) {
867
                this_entry = sun4c_kfree_ring.ringhd.next;
868
                return this_entry;
869
        }
870
 
871
        /* Else free one up. */
872
        this_entry = sun4c_kernel_ring.ringhd.prev;
873
        sun4c_flush_segment(this_entry->vaddr);
874
        sun4c_kernel_unmap(this_entry);
875
        free_kernel_entry(this_entry, &sun4c_kernel_ring);
876
        this_entry = sun4c_kfree_ring.ringhd.next;
877
 
878
        return this_entry;
879
}
880
 
881
/* Using this method to free up mmu entries eliminates a lot of
882
 * potential races since we have a kernel that incurs tlb
883
 * replacement faults.  There may be performance penalties.
884
 *
885
 * NOTE: Must be called with interrupts disabled.
886
 */
887
static struct sun4c_mmu_entry *sun4c_user_strategy(void)
888
{
889
        struct sun4c_mmu_entry *entry;
890
        unsigned char ctx;
891
        int savectx;
892
 
893
        /* If some are free, return first one. */
894
        if (sun4c_ufree_ring.num_entries) {
895
                entry = sun4c_ufree_ring.ringhd.next;
896
                goto unlink_out;
897
        }
898
 
899
        if (sun4c_user_taken_entries) {
900
                entry = sun4c_kernel_strategy();
901
                sun4c_user_taken_entries--;
902
                goto kunlink_out;
903
        }
904
 
905
        /* Grab from the beginning of the LRU list. */
906
        entry = sun4c_ulru_ring.ringhd.lru_next;
907
        ctx = entry->ctx;
908
 
909
        savectx = sun4c_get_context();
910
        flush_user_windows();
911
        sun4c_set_context(ctx);
912
        sun4c_flush_segment(entry->vaddr);
913
        sun4c_user_unmap(entry);
914
        remove_ring(sun4c_context_ring + ctx, entry);
915
        remove_lru(entry);
916
        sun4c_set_context(savectx);
917
 
918
        return entry;
919
 
920
unlink_out:
921
        remove_ring(&sun4c_ufree_ring, entry);
922
        return entry;
923
kunlink_out:
924
        remove_ring(&sun4c_kfree_ring, entry);
925
        return entry;
926
}
927
 
928
/* NOTE: Must be called with interrupts disabled. */
929
void sun4c_grow_kernel_ring(void)
930
{
931
        struct sun4c_mmu_entry *entry;
932
 
933
        /* Prevent deadlock condition. */
934
        if (sun4c_user_taken_entries >= max_user_taken_entries)
935
                return;
936
 
937
        if (sun4c_ufree_ring.num_entries) {
938
                entry = sun4c_ufree_ring.ringhd.next;
939
                remove_ring(&sun4c_ufree_ring, entry);
940
                add_ring(&sun4c_kfree_ring, entry);
941
                sun4c_user_taken_entries++;
942
        }
943
}
944
 
945
/* 2 page buckets for task struct and kernel stack allocation.
946
 *
947
 * TASK_STACK_BEGIN
948
 * bucket[0]
949
 * bucket[1]
950
 *   [ ... ]
951
 * bucket[NR_TASK_BUCKETS-1]
952
 * TASK_STACK_BEGIN + (sizeof(struct task_bucket) * NR_TASK_BUCKETS)
953
 *
954
 * Each slot looks like:
955
 *
956
 *  page 1 --  task struct + beginning of kernel stack
957
 *  page 2 --  rest of kernel stack
958
 */
959
 
960
union task_union *sun4c_bucket[NR_TASK_BUCKETS];
961
 
962
static int sun4c_lowbucket_avail;
963
 
964
#define BUCKET_EMPTY     ((union task_union *) 0)
965
#define BUCKET_SHIFT     (PAGE_SHIFT + 1)        /* log2(sizeof(struct task_bucket)) */
966
#define BUCKET_SIZE      (1 << BUCKET_SHIFT)
967
#define BUCKET_NUM(addr) ((((addr) - SUN4C_LOCK_VADDR) >> BUCKET_SHIFT))
968
#define BUCKET_ADDR(num) (((num) << BUCKET_SHIFT) + SUN4C_LOCK_VADDR)
969
#define BUCKET_PTE(page)       \
970
        ((((page) - PAGE_OFFSET) >> PAGE_SHIFT) | pgprot_val(SUN4C_PAGE_KERNEL))
971
#define BUCKET_PTE_PAGE(pte)   \
972
        (PAGE_OFFSET + (((pte) & SUN4C_PFN_MASK) << PAGE_SHIFT))
973
 
974
static void get_locked_segment(unsigned long addr)
975
{
976
        struct sun4c_mmu_entry *stolen;
977
        unsigned long flags;
978
 
979
        local_irq_save(flags);
980
        addr &= SUN4C_REAL_PGDIR_MASK;
981
        stolen = sun4c_user_strategy();
982
        max_user_taken_entries--;
983
        stolen->vaddr = addr;
984
        flush_user_windows();
985
        sun4c_kernel_map(stolen);
986
        local_irq_restore(flags);
987
}
988
 
989
static void free_locked_segment(unsigned long addr)
990
{
991
        struct sun4c_mmu_entry *entry;
992
        unsigned long flags;
993
        unsigned char pseg;
994
 
995
        local_irq_save(flags);
996
        addr &= SUN4C_REAL_PGDIR_MASK;
997
        pseg = sun4c_get_segmap(addr);
998
        entry = &mmu_entry_pool[pseg];
999
 
1000
        flush_user_windows();
1001
        sun4c_flush_segment(addr);
1002
        sun4c_kernel_unmap(entry);
1003
        add_ring(&sun4c_ufree_ring, entry);
1004
        max_user_taken_entries++;
1005
        local_irq_restore(flags);
1006
}
1007
 
1008
static inline void garbage_collect(int entry)
1009
{
1010
        int start, end;
1011
 
1012
        /* 32 buckets per segment... */
1013
        entry &= ~31;
1014
        start = entry;
1015
        for (end = (start + 32); start < end; start++)
1016
                if (sun4c_bucket[start] != BUCKET_EMPTY)
1017
                        return;
1018
 
1019
        /* Entire segment empty, release it. */
1020
        free_locked_segment(BUCKET_ADDR(entry));
1021
}
1022
 
1023
static struct thread_info *sun4c_alloc_thread_info(void)
1024
{
1025
        unsigned long addr, pages;
1026
        int entry;
1027
 
1028
        pages = __get_free_pages(GFP_KERNEL, THREAD_INFO_ORDER);
1029
        if (!pages)
1030
                return NULL;
1031
 
1032
        for (entry = sun4c_lowbucket_avail; entry < NR_TASK_BUCKETS; entry++)
1033
                if (sun4c_bucket[entry] == BUCKET_EMPTY)
1034
                        break;
1035
        if (entry == NR_TASK_BUCKETS) {
1036
                free_pages(pages, THREAD_INFO_ORDER);
1037
                return NULL;
1038
        }
1039
        if (entry >= sun4c_lowbucket_avail)
1040
                sun4c_lowbucket_avail = entry + 1;
1041
 
1042
        addr = BUCKET_ADDR(entry);
1043
        sun4c_bucket[entry] = (union task_union *) addr;
1044
        if(sun4c_get_segmap(addr) == invalid_segment)
1045
                get_locked_segment(addr);
1046
 
1047
        /* We are changing the virtual color of the page(s)
1048
         * so we must flush the cache to guarantee consistency.
1049
         */
1050
        sun4c_flush_page(pages);
1051
#ifndef CONFIG_SUN4     
1052
        sun4c_flush_page(pages + PAGE_SIZE);
1053
#endif
1054
 
1055
        sun4c_put_pte(addr, BUCKET_PTE(pages));
1056
#ifndef CONFIG_SUN4     
1057
        sun4c_put_pte(addr + PAGE_SIZE, BUCKET_PTE(pages + PAGE_SIZE));
1058
#endif
1059
 
1060
#ifdef CONFIG_DEBUG_STACK_USAGE
1061
        memset((void *)addr, 0, PAGE_SIZE << THREAD_INFO_ORDER);
1062
#endif /* DEBUG_STACK_USAGE */
1063
 
1064
        return (struct thread_info *) addr;
1065
}
1066
 
1067
static void sun4c_free_thread_info(struct thread_info *ti)
1068
{
1069
        unsigned long tiaddr = (unsigned long) ti;
1070
        unsigned long pages = BUCKET_PTE_PAGE(sun4c_get_pte(tiaddr));
1071
        int entry = BUCKET_NUM(tiaddr);
1072
 
1073
        /* We are deleting a mapping, so the flush here is mandatory. */
1074
        sun4c_flush_page(tiaddr);
1075
#ifndef CONFIG_SUN4     
1076
        sun4c_flush_page(tiaddr + PAGE_SIZE);
1077
#endif
1078
        sun4c_put_pte(tiaddr, 0);
1079
#ifndef CONFIG_SUN4     
1080
        sun4c_put_pte(tiaddr + PAGE_SIZE, 0);
1081
#endif
1082
        sun4c_bucket[entry] = BUCKET_EMPTY;
1083
        if (entry < sun4c_lowbucket_avail)
1084
                sun4c_lowbucket_avail = entry;
1085
 
1086
        free_pages(pages, THREAD_INFO_ORDER);
1087
        garbage_collect(entry);
1088
}
1089
 
1090
static void __init sun4c_init_buckets(void)
1091
{
1092
        int entry;
1093
 
1094
        if (sizeof(union thread_union) != (PAGE_SIZE << THREAD_INFO_ORDER)) {
1095
                extern void thread_info_size_is_bolixed_pete(void);
1096
                thread_info_size_is_bolixed_pete();
1097
        }
1098
 
1099
        for (entry = 0; entry < NR_TASK_BUCKETS; entry++)
1100
                sun4c_bucket[entry] = BUCKET_EMPTY;
1101
        sun4c_lowbucket_avail = 0;
1102
}
1103
 
1104
static unsigned long sun4c_iobuffer_start;
1105
static unsigned long sun4c_iobuffer_end;
1106
static unsigned long sun4c_iobuffer_high;
1107
static unsigned long *sun4c_iobuffer_map;
1108
static int iobuffer_map_size;
1109
 
1110
/*
1111
 * Alias our pages so they do not cause a trap.
1112
 * Also one page may be aliased into several I/O areas and we may
1113
 * finish these I/O separately.
1114
 */
1115
static char *sun4c_lockarea(char *vaddr, unsigned long size)
1116
{
1117
        unsigned long base, scan;
1118
        unsigned long npages;
1119
        unsigned long vpage;
1120
        unsigned long pte;
1121
        unsigned long apage;
1122
        unsigned long high;
1123
        unsigned long flags;
1124
 
1125
        npages = (((unsigned long)vaddr & ~PAGE_MASK) +
1126
                  size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
1127
 
1128
        scan = 0;
1129
        local_irq_save(flags);
1130
        for (;;) {
1131
                scan = find_next_zero_bit(sun4c_iobuffer_map,
1132
                                          iobuffer_map_size, scan);
1133
                if ((base = scan) + npages > iobuffer_map_size) goto abend;
1134
                for (;;) {
1135
                        if (scan >= base + npages) goto found;
1136
                        if (test_bit(scan, sun4c_iobuffer_map)) break;
1137
                        scan++;
1138
                }
1139
        }
1140
 
1141
found:
1142
        high = ((base + npages) << PAGE_SHIFT) + sun4c_iobuffer_start;
1143
        high = SUN4C_REAL_PGDIR_ALIGN(high);
1144
        while (high > sun4c_iobuffer_high) {
1145
                get_locked_segment(sun4c_iobuffer_high);
1146
                sun4c_iobuffer_high += SUN4C_REAL_PGDIR_SIZE;
1147
        }
1148
 
1149
        vpage = ((unsigned long) vaddr) & PAGE_MASK;
1150
        for (scan = base; scan < base+npages; scan++) {
1151
                pte = ((vpage-PAGE_OFFSET) >> PAGE_SHIFT);
1152
                pte |= pgprot_val(SUN4C_PAGE_KERNEL);
1153
                pte |= _SUN4C_PAGE_NOCACHE;
1154
                set_bit(scan, sun4c_iobuffer_map);
1155
                apage = (scan << PAGE_SHIFT) + sun4c_iobuffer_start;
1156
 
1157
                /* Flush original mapping so we see the right things later. */
1158
                sun4c_flush_page(vpage);
1159
 
1160
                sun4c_put_pte(apage, pte);
1161
                vpage += PAGE_SIZE;
1162
        }
1163
        local_irq_restore(flags);
1164
        return (char *) ((base << PAGE_SHIFT) + sun4c_iobuffer_start +
1165
                         (((unsigned long) vaddr) & ~PAGE_MASK));
1166
 
1167
abend:
1168
        local_irq_restore(flags);
1169
        printk("DMA vaddr=0x%p size=%08lx\n", vaddr, size);
1170
        panic("Out of iobuffer table");
1171
        return NULL;
1172
}
1173
 
1174
static void sun4c_unlockarea(char *vaddr, unsigned long size)
1175
{
1176
        unsigned long vpage, npages;
1177
        unsigned long flags;
1178
        int scan, high;
1179
 
1180
        vpage = (unsigned long)vaddr & PAGE_MASK;
1181
        npages = (((unsigned long)vaddr & ~PAGE_MASK) +
1182
                  size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
1183
 
1184
        local_irq_save(flags);
1185
        while (npages != 0) {
1186
                --npages;
1187
 
1188
                /* This mapping is marked non-cachable, no flush necessary. */
1189
                sun4c_put_pte(vpage, 0);
1190
                clear_bit((vpage - sun4c_iobuffer_start) >> PAGE_SHIFT,
1191
                          sun4c_iobuffer_map);
1192
                vpage += PAGE_SIZE;
1193
        }
1194
 
1195
        /* garbage collect */
1196
        scan = (sun4c_iobuffer_high - sun4c_iobuffer_start) >> PAGE_SHIFT;
1197
        while (scan >= 0 && !sun4c_iobuffer_map[scan >> 5])
1198
                scan -= 32;
1199
        scan += 32;
1200
        high = sun4c_iobuffer_start + (scan << PAGE_SHIFT);
1201
        high = SUN4C_REAL_PGDIR_ALIGN(high) + SUN4C_REAL_PGDIR_SIZE;
1202
        while (high < sun4c_iobuffer_high) {
1203
                sun4c_iobuffer_high -= SUN4C_REAL_PGDIR_SIZE;
1204
                free_locked_segment(sun4c_iobuffer_high);
1205
        }
1206
        local_irq_restore(flags);
1207
}
1208
 
1209
/* Note the scsi code at init time passes to here buffers
1210
 * which sit on the kernel stack, those are already locked
1211
 * by implication and fool the page locking code above
1212
 * if passed to by mistake.
1213
 */
1214
static __u32 sun4c_get_scsi_one(char *bufptr, unsigned long len, struct sbus_bus *sbus)
1215
{
1216
        unsigned long page;
1217
 
1218
        page = ((unsigned long)bufptr) & PAGE_MASK;
1219
        if (!virt_addr_valid(page)) {
1220
                sun4c_flush_page(page);
1221
                return (__u32)bufptr; /* already locked */
1222
        }
1223
        return (__u32)sun4c_lockarea(bufptr, len);
1224
}
1225
 
1226
static void sun4c_get_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
1227
{
1228
        while (sz != 0) {
1229
                --sz;
1230
                sg->dvma_address = (__u32)sun4c_lockarea(sg_virt(sg), sg->length);
1231
                sg->dvma_length = sg->length;
1232
                sg = sg_next(sg);
1233
        }
1234
}
1235
 
1236
static void sun4c_release_scsi_one(__u32 bufptr, unsigned long len, struct sbus_bus *sbus)
1237
{
1238
        if (bufptr < sun4c_iobuffer_start)
1239
                return; /* On kernel stack or similar, see above */
1240
        sun4c_unlockarea((char *)bufptr, len);
1241
}
1242
 
1243
static void sun4c_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
1244
{
1245
        while (sz != 0) {
1246
                --sz;
1247
                sun4c_unlockarea((char *)sg->dvma_address, sg->length);
1248
                sg = sg_next(sg);
1249
        }
1250
}
1251
 
1252
#define TASK_ENTRY_SIZE    BUCKET_SIZE /* see above */
1253
#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
1254
 
1255
struct vm_area_struct sun4c_kstack_vma;
1256
 
1257
static void __init sun4c_init_lock_areas(void)
1258
{
1259
        unsigned long sun4c_taskstack_start;
1260
        unsigned long sun4c_taskstack_end;
1261
        int bitmap_size;
1262
 
1263
        sun4c_init_buckets();
1264
        sun4c_taskstack_start = SUN4C_LOCK_VADDR;
1265
        sun4c_taskstack_end = (sun4c_taskstack_start +
1266
                               (TASK_ENTRY_SIZE * NR_TASK_BUCKETS));
1267
        if (sun4c_taskstack_end >= SUN4C_LOCK_END) {
1268
                prom_printf("Too many tasks, decrease NR_TASK_BUCKETS please.\n");
1269
                prom_halt();
1270
        }
1271
 
1272
        sun4c_iobuffer_start = sun4c_iobuffer_high =
1273
                                SUN4C_REAL_PGDIR_ALIGN(sun4c_taskstack_end);
1274
        sun4c_iobuffer_end = SUN4C_LOCK_END;
1275
        bitmap_size = (sun4c_iobuffer_end - sun4c_iobuffer_start) >> PAGE_SHIFT;
1276
        bitmap_size = (bitmap_size + 7) >> 3;
1277
        bitmap_size = LONG_ALIGN(bitmap_size);
1278
        iobuffer_map_size = bitmap_size << 3;
1279
        sun4c_iobuffer_map = __alloc_bootmem(bitmap_size, SMP_CACHE_BYTES, 0UL);
1280
        memset((void *) sun4c_iobuffer_map, 0, bitmap_size);
1281
 
1282
        sun4c_kstack_vma.vm_mm = &init_mm;
1283
        sun4c_kstack_vma.vm_start = sun4c_taskstack_start;
1284
        sun4c_kstack_vma.vm_end = sun4c_taskstack_end;
1285
        sun4c_kstack_vma.vm_page_prot = PAGE_SHARED;
1286
        sun4c_kstack_vma.vm_flags = VM_READ | VM_WRITE | VM_EXEC;
1287
        insert_vm_struct(&init_mm, &sun4c_kstack_vma);
1288
}
1289
 
1290
/* Cache flushing on the sun4c. */
1291
static void sun4c_flush_cache_all(void)
1292
{
1293
        unsigned long begin, end;
1294
 
1295
        flush_user_windows();
1296
        begin = (KERNBASE + SUN4C_REAL_PGDIR_SIZE);
1297
        end = (begin + SUN4C_VAC_SIZE);
1298
 
1299
        if (sun4c_vacinfo.linesize == 32) {
1300
                while (begin < end) {
1301
                        __asm__ __volatile__(
1302
                        "ld     [%0 + 0x00], %%g0\n\t"
1303
                        "ld     [%0 + 0x20], %%g0\n\t"
1304
                        "ld     [%0 + 0x40], %%g0\n\t"
1305
                        "ld     [%0 + 0x60], %%g0\n\t"
1306
                        "ld     [%0 + 0x80], %%g0\n\t"
1307
                        "ld     [%0 + 0xa0], %%g0\n\t"
1308
                        "ld     [%0 + 0xc0], %%g0\n\t"
1309
                        "ld     [%0 + 0xe0], %%g0\n\t"
1310
                        "ld     [%0 + 0x100], %%g0\n\t"
1311
                        "ld     [%0 + 0x120], %%g0\n\t"
1312
                        "ld     [%0 + 0x140], %%g0\n\t"
1313
                        "ld     [%0 + 0x160], %%g0\n\t"
1314
                        "ld     [%0 + 0x180], %%g0\n\t"
1315
                        "ld     [%0 + 0x1a0], %%g0\n\t"
1316
                        "ld     [%0 + 0x1c0], %%g0\n\t"
1317
                        "ld     [%0 + 0x1e0], %%g0\n"
1318
                        : : "r" (begin));
1319
                        begin += 512;
1320
                }
1321
        } else {
1322
                while (begin < end) {
1323
                        __asm__ __volatile__(
1324
                        "ld     [%0 + 0x00], %%g0\n\t"
1325
                        "ld     [%0 + 0x10], %%g0\n\t"
1326
                        "ld     [%0 + 0x20], %%g0\n\t"
1327
                        "ld     [%0 + 0x30], %%g0\n\t"
1328
                        "ld     [%0 + 0x40], %%g0\n\t"
1329
                        "ld     [%0 + 0x50], %%g0\n\t"
1330
                        "ld     [%0 + 0x60], %%g0\n\t"
1331
                        "ld     [%0 + 0x70], %%g0\n\t"
1332
                        "ld     [%0 + 0x80], %%g0\n\t"
1333
                        "ld     [%0 + 0x90], %%g0\n\t"
1334
                        "ld     [%0 + 0xa0], %%g0\n\t"
1335
                        "ld     [%0 + 0xb0], %%g0\n\t"
1336
                        "ld     [%0 + 0xc0], %%g0\n\t"
1337
                        "ld     [%0 + 0xd0], %%g0\n\t"
1338
                        "ld     [%0 + 0xe0], %%g0\n\t"
1339
                        "ld     [%0 + 0xf0], %%g0\n"
1340
                        : : "r" (begin));
1341
                        begin += 256;
1342
                }
1343
        }
1344
}
1345
 
1346
static void sun4c_flush_cache_mm(struct mm_struct *mm)
{
        int new_ctx = mm->context;

        if (new_ctx != NO_CONTEXT) {
                flush_user_windows();

                if (sun4c_context_ring[new_ctx].num_entries) {
                        struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
                        unsigned long flags;

                        local_irq_save(flags);
                        if (head->next != head) {
                                struct sun4c_mmu_entry *entry = head->next;
                                int savectx = sun4c_get_context();

                                sun4c_set_context(new_ctx);
                                sun4c_flush_context();
                                do {
                                        struct sun4c_mmu_entry *next = entry->next;

                                        sun4c_user_unmap(entry);
                                        free_user_entry(new_ctx, entry);

                                        entry = next;
                                } while (entry != head);
                                sun4c_set_context(savectx);
                        }
                        local_irq_restore(flags);
                }
        }
}

static void sun4c_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        int new_ctx = mm->context;

        if (new_ctx != NO_CONTEXT) {
                struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
                struct sun4c_mmu_entry *entry;
                unsigned long flags;

                flush_user_windows();

                local_irq_save(flags);
                /* All user segmap chains are ordered on entry->vaddr. */
                for (entry = head->next;
                     (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start);
                     entry = entry->next)
                        ;

                /* Tracing various job mixtures showed that this conditional
                 * only passes ~35% of the time for most worse case situations,
                 * therefore we avoid all of this gross overhead ~65% of the time.
                 */
                if ((entry != head) && (entry->vaddr < end)) {
                        int octx = sun4c_get_context();
                        sun4c_set_context(new_ctx);

                        /* At this point, always, (start >= entry->vaddr) and
                         * (entry->vaddr < end), once the latter condition
                         * ceases to hold, or we hit the end of the list, we
                         * exit the loop.  The ordering of all user allocated
                         * segmaps makes this all work out so beautifully.
                         */
                        do {
                                struct sun4c_mmu_entry *next = entry->next;
                                unsigned long realend;

                                /* "realstart" is always >= entry->vaddr */
                                realend = entry->vaddr + SUN4C_REAL_PGDIR_SIZE;
                                if (end < realend)
                                        realend = end;
                                if ((realend - entry->vaddr) <= (PAGE_SIZE << 3)) {
                                        unsigned long page = entry->vaddr;
                                        while (page < realend) {
                                                sun4c_flush_page(page);
                                                page += PAGE_SIZE;
                                        }
                                } else {
                                        sun4c_flush_segment(entry->vaddr);
                                        sun4c_user_unmap(entry);
                                        free_user_entry(new_ctx, entry);
                                }
                                entry = next;
                        } while ((entry != head) && (entry->vaddr < end));
                        sun4c_set_context(octx);
                }
                local_irq_restore(flags);
        }
}

static void sun4c_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
        struct mm_struct *mm = vma->vm_mm;
        int new_ctx = mm->context;

        /* Sun4c has no separate I/D caches so cannot optimize for non
         * text page flushes.
         */
        if (new_ctx != NO_CONTEXT) {
                int octx = sun4c_get_context();
                unsigned long flags;

                flush_user_windows();
                local_irq_save(flags);
                sun4c_set_context(new_ctx);
                sun4c_flush_page(page);
                sun4c_set_context(octx);
                local_irq_restore(flags);
        }
}

static void sun4c_flush_page_to_ram(unsigned long page)
{
        unsigned long flags;

        local_irq_save(flags);
        sun4c_flush_page(page);
        local_irq_restore(flags);
}

/* Sun4c cache is unified, both instructions and data live there, so
 * no need to flush the on-stack instructions for new signal handlers.
 */
static void sun4c_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
}

/* TLB flushing on the sun4c.  These routines count on the cache
 * flushing code to flush the user register windows so that we need
 * not do so when we get here.
 */

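/* Drop every kernel segmap: flush each segment from the cache, mark
 * it invalid in every context, then release the mapping entry.
 */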
static void sun4c_flush_tlb_all(void)
{
        struct sun4c_mmu_entry *this_entry, *next_entry;
        unsigned long flags;
        int savectx, ctx;

        local_irq_save(flags);
        this_entry = sun4c_kernel_ring.ringhd.next;
        savectx = sun4c_get_context();
        flush_user_windows();
        while (sun4c_kernel_ring.num_entries) {
                next_entry = this_entry->next;
                sun4c_flush_segment(this_entry->vaddr);
                for (ctx = 0; ctx < num_contexts; ctx++) {
                        sun4c_set_context(ctx);
                        sun4c_put_segmap(this_entry->vaddr, invalid_segment);
                }
                free_kernel_entry(this_entry, &sun4c_kernel_ring);
                this_entry = next_entry;
        }
        sun4c_set_context(savectx);
        local_irq_restore(flags);
}

static void sun4c_flush_tlb_mm(struct mm_struct *mm)
{
        int new_ctx = mm->context;

        if (new_ctx != NO_CONTEXT) {
                struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
                unsigned long flags;

                local_irq_save(flags);
                if (head->next != head) {
                        struct sun4c_mmu_entry *entry = head->next;
                        int savectx = sun4c_get_context();

                        sun4c_set_context(new_ctx);
                        sun4c_flush_context();
                        do {
                                struct sun4c_mmu_entry *next = entry->next;

                                sun4c_user_unmap(entry);
                                free_user_entry(new_ctx, entry);

                                entry = next;
                        } while (entry != head);
                        sun4c_set_context(savectx);
                }
                local_irq_restore(flags);
        }
}

static void sun4c_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        int new_ctx = mm->context;

        if (new_ctx != NO_CONTEXT) {
                struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
                struct sun4c_mmu_entry *entry;
                unsigned long flags;

                local_irq_save(flags);
                /* See commentary in sun4c_flush_cache_range(). */
                for (entry = head->next;
                     (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start);
                     entry = entry->next)
                        ;

                if ((entry != head) && (entry->vaddr < end)) {
                        int octx = sun4c_get_context();

                        sun4c_set_context(new_ctx);
                        do {
                                struct sun4c_mmu_entry *next = entry->next;

                                sun4c_flush_segment(entry->vaddr);
                                sun4c_user_unmap(entry);
                                free_user_entry(new_ctx, entry);

                                entry = next;
                        } while ((entry != head) && (entry->vaddr < end));
                        sun4c_set_context(octx);
                }
                local_irq_restore(flags);
        }
}

static void sun4c_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        struct mm_struct *mm = vma->vm_mm;
        int new_ctx = mm->context;

        if (new_ctx != NO_CONTEXT) {
                int savectx = sun4c_get_context();
                unsigned long flags;

                local_irq_save(flags);
                sun4c_set_context(new_ctx);
                page &= PAGE_MASK;
                sun4c_flush_page(page);
                sun4c_put_pte(page, 0);
                sun4c_set_context(savectx);
                local_irq_restore(flags);
        }
}

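/* Install a raw hardware PTE mapping virt_addr to a physical I/O page,
 * with the I/O and no-cache bits set.  Only the MMU is touched; the
 * software page tables are not updated.
 */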
static inline void sun4c_mapioaddr(unsigned long physaddr, unsigned long virt_addr)
{
        unsigned long page_entry, pg_iobits;

        pg_iobits = _SUN4C_PAGE_PRESENT | _SUN4C_READABLE | _SUN4C_WRITEABLE |
                    _SUN4C_PAGE_IO | _SUN4C_PAGE_NOCACHE;

        page_entry = ((physaddr >> PAGE_SHIFT) & SUN4C_PFN_MASK);
        page_entry |= ((pg_iobits | _SUN4C_PAGE_PRIV) & ~(_SUN4C_PAGE_PRESENT));
        sun4c_put_pte(virt_addr, page_entry);
}

static void sun4c_mapiorange(unsigned int bus, unsigned long xpa,
    unsigned long xva, unsigned int len)
{
        while (len != 0) {
                len -= PAGE_SIZE;
                sun4c_mapioaddr(xpa, xva);
                xva += PAGE_SIZE;
                xpa += PAGE_SIZE;
        }
}

static void sun4c_unmapiorange(unsigned long virt_addr, unsigned int len)
{
        while (len != 0) {
                len -= PAGE_SIZE;
                sun4c_put_pte(virt_addr, 0);
                virt_addr += PAGE_SIZE;
        }
}

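/* Give 'mm' a hardware context: take one from the free list if
 * possible, otherwise recycle one from the used list (skipping the
 * context owned by old_mm), marking the victim mm as having no
 * context and demapping its segmaps.
 */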
static void sun4c_alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
{
        struct ctx_list *ctxp;

        ctxp = ctx_free.next;
        if (ctxp != &ctx_free) {
                remove_from_ctx_list(ctxp);
                add_to_used_ctxlist(ctxp);
                mm->context = ctxp->ctx_number;
                ctxp->ctx_mm = mm;
                return;
        }
        ctxp = ctx_used.next;
        if (ctxp->ctx_mm == old_mm)
                ctxp = ctxp->next;
        remove_from_ctx_list(ctxp);
        add_to_used_ctxlist(ctxp);
        ctxp->ctx_mm->context = NO_CONTEXT;
        ctxp->ctx_mm = mm;
        mm->context = ctxp->ctx_number;
        sun4c_demap_context(&sun4c_context_ring[ctxp->ctx_number],
                               ctxp->ctx_number);
}

/* Switch the current MM context. */
static void sun4c_switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk, int cpu)
{
        struct ctx_list *ctx;
        int dirty = 0;

        if (mm->context == NO_CONTEXT) {
                dirty = 1;
                sun4c_alloc_context(old_mm, mm);
        } else {
                /* Update the LRU ring of contexts. */
                ctx = ctx_list_pool + mm->context;
                remove_from_ctx_list(ctx);
                add_to_used_ctxlist(ctx);
        }
        if (dirty || old_mm != mm)
                sun4c_set_context(mm->context);
}

static void sun4c_destroy_context(struct mm_struct *mm)
{
        struct ctx_list *ctx_old;

        if (mm->context != NO_CONTEXT) {
                sun4c_demap_context(&sun4c_context_ring[mm->context], mm->context);
                ctx_old = ctx_list_pool + mm->context;
                remove_from_ctx_list(ctx_old);
                add_to_free_ctxlist(ctx_old);
                mm->context = NO_CONTEXT;
        }
}

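/* Dump cache geometry and segmap usage counters to the given seq_file. */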
static void sun4c_mmu_info(struct seq_file *m)
{
        int used_user_entries, i;

        used_user_entries = 0;
        for (i = 0; i < num_contexts; i++)
                used_user_entries += sun4c_context_ring[i].num_entries;

        seq_printf(m,
                   "vacsize\t\t: %d bytes\n"
                   "vachwflush\t: %s\n"
                   "vaclinesize\t: %d bytes\n"
                   "mmuctxs\t\t: %d\n"
                   "mmupsegs\t: %d\n"
                   "kernelpsegs\t: %d\n"
                   "kfreepsegs\t: %d\n"
                   "usedpsegs\t: %d\n"
                   "ufreepsegs\t: %d\n"
                   "user_taken\t: %d\n"
                   "max_taken\t: %d\n",
                   sun4c_vacinfo.num_bytes,
                   (sun4c_vacinfo.do_hwflushes ? "yes" : "no"),
                   sun4c_vacinfo.linesize,
                   num_contexts,
                   (invalid_segment + 1),
                   sun4c_kernel_ring.num_entries,
                   sun4c_kfree_ring.num_entries,
                   used_user_entries,
                   sun4c_ufree_ring.num_entries,
                   sun4c_user_taken_entries,
                   max_user_taken_entries);
}

/* Nothing below here should touch the mmu hardware nor the mmu_entry
 * data structures.
 */

/* First the functions which the mid-level code uses to directly
 * manipulate the software page tables.  Some defines since we are
 * emulating the i386 page directory layout.
 */
#define PGD_PRESENT  0x001
#define PGD_RW       0x002
#define PGD_USER     0x004
#define PGD_ACCESSED 0x020
#define PGD_DIRTY    0x040
#define PGD_TABLE    (PGD_PRESENT | PGD_RW | PGD_USER | PGD_ACCESSED | PGD_DIRTY)

static void sun4c_set_pte(pte_t *ptep, pte_t pte)
{
        *ptep = pte;
}

static void sun4c_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{
}

static void sun4c_pmd_set(pmd_t * pmdp, pte_t * ptep)
{
        pmdp->pmdv[0] = PGD_TABLE | (unsigned long) ptep;
}

static void sun4c_pmd_populate(pmd_t * pmdp, struct page * ptep)
{
        if (page_address(ptep) == NULL) BUG();  /* No highmem on sun4c */
        pmdp->pmdv[0] = PGD_TABLE | (unsigned long) page_address(ptep);
}

static int sun4c_pte_present(pte_t pte)
{
        return ((pte_val(pte) & (_SUN4C_PAGE_PRESENT | _SUN4C_PAGE_PRIV)) != 0);
}
static void sun4c_pte_clear(pte_t *ptep)        { *ptep = __pte(0); }

static int sun4c_pmd_bad(pmd_t pmd)
{
        return (((pmd_val(pmd) & ~PAGE_MASK) != PGD_TABLE) ||
                (!virt_addr_valid(pmd_val(pmd))));
}

static int sun4c_pmd_present(pmd_t pmd)
{
        return ((pmd_val(pmd) & PGD_PRESENT) != 0);
}

#if 0 /* if PMD takes one word */
static void sun4c_pmd_clear(pmd_t *pmdp)        { *pmdp = __pmd(0); }
#else /* if pmd_t is a longish aggregate */
static void sun4c_pmd_clear(pmd_t *pmdp) {
        memset((void *)pmdp, 0, sizeof(pmd_t));
}
#endif

static int sun4c_pgd_none(pgd_t pgd)            { return 0; }
static int sun4c_pgd_bad(pgd_t pgd)             { return 0; }
static int sun4c_pgd_present(pgd_t pgd)         { return 1; }
static void sun4c_pgd_clear(pgd_t * pgdp)       { }

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static pte_t sun4c_pte_mkwrite(pte_t pte)
{
        pte = __pte(pte_val(pte) | _SUN4C_PAGE_WRITE);
        if (pte_val(pte) & _SUN4C_PAGE_MODIFIED)
                pte = __pte(pte_val(pte) | _SUN4C_PAGE_SILENT_WRITE);
        return pte;
}

static pte_t sun4c_pte_mkdirty(pte_t pte)
{
        pte = __pte(pte_val(pte) | _SUN4C_PAGE_MODIFIED);
        if (pte_val(pte) & _SUN4C_PAGE_WRITE)
                pte = __pte(pte_val(pte) | _SUN4C_PAGE_SILENT_WRITE);
        return pte;
}

static pte_t sun4c_pte_mkyoung(pte_t pte)
{
        pte = __pte(pte_val(pte) | _SUN4C_PAGE_ACCESSED);
        if (pte_val(pte) & _SUN4C_PAGE_READ)
                pte = __pte(pte_val(pte) | _SUN4C_PAGE_SILENT_READ);
        return pte;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static pte_t sun4c_mk_pte(struct page *page, pgprot_t pgprot)
{
        return __pte(page_to_pfn(page) | pgprot_val(pgprot));
}

static pte_t sun4c_mk_pte_phys(unsigned long phys_page, pgprot_t pgprot)
{
        return __pte((phys_page >> PAGE_SHIFT) | pgprot_val(pgprot));
}

static pte_t sun4c_mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
{
        return __pte(((page - PAGE_OFFSET) >> PAGE_SHIFT) | pgprot_val(pgprot));
}

static unsigned long sun4c_pte_pfn(pte_t pte)
{
        return pte_val(pte) & SUN4C_PFN_MASK;
}

static pte_t sun4c_pgoff_to_pte(unsigned long pgoff)
{
        return __pte(pgoff | _SUN4C_PAGE_FILE);
}

static unsigned long sun4c_pte_to_pgoff(pte_t pte)
{
        return pte_val(pte) & ((1UL << PTE_FILE_MAX_BITS) - 1);
}


static inline unsigned long sun4c_pmd_page_v(pmd_t pmd)
{
        return (pmd_val(pmd) & PAGE_MASK);
}

static struct page *sun4c_pmd_page(pmd_t pmd)
{
        return virt_to_page(sun4c_pmd_page_v(pmd));
}

static unsigned long sun4c_pgd_page(pgd_t pgd) { return 0; }

/* to find an entry in a page-table-directory */
static inline pgd_t *sun4c_pgd_offset(struct mm_struct * mm, unsigned long address)
{
        return mm->pgd + (address >> SUN4C_PGDIR_SHIFT);
}

/* Find an entry in the second-level page table.. */
static pmd_t *sun4c_pmd_offset(pgd_t * dir, unsigned long address)
{
        return (pmd_t *) dir;
}

/* Find an entry in the third-level page table.. */
pte_t *sun4c_pte_offset_kernel(pmd_t * dir, unsigned long address)
{
        return (pte_t *) sun4c_pmd_page_v(*dir) +
                        ((address >> PAGE_SHIFT) & (SUN4C_PTRS_PER_PTE - 1));
}

static unsigned long sun4c_swp_type(swp_entry_t entry)
{
        return (entry.val & SUN4C_SWP_TYPE_MASK);
}

static unsigned long sun4c_swp_offset(swp_entry_t entry)
{
        return (entry.val >> SUN4C_SWP_OFF_SHIFT) & SUN4C_SWP_OFF_MASK;
}

static swp_entry_t sun4c_swp_entry(unsigned long type, unsigned long offset)
{
        return (swp_entry_t) {
                  (offset & SUN4C_SWP_OFF_MASK) << SUN4C_SWP_OFF_SHIFT
                | (type & SUN4C_SWP_TYPE_MASK) };
}

static void sun4c_free_pte_slow(pte_t *pte)
{
        free_page((unsigned long)pte);
}

static void sun4c_free_pgd_slow(pgd_t *pgd)
{
        free_page((unsigned long)pgd);
}

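/* Page directory allocation: pop one off the pgd quicklist when
 * available, otherwise grab a fresh page, clear the user slots and
 * copy the kernel entries from init_mm's page directory.
 */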
static pgd_t *sun4c_get_pgd_fast(void)
{
        unsigned long *ret;

        if ((ret = pgd_quicklist) != NULL) {
                pgd_quicklist = (unsigned long *)(*ret);
                ret[0] = ret[1];
                pgtable_cache_size--;
        } else {
                pgd_t *init;

                ret = (unsigned long *)__get_free_page(GFP_KERNEL);
                memset (ret, 0, (KERNBASE / SUN4C_PGDIR_SIZE) * sizeof(pgd_t));
                init = sun4c_pgd_offset(&init_mm, 0);
                memcpy (((pgd_t *)ret) + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
                        (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
        }
        return (pgd_t *)ret;
}

static void sun4c_free_pgd_fast(pgd_t *pgd)
{
        *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
        pgd_quicklist = (unsigned long *) pgd;
        pgtable_cache_size++;
}


static inline pte_t *
sun4c_pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
        unsigned long *ret;

        if ((ret = (unsigned long *)pte_quicklist) != NULL) {
                pte_quicklist = (unsigned long *)(*ret);
                ret[0] = ret[1];
                pgtable_cache_size--;
        }
        return (pte_t *)ret;
}

static pte_t *sun4c_pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
        pte_t *pte;

        if ((pte = sun4c_pte_alloc_one_fast(mm, address)) != NULL)
                return pte;

        pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
        if (pte)
                memset(pte, 0, PAGE_SIZE);
        return pte;
}

static struct page *sun4c_pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
        pte_t *pte = sun4c_pte_alloc_one_kernel(mm, address);
        if (pte == NULL)
                return NULL;
        return virt_to_page(pte);
}

static inline void sun4c_free_pte_fast(pte_t *pte)
{
        *(unsigned long *)pte = (unsigned long) pte_quicklist;
        pte_quicklist = (unsigned long *) pte;
        pgtable_cache_size++;
}

static void sun4c_pte_free(struct page *pte)
{
        sun4c_free_pte_fast(page_address(pte));
}

/*
 * allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
 */
static pmd_t *sun4c_pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
        BUG();
        return NULL;
}

static void sun4c_free_pmd_fast(pmd_t * pmd) { }

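/* Trim the pgd/pte quicklists: once the cache grows past 'high'
 * entries, free pages back to the system until it drops to 'low'.
 */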
static void sun4c_check_pgt_cache(int low, int high)
{
        if (pgtable_cache_size > high) {
                do {
                        if (pgd_quicklist)
                                sun4c_free_pgd_slow(sun4c_get_pgd_fast());
                        if (pte_quicklist)
                                sun4c_free_pte_slow(sun4c_pte_alloc_one_fast(NULL, 0));
                } while (pgtable_cache_size > low);
        }
}

/* An experiment, turn off by default for now... -DaveM */
#define SUN4C_PRELOAD_PSEG

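/* Software TLB reload hook, called after a fault has updated the page
 * tables.  If no segmap covers the faulting address, grab one, add it
 * to this context's ring and load the segment's PTEs; otherwise just
 * refresh the segmap's LRU position and install the new PTE.
 */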
void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
        unsigned long flags;
        int pseg;

        if (vma->vm_mm->context == NO_CONTEXT)
                return;

        local_irq_save(flags);
        address &= PAGE_MASK;
        if ((pseg = sun4c_get_segmap(address)) == invalid_segment) {
                struct sun4c_mmu_entry *entry = sun4c_user_strategy();
                struct mm_struct *mm = vma->vm_mm;
                unsigned long start, end;

                entry->vaddr = start = (address & SUN4C_REAL_PGDIR_MASK);
                entry->ctx = mm->context;
                add_ring_ordered(sun4c_context_ring + mm->context, entry);
                sun4c_put_segmap(entry->vaddr, entry->pseg);
                end = start + SUN4C_REAL_PGDIR_SIZE;
                while (start < end) {
#ifdef SUN4C_PRELOAD_PSEG
                        pgd_t *pgdp = sun4c_pgd_offset(mm, start);
                        pte_t *ptep;

                        if (!pgdp)
                                goto no_mapping;
                        ptep = sun4c_pte_offset_kernel((pmd_t *) pgdp, start);
                        if (!ptep || !(pte_val(*ptep) & _SUN4C_PAGE_PRESENT))
                                goto no_mapping;
                        sun4c_put_pte(start, pte_val(*ptep));
                        goto next;

                no_mapping:
#endif
                        sun4c_put_pte(start, 0);
#ifdef SUN4C_PRELOAD_PSEG
                next:
#endif
                        start += PAGE_SIZE;
                }
#ifndef SUN4C_PRELOAD_PSEG
                sun4c_put_pte(address, pte_val(pte));
#endif
                local_irq_restore(flags);
                return;
        } else {
                struct sun4c_mmu_entry *entry = &mmu_entry_pool[pseg];

                remove_lru(entry);
                add_lru(entry);
        }

        sun4c_put_pte(address, pte_val(pte));
        local_irq_restore(flags);
}

extern void sparc_context_init(int);
extern unsigned long end;
extern unsigned long bootmem_init(unsigned long *pages_avail);
extern unsigned long last_valid_pfn;

void __init sun4c_paging_init(void)
{
        int i, cnt;
        unsigned long kernel_end, vaddr;
        extern struct resource sparc_iomap;
        unsigned long end_pfn, pages_avail;

        kernel_end = (unsigned long) &end;
        kernel_end = SUN4C_REAL_PGDIR_ALIGN(kernel_end);

        pages_avail = 0;
        last_valid_pfn = bootmem_init(&pages_avail);
        end_pfn = last_valid_pfn;

        sun4c_probe_mmu();
        invalid_segment = (num_segmaps - 1);
        sun4c_init_mmu_entry_pool();
        sun4c_init_rings();
        sun4c_init_map_kernelprom(kernel_end);
        sun4c_init_clean_mmu(kernel_end);
        sun4c_init_fill_kernel_ring(SUN4C_KERNEL_BUCKETS);
        sun4c_init_lock_area(sparc_iomap.start, IOBASE_END);
        sun4c_init_lock_area(DVMA_VADDR, DVMA_END);
        sun4c_init_lock_areas();
        sun4c_init_fill_user_ring();

        sun4c_set_context(0);
        memset(swapper_pg_dir, 0, PAGE_SIZE);
        memset(pg0, 0, PAGE_SIZE);
        memset(pg1, 0, PAGE_SIZE);
        memset(pg2, 0, PAGE_SIZE);
        memset(pg3, 0, PAGE_SIZE);

        /* Save work later. */
        vaddr = VMALLOC_START;
        swapper_pg_dir[vaddr>>SUN4C_PGDIR_SHIFT] = __pgd(PGD_TABLE | (unsigned long) pg0);
        vaddr += SUN4C_PGDIR_SIZE;
        swapper_pg_dir[vaddr>>SUN4C_PGDIR_SHIFT] = __pgd(PGD_TABLE | (unsigned long) pg1);
        vaddr += SUN4C_PGDIR_SIZE;
        swapper_pg_dir[vaddr>>SUN4C_PGDIR_SHIFT] = __pgd(PGD_TABLE | (unsigned long) pg2);
        vaddr += SUN4C_PGDIR_SIZE;
        swapper_pg_dir[vaddr>>SUN4C_PGDIR_SHIFT] = __pgd(PGD_TABLE | (unsigned long) pg3);
        sun4c_init_ss2_cache_bug();
        sparc_context_init(num_contexts);

        {
                unsigned long zones_size[MAX_NR_ZONES];
                unsigned long zholes_size[MAX_NR_ZONES];
                unsigned long npages;
                int znum;

                for (znum = 0; znum < MAX_NR_ZONES; znum++)
                        zones_size[znum] = zholes_size[znum] = 0;

                npages = max_low_pfn - pfn_base;

                zones_size[ZONE_DMA] = npages;
                zholes_size[ZONE_DMA] = npages - pages_avail;

                npages = highend_pfn - max_low_pfn;
                zones_size[ZONE_HIGHMEM] = npages;
                zholes_size[ZONE_HIGHMEM] = npages - calc_highpages();

                free_area_init_node(0, &contig_page_data, zones_size,
                                    pfn_base, zholes_size);
        }

        cnt = 0;
        for (i = 0; i < num_segmaps; i++)
                if (mmu_entry_pool[i].locked)
                        cnt++;

        max_user_taken_entries = num_segmaps - cnt - 40 - 1;

        printk("SUN4C: %d mmu entries for the kernel\n", cnt);
}

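/* Mark a protection value as I/O space, non-cacheable. */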
static pgprot_t sun4c_pgprot_noncached(pgprot_t prot)
{
        prot |= __pgprot(_SUN4C_PAGE_IO | _SUN4C_PAGE_NOCACHE);

        return prot;
}

/* Load up routines and constants for sun4c mmu */
void __init ld_mmu_sun4c(void)
{
        extern void ___xchg32_sun4c(void);

        printk("Loading sun4c MMU routines\n");

        /* First the constants */
        BTFIXUPSET_SIMM13(pgdir_shift, SUN4C_PGDIR_SHIFT);
        BTFIXUPSET_SETHI(pgdir_size, SUN4C_PGDIR_SIZE);
        BTFIXUPSET_SETHI(pgdir_mask, SUN4C_PGDIR_MASK);

        BTFIXUPSET_SIMM13(ptrs_per_pmd, SUN4C_PTRS_PER_PMD);
        BTFIXUPSET_SIMM13(ptrs_per_pgd, SUN4C_PTRS_PER_PGD);
        BTFIXUPSET_SIMM13(user_ptrs_per_pgd, KERNBASE / SUN4C_PGDIR_SIZE);

        BTFIXUPSET_INT(page_none, pgprot_val(SUN4C_PAGE_NONE));
        PAGE_SHARED = pgprot_val(SUN4C_PAGE_SHARED);
        BTFIXUPSET_INT(page_copy, pgprot_val(SUN4C_PAGE_COPY));
        BTFIXUPSET_INT(page_readonly, pgprot_val(SUN4C_PAGE_READONLY));
        BTFIXUPSET_INT(page_kernel, pgprot_val(SUN4C_PAGE_KERNEL));
        page_kernel = pgprot_val(SUN4C_PAGE_KERNEL);

        /* Functions */
        BTFIXUPSET_CALL(pgprot_noncached, sun4c_pgprot_noncached, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4c, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(do_check_pgt_cache, sun4c_check_pgt_cache, BTFIXUPCALL_NORM);

        BTFIXUPSET_CALL(flush_cache_all, sun4c_flush_cache_all, BTFIXUPCALL_NORM);

        if (sun4c_vacinfo.do_hwflushes) {
                BTFIXUPSET_CALL(sun4c_flush_page, sun4c_flush_page_hw, BTFIXUPCALL_NORM);
                BTFIXUPSET_CALL(sun4c_flush_segment, sun4c_flush_segment_hw, BTFIXUPCALL_NORM);
                BTFIXUPSET_CALL(sun4c_flush_context, sun4c_flush_context_hw, BTFIXUPCALL_NORM);
        } else {
                BTFIXUPSET_CALL(sun4c_flush_page, sun4c_flush_page_sw, BTFIXUPCALL_NORM);
                BTFIXUPSET_CALL(sun4c_flush_segment, sun4c_flush_segment_sw, BTFIXUPCALL_NORM);
                BTFIXUPSET_CALL(sun4c_flush_context, sun4c_flush_context_sw, BTFIXUPCALL_NORM);
        }

        BTFIXUPSET_CALL(flush_tlb_mm, sun4c_flush_tlb_mm, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(flush_cache_mm, sun4c_flush_cache_mm, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(destroy_context, sun4c_destroy_context, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(switch_mm, sun4c_switch_mm, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(flush_cache_page, sun4c_flush_cache_page, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(flush_tlb_page, sun4c_flush_tlb_page, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(flush_tlb_range, sun4c_flush_tlb_range, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(flush_cache_range, sun4c_flush_cache_range, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(__flush_page_to_ram, sun4c_flush_page_to_ram, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(flush_tlb_all, sun4c_flush_tlb_all, BTFIXUPCALL_NORM);

        BTFIXUPSET_CALL(flush_sig_insns, sun4c_flush_sig_insns, BTFIXUPCALL_NOP);

        BTFIXUPSET_CALL(set_pte, sun4c_set_pte, BTFIXUPCALL_STO1O0);

        /* The 2.4.18 code does not set this on sun4c, how does it work? XXX */
        /* BTFIXUPSET_SETHI(none_mask, 0x00000000); */  /* Defaults to zero? */

        BTFIXUPSET_CALL(pte_pfn, sun4c_pte_pfn, BTFIXUPCALL_NORM);
#if 0 /* PAGE_SHIFT <= 12 */ /* Eek. Investigate. XXX */
        BTFIXUPSET_CALL(pmd_page, sun4c_pmd_page, BTFIXUPCALL_ANDNINT(PAGE_SIZE - 1));
#else
        BTFIXUPSET_CALL(pmd_page, sun4c_pmd_page, BTFIXUPCALL_NORM);
#endif
        BTFIXUPSET_CALL(pmd_set, sun4c_pmd_set, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(pmd_populate, sun4c_pmd_populate, BTFIXUPCALL_NORM);

        BTFIXUPSET_CALL(pte_present, sun4c_pte_present, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(pte_clear, sun4c_pte_clear, BTFIXUPCALL_STG0O0);

        BTFIXUPSET_CALL(pmd_bad, sun4c_pmd_bad, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(pmd_present, sun4c_pmd_present, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(pmd_clear, sun4c_pmd_clear, BTFIXUPCALL_STG0O0);

        BTFIXUPSET_CALL(pgd_none, sun4c_pgd_none, BTFIXUPCALL_RETINT(0));
        BTFIXUPSET_CALL(pgd_bad, sun4c_pgd_bad, BTFIXUPCALL_RETINT(0));
        BTFIXUPSET_CALL(pgd_present, sun4c_pgd_present, BTFIXUPCALL_RETINT(1));
        BTFIXUPSET_CALL(pgd_clear, sun4c_pgd_clear, BTFIXUPCALL_NOP);

        BTFIXUPSET_CALL(mk_pte, sun4c_mk_pte, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(mk_pte_phys, sun4c_mk_pte_phys, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(mk_pte_io, sun4c_mk_pte_io, BTFIXUPCALL_NORM);

        BTFIXUPSET_INT(pte_modify_mask, _SUN4C_PAGE_CHG_MASK);
        BTFIXUPSET_CALL(pmd_offset, sun4c_pmd_offset, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(pte_offset_kernel, sun4c_pte_offset_kernel, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(free_pte_fast, sun4c_free_pte_fast, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(pte_free, sun4c_pte_free, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(pte_alloc_one_kernel, sun4c_pte_alloc_one_kernel, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(pte_alloc_one, sun4c_pte_alloc_one, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(free_pmd_fast, sun4c_free_pmd_fast, BTFIXUPCALL_NOP);
        BTFIXUPSET_CALL(pmd_alloc_one, sun4c_pmd_alloc_one, BTFIXUPCALL_RETO0);
        BTFIXUPSET_CALL(free_pgd_fast, sun4c_free_pgd_fast, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(get_pgd_fast, sun4c_get_pgd_fast, BTFIXUPCALL_NORM);

        BTFIXUPSET_HALF(pte_writei, _SUN4C_PAGE_WRITE);
        BTFIXUPSET_HALF(pte_dirtyi, _SUN4C_PAGE_MODIFIED);
        BTFIXUPSET_HALF(pte_youngi, _SUN4C_PAGE_ACCESSED);
        BTFIXUPSET_HALF(pte_filei, _SUN4C_PAGE_FILE);
        BTFIXUPSET_HALF(pte_wrprotecti, _SUN4C_PAGE_WRITE|_SUN4C_PAGE_SILENT_WRITE);
        BTFIXUPSET_HALF(pte_mkcleani, _SUN4C_PAGE_MODIFIED|_SUN4C_PAGE_SILENT_WRITE);
        BTFIXUPSET_HALF(pte_mkoldi, _SUN4C_PAGE_ACCESSED|_SUN4C_PAGE_SILENT_READ);
        BTFIXUPSET_CALL(pte_mkwrite, sun4c_pte_mkwrite, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(pte_mkdirty, sun4c_pte_mkdirty, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(pte_mkyoung, sun4c_pte_mkyoung, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(update_mmu_cache, sun4c_update_mmu_cache, BTFIXUPCALL_NORM);

        BTFIXUPSET_CALL(pte_to_pgoff, sun4c_pte_to_pgoff, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(pgoff_to_pte, sun4c_pgoff_to_pte, BTFIXUPCALL_NORM);

        BTFIXUPSET_CALL(mmu_lockarea, sun4c_lockarea, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(mmu_unlockarea, sun4c_unlockarea, BTFIXUPCALL_NORM);

        BTFIXUPSET_CALL(mmu_get_scsi_one, sun4c_get_scsi_one, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(mmu_get_scsi_sgl, sun4c_get_scsi_sgl, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(mmu_release_scsi_one, sun4c_release_scsi_one, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(mmu_release_scsi_sgl, sun4c_release_scsi_sgl, BTFIXUPCALL_NORM);

        BTFIXUPSET_CALL(mmu_map_dma_area, sun4c_map_dma_area, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(mmu_unmap_dma_area, sun4c_unmap_dma_area, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(mmu_translate_dvma, sun4c_translate_dvma, BTFIXUPCALL_NORM);

        BTFIXUPSET_CALL(sparc_mapiorange, sun4c_mapiorange, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(sparc_unmapiorange, sun4c_unmapiorange, BTFIXUPCALL_NORM);

        BTFIXUPSET_CALL(__swp_type, sun4c_swp_type, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(__swp_offset, sun4c_swp_offset, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(__swp_entry, sun4c_swp_entry, BTFIXUPCALL_NORM);

        BTFIXUPSET_CALL(alloc_thread_info, sun4c_alloc_thread_info, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(free_thread_info, sun4c_free_thread_info, BTFIXUPCALL_NORM);

        BTFIXUPSET_CALL(mmu_info, sun4c_mmu_info, BTFIXUPCALL_NORM);

        /* These should _never_ get called with two level tables. */
        BTFIXUPSET_CALL(pgd_set, sun4c_pgd_set, BTFIXUPCALL_NOP);
        BTFIXUPSET_CALL(pgd_page_vaddr, sun4c_pgd_page, BTFIXUPCALL_RETO0);
}
