OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

[/] [or1k/] [trunk/] [linux/] [linux-2.4/] [arch/] [sparc64/] [mm/] [fault.c] - Blame information for rev 1765

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 1275 phoenix
/* $Id: fault.c,v 1.1.1.1 2004-04-15 01:33:38 phoenix Exp $
2
 * arch/sparc64/mm/fault.c: Page fault handlers for the 64-bit Sparc.
3
 *
4
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
5
 * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
6
 */
7
 
8
#include <asm/head.h>
9
 
10
#include <linux/string.h>
11
#include <linux/types.h>
12
#include <linux/ptrace.h>
13
#include <linux/mman.h>
14
#include <linux/signal.h>
15
#include <linux/mm.h>
16
#include <linux/smp_lock.h>
17
#include <linux/init.h>
18
#include <linux/interrupt.h>
19
 
20
#include <asm/page.h>
21
#include <asm/pgtable.h>
22
#include <asm/openprom.h>
23
#include <asm/oplib.h>
24
#include <asm/uaccess.h>
25
#include <asm/asi.h>
26
#include <asm/lsu.h>
27
 
28
#define ELEMENTS(arr) (sizeof (arr)/sizeof (arr[0]))
29
 
30
extern struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
31
 
32
/*
33
 * To debug kernel during syscall entry.
34
 */
35
void syscall_trace_entry(struct pt_regs *regs)
36
{
37
        printk("scall entry: %s[%d]/cpu%d: %d\n", current->comm, current->pid, smp_processor_id(), (int) regs->u_regs[UREG_G1]);
38
}
39
 
40
/*
41
 * To debug kernel during syscall exit.
42
 */
43
void syscall_trace_exit(struct pt_regs *regs)
44
{
45
        printk("scall exit: %s[%d]/cpu%d: %d\n", current->comm, current->pid, smp_processor_id(), (int) regs->u_regs[UREG_G1]);
46
}
47
 
48
/*
49
 * To debug kernel to catch accesses to certain virtual/physical addresses.
50
 * Mode = 0 selects physical watchpoints, mode = 1 selects virtual watchpoints.
51
 * flags = VM_READ watches memread accesses, flags = VM_WRITE watches memwrite accesses.
52
 * Caller passes in a 64bit aligned addr, with mask set to the bytes that need to be
53
 * watched. This is only useful on a single cpu machine for now. After the watchpoint
54
 * is detected, the process causing it will be killed, thus preventing an infinite loop.
55
 */
56
void set_brkpt(unsigned long addr, unsigned char mask, int flags, int mode)
57
{
58
        unsigned long lsubits;
59
 
60
        __asm__ __volatile__("ldxa [%%g0] %1, %0"
61
                             : "=r" (lsubits)
62
                             : "i" (ASI_LSU_CONTROL));
63
        lsubits &= ~(LSU_CONTROL_PM | LSU_CONTROL_VM |
64
                     LSU_CONTROL_PR | LSU_CONTROL_VR |
65
                     LSU_CONTROL_PW | LSU_CONTROL_VW);
66
 
67
        __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
68
                             "membar    #Sync"
69
                             : /* no outputs */
70
                             : "r" (addr), "r" (mode ? VIRT_WATCHPOINT : PHYS_WATCHPOINT),
71
                               "i" (ASI_DMMU));
72
 
73
        lsubits |= ((unsigned long)mask << (mode ? 25 : 33));
74
        if (flags & VM_READ)
75
                lsubits |= (mode ? LSU_CONTROL_VR : LSU_CONTROL_PR);
76
        if (flags & VM_WRITE)
77
                lsubits |= (mode ? LSU_CONTROL_VW : LSU_CONTROL_PW);
78
        __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
79
                             "membar #Sync"
80
                             : /* no outputs */
81
                             : "r" (lsubits), "i" (ASI_LSU_CONTROL)
82
                             : "memory");
83
}
84
 
85
/* Nice, simple, prom library does all the sweating for us. ;) */
86
/* Probe physical memory via the PROM's "available" list, filling in
 * sp_banks[] and terminating it with a sentinel entry.
 *
 * Returns the total number of bytes found (including any banks that
 * had to be dropped because sp_banks[] is full).
 */
unsigned long __init prom_probe_memory (void)
{
	struct linux_mlist_p1275 *mlist;
	unsigned long bytes, base_paddr, tally;
	int i;

	i = 0;
	mlist = *prom_meminfo()->p1275_available;
	bytes = tally = mlist->num_bytes;
	base_paddr = mlist->start_adr;

	sp_banks[0].base_addr = base_paddr;
	sp_banks[0].num_bytes = bytes;

	while (mlist->theres_more != (void *) 0) {
		i++;
		mlist = mlist->theres_more;
		bytes = mlist->num_bytes;
		tally += bytes;
		if (i >= SPARC_PHYS_BANKS-1) {
			printk ("The machine has more banks than "
				"this kernel can support\n"
				"Increase the SPARC_PHYS_BANKS "
				"setting (currently %d)\n",
				SPARC_PHYS_BANKS);
			/* sp_banks[] holds exactly SPARC_PHYS_BANKS entries
			 * (see the extern declaration above), and the i++
			 * below the loop advances one slot before writing
			 * the sentinel.  Clamp to SPARC_PHYS_BANKS-2 so the
			 * sentinel lands in the last valid slot instead of
			 * one past the end of the array.
			 */
			i = SPARC_PHYS_BANKS-2;
			break;
		}

		sp_banks[i].base_addr = mlist->start_adr;
		sp_banks[i].num_bytes = mlist->num_bytes;
	}

	/* Terminate the bank list with an unmistakable sentinel. */
	i++;
	sp_banks[i].base_addr = 0xdeadbeefbeefdeadUL;
	sp_banks[i].num_bytes = 0;

	/* Now mask all bank sizes on a page boundary, it is all we can
	 * use anyways.
	 */
	for (i = 0; sp_banks[i].num_bytes != 0; i++)
		sp_banks[i].num_bytes &= PAGE_MASK;

	return tally;
}
131
 
132
static void unhandled_fault(unsigned long address, struct task_struct *tsk,
133
                            struct pt_regs *regs)
134
{
135
        if ((unsigned long) address < PAGE_SIZE) {
136
                printk(KERN_ALERT "Unable to handle kernel NULL "
137
                       "pointer dereference\n");
138
        } else {
139
                printk(KERN_ALERT "Unable to handle kernel paging request "
140
                       "at virtual address %016lx\n", (unsigned long)address);
141
        }
142
        printk(KERN_ALERT "tsk->{mm,active_mm}->context = %016lx\n",
143
               (tsk->mm ? tsk->mm->context : tsk->active_mm->context));
144
        printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %016lx\n",
145
               (tsk->mm ? (unsigned long) tsk->mm->pgd :
146
                          (unsigned long) tsk->active_mm->pgd));
147
        die_if_kernel("Oops", regs);
148
}
149
 
150
extern void show_trace_raw(struct task_struct *, unsigned long);
151
 
152
static void bad_kernel_pc(struct pt_regs *regs)
153
{
154
        unsigned long ksp;
155
 
156
        printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
157
               regs->tpc);
158
        __asm__("mov %%sp, %0" : "=r" (ksp));
159
        show_trace_raw(current, ksp);
160
        unhandled_fault(regs->tpc, current, regs);
161
}
162
 
163
/*
164
 * We now make sure that mmap_sem is held in all paths that call
165
 * this. Additionally, to prevent kswapd from ripping ptes from
166
 * under us, raise interrupts around the time that we look at the
167
 * pte, kswapd will have to wait to get his smp ipi response from
168
 * us. This saves us having to get page_table_lock.
169
 */
170
static unsigned int get_user_insn(unsigned long tpc)
171
{
172
        pgd_t *pgdp = pgd_offset(current->mm, tpc);
173
        pmd_t *pmdp;
174
        pte_t *ptep, pte;
175
        unsigned long pa;
176
        u32 insn = 0;
177
        unsigned long pstate;
178
 
179
        if (pgd_none(*pgdp))
180
                goto outret;
181
        pmdp = pmd_offset(pgdp, tpc);
182
        if (pmd_none(*pmdp))
183
                goto outret;
184
        ptep = pte_offset(pmdp, tpc);
185
        __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
186
        __asm__ __volatile__("wrpr %0, %1, %%pstate"
187
                                : : "r" (pstate), "i" (PSTATE_IE));
188
        pte = *ptep;
189
        if (!pte_present(pte))
190
                goto out;
191
 
192
        pa  = (pte_val(pte) & _PAGE_PADDR);
193
        pa += (tpc & ~PAGE_MASK);
194
 
195
        /* Use phys bypass so we don't pollute dtlb/dcache. */
196
        __asm__ __volatile__("lduwa [%1] %2, %0"
197
                             : "=r" (insn)
198
                             : "r" (pa), "i" (ASI_PHYS_USE_EC));
199
 
200
out:
201
        __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
202
outret:
203
        return insn;
204
}
205
 
206
static void do_fault_siginfo(int code, int sig, unsigned long address)
207
{
208
        siginfo_t info;
209
 
210
        info.si_code = code;
211
        info.si_signo = sig;
212
        info.si_errno = 0;
213
        info.si_addr = (void *) address;
214
        info.si_trapno = 0;
215
        force_sig_info(sig, &info, current);
216
}
217
 
218
extern int handle_ldf_stq(u32, struct pt_regs *);
219
extern int handle_ld_nf(u32, struct pt_regs *);
220
 
221
static inline unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn)
222
{
223
        if (!insn) {
224
                if (!regs->tpc || (regs->tpc & 0x3))
225
                        return 0;
226
                if (regs->tstate & TSTATE_PRIV) {
227
                        insn = *(unsigned int *) regs->tpc;
228
                } else {
229
                        insn = get_user_insn(regs->tpc);
230
                }
231
        }
232
        return insn;
233
}
234
 
235
static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code,
236
                            unsigned int insn, unsigned long address)
237
{
238
        unsigned long g2;
239
        unsigned char asi = ASI_P;
240
 
241
        if ((!insn) && (regs->tstate & TSTATE_PRIV))
242
                goto cannot_handle;
243
 
244
        /* If user insn could be read (thus insn is zero), that
245
         * is fine.  We will just gun down the process with a signal
246
         * in that case.
247
         */
248
 
249
        if (!(fault_code & FAULT_CODE_WRITE) &&
250
            (insn & 0xc0800000) == 0xc0800000) {
251
                if (insn & 0x2000)
252
                        asi = (regs->tstate >> 24);
253
                else
254
                        asi = (insn >> 5);
255
                if ((asi & 0xf2) == 0x82) {
256
                        if (insn & 0x1000000) {
257
                                handle_ldf_stq(insn, regs);
258
                        } else {
259
                                /* This was a non-faulting load. Just clear the
260
                                 * destination register(s) and continue with the next
261
                                 * instruction. -jj
262
                                 */
263
                                handle_ld_nf(insn, regs);
264
                        }
265
                        return;
266
                }
267
        }
268
 
269
        g2 = regs->u_regs[UREG_G2];
270
 
271
        /* Is this in ex_table? */
272
        if (regs->tstate & TSTATE_PRIV) {
273
                unsigned long fixup;
274
 
275
                if (asi == ASI_P && (insn & 0xc0800000) == 0xc0800000) {
276
                        if (insn & 0x2000)
277
                                asi = (regs->tstate >> 24);
278
                        else
279
                                asi = (insn >> 5);
280
                }
281
 
282
                /* Look in asi.h: All _S asis have LS bit set */
283
                if ((asi & 0x1) &&
284
                    (fixup = search_exception_table (regs->tpc, &g2))) {
285
                        regs->tpc = fixup;
286
                        regs->tnpc = regs->tpc + 4;
287
                        regs->u_regs[UREG_G2] = g2;
288
                        return;
289
                }
290
        } else {
291
                /* The si_code was set to make clear whether
292
                 * this was a SEGV_MAPERR or SEGV_ACCERR fault.
293
                 */
294
                do_fault_siginfo(si_code, SIGSEGV, address);
295
                return;
296
        }
297
 
298
cannot_handle:
299
        unhandled_fault (address, current, regs);
300
}
301
 
302
asmlinkage void do_sparc64_fault(struct pt_regs *regs)
303
{
304
        struct mm_struct *mm = current->mm;
305
        struct vm_area_struct *vma;
306
        unsigned int insn = 0;
307
        int si_code, fault_code;
308
        unsigned long address;
309
 
310
        si_code = SEGV_MAPERR;
311
        fault_code = current->thread.fault_code;
312
        address = current->thread.fault_address;
313
 
314
        if ((fault_code & FAULT_CODE_ITLB) &&
315
            (fault_code & FAULT_CODE_DTLB))
316
                BUG();
317
 
318
        if (regs->tstate & TSTATE_PRIV) {
319
                unsigned long tpc = regs->tpc;
320
                extern unsigned int _etext;
321
 
322
                /* Sanity check the PC. */
323
                if ((tpc >= KERNBASE && tpc < (unsigned long) &_etext) ||
324
                    (tpc >= MODULES_VADDR && tpc < MODULES_END)) {
325
                        /* Valid, no problems... */
326
                } else {
327
                        bad_kernel_pc(regs);
328
                        return;
329
                }
330
        }
331
 
332
        /*
333
         * If we're in an interrupt or have no user
334
         * context, we must not take the fault..
335
         */
336
        if (in_interrupt() || !mm)
337
                goto intr_or_no_mm;
338
 
339
        if ((current->thread.flags & SPARC_FLAG_32BIT) != 0) {
340
                regs->tpc &= 0xffffffff;
341
                address &= 0xffffffff;
342
        }
343
 
344
        down_read(&mm->mmap_sem);
345
        vma = find_vma(mm, address);
346
        if (!vma)
347
                goto bad_area;
348
 
349
        /* Pure DTLB misses do not tell us whether the fault causing
350
         * load/store/atomic was a write or not, it only says that there
351
         * was no match.  So in such a case we (carefully) read the
352
         * instruction to try and figure this out.  It's an optimization
353
         * so it's ok if we can't do this.
354
         *
355
         * Special hack, window spill/fill knows the exact fault type.
356
         */
357
        if (((fault_code &
358
              (FAULT_CODE_DTLB | FAULT_CODE_WRITE | FAULT_CODE_WINFIXUP)) == FAULT_CODE_DTLB) &&
359
            (vma->vm_flags & VM_WRITE) != 0) {
360
                insn = get_fault_insn(regs, 0);
361
                if (!insn)
362
                        goto continue_fault;
363
                if ((insn & 0xc0200000) == 0xc0200000 &&
364
                    (insn & 0x1780000) != 0x1680000) {
365
                        /* Don't bother updating thread struct value,
366
                         * because update_mmu_cache only cares which tlb
367
                         * the access came from.
368
                         */
369
                        fault_code |= FAULT_CODE_WRITE;
370
                }
371
        }
372
continue_fault:
373
 
374
        if (vma->vm_start <= address)
375
                goto good_area;
376
        if (!(vma->vm_flags & VM_GROWSDOWN))
377
                goto bad_area;
378
        if (!(fault_code & FAULT_CODE_WRITE)) {
379
                /* Non-faulting loads shouldn't expand stack. */
380
                insn = get_fault_insn(regs, insn);
381
                if ((insn & 0xc0800000) == 0xc0800000) {
382
                        unsigned char asi;
383
 
384
                        if (insn & 0x2000)
385
                                asi = (regs->tstate >> 24);
386
                        else
387
                                asi = (insn >> 5);
388
                        if ((asi & 0xf2) == 0x82)
389
                                goto bad_area;
390
                }
391
        }
392
        if (expand_stack(vma, address))
393
                goto bad_area;
394
        /*
395
         * Ok, we have a good vm_area for this memory access, so
396
         * we can handle it..
397
         */
398
good_area:
399
        si_code = SEGV_ACCERR;
400
        if (fault_code & FAULT_CODE_WRITE) {
401
                if (!(vma->vm_flags & VM_WRITE))
402
                        goto bad_area;
403
 
404
                /* Spitfire has an icache which does not snoop
405
                 * processor stores.  Later processors do...
406
                 */
407
                if (tlb_type == spitfire &&
408
                    (vma->vm_flags & VM_EXEC) != 0 &&
409
                    vma->vm_file != NULL)
410
                        current->thread.use_blkcommit = 1;
411
        } else {
412
                /* Allow reads even for write-only mappings */
413
                if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
414
                        goto bad_area;
415
        }
416
 
417
        switch (handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE))) {
418
        case 1:
419
                current->min_flt++;
420
                break;
421
        case 2:
422
                current->maj_flt++;
423
                break;
424
        case 0:
425
                goto do_sigbus;
426
        default:
427
                goto out_of_memory;
428
        }
429
 
430
        up_read(&mm->mmap_sem);
431
        goto fault_done;
432
 
433
        /*
434
         * Something tried to access memory that isn't in our memory map..
435
         * Fix it, but check if it's kernel or user first..
436
         */
437
bad_area:
438
        insn = get_fault_insn(regs, insn);
439
        up_read(&mm->mmap_sem);
440
 
441
handle_kernel_fault:
442
        do_kernel_fault(regs, si_code, fault_code, insn, address);
443
 
444
        goto fault_done;
445
 
446
/*
447
 * We ran out of memory, or some other thing happened to us that made
448
 * us unable to handle the page fault gracefully.
449
 */
450
out_of_memory:
451
        insn = get_fault_insn(regs, insn);
452
        up_read(&mm->mmap_sem);
453
        printk("VM: killing process %s\n", current->comm);
454
        if (!(regs->tstate & TSTATE_PRIV))
455
                do_exit(SIGKILL);
456
        goto handle_kernel_fault;
457
 
458
intr_or_no_mm:
459
        insn = get_fault_insn(regs, 0);
460
        goto handle_kernel_fault;
461
 
462
do_sigbus:
463
        insn = get_fault_insn(regs, insn);
464
        up_read(&mm->mmap_sem);
465
 
466
        /*
467
         * Send a sigbus, regardless of whether we were in kernel
468
         * or user mode.
469
         */
470
        do_fault_siginfo(BUS_ADRERR, SIGBUS, address);
471
 
472
        /* Kernel mode? Handle exceptions or die */
473
        if (regs->tstate & TSTATE_PRIV)
474
                goto handle_kernel_fault;
475
 
476
fault_done:
477
        /* These values are no longer needed, clear them. */
478
        current->thread.fault_code = 0;
479
        current->thread.use_blkcommit = 0;
480
        current->thread.fault_address = 0;
481
}

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.