OpenCores Subversion repository: or1k
https://opencores.org/ocsvn/or1k/or1k/trunk
or1k/trunk/linux/linux-2.4/arch/or32/mm/fault.c (rev 1765)
/*
 *  linux/arch/or32/mm/fault.c
 *
 *  or32 version
 *    author(s): Matjaz Breskvar (phoenix@opencores.org)
 *
 *  derived from cris, i386, m68k, ppc, sh ports.
 *
 *  changes:
 *  18. 11. 2003: Matjaz Breskvar (phoenix@opencores.org)
 *    initial port to or32 architecture
 *
 *  based on:
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Modified by Cort Dougan and Paul Mackerras.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/interrupt.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/system.h>
#include <asm/uaccess.h>

/* debug of low-level TLB reload */
#undef DEBUG

#ifdef DEBUG
#define D(x) x
#else
#define D(x)
#endif

/* debug of higher-level faults */
#define DPG(x) x

#define NUM_TLB_ENTRIES 64
#define TLB_OFFSET(add) (((add) >> PAGE_SHIFT) & (NUM_TLB_ENTRIES-1))
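
/*
 * TLB_OFFSET() picks the (direct-mapped) TLB set for an address by
 * dropping the page offset and keeping the low six bits of the page
 * number.  A worked example, assuming 8 KB pages (PAGE_SHIFT == 13):
 *
 *	TLB_OFFSET(0xc0004000) == (0xc0004000 >> 13) & 63
 *	                       == 0x60002 & 0x3f
 *	                       == 2
 */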

unsigned long pte_misses;       /* updated by do_page_fault() */
unsigned long pte_errors;       /* updated by do_page_fault() */

/* __PHX__ :: - check the vmalloc_fault in do_page_fault()
 *            - also look into include/asm-or32/mmu_context.h
 */
volatile pgd_t *current_pgd;

extern void die(char *, struct pt_regs *, long);

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * If this routine detects a bad access, it sends the offending task
 * a signal (or oopses in kernel mode); it has no return value.
 */
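
/*
 * A note on the arguments, as used below (the vector values follow
 * the or32 exception vector layout): 'address' is the faulting
 * virtual address, 'vector' is the exception vector that brought us
 * here (0x300 data page fault, 0x400 instruction page fault; the
 * TLB-miss handlers enter with their own vectors), and 'write_acc'
 * is non-zero for a write access.
 */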

asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
                              unsigned long vector, int write_acc)
{
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct * vma;
        unsigned long fixup;
        siginfo_t info;

        tsk = current;

        /*
         * We fault-in kernel-space virtual memory on-demand. The
         * 'reference' page table is init_mm.pgd.
         *
         * NOTE! We MUST NOT take any locks for this case. We may
         * be in an interrupt or a critical region, and should
         * only copy the information from the master page table,
         * nothing more.
         *
         * NOTE2: This is done so that, when updating the vmalloc
         * mappings, we don't have to walk all processes' pgdirs and
         * add the high mappings all at once. Instead we do it as they
         * are used. However, vmalloc'ed page entries have the PAGE_GLOBAL
         * bit set, so sometimes the TLB can use a lingering entry.
         *
         * This verifies that the fault happens in kernel space
         * and that the fault was not a protection error.
         */

        D(phx_mmu("dpf :: addr %lx, vect %lx, write %d, regs %p, user %x\n",
               address, vector, write_acc, regs, user_mode(regs)));

        if (address >= VMALLOC_START &&
            (vector != 0x300 && vector != 0x400) &&
            !user_mode(regs))
                goto vmalloc_fault;
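
        /*
         * A note on the check above: vectors 0x300 (data page
         * fault) and 0x400 (instruction page fault) are protection
         * faults, which the lazy vmalloc sync below cannot fix;
         * only kernel TLB misses on vmalloc addresses may take the
         * vmalloc_fault shortcut.
         */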

        /* we can and should enable interrupts at this point */
        sti();

        mm = tsk->mm;
        info.si_code = SEGV_MAPERR;

        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */

        if (in_interrupt() || !mm)
                goto no_context;

        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);

        if (!vma)
                goto bad_area;

        if (vma->vm_start <= address)
                goto good_area;

        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;

        if (user_mode(regs)) {
                /*
                 * accessing the stack below usp is always a bug.
                 * we get page-aligned addresses so we can only check
                 * if we're within a page from usp, but that might be
                 * enough to catch brutal errors at least.
                 */
                if (address + PAGE_SIZE < regs->sp)
                        goto bad_area;
        }
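        /*
         * Illustration of the check above: with usp == 0x7fff8000,
         * a page-aligned fault at 0x7ffec000 satisfies
         * 0x7ffec000 + PAGE_SIZE < 0x7fff8000 and is rejected as a
         * wild access, while a fault at usp - PAGE_SIZE fails the
         * test and falls through to expand_stack().  (Addresses are
         * illustrative only.)
         */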
        if (expand_stack(vma, address))
                goto bad_area;

        /*
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it..
         */

 good_area:
        info.si_code = SEGV_ACCERR;

        /* first do some preliminary protection checks */

        if (write_acc) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        } else {
                /* not present */
                if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                        goto bad_area;
        }

        /* are we trying to execute from a non-executable area? */
        if ((vector == 0x400) && !(vma->vm_page_prot.pgprot & _PAGE_EXEC))
                goto bad_area;

        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
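
        /*
         * handle_mm_fault() in 2.4 returns 1 when the fault was
         * handled as a minor fault (no I/O needed), 2 for a major
         * fault (the page had to be read in), 0 when the page could
         * not be made present (-> SIGBUS), and a negative value
         * when we are out of memory.
         */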

        switch (handle_mm_fault(mm, vma, address, write_acc)) {
        case 1:
                tsk->min_flt++;
                break;
        case 2:
                tsk->maj_flt++;
                break;
        case 0:
                goto do_sigbus;
        default:
                goto out_of_memory;
        }

        up_read(&mm->mmap_sem);
        return;

        /*
         * Something tried to access memory that isn't in our memory map..
         * Fix it, but check if it's kernel or user first..
         */

 bad_area:
        up_read(&mm->mmap_sem);

 bad_area_nosemaphore:

        /* User mode accesses just cause a SIGSEGV */

        if (user_mode(regs)) {
                printk("USERSPACE: SIGSEGV (current %p, pid %d)\n",
                       current, current->pid);
                info.si_signo = SIGSEGV;
                info.si_errno = 0;
                /* info.si_code has been set above */
                info.si_addr = (void *)address;
                force_sig_info(SIGSEGV, &info, tsk);
                DPG(show_regs(regs));
                __asm__ __volatile__("l.nop 1");
                return;
        }
//      DPG(show_regs(regs));

 no_context:

        /* Are we prepared to handle this kernel fault?
         *
         * (The kernel has valid exception-points in the source
         *  when it accesses user-memory. When it fails in one
         *  of those points, we find it in a table and do a jump
         *  to some fixup code that loads an appropriate error
         *  code)
         */
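
        /*
         * Roughly, each guarded user access contributes an entry
         * (faulting-instruction address, fixup address) to the
         * __ex_table section, along the lines of the generic 2.4
         * pattern:
         *
         *	.section __ex_table,"a"
         *		.long faulting_insn, fixup_code
         *	.previous
         *
         * search_exception_table(regs->pc) then returns the fixup
         * address registered for the faulting instruction, or 0.
         */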

        phx_mmu("search exception table");
        if ((fixup = search_exception_table(regs->pc)) != 0) {
                /* Adjust the instruction pointer in the stackframe */
                phx_mmu("kernel: doing fixup at EPC=0x%lx to 0x%lx\n", regs->pc, fixup);
                regs->pc = fixup;
                return;
        }

        /*
         * Oops. The kernel tried to access some bad page. We'll have to
         * terminate things with extreme prejudice.
         */

        if ((unsigned long)(address) < PAGE_SIZE)
                printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
        else
                printk(KERN_ALERT "Unable to handle kernel access");
        printk(" at virtual address 0x%08lx\n", address);

        die("Oops", regs, write_acc);

        do_exit(SIGKILL);

        /*
         * We ran out of memory, or some other thing happened to us that made
         * us unable to handle the page fault gracefully.
         */

 out_of_memory:
        up_read(&mm->mmap_sem);
        printk("VM: killing process %s\n", tsk->comm);
        if (user_mode(regs))
                do_exit(SIGKILL);
        goto no_context;

 do_sigbus:
        up_read(&mm->mmap_sem);

        /*
         * Send a sigbus, regardless of whether we were in kernel
         * or user mode.
         */
        info.si_signo = SIGBUS;
        info.si_errno = 0;
        info.si_code = BUS_ADRERR;
        info.si_addr = (void *)address;
        force_sig_info(SIGBUS, &info, tsk);

        /* Kernel mode? Handle exceptions or die */
        if (!user_mode(regs))
                goto no_context;
        return;

vmalloc_fault:
        {
                /*
                 * Synchronize this task's top level page-table
                 * with the 'reference' page table.
                 *
                 * Use current_pgd instead of tsk->active_mm->pgd
                 * since the latter might be unavailable if this
                 * code is executed in an unluckily timed irq
                 * (like inside schedule() between switch_mm and
                 *  switch_to...).
                 */

                int offset = pgd_index(address);
                pgd_t *pgd, *pgd_k;
                pmd_t *pmd, *pmd_k;
                pte_t *pte_k;

                phx_warn("do_page_fault(): vmalloc_fault will not work "
                         "until current_pgd is assigned a proper value somewhere;\n"
                         "anyhow we don't need this at the moment\n");

                phx_mmu("vmalloc_fault");

                pgd = (pgd_t *)current_pgd + offset;
                pgd_k = init_mm.pgd + offset;

                /* Since we're two-level, we don't need to do both
                 * set_pgd and set_pmd (they do the same thing). If
                 * we go three-level at some point, do the right thing
                 * with pgd_present and set_pgd here.
                 *
                 * Also, since the vmalloc area is global, we don't
                 * need to copy individual PTEs; it is enough to
                 * copy the pgd pointer into the pte page of the
                 * root task. If that is there, we'll find our pte if
                 * it exists.
                 */

                pmd = pmd_offset(pgd, address);
                pmd_k = pmd_offset(pgd_k, address);

                if (!pmd_present(*pmd_k))
                        goto bad_area_nosemaphore;

                set_pmd(pmd, *pmd_k);

                /* Make sure the actual PTE exists as well to
                 * catch kernel vmalloc-area accesses to non-mapped
                 * addresses. If we don't do this, this will just
                 * silently loop forever.
                 */

                pte_k = pte_offset(pmd_k, address);
                if (!pte_present(*pte_k))
                        goto no_context;

                return;
        }
}
