/*
 *  linux/arch/or32/mm/fault.c
 *
 *  or32 version
 *    author(s): Matjaz Breskvar (phoenix@bsemi.com)
 *
 *  derived from cris, i386, m68k, ppc, sh ports.
 *
 *  changes:
 *  18. 11. 2003: Matjaz Breskvar (phoenix@bsemi.com)
 *    initial port to or32 architecture
 *
 *  based on:
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Modified by Cort Dougan and Paul Mackerras.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include <asm/uaccess.h>
#include <asm/or32-hf.h>

/* debug of low-level TLB reload */
#undef DEBUG

#ifdef DEBUG
#define D(x) x
#else
#define D(x)
#endif

/* debug of higher-level faults */
#define DPG(x) x
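/*
 * Usage note: D(printk(...)) compiles to nothing unless DEBUG is
 * defined above, while DPG(x) currently always expands to x, so the
 * higher-level fault diagnostics below are always compiled in.
 */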

#define NUM_TLB_ENTRIES 64
#define TLB_OFFSET(add) (((add) >> PAGE_SHIFT) & (NUM_TLB_ENTRIES-1))
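/*
 * Worked example (assuming the 8 KiB pages this port uses, i.e.
 * PAGE_SHIFT == 13): TLB_OFFSET(0x30002000) ==
 * (0x30002000 >> 13) & 63 == 0x18001 & 63 == 1, so that address falls
 * in TLB set 1. Addresses exactly NUM_TLB_ENTRIES pages (512 KiB)
 * apart map to the same set.
 */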

unsigned long pte_misses;       /* updated by do_page_fault() */
unsigned long pte_errors;       /* updated by do_page_fault() */

/* __PHX__ :: - check the vmalloc_fault in do_page_fault()
 *            - also look into include/asm-or32/mmu_context.h
 */
volatile pgd_t *current_pgd;

extern void die(char *, struct pt_regs *, long);

/*
 * This routine handles page faults.  It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 *
 * On a bad access it either delivers a signal to user space or, for
 * kernel-mode faults, jumps to an exception fixup or dies; otherwise
 * it repairs the page tables and returns.
 */

asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
                              unsigned long vector, int write_acc)
{
        struct task_struct *tsk;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        siginfo_t info;
        int fault;

        check_stack(NULL, __FILE__, __FUNCTION__, __LINE__);

        tsk = current;

        /*
         * We fault-in kernel-space virtual memory on-demand. The
         * 'reference' page table is init_mm.pgd.
         *
         * NOTE! We MUST NOT take any locks for this case. We may
         * be in an interrupt or a critical region, and should
         * only copy the information from the master page table,
         * nothing more.
         *
         * NOTE2: This is done so that, when updating the vmalloc
         * mappings, we don't have to walk all processes' pgdirs and
         * add the high mappings all at once. Instead we do it as they
         * are used. However, vmalloc'ed page entries have the
         * PAGE_GLOBAL bit set, so sometimes the TLB can use a
         * lingering entry.
         *
         * This verifies that the fault happens in kernel space
         * and that the fault was not a protection error.
         */

        D(phx_mmu("dpf :: addr %x, vect %x, write %x, regs %x, user %x\n",
               address, vector, write_acc, regs, user_mode(regs)));

        if (address >= VMALLOC_START &&
            (vector != 0x300 && vector != 0x400) &&
            !user_mode(regs))
                goto vmalloc_fault;
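        /*
         * In the OR1K exception layout, vector 0x300 is the data page
         * fault and 0x400 the instruction page fault; any other vector
         * arriving here for a kernel vmalloc address (e.g. a TLB miss)
         * is repaired from the reference page table at vmalloc_fault
         * below, without taking mmap_sem.
         */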

        /* we can and should enable interrupts at this point */
        local_irq_enable();

        mm = tsk->mm;
        info.si_code = SEGV_MAPERR;

        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault.
         */

        if (in_interrupt() || !mm)
                goto no_context;

        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);

        if (!vma)
                goto bad_area;

        if (vma->vm_start <= address)
                goto good_area;

        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;

        if (user_mode(regs)) {
                /*
                 * Accessing the stack below usp is always a bug.
                 * We get page-aligned addresses, so we can only check
                 * if we're within a page from usp, but that might be
                 * enough to catch brutal errors at least.
                 */
                if (address + PAGE_SIZE < regs->sp)
                        goto bad_area;
        }
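        /*
         * Illustration of the check above, with hypothetical values and
         * assuming 8 KiB pages: a fault at 0x7ffef000 while
         * regs->sp == 0x7fff8000 gives 0x7ffef000 + 0x2000 ==
         * 0x7fff1000 < 0x7fff8000 and is rejected, while a fault one
         * page below usp, e.g. at 0x7fff7000, is allowed to grow the
         * stack vma below.
         */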
        if (expand_stack(vma, address))
                goto bad_area;

        /*
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it.
         */

 good_area:
        info.si_code = SEGV_ACCERR;

        /* first do some preliminary protection checks */

        if (write_acc) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        } else {
                /* not present */
                if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                        goto bad_area;
        }

        /* are we trying to execute a non-executable area? */
        if ((vector == 0x400) && !(vma->vm_page_prot.pgprot & _PAGE_EXEC))
                goto bad_area;

        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */

        fault = handle_mm_fault(mm, vma, address, write_acc);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
        } /* RGD: modeled on the cris port */

        if (fault & VM_FAULT_MAJOR)
                tsk->maj_flt++;
        else
                tsk->min_flt++;
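        /*
         * VM_FAULT_MAJOR means servicing the fault required I/O (e.g.
         * reading the page in from disk or swap); minor faults were
         * satisfied from memory alone. These per-task counters surface
         * in /proc/<pid>/stat, among other places.
         */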

        up_read(&mm->mmap_sem);
        return;

        /*
         * Something tried to access memory that isn't in our memory map.
         * Fix it, but check if it's kernel or user first.
         */

 bad_area:
        up_read(&mm->mmap_sem);

 bad_area_nosemaphore:

        /* User mode accesses just cause a SIGSEGV */

        if (user_mode(regs)) {
                printk("USERSPACE: SIGSEGV (current %p, pid %d)\n",
                       current, current->pid);
                info.si_signo = SIGSEGV;
                info.si_errno = 0;
                /* info.si_code has been set above */
                info.si_addr = (void *)address;
                force_sig_info(SIGSEGV, &info, tsk);
                DPG(show_regs(regs));
                __asm__ __volatile__("l.nop 1");
                return;
        }
//      DPG(show_regs(regs));

 no_context:

        /* Are we prepared to handle this kernel fault?
         *
         * (The kernel has valid exception-points in the source
         *  when it accesses user-memory. When it fails in one
         *  of those points, we find it in a table and do a jump
         *  to some fixup code that loads an appropriate error
         *  code.)
         */
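        /*
         * In other words, search_exception_tables() maps the faulting
         * program counter to a fixup address recorded at build time by
         * the user-access macros; resuming at entry->fixup lets e.g.
         * copy_from_user() return -EFAULT instead of oopsing.
         */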

        {
                const struct exception_table_entry *entry;

                __asm__ __volatile__("l.nop 42");

                // phx_mmu("search exception table");
                if ((entry = search_exception_tables(regs->pc)) != NULL) {
                        /* Adjust the instruction pointer in the stackframe */
                        // phx_mmu("kernel: doing fixup at EPC=0x%lx to 0x%lx\n", regs->pc, fixup);
                        regs->pc = entry->fixup;
                        return;
                }
        }

        /*
         * Oops. The kernel tried to access some bad page. We'll have to
         * terminate things with extreme prejudice.
         */

        if ((unsigned long)(address) < PAGE_SIZE)
                printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
        else
                printk(KERN_ALERT "Unable to handle kernel access");
        printk(" at virtual address 0x%08lx\n", address);

        die("Oops", regs, write_acc);

        do_exit(SIGKILL);

        /*
         * We ran out of memory, or some other thing happened to us that made
         * us unable to handle the page fault gracefully.
         */

 out_of_memory:
        __asm__ __volatile__("l.nop 42");
        __asm__ __volatile__("l.nop 1");

        up_read(&mm->mmap_sem);
        printk("VM: killing process %s\n", tsk->comm);
        if (user_mode(regs))
                do_exit(SIGKILL);
        goto no_context;

 do_sigbus:
        up_read(&mm->mmap_sem);

        /*
         * Send a sigbus, regardless of whether we were in kernel
         * or user mode.
         */
        info.si_signo = SIGBUS;
        info.si_errno = 0;
        info.si_code = BUS_ADRERR;
        info.si_addr = (void *)address;
        force_sig_info(SIGBUS, &info, tsk);

        /* Kernel mode? Handle exceptions or die */
        if (!user_mode(regs))
                goto no_context;
        return;

vmalloc_fault:
        {
                /*
                 * Synchronize this task's top level page-table
                 * with the 'reference' page table.
                 *
                 * Use current_pgd instead of tsk->active_mm->pgd,
                 * since the latter might be unavailable if this
                 * code is executed in an inopportunely timed irq
                 * (like inside schedule(), between switch_mm and
                 *  switch_to...).
                 */

                int offset = pgd_index(address);
                pgd_t *pgd, *pgd_k;
                pmd_t *pmd, *pmd_k;
                pte_t *pte_k;

                phx_warn("do_page_fault(): vmalloc_fault will not work, "
                         "since current_pgd is not assigned a proper value anywhere;\n"
                         "anyhow, we don't need this at the moment\n");

                phx_mmu("vmalloc_fault");

                pgd = (pgd_t *)current_pgd + offset;
                pgd_k = init_mm.pgd + offset;
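                /*
                 * pgd_index() selects the same slot in both tables, so
                 * the copy below only touches the one top-level entry
                 * covering the faulting vmalloc address; everything
                 * below that level is shared with init_mm's reference
                 * table.
                 */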

                /* Since we're two-level, we don't need to do both
                 * set_pgd and set_pmd (they do the same thing). If
                 * we go three-level at some point, do the right thing
                 * with pgd_present and set_pgd here.
                 *
                 * Also, since the vmalloc area is global, we don't
                 * need to copy individual PTEs; it is enough to
                 * copy the pgd pointer into the pte page of the
                 * root task. If that is there, we'll find our pte if
                 * it exists.
                 */

                pmd = pmd_offset(pgd, address);
                pmd_k = pmd_offset(pgd_k, address);

                if (!pmd_present(*pmd_k))
                        goto bad_area_nosemaphore;

                set_pmd(pmd, *pmd_k);

                /* Make sure the actual PTE exists as well to
                 * catch kernel vmalloc-area accesses to non-mapped
                 * addresses. If we don't do this, this will just
                 * silently loop forever.
                 */

                pte_k = pte_offset_kernel(pmd_k, address);
                if (!pte_present(*pte_k))
                        goto no_context;

                return;
        }
}