
/*
 *  linux/arch/or32/mm/fault.c
 *
 *  or32 version
 *    author(s): Matjaz Breskvar (phoenix@opencores.org)
 *
 *  derived from cris, i386, m68k, ppc, sh ports.
 *
 *  changes:
 *  18. 11. 2003: Matjaz Breskvar (phoenix@opencores.org)
 *    initial port to or32 architecture
 *
 *  based on:
 *
 *  PowerPC version 
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Modified by Cort Dougan and Paul Mackerras.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
 
#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
 
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/system.h>
#include <asm/uaccess.h>
 
/* debug of low-level TLB reload */
#undef DEBUG
 
#ifdef DEBUG
#define D(x) x
#else
#define D(x)
#endif
 
/* debug of higher-level faults */
#define DPG(x) x
 
#define NUM_TLB_ENTRIES 64
#define TLB_OFFSET(add) (((add) >> PAGE_SHIFT) & (NUM_TLB_ENTRIES-1))
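/* Worked example (assuming the or32 8 KB page size, PAGE_SHIFT == 13):
 *   TLB_OFFSET(0x12345678) == (0x12345678 >> 13) & 63 == 0x91a2 & 0x3f == 34
 * so virtual address 0x12345678 falls in TLB set 34; addresses a multiple
 * of NUM_TLB_ENTRIES pages apart collide in the same set.
 */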
 
unsigned long pte_misses;	/* updated by do_page_fault() */
unsigned long pte_errors;	/* updated by do_page_fault() */
 
/* __PHX__ :: - check the vmalloc_fault in do_page_fault()
 *            - also look into include/asm-or32/mmu_context.h
 */
volatile pgd_t *current_pgd;
 
extern void die(char *, struct pt_regs *, long);
 
/*
 * This routine handles page faults.  It determines the address and
 * the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * It does not return a value: on a bad access it raises the
 * appropriate signal (or applies a kernel exception-table fixup).
 */
 
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
			      unsigned long vector, int write_acc)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct * vma;
	unsigned long fixup;
	siginfo_t info;
 
	tsk = current;
 
	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * NOTE2: This is done so that, when updating the vmalloc
	 * mappings, we don't have to walk all processes' pgdirs and
	 * add the high mappings all at once. Instead we do it as they
	 * are used. However, vmalloc'ed page entries have the PAGE_GLOBAL
	 * bit set, so sometimes the TLB can use a lingering entry.
	 *
	 * This verifies that the fault happens in kernel space
	 * and that the fault was not a protection error.
	 */
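	/* (On or32, vector 0x300 is the data page-fault exception and
	 *  0x400 the instruction page-fault exception; a kernel-space
	 *  fault arriving on any other vector, i.e. a TLB miss, is the
	 *  lazy-synchronization case handled at vmalloc_fault below.)
	 */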
 
	D(phx_mmu("dpf :: addr %lx, vect %lx, write %x, regs %p, user %x\n",
	       address, vector, write_acc, regs, user_mode(regs)));
 
	if (address >= VMALLOC_START &&
	    (vector != 0x300 && vector != 0x400) &&
	    !user_mode(regs))
		goto vmalloc_fault;
 
	/* we can and should enable interrupts at this point */
	sti();
 
	mm = tsk->mm;
	info.si_code = SEGV_MAPERR;
 
	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
 
	if (in_interrupt() || !mm)
		goto no_context;
 
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
 
	if (!vma)
		goto bad_area;
 
	if (vma->vm_start <= address)
		goto good_area;
 
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
 
	if (user_mode(regs)) {
		/*
		 * accessing the stack below usp is always a bug.
		 * we get page-aligned addresses so we can only check
		 * if we're within a page from usp, but that might be
		 * enough to catch brutal errors at least.
		 */
		if (address + PAGE_SIZE < regs->sp)
			goto bad_area;
	}
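	/* Worked example of the check above (assuming 8 KB pages): a
	 * fault at usp - 0x2004 is rejected, since address + PAGE_SIZE
	 * still falls below usp, while a fault anywhere within one page
	 * below usp falls through to expand_stack().
	 */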
	if (expand_stack(vma, address))
		goto bad_area;
 
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
 
 good_area:
	info.si_code = SEGV_ACCERR;
 
	/* first do some preliminary protection checks */
 
	if (write_acc) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		/* not present */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
 
	/* are we trying to execute from a non-executable area? */
	if ((vector == 0x400) && !(vma->vm_page_prot.pgprot & _PAGE_EXEC))
		goto bad_area;
 
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
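	/* 2.4 convention: handle_mm_fault() returns 1 for a minor fault,
	 * 2 for a major fault (the page had to be read in from disk),
	 * 0 when no page could be mapped at the address (SIGBUS), and
	 * a negative value on allocation failure.
	 */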
 
	switch (handle_mm_fault(mm, vma, address, write_acc)) {
	case 1:
		tsk->min_flt++;
		break;
	case 2:
		tsk->maj_flt++;
		break;
	case 0:
		goto do_sigbus;
	default:
		goto out_of_memory;
	}
 
	up_read(&mm->mmap_sem);
	return;
 
	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
 
 bad_area:
	up_read(&mm->mmap_sem);
 
 bad_area_nosemaphore:
 
	/* User mode accesses just cause a SIGSEGV */
 
	if (user_mode(regs)) {
	        printk("USERSPACE: SIGSEGV (current %p, pid %d)\n", 
	               current, current->pid);
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void *)address;
		force_sig_info(SIGSEGV, &info, tsk);
		DPG(show_regs(regs));
		__asm__ __volatile__("l.nop 1");
		return;
	}
//	DPG(show_regs(regs));
 
 no_context:
 
	/* Are we prepared to handle this kernel fault?
	 *
	 * (The kernel has valid exception points in the source
	 *  where it accesses user memory. When it faults at one
	 *  of those points, we find the entry in a table and jump
	 *  to some fixup code that loads an appropriate error
	 *  code.)
	 */
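	/* Illustrative sketch (the exact asm is port-specific; this is
	 * only to make the mechanism concrete): a user-access helper
	 * tags its faulting instruction with an __ex_table entry,
	 * roughly:
	 *
	 *	1:	l.lwz	%0, 0(%1)	# may fault on a bad user pointer
	 *		.section __ex_table, "a"
	 *		.word	1b, 3f		# (faulting insn, fixup address)
	 *		.previous
	 *	3:	# fixup: load -EFAULT as the error code and resume
	 *
	 * so that search_exception_table(regs->pc) can map the faulting
	 * pc to the fixup address we jump to below.
	 */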
 
	phx_mmu("search exception table");
	if ((fixup = search_exception_table(regs->pc)) != 0) {
		/* Adjust the instruction pointer in the stackframe */
		phx_mmu("kernel: doing fixup at EPC=0x%x to 0x%lx\n", regs->pc, fixup);
		regs->pc = fixup;
		return;
	}
 
	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
 
	if ((unsigned long) (address) < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT "Unable to handle kernel access");
	printk(" at virtual address 0x%08lx\n",address);
 
	die("Oops", regs, write_acc);
 
	do_exit(SIGKILL);
 
	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
 
 out_of_memory:
	up_read(&mm->mmap_sem);
	printk("VM: killing process %s\n", tsk->comm);
	if (user_mode(regs))
		do_exit(SIGKILL);
	goto no_context;
 
 do_sigbus:
	up_read(&mm->mmap_sem);
 
	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void *)address;
	force_sig_info(SIGBUS, &info, tsk);
 
	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
	return;
 
vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Use current_pgd instead of tsk->active_mm->pgd
		 * since the latter might be unavailable if this
		 * code is executed in a misfortunately run irq
		 * (like inside schedule() between switch_mm and
		 *  switch_to...).
		 */
 
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		phx_warn("do_page_fault(): vmalloc_fault will not work "
			 "until current_pgd is assigned a proper value somewhere;\n"
			 "anyhow we don't need this at the moment\n");

		phx_mmu("vmalloc_fault");
 
		pgd = (pgd_t *)current_pgd + offset;
		pgd_k = init_mm.pgd + offset;
 
		/* Since we're two-level, we don't need to do both
		 * set_pgd and set_pmd (they do the same thing). If
		 * we go three-level at some point, do the right thing
		 * with pgd_present and set_pgd here. 
		 * 
		 * Also, since the vmalloc area is global, we don't
		 * need to copy individual PTE's, it is enough to
		 * copy the pgd pointer into the pte page of the
		 * root task. If that is there, we'll find our pte if
		 * it exists.
		 */
 
		pmd = pmd_offset(pgd, address);
		pmd_k = pmd_offset(pgd_k, address);
 
		if (!pmd_present(*pmd_k))
			goto bad_area_nosemaphore;
 
		set_pmd(pmd, *pmd_k);
 
		/* Make sure the actual PTE exists as well to
		 * catch kernel vmalloc-area accesses to non-mapped
		 * addresses. If we don't do this, this will just
		 * silently loop forever.
		 */
 
		pte_k = pte_offset(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;
 
		return;
	}
}
 
