OpenCores Subversion Repositories: or1k_old
URL: https://opencores.org/ocsvn/or1k_old/or1k_old/trunk
rc203soc/sw/uClinux/mm/memory.c - diff between revs 1634 and 1765

/*
 *  linux/mm/memory.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * demand-loading started 01.12.91 - seems it is high on the list of
 * things wanted, and it should be easy to implement. - Linus
 */

/*
 * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
 * pages started 02.12.91, seems to work. - Linus.
 *
 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
 * would have taken more than the 6M I have free, but it worked well as
 * far as I could see.
 *
 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
 */

/*
 * Real VM (paging to/from disk) started 18.12.91. Much more work and
 * thought has to go into this. Oh, well..
 * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
 *              Found it. Everything seems to work now.
 * 20.12.91  -  Ok, making the swap-device changeable like the root.
 */

/*
 * 05.04.94  -  Multi-page memory management added for v1.1.
 *              Idea by Alex Bligh (alex@cconcepts.co.uk)
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/head.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>

#include <asm/system.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/string.h>

unsigned long high_memory = 0;

/*
 * We special-case the C-O-W ZERO_PAGE, because it's such
 * a common occurrence (no need to read the page to know
 * that it's zero - better for the cache and memory subsystem).
 */
static inline void copy_page(unsigned long from, unsigned long to)
{
        if (from == ZERO_PAGE) {
                memset((void *) to, 0, PAGE_SIZE);
                return;
        }
        memcpy((void *) to, (void *) from, PAGE_SIZE);
}

#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)

mem_map_t * mem_map = NULL;

/*
 * oom() prints a message (so that the user knows why the process died),
 * and gives the process an untrappable SIGKILL.
 */
void oom(struct task_struct * task)
{
        printk("\nOut of memory for %s.\n", task->comm);
        task->sig->action[SIGKILL-1].sa_handler = NULL;
        task->blocked &= ~(1<<(SIGKILL-1));
        send_sig(SIGKILL,task,1);
}

/*
 * Note: this doesn't free the actual pages themselves. That
 * has been handled earlier when unmapping all the memory regions.
 */
static inline void free_one_pmd(pmd_t * dir)
{
        pte_t * pte;

        if (pmd_none(*dir))
                return;
        if (pmd_bad(*dir)) {
                printk("free_one_pmd: bad directory entry %08lx\n", pmd_val(*dir));
                pmd_clear(dir);
                return;
        }
        pte = pte_offset(dir, 0);
        pmd_clear(dir);
        pte_free(pte);
}

static inline void free_one_pgd(pgd_t * dir)
{
        int j;
        pmd_t * pmd;

        if (pgd_none(*dir))
                return;
        if (pgd_bad(*dir)) {
                printk("free_one_pgd: bad directory entry %08lx\n", pgd_val(*dir));
                pgd_clear(dir);
                return;
        }
        pmd = pmd_offset(dir, 0);
        pgd_clear(dir);
        for (j = 0; j < PTRS_PER_PMD ; j++)
                free_one_pmd(pmd+j);
        pmd_free(pmd);
}

/*
 * This function clears all user-level page tables of a process - this
 * is needed by execve(), so that old pages aren't in the way.
 */
void clear_page_tables(struct task_struct * tsk)
{
        int i;
        pgd_t * page_dir;

        page_dir = tsk->mm->pgd;
        if (!page_dir || page_dir == swapper_pg_dir) {
                printk("%s trying to clear kernel page-directory: not good\n", tsk->comm);
                return;
        }
        flush_cache_mm(tsk->mm);
        for (i = 0 ; i < USER_PTRS_PER_PGD ; i++)
                free_one_pgd(page_dir + i);
        flush_tlb_mm(tsk->mm);
}

/*
 * This function frees up all page tables of a process when it exits. It
 * is the same as "clear_page_tables()", except it also changes the process'
 * page table directory to the kernel page tables and then frees the old
 * page table directory.
 */
void free_page_tables(struct mm_struct * mm)
{
        int i;
        pgd_t * page_dir;

        page_dir = mm->pgd;
        if (!page_dir || page_dir == swapper_pg_dir) {
                printk("Trying to free kernel page-directory: not good\n");
                return;
        }
        for (i = 0 ; i < USER_PTRS_PER_PGD ; i++)
                free_one_pgd(page_dir + i);
        pgd_free(page_dir);
}

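/*
 * Give "tsk" a fresh page directory: only the kernel entries are
 * copied over from the init_mm page directory, the user part starts
 * out empty, and the new directory is installed with SET_PAGE_DIR().
 */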
int new_page_tables(struct task_struct * tsk)
{
        pgd_t * page_dir, * new_pg;

        if (!(new_pg = pgd_alloc()))
                return -ENOMEM;
        page_dir = pgd_offset(&init_mm, 0);
        flush_cache_mm(tsk->mm);
        memcpy(new_pg + USER_PTRS_PER_PGD, page_dir + USER_PTRS_PER_PGD,
               (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof (pgd_t));
        flush_tlb_mm(tsk->mm);
        SET_PAGE_DIR(tsk, new_pg);
        tsk->mm->pgd = new_pg;
        return 0;
}

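/*
 * Copy one pte from the parent into the child at fork time.  Swap
 * entries just get their reference count bumped.  Ordinary present
 * pages are write-protected in both tasks when "cow" is set and have
 * their mem_map count incremented; reserved or out-of-range pages are
 * shared as-is.
 */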
static inline void copy_one_pte(pte_t * old_pte, pte_t * new_pte, int cow)
{
        pte_t pte = *old_pte;
        unsigned long page_nr;

        if (pte_none(pte))
                return;
        if (!pte_present(pte)) {
                swap_duplicate(pte_val(pte));
                set_pte(new_pte, pte);
                return;
        }
        page_nr = MAP_NR(pte_page(pte));
        if (page_nr >= MAP_NR(high_memory) || PageReserved(mem_map+page_nr)) {
                set_pte(new_pte, pte);
                return;
        }
        if (cow)
                pte = pte_wrprotect(pte);
        if (delete_from_swap_cache(page_nr))
                pte = pte_mkdirty(pte);
        set_pte(new_pte, pte_mkold(pte));
        set_pte(old_pte, pte);
        mem_map[page_nr].count++;
}

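/*
 * Copy the ptes covering one pmd-sized chunk of the address range,
 * allocating the destination page table if it isn't there yet.
 */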
static inline int copy_pte_range(pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long address, unsigned long size, int cow)
{
        pte_t * src_pte, * dst_pte;
        unsigned long end;

        if (pmd_none(*src_pmd))
                return 0;
        if (pmd_bad(*src_pmd)) {
                printk("copy_pte_range: bad pmd (%08lx)\n", pmd_val(*src_pmd));
                pmd_clear(src_pmd);
                return 0;
        }
        src_pte = pte_offset(src_pmd, address);
        if (pmd_none(*dst_pmd)) {
                if (!pte_alloc(dst_pmd, 0))
                        return -ENOMEM;
        }
        dst_pte = pte_offset(dst_pmd, address);
        address &= ~PMD_MASK;
        end = address + size;
        if (end >= PMD_SIZE)
                end = PMD_SIZE;
        do {
                /* I would like to switch arguments here, to make it
                 * consistent with copy_xxx_range and memcpy syntax.
                 */
                copy_one_pte(src_pte++, dst_pte++, cow);
                address += PAGE_SIZE;
        } while (address < end);
        return 0;
}

static inline int copy_pmd_range(pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long address, unsigned long size, int cow)
{
        pmd_t * src_pmd, * dst_pmd;
        unsigned long end;
        int error = 0;

        if (pgd_none(*src_pgd))
                return 0;
        if (pgd_bad(*src_pgd)) {
                printk("copy_pmd_range: bad pgd (%08lx)\n", pgd_val(*src_pgd));
                pgd_clear(src_pgd);
                return 0;
        }
        src_pmd = pmd_offset(src_pgd, address);
        if (pgd_none(*dst_pgd)) {
                if (!pmd_alloc(dst_pgd, 0))
                        return -ENOMEM;
        }
        dst_pmd = pmd_offset(dst_pgd, address);
        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        do {
                error = copy_pte_range(dst_pmd++, src_pmd++, address, end - address, cow);
                if (error)
                        break;
                address = (address + PMD_SIZE) & PMD_MASK;
        } while (address < end);
        return error;
}

/*
 * copy one vm_area from one task to the other. Assumes the page tables
 * already present in the new task to be cleared in the whole range
 * covered by this vma.
 */
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
                        struct vm_area_struct *vma)
{
        pgd_t * src_pgd, * dst_pgd;
        unsigned long address = vma->vm_start;
        unsigned long end = vma->vm_end;
        int error = 0, cow;

        cow = (vma->vm_flags & (VM_SHARED | VM_WRITE)) == VM_WRITE;
        src_pgd = pgd_offset(src, address);
        dst_pgd = pgd_offset(dst, address);
        flush_cache_range(src, vma->vm_start, vma->vm_end);
        flush_cache_range(dst, vma->vm_start, vma->vm_end);
        while (address < end) {
                error = copy_pmd_range(dst_pgd++, src_pgd++, address, end - address, cow);
                if (error)
                        break;
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
        }
        /* Note that the src ptes get c-o-w treatment, so they change too. */
        flush_tlb_range(src, vma->vm_start, vma->vm_end);
        flush_tlb_range(dst, vma->vm_start, vma->vm_end);
        return error;
}

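/*
 * Release whatever a pte points at: free the page (and drop the rss
 * count) if it is an ordinary present page, otherwise free the swap
 * entry.  Reserved and out-of-range pages are left alone.
 */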
static inline void free_pte(pte_t page)
{
        if (pte_present(page)) {
                unsigned long addr = pte_page(page);
                if (addr >= high_memory || PageReserved(mem_map+MAP_NR(addr)))
                        return;
                free_page(addr);
                if (current->mm->rss <= 0)
                        return;
                current->mm->rss--;
                return;
        }
        swap_free(pte_val(page));
}

static inline void forget_pte(pte_t page)
{
        if (!pte_none(page)) {
                printk("forget_pte: old mapping existed!\n");
                free_pte(page);
        }
}

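/*
 * Clear and release every pte in the part of [address, address+size)
 * that falls within a single page table.
 */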
static inline void zap_pte_range(pmd_t * pmd, unsigned long address, unsigned long size)
{
        pte_t * pte;

        if (pmd_none(*pmd))
                return;
        if (pmd_bad(*pmd)) {
                printk("zap_pte_range: bad pmd (%08lx)\n", pmd_val(*pmd));
                pmd_clear(pmd);
                return;
        }
        pte = pte_offset(pmd, address);
        address &= ~PMD_MASK;
        if (address + size > PMD_SIZE)
                size = PMD_SIZE - address;
        size >>= PAGE_SHIFT;
        for (;;) {
                pte_t page;
                if (!size)
                        break;
                page = *pte;
                pte++;
                size--;
                if (pte_none(page))
                        continue;
                pte_clear(pte-1);
                free_pte(page);
        }
}

static inline void zap_pmd_range(pgd_t * dir, unsigned long address, unsigned long size)
{
        pmd_t * pmd;
        unsigned long end;

        if (pgd_none(*dir))
                return;
        if (pgd_bad(*dir)) {
                printk("zap_pmd_range: bad pgd (%08lx)\n", pgd_val(*dir));
                pgd_clear(dir);
                return;
        }
        pmd = pmd_offset(dir, address);
        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        do {
                zap_pte_range(pmd, address, end - address);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address < end);
}

/*
 * remove user pages in a given range.
 */
int zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size)
{
        pgd_t * dir;
        unsigned long end = address + size;

        dir = pgd_offset(mm, address);
        flush_cache_range(mm, end - size, end);
        while (address < end) {
                zap_pmd_range(dir, address, end - address);
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        }
        flush_tlb_range(mm, end - size, end);
        return 0;
}

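/*
 * The zeromap_*() functions below point an address range at the global
 * ZERO_PAGE through write-protected ptes, so reads return zeroes and
 * the first write faults in a private copy via do_wp_page().
 */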
static inline void zeromap_pte_range(pte_t * pte, unsigned long address, unsigned long size, pte_t zero_pte)
{
        unsigned long end;

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        do {
                pte_t oldpage = *pte;
                set_pte(pte, zero_pte);
                forget_pte(oldpage);
                address += PAGE_SIZE;
                pte++;
        } while (address < end);
}

static inline int zeromap_pmd_range(pmd_t * pmd, unsigned long address, unsigned long size, pte_t zero_pte)
{
        unsigned long end;

        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        do {
                pte_t * pte = pte_alloc(pmd, address);
                if (!pte)
                        return -ENOMEM;
                zeromap_pte_range(pte, address, end - address, zero_pte);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address < end);
        return 0;
}

int zeromap_page_range(unsigned long address, unsigned long size, pgprot_t prot)
{
        int error = 0;
        pgd_t * dir;
        unsigned long beg = address;
        unsigned long end = address + size;
        pte_t zero_pte;

        zero_pte = pte_wrprotect(mk_pte(ZERO_PAGE, prot));
        dir = pgd_offset(current->mm, address);
        flush_cache_range(current->mm, beg, end);
        while (address < end) {
                pmd_t *pmd = pmd_alloc(dir, address);
                error = -ENOMEM;
                if (!pmd)
                        break;
                error = zeromap_pmd_range(pmd, address, end - address, zero_pte);
                if (error)
                        break;
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        }
        flush_tlb_range(current->mm, beg, end);
        return error;
}

/*
 * maps a range of physical memory into the requested pages. the old
 * mappings are removed. any references to nonexistent pages result
 * in null mappings (currently treated as "copy-on-access")
 */
static inline void remap_pte_range(pte_t * pte, unsigned long address, unsigned long size,
        unsigned long offset, pgprot_t prot)
{
        unsigned long end;

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        do {
                pte_t oldpage = *pte;
                pte_clear(pte);
                if (offset >= high_memory || PageReserved(mem_map+MAP_NR(offset)))
                        set_pte(pte, mk_pte(offset, prot));
                forget_pte(oldpage);
                address += PAGE_SIZE;
                offset += PAGE_SIZE;
                pte++;
        } while (address < end);
}

static inline int remap_pmd_range(pmd_t * pmd, unsigned long address, unsigned long size,
        unsigned long offset, pgprot_t prot)
{
        unsigned long end;

        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        offset -= address;
        do {
                pte_t * pte = pte_alloc(pmd, address);
                if (!pte)
                        return -ENOMEM;
                remap_pte_range(pte, address, end - address, address + offset, prot);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address < end);
        return 0;
}

int remap_page_range(unsigned long from, unsigned long offset, unsigned long size, pgprot_t prot)
{
        int error = 0;
        pgd_t * dir;
        unsigned long beg = from;
        unsigned long end = from + size;

        offset -= from;
        dir = pgd_offset(current->mm, from);
        flush_cache_range(current->mm, beg, end);
        while (from < end) {
                pmd_t *pmd = pmd_alloc(dir, from);
                error = -ENOMEM;
                if (!pmd)
                        break;
                error = remap_pmd_range(pmd, from, end - from, offset + from, prot);
                if (error)
                        break;
                from = (from + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        }
        flush_tlb_range(current->mm, beg, end);
        return error;
}

/*
 * sanity-check function..
 */
static void put_page(pte_t * page_table, pte_t pte)
{
        if (!pte_none(*page_table)) {
                free_page(pte_page(pte));
                return;
        }
/* no need for flush_tlb */
        set_pte(page_table, pte);
}

/*
 * This routine is used to map in a page into an address space: needed by
 * execve() for the initial stack and environment pages.
 */
unsigned long put_dirty_page(struct task_struct * tsk, unsigned long page, unsigned long address)
{
        pgd_t * pgd;
        pmd_t * pmd;
        pte_t * pte;

        if (page >= high_memory)
                printk("put_dirty_page: trying to put page %08lx at %08lx\n",page,address);
        if (mem_map[MAP_NR(page)].count != 1)
                printk("mem_map disagrees with %08lx at %08lx\n",page,address);
        pgd = pgd_offset(tsk->mm,address);
        pmd = pmd_alloc(pgd, address);
        if (!pmd) {
                free_page(page);
                oom(tsk);
                return 0;
        }
        pte = pte_alloc(pmd, address);
        if (!pte) {
                free_page(page);
                oom(tsk);
                return 0;
        }
        if (!pte_none(*pte)) {
                printk("put_dirty_page: page already exists\n");
                free_page(page);
                return 0;
        }
        flush_page_to_ram(page);
        set_pte(pte, pte_mkwrite(pte_mkdirty(mk_pte(page, PAGE_COPY))));
/* no need for invalidate */
        return page;
}

/*
 * This routine handles present pages, when users try to write
 * to a shared page. It is done by copying the page to a new address
 * and decrementing the shared-page counter for the old page.
 *
 * Goto-purists beware: the only reason for goto's here is that it results
 * in better assembly code.. The "default" path will see no jumps at all.
 *
 * Note that this routine assumes that the protection checks have been
 * done by the caller (the low-level page fault routine in most cases).
 * Thus we can safely just mark it writable once we've done any necessary
 * COW.
 *
 * We also mark the page dirty at this point even though the page will
 * change only once the write actually happens. This avoids a few races,
 * and potentially makes it more efficient.
 */
void do_wp_page(struct task_struct * tsk, struct vm_area_struct * vma,
        unsigned long address, int write_access)
{
        pgd_t *page_dir;
        pmd_t *page_middle;
        pte_t *page_table, pte;
        unsigned long old_page, new_page;

        new_page = __get_free_page(GFP_KERNEL);
        page_dir = pgd_offset(vma->vm_mm, address);
        if (pgd_none(*page_dir))
                goto end_wp_page;
        if (pgd_bad(*page_dir))
                goto bad_wp_pagedir;
        page_middle = pmd_offset(page_dir, address);
        if (pmd_none(*page_middle))
                goto end_wp_page;
        if (pmd_bad(*page_middle))
                goto bad_wp_pagemiddle;
        page_table = pte_offset(page_middle, address);
        pte = *page_table;
        if (!pte_present(pte))
                goto end_wp_page;
        if (pte_write(pte))
                goto end_wp_page;
        old_page = pte_page(pte);
        if (old_page >= high_memory)
                goto bad_wp_page;
        tsk->min_flt++;
        /*
         * Do we need to copy?
         */
        if (mem_map[MAP_NR(old_page)].count != 1) {
                if (new_page) {
                        if (PageReserved(mem_map + MAP_NR(old_page)))
                                ++vma->vm_mm->rss;
                        copy_page(old_page,new_page);
                        flush_page_to_ram(old_page);
                        flush_page_to_ram(new_page);
                        flush_cache_page(vma, address);
                        set_pte(page_table, pte_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot))));
                        free_page(old_page);
                        flush_tlb_page(vma, address);
                        return;
                }
                flush_cache_page(vma, address);
                set_pte(page_table, BAD_PAGE);
                flush_tlb_page(vma, address);
                free_page(old_page);
                oom(tsk);
                return;
        }
        flush_cache_page(vma, address);
        set_pte(page_table, pte_mkdirty(pte_mkwrite(pte)));
        flush_tlb_page(vma, address);
        if (new_page)
                free_page(new_page);
        return;
bad_wp_page:
        printk("do_wp_page: bogus page at address %08lx (%08lx)\n",address,old_page);
        send_sig(SIGKILL, tsk, 1);
        goto end_wp_page;
bad_wp_pagemiddle:
        printk("do_wp_page: bogus page-middle at address %08lx (%08lx)\n", address, pmd_val(*page_middle));
        send_sig(SIGKILL, tsk, 1);
        goto end_wp_page;
bad_wp_pagedir:
        printk("do_wp_page: bogus page-dir entry at address %08lx (%08lx)\n", address, pgd_val(*page_dir));
        send_sig(SIGKILL, tsk, 1);
end_wp_page:
        if (new_page)
                free_page(new_page);
        return;
}

/*
 * Ugly, ugly, but the goto's result in better assembly..
 */
int verify_area(int type, const void * addr, unsigned long size)
{
        struct vm_area_struct * vma;
        unsigned long start = (unsigned long) addr;

        /* If the current user space is mapped to kernel space (for the
         * case where we use a fake user buffer with get_fs/set_fs()) we
         * don't expect to find the address in the user vm map.
         */
        if (!size || get_fs() == KERNEL_DS)
                return 0;

        vma = find_vma(current->mm, start);
        if (!vma)
                goto bad_area;
        if (vma->vm_start > start)
                goto check_stack;

good_area:
        if (type == VERIFY_WRITE)
                goto check_write;
        for (;;) {
                struct vm_area_struct * next;
                if (!(vma->vm_flags & VM_READ))
                        goto bad_area;
                if (vma->vm_end - start >= size)
                        return 0;
                next = vma->vm_next;
                if (!next || vma->vm_end != next->vm_start)
                        goto bad_area;
                vma = next;
        }

check_write:
        if (!(vma->vm_flags & VM_WRITE))
                goto bad_area;
        if (!wp_works_ok)
                goto check_wp_fault_by_hand;
        for (;;) {
                if (vma->vm_end - start >= size)
                        break;
                if (!vma->vm_next || vma->vm_end != vma->vm_next->vm_start)
                        goto bad_area;
                vma = vma->vm_next;
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        }
        return 0;

check_wp_fault_by_hand:
        size--;
        size += start & ~PAGE_MASK;
        size >>= PAGE_SHIFT;
        start &= PAGE_MASK;

        for (;;) {
                do_wp_page(current, vma, start, 1);
                if (!size)
                        break;
                size--;
                start += PAGE_SIZE;
                if (start < vma->vm_end)
                        continue;
                vma = vma->vm_next;
                if (!vma || vma->vm_start != start)
                        goto bad_area;
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        }
        return 0;

check_stack:
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (expand_stack(vma, start) == 0)
                goto good_area;

bad_area:
        return -EFAULT;
}

/*
 * This function zeroes out partial mmap'ed pages at truncation time..
 */
static void partial_clear(struct vm_area_struct *vma, unsigned long address)
{
        pgd_t *page_dir;
        pmd_t *page_middle;
        pte_t *page_table, pte;

        page_dir = pgd_offset(vma->vm_mm, address);
        if (pgd_none(*page_dir))
                return;
        if (pgd_bad(*page_dir)) {
                printk("bad page table directory entry %p:[%lx]\n", page_dir, pgd_val(*page_dir));
                pgd_clear(page_dir);
                return;
        }
        page_middle = pmd_offset(page_dir, address);
        if (pmd_none(*page_middle))
                return;
        if (pmd_bad(*page_middle)) {
                printk("bad page middle entry %p:[%lx]\n", page_middle, pmd_val(*page_middle));
                pmd_clear(page_middle);
                return;
        }
        page_table = pte_offset(page_middle, address);
        pte = *page_table;
        if (!pte_present(pte))
                return;
        flush_cache_page(vma, address);
        address &= ~PAGE_MASK;
        address += pte_page(pte);
        if (address >= high_memory)
                return;
        memset((void *) address, 0, PAGE_SIZE - (address & ~PAGE_MASK));
        flush_page_to_ram(pte_page(pte));
}

/*
 * Handle all mappings that got truncated by a "truncate()"
 * system call.
 *
 * NOTE! We have to be ready to update the memory sharing
 * between the file and the memory map for a potential last
 * incomplete page.  Ugly, but necessary.
 */
void vmtruncate(struct inode * inode, unsigned long offset)
{
        struct vm_area_struct * mpnt;

        truncate_inode_pages(inode, offset);
        if (!inode->i_mmap)
                return;
        mpnt = inode->i_mmap;
        do {
                unsigned long start = mpnt->vm_start;
                unsigned long len = mpnt->vm_end - start;
                unsigned long diff;

                /* mapping wholly truncated? */
                if (mpnt->vm_offset >= offset) {
                        zap_page_range(mpnt->vm_mm, start, len);
                        continue;
                }
                /* mapping wholly unaffected? */
                diff = offset - mpnt->vm_offset;
                if (diff >= len)
                        continue;
                /* Ok, partially affected.. */
                start += diff;
                len = (len - diff) & PAGE_MASK;
                if (start & ~PAGE_MASK) {
                        partial_clear(mpnt, start);
                        start = (start + ~PAGE_MASK) & PAGE_MASK;
                }
                zap_page_range(mpnt->vm_mm, start, len);
        } while ((mpnt = mpnt->vm_next_share) != inode->i_mmap);
}


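/*
 * Bring a swapped-out page back in: use the generic swap_in() unless
 * the vma provides its own swapin() operation, in which case the page
 * it returns is installed here (write-protected when it is shared and
 * the mapping is private).
 */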
static inline void do_swap_page(struct task_struct * tsk,
        struct vm_area_struct * vma, unsigned long address,
        pte_t * page_table, pte_t entry, int write_access)
{
        pte_t page;

        if (!vma->vm_ops || !vma->vm_ops->swapin) {
                swap_in(tsk, vma, page_table, pte_val(entry), write_access);
                flush_page_to_ram(pte_page(*page_table));
                return;
        }
        page = vma->vm_ops->swapin(vma, address - vma->vm_start + vma->vm_offset, pte_val(entry));
        if (pte_val(*page_table) != pte_val(entry)) {
                free_page(pte_page(page));
                return;
        }
        if (mem_map[MAP_NR(pte_page(page))].count > 1 && !(vma->vm_flags & VM_SHARED))
                page = pte_wrprotect(page);
        ++vma->vm_mm->rss;
        ++tsk->maj_flt;
        flush_page_to_ram(pte_page(page));
        set_pte(page_table, page);
        return;
}

/*
 * do_no_page() tries to create a new page mapping. It aggressively
 * tries to share with existing pages, but makes a separate copy if
 * the "write_access" parameter is true in order to avoid the next
 * page fault.
 *
 * As this is called only for pages that do not currently exist, we
 * do not need to flush old virtual caches or the TLB.
 */
void do_no_page(struct task_struct * tsk, struct vm_area_struct * vma,
void do_no_page(struct task_struct * tsk, struct vm_area_struct * vma,
        unsigned long address, int write_access)
        unsigned long address, int write_access)
{
{
        pgd_t * pgd;
        pgd_t * pgd;
        pmd_t * pmd;
        pmd_t * pmd;
        pte_t * page_table;
        pte_t * page_table;
        pte_t entry;
        pte_t entry;
        unsigned long page;
        unsigned long page;
 
 
        pgd = pgd_offset(tsk->mm, address);
        pgd = pgd_offset(tsk->mm, address);
        pmd = pmd_alloc(pgd, address);
        pmd = pmd_alloc(pgd, address);
        if (!pmd)
        if (!pmd)
                goto no_memory;
                goto no_memory;
        page_table = pte_alloc(pmd, address);
        page_table = pte_alloc(pmd, address);
        if (!page_table)
        if (!page_table)
                goto no_memory;
                goto no_memory;
        entry = *page_table;
        entry = *page_table;
        if (pte_present(entry))
        if (pte_present(entry))
                goto is_present;
                goto is_present;
        if (!pte_none(entry))
        if (!pte_none(entry))
                goto swap_page;
                goto swap_page;
        address &= PAGE_MASK;
        address &= PAGE_MASK;
        if (!vma->vm_ops || !vma->vm_ops->nopage)
        if (!vma->vm_ops || !vma->vm_ops->nopage)
                goto anonymous_page;
                goto anonymous_page;
        /*
         * The third argument is "no_share", which tells the low-level code
         * to copy, not share the page even if sharing is possible.  It's
         * essentially an early COW detection.
         */
        page = vma->vm_ops->nopage(vma, address,
                (vma->vm_flags & VM_SHARED)?0:write_access);
        if (!page)
                goto sigbus;
        ++tsk->maj_flt;
        ++vma->vm_mm->rss;
        /*
         * This silly early PAGE_DIRTY setting removes a race
         * due to the bad i386 page protection. But it's valid
         * for other architectures too.
         *
         * Note that if write_access is true, we either now have
         * an exclusive copy of the page, or this is a shared mapping,
         * so we can make it writable and dirty to avoid having to
         * handle that later.
         */
        flush_page_to_ram(page);
        entry = mk_pte(page, vma->vm_page_prot);
        if (write_access) {
                entry = pte_mkwrite(pte_mkdirty(entry));
        } else if (mem_map[MAP_NR(page)].count > 1 && !(vma->vm_flags & VM_SHARED))
                entry = pte_wrprotect(entry);
        put_page(page_table, entry);
        /* no need to invalidate: a not-present page shouldn't be cached */
        return;

anonymous_page:
        entry = pte_wrprotect(mk_pte(ZERO_PAGE, vma->vm_page_prot));
        if (write_access) {
                unsigned long page = __get_free_page(GFP_KERNEL);
                if (!page)
                        goto sigbus;
                memset((void *) page, 0, PAGE_SIZE);
                entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
                vma->vm_mm->rss++;
                tsk->min_flt++;
                flush_page_to_ram(page);
        }
        put_page(page_table, entry);
        return;

sigbus:
        force_sig(SIGBUS, current);
        put_page(page_table, BAD_PAGE);
        /* no need to invalidate, wasn't present */
        return;

swap_page:
        do_swap_page(tsk, vma, address, page_table, entry, write_access);
        return;

no_memory:
        oom(tsk);
is_present:
        return;
}
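/*
 * For the mapped-file case above, vma->vm_ops->nopage() is expected to
 * return the address of a page (as passed to mk_pte()) holding the data
 * for "address", or 0 on failure; when "no_share" is set it should hand
 * back a private copy rather than the shared page, since do_no_page()
 * will map the result writable and dirty right away.  A minimal sketch
 * of such an operation (hypothetical name, lookup and error handling
 * omitted) might look like:
 *
 *      static unsigned long example_nopage(struct vm_area_struct * area,
 *              unsigned long address, int no_share)
 *      {
 *              unsigned long page = ...find or read in the data page...;
 *              if (no_share) {
 *                      unsigned long new_page = __get_free_page(GFP_KERNEL);
 *                      if (new_page)
 *                              memcpy((void *) new_page, (void *) page, PAGE_SIZE);
 *                      ...drop the reference to the original page...
 *                      return new_page;
 *              }
 *              return page;
 *      }
 */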
 
 
/*
 * The above separate functions for the no-page and wp-page
 * cases will go away (they mostly do the same thing anyway),
 * and we'll instead use only a general "handle_mm_fault()".
 *
 * These routines also need to handle stuff like marking pages dirty
 * and/or accessed for architectures that don't do it in hardware (most
 * RISC architectures).  The early dirtying is also good on the i386.
 *
 * There is also a hook called "update_mmu_cache()" that architectures
 * with external mmu caches can use to update those (ie the Sparc or
 * PowerPC hashed page tables that act as extended TLBs).
 */
static inline void handle_pte_fault(struct vm_area_struct * vma, unsigned long address,
        int write_access, pte_t * pte)
{
        if (!pte_present(*pte)) {
                do_no_page(current, vma, address, write_access);
                return;
        }
        set_pte(pte, pte_mkyoung(*pte));
        flush_tlb_page(vma, address);
        if (!write_access)
                return;
        if (pte_write(*pte)) {
                set_pte(pte, pte_mkdirty(*pte));
                flush_tlb_page(vma, address);
                return;
        }
        do_wp_page(current, vma, address, write_access);
}
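/*
 * To summarise the dispatch above:
 *
 *      pte not present, pte_none       -> do_no_page(): nopage or anonymous page
 *      pte not present, !pte_none      -> do_no_page() -> do_swap_page()
 *      pte present, read fault         -> just mark the pte young, flush the TLB entry
 *      pte present, write, writable    -> mark it young and dirty
 *      pte present, write, read-only   -> do_wp_page() (copy-on-write)
 */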
 
 
void handle_mm_fault(struct vm_area_struct * vma, unsigned long address,
        int write_access)
{
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;

        pgd = pgd_offset(vma->vm_mm, address);
        pmd = pmd_alloc(pgd, address);
        if (!pmd)
                goto no_memory;
        pte = pte_alloc(pmd, address);
        if (!pte)
                goto no_memory;
        handle_pte_fault(vma, address, write_access, pte);
        update_mmu_cache(vma, address, *pte);
        return;
no_memory:
        oom(current);
}
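/*
 * handle_mm_fault() is the entry point that the per-architecture fault
 * handlers call once they have found a vma covering the faulting address
 * and verified that the access is allowed.  A condensed sketch of such a
 * caller (vma lookup, stack expansion and signal delivery are simplified;
 * see the architecture's mm/fault.c for the real thing):
 *
 *      vma = find_vma(...);                    find the vma for "address"
 *      if (!vma || address < vma->vm_start)
 *              goto bad_area;                  no mapping: raise SIGSEGV
 *      if (write && !(vma->vm_flags & VM_WRITE))
 *              goto bad_area;
 *      handle_mm_fault(vma, address, write);
 */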
 
 
