/*
 *      linux/mm/filemap.c
 *
 * Copyright (C) 1994, 1995  Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem does this differently, for example)
 */
#include <linux/config.h> /* CONFIG_READA_SMALL */
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/errno.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/malloc.h>
#include <linux/fs.h>
#include <linux/locks.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

#include <asm/segment.h>
#include <asm/system.h>
#include <asm/pgtable.h>

/*
 * Shared mappings implemented 30.11.1994. It's not fully working yet,
 * though.
 *
 * Shared mappings now work. 15.8.1995  Bruno.
 */

unsigned long page_cache_size = 0;
struct page * page_hash_table[PAGE_HASH_SIZE];

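/*
 * Page-cache pages are kept on two structures: a per-inode list rooted at
 * inode->i_pages, and the global page_hash_table above, which hashes
 * (inode, offset) pairs to a bucket so a cached page can be found quickly.
 * The hashing helpers used below (page_hash(), __find_page(),
 * __add_page_to_hash_queue(), ...) come from <linux/pagemap.h>.
 */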
/*
 * Simple routines for both non-shared and shared mappings.
 */

#define release_page(page) __free_page((page))

/*
 * Invalidate the pages of an inode, removing all pages that aren't
 * locked down (those are sure to be up-to-date anyway, so we shouldn't
 * invalidate them).
 */
void invalidate_inode_pages(struct inode * inode)
{
        struct page ** p;
        struct page * page;

        p = &inode->i_pages;
        while ((page = *p) != NULL) {
                if (PageLocked(page)) {
                        p = &page->next;
                        continue;
                }
                inode->i_nrpages--;
                if ((*p = page->next) != NULL)
                        (*p)->prev = page->prev;
                page->dirty = 0;
                page->next = NULL;
                page->prev = NULL;
                remove_page_from_hash_queue(page);
                page->inode = NULL;
                __free_page(page);
                continue;
        }
}

/*
 * Truncate the page cache at a set offset, removing the pages
 * that are beyond that offset (and zeroing out partial pages).
 */
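/*
 * For example, with 4K pages and start == 5000: every cached page at
 * offset 8192 or above is freed, the page at offset 4096 is kept but its
 * bytes from 5000 - 4096 = 904 to the end of the page (file offsets
 * 5000..8191) are cleared, and the page at offset 0 is left untouched.
 */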
void truncate_inode_pages(struct inode * inode, unsigned long start)
{
        struct page ** p;
        struct page * page;

repeat:
        p = &inode->i_pages;
        while ((page = *p) != NULL) {
                unsigned long offset = page->offset;

                /* page wholly truncated - free it */
                if (offset >= start) {
                        if (PageLocked(page)) {
                                __wait_on_page(page);
                                goto repeat;
                        }
                        inode->i_nrpages--;
                        if ((*p = page->next) != NULL)
                                (*p)->prev = page->prev;
                        page->dirty = 0;
                        page->next = NULL;
                        page->prev = NULL;
                        remove_page_from_hash_queue(page);
                        page->inode = NULL;
                        __free_page(page);
                        continue;
                }
                p = &page->next;
                offset = start - offset;
                /* partial truncate, clear end of page */
                if (offset < PAGE_SIZE) {
                        unsigned long address = page_address(page);
                        memset((void *) (offset + address), 0, PAGE_SIZE - offset);
                        flush_page_to_ram(address);
                }
        }
}

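/*
 * Try to free one page.  shrink_mmap() sweeps mem_map with a static clock
 * hand, looking at no more than count_max pages in total and no more than
 * count_min page-cache/buffer-cache pages (both derived from the priority
 * argument).  It returns 1 as soon as a page has been freed, 0 otherwise.
 */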
int shrink_mmap(int priority, int dma, int free_buf)
{
        static int clock = 0;
        struct page * page;
        unsigned long limit = MAP_NR(high_memory);
        struct buffer_head *tmp, *bh;
        int count_max, count_min;

        count_max = (limit<<1) >> (priority>>1);
        count_min = (limit<<1) >> (priority);

        page = mem_map + clock;
        do {
                count_max--;
                if (page->inode || page->buffers)
                        count_min--;

                if (PageLocked(page))
                        goto next;
                if (dma && !PageDMA(page))
                        goto next;
                /* First of all, regenerate the page's referenced bit
                   from any buffers in the page */
                bh = page->buffers;
                if (bh) {
                        tmp = bh;
                        do {
                                if (buffer_touched(tmp)) {
                                        clear_bit(BH_Touched, &tmp->b_state);
                                        set_bit(PG_referenced, &page->flags);
                                }
                                tmp = tmp->b_this_page;
                        } while (tmp != bh);
                }

                /* We can't throw away shared pages, but we do mark
                   them as referenced.  This relies on the fact that
                   no page is currently in both the page cache and the
                   buffer cache; we'd have to modify the following
                   test to allow for that case. */

                switch (page->count) {
                        case 1:
                                /* If it has been referenced recently, don't free it */
                                if (clear_bit(PG_referenced, &page->flags)) {
                                        /* age this page, it was potentially in use */
                                        if (priority < 4)
                                                age_page(page);
                                        break;
                                }

                                /* is it a page cache page? */
                                if (page->inode) {
                                        remove_page_from_hash_queue(page);
                                        remove_page_from_inode_queue(page);
                                        __free_page(page);
                                        return 1;
                                }

                                /* is it a buffer cache page? */
                                if (free_buf && bh && try_to_free_buffer(bh, &bh, 6))
                                        return 1;
                                break;

                        default:
                                /* more than one user: we can't throw it away */
                                set_bit(PG_referenced, &page->flags);
                                /* fall through */
                        case 0:
                                /* nothing */
                }
next:
                page++;
                clock++;
                if (clock >= limit) {
                        clock = 0;
                        page = mem_map;
                }
        } while (count_max > 0 && count_min > 0);
        return 0;
}

/*
 * This is called from try_to_swap_out() when we try to get rid of some
 * pages..  If we're unmapping the last occurrence of this page, we also
 * free it from the page hash-queues etc, as we don't want to keep it
 * in-core unnecessarily.
 */
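/*
 * A count of exactly 2 means the only remaining users are the page cache
 * itself and the single mapping being torn down, so dropping the cache
 * reference here lets the page go away once the caller unmaps it.
 */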
unsigned long page_unuse(unsigned long page)
{
        struct page * p = mem_map + MAP_NR(page);
        int count = p->count;

        if (count != 2)
                return count;
        if (!p->inode)
                return count;
        remove_page_from_hash_queue(p);
        remove_page_from_inode_queue(p);
        free_page(page);
        return 1;
}

/*
 * Update a page cache copy when we're doing a "write()" system call,
 * so that subsequent reads through the cache see the newly written data.
 */
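/*
 * For example, with 4K pages, a write of 5000 bytes at pos == 3000 updates
 * bytes 3000..4095 of the cached page at offset 0 (1096 bytes) and bytes
 * 0..3903 of the cached page at offset 4096 (3904 bytes); pages that are
 * not currently in the cache are simply skipped.
 */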
void update_vm_cache(struct inode * inode, unsigned long pos, const char * buf, int count)
{
        unsigned long offset, len;

        offset = (pos & ~PAGE_MASK);
        pos = pos & PAGE_MASK;
        len = PAGE_SIZE - offset;
        do {
                struct page * page;

                if (len > count)
                        len = count;
                page = find_page(inode, pos);
                if (page) {
                        wait_on_page(page);
                        memcpy((void *) (offset + page_address(page)), buf, len);
                        release_page(page);
                }
                count -= len;
                buf += len;
                len = PAGE_SIZE;
                offset = 0;
                pos += PAGE_SIZE;
        } while (count);
}

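/*
 * Insert a (freshly allocated) page into the page cache for the given
 * inode and offset.  The reference count is raised here, so after this
 * call one reference belongs to the cache and one to the caller, who is
 * expected to drop it with release_page() when done.
 */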
static inline void add_to_page_cache(struct page * page,
        struct inode * inode, unsigned long offset,
        struct page **hash)
{
        page->count++;
        page->flags &= ~((1 << PG_uptodate) | (1 << PG_error));
        page->offset = offset;
        add_page_to_inode_queue(inode, page);
        __add_page_to_hash_queue(page, hash);
}

/*
 * Try to read ahead in the file. "page_cache" is a potentially free page
 * that we could use for the cache (if it is 0 we can try to create one,
 * this is all overlapped with the IO on the previous page finishing anyway)
 */
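/*
 * Note that "case 0" below deliberately falls through into the default
 * case: once a spare page has been allocated, it is handled exactly like
 * a page_cache that was passed in by the caller.
 */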
static unsigned long try_to_read_ahead(struct inode * inode, unsigned long offset, unsigned long page_cache)
{
        struct page * page;
        struct page ** hash;

        offset &= PAGE_MASK;
        switch (page_cache) {
        case 0:
                page_cache = __get_free_page(GFP_KERNEL);
                if (!page_cache)
                        break;
        default:
                if (offset >= inode->i_size)
                        break;
                hash = page_hash(inode, offset);
                page = __find_page(inode, offset, *hash);
                if (!page) {
                        /*
                         * Ok, add the new page to the hash-queues...
                         */
                        page = mem_map + MAP_NR(page_cache);
                        add_to_page_cache(page, inode, offset, hash);
                        inode->i_op->readpage(inode, page);
                        page_cache = 0;
                }
                release_page(page);
        }
        return page_cache;
}

/*
 * Wait for IO to complete on a locked page.
 *
 * This must be called with the caller "holding" the page,
 * ie with increased "page->count" so that the page won't
 * go away during the wait..
 */
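/*
 * run_task_queue(&tq_disk) below unplugs the block device queues, so any
 * read request still sitting in a plugged queue actually gets issued
 * before we go to sleep waiting for the page.
 */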
void __wait_on_page(struct page *page)
{
        struct wait_queue wait = { current, NULL };

        add_wait_queue(&page->wait, &wait);
repeat:
        run_task_queue(&tq_disk);
        current->state = TASK_UNINTERRUPTIBLE;
        if (PageLocked(page)) {
                schedule();
                goto repeat;
        }
        remove_wait_queue(&page->wait, &wait);
        current->state = TASK_RUNNING;
}

#if 0
#define PROFILE_READAHEAD
#define DEBUG_READAHEAD
#endif

/*
 * Read-ahead profiling information
 * --------------------------------
 * Every PROFILE_MAXREADCOUNT reads, the following information is written
 * to the syslog:
 *   Percentage of asynchronous read-ahead.
 *   Average values of the read-ahead context fields.
 * If DEBUG_READAHEAD is defined, a snapshot of these fields is also written
 * to the syslog.
 */

#ifdef PROFILE_READAHEAD

#define PROFILE_MAXREADCOUNT 1000

static unsigned long total_reada;
static unsigned long total_async;
static unsigned long total_ramax;
static unsigned long total_ralen;
static unsigned long total_rawin;

static void profile_readahead(int async, struct file *filp)
{
        unsigned long flags;

        ++total_reada;
        if (async)
                ++total_async;

        total_ramax     += filp->f_ramax;
        total_ralen     += filp->f_ralen;
        total_rawin     += filp->f_rawin;

        if (total_reada > PROFILE_MAXREADCOUNT) {
                save_flags(flags);
                cli();
                if (!(total_reada > PROFILE_MAXREADCOUNT)) {
                        restore_flags(flags);
                        return;
                }

                printk("Readahead average:  max=%ld, len=%ld, win=%ld, async=%ld%%\n",
                        total_ramax/total_reada,
                        total_ralen/total_reada,
                        total_rawin/total_reada,
                        (total_async*100)/total_reada);
#ifdef DEBUG_READAHEAD
                printk("Readahead snapshot: max=%ld, len=%ld, win=%ld, raend=%ld\n",
                        filp->f_ramax, filp->f_ralen, filp->f_rawin, filp->f_raend);
#endif

                total_reada     = 0;
                total_async     = 0;
                total_ramax     = 0;
                total_ralen     = 0;
                total_rawin     = 0;

                restore_flags(flags);
        }
}
#endif  /* defined PROFILE_READAHEAD */

/*
 * Read-ahead context:
 * -------------------
 * The read ahead context fields of the "struct file" are the following:
 * - f_raend : position of the first byte after the last page we tried to
 *             read ahead.
 * - f_ramax : current read-ahead maximum size.
 * - f_ralen : length of the current IO read block we tried to read-ahead.
 * - f_rawin : length of the current read-ahead window.
 *             if last read-ahead was synchronous then
 *                  f_rawin = f_ralen
 *             otherwise (was asynchronous)
 *                  f_rawin = previous value of f_ralen + f_ralen
 *
 * Read-ahead limits:
 * ------------------
 * MIN_READAHEAD   : minimum read-ahead size when reading ahead.
 * MAX_READAHEAD   : maximum read-ahead size when reading ahead.
 *
 * Synchronous read-ahead benefits:
 * --------------------------------
 * Using a reasonable IO xfer length from peripheral devices increases system
 * performance.
 * Reasonable means, in this context, not too large but not too small.
 * The actual maximum value is:
 *      MAX_READAHEAD + PAGE_SIZE = 76k if CONFIG_READA_SMALL is undefined
 *      and 32K if defined (4K page size assumed).
 *
 * Asynchronous read-ahead benefits:
 * ---------------------------------
 * Overlapping the next read request with user process execution increases
 * system performance.
 *
 * Read-ahead risks:
 * -----------------
 * We have to guess which further data are needed by the user process.
 * If these data are often not really needed, it's bad for system
 * performance.
 * However, we know that files are often accessed sequentially by
 * application programs and it seems that it is possible to have some good
 * strategy in that guessing.
 * We only try to read ahead files that seem to be read sequentially.
 *
 * Asynchronous read-ahead risks:
 * ------------------------------
 * In order to maximize overlapping, we must start some asynchronous read
 * requests from the device as soon as possible.
 * We must be very careful about:
 * - The number of effective pending IO read requests.
 *   ONE seems to be the only reasonable value.
 * - The total memory pool usage for the file access stream.
 *   This maximum memory usage is implicitly 2 IO read chunks:
 *   2*(MAX_READAHEAD + PAGE_SIZE) = 156K if CONFIG_READA_SMALL is undefined,
 *   64k if defined (4K page size assumed).
 */

#define PageAlignSize(size) (((size) + PAGE_SIZE -1) & PAGE_MASK)

#ifdef CONFIG_READA_SMALL  /* small readahead */
#define MAX_READAHEAD PageAlignSize(4096*7)
#define MIN_READAHEAD PageAlignSize(4096*2)
#else /* large readahead */
#define MAX_READAHEAD PageAlignSize(4096*18)
#define MIN_READAHEAD PageAlignSize(4096*3)
#endif
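/*
 * With 4K pages this gives MAX_READAHEAD = 72K and MIN_READAHEAD = 12K in
 * the large case (so MAX_READAHEAD + PAGE_SIZE = 76K), and MAX_READAHEAD =
 * 28K and MIN_READAHEAD = 8K in the small case (28K + 4K = 32K), matching
 * the figures quoted in the comment above.
 */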
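/*
 * Convention for "reada_ok" below: 0 means no read-ahead, 1 means the
 * access pattern looks sequential so read-ahead is allowed, and the value
 * is set to 2 inside generic_file_readahead() when it decides to read
 * ahead asynchronously.
 */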
static inline unsigned long generic_file_readahead(int reada_ok, struct file * filp, struct inode * inode,
        unsigned long ppos, struct page * page,
        unsigned long page_cache)
{
        unsigned long max_ahead, ahead;
        unsigned long raend;

        raend = filp->f_raend & PAGE_MASK;
        max_ahead = 0;

/*
 * The current page is locked.
 * If the current position is inside the previous read IO request, do not
 * try to reread previously read ahead pages.
 * Otherwise decide whether or not to read ahead some pages synchronously.
 * If we are not going to read ahead, set the read ahead context for this
 * page only.
 */
        if (PageLocked(page)) {
                if (!filp->f_ralen || ppos >= raend || ppos + filp->f_ralen < raend) {
                        raend = ppos;
                        if (raend < inode->i_size)
                                max_ahead = filp->f_ramax;
                        filp->f_rawin = 0;
                        filp->f_ralen = PAGE_SIZE;
                        if (!max_ahead) {
                                filp->f_raend  = ppos + filp->f_ralen;
                                filp->f_rawin += filp->f_ralen;
                        }
                }
        }
/*
 * The current page is not locked.
 * If we were reading ahead and,
 * if the current max read ahead size is not zero and,
 * if the current position is inside the last read-ahead IO request,
 *   it is the moment to try to read ahead asynchronously.
 * We will later force an unplug of the device in order to force
 * asynchronous read IO.
 */
        else if (reada_ok && filp->f_ramax && raend >= PAGE_SIZE &&
                 ppos <= raend && ppos + filp->f_ralen >= raend) {
/*
 * Add ONE page to max_ahead in order to try to have about the same IO max size
 * as synchronous read-ahead (MAX_READAHEAD + 1)*PAGE_SIZE.
 * Compute the position of the last page we have tried to read in order to
 * begin to read ahead just at the next page.
 */
                raend -= PAGE_SIZE;
                if (raend < inode->i_size)
                        max_ahead = filp->f_ramax + PAGE_SIZE;

                if (max_ahead) {
                        filp->f_rawin = filp->f_ralen;
                        filp->f_ralen = 0;
                        reada_ok      = 2;
                }
        }
/*
 * Try to read ahead pages.
 * We hope that ll_rw_blk() plug/unplug, coalescing, request sorting and the
 * scheduler will work well enough for us to avoid overly bad actual IO
 * requests.
 */
        ahead = 0;
        while (ahead < max_ahead) {
                ahead += PAGE_SIZE;
                page_cache = try_to_read_ahead(inode, raend + ahead, page_cache);
        }
/*
 * If we tried to read ahead some pages,
 * If we tried to read ahead asynchronously,
 *   Try to force an unplug of the device in order to start an asynchronous
 *   read IO request.
 * Update the read-ahead context.
 * Store the length of the current read-ahead window.
 * Double the current max read ahead size.
 *   That heuristic avoids doing large IO for files that are not really
 *   accessed sequentially.
 */
        if (ahead) {
                if (reada_ok == 2) {
                        run_task_queue(&tq_disk);
                }

                filp->f_ralen += ahead;
                filp->f_rawin += filp->f_ralen;
                filp->f_raend = raend + ahead + PAGE_SIZE;

                filp->f_ramax += filp->f_ramax;

                if (filp->f_ramax > MAX_READAHEAD)
                        filp->f_ramax = MAX_READAHEAD;

#ifdef PROFILE_READAHEAD
                profile_readahead((reada_ok == 2), filp);
#endif
        }

        return page_cache;
}


/*
 * This is a generic file read routine, and uses the
 * inode->i_op->readpage() function for the actual low-level
 * stuff.
 *
 * This is really ugly. But the goto's actually try to clarify some
 * of the logic when it comes to error handling etc.
 */

int generic_file_read(struct inode * inode, struct file * filp, char * buf, int count)
{
        int error, read;
        unsigned long pos, ppos, page_cache;
        int reada_ok;

        error = 0;
        read = 0;
        page_cache = 0;

        pos = filp->f_pos;
        ppos = pos & PAGE_MASK;
/*
 * If the current position is outside the previous read-ahead window,
 * we reset the current read-ahead context and set read ahead max to zero
 * (it will be set to just the needed value later),
 * otherwise, we assume that the file accesses are sequential enough to
 * continue read-ahead.
 */
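/*
 * The previous window is the byte range [f_raend - f_rawin, f_raend]; a
 * page-aligned position that falls inside it is taken as evidence of
 * sequential access.
 */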
        if (ppos > filp->f_raend || ppos + filp->f_rawin < filp->f_raend) {
                reada_ok = 0;
                filp->f_raend = 0;
                filp->f_ralen = 0;
                filp->f_ramax = 0;
                filp->f_rawin = 0;
        } else {
                reada_ok = 1;
        }
/*
 * Adjust the current value of read-ahead max.
 * If the read operation stays in the first half page, force no readahead.
 * Otherwise try to increase read ahead max just enough to do the read request.
 * Then, at least MIN_READAHEAD if read ahead is ok,
 * and at most MAX_READAHEAD in all cases.
 */
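/*
 * For example, with 4K pages, a read of 3000 bytes at pos == 10000 gives
 * ppos == 8192 and needed == (13000 & PAGE_MASK) - 8192 == 4096, so
 * f_ramax is raised to at least one page (then raised to MIN_READAHEAD if
 * read-ahead is on, and capped at MAX_READAHEAD).
 */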
        if (pos + count <= (PAGE_SIZE >> 1)) {
                filp->f_ramax = 0;
        } else {
                unsigned long needed;

                needed = ((pos + count) & PAGE_MASK) - ppos;

                if (filp->f_ramax < needed)
                        filp->f_ramax = needed;

                if (reada_ok && filp->f_ramax < MIN_READAHEAD)
                                filp->f_ramax = MIN_READAHEAD;
                if (filp->f_ramax > MAX_READAHEAD)
                        filp->f_ramax = MAX_READAHEAD;
        }

        for (;;) {
                struct page *page, **hash;

                if (pos >= inode->i_size)
                        break;

                /*
                 * Try to find the data in the page cache..
                 */
                hash = page_hash(inode, pos & PAGE_MASK);
                page = __find_page(inode, pos & PAGE_MASK, *hash);
                if (!page)
                        goto no_cached_page;

found_page:
/*
 * Try to read ahead only if the current page is filled or being filled.
 * Otherwise, if we were reading ahead, decrease max read ahead size to
 * the minimum value.
 * In this context, that seems to happen only on some read error or if
 * the page has been rewritten.
 */
                if (PageUptodate(page) || PageLocked(page))
                        page_cache = generic_file_readahead(reada_ok, filp, inode, pos & PAGE_MASK, page, page_cache);
                else if (reada_ok && filp->f_ramax > MIN_READAHEAD)
                                filp->f_ramax = MIN_READAHEAD;

                wait_on_page(page);

                if (!PageUptodate(page))
                        goto page_read_error;

success:
                /*
                 * Ok, we have the page, it's up-to-date and ok,
                 * so now we can finally copy it to user space...
                 */
        {
                unsigned long offset, nr;
                offset = pos & ~PAGE_MASK;
                nr = PAGE_SIZE - offset;
                if (nr > count)
                        nr = count;

                if (nr > inode->i_size - pos)
                        nr = inode->i_size - pos;
                memcpy_tofs(buf, (void *) (page_address(page) + offset), nr);
                release_page(page);
                buf += nr;
                pos += nr;
                read += nr;
                count -= nr;
                if (count) {
                        /*
                         * to prevent hogging the CPU on well-cached systems,
                         * schedule if needed, it's safe to do it here:
                         */
                        if (need_resched)
                                schedule();
                        continue;
                }
                break;
        }

no_cached_page:
                /*
                 * Ok, it wasn't cached, so we need to create a new
                 * page..
                 */
                if (!page_cache) {
                        page_cache = __get_free_page(GFP_KERNEL);
                        /*
                         * That could have slept, so go around to the
                         * very beginning..
                         */
                        if (page_cache)
                                continue;
                        error = -ENOMEM;
                        break;
                }

                /*
                 * Ok, add the new page to the hash-queues...
                 */
                page = mem_map + MAP_NR(page_cache);
                page_cache = 0;
                add_to_page_cache(page, inode, pos & PAGE_MASK, hash);

                /*
                 * Error handling is tricky. If we get a read error,
                 * the cached page stays in the cache (but uptodate=0),
                 * and the next process that accesses it will try to
                 * re-read it. This is needed for NFS etc, where the
                 * identity of the reader can decide if we can read the
                 * page or not..
                 */
/*
 * We have to read the page.
 * If we were reading ahead, we had previously tried to read this page.
 * That means that the page has probably been removed from the cache before
 * the application process needs it, or has been rewritten.
 * Decrease max readahead size to the minimum value in that situation.
 */
                if (reada_ok && filp->f_ramax > MIN_READAHEAD)
                        filp->f_ramax = MIN_READAHEAD;

                error = inode->i_op->readpage(inode, page);
                if (!error)
                        goto found_page;
                release_page(page);
                break;

page_read_error:
                /*
                 * We found the page, but it wasn't up-to-date.
                 * Try to re-read it _once_. We do this synchronously,
                 * because this happens only if there were errors.
                 */
                error = inode->i_op->readpage(inode, page);
                if (!error) {
                        wait_on_page(page);
                        if (PageUptodate(page) && !PageError(page))
                                goto success;
                        error = -EIO; /* Some unspecified error occurred.. */
                }
                release_page(page);
                break;
        }

        filp->f_pos = pos;
        filp->f_reada = 1;
        if (page_cache)
                free_page(page_cache);
        UPDATE_ATIME(inode)
        if (!read)
                read = error;
        return read;
}

/*
 * Semantics for shared and private memory areas are different past the end
 * of the file. A shared mapping past the last page of the file is an error
 * and results in a SIGBUS, while a private mapping just maps in a zero page.
 *
 * The goto's are kind of ugly, but this streamlines the normal case of having
 * it in the page cache, and handles the special cases reasonably without
 * having a lot of duplicated code.
 */
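/*
 * When "no_share" is set the caller needs a private copy of the data, so
 * the cached page is copied into a freshly allocated page instead of being
 * returned (and thus mapped) directly.
 */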
static unsigned long filemap_nopage(struct vm_area_struct * area, unsigned long address, int no_share)
static unsigned long filemap_nopage(struct vm_area_struct * area, unsigned long address, int no_share)
{
        unsigned long offset;
        struct page * page, **hash;
        struct inode * inode = area->vm_inode;
        unsigned long old_page, new_page;

        new_page = 0;
        offset = (address & PAGE_MASK) - area->vm_start + area->vm_offset;
        if (offset >= inode->i_size && (area->vm_flags & VM_SHARED) && area->vm_mm == current->mm)
                goto no_page;

        /*
         * Do we have something in the page cache already?
         */
        hash = page_hash(inode, offset);
        page = __find_page(inode, offset, *hash);
        if (!page)
                goto no_cached_page;

found_page:
        /*
         * Ok, found a page in the page cache, now we need to check
         * that it's up-to-date.  First check whether we'll need an
         * extra page -- better to overlap the allocation with the I/O.
         */
        if (no_share && !new_page) {
                new_page = __get_free_page(GFP_KERNEL);
                if (!new_page)
                        goto failure;
        }

        if (PageLocked(page))
                goto page_locked_wait;
        if (!PageUptodate(page))
                goto page_read_error;

success:
        /*
         * Found the page, need to check sharing and possibly
         * copy it over to another page..
         */
        old_page = page_address(page);
        if (!no_share) {
                /*
                 * Ok, we can share the cached page directly.. Get rid
                 * of any potential extra pages.
                 */
                if (new_page)
                        free_page(new_page);

                flush_page_to_ram(old_page);
                return old_page;
        }

        /*
         * No sharing ... copy to the new page.
         */
        memcpy((void *) new_page, (void *) old_page, PAGE_SIZE);
        flush_page_to_ram(new_page);
        release_page(page);
        return new_page;

no_cached_page:
        new_page = __get_free_page(GFP_KERNEL);
        if (!new_page)
                goto no_page;

        /*
         * During getting the above page we might have slept,
         * so we need to re-check the situation with the page
         * cache.. The page we just got may be useful if we
         * can't share, so don't get rid of it here.
         */
        page = find_page(inode, offset);
        if (page)
                goto found_page;

        /*
         * Now, create a new page-cache page from the page we got
         */
        page = mem_map + MAP_NR(new_page);
        new_page = 0;
        add_to_page_cache(page, inode, offset, hash);

        if (inode->i_op->readpage(inode, page) != 0)
                goto failure;

        /*
         * Do a very limited read-ahead if appropriate
         */
        if (PageLocked(page))
                new_page = try_to_read_ahead(inode, offset + PAGE_SIZE, 0);
        goto found_page;

page_locked_wait:
        __wait_on_page(page);
        if (PageUptodate(page))
                goto success;

page_read_error:
        /*
         * Umm, take care of errors if the page isn't up-to-date.
         * Try to re-read it _once_. We do this synchronously,
         * because there really aren't any performance issues here
         * and we need to check for errors.
         */
        if (inode->i_op->readpage(inode, page) != 0)
                goto failure;
        wait_on_page(page);
        if (PageError(page))
                goto failure;
        if (PageUptodate(page))
                goto success;

        /*
         * Uhhuh.. Things didn't work out. Return zero to tell the
         * mm layer so, possibly freeing the page cache page first.
         */
failure:
        release_page(page);
        if (new_page)
                free_page(new_page);
no_page:
        return 0;
}
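
/*
 * Illustrative sketch, not part of the original source: a worked example
 * of the offset computation at the top of filemap_nopage().  Assuming
 * 4 kB pages, vm_start == 0x40010000 and vm_offset == 0x2000, a fault at
 * address 0x40013a04 gives
 *
 *      offset = (0x40013a04 & PAGE_MASK) - 0x40010000 + 0x2000
 *             = 0x40013000 - 0x40010000 + 0x2000
 *             = 0x5000
 *
 * i.e. the page-aligned file offset that backs the faulting page, which
 * is then used as the page-cache lookup key via page_hash()/__find_page().
 */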
 
 
/*
 * Tries to write a shared mapped page to its backing store. May return -EIO
 * if the disk is full.
 */
static inline int do_write_page(struct inode * inode, struct file * file,
        const char * page, unsigned long offset)
{
        int old_fs, retval;
        unsigned long size;

        size = offset + PAGE_SIZE;
        /* refuse to extend file size.. */
        if (S_ISREG(inode->i_mode)) {
                if (size > inode->i_size)
                        size = inode->i_size;
                /* Ho humm.. We should have tested for this earlier */
                if (size < offset)
                        return -EIO;
        }
        size -= offset;
        old_fs = get_fs();
        set_fs(KERNEL_DS);
        retval = -EIO;
        if (size == file->f_op->write(inode, file, (const char *) page, size))
                retval = 0;
        set_fs(old_fs);
        return retval;
}
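
/*
 * Illustrative sketch, not part of the original source: how the size
 * clamp in do_write_page() behaves near end-of-file.  Assuming 4 kB
 * pages and a regular file with i_size == 9984 (0x2700), a page at
 * offset == 8192 (0x2000) is written as
 *
 *      size = offset + PAGE_SIZE = 0x3000  ->  clamped to i_size = 0x2700
 *      size -= offset                      ->  0x700 (1792 bytes written)
 *
 * so the write never extends the file.  A page whose offset lies beyond
 * i_size would make size < offset and return -EIO instead.
 */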
 
 
static int filemap_write_page(struct vm_area_struct * vma,
        unsigned long offset,
        unsigned long page)
{
        int result;
        struct file file;
        struct inode * inode;
        struct buffer_head * bh;

        bh = mem_map[MAP_NR(page)].buffers;
        if (bh) {
                /* whee.. just mark the buffer heads dirty */
                struct buffer_head * tmp = bh;
                do {
                        mark_buffer_dirty(tmp, 0);
                        tmp = tmp->b_this_page;
                } while (tmp != bh);
                return 0;
        }

        inode = vma->vm_inode;
        file.f_op = inode->i_op->default_file_ops;
        if (!file.f_op->write)
                return -EIO;
        file.f_mode = 3;
        file.f_flags = 0;
        file.f_count = 1;
        file.f_inode = inode;
        file.f_pos = offset;
        file.f_reada = 0;

        down(&inode->i_sem);
        result = do_write_page(inode, &file, (const char *) page, offset);
        up(&inode->i_sem);
        return result;
}
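
/*
 * Illustrative sketch, not part of the original source: the fast path
 * above walks the circular b_this_page ring.  With 4 kB pages and a
 * 1 kB block size the mapped page carries four buffer_heads, so the
 * do/while loop marks each of them dirty exactly once and stops when
 * the ring wraps back to bh; the actual disk write is then left to
 * normal buffer-cache writeback instead of going through do_write_page().
 */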
 
 
 
 
/*
 * Swapping to a shared file: while we're busy writing out the page
 * (and the page still exists in memory), we save the page information
 * in the page table, so that "filemap_swapin()" can re-use the page
 * immediately if it is called while we're busy swapping it out..
 *
 * Once we've written it all out, we mark the page entry "empty", which
 * will result in a normal page-in (instead of a swap-in) from the now
 * up-to-date disk file.
 */
int filemap_swapout(struct vm_area_struct * vma,
        unsigned long offset,
        pte_t *page_table)
{
        int error;
        unsigned long page = pte_page(*page_table);
        unsigned long entry = SWP_ENTRY(SHM_SWP_TYPE, MAP_NR(page));

        flush_cache_page(vma, (offset + vma->vm_start - vma->vm_offset));
        set_pte(page_table, __pte(entry));
        flush_tlb_page(vma, (offset + vma->vm_start - vma->vm_offset));
        error = filemap_write_page(vma, offset, page);
        if (pte_val(*page_table) == entry)
                pte_clear(page_table);
        return error;
}

/*
 * filemap_swapin() is called only if we have something in the page
 * tables that is non-zero (but not present), which we know to be the
 * page index of a page that is busy being swapped out (see above).
 * So we just use it directly..
 */
static pte_t filemap_swapin(struct vm_area_struct * vma,
        unsigned long offset,
        unsigned long entry)
{
        unsigned long page = SWP_OFFSET(entry);

        mem_map[page].count++;
        page = (page << PAGE_SHIFT) + PAGE_OFFSET;
        return mk_pte(page,vma->vm_page_prot);
}
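
/*
 * Illustrative sketch, not part of the original source: the round trip
 * between filemap_swapout() and filemap_swapin() above.  Assuming 4 kB
 * pages and PAGE_OFFSET == 0xC0000000, a mapped page living at kernel
 * address 0xC0123000 is recorded on swap-out as
 *
 *      entry = SWP_ENTRY(SHM_SWP_TYPE, MAP_NR(0xC0123000))
 *            = SWP_ENTRY(SHM_SWP_TYPE, 0x123)
 *
 * and, if the process touches it again before the write-out completes,
 * recovered on swap-in as
 *
 *      page = (SWP_OFFSET(entry) << PAGE_SHIFT) + PAGE_OFFSET
 *           = (0x123 << 12) + 0xC0000000 = 0xC0123000
 *
 * so the very same physical page is re-mapped without any disk I/O.
 */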
 
 
 
 
static inline int filemap_sync_pte(pte_t * ptep, struct vm_area_struct *vma,
        unsigned long address, unsigned int flags)
{
        pte_t pte = *ptep;
        unsigned long page;
        int error;

        if (pte_none(pte))
                return 0;
        if (!(flags & MS_INVALIDATE)) {
                if (!pte_present(pte))
                        return 0;
                if (!pte_dirty(pte))
                        return 0;
                flush_page_to_ram(pte_page(pte));
                flush_cache_page(vma, address);
                set_pte(ptep, pte_mkclean(pte));
                flush_tlb_page(vma, address);
                page = pte_page(pte);
                mem_map[MAP_NR(page)].count++;
        } else {
                flush_cache_page(vma, address);
                pte_clear(ptep);
                flush_tlb_page(vma, address);
                if (!pte_present(pte)) {
                        swap_free(pte_val(pte));
                        return 0;
                }
                page = pte_page(pte);
                if (!pte_dirty(pte) || flags == MS_INVALIDATE) {
                        free_page(page);
                        return 0;
                }
        }
        error = filemap_write_page(vma, address - vma->vm_start + vma->vm_offset, page);
        free_page(page);
        return error;
}

static inline int filemap_sync_pte_range(pmd_t * pmd,
        unsigned long address, unsigned long size,
        struct vm_area_struct *vma, unsigned long offset, unsigned int flags)
{
        pte_t * pte;
        unsigned long end;
        int error;

        if (pmd_none(*pmd))
                return 0;
        if (pmd_bad(*pmd)) {
                printk("filemap_sync_pte_range: bad pmd (%08lx)\n", pmd_val(*pmd));
                pmd_clear(pmd);
                return 0;
        }
        pte = pte_offset(pmd, address);
        offset += address & PMD_MASK;
        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        error = 0;
        do {
                error |= filemap_sync_pte(pte, vma, address + offset, flags);
                address += PAGE_SIZE;
                pte++;
        } while (address < end);
        return error;
}

static inline int filemap_sync_pmd_range(pgd_t * pgd,
        unsigned long address, unsigned long size,
        struct vm_area_struct *vma, unsigned int flags)
{
        pmd_t * pmd;
        unsigned long offset, end;
        int error;

        if (pgd_none(*pgd))
                return 0;
        if (pgd_bad(*pgd)) {
                printk("filemap_sync_pmd_range: bad pgd (%08lx)\n", pgd_val(*pgd));
                pgd_clear(pgd);
                return 0;
        }
        pmd = pmd_offset(pgd, address);
        offset = address & PGDIR_MASK;
        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        error = 0;
        do {
                error |= filemap_sync_pte_range(pmd, address, end - address, vma, offset, flags);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address < end);
        return error;
}

static int filemap_sync(struct vm_area_struct * vma, unsigned long address,
        size_t size, unsigned int flags)
{
        pgd_t * dir;
        unsigned long end = address + size;
        int error = 0;

        dir = pgd_offset(vma->vm_mm, address);
        flush_cache_range(vma->vm_mm, end - size, end);
        while (address < end) {
                error |= filemap_sync_pmd_range(dir, address, end - address, vma, flags);
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        }
        flush_tlb_range(vma->vm_mm, end - size, end);
        return error;
}

/*
 * This handles (potentially partial) area unmaps..
 */
static void filemap_unmap(struct vm_area_struct *vma, unsigned long start, size_t len)
{
        filemap_sync(vma, start, len, MS_ASYNC);
}

/*
 * Shared mappings need to be able to do the right thing at
 * close/unmap/sync. They will also use the private file as
 * backing-store for swapping..
 */
static struct vm_operations_struct file_shared_mmap = {
        NULL,                   /* no special open */
        NULL,                   /* no special close */
        filemap_unmap,          /* unmap - we need to sync the pages */
        NULL,                   /* no special protect */
        filemap_sync,           /* sync */
        NULL,                   /* advise */
        filemap_nopage,         /* nopage */
        NULL,                   /* wppage */
        filemap_swapout,        /* swapout */
        filemap_swapin,         /* swapin */
};

/*
 * Private mappings just need to be able to load in the map.
 *
 * (This is actually used for shared mappings as well, if we
 * know they can't ever get write permissions..)
 */
static struct vm_operations_struct file_private_mmap = {
        NULL,                   /* open */
        NULL,                   /* close */
        NULL,                   /* unmap */
        NULL,                   /* protect */
        NULL,                   /* sync */
        NULL,                   /* advise */
        filemap_nopage,         /* nopage */
        NULL,                   /* wppage */
        NULL,                   /* swapout */
        NULL,                   /* swapin */
};

/* This is used for a general mmap of a disk file */
int generic_file_mmap(struct inode * inode, struct file * file, struct vm_area_struct * vma)
{
        struct vm_operations_struct * ops;

        if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) {
                ops = &file_shared_mmap;
                /* share_page() can only guarantee proper page sharing if
                 * the offsets are all page aligned. */
                if (vma->vm_offset & (PAGE_SIZE - 1))
                        return -EINVAL;
        } else {
                ops = &file_private_mmap;
                if (vma->vm_offset & (inode->i_sb->s_blocksize - 1))
                        return -EINVAL;
        }
        if (!inode->i_sb || !S_ISREG(inode->i_mode))
                return -EACCES;
        if (!inode->i_op || !inode->i_op->readpage)
                return -ENOEXEC;
        UPDATE_ATIME(inode)
        vma->vm_inode = inode;
        inode->i_count++;
        vma->vm_ops = ops;
        return 0;
}
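
/*
 * Illustrative user-space sketch, not part of this file: the checks in
 * generic_file_mmap() above are what reject the second mapping below.
 * A writable MAP_SHARED mapping must use a page-aligned file offset,
 * while a read-only private mapping only has to be block-aligned.
 * "data.bin" is a made-up file name; a 4 kB page size is assumed.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
        int fd = open("data.bin", O_RDWR);
        void *ok, *bad;

        if (fd < 0)
                return 1;

        /* page-aligned offset 0: accepted, faults are served by filemap_nopage() */
        ok = mmap(0, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

        /* offset 512 is not page-aligned: the mapping fails with EINVAL */
        bad = mmap(0, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 512);

        printf("aligned: %p, misaligned: %p\n", ok, bad);
        if (ok != MAP_FAILED)
                munmap(ok, 4096);
        close(fd);
        return 0;
}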
 
 
 
 
/*
 * The msync() system call.
 */

static int msync_interval(struct vm_area_struct * vma,
        unsigned long start, unsigned long end, int flags)
{
        if (vma->vm_inode && vma->vm_ops && vma->vm_ops->sync) {
                int error;
                error = vma->vm_ops->sync(vma, start, end-start, flags);
                if (error)
                        return error;
                if (flags & MS_SYNC)
                        return file_fsync(vma->vm_inode, NULL);
                return 0;
        }
        return 0;
}

asmlinkage int sys_msync(unsigned long start, size_t len, int flags)
{
        unsigned long end;
        struct vm_area_struct * vma;
        int unmapped_error, error;

        if (start & ~PAGE_MASK)
                return -EINVAL;
        len = (len + ~PAGE_MASK) & PAGE_MASK;
        end = start + len;
        if (end < start)
                return -EINVAL;
        if (flags & ~(MS_ASYNC | MS_INVALIDATE | MS_SYNC))
                return -EINVAL;
        if (end == start)
                return 0;
        /*
         * If the interval [start,end) covers some unmapped address ranges,
         * just ignore them, but return -EFAULT at the end.
         */
        vma = find_vma(current->mm, start);
        unmapped_error = 0;
        for (;;) {
                /* Still start < end. */
                if (!vma)
                        return -EFAULT;
                /* Here start < vma->vm_end. */
                if (start < vma->vm_start) {
                        unmapped_error = -EFAULT;
                        start = vma->vm_start;
                }
                /* Here vma->vm_start <= start < vma->vm_end. */
                if (end <= vma->vm_end) {
                        if (start < end) {
                                error = msync_interval(vma, start, end, flags);
                                if (error)
                                        return error;
                        }
                        return unmapped_error;
                }
                /* Here vma->vm_start <= start < vma->vm_end < end. */
                error = msync_interval(vma, start, vma->vm_end, flags);
                if (error)
                        return error;
                start = vma->vm_end;
                vma = vma->vm_next;
        }
}
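
/*
 * Illustrative user-space sketch, not part of this file: dirtying a
 * shared file mapping and flushing it with MS_SYNC, which on this kernel
 * goes through sys_msync() -> msync_interval() -> filemap_sync() above,
 * followed by file_fsync() because of MS_SYNC.  "data.bin" is a made-up
 * file name; a 4 kB page size is assumed.
 */
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
        int fd = open("data.bin", O_RDWR);
        char *p;

        if (fd < 0)
                return 1;
        p = mmap(0, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED) {
                close(fd);
                return 1;
        }

        memcpy(p, "hello", 5);          /* dirties the mapped page */
        msync(p, 4096, MS_SYNC);        /* write it back and wait for it */

        munmap(p, 4096);
        close(fd);
        return 0;
}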
 
 
