linux_sd_driver/mm/fremap.c (test_project trunk, rev 62)
/*
 *   linux/mm/fremap.c
 *
 * Explicit pagetable population and nonlinear (random) mappings support.
 *
 * started by Ingo Molnar, Copyright (C) 2002, 2003
 */
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/rmap.h>
#include <linux/module.h>
#include <linux/syscalls.h>

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long addr, pte_t *ptep)
{
        pte_t pte = *ptep;

        if (pte_present(pte)) {
                struct page *page;

                flush_cache_page(vma, addr, pte_pfn(pte));
                pte = ptep_clear_flush(vma, addr, ptep);
                page = vm_normal_page(vma, addr, pte);
                if (page) {
                        if (pte_dirty(pte))
                                set_page_dirty(page);
                        page_remove_rmap(page, vma);
                        page_cache_release(page);
                        update_hiwater_rss(mm);
                        dec_mm_counter(mm, file_rss);
                }
        } else {
                if (!pte_file(pte))
                        free_swap_and_cache(pte_to_swp_entry(pte));
                pte_clear_not_present_full(mm, addr, ptep, 0);
        }
}

/*
 * Install a file pte to a given virtual memory address, release any
 * previously existing mapping.
 */
static int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
                unsigned long addr, unsigned long pgoff, pgprot_t prot)
{
        int err = -ENOMEM;
        pte_t *pte;
        spinlock_t *ptl;

        pte = get_locked_pte(mm, addr, &ptl);
        if (!pte)
                goto out;

        if (!pte_none(*pte))
                zap_pte(mm, vma, addr, pte);

        set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
        /*
         * We don't need to run update_mmu_cache() here because the "file pte"
         * being installed by install_file_pte() is not a real pte - it's a
         * non-present entry (like a swap entry), noting what file offset should
         * be mapped there when there's a fault (in a non-linear vma where
         * that's not obvious).
         */
        pte_unmap_unlock(pte, ptl);
        err = 0;
out:
        return err;
}
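
/*
 * Sketch of the encoding contract assumed above, not taken from this file
 * (the exact bit layout is arch-specific): pgoff_to_pte() packs the page
 * offset into an entry that the MMU treats as non-present, pte_file()
 * identifies such entries, and
 *
 *      pte_to_pgoff(pgoff_to_pte(pgoff)) == pgoff
 *
 * recovers the file offset at fault time.
 */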

static int populate_range(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long addr, unsigned long size, pgoff_t pgoff)
{
        int err;

        do {
                err = install_file_pte(mm, vma, addr, pgoff, vma->vm_page_prot);
                if (err)
                        return err;

                size -= PAGE_SIZE;
                addr += PAGE_SIZE;
                pgoff++;
        } while (size);

        return 0;
}

/**
 * sys_remap_file_pages - remap arbitrary pages of an existing VM_SHARED vma
 * @start: start of the remapped virtual memory range
 * @size: size of the remapped virtual memory range
 * @prot: new protection bits of the range (see NOTE)
 * @pgoff: to-be-mapped page of the backing store file
 * @flags: 0 or MAP_NONBLOCK - the latter causes no I/O.
 *
 * sys_remap_file_pages remaps arbitrary pages of an existing VM_SHARED vma
 * (shared backing store file).
 *
 * This syscall works purely via pagetables, so it's the most efficient
 * way to map the same (large) file into a given virtual window. Unlike
 * mmap()/mremap() it does not create any new vmas. The new mappings are
 * also safe across swapout.
 *
 * NOTE: the 'prot' parameter right now is ignored (but must be zero),
 * and the vma's default protection is used. Arbitrary protections
 * might be implemented in the future.
 */
asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
        unsigned long prot, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct address_space *mapping;
        unsigned long end = start + size;
        struct vm_area_struct *vma;
        int err = -EINVAL;
        int has_write_lock = 0;

        if (prot)
                return err;
        /*
         * Sanitize the syscall parameters:
         */
        start = start & PAGE_MASK;
        size = size & PAGE_MASK;

        /* Does the address range wrap, or is the span zero-sized? */
        if (start + size <= start)
                return err;

        /* Can we represent this offset inside this architecture's ptes? */
#if PTE_FILE_MAX_BITS < BITS_PER_LONG
        if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS))
                return err;
#endif

        /* We need down_write() to change vma->vm_flags. */
        down_read(&mm->mmap_sem);
 retry:
        vma = find_vma(mm, start);

        /*
         * Make sure the vma is shared, that it supports prefaulting,
         * and that the remapped range is valid and fully within
         * the single existing vma.  vm_private_data is used as a
         * swapout cursor in a VM_NONLINEAR vma.
         */
        if (!vma || !(vma->vm_flags & VM_SHARED))
                goto out;

        if (vma->vm_private_data && !(vma->vm_flags & VM_NONLINEAR))
                goto out;

        if (!(vma->vm_flags & VM_CAN_NONLINEAR))
                goto out;

        if (end <= start || start < vma->vm_start || end > vma->vm_end)
                goto out;

        /* Must set VM_NONLINEAR before any pages are populated. */
        if (!(vma->vm_flags & VM_NONLINEAR)) {
                /* Don't need a nonlinear mapping, exit success */
                if (pgoff == linear_page_index(vma, start)) {
                        err = 0;
                        goto out;
                }

                if (!has_write_lock) {
                        up_read(&mm->mmap_sem);
                        down_write(&mm->mmap_sem);
                        has_write_lock = 1;
                        goto retry;
                }
                mapping = vma->vm_file->f_mapping;
                /*
                 * page_mkclean doesn't work on nonlinear vmas, so if
                 * dirty pages need to be accounted, emulate with linear
                 * vmas.
                 */
                if (mapping_cap_account_dirty(mapping)) {
                        unsigned long addr;

                        flags &= MAP_NONBLOCK;
                        addr = mmap_region(vma->vm_file, start, size,
                                        flags, vma->vm_flags, pgoff, 1);
                        if (IS_ERR_VALUE(addr)) {
                                err = addr;
                        } else {
                                BUG_ON(addr != start);
                                err = 0;
                        }
                        goto out;
                }
                spin_lock(&mapping->i_mmap_lock);
                flush_dcache_mmap_lock(mapping);
                vma->vm_flags |= VM_NONLINEAR;
                vma_prio_tree_remove(vma, &mapping->i_mmap);
                vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
                flush_dcache_mmap_unlock(mapping);
                spin_unlock(&mapping->i_mmap_lock);
        }

        err = populate_range(mm, vma, start, size, pgoff);
        if (!err && !(flags & MAP_NONBLOCK)) {
                if (unlikely(has_write_lock)) {
                        downgrade_write(&mm->mmap_sem);
                        has_write_lock = 0;
                }
                make_pages_present(start, start+size);
        }

        /*
         * We can't clear VM_NONLINEAR because we'd have to do
         * it after ->populate completes, and that would prevent
         * downgrading the lock.  (Locks can't be upgraded).
         */

out:
        if (likely(!has_write_lock))
                up_read(&mm->mmap_sem);
        else
                up_write(&mm->mmap_sem);

        return err;
}
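
For reference, here is a minimal userspace sketch of how this syscall is
exercised (an illustration, not part of the kernel source: it assumes a
Linux build with the nonlinear-mapping support above, glibc's
remap_file_pages() wrapper, and a hypothetical "datafile" of at least two
pages):

#define _GNU_SOURCE
#include <sys/mman.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        long page = sysconf(_SC_PAGESIZE);
        int fd = open("datafile", O_RDWR);      /* hypothetical test file */
        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* The mapping must be shared: sys_remap_file_pages() rejects
         * vmas without VM_SHARED (see the checks above). */
        char *buf = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE,
                         MAP_SHARED, fd, 0);
        if (buf == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        /* Make the mapping nonlinear: virtual page 0 now shows file
         * page 1 and vice versa.  prot must be 0 (see the NOTE in the
         * kerneldoc), and pgoff is counted in pages, not bytes. */
        if (remap_file_pages(buf, page, 0, 1, 0) != 0 ||
            remap_file_pages(buf + page, page, 0, 0, 0) != 0) {
                perror("remap_file_pages");
                return 1;
        }

        printf("byte 0 of the window: %c\n", buf[0]);
        return 0;
}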
 
