OpenCores
URL https://opencores.org/ocsvn/or1k_soc_on_altera_embedded_dev_kit/or1k_soc_on_altera_embedded_dev_kit/trunk

Subversion Repositories or1k_soc_on_altera_embedded_dev_kit

tags/linux-2.6/linux-2.6.24_or32_unified_v2.3/mm/mremap.c (rev 18)

/*
 *      mm/mremap.c
 *
 *      (C) Copyright 1996 Linus Torvalds
 *
 *      Address space accounting code   <alan@redhat.com>
 *      (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>

#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
 
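/*
 * Look up the pmd covering addr in mm's page tables, for the source side
 * of the move.  Returns NULL if any level is missing or bad; nothing is
 * allocated here.
 */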
static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        pgd = pgd_offset(mm, addr);
        if (pgd_none_or_clear_bad(pgd))
                return NULL;

        pud = pud_offset(pgd, addr);
        if (pud_none_or_clear_bad(pud))
                return NULL;

        pmd = pmd_offset(pud, addr);
        if (pmd_none_or_clear_bad(pmd))
                return NULL;

        return pmd;
}
 
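/*
 * Make sure the destination has page tables down to pte level for addr,
 * allocating pud/pmd/pte pages as needed.  Returns the pmd, or NULL if
 * an allocation failed.
 */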
static pmd_t *alloc_new_pmd(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        pgd = pgd_offset(mm, addr);
        pud = pud_alloc(mm, pgd, addr);
        if (!pud)
                return NULL;

        pmd = pmd_alloc(mm, pud, addr);
        if (!pmd)
                return NULL;

        if (!pmd_present(*pmd) && __pte_alloc(mm, pmd, addr))
                return NULL;

        return pmd;
}
 
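/*
 * Move the ptes for [old_addr, old_end) from old_pmd over to new_pmd at
 * new_addr.  Both pte locks are held across the copy, and for file-backed
 * vmas i_mmap_lock is taken so vmtruncate cannot race with the move.
 */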
static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
                unsigned long old_addr, unsigned long old_end,
                struct vm_area_struct *new_vma, pmd_t *new_pmd,
                unsigned long new_addr)
{
        struct address_space *mapping = NULL;
        struct mm_struct *mm = vma->vm_mm;
        pte_t *old_pte, *new_pte, pte;
        spinlock_t *old_ptl, *new_ptl;

        if (vma->vm_file) {
                /*
                 * Subtle point from Rajesh Venkatasubramanian: before
                 * moving file-based ptes, we must lock vmtruncate out,
                 * since it might clean the dst vma before the src vma,
                 * and we propagate stale pages into the dst afterward.
                 */
                mapping = vma->vm_file->f_mapping;
                spin_lock(&mapping->i_mmap_lock);
                if (new_vma->vm_truncate_count &&
                    new_vma->vm_truncate_count != vma->vm_truncate_count)
                        new_vma->vm_truncate_count = 0;
        }

        /*
         * We don't have to worry about the ordering of src and dst
         * pte locks because exclusive mmap_sem prevents deadlock.
         */
        old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
        new_pte = pte_offset_map_nested(new_pmd, new_addr);
        new_ptl = pte_lockptr(mm, new_pmd);
        if (new_ptl != old_ptl)
                spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
        arch_enter_lazy_mmu_mode();

        for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
                                   new_pte++, new_addr += PAGE_SIZE) {
                if (pte_none(*old_pte))
                        continue;
                pte = ptep_clear_flush(vma, old_addr, old_pte);
                pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
                set_pte_at(mm, new_addr, new_pte, pte);
        }

        arch_leave_lazy_mmu_mode();
        if (new_ptl != old_ptl)
                spin_unlock(new_ptl);
        pte_unmap_nested(new_pte - 1);
        pte_unmap_unlock(old_pte - 1, old_ptl);
        if (mapping)
                spin_unlock(&mapping->i_mmap_lock);
}
 
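/*
 * Cap on how much is handed to move_ptes() in one go, so the pte locks
 * are not held for too long; move_page_tables() calls cond_resched()
 * between chunks.
 */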
#define LATENCY_LIMIT   (64 * PAGE_SIZE)
 
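/*
 * Move len bytes worth of page table entries from old_addr in vma to
 * new_addr in new_vma, in pmd-sized (and LATENCY_LIMIT-capped) chunks.
 * Returns how many bytes were moved; less than len means a destination
 * pmd could not be allocated.
 */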
unsigned long move_page_tables(struct vm_area_struct *vma,
                unsigned long old_addr, struct vm_area_struct *new_vma,
                unsigned long new_addr, unsigned long len)
{
        unsigned long extent, next, old_end;
        pmd_t *old_pmd, *new_pmd;

        old_end = old_addr + len;
        flush_cache_range(vma, old_addr, old_end);

        for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
                cond_resched();
                next = (old_addr + PMD_SIZE) & PMD_MASK;
                if (next - 1 > old_end)
                        next = old_end;
                extent = next - old_addr;
                old_pmd = get_old_pmd(vma->vm_mm, old_addr);
                if (!old_pmd)
                        continue;
                new_pmd = alloc_new_pmd(vma->vm_mm, new_addr);
                if (!new_pmd)
                        break;
                next = (new_addr + PMD_SIZE) & PMD_MASK;
                if (extent > next - new_addr)
                        extent = next - new_addr;
                if (extent > LATENCY_LIMIT)
                        extent = LATENCY_LIMIT;
                move_ptes(vma, old_pmd, old_addr, old_addr + extent,
                                new_vma, new_pmd, new_addr);
        }

        return len + old_addr - old_end;        /* how much done */
}
 
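/*
 * Relocate a mapping: duplicate the vma at new_addr with copy_vma(), move
 * the page tables across, then unmap the old range, keeping VM_ACCOUNT,
 * locked_vm and hiwater_vm bookkeeping consistent.  Returns the new
 * address, or -ENOMEM if the page tables could not be moved.
 */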
static unsigned long move_vma(struct vm_area_struct *vma,
                unsigned long old_addr, unsigned long old_len,
                unsigned long new_len, unsigned long new_addr)
{
        struct mm_struct *mm = vma->vm_mm;
        struct vm_area_struct *new_vma;
        unsigned long vm_flags = vma->vm_flags;
        unsigned long new_pgoff;
        unsigned long moved_len;
        unsigned long excess = 0;
        unsigned long hiwater_vm;
        int split = 0;

        /*
         * We'd prefer to avoid failure later on in do_munmap:
         * which may split one vma into three before unmapping.
         */
        if (mm->map_count >= sysctl_max_map_count - 3)
                return -ENOMEM;

        new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
        new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff);
        if (!new_vma)
                return -ENOMEM;

        moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len);
        if (moved_len < old_len) {
                /*
                 * On error, move entries back from new area to old,
                 * which will succeed since page tables still there,
                 * and then proceed to unmap new area instead of old.
                 */
                move_page_tables(new_vma, new_addr, vma, old_addr, moved_len);
                vma = new_vma;
                old_len = new_len;
                old_addr = new_addr;
                new_addr = -ENOMEM;
        }

        /* Conceal VM_ACCOUNT so old reservation is not undone */
        if (vm_flags & VM_ACCOUNT) {
                vma->vm_flags &= ~VM_ACCOUNT;
                excess = vma->vm_end - vma->vm_start - old_len;
                if (old_addr > vma->vm_start &&
                    old_addr + old_len < vma->vm_end)
                        split = 1;
        }

        /*
         * If we failed to move page tables we still do total_vm increment
         * since do_munmap() will decrement it by old_len == new_len.
         *
         * Since total_vm is about to be raised artificially high for a
         * moment, we need to restore high watermark afterwards: if stats
         * are taken meanwhile, total_vm and hiwater_vm appear too high.
         * If this were a serious issue, we'd add a flag to do_munmap().
         */
        hiwater_vm = mm->hiwater_vm;
        mm->total_vm += new_len >> PAGE_SHIFT;
        vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT);

        if (do_munmap(mm, old_addr, old_len) < 0) {
                /* OOM: unable to split vma, just get accounts right */
                vm_unacct_memory(excess >> PAGE_SHIFT);
                excess = 0;
        }
        mm->hiwater_vm = hiwater_vm;

        /* Restore VM_ACCOUNT if one or two pieces of vma left */
        if (excess) {
                vma->vm_flags |= VM_ACCOUNT;
                if (split)
                        vma->vm_next->vm_flags |= VM_ACCOUNT;
        }

        if (vm_flags & VM_LOCKED) {
                mm->locked_vm += new_len >> PAGE_SHIFT;
                if (new_len > old_len)
                        make_pages_present(new_addr + old_len,
                                           new_addr + new_len);
        }

        return new_addr;
}
 
/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
unsigned long do_mremap(unsigned long addr,
        unsigned long old_len, unsigned long new_len,
        unsigned long flags, unsigned long new_addr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long ret = -EINVAL;
        unsigned long charged = 0;

        if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
                goto out;

        if (addr & ~PAGE_MASK)
                goto out;

        old_len = PAGE_ALIGN(old_len);
        new_len = PAGE_ALIGN(new_len);

        /*
         * We allow a zero old-len as a special case
         * for DOS-emu "duplicate shm area" thing. But
         * a zero new-len is nonsensical.
         */
        if (!new_len)
                goto out;

        /* new_addr is only valid if MREMAP_FIXED is specified */
        if (flags & MREMAP_FIXED) {
                if (new_addr & ~PAGE_MASK)
                        goto out;
                if (!(flags & MREMAP_MAYMOVE))
                        goto out;

                if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
                        goto out;

                /* Check if the location we're moving into overlaps the
                 * old location at all, and fail if it does.
                 */
                if ((new_addr <= addr) && (new_addr+new_len) > addr)
                        goto out;

                if ((addr <= new_addr) && (addr+old_len) > new_addr)
                        goto out;

                ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
                if (ret)
                        goto out;

                ret = do_munmap(mm, new_addr, new_len);
                if (ret)
                        goto out;
        }

        /*
         * Always allow a shrinking remap: that just unmaps
         * the unnecessary pages..
         * do_munmap does all the needed commit accounting
         */
        if (old_len >= new_len) {
                ret = do_munmap(mm, addr+new_len, old_len - new_len);
                if (ret && old_len != new_len)
                        goto out;
                ret = addr;
                if (!(flags & MREMAP_FIXED) || (new_addr == addr))
                        goto out;
                old_len = new_len;
        }

        /*
         * Ok, we need to grow..  or relocate.
         */
        ret = -EFAULT;
        vma = find_vma(mm, addr);
        if (!vma || vma->vm_start > addr)
                goto out;
        if (is_vm_hugetlb_page(vma)) {
                ret = -EINVAL;
                goto out;
        }
        /* We can't remap across vm area boundaries */
        if (old_len > vma->vm_end - addr)
                goto out;
        if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) {
                if (new_len > old_len)
                        goto out;
        }
        if (vma->vm_flags & VM_LOCKED) {
                unsigned long locked, lock_limit;
                locked = mm->locked_vm << PAGE_SHIFT;
                lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
                locked += new_len - old_len;
                ret = -EAGAIN;
                if (locked > lock_limit && !capable(CAP_IPC_LOCK))
                        goto out;
        }
        if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT)) {
                ret = -ENOMEM;
                goto out;
        }

        if (vma->vm_flags & VM_ACCOUNT) {
                charged = (new_len - old_len) >> PAGE_SHIFT;
                if (security_vm_enough_memory(charged))
                        goto out_nc;
        }

        /* old_len exactly to the end of the area..
         * And we're not relocating the area.
         */
        if (old_len == vma->vm_end - addr &&
            !((flags & MREMAP_FIXED) && (addr != new_addr)) &&
            (old_len != new_len || !(flags & MREMAP_MAYMOVE))) {
                unsigned long max_addr = TASK_SIZE;
                if (vma->vm_next)
                        max_addr = vma->vm_next->vm_start;
                /* can we just expand the current mapping? */
                if (max_addr - addr >= new_len) {
                        int pages = (new_len - old_len) >> PAGE_SHIFT;

                        vma_adjust(vma, vma->vm_start,
                                addr + new_len, vma->vm_pgoff, NULL);

                        mm->total_vm += pages;
                        vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages);
                        if (vma->vm_flags & VM_LOCKED) {
                                mm->locked_vm += pages;
                                make_pages_present(addr + old_len,
                                                   addr + new_len);
                        }
                        ret = addr;
                        goto out;
                }
        }

        /*
         * We weren't able to just expand or shrink the area,
         * we need to create a new one and move it..
         */
        ret = -ENOMEM;
        if (flags & MREMAP_MAYMOVE) {
                if (!(flags & MREMAP_FIXED)) {
                        unsigned long map_flags = 0;
                        if (vma->vm_flags & VM_MAYSHARE)
                                map_flags |= MAP_SHARED;

                        new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
                                                vma->vm_pgoff, map_flags);
                        if (new_addr & ~PAGE_MASK) {
                                ret = new_addr;
                                goto out;
                        }

                        ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
                        if (ret)
                                goto out;
                }
                ret = move_vma(vma, addr, old_len, new_len, new_addr);
        }
out:
        if (ret & ~PAGE_MASK)
                vm_unacct_memory(charged);
out_nc:
        return ret;
}
 
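/*
 * mremap(2) entry point: do_mremap() must be called with mmap_sem held
 * for writing.
 */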
asmlinkage unsigned long sys_mremap(unsigned long addr,
        unsigned long old_len, unsigned long new_len,
        unsigned long flags, unsigned long new_addr)
{
        unsigned long ret;

        down_write(&current->mm->mmap_sem);
        ret = do_mremap(addr, old_len, new_len, flags, new_addr);
        up_write(&current->mm->mmap_sem);
        return ret;
}
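For reference, a minimal userspace sketch (not part of the file above) of how this path is typically exercised: an anonymous mapping is created with mmap() and then grown with mremap() using MREMAP_MAYMOVE, which lets the kernel relocate the mapping via move_vma()/move_page_tables() above when it cannot be expanded in place.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
        size_t old_len = 4096, new_len = 8192;

        /* Anonymous, writable mapping to play with. */
        void *old = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (old == MAP_FAILED)
                return 1;
        strcpy(old, "hello");

        /* Grow it; MREMAP_MAYMOVE allows the kernel to pick a new address. */
        void *new = mremap(old, old_len, new_len, MREMAP_MAYMOVE);
        if (new == MAP_FAILED)
                return 1;

        printf("mapping %s, contents \"%s\"\n",
               new == old ? "expanded in place" : "moved", (char *)new);
        munmap(new, new_len);
        return 0;
}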
