OpenCores Subversion repository or1k_old: trunk/rc203soc/sw/uClinux/mm/mremap.c (rev 1634, author jcastillo)

/*
 *      linux/mm/mremap.c
 *
 *      (C) Copyright 1996 Linus Torvalds
 */

#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/errno.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/malloc.h>
#include <linux/swap.h>

#include <asm/segment.h>
#include <asm/system.h>
#include <asm/pgtable.h>

extern int vm_enough_memory(long pages);

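/*
 * Walk the three-level page tables (pgd -> pmd -> pte) and return a
 * pointer to the pte that maps 'addr' in 'mm', or NULL if no page is
 * mapped there.  Bad pgd/pmd entries are reported and cleared.
 */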
static inline pte_t *get_one_pte(struct mm_struct *mm, unsigned long addr)
{
        pgd_t * pgd;
        pmd_t * pmd;
        pte_t * pte = NULL;

        pgd = pgd_offset(mm, addr);
        if (pgd_none(*pgd))
                goto end;
        if (pgd_bad(*pgd)) {
                printk("move_one_page: bad source pgd (%08lx)\n", pgd_val(*pgd));
                pgd_clear(pgd);
                goto end;
        }

        pmd = pmd_offset(pgd, addr);
        if (pmd_none(*pmd))
                goto end;
        if (pmd_bad(*pmd)) {
                printk("move_one_page: bad source pmd (%08lx)\n", pmd_val(*pmd));
                pmd_clear(pmd);
                goto end;
        }

        pte = pte_offset(pmd, addr);
        if (pte_none(*pte))
                pte = NULL;
end:
        return pte;
}

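/*
 * Make sure page tables exist for 'addr' in 'mm', allocating a pmd
 * and pte as needed.  Returns the pte, or NULL if allocation failed.
 */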
static inline pte_t *alloc_one_pte(struct mm_struct *mm, unsigned long addr)
{
        pmd_t * pmd;
        pte_t * pte = NULL;

        pmd = pmd_alloc(pgd_offset(mm, addr), addr);
        if (pmd)
                pte = pte_alloc(pmd, addr);
        return pte;
}

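/*
 * Move a single entry: clear the source pte and install its value at
 * the destination.  Returns non-zero only when the source page was
 * present but no destination pte was supplied (allocation failure).
 */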
static inline int copy_one_pte(pte_t * src, pte_t * dst)
{
        int error = 0;
        pte_t pte = *src;

        if (!pte_none(pte)) {
                error++;
                if (dst) {
                        pte_clear(src);
                        set_pte(dst, pte);
                        error--;
                }
        }
        return error;
}

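/*
 * Move the mapping of one page from 'old_addr' to 'new_addr'.
 * Returns non-zero if the destination page table could not be set up.
 */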
static int move_one_page(struct mm_struct *mm, unsigned long old_addr, unsigned long new_addr)
{
        int error = 0;
        pte_t * src;

        src = get_one_pte(mm, old_addr);
        if (src)
                error = copy_one_pte(src, alloc_one_pte(mm, new_addr));
        return error;
}

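/*
 * Move 'len' bytes worth of page table entries from 'old_addr' to
 * 'new_addr', one page at a time.  On allocation failure the pages
 * already moved are moved back.  Returns 0 on success, -1 on failure.
 */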
static int move_page_tables(struct mm_struct * mm,
        unsigned long new_addr, unsigned long old_addr, unsigned long len)
{
        unsigned long offset = len;

        flush_cache_range(mm, old_addr, old_addr + len);
        flush_tlb_range(mm, old_addr, old_addr + len);

        /*
         * This is not the clever way to do this, but we're taking the
         * easy way out on the assumption that most remappings will be
         * only a few pages.. This also makes error recovery easier.
         */
        while (offset) {
                offset -= PAGE_SIZE;
                if (move_one_page(mm, old_addr + offset, new_addr + offset))
                        goto oops_we_failed;
        }
        return 0;

        /*
         * Ok, the move failed because we didn't have enough pages for
         * the new page table tree. This is unlikely, but we have to
         * take the possibility into account. In that case we just move
         * all the pages back (this will work, because we still have
         * the old page tables)
         */
oops_we_failed:
        flush_cache_range(mm, new_addr, new_addr + len);
        while ((offset += PAGE_SIZE) < len)
                move_one_page(mm, new_addr + offset, old_addr + offset);
        flush_tlb_range(mm, new_addr, new_addr + len);
        zap_page_range(mm, new_addr, new_addr + len);
        return -1;
}

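/*
 * Set up a new vma covering [new_addr, new_addr + new_len), move the
 * page tables across, and unmap the old range.  Returns the new
 * address on success, or -ENOMEM.
 */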
static inline unsigned long move_vma(struct vm_area_struct * vma,
        unsigned long addr, unsigned long old_len, unsigned long new_len)
{
        struct vm_area_struct * new_vma;

        new_vma = (struct vm_area_struct *)
                kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
        if (new_vma) {
                unsigned long new_addr = get_unmapped_area(addr, new_len);

                if (new_addr && !move_page_tables(current->mm, new_addr, addr, old_len)) {
                        *new_vma = *vma;
                        new_vma->vm_start = new_addr;
                        new_vma->vm_end = new_addr+new_len;
                        new_vma->vm_offset = vma->vm_offset + (addr - vma->vm_start);
                        if (new_vma->vm_inode)
                                new_vma->vm_inode->i_count++;
                        if (new_vma->vm_ops && new_vma->vm_ops->open)
                                new_vma->vm_ops->open(new_vma);
                        insert_vm_struct(current->mm, new_vma);
                        merge_segments(current->mm, new_vma->vm_start, new_vma->vm_end);
                        do_munmap(addr, old_len);
                        current->mm->total_vm += new_len >> PAGE_SHIFT;
                        return new_addr;
                }
                kfree(new_vma);
        }
        return -ENOMEM;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 */
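/*
 * Error returns: -EINVAL for an unaligned address, -EFAULT when no
 * vma covers the range, -EAGAIN when an mlock()ed mapping would
 * exceed RLIMIT_MEMLOCK, and -ENOMEM otherwise.
 */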
asmlinkage unsigned long sys_mremap(unsigned long addr,
        unsigned long old_len, unsigned long new_len,
        unsigned long flags)
{
        struct vm_area_struct *vma;

        if (addr & ~PAGE_MASK)
                return -EINVAL;
        old_len = PAGE_ALIGN(old_len);
        new_len = PAGE_ALIGN(new_len);

        /*
         * Always allow a shrinking remap: that just unmaps
         * the unnecessary pages..
         */
        if (old_len >= new_len) {
                do_munmap(addr+new_len, old_len - new_len);
                return addr;
        }

        /*
         * Ok, we need to grow..
         */
        vma = find_vma(current->mm, addr);
        if (!vma || vma->vm_start > addr)
                return -EFAULT;
        /* We can't remap across vm area boundaries */
        if (old_len > vma->vm_end - addr)
                return -EFAULT;
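        /*
         * Growing: the locked size must stay within RLIMIT_MEMLOCK,
         * and the total address space within RLIMIT_AS.
         */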
        if (vma->vm_flags & VM_LOCKED) {
                unsigned long locked = current->mm->locked_vm << PAGE_SHIFT;
                locked += new_len - old_len;
                if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
                        return -EAGAIN;
        }
        if ((current->mm->total_vm << PAGE_SHIFT) + (new_len - old_len)
            > current->rlim[RLIMIT_AS].rlim_cur)
                return -ENOMEM;
        /* Private writable mapping? Check memory availability.. */
        if ((vma->vm_flags & (VM_SHARED | VM_WRITE)) == VM_WRITE) {
                if (!vm_enough_memory((new_len - old_len) >> PAGE_SHIFT))
                        return -ENOMEM;
        }

        /* old_len exactly to the end of the area.. */
        if (old_len == vma->vm_end - addr &&
            (old_len != new_len || !(flags & MREMAP_MAYMOVE))) {
                unsigned long max_addr = MAX_USER_ADDR;
                if (vma->vm_next)
                        max_addr = vma->vm_next->vm_start;
                /* can we just expand the current mapping? */
                if (max_addr - addr >= new_len) {
                        int pages = (new_len - old_len) >> PAGE_SHIFT;
                        vma->vm_end = addr + new_len;
                        current->mm->total_vm += pages;
                        if (vma->vm_flags & VM_LOCKED)
                                current->mm->locked_vm += pages;
                        return addr;
                }
        }

        /*
         * We weren't able to just expand or shrink the area,
         * we need to create a new one and move it..
         */
        if (flags & MREMAP_MAYMOVE)
                return move_vma(vma, addr, old_len, new_len);
        return -ENOMEM;
}
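
For context, sys_mremap() is reached from userspace through the mremap(2)
system call. The sketch below shows a minimal caller; it is a hypothetical
example (not part of this file), uses the modern glibc mremap() wrapper,
and assumes 4 KB pages:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
        size_t old_len = 4096, new_len = 8192;   /* one page -> two pages */

        /* Private anonymous mapping, writable. */
        char *p = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }
        strcpy(p, "hello");

        /* MREMAP_MAYMOVE lets the kernel fall back to move_vma() when
         * the vma cannot simply be expanded in place. */
        char *q = mremap(p, old_len, new_len, MREMAP_MAYMOVE);
        if (q == MAP_FAILED) {
                perror("mremap");
                return 1;
        }

        printf("moved: %s, contents preserved: %s\n",
               q == p ? "no" : "yes", q);
        munmap(q, new_len);
        return 0;
}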
