or1k_old/trunk/rc203soc/sw/uClinux/mm/mlock.c (OpenCores or1k_old Subversion repository, rev 1634, jcastillo)

/*
 *      linux/mm/mlock.c
 *
 *  (C) Copyright 1995 Linus Torvalds
 */
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/errno.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/malloc.h>

#include <asm/segment.h>
#include <asm/system.h>
#include <asm/pgtable.h>

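/* The flag change covers the whole VMA: just rewrite its flags. */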
static inline int mlock_fixup_all(struct vm_area_struct * vma, int newflags)
{
        vma->vm_flags = newflags;
        return 0;
}

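/*
 * The change covers the head of the VMA: split off a new VMA "n"
 * for [vm_start, end) with the new flags, and shrink the original
 * to [end, vm_end).
 */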
static inline int mlock_fixup_start(struct vm_area_struct * vma,
        unsigned long end, int newflags)
{
        struct vm_area_struct * n;

        n = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
        if (!n)
                return -EAGAIN;
        *n = *vma;
        vma->vm_start = end;
        n->vm_end = end;
        vma->vm_offset += vma->vm_start - n->vm_start;
        n->vm_flags = newflags;
        if (n->vm_inode)
                n->vm_inode->i_count++;
        if (n->vm_ops && n->vm_ops->open)
                n->vm_ops->open(n);
        insert_vm_struct(current->mm, n);
        return 0;
}

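/*
 * The change covers the tail of the VMA: split off a new VMA "n"
 * for [start, vm_end) with the new flags, and shrink the original
 * to [vm_start, start).
 */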
static inline int mlock_fixup_end(struct vm_area_struct * vma,
        unsigned long start, int newflags)
{
        struct vm_area_struct * n;

        n = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
        if (!n)
                return -EAGAIN;
        *n = *vma;
        vma->vm_end = start;
        n->vm_start = start;
        n->vm_offset += n->vm_start - vma->vm_start;
        n->vm_flags = newflags;
        if (n->vm_inode)
                n->vm_inode->i_count++;
        if (n->vm_ops && n->vm_ops->open)
                n->vm_ops->open(n);
        insert_vm_struct(current->mm, n);
        return 0;
}

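/*
 * The change falls in the middle of the VMA: split it into three
 * pieces, keeping the original as the middle piece [start, end)
 * with the new flags and inserting "left" and "right" around it.
 */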
static inline int mlock_fixup_middle(struct vm_area_struct * vma,
        unsigned long start, unsigned long end, int newflags)
{
        struct vm_area_struct * left, * right;

        left = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
        if (!left)
                return -EAGAIN;
        right = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
        if (!right) {
                kfree(left);
                return -EAGAIN;
        }
        *left = *vma;
        *right = *vma;
        left->vm_end = start;
        vma->vm_start = start;
        vma->vm_end = end;
        right->vm_start = end;
        vma->vm_offset += vma->vm_start - left->vm_start;
        right->vm_offset += right->vm_start - left->vm_start;
        vma->vm_flags = newflags;
        if (vma->vm_inode)
                vma->vm_inode->i_count += 2;
        if (vma->vm_ops && vma->vm_ops->open) {
                vma->vm_ops->open(left);
                vma->vm_ops->open(right);
        }
        insert_vm_struct(current->mm, left);
        insert_vm_struct(current->mm, right);
        return 0;
}

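/*
 * Apply the flag change to the part of "vma" overlapping
 * [start, end), splitting the VMA as needed; on success, update
 * the locked-page accounting and touch freshly locked readable
 * pages so they are faulted in immediately.
 */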
static int mlock_fixup(struct vm_area_struct * vma,
        unsigned long start, unsigned long end, unsigned int newflags)
{
        int pages, retval;

        if (newflags == vma->vm_flags)
                return 0;

        if (start == vma->vm_start) {
                if (end == vma->vm_end)
                        retval = mlock_fixup_all(vma, newflags);
                else
                        retval = mlock_fixup_start(vma, end, newflags);
        } else {
                if (end == vma->vm_end)
                        retval = mlock_fixup_end(vma, start, newflags);
                else
                        retval = mlock_fixup_middle(vma, start, end, newflags);
        }
        if (!retval) {
                /* keep track of amount of locked VM */
                pages = (end - start) >> PAGE_SHIFT;
                if (!(newflags & VM_LOCKED))
                        pages = -pages;
                vma->vm_mm->locked_vm += pages;

                if ((newflags & VM_LOCKED) && (newflags & VM_READ))
                        while (start < end) {
                                int c = get_user((int *) start);
                                __asm__ __volatile__("": :"r" (c));
                                start += PAGE_SIZE;
                        }
        }
        return retval;
}

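/*
 * Set or clear VM_LOCKED across every VMA covering [start, start+len),
 * failing with -ENOMEM if the range has a hole, then re-merge
 * adjacent segments.
 */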
static int do_mlock(unsigned long start, size_t len, int on)
{
        unsigned long nstart, end, tmp;
        struct vm_area_struct * vma, * next;
        int error;

        if (!suser())
                return -EPERM;
        len = (len + ~PAGE_MASK) & PAGE_MASK;
        end = start + len;
        if (end < start)
                return -EINVAL;
        if (end == start)
                return 0;
        vma = find_vma(current->mm, start);
        if (!vma || vma->vm_start > start)
                return -ENOMEM;

        for (nstart = start ; ; ) {
                unsigned int newflags;

                /* Here we know that  vma->vm_start <= nstart < vma->vm_end. */

                newflags = vma->vm_flags | VM_LOCKED;
                if (!on)
                        newflags &= ~VM_LOCKED;

                if (vma->vm_end >= end) {
                        error = mlock_fixup(vma, nstart, end, newflags);
                        break;
                }

                tmp = vma->vm_end;
                next = vma->vm_next;
                error = mlock_fixup(vma, nstart, tmp, newflags);
                if (error)
                        break;
                nstart = tmp;
                vma = next;
                if (!vma || vma->vm_start != nstart) {
                        error = -ENOMEM;
                        break;
                }
        }
        merge_segments(current->mm, start, end);
        return error;
}

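/*
 * mlock(2): page-align the range, then check it against
 * RLIMIT_MEMLOCK and against half of physical memory before
 * locking.
 */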
asmlinkage int sys_mlock(unsigned long start, size_t len)
{
        unsigned long locked;
        unsigned long lock_limit;

        len = (len + (start & ~PAGE_MASK) + ~PAGE_MASK) & PAGE_MASK;
        start &= PAGE_MASK;

        locked = len >> PAGE_SHIFT;
        locked += current->mm->locked_vm;

        lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
        lock_limit >>= PAGE_SHIFT;

        /* check against resource limits */
        if (locked > lock_limit)
                return -ENOMEM;

        /* we may lock at most half of physical memory... */
        /* (this check is pretty bogus, but doesn't hurt) */
        if (locked > (MAP_NR(high_memory) >> 1))
                return -ENOMEM;

        return do_mlock(start, len, 1);
}

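/* munlock(2): page-align the range and clear VM_LOCKED on it. */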
asmlinkage int sys_munlock(unsigned long start, size_t len)
{
        len = (len + (start & ~PAGE_MASK) + ~PAGE_MASK) & PAGE_MASK;
        start &= PAGE_MASK;
        return do_mlock(start, len, 0);
}

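/*
 * Set or clear VM_LOCKED on every VMA of the current process.
 * MCL_FUTURE is remembered in mm->def_flags so that future
 * mappings are created locked.
 */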
static int do_mlockall(int flags)
{
        int error;
        unsigned int def_flags;
        struct vm_area_struct * vma;

        if (!suser())
                return -EPERM;

        def_flags = 0;
        if (flags & MCL_FUTURE)
                def_flags = VM_LOCKED;
        current->mm->def_flags = def_flags;

        error = 0;
        for (vma = current->mm->mmap; vma ; vma = vma->vm_next) {
                unsigned int newflags;

                newflags = vma->vm_flags | VM_LOCKED;
                if (!(flags & MCL_CURRENT))
                        newflags &= ~VM_LOCKED;
                error = mlock_fixup(vma, vma->vm_start, vma->vm_end, newflags);
                if (error)
                        break;
        }
        merge_segments(current->mm, 0, TASK_SIZE);
        return error;
}

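/*
 * mlockall(2): validate the flags and check the process's whole
 * address space against RLIMIT_MEMLOCK before locking it.
 */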
asmlinkage int sys_mlockall(int flags)
{
        unsigned long lock_limit;

        if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
                return -EINVAL;

        lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
        lock_limit >>= PAGE_SHIFT;

        if (current->mm->total_vm > lock_limit)
                return -ENOMEM;

        /* we may lock at most half of physical memory... */
        /* (this check is pretty bogus, but doesn't hurt) */
        if (current->mm->total_vm > (MAP_NR(high_memory) >> 1))
                return -ENOMEM;

        return do_mlockall(flags);
}

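/* munlockall(2): unlock everything and stop locking future mappings. */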
asmlinkage int sys_munlockall(void)
{
        return do_mlockall(0);
}
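
For reference, a minimal userspace sketch of how these entry points are
typically exercised (illustrative only, not part of this file; it assumes
a libc that wraps sys_mlock()/sys_munlock(), and on this kernel both
paths require superuser privileges via suser() and are bounded by
RLIMIT_MEMLOCK):

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 4096;              /* one page on most targets */
        char *buf = malloc(len);

        if (!buf)
                return 1;
        /* do_mlock() splits/updates the covering VMAs and, because the
         * region is readable, pre-faults its pages. */
        if (mlock(buf, len) != 0) {
                perror("mlock");        /* EPERM without suser(), ENOMEM over the limit */
                free(buf);
                return 1;
        }
        /* ... buf is now pinned in physical memory ... */
        if (munlock(buf, len) != 0)     /* clears VM_LOCKED via mlock_fixup() */
                perror("munlock");
        free(buf);
        return 0;
}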
