/*
 * linux/mm/mlock.c
 *
 * (C) Copyright 1995 Linus Torvalds
 */
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/errno.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/malloc.h>

#include <asm/segment.h>
#include <asm/system.h>
#include <asm/pgtable.h>

static inline int mlock_fixup_all(struct vm_area_struct * vma, int newflags)
|
static inline int mlock_fixup_all(struct vm_area_struct * vma, int newflags)
|
{
|
{
|
vma->vm_flags = newflags;
|
vma->vm_flags = newflags;
|
return 0;
|
return 0;
|
}
|
}
|
|
|
static inline int mlock_fixup_start(struct vm_area_struct * vma,
|
static inline int mlock_fixup_start(struct vm_area_struct * vma,
|
unsigned long end, int newflags)
|
unsigned long end, int newflags)
|
{
|
{
|
struct vm_area_struct * n;
|
struct vm_area_struct * n;
|
|
|
n = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
|
n = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
|
if (!n)
|
if (!n)
|
return -EAGAIN;
|
return -EAGAIN;
|
*n = *vma;
|
*n = *vma;
|
vma->vm_start = end;
|
vma->vm_start = end;
|
n->vm_end = end;
|
n->vm_end = end;
|
vma->vm_offset += vma->vm_start - n->vm_start;
|
vma->vm_offset += vma->vm_start - n->vm_start;
|
n->vm_flags = newflags;
|
n->vm_flags = newflags;
|
if (n->vm_inode)
|
if (n->vm_inode)
|
n->vm_inode->i_count++;
|
n->vm_inode->i_count++;
|
if (n->vm_ops && n->vm_ops->open)
|
if (n->vm_ops && n->vm_ops->open)
|
n->vm_ops->open(n);
|
n->vm_ops->open(n);
|
insert_vm_struct(current->mm, n);
|
insert_vm_struct(current->mm, n);
|
return 0;
|
return 0;
|
}
|
}
|
|
|
static inline int mlock_fixup_end(struct vm_area_struct * vma,
|
static inline int mlock_fixup_end(struct vm_area_struct * vma,
|
unsigned long start, int newflags)
|
unsigned long start, int newflags)
|
{
|
{
|
struct vm_area_struct * n;
|
struct vm_area_struct * n;
|
|
|
n = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
|
n = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
|
if (!n)
|
if (!n)
|
return -EAGAIN;
|
return -EAGAIN;
|
*n = *vma;
|
*n = *vma;
|
vma->vm_end = start;
|
vma->vm_end = start;
|
n->vm_start = start;
|
n->vm_start = start;
|
n->vm_offset += n->vm_start - vma->vm_start;
|
n->vm_offset += n->vm_start - vma->vm_start;
|
n->vm_flags = newflags;
|
n->vm_flags = newflags;
|
if (n->vm_inode)
|
if (n->vm_inode)
|
n->vm_inode->i_count++;
|
n->vm_inode->i_count++;
|
if (n->vm_ops && n->vm_ops->open)
|
if (n->vm_ops && n->vm_ops->open)
|
n->vm_ops->open(n);
|
n->vm_ops->open(n);
|
insert_vm_struct(current->mm, n);
|
insert_vm_struct(current->mm, n);
|
return 0;
|
return 0;
|
}
|
}
|
|
|
static inline int mlock_fixup_middle(struct vm_area_struct * vma,
|
static inline int mlock_fixup_middle(struct vm_area_struct * vma,
|
unsigned long start, unsigned long end, int newflags)
|
unsigned long start, unsigned long end, int newflags)
|
{
|
{
|
struct vm_area_struct * left, * right;
|
struct vm_area_struct * left, * right;
|
|
|
left = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
|
left = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
|
if (!left)
|
if (!left)
|
return -EAGAIN;
|
return -EAGAIN;
|
right = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
|
right = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
|
if (!right) {
|
if (!right) {
|
kfree(left);
|
kfree(left);
|
return -EAGAIN;
|
return -EAGAIN;
|
}
|
}
|
*left = *vma;
|
*left = *vma;
|
*right = *vma;
|
*right = *vma;
|
left->vm_end = start;
|
left->vm_end = start;
|
vma->vm_start = start;
|
vma->vm_start = start;
|
vma->vm_end = end;
|
vma->vm_end = end;
|
right->vm_start = end;
|
right->vm_start = end;
|
vma->vm_offset += vma->vm_start - left->vm_start;
|
vma->vm_offset += vma->vm_start - left->vm_start;
|
right->vm_offset += right->vm_start - left->vm_start;
|
right->vm_offset += right->vm_start - left->vm_start;
|
vma->vm_flags = newflags;
|
vma->vm_flags = newflags;
|
if (vma->vm_inode)
|
if (vma->vm_inode)
|
vma->vm_inode->i_count += 2;
|
vma->vm_inode->i_count += 2;
|
if (vma->vm_ops && vma->vm_ops->open) {
|
if (vma->vm_ops && vma->vm_ops->open) {
|
vma->vm_ops->open(left);
|
vma->vm_ops->open(left);
|
vma->vm_ops->open(right);
|
vma->vm_ops->open(right);
|
}
|
}
|
insert_vm_struct(current->mm, left);
|
insert_vm_struct(current->mm, left);
|
insert_vm_struct(current->mm, right);
|
insert_vm_struct(current->mm, right);
|
return 0;
|
return 0;
|
}
|
}
|
|
|
static int mlock_fixup(struct vm_area_struct * vma,
|
static int mlock_fixup(struct vm_area_struct * vma,
|
unsigned long start, unsigned long end, unsigned int newflags)
|
unsigned long start, unsigned long end, unsigned int newflags)
|
{
|
{
|
int pages, retval;
|
int pages, retval;
|
|
|
if (newflags == vma->vm_flags)
|
if (newflags == vma->vm_flags)
|
return 0;
|
return 0;
|
|
|
if (start == vma->vm_start) {
|
if (start == vma->vm_start) {
|
if (end == vma->vm_end)
|
if (end == vma->vm_end)
|
retval = mlock_fixup_all(vma, newflags);
|
retval = mlock_fixup_all(vma, newflags);
|
else
|
else
|
retval = mlock_fixup_start(vma, end, newflags);
|
retval = mlock_fixup_start(vma, end, newflags);
|
} else {
|
} else {
|
if (end == vma->vm_end)
|
if (end == vma->vm_end)
|
retval = mlock_fixup_end(vma, start, newflags);
|
retval = mlock_fixup_end(vma, start, newflags);
|
else
|
else
|
retval = mlock_fixup_middle(vma, start, end, newflags);
|
retval = mlock_fixup_middle(vma, start, end, newflags);
|
}
|
}
|
if (!retval) {
|
if (!retval) {
|
/* keep track of amount of locked VM */
|
/* keep track of amount of locked VM */
|
pages = (end - start) >> PAGE_SHIFT;
|
pages = (end - start) >> PAGE_SHIFT;
|
if (!(newflags & VM_LOCKED))
|
if (!(newflags & VM_LOCKED))
|
pages = -pages;
|
pages = -pages;
|
vma->vm_mm->locked_vm += pages;
|
vma->vm_mm->locked_vm += pages;
|
|
|
if ((newflags & VM_LOCKED) && (newflags & VM_READ))
|
if ((newflags & VM_LOCKED) && (newflags & VM_READ))
|
while (start < end) {
|
while (start < end) {
|
int c = get_user((int *) start);
|
int c = get_user((int *) start);
|
__asm__ __volatile__("": :"r" (c));
|
__asm__ __volatile__("": :"r" (c));
|
start += PAGE_SIZE;
|
start += PAGE_SIZE;
|
}
|
}
|
}
|
}
|
return retval;
|
return retval;
|
}
|
}
|
|
|
/*
 * Set or clear VM_LOCKED over the range [start, start+len), walking
 * the vma list and splitting vmas as needed.  The whole range must
 * be covered by existing vmas (no holes), otherwise -ENOMEM.
 * Requires superuser.  Finally re-merge any now-identical adjacent
 * segments that the fixups may have created.
 */
static int do_mlock(unsigned long start, size_t len, int on)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct * vma, * next;
	int error;

	if (!suser())
		return -EPERM;
	len = (len + ~PAGE_MASK) & PAGE_MASK;	/* round len up to whole pages */
	end = start + len;
	if (end < start)
		return -EINVAL;		/* wrapped around the address space */
	if (end == start)
		return 0;
	vma = find_vma(current->mm, start);
	if (!vma || vma->vm_start > start)
		return -ENOMEM;		/* hole at the start of the range */

	for (nstart = start ; ; ) {
		unsigned int newflags;

		/* Here we know that  vma->vm_start <= nstart < vma->vm_end. */

		newflags = vma->vm_flags | VM_LOCKED;
		if (!on)
			newflags &= ~VM_LOCKED;

		if (vma->vm_end >= end) {
			/* last vma in the range - fix up and stop */
			error = mlock_fixup(vma, nstart, end, newflags);
			break;
		}

		/* grab vm_end/vm_next before the fixup may split the vma */
		tmp = vma->vm_end;
		next = vma->vm_next;
		error = mlock_fixup(vma, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		vma = next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;	/* hole in the middle of the range */
			break;
		}
	}
	merge_segments(current->mm, start, end);
	return error;
}

/*
 * mlock(2) system call: lock the pages covering [start, start+len)
 * into memory.  The range is expanded to page boundaries, then
 * checked against RLIMIT_MEMLOCK and against half of physical
 * memory before the real work is done by do_mlock().
 */
asmlinkage int sys_mlock(unsigned long start, size_t len)
{
	unsigned long locked;
	unsigned long lock_limit;

	/* extend len by the sub-page offset, then page-align both */
	len = (len + (start & ~PAGE_MASK) + ~PAGE_MASK) & PAGE_MASK;
	start &= PAGE_MASK;

	locked = len >> PAGE_SHIFT;
	locked += current->mm->locked_vm;

	lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;

	/* check against resource limits */
	if (locked > lock_limit)
		return -ENOMEM;

	/* we may lock at most half of physical memory... */
	/* (this check is pretty bogus, but doesn't hurt) */
	if (locked > (MAP_NR(high_memory) >> 1))
		return -ENOMEM;

	return do_mlock(start, len, 1);
}

/*
 * munlock(2) system call: unlock the pages covering
 * [start, start+len).  No limit checks needed for unlocking.
 */
asmlinkage int sys_munlock(unsigned long start, size_t len)
{
	/* extend len by the sub-page offset, then page-align both */
	len = (len + (start & ~PAGE_MASK) + ~PAGE_MASK) & PAGE_MASK;
	start &= PAGE_MASK;
	return do_mlock(start, len, 0);
}

static int do_mlockall(int flags)
|
static int do_mlockall(int flags)
|
{
|
{
|
int error;
|
int error;
|
unsigned int def_flags;
|
unsigned int def_flags;
|
struct vm_area_struct * vma;
|
struct vm_area_struct * vma;
|
|
|
if (!suser())
|
if (!suser())
|
return -EPERM;
|
return -EPERM;
|
|
|
def_flags = 0;
|
def_flags = 0;
|
if (flags & MCL_FUTURE)
|
if (flags & MCL_FUTURE)
|
def_flags = VM_LOCKED;
|
def_flags = VM_LOCKED;
|
current->mm->def_flags = def_flags;
|
current->mm->def_flags = def_flags;
|
|
|
error = 0;
|
error = 0;
|
for (vma = current->mm->mmap; vma ; vma = vma->vm_next) {
|
for (vma = current->mm->mmap; vma ; vma = vma->vm_next) {
|
unsigned int newflags;
|
unsigned int newflags;
|
|
|
newflags = vma->vm_flags | VM_LOCKED;
|
newflags = vma->vm_flags | VM_LOCKED;
|
if (!(flags & MCL_CURRENT))
|
if (!(flags & MCL_CURRENT))
|
newflags &= ~VM_LOCKED;
|
newflags &= ~VM_LOCKED;
|
error = mlock_fixup(vma, vma->vm_start, vma->vm_end, newflags);
|
error = mlock_fixup(vma, vma->vm_start, vma->vm_end, newflags);
|
if (error)
|
if (error)
|
break;
|
break;
|
}
|
}
|
merge_segments(current->mm, 0, TASK_SIZE);
|
merge_segments(current->mm, 0, TASK_SIZE);
|
return error;
|
return error;
|
}
|
}
|
|
|
asmlinkage int sys_mlockall(int flags)
|
asmlinkage int sys_mlockall(int flags)
|
{
|
{
|
unsigned long lock_limit;
|
unsigned long lock_limit;
|
|
|
if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
|
if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
|
return -EINVAL;
|
return -EINVAL;
|
|
|
lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
|
lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
|
lock_limit >>= PAGE_SHIFT;
|
lock_limit >>= PAGE_SHIFT;
|
|
|
if (current->mm->total_vm > lock_limit)
|
if (current->mm->total_vm > lock_limit)
|
return -ENOMEM;
|
return -ENOMEM;
|
|
|
/* we may lock at most half of physical memory... */
|
/* we may lock at most half of physical memory... */
|
/* (this check is pretty bogus, but doesn't hurt) */
|
/* (this check is pretty bogus, but doesn't hurt) */
|
if (current->mm->total_vm > (MAP_NR(high_memory) >> 1))
|
if (current->mm->total_vm > (MAP_NR(high_memory) >> 1))
|
return -ENOMEM;
|
return -ENOMEM;
|
|
|
return do_mlockall(flags);
|
return do_mlockall(flags);
|
}
|
}
|
|
|
/*
 * munlockall(2) system call: unlock everything and clear the
 * lock-future default (do_mlockall with flags == 0 does both).
 */
asmlinkage int sys_munlockall(void)
{
	return do_mlockall(0);
}