/*
 * linux/mm/mprotect.c
 *
 * (C) Copyright 1994 Linus Torvalds
 */
|
#include <linux/stat.h>
|
#include <linux/stat.h>
|
#include <linux/sched.h>
|
#include <linux/sched.h>
|
#include <linux/kernel.h>
|
#include <linux/kernel.h>
|
#include <linux/mm.h>
|
#include <linux/mm.h>
|
#include <linux/shm.h>
|
#include <linux/shm.h>
|
#include <linux/errno.h>
|
#include <linux/errno.h>
|
#include <linux/mman.h>
|
#include <linux/mman.h>
|
#include <linux/string.h>
|
#include <linux/string.h>
|
#include <linux/malloc.h>
|
#include <linux/malloc.h>
|
|
|
#include <asm/segment.h>
|
#include <asm/segment.h>
|
#include <asm/system.h>
|
#include <asm/system.h>
|
#include <asm/pgtable.h>
|
#include <asm/pgtable.h>
|
|
|
/*
 * Rewrite the protection bits of every *present* pte covered by
 * [address, address+size) within the pte table that this pmd entry
 * points to.  Not-present ptes (swapped out / never faulted) are left
 * untouched; they pick up the new protection when they are faulted in.
 */
static inline void change_pte_range(pmd_t * pmd, unsigned long address,
	unsigned long size, pgprot_t newprot)
{
	pte_t * pte;
	unsigned long end;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		/* Corrupt entry: report it and wipe it rather than walk garbage. */
		printk("change_pte_range: bad pmd (%08lx)\n", pmd_val(*pmd));
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset(pmd, address);
	/* Reduce address to its offset inside this pmd's span, and clamp
	   the end so we never run past the pte table. */
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t entry = *pte;
		if (pte_present(entry))
			/* pte_modify keeps the page frame, swaps in newprot bits. */
			set_pte(pte, pte_modify(entry, newprot));
		address += PAGE_SIZE;
		pte++;
	} while (address < end);
}
|
|
|
/*
 * Middle level of the page-table walk: for the part of
 * [address, address+size) inside this pgd entry, visit each pmd entry
 * and let change_pte_range() rewrite the ptes underneath it.
 */
static inline void change_pmd_range(pgd_t * pgd, unsigned long address,
	unsigned long size, pgprot_t newprot)
{
	pmd_t * pmd;
	unsigned long end;

	if (pgd_none(*pgd))
		return;
	if (pgd_bad(*pgd)) {
		/* Corrupt entry: report and clear, mirroring change_pte_range. */
		printk("change_pmd_range: bad pgd (%08lx)\n", pgd_val(*pgd));
		pgd_clear(pgd);
		return;
	}
	pmd = pmd_offset(pgd, address);
	/* Offset within this pgd's span, clamped to its end. */
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		change_pte_range(pmd, address, end - address, newprot);
		/* Advance to the start of the next pmd-sized region. */
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
}
|
|
|
/*
 * Top level of the walk: apply newprot to every present page mapped in
 * [start, end) of the current process.  The cache is flushed before the
 * tables change and the TLB afterwards, so stale translations with the
 * old protection cannot survive the update.
 */
static void change_protection(unsigned long start, unsigned long end, pgprot_t newprot)
{
	pgd_t *dir;
	unsigned long beg = start;	/* keep original start for the flushes */

	dir = pgd_offset(current->mm, start);
	flush_cache_range(current->mm, beg, end);
	while (start < end) {
		change_pmd_range(dir, start, end - start, newprot);
		/* Step to the start of the next pgd-sized region. */
		start = (start + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	}
	flush_tlb_range(current->mm, beg, end);
	return;
}
|
|
|
/*
 * The whole vma gets the new protection: just overwrite its flags and
 * page protection in place.  No split, no allocation, cannot fail.
 */
static inline int mprotect_fixup_all(struct vm_area_struct * vma,
	int newflags, pgprot_t prot)
{
	vma->vm_flags = newflags;
	vma->vm_page_prot = prot;
	return 0;
}
|
|
|
/*
 * The region to change covers the head of the vma: split it at 'end'.
 * A new vma 'n' takes [old_start, end) with the new protection, while
 * the existing vma is shrunk to [end, old_end) and keeps its old one.
 * Returns 0 on success, -ENOMEM if the new vma cannot be allocated.
 */
static inline int mprotect_fixup_start(struct vm_area_struct * vma,
	unsigned long end,
	int newflags, pgprot_t prot)
{
	struct vm_area_struct * n;

	n = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
	if (!n)
		return -ENOMEM;
	*n = *vma;
	vma->vm_start = end;
	n->vm_end = end;
	/* n->vm_start still holds the old start here, so this advances the
	   surviving vma's file offset by exactly the length given away. */
	vma->vm_offset += vma->vm_start - n->vm_start;
	n->vm_flags = newflags;
	n->vm_page_prot = prot;
	/* The split doubles the references to the backing inode and vma ops. */
	if (n->vm_inode)
		n->vm_inode->i_count++;
	if (n->vm_ops && n->vm_ops->open)
		n->vm_ops->open(n);
	insert_vm_struct(current->mm, n);
	return 0;
}
|
|
|
/*
 * The region to change covers the tail of the vma: split it at 'start'.
 * The existing vma is shrunk to [old_start, start) keeping its old
 * protection; a new vma 'n' takes [start, old_end) with the new one.
 * Returns 0 on success, -ENOMEM if the new vma cannot be allocated.
 */
static inline int mprotect_fixup_end(struct vm_area_struct * vma,
	unsigned long start,
	int newflags, pgprot_t prot)
{
	struct vm_area_struct * n;

	n = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
	if (!n)
		return -ENOMEM;
	*n = *vma;
	vma->vm_end = start;
	n->vm_start = start;
	/* Advance the new vma's file offset past the part kept by 'vma'. */
	n->vm_offset += n->vm_start - vma->vm_start;
	n->vm_flags = newflags;
	n->vm_page_prot = prot;
	/* The split doubles the references to the backing inode and vma ops. */
	if (n->vm_inode)
		n->vm_inode->i_count++;
	if (n->vm_ops && n->vm_ops->open)
		n->vm_ops->open(n);
	insert_vm_struct(current->mm, n);
	return 0;
}
|
|
|
/*
 * The region to change lies strictly inside the vma: split into three.
 * 'left' keeps [old_start, start) and 'right' keeps [end, old_end) with
 * the old protection; the original vma is narrowed to [start, end) and
 * gets the new protection.  Both new vmas are allocated up front so the
 * vma list is never left half-modified on failure.
 * Returns 0 on success, -ENOMEM if either allocation fails.
 */
static inline int mprotect_fixup_middle(struct vm_area_struct * vma,
	unsigned long start, unsigned long end,
	int newflags, pgprot_t prot)
{
	struct vm_area_struct * left, * right;

	left = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
	if (!left)
		return -ENOMEM;
	right = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
	if (!right) {
		kfree(left);
		return -ENOMEM;
	}
	*left = *vma;
	*right = *vma;
	left->vm_end = start;
	vma->vm_start = start;
	vma->vm_end = end;
	right->vm_start = end;
	/* left->vm_start still holds the pre-split start, so these shift each
	   piece's file offset by its distance from the original start. */
	vma->vm_offset += vma->vm_start - left->vm_start;
	right->vm_offset += right->vm_start - left->vm_start;
	vma->vm_flags = newflags;
	vma->vm_page_prot = prot;
	/* Two extra vmas now reference the same inode and ops. */
	if (vma->vm_inode)
		vma->vm_inode->i_count += 2;
	if (vma->vm_ops && vma->vm_ops->open) {
		vma->vm_ops->open(left);
		vma->vm_ops->open(right);
	}
	insert_vm_struct(current->mm, left);
	insert_vm_struct(current->mm, right);
	return 0;
}
|
|
|
/*
 * Apply newflags to [start, end) of a single vma, where the range is
 * known to lie within the vma.  Dispatches to the whole/head/tail/middle
 * split helper depending on how the range lines up with the vma's
 * boundaries, then rewrites the page tables for the affected pages.
 * Returns 0 on success or a negative errno from the split helpers.
 */
static int mprotect_fixup(struct vm_area_struct * vma,
	unsigned long start, unsigned long end, unsigned int newflags)
{
	pgprot_t newprot;
	int error;

	if (newflags == vma->vm_flags)
		return 0;	/* nothing to change */
	/* Low 4 flag bits (read/write/exec/shared) index the arch's
	   protection-map table to get the hardware pte protection. */
	newprot = protection_map[newflags & 0xf];
	if (start == vma->vm_start)
		if (end == vma->vm_end)
			error = mprotect_fixup_all(vma, newflags, newprot);
		else
			error = mprotect_fixup_start(vma, end, newflags, newprot);
	else if (end == vma->vm_end)
		error = mprotect_fixup_end(vma, start, newflags, newprot);
	else
		error = mprotect_fixup_middle(vma, start, end, newflags, newprot);

	if (error)
		return error;

	change_protection(start, end, newprot);
	return 0;
}
|
|
|
/*
 * mprotect(2) system call: change the protection of [start, start+len)
 * to 'prot'.  'start' must be page aligned; 'len' is rounded up to a
 * whole number of pages.  The range may span several vmas, but it must
 * be fully mapped with no holes, or -EFAULT is returned (possibly after
 * some leading vmas have already been changed, as in the original code).
 */
asmlinkage int sys_mprotect(unsigned long start, size_t len, unsigned long prot)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct * vma, * next;
	int error;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	/* Round len up to a page multiple. */
	len = (len + ~PAGE_MASK) & PAGE_MASK;
	end = start + len;
	if (end < start)
		return -EINVAL;	/* wrapped around the address space */
	if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC))
		return -EINVAL;
	if (end == start)
		return 0;
	vma = find_vma(current->mm, start);
	if (!vma || vma->vm_start > start)
		return -EFAULT;	/* start is not mapped */

	for (nstart = start ; ; ) {
		unsigned int newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = prot | (vma->vm_flags & ~(PROT_READ | PROT_WRITE | PROT_EXEC));
		/* Bits 4..7 of vm_flags hold the maximal protections allowed
		   for this mapping (VM_MAY*); refuse any requested bit whose
		   "may" counterpart is clear. */
		if ((newflags & ~(newflags >> 4)) & 0xf) {
			error = -EACCES;
			break;
		}

		if (vma->vm_end >= end) {
			/* Range ends inside this vma: final piece, then done. */
			error = mprotect_fixup(vma, nstart, end, newflags);
			break;
		}

		/* Capture end/next before mprotect_fixup may split the vma. */
		tmp = vma->vm_end;
		next = vma->vm_next;
		error = mprotect_fixup(vma, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		vma = next;
		if (!vma || vma->vm_start != nstart) {
			error = -EFAULT;	/* hole in the middle of the range */
			break;
		}
	}
	/* Re-merge adjacent vmas that now have identical attributes. */
	merge_segments(current->mm, start, end);
	return error;
}
|
|
|