/*
 * linux/include/asm-arm/proc-armv/pgtable.h
 *
 * Copyright (C) 1995, 1996, 1997 Russell King
 *
 * 12-01-1997 RMK Altered flushing routines to use function pointers
 *                now possible to combine ARM6, ARM7 and StrongARM versions.
 */
#ifndef __ASM_PROC_PGTABLE_H
#define __ASM_PROC_PGTABLE_H

#include <asm/arch/mmu.h>

#define LIBRARY_TEXT_START 0x0c000000

/*
 * Cache flushing...
 */
#define flush_cache_all() \
        processor.u.armv3v4._flush_cache_all()

#define flush_cache_mm(_mm) \
        do { \
                if ((_mm) == current->mm) \
                        processor.u.armv3v4._flush_cache_all(); \
        } while (0)

#define flush_cache_range(_mm,_start,_end) \
        do { \
                if ((_mm) == current->mm) \
                        processor.u.armv3v4._flush_cache_area \
                                ((_start), (_end), 1); \
        } while (0)

#define flush_cache_page(_vma,_vmaddr) \
        do { \
                if ((_vma)->vm_mm == current->mm) \
                        processor.u.armv3v4._flush_cache_area \
                                ((_vmaddr), (_vmaddr) + PAGE_SIZE, \
                                 ((_vma)->vm_flags & VM_EXEC) ? 1 : 0); \
        } while (0)

/*
 * We don't have a mem map cache...
 */
#define update_mm_cache_all() do { } while (0)
#define update_mm_cache_task(tsk) do { } while (0)
#define update_mm_cache_mm(mm) do { } while (0)
#define update_mm_cache_mm_addr(mm,addr,pte) do { } while (0)

/*
 * This flushes back any buffered write data. We have to clean and flush the entries
 * in the cache for this page. Is it necessary to invalidate the I-cache?
 */
#define flush_page_to_ram(_page) \
        processor.u.armv3v4._flush_ram_page ((_page) & PAGE_MASK);

/*
 * Make the page uncacheable (must flush page beforehand).
 */
#define uncache_page(_page) \
        processor.u.armv3v4._flush_ram_page ((_page) & PAGE_MASK);

/*
 * TLB flushing:
 *
 * - flush_tlb() flushes the current mm struct TLBs
 * - flush_tlb_all() flushes all processes' TLBs
 * - flush_tlb_mm(mm) flushes the specified mm context TLBs
 * - flush_tlb_page(vma, vmaddr) flushes one page
 * - flush_tlb_range(mm, start, end) flushes a range of pages
 *
 * GCC uses conditional instructions, and expects the assembler code to do so as well.
 *
 * We drain the write buffer in here to ensure that the page tables in ram
 * are really up to date. It is more efficient to do this here...
 */
#define flush_tlb() flush_tlb_all()

#define flush_tlb_all() \
        processor.u.armv3v4._flush_tlb_all()

#define flush_tlb_mm(_mm) \
        do { \
                if ((_mm) == current->mm) \
                        processor.u.armv3v4._flush_tlb_all(); \
        } while (0)

#define flush_tlb_range(_mm,_start,_end) \
        do { \
                if ((_mm) == current->mm) \
                        processor.u.armv3v4._flush_tlb_area \
                                ((_start), (_end), 1); \
        } while (0)

#define flush_tlb_page(_vma,_vmaddr) \
        do { \
                if ((_vma)->vm_mm == current->mm) \
                        processor.u.armv3v4._flush_tlb_area \
                                ((_vmaddr), (_vmaddr) + PAGE_SIZE, \
                                 ((_vma)->vm_flags & VM_EXEC) ? 1 : 0); \
        } while (0)

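/*
 * Typical (illustrative) use after changing a user-space mapping, assuming
 * the caller already has the vma, the pte pointer and the new entry:
 *
 *        set_pte(ptep, entry);
 *        flush_tlb_page(vma, address);
 */
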
/*
 * Since the page tables are in cached memory, we need to flush the dirty
 * data cached entries back before we flush the tlb... This is also useful
 * to flush out the SWI instruction for signal handlers...
 */
#define __flush_entry_to_ram(entry) \
        processor.u.armv3v4._flush_cache_entry((unsigned long)(entry))

#define __flush_pte_to_ram(entry) \
        processor.u.armv3v4._flush_cache_pte((unsigned long)(entry))

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT 20
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT 20
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))

/*
 * entries per page directory level: the sa110 is two-level, so
 * we don't really have any PMD directory physically.
 */
#define PTRS_PER_PTE 256
#define PTRS_PER_PMD 1
#define PTRS_PER_PGD 4096

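/*
 * Worked out for reference (not in the original): with PGDIR_SHIFT == 20,
 * each of the 4096 pgd entries maps 1MB, covering the full 4GB address
 * space; a second-level table holds 256 entries of 4kB pages, so
 * 256 * 4kB == 1MB == PMD_SIZE.
 */
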
/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts. That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a 4kB hole between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET (8*1024*1024)
#define VMALLOC_START ((high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_VMADDR(x) ((unsigned long)(x))

/* PMD types (actually level 1 descriptor) */
#define PMD_TYPE_MASK 0x0003
#define PMD_TYPE_FAULT 0x0000
#define PMD_TYPE_TABLE 0x0001
#define PMD_TYPE_SECT 0x0002
#define PMD_UPDATABLE 0x0010
#define PMD_SECT_CACHEABLE 0x0008
#define PMD_SECT_BUFFERABLE 0x0004
#define PMD_SECT_AP_WRITE 0x0400
#define PMD_SECT_AP_READ 0x0800
#define PMD_DOMAIN(x) ((x) << 5)

/* PTE types (actually level 2 descriptor) */
#define PTE_TYPE_MASK 0x0003
#define PTE_TYPE_FAULT 0x0000
#define PTE_TYPE_LARGE 0x0001
#define PTE_TYPE_SMALL 0x0002
#define PTE_AP_READ 0x0aa0
#define PTE_AP_WRITE 0x0550
#define PTE_CACHEABLE 0x0008
#define PTE_BUFFERABLE 0x0004

/* Domains */
#define DOMAIN_KERNEL 0

#define _PAGE_CHG_MASK (0xfffff00c | PTE_TYPE_MASK)

/*
 * We define the bits in the page tables as follows:
 *  PTE_BUFFERABLE  page is dirty
 *  PTE_AP_WRITE    page is writable
 *  PTE_AP_READ     page is young (unsetting this causes faults for any access)
 *
 * Any page that is mapped in is assumed to be readable...
 */
#define PAGE_NONE __pgprot(PTE_TYPE_SMALL)
#define PAGE_SHARED __pgprot(PTE_TYPE_SMALL | PTE_CACHEABLE | PTE_AP_READ | PTE_AP_WRITE)
#define PAGE_COPY __pgprot(PTE_TYPE_SMALL | PTE_CACHEABLE | PTE_AP_READ)
#define PAGE_READONLY __pgprot(PTE_TYPE_SMALL | PTE_CACHEABLE | PTE_AP_READ)
#define PAGE_KERNEL __pgprot(PTE_TYPE_SMALL | PTE_CACHEABLE | PTE_BUFFERABLE | PTE_AP_WRITE)

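/*
 * Illustrative reading of the encodings above (not from the original
 * source): PAGE_SHARED is a small page that is cacheable, young
 * (PTE_AP_READ) and writable (PTE_AP_WRITE) but still marked clean, while
 * pte_mkdirty() below would additionally set PTE_BUFFERABLE.
 */
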
#define _PAGE_TABLE (PMD_TYPE_TABLE | PMD_DOMAIN(DOMAIN_KERNEL))

/*
 * The ARM can't do page protection for execute, and considers execute
 * permission the same as read.  Also, write permissions imply read
 * permissions. This is the closest we can get..
 */
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
#define __P010 PAGE_COPY
#define __P011 PAGE_COPY
#define __P100 PAGE_READONLY
#define __P101 PAGE_READONLY
#define __P110 PAGE_COPY
#define __P111 PAGE_COPY

#define __S000 PAGE_NONE
#define __S001 PAGE_READONLY
#define __S010 PAGE_SHARED
#define __S011 PAGE_SHARED
#define __S100 PAGE_READONLY
#define __S101 PAGE_READONLY
#define __S110 PAGE_SHARED
#define __S111 PAGE_SHARED

#undef TEST_VERIFY_AREA

/*
 * BAD_PAGETABLE is used when we need a bogus page-table, while
 * BAD_PAGE is used for a bogus page.
 *
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern pte_t __bad_page(void);
extern pte_t * __bad_pagetable(void);
extern unsigned long *empty_zero_page;

#define BAD_PAGETABLE __bad_pagetable()
#define BAD_PAGE __bad_page()
#define ZERO_PAGE ((unsigned long) empty_zero_page)

/* number of bits that fit into a memory pointer */
#define BYTES_PER_PTR (sizeof(unsigned long))
#define BITS_PER_PTR (8*BYTES_PER_PTR)

/* to align the pointer to a pointer address */
#define PTR_MASK (~(sizeof(void*)-1))

/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
#define SIZEOF_PTR_LOG2 2

/* to find an entry in a page-table */
#define PAGE_PTR(address) \
        ((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)

/* to set the page-dir */
#define SET_PAGE_DIR(tsk,pgdir) \
        do { \
                tsk->tss.memmap = __virt_to_phys((unsigned long)pgdir); \
                if ((tsk) == current) \
                        __asm__ __volatile__( \
                                "mcr%? p15, 0, %0, c2, c0, 0\n" \
                                : : "r" (tsk->tss.memmap)); \
        } while (0)

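/*
 * Informational note (not in the original): the MCR above loads CP15
 * register 2, the translation table base register, so the new page
 * directory takes effect immediately for the running task.  A hypothetical
 * call site would look like SET_PAGE_DIR(tsk, tsk->mm->pgd).
 */
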
extern __inline__ int pte_none(pte_t pte)
{
        return !pte_val(pte);
}

#define pte_clear(ptep) set_pte(ptep, __pte(0))

extern __inline__ int pte_present(pte_t pte)
{
        switch (pte_val(pte) & PTE_TYPE_MASK) {
        case PTE_TYPE_LARGE:
        case PTE_TYPE_SMALL:
                return 1;
        default:
                return 0;
        }
}

extern __inline__ int pmd_none(pmd_t pmd)
{
        return !pmd_val(pmd);
}

#define pmd_clear(pmdp) set_pmd(pmdp, __pmd(0))

extern __inline__ int pmd_bad(pmd_t pmd)
{
        switch (pmd_val(pmd) & PMD_TYPE_MASK) {
        case PMD_TYPE_FAULT:
        case PMD_TYPE_TABLE:
                return 0;
        default:
                return 1;
        }
}

extern __inline__ int pmd_present(pmd_t pmd)
{
        switch (pmd_val(pmd) & PMD_TYPE_MASK) {
        case PMD_TYPE_TABLE:
                return 1;
        default:
                return 0;
        }
}

/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
#define pgd_none(pgd) (0)
#define pgd_bad(pgd) (0)
#define pgd_present(pgd) (1)
#define pgd_clear(pgdp)

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
#define pte_read(pte) (1)
#define pte_exec(pte) (1)

extern __inline__ int pte_write(pte_t pte)
{
        return pte_val(pte) & PTE_AP_WRITE;
}

extern __inline__ int pte_cacheable(pte_t pte)
{
        return pte_val(pte) & PTE_CACHEABLE;
}

extern __inline__ int pte_dirty(pte_t pte)
{
        return pte_val(pte) & PTE_BUFFERABLE;
}

extern __inline__ int pte_young(pte_t pte)
{
        return pte_val(pte) & PTE_AP_READ;
}

extern __inline__ pte_t pte_wrprotect(pte_t pte)
{
        pte_val(pte) &= ~PTE_AP_WRITE;
        return pte;
}

extern __inline__ pte_t pte_nocache(pte_t pte)
{
        pte_val(pte) &= ~PTE_CACHEABLE;
        return pte;
}

extern __inline__ pte_t pte_mkclean(pte_t pte)
{
        pte_val(pte) &= ~PTE_BUFFERABLE;
        return pte;
}

extern __inline__ pte_t pte_mkold(pte_t pte)
{
        pte_val(pte) &= ~PTE_AP_READ;
        return pte;
}

extern __inline__ pte_t pte_mkwrite(pte_t pte)
{
        pte_val(pte) |= PTE_AP_WRITE;
        return pte;
}

extern __inline__ pte_t pte_mkdirty(pte_t pte)
{
        pte_val(pte) |= PTE_BUFFERABLE;
        return pte;
}

extern __inline__ pte_t pte_mkyoung(pte_t pte)
{
        pte_val(pte) |= PTE_AP_READ;
        return pte;
}

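/*
 * Illustrative only (not from the original source): these helpers just
 * toggle bits in the pte value and compose freely, e.g. a young, dirty,
 * writable entry could be built up as
 *
 *        pte = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte)));
 */
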
/*
 * The following cannot be implemented on this MMU
 */
#if 0
extern __inline__ pte_t pte_rdprotect(pte_t pte)
{
        pte_val(pte) &= ~(PTE_CACHEABLE|PTE_AP_READ);
        return pte;
}

extern __inline__ pte_t pte_exprotect(pte_t pte)
{
        pte_val(pte) &= ~(PTE_CACHEABLE|PTE_AP_READ);
        return pte;
}

extern __inline__ pte_t pte_mkread(pte_t pte)
{
        pte_val(pte) |= PTE_CACHEABLE;
        return pte;
}

extern __inline__ pte_t pte_mkexec(pte_t pte)
{
        pte_val(pte) |= PTE_CACHEABLE;
        return pte;
}
#endif

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
extern __inline__ pte_t mk_pte(unsigned long page, pgprot_t pgprot)
{
        pte_t pte;
        pte_val(pte) = __virt_to_phys(page) | pgprot_val(pgprot);
        return pte;
}

extern __inline__ pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
        return pte;
}

extern __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
{
        *pteptr = pteval;
        __flush_pte_to_ram(pteptr);
}

extern __inline__ unsigned long pte_page(pte_t pte)
{
        return __phys_to_virt(pte_val(pte) & PAGE_MASK);
}

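/*
 * Illustrative only (assumes "page" is a page-aligned kernel virtual
 * address): mk_pte() stores the physical address plus the protection bits
 * and pte_page() recovers the virtual address, so
 *
 *        pte_page(mk_pte(page, PAGE_KERNEL)) == page
 */
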
extern __inline__ pmd_t mk_pmd(pte_t *ptep)
{
        pmd_t pmd;
        pmd_val(pmd) = __virt_to_phys((unsigned long)ptep) | _PAGE_TABLE;
        return pmd;
}

#if 1
#define set_pmd(pmdp,pmd) processor.u.armv3v4._set_pmd(pmdp,pmd)
#else
extern __inline__ void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
        *pmdp = pmd;
        __flush_pte_to_ram(pmdp);
}
#endif

extern __inline__ unsigned long pmd_page(pmd_t pmd)
{
        return __phys_to_virt(pmd_val(pmd) & 0xfffffc00);
}

/* to find an entry in a page-table-directory */
extern __inline__ pgd_t * pgd_offset(struct mm_struct * mm, unsigned long address)
{
        return mm->pgd + (address >> PGDIR_SHIFT);
}

/* Find an entry in the second-level page table.. */
#define pmd_offset(dir, address) ((pmd_t *)(dir))

/* Find an entry in the third-level page table.. */
extern __inline__ pte_t * pte_offset(pmd_t * dir, unsigned long address)
{
        return (pte_t *) pmd_page(*dir) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}

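/*
 * Illustrative walk (assumes "mm" and "addr" come from the caller): the
 * middle level is folded into the pgd on this MMU, so a full lookup is
 *
 *        pgd_t *pgd = pgd_offset(mm, addr);
 *        pmd_t *pmd = pmd_offset(pgd, addr);
 *        pte_t *pte = pte_offset(pmd, addr);
 */
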
extern unsigned long get_small_page(int priority);
extern void free_small_page(unsigned long page);

/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */
extern __inline__ void pte_free_kernel(pte_t * pte)
{
        free_small_page((unsigned long) pte);
}

extern const char bad_pmd_string[];

extern __inline__ pte_t * pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
        address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
        if (pmd_none(*pmd)) {
                pte_t *page = (pte_t *) get_small_page(GFP_KERNEL);
                if (pmd_none(*pmd)) {
                        if (page) {
                                memzero (page, PTRS_PER_PTE * BYTES_PER_PTR);
                                set_pmd(pmd, mk_pmd(page));
                                return page + address;
                        }
                        set_pmd(pmd, mk_pmd(BAD_PAGETABLE));
                        return NULL;
                }
                free_small_page((unsigned long) page);
        }
        if (pmd_bad(*pmd)) {
                printk(bad_pmd_string, pmd_val(*pmd));
                set_pmd(pmd, mk_pmd(BAD_PAGETABLE));
                return NULL;
        }
        return (pte_t *) pmd_page(*pmd) + address;
}

/*
 * allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
 */
#define pmd_free_kernel(pmdp) pmd_val(*(pmdp)) = 0;
#define pmd_alloc_kernel(pgdp, address) ((pmd_t *)(pgdp))

extern __inline__ void pte_free(pte_t * pte)
{
        free_small_page((unsigned long) pte);
}

extern __inline__ pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
{
        address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);

        if (pmd_none(*pmd)) {
                pte_t *page = (pte_t *) get_small_page(GFP_KERNEL);
                if (pmd_none(*pmd)) {
                        if (page) {
                                memzero (page, PTRS_PER_PTE * BYTES_PER_PTR);
                                set_pmd(pmd, mk_pmd(page));
                                return page + address;
                        }
                        set_pmd(pmd, mk_pmd(BAD_PAGETABLE));
                        return NULL;
                }
                free_small_page ((unsigned long) page);
        }
        if (pmd_bad(*pmd)) {
                printk(bad_pmd_string, pmd_val(*pmd));
                set_pmd(pmd, mk_pmd(BAD_PAGETABLE));
                return NULL;
        }
        return (pte_t *) pmd_page(*pmd) + address;
}

/*
 * allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
 */
#define pmd_free(pmdp) pmd_val(*(pmdp)) = 0;
#define pmd_alloc(pgdp, address) ((pmd_t *)(pgdp))

/*
 * Free a page directory. Takes the virtual address.
 */
extern __inline__ void pgd_free(pgd_t * pgd)
{
        free_pages((unsigned long) pgd, 2);
}

/*
 * Allocate a new page directory. Return the virtual address of it.
 */
extern __inline__ pgd_t * pgd_alloc(void)
{
        unsigned long pgd;

        /*
         * need to get a 16k page for level 1
         */
        pgd = __get_free_pages(GFP_KERNEL,2,0);
        if (pgd)
                memzero ((void *)pgd, PTRS_PER_PGD * BYTES_PER_PTR);
        return (pgd_t *)pgd;
}

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/*
 * The sa110 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
extern __inline__ void update_mmu_cache(struct vm_area_struct * vma,
        unsigned long address, pte_t pte)
{
}

#define SWP_TYPE(entry) (((entry) >> 2) & 0x7f)
#define SWP_OFFSET(entry) ((entry) >> 9)
#define SWP_ENTRY(type,offset) (((type) << 2) | ((offset) << 9))

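/*
 * Illustrative only: SWP_ENTRY() packs a (type, offset) pair so that
 * SWP_TYPE(SWP_ENTRY(t, o)) == t and SWP_OFFSET(SWP_ENTRY(t, o)) == o,
 * e.g. SWP_ENTRY(3, 100) == (3 << 2) | (100 << 9) == 0xc80c.
 */
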
#endif /* __ASM_PROC_PGTABLE_H */