URL
https://opencores.org/ocsvn/or1k_old/or1k_old/trunk
Subversion Repositories or1k_old
Compare Revisions
- This comparison shows the changes necessary to convert path
/or1k_old/trunk/rc203soc/sw/uClinux/arch/m68k/mm
- from Rev 1765 to Rev 1782
- ↔ Reverse comparison
Rev 1765 → Rev 1782
/init.c
0,0 → 1,547
/* |
* linux/arch/m68k/mm/init.c |
* |
* Copyright (C) 1995 Hamish Macdonald |
*/ |
|
#include <linux/config.h> |
#include <linux/signal.h> |
#include <linux/sched.h> |
#include <linux/mm.h> |
#include <linux/swap.h> |
#include <linux/kernel.h> |
#include <linux/string.h> |
#include <linux/types.h> |
#ifdef CONFIG_BLK_DEV_RAM |
#include <linux/blk.h> |
#endif |
|
#include <asm/segment.h> |
#include <asm/page.h> |
#include <asm/pgtable.h> |
#include <asm/system.h> |
#include <asm/bootinfo.h> |
#include <asm/machdep.h> |
|
extern void die_if_kernel(char *,struct pt_regs *,long); |
extern void init_kpointer_table(void); |
extern void show_net_buffers(void); |
extern unsigned long mm_phys_to_virt (unsigned long addr); |
extern char *rd_start; |
extern int rd_doload; |
|
unsigned long ramdisk_length; |
|
/* |
* BAD_PAGE is the page that is used for page faults when linux |
* is out-of-memory. Older versions of linux just did a |
* do_exit(), but using this instead means there is less risk |
* for a process dying in kernel mode, possibly leaving a inode |
* unused etc.. |
* |
* BAD_PAGETABLE is the accompanying page-table: it is initialized |
* to point to BAD_PAGE entries. |
* |
* ZERO_PAGE is a special page that is used for zero-initialized |
* data and COW. |
*/ |
static unsigned long empty_bad_page_table; |
|
pte_t *__bad_pagetable(void) |
{ |
memset((void *)empty_bad_page_table, 0, PAGE_SIZE); |
return (pte_t *)empty_bad_page_table; |
} |
|
static unsigned long empty_bad_page; |
|
pte_t __bad_page(void) |
{ |
memset ((void *)empty_bad_page, 0, PAGE_SIZE); |
return pte_mkdirty(mk_pte(empty_bad_page, PAGE_SHARED)); |
} |
|
unsigned long empty_zero_page; |
|
void show_mem(void) |
{ |
unsigned long i; |
int free = 0, total = 0, reserved = 0, nonshared = 0, shared = 0; |
|
printk("\nMem-info:\n"); |
show_free_areas(); |
printk("Free swap: %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10)); |
i = high_memory >> PAGE_SHIFT; |
while (i-- > 0) { |
total++; |
if (PageReserved(mem_map+i)) |
reserved++; |
else if (!mem_map[i].count) |
free++; |
else if (mem_map[i].count == 1) |
nonshared++; |
else |
shared += mem_map[i].count-1; |
} |
printk("%d pages of RAM\n",total); |
printk("%d free pages\n",free); |
printk("%d reserved pages\n",reserved); |
printk("%d pages nonshared\n",nonshared); |
printk("%d pages shared\n",shared); |
show_buffers(); |
#ifdef CONFIG_NET |
show_net_buffers(); |
#endif |
} |
|
#if 0 /* The 68030 doesn't care about reserved bits. */ |
/* |
* Bits to add to page descriptors for "normal" caching mode. |
* For 68020/030 this is 0. |
* For 68040, this is _PAGE_CACHE040 (cachable, copyback) |
*/ |
unsigned long mm_cachebits; |
#endif |
|
pte_t *kernel_page_table (unsigned long *memavailp) |
{ |
pte_t *ptablep; |
|
ptablep = (pte_t *)*memavailp; |
*memavailp += PAGE_SIZE; |
|
nocache_page ((unsigned long)ptablep); |
|
return ptablep; |
} |
|
/*
 * Map one chunk of physical memory (addr .. addr+size) into the kernel
 * virtual address space, 256K at a time.  On the 68040/060 real page
 * tables are built (allocated from *memavailp); on the 68020/030
 * early-termination pointer descriptors are used instead.  The static
 * variables carry cumulative state between calls, so chunks must be
 * mapped in ascending virtual order.  Returns the total amount of
 * memory mapped so far (cumulative across calls).
 */
static unsigned long map_chunk (unsigned long addr,
				unsigned long size,
				unsigned long *memavailp)
{
#define ONEMEG	(1024*1024)
#define L3TREESIZE (256*1024)

	int is040 = m68k_is040or060;
	/* cumulative state across calls: bytes mapped so far and the next
	 * unmapped kernel virtual address */
	static unsigned long mem_mapped = 0;
	static unsigned long virtaddr = 0;
	static pte_t *ktablep = NULL;
	unsigned long *kpointerp;
	unsigned long physaddr;
	extern pte_t *kpt;		/* first '040 page table, set up in head.S */
	int pindex;   /* index into pointer table */
	pgd_t *page_dir = pgd_offset_k (virtaddr);

	if (!pgd_present (*page_dir)) {
		/* we need a new pointer table */
		kpointerp = (unsigned long *) get_kpointer_table ();
		pgd_set (page_dir, (pmd_t *) kpointerp);
		memset (kpointerp, 0, PTRS_PER_PMD * sizeof (pmd_t));
	}
	else
		kpointerp = (unsigned long *) pgd_page (*page_dir);

	/*
	 * pindex is the offset into the pointer table for the
	 * descriptors for the current virtual address being mapped.
	 */
	pindex = (virtaddr >> 18) & 0x7f;

#ifdef DEBUG
	printk ("mm=%ld, kernel_pg_dir=%p, kpointerp=%p, pindex=%d\n",
		mem_mapped, kernel_pg_dir, kpointerp, pindex);
#endif

	/*
	 * if this is running on an '040, we already allocated a page
	 * table for the first 4M.  The address is stored in kpt by
	 * arch/head.S
	 *
	 */
	if (is040 && mem_mapped == 0)
		ktablep = kpt;

	/* one iteration per 256K (L3TREESIZE) of physical memory */
	for (physaddr = addr;
	     physaddr < addr + size;
	     mem_mapped += L3TREESIZE, virtaddr += L3TREESIZE) {

#ifdef DEBUG
		printk ("pa=%#lx va=%#lx ", physaddr, virtaddr);
#endif

		if (pindex > 127 && mem_mapped >= 32*ONEMEG) {
			/* we need a new pointer table every 32M */
#ifdef DEBUG
			printk ("[new pointer]");
#endif

			kpointerp = (unsigned long *)get_kpointer_table ();
			pgd_set(pgd_offset_k(virtaddr), (pmd_t *)kpointerp);
			pindex = 0;
		}

		if (is040) {
			int i;
			unsigned long ktable;

			/* Don't map the first 4 MB again. The pagetables
			 * for this range have already been initialized
			 * in boot/head.S. Otherwise the pages used for
			 * tables would be reinitialized to copyback mode.
			 */

			if (mem_mapped < 4 * ONEMEG)
			{
#ifdef DEBUG
				printk ("Already initialized\n");
#endif
				physaddr += L3TREESIZE;
				pindex++;
				continue;
			}
#ifdef DEBUG
			printk ("[setup table]");
#endif

			/*
			 * 68040, use page tables pointed to by the
			 * kernel pointer table.
			 */

			if ((pindex & 15) == 0) {
				/* Need new page table every 4M on the '040 */
#ifdef DEBUG
				printk ("[new table]");
#endif
				ktablep = kernel_page_table (memavailp);
			}

			ktable = VTOP(ktablep);

			/*
			 * initialize section of the page table mapping
			 * this 256K portion.
			 */
			for (i = 0; i < 64; i++) {
				pte_val(ktablep[i]) = physaddr | _PAGE_PRESENT
					| _PAGE_CACHE040 | _PAGE_GLOBAL040;
				physaddr += PAGE_SIZE;
			}
			ktablep += 64;

			/*
			 * make the kernel pointer table point to the
			 * kernel page table.  Each entries point to a
			 * 64 entry section of the page table.
			 */

			kpointerp[pindex++] = ktable | _PAGE_TABLE;
		} else {
			/*
			 * 68030, use early termination page descriptors.
			 * Each one points to 64 pages (256K).
			 */
#ifdef DEBUG
			printk ("[early term] ");
#endif
			if (virtaddr == 0UL) {
				/* map the first 256K using a 64 entry
				 * 3rd level page table.
				 * UNMAP the first entry to trap
				 * zero page (NULL pointer) references
				 */
				int i;
				unsigned long *tbl;

				tbl = (unsigned long *)get_kpointer_table();

				kpointerp[pindex++] = VTOP(tbl) | _PAGE_TABLE;

				for (i = 0; i < 64; i++, physaddr += PAGE_SIZE)
					tbl[i] = physaddr | _PAGE_PRESENT;

				/* unmap the zero page */
				tbl[0] = 0;
			} else {
				/* not the first 256K */
				kpointerp[pindex++] = physaddr | _PAGE_PRESENT;
#ifdef DEBUG
				printk ("%lx=%lx ", VTOP(&kpointerp[pindex-1]),
					kpointerp[pindex-1]);
#endif
				physaddr += 64 * PAGE_SIZE;
			}
		}
#ifdef DEBUG
		printk ("\n");
#endif
	}

	return mem_mapped;
}
|
extern unsigned long free_area_init(unsigned long, unsigned long); |
|
extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; |
|
/* |
* paging_init() continues the virtual memory environment setup which |
* was begun by the code in arch/head.S. |
* The parameters are pointers to where to stick the starting and ending |
* addresses of available kernel virtual memory. |
*/ |
/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S.
 * The parameters are pointers to where to stick the starting and ending
 * addresses of available kernel virtual memory.
 *
 * Maps all physical memory chunks, allocates the bad/zero pages,
 * installs the swapper page directory as the CPU root pointer, and
 * (optionally) relocates a preloaded ramdisk image.  Returns the value
 * of free_area_init(), i.e. the new start of free memory.
 */
unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
{
	int chunk;
	unsigned long mem_avail = 0;
	/* pointer to page table for kernel stacks */
	extern unsigned long availmem;	/* set up by head.S; bumped by table allocs */

#ifdef DEBUG
	{
		extern pte_t *kpt;
		printk ("start of paging_init (%p, %p, %lx, %lx, %lx)\n",
			kernel_pg_dir, kpt, availmem, start_mem, end_mem);
	}
#endif

	init_kpointer_table();
#if 0
	/*
	 * Setup cache bits
	 */
	mm_cachebits = m68k_is040or060 ? _PAGE_CACHE040 : 0;

	/* Initialize protection map.  */
	protection_map[0] = PAGE_READONLY;
	protection_map[1] = PAGE_READONLY;
	protection_map[2] = PAGE_COPY;
	protection_map[3] = PAGE_COPY;
	protection_map[4] = PAGE_READONLY;
	protection_map[5] = PAGE_READONLY;
	protection_map[6] = PAGE_COPY;
	protection_map[7] = PAGE_COPY;
	protection_map[8] = PAGE_READONLY;
	protection_map[9] = PAGE_READONLY;
	protection_map[10] = PAGE_SHARED;
	protection_map[11] = PAGE_SHARED;
	protection_map[12] = PAGE_READONLY;
	protection_map[13] = PAGE_READONLY;
	protection_map[14] = PAGE_SHARED;
	protection_map[15] = PAGE_SHARED;
#endif

	/*
	 * Map the physical memory available into the kernel virtual
	 * address space.  It may allocate some memory for page
	 * tables and thus modify availmem.
	 */

	for (chunk = 0; chunk < boot_info.num_memory; chunk++) {
		/* map_chunk returns the cumulative total mapped so far */
		mem_avail = map_chunk (boot_info.memory[chunk].addr,
				       boot_info.memory[chunk].size,
				       &availmem);

	}
	flush_tlb_all();
#ifdef DEBUG
	printk ("memory available is %ldKB\n", mem_avail >> 10);
#endif

	/*
	 * virtual address after end of kernel
	 * "availmem" is setup by the code in head.S.
	 */
	start_mem = availmem;

#ifdef DEBUG
	printk ("start_mem is %#lx\nvirtual_end is %#lx\n",
		start_mem, end_mem);
#endif

	/*
	 * initialize the bad page table and bad page to point
	 * to a couple of allocated pages
	 */
	empty_bad_page_table = start_mem;
	start_mem += PAGE_SIZE;
	empty_bad_page = start_mem;
	start_mem += PAGE_SIZE;
	empty_zero_page = start_mem;
	start_mem += PAGE_SIZE;
	memset((void *)empty_zero_page, 0, PAGE_SIZE);

#if 0
	/*
	 * allocate the "swapper" page directory and
	 * record in task 0 (swapper) tss
	 */
	swapper_pg_dir = (pgd_t *)get_kpointer_table();

	init_mm.pgd = swapper_pg_dir;
#endif

	/* swapper starts with an empty page directory */
	memset (swapper_pg_dir, 0, sizeof(pgd_t)*PTRS_PER_PGD);
	task[0]->tss.pagedir_v = (unsigned long *)swapper_pg_dir;
	task[0]->tss.pagedir_p = VTOP (swapper_pg_dir);

#ifdef DEBUG
	printk ("task 0 pagedir at %p virt, %#lx phys\n",
		task[0]->tss.pagedir_v, task[0]->tss.pagedir_p);
#endif

	/* setup CPU root pointer for swapper task */
	task[0]->tss.crp[0] = 0x80000000 | _PAGE_SHORT;
	task[0]->tss.crp[1] = task[0]->tss.pagedir_p;

	if (m68k_is040or060)
		/* 0x4e7b0806 is "movec d0,urp" (raw opcode for old assemblers) */
		asm ("movel %0,%/d0\n\t"
		     ".long 0x4e7b0806" /* movec d0,urp */
		     : /* no outputs */
		     : "g" (task[0]->tss.crp[1])
		     : "d0");
	else
		asm ("pmove %0@,%/crp"
		     : /* no outputs */
		     : "a" (task[0]->tss.crp));

#ifdef DEBUG
	printk ("set crp\n");
#endif

	/*
	 * Set up SFC/DFC registers (user data space)
	 */
	set_fs (USER_DS);

#ifdef DEBUG
	printk ("before free_area_init\n");
#endif

#ifdef CONFIG_BLK_DEV_RAM
#ifndef CONFIG_BLK_DEV_INITRD
	/*
	 * Since the initialization of the ramdisk's has been changed
	 * so it fits the new driver initialization scheme, we have to
	 * make room for our preloaded image here, instead of doing it
	 * in rd_init() as we cannot kmalloc() a block large enough
	 * for the image.
	 */

	ramdisk_length = boot_info.ramdisk_size * 1024;

	if ((ramdisk_length > 0) && (ROOT_DEV == 0)) {
		char *rdp;	     /* current location of ramdisk */

		rd_start = (char *) start_mem;

		/* get current address of ramdisk */
		rdp = (char *)mm_phys_to_virt (boot_info.ramdisk_addr);

		/* copy the ram disk image */
		memcpy (rd_start, rdp, ramdisk_length);
		start_mem += ramdisk_length;
		rd_doload = 1;	  /* tell rd_load to load this thing */
	}
#endif
#endif

	return free_area_init (start_mem, end_mem);
}
|
/*
 * Final memory initialization: un-reserve all usable pages, apply
 * machine-specific reservations (Atari ST-Ram quirks), clear the DMA
 * flag on pages beyond the machine's DMA reach, free every page not
 * marked reserved, and print the memory summary.  Reserved pages below
 * _etext are counted as kernel code, the rest as data.
 */
void mem_init(unsigned long start_mem, unsigned long end_mem)
{
	int codepages = 0;
	int datapages = 0;
	unsigned long tmp;
	extern int _etext;	/* linker symbol: end of kernel text */

	end_mem &= PAGE_MASK;
	high_memory = end_mem;

	/* clear the reserved bit on everything above the kernel image */
	start_mem = PAGE_ALIGN(start_mem);
	while (start_mem < high_memory) {
		clear_bit(PG_reserved, &mem_map[MAP_NR(start_mem)].flags);
		start_mem += PAGE_SIZE;
	}

#ifdef CONFIG_ATARI

	if (MACH_IS_ATARI) {

		/* If the page with physical address 0 isn't the first kernel
		 * code page, it has to be reserved because the first 2 KB of
		 * ST-Ram can only be accessed from supervisor mode by
		 * hardware.
		 */

		unsigned long virt0 = PTOV( 0 ), adr;
		extern unsigned long rsvd_stram_beg, rsvd_stram_end;

		if (virt0 != 0) {

			set_bit(PG_reserved, &mem_map[MAP_NR(virt0)].flags);

			/* Also, reserve all pages that have been marked by
			 * stram_alloc() (e.g. for the screen memory). (This may
			 * treat the first ST-Ram page a second time, but that
			 * doesn't hurt...) */

			/* round the reserved region out to whole pages */
			rsvd_stram_end += PAGE_SIZE - 1;
			rsvd_stram_end &= PAGE_MASK;
			rsvd_stram_beg &= PAGE_MASK;
			for( adr = rsvd_stram_beg; adr < rsvd_stram_end; adr += PAGE_SIZE )
				set_bit(PG_reserved, &mem_map[MAP_NR(adr)].flags);
		}
	}

#endif
#ifdef DEBUG
	printk ("task[0] root table is %p\n", task[0]->tss.pagedir_v);
#endif

	for (tmp = 0 ; tmp < end_mem ; tmp += PAGE_SIZE) {
		if (VTOP (tmp) >= mach_max_dma_address)
			clear_bit(PG_DMA, &mem_map[MAP_NR(tmp)].flags);
		if (PageReserved(mem_map+MAP_NR(tmp))) {
			/* reserved pages below _etext are kernel code */
			if (tmp < (unsigned long)&_etext)
				codepages++;
			else
				datapages++;
			continue;
		}
		mem_map[MAP_NR(tmp)].count = 1;
#ifdef CONFIG_BLK_DEV_INITRD
		/* don't free pages occupied by the initrd image */
		if (!initrd_start || (tmp < initrd_start || tmp >= initrd_end))
#endif
			free_page(tmp);
	}
	tmp = nr_free_pages << PAGE_SHIFT;
	printk("Memory: %luk/%luk available (%dk kernel code, %dk data)\n",
	       tmp >> 10,
	       high_memory >> 10,
	       codepages << (PAGE_SHIFT-10),
	       datapages << (PAGE_SHIFT-10));
}
|
void si_meminfo(struct sysinfo *val) |
{ |
unsigned long i; |
|
i = high_memory >> PAGE_SHIFT; |
val->totalram = 0; |
val->sharedram = 0; |
val->freeram = nr_free_pages << PAGE_SHIFT; |
val->bufferram = buffermem; |
while (i-- > 0) { |
if (PageReserved(mem_map+i)) |
continue; |
val->totalram++; |
if (!mem_map[i].count) |
continue; |
val->sharedram += mem_map[i].count-1; |
} |
val->totalram <<= PAGE_SHIFT; |
val->sharedram <<= PAGE_SHIFT; |
return; |
} |
/memory.c
0,0 → 1,869
/* |
* linux/arch/m68k/mm/memory.c |
* |
* Copyright (C) 1995 Hamish Macdonald |
*/ |
|
#include <linux/mm.h> |
#include <linux/kernel.h> |
#include <linux/string.h> |
#include <linux/types.h> |
#include <linux/malloc.h> |
|
#include <asm/segment.h> |
#include <asm/page.h> |
#include <asm/pgtable.h> |
#include <asm/system.h> |
#include <asm/traps.h> |
#include <asm/amigahw.h> |
#include <asm/bootinfo.h> |
|
extern pte_t *kernel_page_table (unsigned long *memavailp); |
|
/* Strings for `extern inline' functions in <asm/pgtable.h>. If put |
directly into these functions, they are output for every file that |
includes pgtable.h */ |
|
const char PgtabStr_bad_pmd[] = "Bad pmd in pte_alloc: %08lx\n"; |
const char PgtabStr_bad_pgd[] = "Bad pgd in pmd_alloc: %08lx\n"; |
const char PgtabStr_bad_pmdk[] = "Bad pmd in pte_alloc_kernel: %08lx\n"; |
const char PgtabStr_bad_pgdk[] = "Bad pgd in pmd_alloc_kernel: %08lx\n"; |
|
/* Descriptor for one page that is carved into user pointer tables
 * (8 per page -- see get_pointer_table).  Descriptors form a circular
 * doubly-linked list anchored at ptable_list; the anchor itself has
 * page == 0 (used as the search sentinel in free_pointer_table) and
 * alloced == 0xff so it never looks like it has a free slot. */
static struct ptable_desc {
	struct ptable_desc *prev;	/* circular list links */
	struct ptable_desc *next;
	unsigned long	   page;	/* kernel VA of the backing page (0 = anchor) */
	unsigned char	   alloced;	/* bitmap: bit i set => table slot i in use */
} ptable_list = { &ptable_list, &ptable_list, 0, 0xff };

/* bitmap helpers on a ptable_desc */
#define PD_NONEFREE(dp) ((dp)->alloced == 0xff)	/* all 8 slots used */
#define PD_ALLFREE(dp)	((dp)->alloced == 0)	/* no slot used */
#define PD_TABLEFREE(dp,i) (!((dp)->alloced & (1<<(i))))
#define PD_MARKUSED(dp,i) ((dp)->alloced |= (1<<(i)))
#define PD_MARKFREE(dp,i) ((dp)->alloced &= ~(1<<(i)))

/* size of one pointer table in bytes */
#define PTABLE_SIZE (PTRS_PER_PMD * sizeof(pmd_t))
|
/*
 * Allocate one user-space pointer table (pmd).  Tables are sub-
 * allocated 8 to a page; pages with free slots are kept at the front
 * of ptable_list, full pages at the back.  Returns a zeroed table, or
 * 0 on allocation failure.
 */
pmd_t *get_pointer_table (void)
{
	pmd_t *pmdp = NULL;
	unsigned long flags;
	struct ptable_desc *dp = ptable_list.next;	/* front page: has a free slot unless full */
	int i;

	/*
	 * For a pointer table for a user process address space, a
	 * table is taken from a page allocated for the purpose.  Each
	 * page can hold 8 pointer tables.  The page is remapped in
	 * virtual address space to be noncacheable.
	 */
	if (PD_NONEFREE (dp)) {
		/* front page is full => all pages are full; make a new one */

		if (!(dp = kmalloc (sizeof(struct ptable_desc),GFP_KERNEL))) {
			return 0;
		}

		if (!(dp->page = __get_free_page (GFP_KERNEL))) {
			kfree (dp);
			return 0;
		}

		nocache_page (dp->page);

		dp->alloced = 0;
		/* put at head of list */
		save_flags(flags);
		cli();
		dp->next = ptable_list.next;
		dp->prev = ptable_list.next->prev;
		ptable_list.next->prev = dp;
		ptable_list.next = dp;
		restore_flags(flags);
	}

	/* grab the first free slot in this page */
	for (i = 0; i < 8; i++)
		if (PD_TABLEFREE (dp, i)) {
			PD_MARKUSED (dp, i);
			pmdp = (pmd_t *)(dp->page + PTABLE_SIZE*i);
			break;
		}

	if (PD_NONEFREE (dp)) {
		/* move to end of list */
		save_flags(flags);
		cli();
		dp->prev->next = dp->next;
		dp->next->prev = dp->prev;

		/* NOTE(review): dp was the first element, so after the unlink
		 * above ptable_list.next->prev == &ptable_list; this appends
		 * dp at the tail. */
		dp->next = ptable_list.next->prev;
		dp->prev = ptable_list.prev;
		ptable_list.prev->next = dp;
		ptable_list.prev = dp;
		restore_flags(flags);
	}

	memset (pmdp, 0, PTABLE_SIZE);

	return pmdp;
}
|
/*
 * Release a pointer table previously handed out by get_pointer_table.
 * When the last table in a page is freed, the whole page is returned
 * to the system (after being made cacheable again); otherwise the
 * page's descriptor is moved to the front of the list since it now
 * has a free slot.  Panics on a table that is not on the list or is
 * already free.
 */
void free_pointer_table (pmd_t *ptable)
{
	struct ptable_desc *dp;
	unsigned long page = (unsigned long)ptable & PAGE_MASK;
	int index = ((unsigned long)ptable - page)/PTABLE_SIZE;	/* slot within page */
	unsigned long flags;

	/* the anchor's page field is 0, terminating the search */
	for (dp = ptable_list.next; dp->page && dp->page != page; dp = dp->next)
		;

	if (!dp->page)
		panic ("unable to find desc for ptable %p on list!", ptable);

	if (PD_TABLEFREE (dp, index))
		panic ("table already free!");

	PD_MARKFREE (dp, index);

	if (PD_ALLFREE (dp)) {
		/* all tables in page are free, free page */
		save_flags(flags);
		cli();
		dp->prev->next = dp->next;
		dp->next->prev = dp->prev;
		restore_flags(flags);
		cache_page (dp->page);
		free_page (dp->page);
		kfree (dp);
		return;
	} else {
		/*
		 * move this descriptor to the front of the list, since
		 * it has one or more free tables.
		 */
		save_flags(flags);
		cli();
		dp->prev->next = dp->next;
		dp->next->prev = dp->prev;

		dp->next = ptable_list.next;
		dp->prev = ptable_list.next->prev;
		ptable_list.next->prev = dp;
		ptable_list.next = dp;
		restore_flags(flags);
	}
}
|
/* maximum pages used for kpointer tables */
#define KPTR_PAGES      4
/* # of reserved slots */
#define RESERVED_KPTR	4
extern pmd_tablepage kernel_pmd_table; /* reserved in head.S */

/* Bookkeeping for the pages that back kernel pointer tables:
 * page[i] is the backing page (NULL until allocated), alloced[i] is a
 * bitmask of used table slots within that page.  Bit numbering follows
 * the m68k bitfield instructions used in get/free_kpointer_table. */
static struct kpointer_pages {
        pmd_tablepage *page[KPTR_PAGES];
        u_char alloced[KPTR_PAGES];
} kptr_pages;
|
void init_kpointer_table(void) { |
short i = KPTR_PAGES-1; |
|
/* first page is reserved in head.S */ |
kptr_pages.page[i] = &kernel_pmd_table; |
kptr_pages.alloced[i] = ~(0xff>>RESERVED_KPTR); |
for (i--; i>=0; i--) { |
kptr_pages.page[i] = NULL; |
kptr_pages.alloced[i] = 0; |
} |
} |
|
pmd_t *get_kpointer_table (void)
{
	/* For pointer tables for the kernel virtual address space,
	 * use the page that is reserved in head.S that can hold up to
	 * 8 pointer tables. 3 of these tables are always reserved
	 * (kernel_pg_dir, swapper_pg_dir and kernel pointer table for
	 * the first 16 MB of RAM). In addition, the 4th pointer table
	 * in this page is reserved. On Amiga and Atari, it is used to
	 * map in the hardware registers. It may be used for other
	 * purposes on other 68k machines. This leaves 4 pointer tables
	 * available for use by the kernel. 1 of them are usually used
	 * for the vmalloc tables. This allows mapping of 3 * 32 = 96 MB
	 * of physical memory. But these pointer tables are also used
	 * for other purposes, like kernel_map(), so further pages can
	 * now be allocated.
	 *
	 * Returns a zeroed pointer table, or NULL if none is available.
	 */
	pmd_tablepage *page;
	pmd_table *table;
	long nr, offset = -8;
	short i;

	/* find a page with a free slot: bfffo scans the (inverted)
	 * allocation mask for the first set bit; nr == 0 means no
	 * free slot in this page */
	for (i=KPTR_PAGES-1; i>=0; i--) {
		asm volatile("bfffo %1{%2,#8},%0"
			: "=d" (nr)
			: "d" ((u_char)~kptr_pages.alloced[i]), "d" (offset));
		if (nr)
			break;
	}
	if (i < 0) {
		printk("No space for kernel pointer table!\n");
		return NULL;
	}
	if (!(page = kptr_pages.page[i])) {
		/* slot has no backing page yet -- allocate one (noncacheable) */
		if (!(page = (pmd_tablepage *)__get_free_page(GFP_KERNEL))) {
			printk("No space for kernel pointer table!\n");
			return NULL;
		}
		nocache_page((u_long)(kptr_pages.page[i] = page));
	}
	/* mark slot nr-offset as used in the allocation bitmask */
	asm volatile("bfset %0@{%1,#1}"
		: /* no output */
		: "a" (&kptr_pages.alloced[i]), "d" (nr-offset));
	table = &(*page)[nr-offset];
	memset(table, 0, sizeof(pmd_table));
	return ((pmd_t *)table);
}
|
/*
 * Release a kernel pointer table obtained from get_kpointer_table.
 * Rejects (with a printk) tables that are not tracked or that lie in
 * the reserved region of the head.S page.  When a page's last table
 * is freed the page itself is released (made cacheable again first).
 */
void free_kpointer_table (pmd_t *pmdp)
{
	pmd_table *table = (pmd_table *)pmdp;
	pmd_tablepage *page = (pmd_tablepage *)((u_long)table & PAGE_MASK);
	long nr;
	short i;

	/* locate the bookkeeping slot for this page */
	for (i=KPTR_PAGES-1; i>=0; i--) {
		if (kptr_pages.page[i] == page)
			break;
	}
	nr = ((u_long)table - (u_long)page) / sizeof(pmd_table);
	/* reject untracked tables and the reserved tables in the head.S page */
	if (!table || i < 0 || (i == KPTR_PAGES-1 && nr < RESERVED_KPTR)) {
		printk("Attempt to free invalid kernel pointer table: %p\n", table);
		return;
	}
	/* clear the slot's bit in the allocation bitmask */
	asm volatile("bfclr %0@{%1,#1}"
		: /* no output */
		: "a" (&kptr_pages.alloced[i]), "d" (nr));
	if (!kptr_pages.alloced[i]) {
		/* last table in this page freed: release the page */
		kptr_pages.page[i] = 0;
		cache_page ((u_long)page);
		free_page ((u_long)page);
	}
}
|
/* |
* The following two routines map from a physical address to a kernel |
* virtual address and vice versa. |
*/ |
/*
 * The following two routines map from a physical address to a kernel
 * virtual address and vice versa.
 */
/*
 * Translate a kernel virtual address to a physical address.  First
 * tries the linear mapping implied by boot_info's memory chunk list;
 * if the address is outside every chunk the MMU itself is asked:
 * PLPAR on the '060, PTESTR+MMUSR on the '040, PTESTR+PMOVE on the
 * '020/'030.  Panics on an untranslatable address.
 * (m68k_is040or060 is 6 on a 68060, 4 on a 68040, else 0 -- see the
 * per-CPU branches in this file.)
 */
unsigned long mm_vtop (unsigned long vaddr)
{
	int i;
	unsigned long voff = vaddr;
	unsigned long offset = 0;

	/* walk the chunk list; virtual addresses are contiguous across chunks */
	for (i = 0; i < boot_info.num_memory; i++)
	{
		if (voff < offset + boot_info.memory[i].size) {
#ifdef DEBUGPV
			printk ("VTOP(%lx)=%lx\n", vaddr,
				boot_info.memory[i].addr + voff - offset);
#endif
			return boot_info.memory[i].addr + voff - offset;
		} else
			offset += boot_info.memory[i].size;
	}

	/* not in one of the memory chunks; get the actual
	 * physical address from the MMU.
	 */
	if (m68k_is040or060 == 6) {
		unsigned long fs = get_fs();
		unsigned long paddr;

		set_fs (SUPER_DATA);

		/* The PLPAR instruction causes an access error if the translation
		 * is not possible. We don't catch that here, so a bad kernel trap
		 * will be reported in this case. */
		asm volatile ("movel %1,%/a0\n\t"
			      ".word 0xf5c8\n\t"	/* plpar (a0) */
			      "movel %/a0,%0"
			      : "=g" (paddr)
			      : "g" (vaddr)
			      : "a0" );
		set_fs (fs);

		return paddr;

	} else if (m68k_is040or060 == 4) {
		unsigned long mmusr;
		unsigned long fs = get_fs();

		set_fs (SUPER_DATA);

		asm volatile ("movel %1,%/a0\n\t"
			      ".word 0xf568\n\t"	/* ptestr (a0) */
			      ".long 0x4e7a8805\n\t"	/* movec mmusr, a0 */
			      "movel %/a0,%0"
			      : "=g" (mmusr)
			      : "g" (vaddr)
			      : "a0", "d0");
		set_fs (fs);

		/* R bit set => translation resident; combine frame with page offset */
		if (mmusr & MMU_R_040)
			return (mmusr & PAGE_MASK) | (vaddr & (PAGE_SIZE-1));

		panic ("VTOP040: bad virtual address %08lx (%lx)", vaddr, mmusr);
	} else {
		volatile unsigned short temp;
		unsigned short mmusr;
		unsigned long *descaddr;

		asm volatile ("ptestr #5,%2@,#7,%0\n\t"
			      "pmove %/psr,%1@"
			      : "=a&" (descaddr)
			      : "a" (&temp), "a" (vaddr));
		mmusr = temp;

		if (mmusr & (MMU_I|MMU_B|MMU_L))
			panic ("VTOP030: bad virtual address %08lx (%x)", vaddr, mmusr);

		descaddr = (unsigned long *)PTOV(descaddr);

		/* decode by the number of table levels the search used:
		 * fewer levels => an early-termination descriptor covering
		 * a larger region */
		switch (mmusr & MMU_NUM) {
		      case 1:
			return (*descaddr & 0xfe000000) | (vaddr & 0x01ffffff);
		      case 2:
			return (*descaddr & 0xfffc0000) | (vaddr & 0x0003ffff);
		      case 3:
			return (*descaddr & PAGE_MASK) | (vaddr & (PAGE_SIZE-1));
		      default:
			panic ("VTOP: bad levels (%u) for virtual address %08lx",
			       mmusr & MMU_NUM, vaddr);
		}
	}

	/* not reached: every branch above returns or panics */
	panic ("VTOP: bad virtual address %08lx", vaddr);
}
|
unsigned long mm_ptov (unsigned long paddr) |
{ |
int i; |
unsigned long offset = 0; |
|
for (i = 0; i < boot_info.num_memory; i++) |
{ |
if (paddr >= boot_info.memory[i].addr && |
paddr < (boot_info.memory[i].addr |
+ boot_info.memory[i].size)) { |
#ifdef DEBUGPV |
printk ("PTOV(%lx)=%lx\n", paddr, |
(paddr - boot_info.memory[i].addr) + offset); |
#endif |
return (paddr - boot_info.memory[i].addr) + offset; |
} else |
offset += boot_info.memory[i].size; |
} |
|
/* |
* assume that the kernel virtual address is the same as the |
* physical address. |
* |
* This should be reasonable in most situations: |
* 1) They shouldn't be dereferencing the virtual address |
* unless they are sure that it is valid from kernel space. |
* 2) The only usage I see so far is converting a page table |
* reference to some non-FASTMEM address space when freeing |
* mmaped "/dev/mem" pages. These addresses are just passed |
* to "free_page", which ignores addresses that aren't in |
* the memory list anyway. |
* |
*/ |
|
/* |
* if on an amiga and address is in first 16M, move it |
* to the ZTWO_ADDR range |
*/ |
if (MACH_IS_AMIGA && paddr < 16*1024*1024) |
return ZTWO_VADDR(paddr); |
return paddr; |
} |
|
/* 68040/68060 per-page cache maintenance primitives.  Each operates on
 * one page; the low-order bits of the effective address given to
 * CINVP/CPUSHP are "don't care"s, so any address within the page works.
 * The instructions are emitted as raw opcodes (.word/.long) for the
 * benefit of assemblers that predate them.  pushv040/pushv060 take a
 * *virtual* address and translate it themselves (PTEST/PLPAR); the
 * others take a *physical* address. */
/* invalidate page in both caches */
#define clear040(paddr) __asm__ __volatile__ ("movel %0,%/a0\n\t"\
					      "nop\n\t"\
					      ".word 0xf4d0"\
					      /* CINVP I/D (a0) */\
					      : : "g" ((paddr))\
					      : "a0")

/* invalidate page in i-cache */
#define cleari040(paddr) __asm__ __volatile__ ("movel %0,%/a0\n\t"\
					       /* CINVP I (a0) */\
					       "nop\n\t"\
					       ".word 0xf490"\
					       : : "g" ((paddr))\
					       : "a0")

/* push page in both caches */
#define push040(paddr) __asm__ __volatile__ ("movel %0,%/a0\n\t"\
					     "nop\n\t"\
					     ".word 0xf4f0"\
					     /* CPUSHP I/D (a0) */\
					     : : "g" ((paddr))\
					     : "a0")

/* push and invalidate page in both caches */
#define pushcl040(paddr) do { push040((paddr));\
			      if (m68k_is040or060 == 6) clear040((paddr));\
			 } while(0)

/* push page in both caches, invalidate in i-cache */
#define pushcli040(paddr) do { push040((paddr));\
			       if (m68k_is040or060 == 6) cleari040((paddr));\
			  } while(0)

/* push page defined by virtual address in both caches */
#define pushv040(vaddr) __asm__ __volatile__ ("movel %0,%/a0\n\t"\
					      /* ptestr (a0) */\
					      ".word 0xf568\n\t"\
					      /* movec mmusr,d0 */\
					      ".long 0x4e7a0805\n\t"\
					      "andw #0xf000,%/d0\n\t"\
					      "movel %/d0,%/a0\n\t"\
					      /* CPUSHP I/D (a0) */\
					      "nop\n\t"\
					      ".word 0xf4f0"\
					      : : "g" ((vaddr))\
					      : "a0", "d0")

/* push page defined by virtual address in both caches */
#define pushv060(vaddr) __asm__ __volatile__ ("movel %0,%/a0\n\t"\
					      /* plpar (a0) */\
					      ".word 0xf5c8\n\t"\
					      /* CPUSHP I/D (a0) */\
					      ".word 0xf4f0"\
					      : : "g" ((vaddr))\
					      : "a0")
|
|
/* |
* 040: Hit every page containing an address in the range paddr..paddr+len-1. |
* (Low order bits of the ea of a CINVP/CPUSHP are "don't care"s). |
* Hit every page until there is a page or less to go. Hit the next page, |
* and the one after that if the range hits it. |
*/ |
/* ++roman: A little bit more care is required here: The CINVP instruction |
* invalidates cache entries WITHOUT WRITING DIRTY DATA BACK! So the beginning |
* and the end of the region must be treated differently if they are not |
* exactly at the beginning or end of a page boundary. Else, maybe too much |
* data becomes invalidated and thus lost forever. CPUSHP does what we need: |
* it invalidates the page after pushing dirty data to memory. (Thanks to Jes |
* for discovering the problem!) |
*/ |
/* ... but on the '060, CPUSH doesn't invalidate (for us, since we have set |
* the DPI bit in the CACR; would it cause problems with temporarily changing |
* this?). So we have to push first and then additionally to invalidate. |
*/ |
|
/* |
* cache_clear() semantics: Clear any cache entries for the area in question, |
* without writing back dirty entries first. This is useful if the data will |
* be overwritten anyway, e.g. by DMA to memory. The range is defined by a |
* _physical_ address. |
*/ |
|
/*
 * Invalidate any cache entries covering the physical range
 * paddr..paddr+len-1.  On the '040/'060 this pushes (CPUSH) each page
 * in the range -- see the ++roman comments above for why CPUSH is used
 * instead of CINV; on the '020/'030 the whole I+D cache is flushed via
 * the CACR since those caches cannot be invalidated per page.
 */
void cache_clear (unsigned long paddr, int len)
{
	if (m68k_is040or060) {
		/* ++roman: There have been too many problems with the CINV, it seems
		 * to break the cache maintenance of DMAing drivers. I don't expect
		 * too much overhead by using CPUSH instead.
		 */
		while (len > PAGE_SIZE) {
			pushcl040(paddr);
			len -= PAGE_SIZE;
			paddr += PAGE_SIZE;
		}
		if (len > 0) {
			pushcl040(paddr);
			if (((paddr + len - 1) ^ paddr) & PAGE_MASK) {
				/* a page boundary gets crossed at the end */
				pushcl040(paddr + len - 1);
			}
		}
	}
	/* NOTE(review): the #if 0 block below sits between the if body and
	 * its else; after preprocessing it disappears, leaving a normal
	 * if/else -- do not "fix" the apparent dangling else. */
#if 0
	/* on 68040, invalidate cache lines for pages in the range */
	while (len > PAGE_SIZE) {
		clear040(paddr);
		len -= PAGE_SIZE;
		paddr += PAGE_SIZE;
	}
	if (len > 0) {
		/* 0 < len <= PAGE_SIZE */
		clear040(paddr);
		if (((paddr + len - 1) / PAGE_SIZE) != (paddr / PAGE_SIZE)) {
			/* a page boundary gets crossed at the end */
			clear040(paddr + len - 1);
		}
	}
#endif
	else /* 68030 or 68020 */
		asm volatile ("movec %/cacr,%/d0\n\t"
			      "oriw %0,%/d0\n\t"
			      "movec %/d0,%/cacr"
			      : : "i" (FLUSH_I_AND_D)
			      : "d0");
}
|
|
/* |
* cache_push() semantics: Write back any dirty cache data in the given area, |
* and invalidate the range in the instruction cache. It needs not (but may) |
* invalidate those entries also in the data cache. The range is defined by a |
* _physical_ address. |
*/ |
|
/*
 * cache_push() semantics: Write back any dirty cache data in the given area,
 * and invalidate the range in the instruction cache. It needs not (but may)
 * invalidate those entries also in the data cache. The range is defined by a
 * _physical_ address.
 */

void cache_push (unsigned long paddr, int len)
{
	if (m68k_is040or060) {
		/*
		 * on 68040 or 68060, push cache lines for pages in the range;
		 * on the '040 this also invalidates the pushed lines, but not on
		 * the '060!
		 */
		while (len > PAGE_SIZE) {
			pushcli040(paddr);
			len -= PAGE_SIZE;
			paddr += PAGE_SIZE;
		}
		if (len > 0) {
			pushcli040(paddr);
			/* #if 0: old boundary test kept for reference; the XOR
			 * form below is equivalent for page-aligned PAGE_MASK */
#if 0
			if (((paddr + len - 1) / PAGE_SIZE) != (paddr / PAGE_SIZE)) {
#endif
			if (((paddr + len - 1) ^ paddr) & PAGE_MASK) {
				/* a page boundary gets crossed at the end */
				pushcli040(paddr + len - 1);
			}
		}
	}


	/*
	 * 68030/68020 have no writeback cache. On the other hand,
	 * cache_push is actually a superset of cache_clear (the lines
	 * get written back and invalidated), so we should make sure
	 * to perform the corresponding actions. After all, this is getting
	 * called in places where we've just loaded code, or whatever, so
	 * flushing the icache is appropriate; flushing the dcache shouldn't
	 * be required.
	 */
	else /* 68030 or 68020 */
		asm volatile ("movec %/cacr,%/d0\n\t"
			      "oriw %0,%/d0\n\t"
			      "movec %/d0,%/cacr"
			      : : "i" (FLUSH_I)
			      : "d0");
}
|
|
/* |
* cache_push_v() semantics: Write back any dirty cache data in the given |
* area, and invalidate those entries at least in the instruction cache. This |
* is intended to be used after data has been written that can be executed as |
* code later. The range is defined by a _user_mode_ _virtual_ address (or, |
* more exactly, the space is defined by the %sfc/%dfc register.) |
*/ |
|
/*
 * cache_push_v() semantics: Write back any dirty cache data in the given
 * area, and invalidate those entries at least in the instruction cache. This
 * is intended to be used after data has been written that can be executed as
 * code later. The range is defined by a _user_mode_ _virtual_ address (or,
 * more exactly, the space is defined by the %sfc/%dfc register.)
 */

void cache_push_v (unsigned long vaddr, int len)
{
	if (m68k_is040or060 == 4) {
		/* on 68040, push cache lines for pages in the range */
		while (len > PAGE_SIZE) {
			pushv040(vaddr);
			len -= PAGE_SIZE;
			vaddr += PAGE_SIZE;
		}
		if (len > 0) {
			pushv040(vaddr);
			/* #if 0: old boundary test kept for reference */
#if 0
			if (((vaddr + len - 1) / PAGE_SIZE) != (vaddr / PAGE_SIZE)) {
#endif
			if (((vaddr + len - 1) ^ vaddr) & PAGE_MASK) {
				/* a page boundary gets crossed at the end */
				pushv040(vaddr + len - 1);
			}
		}
	}
	else if (m68k_is040or060 == 6) {
		/* on 68060, push cache lines for pages in the range */
		while (len > PAGE_SIZE) {
			pushv060(vaddr);
			len -= PAGE_SIZE;
			vaddr += PAGE_SIZE;
		}
		if (len > 0) {
			pushv060(vaddr);
			if (((vaddr + len - 1) ^ vaddr) & PAGE_MASK) {
				/* a page boundary gets crossed at the end */
				pushv060(vaddr + len - 1);
			}
		}
	}
	/* 68030/68020 have no writeback cache; still need to clear icache. */
	else /* 68030 or 68020 */
		asm volatile ("movec %/cacr,%/d0\n\t"
			      "oriw %0,%/d0\n\t"
			      "movec %/d0,%/cacr"
			      : : "i" (FLUSH_I)
			      : "d0");
}
|
#undef clear040 |
#undef cleari040 |
#undef push040 |
#undef pushcl040 |
#undef pushcli040 |
#undef pushv040 |
#undef pushv060 |
|
/* Thin out-of-line wrapper around the PTOV() macro so other files can
 * translate physical to kernel virtual addresses without pulling in
 * the machine-specific headers that define PTOV. */
unsigned long mm_phys_to_virt (unsigned long addr)
{
	return PTOV (addr);
}
|
int mm_end_of_chunk (unsigned long addr, int len) |
{ |
int i; |
|
for (i = 0; i < boot_info.num_memory; i++) |
if (boot_info.memory[i].addr + boot_info.memory[i].size |
== addr + len) |
return 1; |
return 0; |
} |
|
/* Map some physical address range into the kernel address space. The |
* code is copied and adapted from map_chunk(). |
*/ |
|
/* Map the physical range [paddr, paddr+size) into kernel virtual space
 * and return the virtual address corresponding to 'paddr'.  Mappings are
 * handed out in 256 KB steps from a fixed window starting at 0xe0000000.
 * 'nocacheflag' selects the cache mode for the mapping; 'memavailp' is
 * passed through to kernel_page_table() so early boot code can allocate
 * page tables from not-yet-freed memory (NOTE(review): presumed from the
 * call signature — confirm against kernel_page_table's definition).
 */
unsigned long kernel_map(unsigned long paddr, unsigned long size,
			 int nocacheflag, unsigned long *memavailp )
{
#define STEP_SIZE	(256*1024)

	/* next free virtual address; static so successive calls hand out
	   consecutive windows */
	static unsigned long vaddr = 0xe0000000; /* safe place */
	unsigned long physaddr, retaddr;
	pte_t *ktablep = NULL;
	pmd_t *kpointerp;
	pgd_t *page_dir;
	int pindex;   /* index into pointer table */
	int prot;

	/* Round down 'paddr' to 256 KB and adjust size */
	physaddr = paddr & ~(STEP_SIZE-1);
	size += paddr - physaddr;
	/* remember the virtual address of the (unaligned) original paddr */
	retaddr = vaddr + (paddr - physaddr);
	paddr = physaddr;
	/* Round up the size to 256 KB. It doesn't hurt if too much is
	 * mapped... */
	size = (size + STEP_SIZE - 1) & ~(STEP_SIZE-1);

	if (m68k_is040or060) {
		/* 040/060: translate the cache-mode request into '040 page
		   descriptor bits */
		prot = _PAGE_PRESENT | _PAGE_GLOBAL040;
		switch( nocacheflag ) {
		case KERNELMAP_FULL_CACHING:
			prot |= _PAGE_CACHE040;
			break;
		case KERNELMAP_NOCACHE_SER:
		default:
			prot |= _PAGE_NOCACHE_S;
			break;
		case KERNELMAP_NOCACHE_NONSER:
			prot |= _PAGE_NOCACHE;
			break;
		case KERNELMAP_NO_COPYBACK:
			prot |= _PAGE_CACHE040W;
			/* prot |= 0; */
			break;
		}
	} else
		/* 020/030 have a single no-cache bit */
		prot = _PAGE_PRESENT |
		       ((nocacheflag == KERNELMAP_FULL_CACHING ||
			 nocacheflag == KERNELMAP_NO_COPYBACK) ? 0 : _PAGE_NOCACHE030);

	page_dir = pgd_offset_k(vaddr);
	if (pgd_present(*page_dir)) {
		/* reuse the existing pointer table for this pgd slot */
		kpointerp = (pmd_t *)pgd_page(*page_dir);
		/* each pointer-table entry covers 256 KB (vaddr >> 18) */
		pindex = (vaddr >> 18) & 0x7f;
		if (pindex != 0 && m68k_is040or060) {
			/* resume in the middle of a '040 page table */
			if (pmd_present(*kpointerp))
				ktablep = (pte_t *)pmd_page(*kpointerp);
			else {
				ktablep = kernel_page_table (memavailp);
				/* Make entries invalid */
				memset( ktablep, 0, sizeof(long)*PTRS_PER_PTE);
				pmd_set(kpointerp,ktablep);
			}
			/* 16 pointer entries share one page table; each
			   entry maps 64 ptes */
			ktablep += (pindex & 15)*64;
		}
	}
	else {
		/* we need a new pointer table */
		kpointerp = get_kpointer_table ();
		pgd_set(page_dir, (pmd_t *)kpointerp);
		memset( kpointerp, 0, PTRS_PER_PMD*sizeof(pmd_t));
		pindex = 0;
	}

	/* walk the range in 256 KB steps; physaddr advances inside the
	   loop body, vaddr in the loop header */
	for (physaddr = paddr; physaddr < paddr + size; vaddr += STEP_SIZE) {

		if (pindex > 127) {
			/* we need a new pointer table */
			kpointerp = get_kpointer_table ();
			pgd_set(pgd_offset_k(vaddr), (pmd_t *)kpointerp);
			memset( kpointerp, 0, PTRS_PER_PMD*sizeof(pmd_t));
			pindex = 0;
		}

		if (m68k_is040or060) {
			int i;
			unsigned long ktable;

			/*
			 * 68040, use page tables pointed to by the
			 * kernel pointer table.
			 */

			if ((pindex & 15) == 0) {
				/* Need new page table every 4M on the '040 */
				ktablep = kernel_page_table (memavailp);
				/* Make entries invalid */
				memset( ktablep, 0, sizeof(long)*PTRS_PER_PTE);
			}

			ktable = VTOP(ktablep);

			/*
			 * initialize section of the page table mapping
			 * this 1M portion.
			 */
			for (i = 0; i < 64; i++) {
				pte_val(*ktablep++) = physaddr | prot;
				physaddr += PAGE_SIZE;
			}

			/*
			 * make the kernel pointer table point to the
			 * kernel page table.
			 */

			((unsigned long *)kpointerp)[pindex++] = ktable | _PAGE_TABLE;

		} else {
			/*
			 * 68030, use early termination page descriptors.
			 * Each one points to 64 pages (256K).
			 */
			((unsigned long *)kpointerp)[pindex++] = physaddr | prot;
			physaddr += 64 * PAGE_SIZE;
		}
	}

	return( retaddr );
}
|
|
/* Apply cache mode 'cmode' to every pte covering [address, address+size)
 * within the single page table reached through 'pmd'.  The caller
 * (set_cmode_pmd) iterates over pmds and clamps 'size' per call.
 */
static inline void set_cmode_pte( pmd_t *pmd, unsigned long address,
				  unsigned long size, unsigned cmode )
{
	pte_t *pte;
	unsigned long end;

	if (pmd_none(*pmd))
		return;

	pte = pte_offset( pmd, address );
	/* reduce to the offset within this pmd's span, clamp to its end */
	address &= ~PMD_MASK;
	end = address + size;
	if (end >= PMD_SIZE)
		end = PMD_SIZE;

	/* clear the old cache-mode bits and or in the new mode
	   (NOTE(review): assumes ~_PAGE_NOCACHE masks out all cache-mode
	   bits — confirm against pgtable.h) */
	for( ; address < end; pte++ ) {
		pte_val(*pte) = (pte_val(*pte) & ~_PAGE_NOCACHE) | cmode;
		address += PAGE_SIZE;
	}
}
|
|
/* Apply cache mode 'cmode' to the range [address, address+size) under one
 * page directory entry, dispatching to set_cmode_pte for each pmd.
 */
static inline void set_cmode_pmd( pgd_t *dir, unsigned long address,
				  unsigned long size, unsigned cmode )
{
	pmd_t *pmd;
	unsigned long end;

	if (pgd_none(*dir))
		return;

	pmd = pmd_offset( dir, address );
	/* reduce to the offset within this pgd's span, clamp to its end */
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;

	if ((pmd_val(*pmd) & _DESCTYPE_MASK) == _PAGE_PRESENT) {
		/* 68030 early termination descriptor */
		/* one descriptor maps the whole region — patch it in place
		   and stop */
		pmd_val(*pmd) = (pmd_val(*pmd) & ~_PAGE_NOCACHE) | cmode;
		return;
	}
	else {
		/* "normal" tables */
		for( ; address < end; pmd++ ) {
			set_cmode_pte( pmd, address, end - address, cmode );
			/* advance to the start of the next pmd span */
			address = (address + PMD_SIZE) & PMD_MASK;
		}
	}
}
|
|
/* |
* Set new cache mode for some kernel address space. |
* The caller must push data for that range itself, if such data may already |
* be in the cache. |
*/ |
|
void kernel_set_cachemode( unsigned long address, unsigned long size, |
unsigned cmode ) |
{ |
pgd_t *dir = pgd_offset_k( address ); |
unsigned long end = address + size; |
|
if (m68k_is040or060) { |
switch( cmode ) { |
case KERNELMAP_FULL_CACHING: |
cmode = _PAGE_CACHE040; |
break; |
case KERNELMAP_NOCACHE_SER: |
default: |
cmode = _PAGE_NOCACHE_S; |
break; |
case KERNELMAP_NOCACHE_NONSER: |
cmode = _PAGE_NOCACHE; |
break; |
case KERNELMAP_NO_COPYBACK: |
cmode = _PAGE_CACHE040W; |
break; |
} |
} else |
cmode = ((cmode == KERNELMAP_FULL_CACHING || |
cmode == KERNELMAP_NO_COPYBACK) ? |
0 : _PAGE_NOCACHE030); |
|
for( ; address < end; dir++ ) { |
set_cmode_pmd( dir, address, end - address, cmode ); |
address = (address + PGDIR_SIZE) & PGDIR_MASK; |
} |
flush_tlb_all(); |
} |
|
|
/fault.c
0,0 → 1,113
/* |
* linux/arch/m68k/mm/fault.c |
* |
* Copyright (C) 1995 Hamish Macdonald |
*/ |
|
#include <linux/mman.h> |
#include <linux/mm.h> |
#include <linux/kernel.h> |
#include <linux/ptrace.h> |
|
#include <asm/system.h> |
#include <asm/pgtable.h> |
|
extern void die_if_kernel(char *, struct pt_regs *, long); |
|
/* |
* This routine handles page faults. It determines the problem, and |
* then passes it off to one of the appropriate routines. |
* |
* error_code: |
* bit 0 == 0 means no page found, 1 means protection fault |
* bit 1 == 0 means read, 1 means write |
* |
* If this routine detects a bad access, it returns 1, otherwise it |
* returns 0. |
*/ |
/* Handle a page fault at 'address'.  See the comment above for the
 * error_code bit layout (bit 0: protection fault, bit 1: write access).
 * Returns 0 when the fault was handled, 1 on a bad access.
 */
asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
			     unsigned long error_code)
{
	struct vm_area_struct * vma;

#ifdef DEBUG
	printk ("regs->sr=%#x, regs->pc=%#lx, address=%#lx, %ld, %p\n",
		regs->sr, regs->pc, address, error_code,
		current->tss.pagedir_v);
#endif

	/* find the first vma ending above 'address' */
	vma = find_vma(current, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	/* address lies in the gap below the vma: only acceptable if this
	   is a downward-growing (stack) vma we can expand to cover it */
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (user_mode(regs)) {
		/* Accessing the stack below usp is always a bug. The
		   "+ 256" is there due to some instructions doing
		   pre-decrement on the stack and that doesn't show up
		   until later. */
		if (address + 256 < rdusp())
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;

/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
/*
 * was it a write?
 */
	if (error_code & 2) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		/* read with protection fault? */
		if (error_code & 1)
			goto bad_area;
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	/* protection fault on a present page: copy-on-write / write-protect
	   handling; otherwise fault the page in */
	if (error_code & 1) {
		do_wp_page(current, vma, address, error_code & 2);
		return 0;
	}
	do_no_page(current, vma, address, error_code & 2);

	/* There seems to be a missing invalidate somewhere in do_no_page.
	 * Until I found it, this one cures the problem and makes
	 * 1.2 run on the 68040 (Martin Apel).
	 */
	flush_tlb_all();

	return 0;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	if (user_mode(regs)) {
		/* User memory access */
		force_sig (SIGSEGV, current);
		return 1;
	}

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */
	if ((unsigned long) address < PAGE_SIZE) {
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
	} else
		printk(KERN_ALERT "Unable to handle kernel access");
	printk(" at virtual address %08lx\n",address);
	die_if_kernel("Oops", regs, error_code);
	do_exit(SIGKILL);

	return 1;
}
/Makefile
0,0 → 1,13
#
# Makefile for the linux m68k-specific parts of the memory manager.
#
# Note! Dependencies are done automagically by 'make dep', which also
# removes any old dependencies. DON'T put your own dependencies here
# unless it's something special (ie not a .c file).
#
# Note 2! The CFLAGS definition is now in the main makefile...

# Link the objects below into one relocatable mm.o for this directory.
O_TARGET := mm.o
O_OBJS := init.o fault.o memory.o

include $(TOPDIR)/Rules.make