/*
 *  mm.c -- Crude memory management for early boot.
 *
 *  Copyright (C) 1998, 1999 Gabriel Paubert, paubert@iram.es
 *
 *  Modified to compile in RTEMS development environment
 *  by Eric Valette
 *
 *  Copyright (C) 1999 Eric Valette. valette@crf.canon.fr
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.OARcorp.com/rtems/license.html.
 *
 * $Id: mm.c,v 1.2 2001-09-27 12:01:06 chris Exp $
 */

/* This code is a crude memory manager for early boot for LinuxPPC.
 * As such, it does not try to perform many optimizations depending
 * on the processor; it only uses features which are common to
 * all processors (no BATs...).
 *
 * On PreP platforms (the only ones on which it works for now),
 * it maps 1:1 all RAM/ROM and I/O space as claimed by the
 * residual data. The holes between these areas can be virtually
 * remapped to any of these, since for some functions it is very handy
 * to have virtually contiguous but physically discontiguous memory.
 *
 * Physical memory allocation is also very crude, since it's only
 * designed to manage a small number of large chunks. For valloc/vfree
 * and palloc/pfree, the unit of allocation is the 4kB page.
 *
 * The salloc/sfree pair has been added after tracing gunzip and seeing
 * how it performed a very large number of small allocations.
 * For these the unit of allocation is 8 bytes (the s stands for
 * small or subpage). This memory is cleared when allocated.
 *
 */
 
 
#include <sys/types.h>
#include <libcpu/spr.h>
#include "bootldr.h"
#include <libcpu/mmu.h>
#include <libcpu/page.h>
#include <limits.h>
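/* A minimal usage sketch of the allocators described in the comment above
 * (not part of the original code, kept disabled). valloc/__palloc deal in
 * whole 4kB pages, salloc in zeroed 8-byte units; pfree() is assumed to be
 * declared in bootldr.h alongside __palloc().
 */
#if 0
static void mm_usage_sketch(void)
{
        void *phys  = __palloc(2*PAGE_SIZE, 0);   /* two physical pages, taken from the top */
        void *virt  = valloc(3*PAGE_SIZE);        /* three pages of virtual address space */
        void *small = salloc(24);                 /* rounded up to 8-byte units, cleared */

        sfree(small);
        vfree(virt);
        pfree(phys);
}
#endif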
 
 
/* We use our own kind of simple memory areas for the loader, but
 * we want to avoid potential clashes with kernel includes.
 * Here a map describes a contiguous area from base to end; the
 * firstpte entry corresponds to the physical address and has the low
 * order bits set for caching and permission.
 */

typedef struct _map {
        struct _map *next;
        u_long base;
        u_long end;
        u_long firstpte;
} map;
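/* For example (illustrative values only): a 1:1 mapping of RAM from
 * 0x00100000 to 0x001fffff with the PTE_RAM attribute bits, as built by
 * create_identity_mappings() below, would be stored as
 *   base = 0x00100000, end = 0x001fffff, firstpte = 0x00100000 | PTE_RAM.
 */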
 
 
/* The LSBs of the firstpte entries on map lists other than mappings
 * are constants which can be checked for debugging. All these constants
 * have the bit of weight 4 set; this bit is zero in the mappings list entries.
 * Actually the firstpte&7 value is:
 * - 0 or 1 should not happen
 * - 2 for RW actual virtual->physical mappings
 * - 3 for RO actual virtual->physical mappings
 * - 6 for free areas to be suballocated by salloc
 * - 7 for salloc'ated areas
 * - 4 or 5 for all others, in this case firstpte & 63 is
 *   - 4 for unused maps (on the free list)
 *   - 12 for free physical memory
 *   - 13 for physical memory in use
 *   - 20 for free virtual address space
 *   - 21 for allocated virtual address space
 *   - 28 for physical memory space suballocated by salloc
 *   - 29 for physical memory that can't be freed
 */

#define MAP_FREE_SUBS 6
#define MAP_USED_SUBS 7

#define MAP_FREE 4
#define MAP_FREE_PHYS 12
#define MAP_USED_PHYS 13
#define MAP_FREE_VIRT 20
#define MAP_USED_VIRT 21
#define MAP_SUBS_PHYS 28
#define MAP_PERM_PHYS 29
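/* A hedged debugging sketch (not part of the original code) showing one way
 * the encoding described above could be decoded; the function name is
 * illustrative only.
 */
#if 0
static const char *map_kind(const map *m)
{
        switch (m->firstpte & 7) {
        case 2: return "RW mapping";
        case 3: return "RO mapping";
        case MAP_FREE_SUBS: return "salloc free area";
        case MAP_USED_SUBS: return "salloc'ated area";
        case 4: case 5:
                switch (m->firstpte & 63) {
                case MAP_FREE:      return "unused map struct";
                case MAP_FREE_PHYS: return "free physical memory";
                case MAP_USED_PHYS: return "physical memory in use";
                case MAP_FREE_VIRT: return "free virtual space";
                case MAP_USED_VIRT: return "allocated virtual space";
                case MAP_SUBS_PHYS: return "salloc physical area";
                case MAP_PERM_PHYS: return "permanent physical memory";
                }
        }
        return "invalid";
}
#endif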
 
 
SPR_RW(SDR1);
SPR_RO(DSISR);
SPR_RO(DAR);

/* We need a few statically allocated free maps to bootstrap the
 * memory management */
static map free_maps[4] = {{free_maps+1, 0, 0, MAP_FREE},
                           {free_maps+2, 0, 0, MAP_FREE},
                           {free_maps+3, 0, 0, MAP_FREE},
                           {NULL, 0, 0, MAP_FREE}};
struct _mm_private {
        void *sdr1;
        u_long hashmask;
        map *freemaps;     /* Pool of unused map structs */
        map *mappings;     /* Sorted list of virtual->physical mappings */
        map *physavail;    /* Unallocated physical address space */
        map *physused;     /* Allocated physical address space */
        map *physperm;     /* Permanently allocated physical space */
        map *virtavail;    /* Unallocated virtual address space */
        map *virtused;     /* Allocated virtual address space */
        map *sallocfree;   /* Free maps for salloc */
        map *sallocused;   /* Used maps for salloc */
        map *sallocphys;   /* Physical areas used by salloc */
        u_int hashcnt;     /* Used to cycle in PTEG when they overflow */
} mm_private = {hashmask: 0xffc0,
                freemaps: free_maps+0};

/* A simplified hash table entry declaration */
typedef struct _hash_entry {
        int key;
        u_long rpn;
} hash_entry;

void print_maps(map *, const char *);
 
 
/* The handler used for all exceptions, although for now it is only
 * designed to properly handle MMU interrupts to fill the hash table.
 */


void _handler(int vec, ctxt *p) {
        map *area;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
        u_long vaddr, cause;
        if (vec==4 || vec==7) { /* ISI exceptions are different */
                vaddr = p->nip;
                cause = p->msr;
        } else { /* Valid for DSI and alignment exceptions */
                vaddr = _read_DAR();
                cause = _read_DSISR();
        }

        if (vec==3 || vec==4) {
                /* Panic if the fault is not PTE not found. */
                if (!(cause & 0x40000000)) {
                        MMUon();
                        printk("\nPanic: vector=%x, cause=%lx\n", vec, cause);
                        hang("Memory protection violation at ", vaddr, p);
                }

                for(area=mm->mappings; area; area=area->next) {
                        if(area->base<=vaddr && vaddr<=area->end) break;
                }

                if (area) {
                        u_long hash, vsid, rpn;
                        hash_entry volatile *hte, *_hte1;
                        u_int i, alt=0, flushva;

                        vsid = _read_SR((void *)vaddr);
                        rpn = (vaddr&PAGE_MASK)-area->base+area->firstpte;
                        hash = vsid<<6;
                        hash ^= (vaddr>>(PAGE_SHIFT-6))&0x3fffc0;
                        hash &= mm->hashmask;
                        /* Find an empty entry in the PTEG, else
                         * replace a random one.
                         */
                        hte = (hash_entry *) ((u_long)(mm->sdr1)+hash);
                        for (i=0; i<8; i++) {
                                if (hte[i].key>=0) goto found;
                        }
                        hash ^= mm->hashmask;
                        alt = 0x40; _hte1 = hte;
                        hte = (hash_entry *) ((u_long)(mm->sdr1)+hash);

                        for (i=0; i<8; i++) {
                                if (hte[i].key>=0) goto found;
                        }
                        alt = 0;
                        hte = _hte1;
                        /* Choose a victim entry and replace it. There might be
                         * better policies to choose the victim, but in a boot
                         * loader we want simplicity as long as it works.
                         *
                         * We would not need to invalidate the TLB entry since
                         * the mapping is still valid. But this would be a mess
                         * when unmapping so we make sure that the TLB is a
                         * subset of the hash table under all circumstances.
                         */
                        i = mm->hashcnt;
                        mm->hashcnt = (mm->hashcnt+1)%8;
                        /* Note that the hash is already complemented here! */
                        flushva = (~(hash<<9)^((hte[i].key)<<5)) &0x3ff000;
                        if (hte[i].key&0x40) flushva^=0x3ff000;
                        flushva |= ((hte[i].key<<21)&0xf0000000)
                          | ((hte[i].key<<22)&0x0fc00000);
                        hte[i].key=0;
                        asm volatile("sync; tlbie %0; sync" : : "r" (flushva));
                found:
                        hte[i].rpn = rpn;
                        asm volatile("eieio": : );
                        hte[i].key = 0x80000000|(vsid<<7)|alt|
                          ((vaddr>>22)&0x3f);
                        return;
                } else {
                        MMUon();
                        printk("\nPanic: vector=%x, cause=%lx\n", vec, cause);
                        hang("\nInvalid memory access attempt at ", vaddr, p);
                }
        } else {
          MMUon();
          printk("\nPanic: vector=%x, dsisr=%lx, faultaddr =%lx, msr=%lx opcode=%lx\n", vec,
                 cause, p->nip, p->msr, * ((unsigned int*) p->nip) );
          if (vec == 7) {
            unsigned int* ptr = ((unsigned int*) p->nip) - 4 * 10;
            for (; ptr <= (((unsigned int*) p->nip) + 4 * 10); ptr ++)
              printk("Hexadecimal code at address %x = %x\n", ptr, *ptr);
          }
          hang("Program or alignment exception at ", vaddr, p);
        }
}
 
 
/* Generic routines for map handling.
 */

static inline
void free_map(map *p) {
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
        if (!p) return;
        p->next=mm->freemaps;
        mm->freemaps=p;
        p->firstpte=MAP_FREE;
}

/* Sorted insertion in linked list */
static
int insert_map(map **head, map *p) {
        map *q = *head;
        if (!p) return 0;
        if (q && (q->base < p->base)) {
                for(;q->next && q->next->base<p->base; q = q->next);
                if ((q->end >= p->base) ||
                    (q->next && p->end>=q->next->base)) {
                        free_map(p);
                        printk("Overlapping areas!\n");
                        return 1;
                }
                p->next = q->next;
                q->next = p;
        } else { /* Insert at head */
                if (q && (p->end >= q->base)) {
                        free_map(p);
                        printk("Overlapping areas!\n");
                        return 1;
                }
                p->next = q;
                *head = p;
        }
        return 0;
}
 
 
 
 
/* Removal from linked list */

static
map *remove_map(map **head, map *p) {
        map *q = *head;

        if (!p || !q) return NULL;
        if (q==p) {
                *head = q->next;
                return p;
        }
        for(;q && q->next!=p; q=q->next);
        if (q) {
                q->next=p->next;
                return p;
        } else {
                return NULL;
        }
}

static
map *remove_map_at(map **head, void * vaddr) {
        map *p, *q = *head;

        if (!vaddr || !q) return NULL;
        if (q->base==(u_long)vaddr) {
                *head = q->next;
                return q;
        }
        while (q->next && q->next->base != (u_long)vaddr) q=q->next;
        p=q->next;
        if (p) q->next=p->next;
        return p;
}
 
 
static inline
map * alloc_map_page(void) {
        map *from, *p;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;

        /* printk("Allocating new map page !"); */
        /* Get the highest page */
        for (from=mm->physavail; from && from->next; from=from->next);
        if (!from) return NULL;

        from->end -= PAGE_SIZE;

        mm->freemaps = (map *) (from->end+1);

        for(p=mm->freemaps; p<mm->freemaps+PAGE_SIZE/sizeof(map)-1; p++) {
                p->next = p+1;
                p->firstpte = MAP_FREE;
        }
        (p-1)->next=0;

        /* Take the last one as pointer to self and insert
         * the map into the permanent map list.
         */

        p->firstpte = MAP_PERM_PHYS;
        p->base=(u_long) mm->freemaps;
        p->end = p->base+PAGE_SIZE-1;

        insert_map(&mm->physperm, p);

        if (from->end+1 == from->base)
                free_map(remove_map(&mm->physavail, from));

        return mm->freemaps;
}

static
map * alloc_map(void) {
        map *p;
        struct _mm_private * mm = (struct _mm_private *) bd->mm_private;

        p = mm->freemaps;
        if (!p) {
                p=alloc_map_page();
        }

        if(p) mm->freemaps=p->next;

        return p;
}

static
void coalesce_maps(map *p) {
        while(p) {
                if (p->next && (p->end+1 == p->next->base)) {
                        map *q=p->next;
                        p->end=q->end;
                        p->next=q->next;
                        free_map(q);
                } else {
                        p = p->next;
                }
        }
}
 
 
/* These routines are used to find the free memory zones to avoid
 * overlapping destructive copies when initializing.
 * They work from the top because of the way we want to boot.
 * In the following the term zone refers to the memory described
 * by one or several contiguous so-called segments in the
 * residual data.
 */
#define STACK_PAGES 2
static inline u_long
find_next_zone(RESIDUAL *res, u_long lowpage, u_long flags) {
        u_long i, newmin=0, size=0;
        for(i=0; i<res->ActualNumMemSegs; i++) {
                if (res->Segs[i].Usage & flags
                    && res->Segs[i].BasePage<lowpage
                    && res->Segs[i].BasePage>newmin) {
                        newmin=res->Segs[i].BasePage;
                        size=res->Segs[i].PageCount;
                }
        }
        return newmin+size;
}

static inline u_long
find_zone_start(RESIDUAL *res, u_long highpage, u_long flags) {
        u_long i;
        int progress;
        do {
                progress=0;
                for (i=0; i<res->ActualNumMemSegs; i++) {
                        if ( (res->Segs[i].BasePage+res->Segs[i].PageCount
                              == highpage)
                             && res->Segs[i].Usage & flags) {
                                highpage=res->Segs[i].BasePage;
                                progress=1;
                        }
                }
        } while(progress);
        return highpage;
}
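/* A hedged sketch (not part of the original code) of the top-down zone walk
 * idiom that early_setup() and mm_init() build on: repeatedly take the
 * highest zone whose start lies below the previous lower bound, then find
 * where that contiguous run of segments begins.
 */
#if 0
static void walk_zones_top_down(RESIDUAL *res, u_long flags)
{
        u_long lowpage = ULONG_MAX, highpage;

        while ((highpage = find_next_zone(res, lowpage, flags))) {
                lowpage = find_zone_start(res, highpage, flags);
                /* the zone spans pages [lowpage, highpage) */
        }
}
#endif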
 
 
/* The Motorola NT firmware does not provide any setting in the residual
 * data about memory segment usage. The following table provides enough
 * info so that this bootloader can work.
 */
MEM_MAP seg_fix[] = {
    { 0x2000, 0xFFF00, 0x00100 },
    { 0x0020, 0x02000, 0x7E000 },
    { 0x0008, 0x00800, 0x00168 },
    { 0x0004, 0x00000, 0x00005 },
    { 0x0001, 0x006F1, 0x0010F },
    { 0x0002, 0x006AD, 0x00044 },
    { 0x0010, 0x00005, 0x006A8 },
    { 0x0010, 0x00968, 0x00698 },
    { 0x0800, 0xC0000, 0x3F000 },
    { 0x0600, 0xBF800, 0x00800 },
    { 0x0500, 0x81000, 0x3E800 },
    { 0x0480, 0x80800, 0x00800 },
    { 0x0440, 0x80000, 0x00800 } };
 
 
 
 
/* The Motorola NT firmware does not set up all required info in the residual
 * data. This routine changes some things in a way that makes the bootloader
 * and Linux happy.
 */
void
fix_residual( RESIDUAL *res )
{
#if 0
    PPC_DEVICE *hostbridge;
#endif
    int i;

    /* Missing memory segment information */
    res->ActualNumMemSegs = sizeof(seg_fix)/sizeof(MEM_MAP);
    for (i=0; i<res->ActualNumMemSegs; i++) {
        res->Segs[i].Usage = seg_fix[i].Usage;
        res->Segs[i].BasePage = seg_fix[i].BasePage;
        res->Segs[i].PageCount = seg_fix[i].PageCount;
    }
    /* The following should be fixed in the current version of the
     * kernel and of the bootloader.
     */
#if 0
    /* PPCBug has this zero */
    res->VitalProductData.CacheLineSize = 0;
    /* Motorola NT firmware sets TimeBaseDivisor to 0 */
    if ( res->VitalProductData.TimeBaseDivisor == 0 ) {
        res->VitalProductData.TimeBaseDivisor = 4000;
    }

    /* Motorola NT firmware records the PCIBridge as a "PCIDEVICE" and
     * sets "PCIBridgeDirect". This bootloader and Linux work better if
     * BusId = "PROCESSORDEVICE" and Interface = "PCIBridgeIndirect".
     */
    hostbridge=residual_find_device(PCIDEVICE, NULL,
                                        BridgeController,
                                        PCIBridge, -1, 0);
    if (hostbridge) {
        hostbridge->DeviceId.BusId = PROCESSORDEVICE;
        hostbridge->DeviceId.Interface = PCIBridgeIndirect;
    }
#endif
}
 
 
/* This routine is the first C code called with very little stack space!
 * Its goal is to find where the boot image can be moved. This will
 * be the highest address with enough room.
 */
int early_setup(u_long image_size) {
        register RESIDUAL *res = bd->residual;
        u_long minpages = PAGE_ALIGN(image_size)>>PAGE_SHIFT;

        /* Fix residual if we are loaded by Motorola NT firmware */
        if ( res && res->VitalProductData.FirmwareSupplier == 0x10000 )
            fix_residual( res );

        /* FIXME: if OF we should do something different */
        if( !bd->of_entry && res &&
           res->ResidualLength <= sizeof(RESIDUAL) && res->Version == 0 ) {
                u_long lowpage=ULONG_MAX, highpage;
                u_long imghigh=0, stkhigh=0;
                /* Find the highest and large enough contiguous zone
                   consisting of free and BootImage sections. */
                /* Find 3 free areas of memory, one for the main image, one
                 * for the stack (STACK_PAGES), and one page to put the map
                 * structures. They are allocated from the top of memory.
                 * In most cases the stack will be put just below the image.
                 */
                while((highpage =
                       find_next_zone(res, lowpage, BootImage|Free))) {
                        lowpage=find_zone_start(res, highpage, BootImage|Free);
                        if ((highpage-lowpage)>minpages &&
                            highpage>imghigh) {
                                imghigh=highpage;
                                highpage -=minpages;
                        }
                        if ((highpage-lowpage)>STACK_PAGES &&
                            highpage>stkhigh) {
                                stkhigh=highpage;
                                highpage-=STACK_PAGES;
                        }
                }

                bd->image = (void *)((imghigh-minpages)<<PAGE_SHIFT);
                bd->stack=(void *) (stkhigh<<PAGE_SHIFT);

                /* The code mover is put at the lowest possible place
                 * of free memory. If this corresponds to the loaded boot
                 * partition image it does not matter because it overrides
                 * the unused part of it (x86 code).
                 */
                bd->mover=(void *) (lowpage<<PAGE_SHIFT);

                /* Let us flush the caches in all cases. After all it should
                 * not harm even on 601 and we don't care about performance.
                 * Right now it's easy since all processors have a line size
                 * of 32 bytes. Once again residual data has proved unreliable.
                 */
                bd->cache_lsize = 32;
        }
        /* For now we always assume that it's successful, we should
         * handle better the case of insufficient memory.
         */
        return 0;
}
 
 
void * valloc(u_long size) {
        map *p, *q;
        struct _mm_private * mm = (struct _mm_private *) bd->mm_private;

        if (size==0) return NULL;
        size=PAGE_ALIGN(size)-1;
        for (p=mm->virtavail; p; p=p->next) {
                if (p->base+size <= p->end) break;
        }
        if(!p) return NULL;
        q=alloc_map();
        q->base=p->base;
        q->end=q->base+size;
        q->firstpte=MAP_USED_VIRT;
        insert_map(&mm->virtused, q);
        if (q->end==p->end) free_map(remove_map(&mm->virtavail, p));
        else p->base += size+1;
        return (void *)q->base;
}

static
void vflush(map *virtmap) {
        struct _mm_private * mm = (struct _mm_private *) bd->mm_private;
        u_long i, limit=(mm->hashmask>>3)+8;
        hash_entry volatile *p=(hash_entry *) mm->sdr1;

        /* PTE handling is simple since the processor never updates
         * the entries. Writable pages always have the C bit set and
         * all valid entries have the R bit set. From the processor
         * point of view the hash table is read only.
         */
        for (i=0; i<limit; i++) {
                if (p[i].key<0) {
                        u_long va;
                        va = ((i<<9)^((p[i].key)<<5)) &0x3ff000;
                        if (p[i].key&0x40) va^=0x3ff000;
                        va |= ((p[i].key<<21)&0xf0000000)
                          | ((p[i].key<<22)&0x0fc00000);
                        if (va>=virtmap->base && va<=virtmap->end) {
                                p[i].key=0;
                                asm volatile("sync; tlbie %0; sync" : :
                                             "r" (va));
                        }
                }
        }
}

void vfree(void *vaddr) {
        map *physmap, *virtmap; /* Actual mappings pertaining to this vm */
        struct _mm_private * mm = (struct _mm_private *) bd->mm_private;

        /* Flush memory queues */
        asm volatile("sync": : : "memory");

        virtmap = remove_map_at(&mm->virtused, vaddr);
        if (!virtmap) return;

        /* Remove mappings corresponding to virtmap */
        for (physmap=mm->mappings; physmap; ) {
                map *nextmap=physmap->next;
                if (physmap->base>=virtmap->base
                    && physmap->base<virtmap->end) {
                        free_map(remove_map(&mm->mappings, physmap));
                }
                physmap=nextmap;
        }

        vflush(virtmap);

        virtmap->firstpte= MAP_FREE_VIRT;
        insert_map(&mm->virtavail, virtmap);
        coalesce_maps(mm->virtavail);
}
 
 
void vunmap(void *vaddr) {
        map *physmap, *virtmap; /* Actual mappings pertaining to this vm */
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;

        /* Flush memory queues */
        asm volatile("sync": : : "memory");

        /* vaddr must be within one of the vm areas in use and
         * must correspond to one of the physical areas
         */
        for (virtmap=mm->virtused; virtmap; virtmap=virtmap->next) {
                if (virtmap->base<=(u_long)vaddr &&
                    virtmap->end>=(u_long)vaddr) break;
        }
        if (!virtmap) return;

        physmap = remove_map_at(&mm->mappings, vaddr);
        if(!physmap) return;
        vflush(physmap);
        free_map(physmap);
}

int vmap(void *vaddr, u_long p, u_long size) {
        map *q;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;

        size=PAGE_ALIGN(size);
        if(!size) return 1;
        /* Check that the requested area fits in one vm image */
        for (q=mm->virtused; q; q=q->next) {
                if ((q->base <= (u_long)vaddr) &&
                    (q->end>=(u_long)vaddr+size -1)) break;
        }
        if (!q) return 1;
        q= alloc_map();
        if (!q) return 1;
        q->base = (u_long)vaddr;
        q->end = (u_long)vaddr+size-1;
        q->firstpte = p;
        return insert_map(&mm->mappings, q);
}
 
 
static
void create_identity_mappings(int type, int attr) {
        u_long lowpage=ULONG_MAX, highpage;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
        RESIDUAL * res=bd->residual;

        while((highpage = find_next_zone(res, lowpage, type))) {
                map *p;
                lowpage=find_zone_start(res, highpage, type);
                p=alloc_map();
                /* Do not map page 0 to catch null pointers */
                lowpage = lowpage ? lowpage : 1;
                p->base=lowpage<<PAGE_SHIFT;
                p->end=(highpage<<PAGE_SHIFT)-1;
                p->firstpte = (lowpage<<PAGE_SHIFT)|attr;
                insert_map(&mm->mappings, p);
        }
}

static inline
void add_free_map(u_long base, u_long end) {
        map *q=NULL;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;

        if (base<end) q=alloc_map();
        if (!q) return;
        q->base=base;
        q->end=end-1;
        q->firstpte=MAP_FREE_VIRT;
        insert_map(&mm->virtavail, q);
}

static inline
void create_free_vm(void) {
        map *p;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;

        u_long vaddr=PAGE_SIZE; /* Never map vaddr 0 */
        for(p=mm->mappings; p; p=p->next) {
                add_free_map(vaddr, p->base);
                vaddr=p->end+1;
        }
        /* Special end of memory case */
        if (vaddr) add_free_map(vaddr,0);
}
 
 
/* Memory management initialization.
 * Set up the mapping lists.
 */

static inline
void add_perm_map(u_long start, u_long size) {
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
        map *p=alloc_map();
        p->base = start;
        p->end = start + size - 1;
        p->firstpte = MAP_PERM_PHYS;
        insert_map(& mm->physperm , p);
}

void mm_init(u_long image_size)
{
        u_long lowpage=ULONG_MAX, highpage;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
        RESIDUAL * res=bd->residual;
        extern void (tlb_handlers)(void);
        extern void (_handler_glue)(void);
        int i;
        map *p;

        /* The checks are simplified by the fact that the image
         * and stack area are always allocated at the upper end
         * of a free block.
         */
        while((highpage = find_next_zone(res, lowpage, BootImage|Free))) {
                lowpage=find_zone_start(res, highpage, BootImage|Free);
                if ( ( ((u_long)bd->image+PAGE_ALIGN(image_size))>>PAGE_SHIFT)
                     == highpage) {
                        highpage=(u_long)(bd->image)>>PAGE_SHIFT;
                        add_perm_map((u_long)bd->image, image_size);
                }
                if ( (( u_long)bd->stack>>PAGE_SHIFT) == highpage) {
                        highpage -= STACK_PAGES;
                        add_perm_map(highpage<<PAGE_SHIFT,
                                     STACK_PAGES*PAGE_SIZE);
                }
                /* Protect the interrupt handlers that we need! */
                if (lowpage<2) lowpage=2;
                /* Check for the special case of full area! */
                if (highpage>lowpage) {
                        p = alloc_map();
                        p->base = lowpage<<PAGE_SHIFT;
                        p->end = (highpage<<PAGE_SHIFT)-1;
                        p->firstpte=MAP_FREE_PHYS;
                        insert_map(&mm->physavail, p);
                }
        }

        /* Allocate the hash table */
        mm->sdr1=__palloc(0x10000, PA_PERM|16);
        _write_SDR1((u_long)mm->sdr1);
        memset(mm->sdr1, 0, 0x10000);
        mm->hashmask = 0xffc0;

        /* Setup the segment registers as we want them */
        for (i=0; i<16; i++) _write_SR(i, (void *)(i<<28));
        /* Create the maps for the physical memory, FirmwareCode does not
         * seem to be necessary. ROM is mapped read-only to reduce the risk
         * of reprogramming it because it's often Flash and some are
         * amazingly easy to overwrite.
         */
        create_identity_mappings(BootImage|Free|FirmwareCode|FirmwareHeap|
                                 FirmwareStack, PTE_RAM);
        create_identity_mappings(SystemROM, PTE_ROM);
        create_identity_mappings(IOMemory|SystemIO|SystemRegs|
                                 PCIAddr|PCIConfig|ISAAddr, PTE_IO);

        create_free_vm();

        /* Install our own MMU and trap handlers. */
        codemove((void *) 0x300, _handler_glue, 0x100, bd->cache_lsize);
        codemove((void *) 0x400, _handler_glue, 0x100, bd->cache_lsize);
        codemove((void *) 0x600, _handler_glue, 0x100, bd->cache_lsize);
        codemove((void *) 0x700, _handler_glue, 0x100, bd->cache_lsize);
}
 
 
void * salloc(u_long size) {
        map *p, *q;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;

        if (size==0) return NULL;

        size = (size+7)&~7;

        for (p=mm->sallocfree; p; p=p->next) {
                if (p->base+size <= p->end) break;
        }
        if(!p) {
                void *m;
                m = __palloc(size, PA_SUBALLOC);
                p = alloc_map();
                if (!m && !p) return NULL;
                if (!m && !p) return NULL;
                p->base = (u_long) m;
                p->base = (u_long) m;
                p->firstpte = MAP_FREE_SUBS;
                p->firstpte = MAP_FREE_SUBS;
                p->end = (u_long)m+PAGE_ALIGN(size)-1;
                p->end = (u_long)m+PAGE_ALIGN(size)-1;
                insert_map(&mm->sallocfree, p);
                insert_map(&mm->sallocfree, p);
                coalesce_maps(mm->sallocfree);
                coalesce_maps(mm->sallocfree);
                coalesce_maps(mm->sallocphys);
                coalesce_maps(mm->sallocphys);
        };
        };
        q=alloc_map();
        q=alloc_map();
        q->base=p->base;
        q->base=p->base;
        q->end=q->base+size-1;
        q->end=q->base+size-1;
        q->firstpte=MAP_USED_SUBS;
        q->firstpte=MAP_USED_SUBS;
        insert_map(&mm->sallocused, q);
        insert_map(&mm->sallocused, q);
        if (q->end==p->end) free_map(remove_map(&mm->sallocfree, p));
        if (q->end==p->end) free_map(remove_map(&mm->sallocfree, p));
        else p->base += size;
        else p->base += size;
        memset((void *)q->base, 0, size);
        memset((void *)q->base, 0, size);
        return (void *)q->base;
        return (void *)q->base;
}
}
 
 
void sfree(void *p) {
void sfree(void *p) {
        map *q;
        map *q;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
 
 
        q=remove_map_at(&mm->sallocused, p);
        q=remove_map_at(&mm->sallocused, p);
        if (!q) return;
        if (!q) return;
        q->firstpte=MAP_FREE_SUBS;
        q->firstpte=MAP_FREE_SUBS;
        insert_map(&mm->sallocfree, q);
        insert_map(&mm->sallocfree, q);
        coalesce_maps(mm->sallocfree);
        coalesce_maps(mm->sallocfree);
}
}
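
#if 0
/* Illustrative usage sketch, not part of the original code: salloc() rounds
 * the request up to the 8-byte suballocation unit and returns zeroed memory,
 * so the caller does not need to clear it.
 */
static void salloc_example(void)
{
        char *scratch = salloc(13);     /* 16 bytes are actually reserved */

        if (scratch) {
                scratch[0] = 1;         /* the rest of the buffer is still 0 */
                sfree(scratch);
        }
}
#endif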
 
 
/* First/last area fit; the low bits of flags (PA_ALIGN_MASK) hold the
 * base-2 logarithm of the required alignment. The algorithms are stupid
 * because we expect very little fragmentation of the areas, if any. The
 * unit of allocation is the page. The allocation is by default performed
 * from higher addresses down, unless flags&PA_LOW is true.
 */
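/* For example (purely illustrative): __palloc(0x10000, PA_PERM|16) returns a
 * permanent 64kB block aligned on a 64kB boundary (mask = (1<<16)-1 below),
 * as done for the hash table, while __palloc(0x3000, PA_LOW) takes three
 * pages from the lowest available physical addresses with no additional
 * alignment constraint.
 */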
 
 
void * __palloc(u_long size, int flags)
{
        u_long mask = ((1<<(flags&PA_ALIGN_MASK))-1);
        map *newmap, *frommap, *p, *splitmap=0;
        map **queue;
        u_long qflags;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;

        /* Asking for a size which is not a multiple of the alignment
           is likely to be an error. */

        if (size & mask) return NULL;
        size = PAGE_ALIGN(size);
        if(!size) return NULL;

        if (flags&PA_SUBALLOC) {
                queue = &mm->sallocphys;
                qflags = MAP_SUBS_PHYS;
        } else if (flags&PA_PERM) {
                queue = &mm->physperm;
                qflags = MAP_PERM_PHYS;
        } else {
                queue = &mm->physused;
                qflags = MAP_USED_PHYS;
        }
        /* We need to allocate the map entry for a possible split now, so that
         * no two allocations may attempt to take the same memory
         * simultaneously. Alloc_map_page does not call back here to avoid
         * infinite recursion in alloc_map.
         */
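        /* An allocation aligned more strictly than a page may land in the
         * middle of a free area, leaving free space on both sides of it, so
         * a second map entry has to be reserved for the extra fragment.
         */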
 
 
        if (mask&PAGE_MASK) {
                splitmap=alloc_map();
                if (!splitmap) return NULL;
        }
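        /* The second test in the loop below guards against wraparound of the
         * aligned end address when a free area reaches the top of the
         * physical address space.
         */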
 
 
        for (p=mm->physavail, frommap=NULL; p; p=p->next) {
                u_long high = p->end;
                u_long limit  = ((p->base+mask)&~mask) + size-1;
                if (high>=limit && ((p->base+mask)&~mask)+size>p->base) {
                        frommap = p;
                        if (flags&PA_LOW) break;
                }
        }

        if (!frommap) {
                if (splitmap) free_map(splitmap);
                return NULL;
        }

        newmap=alloc_map();

        if (flags&PA_LOW) {
                newmap->base = (frommap->base+mask)&~mask;
        } else {
                newmap->base = (frommap->end +1 - size) & ~mask;
        }

        newmap->end = newmap->base+size-1;
        newmap->firstpte = qflags;

        /* Add a fragment if we don't allocate until the end. */

        if (splitmap) {
                splitmap->base=newmap->base+size;
                splitmap->end=frommap->end;
                splitmap->firstpte= MAP_FREE_PHYS;
                frommap->end=newmap->base-1;
        } else if (flags & PA_LOW) {
                frommap->base=newmap->base+size;
        } else {
                frommap->end=newmap->base-1;
        }

        /* Remove a fragment if it becomes empty. */
        if (frommap->base == frommap->end+1) {
                free_map(remove_map(&mm->physavail, frommap));
        }

        if (splitmap) {
                if (splitmap->base == splitmap->end+1) {
                        free_map(remove_map(&mm->physavail, splitmap));
                } else {
                        insert_map(&mm->physavail, splitmap);
                }
        }

        insert_map(queue, newmap);
        return (void *) newmap->base;

}

void pfree(void * p) {
        map *q;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
        q=remove_map_at(&mm->physused, p);
        if (!q) return;
        q->firstpte=MAP_FREE_PHYS;
        insert_map(&mm->physavail, q);
        coalesce_maps(mm->physavail);
}

#ifdef DEBUG
/* Debugging functions */
void print_maps(map *chain, const char *s) {
        map *p;
        printk("%s",s);
        for(p=chain; p; p=p->next) {
                printk("    %08lx-%08lx: %08lx\n",
                       p->base, p->end, p->firstpte);
        }
}

void print_all_maps(const char * s) {
        u_long freemaps;
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
        map *free;
        printk("%s",s);
        print_maps(mm->mappings, "  Currently defined mappings:\n");
        print_maps(mm->physavail, "  Currently available physical areas:\n");
        print_maps(mm->physused, "  Currently used physical areas:\n");
        print_maps(mm->virtavail, "  Currently available virtual areas:\n");
        print_maps(mm->virtused, "  Currently used virtual areas:\n");
        print_maps(mm->physperm, "  Permanently used physical areas:\n");
        print_maps(mm->sallocphys, "  Physical memory used for salloc:\n");
        print_maps(mm->sallocfree, "  Memory available for salloc:\n");
        print_maps(mm->sallocused, "  Memory allocated through salloc:\n");
        for (freemaps=0, free=mm->freemaps; free; freemaps++, free=free->next);
        printk("  %ld free maps.\n", freemaps);
}

void print_hash_table(void) {
        struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
        hash_entry *p=(hash_entry *) mm->sdr1;
        u_int i, valid=0;
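        /* key presumably holds the first (flags) word of each PTE, whose most
         * significant bit is the valid bit, hence the p[i].key<0 sign tests
         * below.
         */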
        for (i=0; i<((mm->hashmask)>>3)+8; i++) {
                if (p[i].key<0) valid++;
        }
        printk("%u valid hash entries on pass 1.\n", valid);
        valid = 0;
        for (i=0; i<((mm->hashmask)>>3)+8; i++) {
                if (p[i].key<0) valid++;
        }
        printk("%u valid hash entries on pass 2.\n"
               "     vpn:rpn_attr, p/s, pteg.i\n", valid);
        for (i=0; i<((mm->hashmask)>>3)+8; i++) {
                if (p[i].key<0) {
                        u_int pteg=(i>>3);
                        u_long vpn;
                        vpn = (pteg^((p[i].key)>>7)) &0x3ff;
                        if (p[i].key&0x40) vpn^=0x3ff;
                        vpn |= ((p[i].key<<9)&0xffff0000)
                          | ((p[i].key<<10)&0xfc00);
                        printk("%08lx:%08lx, %s, %5d.%d\n",
                               vpn,  p[i].rpn, p[i].key&0x40 ? "sec" : "pri",
                               pteg, i%8);
                }
        }
}

#endif
