URL
https://opencores.org/ocsvn/c0or1k/c0or1k/trunk
Subversion Repositories c0or1k
Compare Revisions
- This comparison shows the changes necessary to convert path
/c0or1k/trunk
- from Rev 6 to Rev 7
Rev 6 → Rev 7
/include/l4/glue/or1k/syscall.h
0,0 → 1,78
/* |
* ARM-specific system call details. |
* |
* Copyright (C) 2007 Bahadir Balban |
*/ |
|
#ifndef __ARM_GLUE_SYSCALL_H__ |
#define __ARM_GLUE_SYSCALL_H__ |
|
#include <l4/types.h> |
#include INC_GLUE(message.h) |
|
/* The only special call is the trap that returns the KIP address, |
 * from which the other system calls can be discovered. */ |
#define L4_TRAP_KIP 0xB4 |
|
/* Used in the kernel to refer to the virtual address of this page. |
 * User space discovers it from the KIP. */ |
#define ARM_SYSCALL_PAGE 0xFFFFF000 |
|
extern unsigned int __syscall_page_start; |
|
/* |
* This structure is saved on the kernel stack |
* just after entering a system call exception. |
*/ |
typedef struct syscall_context { |
u32 spsr; |
u32 r0; |
u32 r1; |
u32 r2; |
u32 r3; /* MR0 */ |
u32 r4; /* MR1 */ |
u32 r5; /* MR2 */ |
u32 r6; /* MR3 */ |
u32 r7; /* MR4 */ |
u32 r8; /* MR5 */ |
u32 r9; |
u32 r10; |
u32 r11; |
u32 r12; |
u32 sp_usr; |
u32 lr_usr; |
} __attribute__((__packed__)) syscall_context_t; |
|
typedef struct msg_regs { |
u32 mr0; |
u32 mr1; |
u32 mr2; |
u32 mr3; |
u32 mr4; |
u32 mr5; |
} msg_regs_t; |
|
/* NOTE: |
 * These references are valid only when they have been explicitly set |
 * by a kernel entry point, e.g. a system call, or a data abort handler |
 * that imitates a page fault ipc, etc. |
 * |
 * Second note: |
 * _If_ these refer to real utcbs in the future, make sure to add a |
 * utcb_map_lazily() check so that accesses to them are safe. |
 */ |
#define KTCB_REF_ARG0(ktcb) (&(ktcb)->syscall_regs->r0) |
#define KTCB_REF_MR0(ktcb) (&(ktcb)->syscall_regs->MR0_REGISTER) |
|
/* Represents each syscall. For now, argument registers are |
 * read from the stack; slower, but simplest. */ |
typedef int (*syscall_fn_t)(struct syscall_context *regs); |
|
/* Entry point for syscall dispatching. Called from asm */ |
int syscall(struct syscall_context *regs, unsigned long); |
|
/* Syscall-related initialiser called during system init. */ |
void syscall_init(void); |
void kip_init_syscalls(void); |
|
#endif /* __ARM_GLUE_SYSCALL_H__ */ |
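As a sketch of how user space is expected to use L4_TRAP_KIP: the trap returns the KIP address, and the UTCB pointer can then be read at UTCB_KIP_OFFSET (0x50, defined in memlayout.h below). The register convention and helper names here are assumptions for illustration, not part of this revision:

        /* Hypothetical user-side discovery of the KIP and UTCB (sketch). */
        static inline void *l4_get_kip(void)
        {
                register void *kip __asm__("r0");

                /* Assumed ABI: the L4_TRAP_KIP trap returns the KIP
                 * address in r0. */
                __asm__ __volatile__("swi #0xB4" : "=r" (kip) : : "memory");
                return kip;
        }

        /* The UTCB address lives at UTCB_KIP_OFFSET inside the KIP page. */
        #define UTCB_ADDRESS() \
                (*(unsigned long *)((char *)l4_get_kip() + 0x50))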
/include/l4/glue/or1k/ipc.h
0,0 → 1,55
#ifndef __GLUE_ARM_IPC_H__ |
#define __GLUE_ARM_IPC_H__ |
|
#include <l4/generic/tcb.h> |
#include INC_GLUE(message.h) |
|
static inline int extended_ipc_msg_index(unsigned int flags) |
{ |
return (flags & IPC_FLAGS_MSG_INDEX_MASK) |
>> IPC_FLAGS_MSG_INDEX_SHIFT; |
} |
|
static inline int extended_ipc_msg_size(unsigned int flags) |
{ |
return (flags & IPC_FLAGS_SIZE_MASK) |
>> IPC_FLAGS_SIZE_SHIFT; |
} |
|
static inline void tcb_set_ipc_flags(struct ktcb *task, |
unsigned int flags) |
{ |
task->ipc_flags = flags; |
} |
|
static inline unsigned int tcb_get_ipc_flags(struct ktcb *task) |
{ |
return task->ipc_flags; |
} |
|
static inline unsigned int |
ipc_flags_set_type(unsigned int flags, unsigned int type) |
{ |
flags &= ~IPC_FLAGS_TYPE_MASK; |
flags |= type & IPC_FLAGS_TYPE_MASK; |
return flags; |
} |
|
static inline unsigned int ipc_flags_get_type(unsigned int flags) |
{ |
return flags & IPC_FLAGS_TYPE_MASK; |
} |
|
static inline void tcb_set_ipc_type(struct ktcb *task, |
unsigned int type) |
{ |
task->ipc_flags = ipc_flags_set_type(task->ipc_flags, |
type); |
} |
|
static inline unsigned int tcb_get_ipc_type(struct ktcb *task) |
{ |
return ipc_flags_get_type(task->ipc_flags); |
} |
|
#endif /* __GLUE_ARM_IPC_H__ */ |
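A minimal usage sketch of the accessors above, assuming the flags word packs type, size and message index as the masks suggest; IPC_FLAGS_TYPE_EXTENDED is a hypothetical constant:

        void ipc_flags_example(struct ktcb *task, unsigned int flags)
        {
                tcb_set_ipc_flags(task, flags);

                /* IPC_FLAGS_TYPE_EXTENDED is assumed, not defined here. */
                if (tcb_get_ipc_type(task) == IPC_FLAGS_TYPE_EXTENDED) {
                        int index = extended_ipc_msg_index(flags);
                        int size = extended_ipc_msg_size(flags);
                        /* ... copy `size' bytes starting at MR `index' ... */
                }
        }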
/include/l4/glue/or1k/mapping.h
0,0 → 1,89
/* |
* Generic mapping operations |
* |
* Operations on address space mappings that |
* all subarchitectures support generically. |
* |
* Copyright (C) 2008 - 2010 B Labs Ltd. |
* Written by Bahadir Balban |
*/ |
|
#ifndef __ARM_GLUE_MAPPING_H__ |
#define __ARM_GLUE_MAPPING_H__ |
|
#include INC_SUBARCH(mm.h) |
|
#define TASK_PGD(x) (x)->space->pgd |
|
unsigned int space_flags_to_ptflags(unsigned int flags); |
|
void add_mapping_pgd(unsigned long paddr, unsigned long vaddr, |
unsigned int size, unsigned int flags, |
pgd_table_t *pgd); |
|
void add_mapping(unsigned long paddr, unsigned long vaddr, |
unsigned int size, unsigned int flags); |
|
void add_boot_mapping(unsigned long paddr, unsigned long vaddr, |
unsigned int size, unsigned int flags); |
|
int remove_mapping(unsigned long vaddr); |
int remove_mapping_pgd(pgd_table_t *pgd, unsigned long vaddr); |
void remove_mapping_pgd_all_user(pgd_table_t *pgd); |
|
int check_mapping_pgd(unsigned long vaddr, unsigned long size, |
unsigned int flags, pgd_table_t *pgd); |
|
int check_mapping(unsigned long vaddr, unsigned long size, |
unsigned int flags); |
|
void copy_pgd_kern_all(pgd_table_t *); |
|
struct address_space; |
int delete_page_tables(struct address_space *space); |
int copy_user_tables(struct address_space *new, struct address_space *orig); |
void remap_as_pages(void *vstart, void *vend); |
|
void copy_pgds_by_vrange(pgd_table_t *to, pgd_table_t *from, |
unsigned long start, unsigned long end); |
|
/* |
* TODO: Some of these may be made inline by |
* removing their signature from here completely |
* and creating an arch-specific mapping.h which |
* has inline definitions or just signatures. |
*/ |
|
pte_t virt_to_pte(unsigned long vaddr); |
pte_t *virt_to_ptep(unsigned long vaddr); |
pte_t virt_to_pte_from_pgd(pgd_table_t *pgd, unsigned long vaddr); |
unsigned long virt_to_phys_by_pgd(pgd_table_t *pgd, unsigned long vaddr); |
|
void arch_prepare_pte(u32 paddr, u32 vaddr, unsigned int flags, |
pte_t *ptep); |
|
void arch_write_pte(pte_t *ptep, pte_t pte, u32 vaddr); |
|
void arch_prepare_write_pte(u32 paddr, u32 vaddr, |
unsigned int flags, pte_t *ptep); |
|
pmd_t *arch_pick_pmd(pgd_table_t *pgd, unsigned long vaddr); |
|
void arch_write_pmd(pmd_t *pmd_entry, u32 pmd_phys, u32 vaddr); |
|
int arch_check_pte_access_perms(pte_t pte, unsigned int flags); |
|
pgd_table_t *arch_realloc_page_tables(void); |
|
void arch_copy_pgd_kernel_entries(pgd_table_t *to); |
|
int is_global_pgdi(int i); |
|
struct ktcb; |
void arch_space_switch(struct ktcb *task); |
|
int pgd_count_boot_pmds(); |
|
void idle_task(void); |
|
#endif /* __ARM_GLUE_MAPPING_H__ */ |
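A usage sketch for the generic calls above; the physical address and the MAP_IO_FLAGS name are placeholders, since the real flag values live in the subarch mm.h:

        /* Map one page of a (hypothetical) device, verify, then unmap. */
        add_mapping(0x10009000, IO_AREA1_VADDR, PAGE_SIZE, MAP_IO_FLAGS);

        if (check_mapping(IO_AREA1_VADDR, PAGE_SIZE, MAP_IO_FLAGS))
                remove_mapping(IO_AREA1_VADDR);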
/include/l4/glue/or1k/memlayout.h
0,0 → 1,60
/* |
* Virtual memory layout of ARM systems. |
*/ |
|
#ifndef __MEMLAYOUT_H__ |
#define __MEMLAYOUT_H__ |
|
#ifndef __ASSEMBLY__ |
#include INC_GLUE(memory.h) |
#endif |
#include INC_PLAT(offsets.h) |
|
#define KERNEL_AREA_START 0xF0000000 |
#define KERNEL_AREA_END 0xF8000000 /* 128 MB */ |
#define KERNEL_AREA_SIZE (KERNEL_AREA_END - KERNEL_AREA_START) |
#define KERNEL_AREA_SECTIONS (KERNEL_AREA_SIZE / ARM_SECTION_SIZE) |
|
#define UTCB_SIZE (sizeof(int) * 64) |
|
#define IO_AREA_START 0xF9000000 |
#define IO_AREA_END 0xFF000000 |
#define IO_AREA_SIZE (IO_AREA_END - IO_AREA_START) |
#define IO_AREA_SECTIONS (IO_AREA_SIZE / ARM_SECTION_SIZE) |
|
#define USER_KIP_PAGE 0xFF000000 |
|
/* ARM-specific offset into the KIP that holds the address of the UTCB page */ |
#define UTCB_KIP_OFFSET 0x50 |
|
#define IO_AREA0_VADDR IO_AREA_START |
#define IO_AREA1_VADDR (IO_AREA_START + (SZ_1MB*1)) |
#define IO_AREA2_VADDR (IO_AREA_START + (SZ_1MB*2)) |
#define IO_AREA3_VADDR (IO_AREA_START + (SZ_1MB*3)) |
#define IO_AREA4_VADDR (IO_AREA_START + (SZ_1MB*4)) |
#define IO_AREA5_VADDR (IO_AREA_START + (SZ_1MB*5)) |
#define IO_AREA6_VADDR (IO_AREA_START + (SZ_1MB*6)) |
#define IO_AREA7_VADDR (IO_AREA_START + (SZ_1MB*7)) |
|
/* |
* IO_AREA8_VADDR |
* The beginning page in this slot is used for userspace uart mapping |
*/ |
|
#define ARM_HIGH_VECTOR 0xFFFF0000 |
#define ARM_SYSCALL_VECTOR 0xFFFFFF00 |
|
#define KERNEL_OFFSET (KERNEL_AREA_START - PLATFORM_PHYS_MEM_START) |
|
/* User tasks define them differently */ |
#if defined (__KERNEL__) |
#define phys_to_virt(addr) ((unsigned int)(addr) + KERNEL_OFFSET) |
#define virt_to_phys(addr) ((unsigned int)(addr) - KERNEL_OFFSET) |
#endif |
|
#define KERN_ADDR(x) (((x) >= KERNEL_AREA_START) && ((x) < KERNEL_AREA_END)) |
#define UTCB_ADDR(x) (((x) >= UTCB_AREA_START) && ((x) < UTCB_AREA_END)) |
#define is_kernel_address(x) (KERN_ADDR(x) || ((x) >= ARM_HIGH_VECTOR) || \ |
			      ((x) >= IO_AREA_START && (x) < IO_AREA_END)) |
|
#endif /* __MEMLAYOUT_H__ */ |
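Since the kernel is linked at a fixed offset above physical memory, phys_to_virt()/virt_to_phys() are pure arithmetic. A worked example, assuming PLATFORM_PHYS_MEM_START is 0 (it is platform-defined in offsets.h):

        /* Under that assumption, KERNEL_OFFSET = 0xF0000000 - 0 = 0xF0000000 */
        phys_to_virt(0x8000);           /* -> 0xF0008000 */
        virt_to_phys(0xF0008000);       /* -> 0x00008000 */
        is_kernel_address(0xF0008000);  /* -> true, inside KERNEL_AREA */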
/include/l4/glue/or1k/smp.h
0,0 → 1,46
/* |
* Copyright 2010 B Labs Ltd. |
* |
* Authors: Prem Mallappa, Bahadir Balban |
* |
* SMP support |
*/ |
#ifndef __GLUE_ARM_SMP_H__ |
#define __GLUE_ARM_SMP_H__ |
|
#include INC_ARCH(scu.h) |
|
struct cpuinfo { |
u32 ncpus; |
u32 flags; |
volatile u32 cpu_spinning; |
void (*send_ipi)(int cpu, int ipi_cmd); |
void (*smp_spin)(void); |
void (*smp_finish)(void); |
|
} __attribute__ ((__packed__)); |
|
extern struct cpuinfo cpuinfo; |
|
#if defined(CONFIG_SMP) |
|
void smp_attach(void); |
void smp_start_cores(void); |
|
#else |
static inline void smp_attach(void) {} |
static inline void smp_start_cores(void) {} |
#endif |
|
void init_smp(void); |
void arch_smp_spin(void); |
void smp_send_ipi(unsigned int cpumask, int ipi_num); |
void platform_smp_init(int ncpus); |
int platform_smp_start(int cpu, void (*start)(int)); |
void secondary_init_platform(void); |
|
extern unsigned long secondary_run_signal; |
|
#define CPUID_TO_MASK(cpu) (1 << (cpu)) |
|
#endif |
/include/l4/glue/or1k/ipi.h
0,0 → 1,16
/* |
* Copyright (C) 2010 B Labs Ltd. |
* |
* By Bahadir Balban |
*/ |
#ifndef __IPI_H__ |
#define __IPI_H__ |
|
#include <l4/generic/irq.h> |
|
int ipi_handler(struct irq_desc *desc); |
|
|
#define IPI_TIMER_EVENT 0 |
|
#endif /* __IPI_H__ */ |
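Tying this to smp.h above, a one-line sketch of raising the timer event on a single core, with CPUID_TO_MASK building the cpumask:

        /* Kick cpu 1 with the timer event. */
        smp_send_ipi(CPUID_TO_MASK(1), IPI_TIMER_EVENT);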
/include/l4/glue/or1k/cache.h
0,0 → 1,26
/* |
* Generic cache api calls |
* |
* Copyright (C) 2010 B Labs Ltd. |
* |
* Author: Bahadir Balban |
*/ |
#ifndef __GLUE_CACHE_H__ |
#define __GLUE_CACHE_H__ |
|
#include INC_SUBARCH(mmu_ops.h) |
|
/* The lowest byte is reserved for (and used by) capability permissions */ |
#define ARCH_INVALIDATE_ICACHE 0x10 |
#define ARCH_INVALIDATE_DCACHE 0x20 |
#define ARCH_CLEAN_DCACHE 0x30 |
#define ARCH_CLEAN_INVALIDATE_DCACHE 0x40 |
#define ARCH_INVALIDATE_TLB 0x50 |
|
void arch_invalidate_dcache(unsigned long start, unsigned long end); |
void arch_clean_invalidate_dcache(unsigned long start, unsigned long end); |
void arch_invalidate_icache(unsigned long start, unsigned long end); |
void arch_invalidate_tlb(unsigned long start, unsigned long end); |
void arch_clean_dcache(unsigned long start, unsigned long end); |
|
#endif /* __GLUE_CACHE_H__ */ |
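A typical use of the range-based calls above, e.g. around a buffer that a device accesses by DMA; `buf' is illustrative:

        /* Push dirty lines to memory before the device reads the buffer. */
        arch_clean_dcache((unsigned long)buf, (unsigned long)buf + PAGE_SIZE);

        /* ... device writes into the buffer ... */

        /* Discard stale lines before the CPU reads the new contents. */
        arch_invalidate_dcache((unsigned long)buf,
                               (unsigned long)buf + PAGE_SIZE);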
/include/l4/glue/or1k/init.h
0,0 → 1,26
/* |
* Copyright (C) 2010 B Labs Ltd. |
* Author: Prem Mallappa <prem.mallappa@b-labs.co.uk> |
*/ |
|
#ifndef __ARM_GLUE_INIT_H__ |
#define __ARM_GLUE_INIT_H__ |
|
#include <l4/generic/tcb.h> |
#include <l4/generic/space.h> |
|
void switch_to_user(struct ktcb *inittask); |
void timer_start(void); |
|
extern struct address_space init_space; |
void init_kernel_mappings(void); |
void start_virtual_memory(void); |
void finalize_virtual_memory(void); |
void init_finalize(void); |
|
void remove_section_mapping(unsigned long vaddr); |
|
void vectors_init(void); |
void setup_idle_caps(void); |
void setup_idle_task(void); |
#endif /* __ARM_GLUE_INIT_H__ */ |
/include/l4/glue/or1k/message.h
0,0 → 1,95
/* |
* Userspace thread control block |
* |
* Copyright (C) 2007-2009 Bahadir Bilgehan Balban |
*/ |
#ifndef __GLUE_ARM_MESSAGE_H__ |
#define __GLUE_ARM_MESSAGE_H__ |
|
/* |
* Here's a summary of how ARM registers are used during IPC: |
* |
* System registers: |
* r0 - r2: Passed as arguments to ipc() call. They are the registers |
* the microkernel will read and they have system-wide meaning. |
* |
* Primary message registers: |
 * r3 - r8: These 6 registers are the primary message registers MR0-MR5. |
* Their format is application-specific, i.e. the microkernel imposes no |
* format restrictions on them. |
* |
 * TODO: The only exception is that, for ANYTHREAD receivers, the predefined |
 * MR_SENDER register is written by the kernel to indicate the sender. This |
 * register is among the primary MRs; it might be a better fit for one of |
 * the system registers. |
* |
* l4lib registers: (MR_TAG, MR_SENDER, MR_RETURN) |
* Some of the primary message registers are used by the l4lib convenience |
* library for operations necessary on most or all common ipcs. For example |
* every ipc has a tag that specifies the ipc reason. Also send/receive |
* operations require a return value. Threads that are open to receive from |
* all threads require the sender id. These values are passed in predefined |
* primary message registers, but the microkernel has no knowledge about them. |
* |
 * System call registers: L4SYS_ARG0 to L4SYS_ARG3. (See syslib.h for definitions.) |
* Finally the rest of the primary message registers are available for |
* implementing system call arguments. For example the POSIX services use |
* these arguments to pass posix system call information. |
* |
* Secondary Message Registers: |
* These are non-real registers and are present in the UTCB memory region. |
* Both real and non-real message registers have a location in the UTCB, but |
* non-real ones are copied only if the FULL IPC flag is set. |
* |
* The big picture: |
* |
* r0 System register |
* r1 System register |
* r2 System register |
* r3 Primary MR0 MR_RETURN, MR_TAG Present in UTCB, Short IPC |
* r4 Primary MR1 MR_SENDER Present in UTCB, Short IPC |
* r5 Primary MR2 L4SYS_ARG0 Present in UTCB, Short IPC |
* r6 Primary MR3 L4SYS_ARG1 Present in UTCB, Short IPC |
* r7 Primary MR4 L4SYS_ARG2 Present in UTCB, Short IPC |
* r8 Primary MR5 L4SYS_ARG3 Present in UTCB, Short IPC |
* x Secondary MR6 Present in UTCB, Full IPC only |
* x Secondary MR64 Present in UTCB, Full IPC only |
* |
* Complicated for you? Suggest a simpler design and it shall be implemented! |
*/ |
|
#define MR_REST ((UTCB_SIZE >> 2) - MR_TOTAL - 4) /* -4 is for fields on utcb */ |
#define MR_TOTAL 6 |
#define MR_TAG 0 /* Contains the purpose of message */ |
#define MR_SENDER 1 /* For anythread receivers to discover sender */ |
#define MR_RETURN 0 /* Contains the posix return value. */ |
|
/* These define the MR start-end range that isn't used by the userspace syslib */ |
#define MR_UNUSED_START 2 /* The first mr that's not used by syslib.h */ |
#define MR_UNUSED_TOTAL (MR_TOTAL - MR_UNUSED_START) |
#define MR_USABLE_TOTAL MR_UNUSED_TOTAL |
|
/* These are defined so that we don't hard-code register names */ |
#define MR0_REGISTER r3 |
#define MR_RETURN_REGISTER r3 |
|
#define TASK_NOTIFY_SLOTS 8 |
#define TASK_NOTIFY_MAXVALUE 255 |
|
/* Primary MRs aren't used for memcopy; those ops use this size as a parameter */ |
#define L4_UTCB_FULL_BUFFER_SIZE (MR_REST * sizeof(int)) |
|
#include INC_GLUE(memlayout.h) |
|
#if !defined (__ASSEMBLY__) |
struct utcb { |
u32 mr[MR_TOTAL]; /* MRs that are mapped to real registers */ |
u32 saved_tag; /* Saved tag field for stacked ipcs */ |
u32 saved_sender; /* Saved sender field for stacked ipcs */ |
u8 notify[TASK_NOTIFY_SLOTS]; /* Irq notification slots */ |
u32 mr_rest[MR_REST]; /* Complete the utcb for up to 64 words */ |
}; |
#endif |
|
|
#endif /* __GLUE_ARM_MESSAGE_H__ */ |
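To make the register table concrete, here is a hedged sketch of what an l4lib-style short-IPC send stub could look like; write_mr(), l4_ipc() and L4_NILTHREAD are assumed helper names, not part of this revision:

        /* Tag goes to MR0 (r3); the first free MR (MR2/r5) carries data. */
        static inline int l4_send_example(l4id_t to, unsigned int tag, u32 arg)
        {
                write_mr(MR_TAG, tag);          /* primary MR0 -> r3 */
                write_mr(MR_UNUSED_START, arg); /* primary MR2 -> r5 */

                /* r0-r2 carry the system registers: to, from, flags. */
                return l4_ipc(to, L4_NILTHREAD, 0);
        }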
/include/l4/glue/or1k/context.h
0,0 → 1,53
#ifndef __ARM_CONTEXT_H__ |
#define __ARM_CONTEXT_H__ |
|
#include <l4/types.h> |
|
/* |
* This describes the register context of each task. Simply set |
* them and they'll be copied onto real registers upon a context |
* switch to that task. exchange_registers() system call is |
* designed for this, whose input structure is defined further |
* below. |
*/ |
typedef struct arm_context { |
u32 spsr; /* 0x0 */ |
u32 r0; /* 0x4 */ |
u32 r1; /* 0x8 */ |
u32 r2; /* 0xC */ |
u32 r3; /* 0x10 */ |
u32 r4; /* 0x14 */ |
u32 r5; /* 0x18 */ |
u32 r6; /* 0x1C */ |
u32 r7; /* 0x20 */ |
u32 r8; /* 0x24 */ |
u32 r9; /* 0x28 */ |
u32 r10; /* 0x2C */ |
u32 r11; /* 0x30 */ |
u32 r12; /* 0x34 */ |
u32 sp; /* 0x38 */ |
u32 lr; /* 0x3C */ |
u32 pc; /* 0x40 */ |
} __attribute__((__packed__)) task_context_t; |
|
|
typedef struct arm_exregs_context { |
u32 r0; /* 0x4 */ |
u32 r1; /* 0x8 */ |
u32 r2; /* 0xC */ |
u32 r3; /* 0x10 */ |
u32 r4; /* 0x14 */ |
u32 r5; /* 0x18 */ |
u32 r6; /* 0x1C */ |
u32 r7; /* 0x20 */ |
u32 r8; /* 0x24 */ |
u32 r9; /* 0x28 */ |
u32 r10; /* 0x2C */ |
u32 r11; /* 0x30 */ |
u32 r12; /* 0x34 */ |
u32 sp; /* 0x38 */ |
u32 lr; /* 0x3C */ |
u32 pc; /* 0x40 */ |
} __attribute__((__packed__)) exregs_context_t; |
|
#endif /* __ARM_CONTEXT_H__ */ |
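A sketch of priming a fresh thread through the exregs structure above; l4_exchange_registers(), the EXREGS_SET_* flags, and the entry/stack symbols are assumed names:

        exregs_context_t ctx = { 0 };

        ctx.pc = (u32)entry_function;   /* hypothetical entry point */
        ctx.sp = (u32)stack_top;        /* hypothetical stack top */
        l4_exchange_registers(&ctx, EXREGS_SET_PC | EXREGS_SET_SP, tid);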
/include/l4/glue/or1k/debug.h
0,0 → 1,51
/* |
* ARM-specific syscall type accounting. |
* |
* Copyright (C) 2010 B Labs Ltd. |
* |
* Author: Bahadir Balban |
*/ |
|
#ifndef __ARM_DEBUG_H__ |
#define __ARM_DEBUG_H__ |
|
#include INC_SUBARCH(perfmon.h) |
|
#if defined (CONFIG_DEBUG_ACCOUNTING) |
|
extern struct system_accounting system_accounting; |
|
static inline void |
system_account_syscall_type(unsigned long swi_address) |
{ |
*(((u64 *)&system_accounting.syscalls) + |
((swi_address & 0xFF) >> 2)) += 1; |
} |
|
#else /* End of CONFIG_DEBUG_ACCOUNTING */ |
|
static inline void system_account_syscall_type(unsigned long swi_address) { } |
|
#endif /* End of !CONFIG_DEBUG_ACCOUNTING */ |
|
|
#if defined (CONFIG_DEBUG_PERFMON_KERNEL) |
|
static inline void |
system_measure_syscall_start(void) |
{ |
/* To avoid involuntary rescheduling during the call */ |
perfmon_reset_start_cyccnt(); |
} |
|
/* Defined in arm/glue/debug.c */ |
void system_measure_syscall_end(unsigned long swi_address); |
|
#else /* End of CONFIG_DEBUG_PERFMON_KERNEL */ |
|
static inline void system_measure_syscall_start(void) { } |
static inline void system_measure_syscall_end(unsigned long swi_address) { } |
|
#endif /* End of !CONFIG_DEBUG_PERFMON_KERNEL */ |
|
#endif /* __ARM_DEBUG_H__ */ |
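The pointer arithmetic in system_account_syscall_type() indexes what is effectively an array of u64 counters: the low byte of the SWI address gives the offset within the syscall page, and the 4-byte stride becomes a counter index. An equivalent spelling, assuming system_accounting.syscalls really is a block of consecutive u64 counters:

        u64 *counters = (u64 *)&system_accounting.syscalls;

        /* One counter per syscall; syscall entries sit 4 bytes apart. */
        counters[(swi_address & 0xFF) >> 2] += 1;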
/include/l4/glue/or1k/memory.h
0,0 → 1,81
/* |
* Includes memory-related architecture specific definitions and their |
* corresponding generic wrappers. |
* |
* Copyright (C) 2007 Bahadir Balban |
*/ |
#ifndef __GLUE_ARM_MEMORY_H__ |
#define __GLUE_ARM_MEMORY_H__ |
|
#include INC_GLUE(memlayout.h) /* Important generic definitions */ |
#include INC_SUBARCH(mm.h) |
|
/* Generic definitions */ |
#define PFN_SHIFT 12 |
#define PAGE_BITS PFN_SHIFT |
#define PAGE_SIZE SZ_4K |
#define PAGE_MASK (PAGE_SIZE - 1) |
|
/* Aligns to the upper page (ceiling). FIXME: add a wraparound check. */ |
#define page_align_up(addr) ((((unsigned long)(addr)) + PAGE_MASK) & \ |
(~PAGE_MASK)) |
|
/* Aligns to the lower page (floor) */ |
#define page_align(addr) (((unsigned long)(addr)) & \ |
(~PAGE_MASK)) |
|
#define is_aligned(val, size) (!(((unsigned long)(val)) & (((unsigned long)size) - 1))) |
#define is_page_aligned(val) (!(((unsigned long)(val)) & PAGE_MASK)) |
#define page_boundary(x) is_page_aligned(x) |
|
/* |
* Align to given size. |
* |
 * Note that the size must be alignable, i.e. a power of two. |
* E.g. 0x1000 would work but 0x1010 would not. |
*/ |
#define align(addr, size) (((unsigned int)(addr)) & (~((unsigned long)size-1))) |
#define align_up(addr, size) ((((unsigned long)(addr)) + \ |
((size) - 1)) & (~(((unsigned long)size) - 1))) |
|
/* The bytes left until the end of the page that x is in */ |
#define TILL_PAGE_ENDS(x) (PAGE_SIZE - ((unsigned long)(x) & PAGE_MASK)) |
|
/* Extract page frame number from address and vice versa. */ |
#define __pfn(x) (((unsigned long)(x)) >> PAGE_BITS) |
#define __pfn_to_addr(x) (((unsigned long)(x)) << PAGE_BITS) |
|
/* Extract physical address from page table entry (pte) */ |
#define __pte_to_addr(x) (((unsigned long)(x)) & ~PAGE_MASK) |
|
/* Minimum excess needed for word alignment */ |
#define SZ_WORD sizeof(unsigned int) |
#define WORD_BITS 32 |
#define WORD_BITS_LOG2 5 |
#define BITWISE_GETWORD(x) ((x) >> WORD_BITS_LOG2) /* Divide by 32 */ |
#define BITWISE_GETBIT(x) (1 << ((x) % WORD_BITS)) |
|
/* Minimum stack alignment restriction across functions, exceptions */ |
#define STACK_ALIGNMENT 8 |
|
/* Endianness conversion */ |
static inline unsigned int be32_to_cpu(unsigned int x) |
{ |
	char *p = (char *)&x; |
	char tmp; |
 |
	/* Swap bytes and return the converted value */ |
	tmp = p[0]; |
	p[0] = p[3]; |
	p[3] = tmp; |
 |
	tmp = p[1]; |
	p[1] = p[2]; |
	p[2] = tmp; |
 |
	return x; |
} |
|
struct ktcb; |
void task_init_registers(struct ktcb *task, unsigned long pc); |
|
#endif /* __GLUE_ARM_MEMORY_H__ */ |
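Worked examples for the alignment helpers above (hexadecimal throughout):

        align(0x12345, SZ_4K);          /* -> 0x12000 (floor)   */
        align_up(0x12345, SZ_4K);       /* -> 0x13000 (ceiling) */
        page_align(0xF0001234);         /* -> 0xF0001000        */
        page_align_up(0xF0001234);      /* -> 0xF0002000        */
        TILL_PAGE_ENDS(0xF0001234);     /* -> 0x1000 - 0x234 = 0xDCC */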
|
include/l4/glue/or1k
Property changes :
Added: svn:mergeinfo
## -0,0 +0,0 ##
Index: include/l4/arch/or1k/v5/or1k926ejs/cpu.h
===================================================================
--- include/l4/arch/or1k/v5/or1k926ejs/cpu.h (nonexistent)
+++ include/l4/arch/or1k/v5/or1k926ejs/cpu.h (revision 7)
@@ -0,0 +1,13 @@
+/*
+ *
+ * Copyright (C) 2005 Bahadir Balban
+ *
+ */
+#ifndef __ARM926EJS__H__
+#define __ARM926EJS__H__
+
+
+
+
+
+#endif /* __ARM926EJS__H__ */
Index: include/l4/arch/or1k/v5/or1k926ejs
===================================================================
--- include/l4/arch/or1k/v5/or1k926ejs (nonexistent)
+++ include/l4/arch/or1k/v5/or1k926ejs (revision 7)
include/l4/arch/or1k/v5/or1k926ejs
Property changes :
Added: svn:mergeinfo
## -0,0 +0,0 ##
Index: include/l4/arch/or1k/v5/mm.h
===================================================================
--- include/l4/arch/or1k/v5/mm.h (nonexistent)
+++ include/l4/arch/or1k/v5/mm.h (revision 7)
@@ -0,0 +1,128 @@
+/*
+ * ARM v5-specific virtual memory details
+ *
+ * Copyright (C) 2007 Bahadir Balban
+ */
+#ifndef __V5_MM_H__
+#define __V5_MM_H__
+
+/* ARM specific definitions */
+#define VIRT_MEM_START 0
+#define VIRT_MEM_END 0xFFFFFFFF
+#define SECTION_SIZE SZ_1MB
+#define SECTION_MASK (SECTION_SIZE - 1)
+#define SECTION_ALIGN_MASK (~SECTION_MASK)
+#define SECTION_BITS 20
+#define ARM_PAGE_SIZE SZ_4K
+#define ARM_PAGE_MASK 0xFFF
+#define ARM_PAGE_BITS 12
+
+#define PGD_SIZE		(SZ_4K * 4)
+#define PGD_ENTRY_TOTAL SZ_4K
+
+#define PMD_SIZE SZ_1K
+#define PMD_ENTRY_TOTAL 256
+#define PMD_MAP_SIZE SZ_1MB
+#define PMD_ALIGN_MASK (~(PMD_SIZE - 1))
+#define PMD_TYPE_MASK 0x3
+#define PMD_TYPE_FAULT 0
+#define PMD_TYPE_PMD 1
+#define PMD_TYPE_SECTION 2
+
+#define PTE_TYPE_MASK 0x3
+#define PTE_TYPE_FAULT 0
+#define PTE_TYPE_LARGE 1
+#define PTE_TYPE_SMALL 2
+#define PTE_TYPE_TINY 3
+
+/* Permission field offsets */
+#define SECTION_AP0 10
+
+/*
+ * These are indices into arrays with pgd_t or pmd_t sized elements,
+ * therefore the index must be divided by appropriate element size
+ */
+#define PGD_INDEX(x) (((((unsigned long)(x)) >> 18) \
+ & 0x3FFC) / sizeof(pmd_t))
+
+/*
+ * Strip out the page offset in this
+ * megabyte from a total of 256 pages.
+ */
+#define PMD_INDEX(x) (((((unsigned long)(x)) >> 10) \
+ & 0x3FC) / sizeof (pte_t))
+
+
+/* We need this as print-early.S is including this file */
+#ifndef __ASSEMBLY__
+
+/* Type-checkable page table elements */
+typedef u32 pmd_t;
+typedef u32 pte_t;
+
+/* Page global directory made up of pgd_t entries */
+typedef struct pgd_table {
+ pmd_t entry[PGD_ENTRY_TOTAL];
+} pgd_table_t;
+
+/* Page middle directory made up of pmd_t entries */
+typedef struct pmd_table {
+ pte_t entry[PMD_ENTRY_TOTAL];
+} pmd_table_t;
+
+/* Applies for both small and large pages */
+#define PAGE_AP0 4
+#define PAGE_AP1 6
+#define PAGE_AP2 8
+#define PAGE_AP3 10
+
+/* Permission values with rom and sys bits ignored */
+#define SVC_RW_USR_NONE 1
+#define SVC_RW_USR_RO 2
+#define SVC_RW_USR_RW 3
+
+#define PTE_PROT_MASK (0xFF << 4)
+
+#define CACHEABILITY 3
+#define BUFFERABILITY 2
+#define cacheable (1 << CACHEABILITY)
+#define bufferable (1 << BUFFERABILITY)
+#define uncacheable 0
+#define unbufferable 0
+
+/* Helper macros for common cases */
+#define __MAP_USR_RW (cacheable | bufferable | (SVC_RW_USR_RW << PAGE_AP0) \
+ | (SVC_RW_USR_RW << PAGE_AP1) | (SVC_RW_USR_RW << PAGE_AP2) \
+ | (SVC_RW_USR_RW << PAGE_AP3))
+#define __MAP_USR_RO (cacheable | bufferable | (SVC_RW_USR_RO << PAGE_AP0) \
+ | (SVC_RW_USR_RO << PAGE_AP1) | (SVC_RW_USR_RO << PAGE_AP2) \
+ | (SVC_RW_USR_RO << PAGE_AP3))
+#define __MAP_KERN_RW (cacheable | bufferable | (SVC_RW_USR_NONE << PAGE_AP0) \
+ | (SVC_RW_USR_NONE << PAGE_AP1) | (SVC_RW_USR_NONE << PAGE_AP2) \
+ | (SVC_RW_USR_NONE << PAGE_AP3))
+#define __MAP_KERN_IO (uncacheable | unbufferable | (SVC_RW_USR_NONE << PAGE_AP0) \
+ | (SVC_RW_USR_NONE << PAGE_AP1) | (SVC_RW_USR_NONE << PAGE_AP2) \
+ | (SVC_RW_USR_NONE << PAGE_AP3))
+#define __MAP_USR_IO (uncacheable | unbufferable | (SVC_RW_USR_RW << PAGE_AP0) \
+ | (SVC_RW_USR_RW << PAGE_AP1) | (SVC_RW_USR_RW << PAGE_AP2) \
+ | (SVC_RW_USR_RW << PAGE_AP3))
+
+/* There is no execute bit in ARMv5, so we ignore it */
+#define __MAP_USR_RWX __MAP_USR_RW
+#define __MAP_USR_RX __MAP_USR_RO
+#define __MAP_KERN_RWX __MAP_KERN_RW
+#define __MAP_KERN_RX __MAP_KERN_RW /* We always have kernel RW */
+#define __MAP_FAULT 0
+
+void add_section_mapping_init(unsigned int paddr, unsigned int vaddr,
+ unsigned int size, unsigned int flags);
+
+void remove_section_mapping(unsigned long vaddr);
+
+extern pgd_table_t init_pgd;
+
+void arch_update_utcb(unsigned long utcb_address);
+void system_identify(void);
+
+#endif /* __ASSEMBLY__ */
+#endif /* __V5_MM_H__ */
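PGD_INDEX() and PMD_INDEX() fold the divide-by-element-size into the mask; a worked example for vaddr 0xF0123000:

        /* PGD_INDEX(0xF0123000):
         *   (0xF0123000 >> 18) & 0x3FFC = 0x3C04; / sizeof(pmd_t) = 0xF01,
         *   i.e. simply vaddr >> 20: which 1MB section.
         * PMD_INDEX(0xF0123000):
         *   (0xF0123000 >> 10) & 0x3FC = 0x8C; / sizeof(pte_t) = 0x23,
         *   i.e. page 0x23 of the 256 pages in that section. */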
Index: include/l4/arch/or1k/v5/irq.h
===================================================================
--- include/l4/arch/or1k/v5/irq.h (nonexistent)
+++ include/l4/arch/or1k/v5/irq.h (revision 7)
@@ -0,0 +1,28 @@
+#ifndef __ARM_V5_IRQ_H__
+#define __ARM_V5_IRQ_H__
+
+static inline void enable_irqs()
+{
+	__asm__ __volatile__(
+		"mrs r0, cpsr_fc\n"
+		"bic r0, r0, #0x80\n"	/* Clear ARM_IRQ_BIT */
+		"msr cpsr_fc, r0\n"
+		::: "r0", "cc", "memory");	/* r0 and flags are clobbered */
+}
+
+static inline void disable_irqs()
+{
+	__asm__ __volatile__(
+		"mrs r0, cpsr_fc\n"
+		"orr r0, r0, #0x80\n"	/* Set ARM_IRQ_BIT */
+		"msr cpsr_fc, r0\n"
+		::: "r0", "cc", "memory");	/* r0 and flags are clobbered */
+}
+
+/* Disable irqs unconditionally, but also save the previous state, so that
+ * if they were already disabled before the call, a later restore call
+ * retains that state. */
+void irq_local_disable_save(unsigned long *state);
+void irq_local_restore(unsigned long state);
+
+#endif
Index: include/l4/arch/or1k/v5/arm926ejs/cpu.h
===================================================================
--- include/l4/arch/or1k/v5/arm926ejs/cpu.h (nonexistent)
+++ include/l4/arch/or1k/v5/arm926ejs/cpu.h (revision 7)
@@ -0,0 +1,13 @@
+/*
+ *
+ * Copyright (C) 2005 Bahadir Balban
+ *
+ */
+#ifndef __ARM926EJS__H__
+#define __ARM926EJS__H__
+
+
+
+
+
+#endif /* __ARM926EJS__H__ */
Index: include/l4/arch/or1k/v5/exception.h
===================================================================
--- include/l4/arch/or1k/v5/exception.h (nonexistent)
+++ include/l4/arch/or1k/v5/exception.h (revision 7)
@@ -0,0 +1,33 @@
+/*
+ * Definitions for exception support on ARMv5
+ *
+ * Copyright (C) 2007 Bahadir Balban
+ */
+#ifndef __ARCH_V5_EXCEPTION_H__
+#define __ARCH_V5_EXCEPTION_H__
+
+#include INC_ARCH(asm.h)
+
+/*
+ * v5 Architecture-defined data abort values for FSR ordered
+ * in highest to lowest priority.
+ */
+#define DABT_TERMINAL 0x2
+#define DABT_VECTOR 0x0 /* Obsolete */
+#define DABT_ALIGN 0x1
+#define DABT_EXT_XLATE_LEVEL1 0xC
+#define DABT_EXT_XLATE_LEVEL2 0xE
+#define DABT_XLATE_SECT 0x5
+#define DABT_XLATE_PAGE 0x7
+#define DABT_DOMAIN_SECT 0x9
+#define DABT_DOMAIN_PAGE 0xB
+#define DABT_PERM_SECT 0xD
+#define DABT_PERM_PAGE 0xF
+#define DABT_EXT_LFETCH_SECT 0x4
+#define DABT_EXT_LFETCH_PAGE 0x6
+#define DABT_EXT_NON_LFETCH_SECT 0x8
+#define DABT_EXT_NON_LFETCH_PAGE 0xA
+
+#define FSR_FS_MASK 0xF
+
+#endif /* __ARCH_V5_EXCEPTION_H__ */
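A sketch of dispatching on the FSR codes above in a data abort handler; `dfsr' stands for the raw fault status value read from CP15:

        switch (dfsr & FSR_FS_MASK) {
        case DABT_PERM_PAGE:
        case DABT_PERM_SECT:
                /* Permission fault: a candidate for a page-fault ipc. */
                break;
        case DABT_ALIGN:
                /* Alignment fault: usually fatal for the faulter. */
                break;
        default:
                break;
        }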
Index: include/l4/arch/or1k/v5/mmu_ops.h
===================================================================
--- include/l4/arch/or1k/v5/mmu_ops.h (nonexistent)
+++ include/l4/arch/or1k/v5/mmu_ops.h (revision 7)
@@ -0,0 +1,53 @@
+#ifndef __MMU__OPS__H__
+#define __MMU__OPS__H__
+/*
+ * Prototypes for low level mmu operations
+ *
+ * Copyright (C) 2005 Bahadir Balban
+ *
+ */
+
+void arm_set_ttb(unsigned int);
+void arm_set_domain(unsigned int);
+unsigned int arm_get_domain(void);
+void arm_enable_mmu(void);
+void arm_enable_icache(void);
+void arm_enable_dcache(void);
+void arm_enable_wbuffer(void);
+void arm_enable_high_vectors(void);
+void arm_invalidate_cache(void);
+void arm_invalidate_icache(void);
+void arm_invalidate_dcache(void);
+void arm_clean_dcache(void);
+void arm_clean_invalidate_dcache(void);
+void arm_clean_invalidate_cache(void);
+void arm_drain_writebuffer(void);
+void arm_invalidate_tlb(void);
+void arm_invalidate_itlb(void);
+void arm_invalidate_dtlb(void);
+
+static inline void arm_enable_caches(void)
+{
+ arm_enable_icache();
+ arm_enable_dcache();
+}
+
+
+static inline void dmb(void)
+{
+ /* This is the closest to its meaning */
+ arm_drain_writebuffer();
+}
+
+static inline void dsb(void)
+{
+ /* No op */
+}
+
+static inline void isb(void)
+{
+ /* No op */
+}
+
+
+#endif /* __MMU__OPS__H__ */
Index: include/l4/arch/or1k/v5/mutex.h
===================================================================
Index: include/l4/arch/or1k/v5/cache.h
===================================================================
Index: include/l4/arch/or1k/v5/perfmon.h
===================================================================
--- include/l4/arch/or1k/v5/perfmon.h (nonexistent)
+++ include/l4/arch/or1k/v5/perfmon.h (revision 7)
@@ -0,0 +1,6 @@
+#ifndef __PERFMON_H__
+#define __PERFMON_H__
+
+static inline void perfmon_init(void) { }
+
+#endif
Index: include/l4/arch/or1k/v5/debug.h
===================================================================
Index: include/l4/arch/or1k/v5/cpu.h
===================================================================
--- include/l4/arch/or1k/v5/cpu.h (nonexistent)
+++ include/l4/arch/or1k/v5/cpu.h (revision 7)
@@ -0,0 +1,24 @@
+/*
+ * CPU-specific features
+ * defined on top of the base architecture.
+ *
+ * Copyright (C) 2010 B Labs Ltd.
+ * Written by Bahadir Balban
+ */
+
+#ifndef __V5_CPU_H__
+#define __V5_CPU_H__
+
+#include INC_SUBARCH(mmu_ops.h)
+
+static inline void cpu_startup(void)
+{
+
+}
+
+static inline int smp_get_cpuid()
+{
+ return 0;
+}
+
+#endif /* __V5_CPU_H__ */
Index: include/l4/arch/or1k/v6/or1k11mpcore/cpu.h
===================================================================
--- include/l4/arch/or1k/v6/or1k11mpcore/cpu.h (nonexistent)
+++ include/l4/arch/or1k/v6/or1k11mpcore/cpu.h (revision 7)
@@ -0,0 +1,13 @@
+/*
+ *
+ * Copyright (C) 2005 Bahadir Balban
+ *
+ */
+#ifndef __ARM926EJS__H__
+#define __ARM926EJS__H__
+
+
+
+
+
+#endif /* __ARM926EJS__H__ */
Index: include/l4/arch/or1k/v6/or1k11mpcore
===================================================================
--- include/l4/arch/or1k/v6/or1k11mpcore (nonexistent)
+++ include/l4/arch/or1k/v6/or1k11mpcore (revision 7)
include/l4/arch/or1k/v6/or1k11mpcore
Property changes :
Added: svn:mergeinfo
## -0,0 +0,0 ##
Index: include/l4/arch/or1k/v6/or1k1136/cpu.h
===================================================================
--- include/l4/arch/or1k/v6/or1k1136/cpu.h (nonexistent)
+++ include/l4/arch/or1k/v6/or1k1136/cpu.h (revision 7)
@@ -0,0 +1,13 @@
+/*
+ *
+ * Copyright (C) 2005 Bahadir Balban
+ *
+ */
+#ifndef __ARM926EJS__H__
+#define __ARM926EJS__H__
+
+
+
+
+
+#endif /* __ARM926EJS__H__ */
Index: include/l4/arch/or1k/v6/or1k1136
===================================================================
--- include/l4/arch/or1k/v6/or1k1136 (nonexistent)
+++ include/l4/arch/or1k/v6/or1k1136 (revision 7)
include/l4/arch/or1k/v6/or1k1136
Property changes :
Added: svn:mergeinfo
## -0,0 +0,0 ##
Index: include/l4/arch/or1k/v6/mm.h
===================================================================
--- include/l4/arch/or1k/v6/mm.h (nonexistent)
+++ include/l4/arch/or1k/v6/mm.h (revision 7)
@@ -0,0 +1,125 @@
+/*
+ * ARM v5-specific virtual memory details
+ *
+ * Copyright (C) 2007 Bahadir Balban
+ */
+#ifndef __V5_MM_H__
+#define __V5_MM_H__
+
+/* ARM specific definitions */
+#define VIRT_MEM_START 0
+#define VIRT_MEM_END 0xFFFFFFFF
+#define SECTION_SIZE SZ_1MB
+#define SECTION_MASK (SECTION_SIZE - 1)
+#define SECTION_ALIGN_MASK (~SECTION_MASK)
+#define SECTION_BITS 20
+#define ARM_PAGE_SIZE SZ_4K
+#define ARM_PAGE_MASK 0xFFF
+#define ARM_PAGE_BITS 12
+
+#define PGD_SIZE		(SZ_4K * 4)
+#define PGD_ENTRY_TOTAL SZ_4K
+
+#define PMD_SIZE SZ_1K
+#define PMD_ENTRY_TOTAL 256
+#define PMD_MAP_SIZE SZ_1MB
+#define PMD_ALIGN_MASK (~(PMD_SIZE - 1))
+#define PMD_TYPE_MASK 0x3
+#define PMD_TYPE_FAULT 0
+#define PMD_TYPE_PMD 1
+#define PMD_TYPE_SECTION 2
+
+#define PTE_TYPE_MASK 0x3
+#define PTE_TYPE_FAULT 0
+#define PTE_TYPE_LARGE 1
+#define PTE_TYPE_SMALL 2
+#define PTE_TYPE_TINY 3
+
+/* Permission field offsets */
+#define SECTION_AP0 10
+
+/*
+ * These are indices into arrays with pgd_t or pmd_t sized elements,
+ * therefore the index must be divided by appropriate element size
+ */
+#define PGD_INDEX(x) (((((unsigned long)(x)) >> 18) \
+ & 0x3FFC) / sizeof(pmd_t))
+
+/*
+ * Strip out the page offset in this
+ * megabyte from a total of 256 pages.
+ */
+#define PMD_INDEX(x) (((((unsigned long)(x)) >> 10) \
+ & 0x3FC) / sizeof (pte_t))
+
+
+/* We need this as print-early.S is including this file */
+#ifndef __ASSEMBLY__
+
+/* Type-checkable page table elements */
+typedef u32 pmd_t;
+typedef u32 pte_t;
+
+/* Page global directory made up of pgd_t entries */
+typedef struct pgd_table {
+ pmd_t entry[PGD_ENTRY_TOTAL];
+} pgd_table_t;
+
+/* Page middle directory made up of pmd_t entries */
+typedef struct pmd_table {
+ pte_t entry[PMD_ENTRY_TOTAL];
+} pmd_table_t;
+
+/* Applies for both small and large pages */
+#define PAGE_AP0 4
+#define PAGE_AP1 6
+#define PAGE_AP2 8
+#define PAGE_AP3 10
+
+/* Permission values with rom and sys bits ignored */
+#define SVC_RW_USR_NONE 1
+#define SVC_RW_USR_RO 2
+#define SVC_RW_USR_RW 3
+
+#define PTE_PROT_MASK (0xFF << 4)
+
+#define CACHEABILITY 3
+#define BUFFERABILITY 2
+#define cacheable (1 << CACHEABILITY)
+#define bufferable (1 << BUFFERABILITY)
+#define uncacheable 0
+#define unbufferable 0
+
+/* Helper macros for common cases */
+#define __MAP_USR_RW (cacheable | bufferable | (SVC_RW_USR_RW << PAGE_AP0) \
+ | (SVC_RW_USR_RW << PAGE_AP1) | (SVC_RW_USR_RW << PAGE_AP2) \
+ | (SVC_RW_USR_RW << PAGE_AP3))
+#define __MAP_USR_RO (cacheable | bufferable | (SVC_RW_USR_RO << PAGE_AP0) \
+ | (SVC_RW_USR_RO << PAGE_AP1) | (SVC_RW_USR_RO << PAGE_AP2) \
+ | (SVC_RW_USR_RO << PAGE_AP3))
+#define __MAP_KERN_RW (cacheable | bufferable | (SVC_RW_USR_NONE << PAGE_AP0) \
+ | (SVC_RW_USR_NONE << PAGE_AP1) | (SVC_RW_USR_NONE << PAGE_AP2) \
+ | (SVC_RW_USR_NONE << PAGE_AP3))
+#define __MAP_KERN_IO (uncacheable | unbufferable | (SVC_RW_USR_NONE << PAGE_AP0) \
+ | (SVC_RW_USR_NONE << PAGE_AP1) | (SVC_RW_USR_NONE << PAGE_AP2) \
+ | (SVC_RW_USR_NONE << PAGE_AP3))
+#define __MAP_USR_IO (uncacheable | unbufferable | (SVC_RW_USR_RW << PAGE_AP0) \
+ | (SVC_RW_USR_RW << PAGE_AP1) | (SVC_RW_USR_RW << PAGE_AP2) \
+ | (SVC_RW_USR_RW << PAGE_AP3))
+
+/* There is no execute bit in ARMv5, so we ignore it */
+#define __MAP_USR_RWX __MAP_USR_RW
+#define __MAP_USR_RX __MAP_USR_RO
+#define __MAP_KERN_RWX __MAP_KERN_RW
+#define __MAP_KERN_RX __MAP_KERN_RW /* We always have kernel RW */
+#define __MAP_FAULT 0
+
+void add_section_mapping_init(unsigned int paddr, unsigned int vaddr,
+ unsigned int size, unsigned int flags);
+
+void remove_section_mapping(unsigned long vaddr);
+
+extern pgd_table_t init_pgd;
+
+#endif /* __ASSEMBLY__ */
+#endif /* __V5_MM_H__ */
Index: include/l4/arch/or1k/v6/irq.h
===================================================================
--- include/l4/arch/or1k/v6/irq.h (nonexistent)
+++ include/l4/arch/or1k/v6/irq.h (revision 7)
@@ -0,0 +1,26 @@
+#ifndef __ARM_V5_IRQ_H__
+#define __ARM_V5_IRQ_H__
+
+static inline void enable_irqs()
+{
+	__asm__ __volatile__(
+		"mrs r0, cpsr_fc\n"
+		"bic r0, r0, #0x80\n"	/* Clear ARM_IRQ_BIT */
+		"msr cpsr_fc, r0\n"
+		::: "r0", "cc", "memory");	/* r0 and flags are clobbered */
+}
+
+static inline void disable_irqs()
+{
+	__asm__ __volatile__(
+		"mrs r0, cpsr_fc\n"
+		"orr r0, r0, #0x80\n"	/* Set ARM_IRQ_BIT */
+		"msr cpsr_fc, r0\n"
+		::: "r0", "cc", "memory");	/* r0 and flags are clobbered */
+}
+
+/* Disable irqs unconditionally, but also save the previous state, so that
+ * if they were already disabled before the call, a later restore call
+ * retains that state. */
+void irq_local_disable_save(unsigned long *state);
+#endif
Index: include/l4/arch/or1k/v6/exception.h
===================================================================
--- include/l4/arch/or1k/v6/exception.h (nonexistent)
+++ include/l4/arch/or1k/v6/exception.h (revision 7)
@@ -0,0 +1,33 @@
+/*
+ * Definitions for exception support on ARMv5
+ *
+ * Copyright (C) 2007 Bahadir Balban
+ */
+#ifndef __ARCH_V5_EXCEPTION_H__
+#define __ARCH_V5_EXCEPTION_H__
+
+#include INC_ARCH(asm.h)
+
+/*
+ * v5 Architecture-defined data abort values for FSR ordered
+ * in highest to lowest priority.
+ */
+#define DABT_TERMINAL 0x2
+#define DABT_VECTOR 0x0 /* Obsolete */
+#define DABT_ALIGN 0x1
+#define DABT_EXT_XLATE_LEVEL1 0xC
+#define DABT_EXT_XLATE_LEVEL2 0xE
+#define DABT_XLATE_SECT 0x5
+#define DABT_XLATE_PAGE 0x7
+#define DABT_DOMAIN_SECT 0x9
+#define DABT_DOMAIN_PAGE 0xB
+#define DABT_PERM_SECT 0xD
+#define DABT_PERM_PAGE 0xF
+#define DABT_EXT_LFETCH_SECT 0x4
+#define DABT_EXT_LFETCH_PAGE 0x6
+#define DABT_EXT_NON_LFETCH_SECT 0x8
+#define DABT_EXT_NON_LFETCH_PAGE 0xA
+
+#define FSR_FS_MASK 0xF
+
+#endif /* __ARCH_V5_EXCEPTION_H__ */
Index: include/l4/arch/or1k/v6/mmu_ops.h
===================================================================
--- include/l4/arch/or1k/v6/mmu_ops.h (nonexistent)
+++ include/l4/arch/or1k/v6/mmu_ops.h (revision 7)
@@ -0,0 +1,52 @@
+#ifndef __MMU__OPS__H__
+#define __MMU__OPS__H__
+/*
+ * Prototypes for low level mmu operations
+ *
+ * Copyright (C) 2005 Bahadir Balban
+ *
+ */
+void arm_set_ttb(unsigned int);
+void arm_set_domain(unsigned int);
+unsigned int arm_get_domain(void);
+void arm_enable_mmu(void);
+void arm_enable_icache(void);
+void arm_enable_dcache(void);
+void arm_enable_wbuffer(void);
+void arm_enable_high_vectors(void);
+void arm_invalidate_cache(void);
+void arm_invalidate_icache(void);
+void arm_invalidate_dcache(void);
+void arm_clean_dcache(void);
+void arm_clean_invalidate_dcache(void);
+void arm_clean_invalidate_cache(void);
+void arm_drain_writebuffer(void);
+void arm_invalidate_tlb(void);
+void arm_invalidate_itlb(void);
+void arm_invalidate_dtlb(void);
+
+static inline void arm_enable_caches(void)
+{
+ arm_enable_icache();
+ arm_enable_dcache();
+}
+
+
+static inline void dmb(void)
+{
+ /* This is the closest to its meaning */
+ arm_drain_writebuffer();
+}
+
+static inline void dsb(void)
+{
+ /* No op */
+}
+
+static inline void isb(void)
+{
+ /* No op */
+}
+
+
+#endif /* __MMU__OPS__H__ */
Index: include/l4/arch/or1k/v6/arm11mpcore/cpu.h
===================================================================
--- include/l4/arch/or1k/v6/arm11mpcore/cpu.h (nonexistent)
+++ include/l4/arch/or1k/v6/arm11mpcore/cpu.h (revision 7)
@@ -0,0 +1,13 @@
+/*
+ *
+ * Copyright (C) 2005 Bahadir Balban
+ *
+ */
+#ifndef __ARM926EJS__H__
+#define __ARM926EJS__H__
+
+
+
+
+
+#endif /* __ARM926EJS__H__ */
Index: include/l4/arch/or1k/v6/mutex.h
===================================================================
Index: include/l4/arch/or1k/v6/arm1136/cpu.h
===================================================================
--- include/l4/arch/or1k/v6/arm1136/cpu.h (nonexistent)
+++ include/l4/arch/or1k/v6/arm1136/cpu.h (revision 7)
@@ -0,0 +1,13 @@
+/*
+ *
+ * Copyright (C) 2005 Bahadir Balban
+ *
+ */
+#ifndef __ARM926EJS__H__
+#define __ARM926EJS__H__
+
+
+
+
+
+#endif /* __ARM926EJS__H__ */
Index: include/l4/arch/or1k/v6/cpu.h
===================================================================
--- include/l4/arch/or1k/v6/cpu.h (nonexistent)
+++ include/l4/arch/or1k/v6/cpu.h (revision 7)
@@ -0,0 +1,43 @@
+/*
+ * CPU-specific features
+ * defined on top of the base architecture.
+ *
+ * Copyright (C) 2010 B Labs Ltd.
+ * Written by Bahadir Balban
+ */
+
+#ifndef __V6_CPU_H__
+#define __V6_CPU_H__
+
+#include INC_SUBARCH(mmu_ops.h)
+
+#define MPIDR_CPUID_MASK 0x7
+
+/* Read multi-processor affinity register */
+static inline unsigned int __attribute__((always_inline))
+cp15_read_mpidr(void)
+{
+ unsigned int val;
+
+ __asm__ __volatile__ (
+ "mrc p15, 0, %0, c0, c0, 5\n"
+ : "=r" (val)
+ :
+ );
+
+ return val;
+}
+
+static inline int smp_get_cpuid()
+{
+ volatile u32 mpidr = cp15_read_mpidr();
+
+ return mpidr & MPIDR_CPUID_MASK;
+}
+
+static inline void cpu_startup(void)
+{
+
+}
+
+#endif /* __V6_CPU_H__ */
Index: include/l4/arch/or1k/v6/abort.h
===================================================================
--- include/l4/arch/or1k/v6/abort.h (nonexistent)
+++ include/l4/arch/or1k/v6/abort.h (revision 7)
@@ -0,0 +1 @@
+
Index: include/l4/arch/or1k/asm-macros.S.ARM
===================================================================
--- include/l4/arch/or1k/asm-macros.S.ARM (nonexistent)
+++ include/l4/arch/or1k/asm-macros.S.ARM (revision 7)
@@ -0,0 +1,16 @@
+/*
+ * Copyright (C) 2010 B Labs Ltd.
+ *
+ * Common assembler macros
+ *
+ * Prem Mallappa, Bahadir Balban
+ */
+#ifndef __ASM_MACROS_S__
+#define __ASM_MACROS_S__
+
+ .macro get_cpuid cpuid
+ mrc p15, 0, \cpuid, c0, c0, 5 @ Read MPIDR
+ and \cpuid, \cpuid, #0xF @ Mask lower cpuid bits
+ .endm
+
+#endif /* __ASM_MACROS_S__ */
include/l4/arch/or1k/asm-macros.S.ARM
Property changes :
Added: svn:mergeinfo
## -0,0 +0,0 ##
Index: include/l4/arch/or1k/scu.h
===================================================================
--- include/l4/arch/or1k/scu.h (nonexistent)
+++ include/l4/arch/or1k/scu.h (revision 7)
@@ -0,0 +1,34 @@
+/*
+ * SCU registers
+ *
+ * Copyright (C) 2010 B Labs Ltd.
+ *
+ * Author: Prem Mallappa
+ */
+#ifndef __SCU_H__
+#define __SCU_H__
+
+
+/* Following defines may well go into realview/scu.h */
+#define SCU_CTRL_REG 0x00 /* Control Register */
+#define SCU_CFG_REG 0x04 /* Configuration Register */
+#define SCU_CPU_PWR_REG 0x08 /* SCU CPU Power state register */
+#define SCU_INV_ALL_S 0x0C /* SCU Invalidate all Secure Registers */
+#define SCU_ACCESS_REG_S 0x50 /* SCU Access Control Secure */
+#define SCU_ACCESS_REG_NS 0x54 /* SCU Access Control Non-Secure */
+
+/* The contents of CONTROL and CONFIG are implementation-defined, so they may go into a platform-specific scu.h */
+#define SCU_CTRL_EN (1 << 0)
+#define SCU_CTRL_ADDR_FLTR_EN (1 << 1)
+#define SCU_CTRL_PARITY_ON (1 << 2)
+#define SCU_CTRL_STBY_EN (1 << 5) /* SCU StandBy Enable */
+#define SCU_CTRL_GIC_STBY_EN (1 << 6) /* GIC Standby enable */
+
+/* Config register */
+#define SCU_CFG_SMP_MASK 0x000000f0
+#define SCU_CFG_TAG_RAM_MASK 0x0000ff00
+#define SCU_CFG_NCPU_MASK 0x7
+#define SCU_CFG_SMP_NCPU_SHIFT 4
+
+
+#endif /* __SCU_H__ */
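A sketch of counting cores via the config register masks above; `scu_base' is an assumed, already-mapped virtual address, and read() is the accessor from io.h in this same revision:

        /* MPCore SCUs report the number of CPUs minus one in CFG. */
        u32 cfg = read(scu_base + SCU_CFG_REG);
        int ncpus = (cfg & SCU_CFG_NCPU_MASK) + 1;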
Index: include/l4/arch/or1k/irq.h
===================================================================
--- include/l4/arch/or1k/irq.h (nonexistent)
+++ include/l4/arch/or1k/irq.h (revision 7)
@@ -0,0 +1,29 @@
+#ifndef __ARM_IRQ_H__
+#define __ARM_IRQ_H__
+
+#include INC_SUBARCH(irq.h)
+
+void irq_local_restore(unsigned long state);
+void irq_local_disable_save(unsigned long *state);
+int irqs_enabled();
+
+static inline void irq_local_enable()
+{
+ enable_irqs();
+}
+
+static inline void irq_local_disable()
+{
+ disable_irqs();
+}
+
+
+/*
+ * Destructive atomic-read.
+ *
+ * Write 0 to the byte at @location as its contents are read back.
+ */
+char l4_atomic_dest_readb(void *location);
+
+
+#endif /* __ARM_IRQ_H__ */
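The destructive read above pairs naturally with the utcb notify slots from glue/or1k/message.h; a usage sketch, with `utcb', `slot' and the handler assumed in scope:

        /* Reading the slot also zeroes it, so no event is counted twice. */
        char pending = l4_atomic_dest_readb(&utcb->notify[slot]);

        if (pending)
                handle_notification(slot);      /* hypothetical handler */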
Index: include/l4/arch/or1k/exception.h
===================================================================
--- include/l4/arch/or1k/exception.h (nonexistent)
+++ include/l4/arch/or1k/exception.h (revision 7)
@@ -0,0 +1,73 @@
+/*
+ * Common definitions for exceptions
+ * across ARM sub-architectures.
+ *
+ * Copyright (C) 2010 B Labs Ltd.
+ */
+
+#ifndef __EXCEPTION_H__
+#define __EXCEPTION_H__
+
+#include INC_SUBARCH(exception.h)
+#include INC_ARCH(asm.h)
+
+/* Abort debugging conditions */
+// #define DEBUG_ABORTS
+#if defined (DEBUG_ABORTS)
+#define dbg_abort(...) printk(__VA_ARGS__)
+#else
+#define dbg_abort(...)
+#endif
+
+/* Codezero-specific abort type */
+#define ABORT_TYPE_PREFETCH 1
+#define ABORT_TYPE_DATA 0
+
+/* If abort is handled and resolved in check_aborts */
+#define ABORT_HANDLED 1
+
+/* Codezero uses bit 8 (architecturally always zero) of the FSR to encode the abort type */
+#define set_abort_type(fsr, x) { fsr &= ~(1 << 8); fsr |= ((x & 1) << 8); }
+#define is_prefetch_abort(fsr) ((fsr >> 8) & 0x1)
+#define is_data_abort(fsr) (!is_prefetch_abort(fsr))
+
+/* Kernel's data about the fault */
+typedef struct fault_kdata {
+ u32 faulty_pc; /* In DABT: Aborting PC, In PABT: Same as FAR */
+ u32 fsr; /* In DABT: DFSR, In PABT: IFSR */
+ u32 far; /* In DABT: DFAR, in PABT: IFAR */
+ pte_t pte; /* Faulty page table entry */
+} __attribute__ ((__packed__)) fault_kdata_t;
+
+
+/* Filled on entry to the irq handler, and only if a process was interrupted. */
+extern unsigned int preempted_psr;
+
+/* Implementing these as functions causes a circular include dependency with tcb.h */
+#define TASK_IN_KERNEL(tcb) (((tcb)->context.spsr & ARM_MODE_MASK) == ARM_MODE_SVC)
+#define TASK_IN_USER(tcb) (!TASK_IN_KERNEL(tcb))
+
+static inline int is_user_mode(u32 spsr)
+{
+ return ((spsr & ARM_MODE_MASK) == ARM_MODE_USR);
+}
+
+static inline int in_kernel()
+{
+ return (((preempted_psr & ARM_MODE_MASK) == ARM_MODE_SVC)) ? 1 : 0;
+}
+
+static inline int in_user()
+{
+ return !in_kernel();
+}
+
+int pager_pagein_request(unsigned long vaddr, unsigned long size,
+ unsigned int flags);
+
+int fault_ipc_to_pager(u32 faulty_pc, u32 fsr, u32 far, u32 ipc_tag);
+
+int is_kernel_abort(u32 faulted_pc, u32 fsr, u32 far, u32 spsr);
+int check_abort_type(u32 faulted_pc, u32 fsr, u32 far, u32 spsr);
+
+#endif /* __EXCEPTION_H__ */
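A quick illustration of the bit-8 convention above; the fsr/far/pc values are placeholders for what the abort handler would pass in:

        set_abort_type(fsr, ABORT_TYPE_DATA);   /* clears bit 8 */

        if (is_data_abort(fsr))
                fault_ipc_to_pager(faulty_pc, fsr, far, ipc_tag);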
Index: include/l4/arch/or1k/linker.lds
===================================================================
--- include/l4/arch/or1k/linker.lds (nonexistent)
+++ include/l4/arch/or1k/linker.lds (revision 7)
@@ -0,0 +1,85 @@
+/*
+ * Simple linker script
+ *
+ * Copyright (C) 2007 Bahadir Balban
+ */
+
+/* FIXME:
+ * Currently we can't use cpp #defines in the linker script.
+ * Check that the offsets below are coherent with offsets.h.
+ */
+kernel_offset = 0xF0000000;
+kernel_physical = 0x8000;
+kernel_virtual = kernel_physical + kernel_offset;
+
+/* A temporary boot stack is used before a proper kernel stack is set up */
+_bootstack_physical = _bootstack - kernel_offset;
+
+/* The symbols are linked at virtual addresses, and so is _start.
+ * We must set the entry point to a physical address, so that
+ * when the image is loaded, it doesn't jump to a non-existent
+ * virtual address.
+ */
+ENTRY(kernel_physical)
+
+SECTIONS
+{
+ . = kernel_virtual;
+ _start_kernel = .;
+ .text : AT (ADDR(.text) - kernel_offset)
+ {
+ _start_text = .;
+ /* Make sure head.S comes first */
+ /* *head.o(.text) This only works when given its full path. Bad limitation. */
+ *(.text.head)
+ *(.text)
+ _end_text = .;
+ }
+ . = ALIGN(4);
+ /* rodata is needed else your strings will link at physical! */
+ .rodata : AT (ADDR(.rodata) - kernel_offset) { *(.rodata) }
+ .rodata1 : AT (ADDR(.rodata1) - kernel_offset) { *(.rodata1) }
+ .data : AT (ADDR(.data) - kernel_offset)
+ {
+ _start_data = .;
+ *(.data)
+ _start_vectors = .;
+ *(.data.vectors)
+ . = ALIGN(4K);
+ _end_vectors = .;
+ _start_kip = .;
+ *(.data.kip)
+ . = ALIGN(4K);
+ _end_kip = .;
+ _start_syscalls = .;
+ *(.data.syscalls)
+ . = ALIGN(4K);
+ _end_syscalls = .;
+ _end_data = .;
+ }
+ .bss : AT (ADDR(.bss) - kernel_offset)
+ {
+ *(.bss)
+ }
+ . = ALIGN(4K);
+	. += 0x2000;	/* This is required as the location counter does not
+			 * seem to increment for the .bss section
+			 * TODO: Change this with PAGE_SIZE */
+
+ /* Below part is to be discarded after boot */
+ _start_init = .;
+ .init : AT (ADDR(.init) - kernel_offset)
+ {
+ . = ALIGN(16K); /* For initial pgd */
+ *(.init.pgd)
+ *(.init.bootmem)
+ *(.init.data)
+ }
+ /* Space for boot stack */
+ . += 0x1000;
+ . = ALIGN(4K); /* A page aligned stack of at least 4KB */
+ _end_init = .;
+ _bootstack = .;
+ _end_kernel = .;
+ _end = .;
+}
Index: include/l4/arch/or1k/asm.h
===================================================================
--- include/l4/arch/or1k/asm.h (nonexistent)
+++ include/l4/arch/or1k/asm.h (revision 7)
@@ -0,0 +1,67 @@
+
+#ifndef __ARCH_ARM_ASM_H__
+#define __ARCH_ARM_ASM_H__
+
+
+/* Top nibble of the byte denotes irqs/fiqs disabled, ARM state */
+#define ARM_MODE_MASK 0x1F
+
+#define ARM_MODE_SVC 0x13
+#define ARM_MODE_UND 0x1B
+#define ARM_MODE_ABT 0x17
+#define ARM_MODE_IRQ 0x12
+#define ARM_MODE_FIQ 0x11
+#define ARM_MODE_USR 0x10
+#define ARM_MODE_SYS 0x1F
+#define ARM_NOIRQ_SVC 0xD3
+#define ARM_NOIRQ_UND 0xDB
+#define ARM_NOIRQ_ABT 0xD7
+#define ARM_NOIRQ_IRQ 0xD2
+#define ARM_NOIRQ_FIQ 0xD1
+#define ARM_NOIRQ_USR 0xD0
+#define ARM_NOIRQ_SYS 0xDF
+
+/* For enabling *clear* these bits */
+#define ARM_IRQ_BIT 0x080
+#define ARM_FIQ_BIT 0x040
+#define ARM_A_BIT 0x100 /* Asynchronous abort */
+
+/* Notes about ARM instructions:
+ *
+ * TST instruction:
+ *
+ * Essentially TST "AND"s two values and the result affects the Z (Zero bit)
+ * in CPSR, which can be used for conditions. For example in:
+ *
+ * TST r0, #VALUE
+ *
+ * If ANDing r0 and #VALUE results in a non-zero value (i.e. they have a
+ * common bit set to 1), then the Z bit is 0, which reads as an NE (Not
+ * Equal) condition. Consequently, e.g. a BEQ instruction would be skipped
+ * and a BNE would be executed.
+ *
+ * In the opposite case, r0 and #VALUE have no common bits, and ANDing them
+ * results in 0. This means the Z bit is 1, and any EQ instruction coming
+ * afterwards would be executed.
+ *
+ * This explanation is here because the behaviour of the Z bit in TST is not
+ * obvious. Normally the Z bit indicates equivalence (e.g. after a CMP
+ * instruction), but with TST even two equal values can produce an NE or an
+ * EQ condition, depending on whether the values have non-zero bits.
+ */
+
+
+#define dbg_stop_here() __asm__ __volatile__ ( "bkpt #0\n" :: )
+
+#define BEGIN_PROC(name) \
+ .global name; \
+ .type name,function; \
+ .align; \
+name:
+
+#define END_PROC(name) \
+.fend_##name: \
+ .size name,.fend_##name - name;
+
+#endif /* __ARCH_ARM_ASM_H__ */
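A concrete instance of the TST note above, using the mode constants defined in this file:

        /* TST r0, #0x80 with r0 = ARM_NOIRQ_SVC (0xD3):
         *   0xD3 & 0x80 = 0x80 (non-zero) -> Z = 0 -> NE holds.
         * TST r0, #0x80 with r0 = ARM_MODE_SVC (0x13):
         *   0x13 & 0x80 = 0 -> Z = 1 -> EQ holds. */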
Index: include/l4/arch/or1k/mutex.h
===================================================================
--- include/l4/arch/or1k/mutex.h (nonexistent)
+++ include/l4/arch/or1k/mutex.h (revision 7)
@@ -0,0 +1,16 @@
+/*
+ * ARM specific low-level mutex interfaces
+ *
+ * Copyright (C) 2007 Bahadir Balban
+ */
+
+#ifndef __ARCH_MUTEX_H__
+#define __ARCH_MUTEX_H__
+
+/* TODO: The return types could be improved for debug checking */
+void __spin_lock(unsigned int *s);
+void __spin_unlock(unsigned int *s);
+unsigned int __mutex_lock(unsigned int *m);
+void __mutex_unlock(unsigned int *m);
+
+#endif /* __ARCH_MUTEX_H__ */
Index: include/l4/arch/or1k/linker.h
===================================================================
--- include/l4/arch/or1k/linker.h (nonexistent)
+++ include/l4/arch/or1k/linker.h (revision 7)
@@ -0,0 +1,36 @@
+/*
+ * Linker-defined variables
+ *
+ * Copyright (C) 2007 Bahadir Balban
+ */
+#ifndef __ARCH_ARM_LINKER_H__
+#define __ARCH_ARM_LINKER_H__
+
+extern unsigned long _start_kernel[];
+extern unsigned long _start_text[];
+extern unsigned long _end_text[];
+extern unsigned long _start_data[];
+extern unsigned long _end_data[];
+extern unsigned long _start_vectors[];
+extern unsigned long arm_high_vector[];
+extern unsigned long _end_vectors[];
+extern unsigned long _start_kip[];
+extern unsigned long _end_kip[];
+extern unsigned long _start_syscalls[];
+extern unsigned long _end_syscalls[];
+extern unsigned long _start_init[];
+extern unsigned long _end_init[];
+extern unsigned long _start_bootstack[];
+extern unsigned long _end_bootstack[];
+extern unsigned long _start_init_pgd[];
+extern unsigned long _end_init_pgd[];
+
+extern unsigned long _end_kernel[];
+extern unsigned long _end[];
+
+/* Link markers that get modified at runtime */
+unsigned long __svc_images_end;
+unsigned long __pt_start;
+unsigned long __pt_end;
+
+#endif /* __ARCH_ARM_LINKER_H__ */
Index: include/l4/arch/or1k/linker.lds.in
===================================================================
--- include/l4/arch/or1k/linker.lds.in (nonexistent)
+++ include/l4/arch/or1k/linker.lds.in (revision 7)
@@ -0,0 +1,90 @@
+/*
+ * Simple linker script
+ *
+ * Copyright (C) 2007 Bahadir Balban
+ */
+#if !defined (CONFIG_NCPU)
+#define CONFIG_NCPU 1
+#endif
+
+phys_ram_start = PLATFORM_PHYS_MEM_START;
+
+#if !defined(kernel_offset)
+kernel_offset = KERNEL_AREA_START - phys_ram_start;
+#endif
+
+kernel_physical = 0x8000 + phys_ram_start;
+kernel_virtual = kernel_physical + kernel_offset;
+
+/* A temporary boot stack is used before a proper kernel stack is set up */
+_bootstack_physical = _end_bootstack - kernel_offset;
+
+/* The symbols are linked at virtual addresses, and so is _start.
+ * We must set the entry point to a physical address, so that
+ * when the image is loaded, it doesn't jump to a non-existent
+ * virtual address.
+ */
+ENTRY(kernel_physical)
+
+SECTIONS
+{
+ . = kernel_virtual;
+ _start_kernel = .;
+ .text : AT (ADDR(.text) - kernel_offset)
+ {
+ _start_text = .;
+ /* Make sure head.S comes first */
+ /* *head.o(.text) This only works when given its full path. Bad limitation. */
+ *(.text.head)
+ *(.text)
+ _end_text = .;
+ }
+ . = ALIGN(4);
+ /* rodata is needed else your strings will link at physical! */
+ .rodata : AT (ADDR(.rodata) - kernel_offset) { *(.rodata) }
+ .rodata1 : AT (ADDR(.rodata1) - kernel_offset) { *(.rodata1) }
+ .data : AT (ADDR(.data) - kernel_offset)
+ {
+ _start_data = .;
+ *(.data)
+ /* Best alignment because we need 4 x (4K) and 1 x 16K block */
+ . = ALIGN(16K);
+ _start_vectors = .;
+ *(.data.vectors)
+ . = ALIGN(4K);
+ _end_vectors = .;
+ _start_kip = .;
+ *(.data.kip)
+ . = ALIGN(4K);
+ _end_kip = .;
+ _start_syscalls = .;
+ *(.data.syscalls)
+ . = ALIGN(4K);
+ _end_syscalls = .;
+ _start_init_pgd = .;
+ *(.data.pgd);
+ _end_init_pgd = .;
+ _start_bootstack = .;
+ . = ALIGN(4K);
+ . += PAGE_SIZE * CONFIG_NCPU;
+ _end_bootstack = .;
+ _end_data = .;
+ }
+ .bss : AT (ADDR(.bss) - kernel_offset)
+ {
+ *(.bss)
+ }
+ . = ALIGN(4K);
+
+ /* Below part is to be discarded after boot */
+ _start_init = .;
+ .init : AT (ADDR(.init) - kernel_offset)
+ {
+ *(.init.task.pgd) /* Non-global task table on split tables, otherwise nil */
+ *(.init.bootmem)
+ *(.init.data)
+ }
+ _end_init = .;
+ _end_kernel = .;
+ _end = .;
+}
Index: include/l4/arch/or1k/types.h
===================================================================
--- include/l4/arch/or1k/types.h (nonexistent)
+++ include/l4/arch/or1k/types.h (revision 7)
@@ -0,0 +1,18 @@
+#ifndef __ARCH_TYPES_H__
+#define __ARCH_TYPES_H__
+
+#if !defined(__ASSEMBLY__)
+typedef unsigned long long u64;
+typedef unsigned int u32;
+typedef unsigned short u16;
+typedef unsigned char u8;
+typedef signed long long s64;
+typedef signed int s32;
+typedef signed short s16;
+typedef signed char s8;
+
+/* Thread/Space id type */
+typedef unsigned int l4id_t;
+
+#endif /* !__ASSEMBLY__ */
+#endif /* !__ARCH_TYPES_H__ */
Index: include/l4/arch/or1k/io.h
===================================================================
--- include/l4/arch/or1k/io.h (nonexistent)
+++ include/l4/arch/or1k/io.h (revision 7)
@@ -0,0 +1,25 @@
+#ifndef __ARM_IO_H__
+#define __ARM_IO_H__
+/*
+ * Arch-specific io functions/macros.
+ *
+ * Copyright (C) 2007 Bahadir Balban
+ */
+
+#if defined (__KERNEL__)
+
+#include INC_GLUE(memlayout.h)
+
+#define read(address) *((volatile unsigned int *) (address))
+#define write(val, address) *((volatile unsigned int *) (address)) = val
+
+#endif /* ends __KERNEL__ */
+
+/*
+ * Generic uart virtual address until a file-based console access
+ * is available for userspace
+ */
+#define USERSPACE_CONSOLE_VBASE 0xF9800000
+
+
+#endif /* __ARM_IO_H__ */
Index: include/l4/arch/or1k
===================================================================
--- include/l4/arch/or1k (nonexistent)
+++ include/l4/arch/or1k (revision 7)
include/l4/arch/or1k
Property changes :
Added: svn:mergeinfo
## -0,0 +0,0 ##
Index: conts/posix/mm0/include/arch/or1k/mm.h
===================================================================
--- conts/posix/mm0/include/arch/or1k/mm.h (nonexistent)
+++ conts/posix/mm0/include/arch/or1k/mm.h (revision 7)
@@ -0,0 +1,15 @@
+#ifndef __INITTASK_ARCH_MM_H__
+#define __INITTASK_ARCH_MM_H__
+
+#include
+#include
+#include INC_GLUE(memory.h)
+#include INC_ARCH(exception.h)
+#include
+
+struct fault_data;
+void set_generic_fault_params(struct fault_data *fault);
+void arch_print_fault_params(struct fault_data *fault);
+void fault_handle_error(struct fault_data *fault);
+
+#endif /* __INITTASK_ARCH_MM_H__ */
Index: conts/posix/mm0/include/arch/or1k/debug.h
===================================================================
--- conts/posix/mm0/include/arch/or1k/debug.h (nonexistent)
+++ conts/posix/mm0/include/arch/or1k/debug.h (revision 7)
@@ -0,0 +1,44 @@
+/*
+ * Debug/performance measurements for mm0
+ *
+ * Copyright (C) 2010 B Labs Ltd.
+ */
+#ifndef __ARCH_DEBUG_H__
+#define __ARCH_DEBUG_H__
+
+#if !defined(CONFIG_DEBUG_PERFMON_USER)
+
+#include
+
+/* Common empty definitions for all arches */
+static inline u32 perfmon_read_cyccnt() { return 0; }
+
+static inline void perfmon_reset_start_cyccnt() { }
+static inline u32 perfmon_read_reset_start_cyccnt() { return 0; }
+
+#define debug_record_cycles(str)
+
+#else /* End of CONFIG_DEBUG_PERFMON_USER */
+
+/* Architecture specific perfmon cycle counting */
+#include L4LIB_INC_SUBARCH(perfmon.h)
+
+extern u64 perfmon_total_cycles;
+extern u64 current_cycles;
+
+/*
+ * This assumes a Cortex-A9 running at 400 MHz. 25 / 100000 is
+ * a rewriting of 2.5 nanosec / 1,000,000.
+ */
+#define debug_record_cycles(str) \
+{ \
+ current_cycles = perfmon_read_cyccnt(); \
+ perfmon_total_cycles += current_cycles; \
+ printf("%s: took %llu milliseconds\n", str, \
+ current_cycles * 64 * 25 / 100000); \
+ perfmon_reset_start_cyccnt(); \
+}
+
+#endif /* End of !CONFIG_DEBUG_PERFMON_USER */
+
+#endif /* __ARCH_DEBUG_H__ */
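A sketch of wrapping a measured region with the macro above, with CONFIG_DEBUG_PERFMON_USER enabled; do_work() is a placeholder for the code under test:

    void measured_path(void)
    {
            perfmon_reset_start_cyccnt();   /* start from a clean count */
            do_work();                      /* placeholder for measured code */
            debug_record_cycles("do_work"); /* print elapsed time and restart */
    }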
Index: conts/posix/mm0/include/arch/or1k
===================================================================
--- conts/posix/mm0/include/arch/or1k (nonexistent)
+++ conts/posix/mm0/include/arch/or1k (revision 7)
conts/posix/mm0/include/arch/or1k
Property changes :
Added: svn:mergeinfo
## -0,0 +0,0 ##
Index: conts/posix/mm0/mm/arch/or1k/crt0.S.ARM
===================================================================
--- conts/posix/mm0/mm/arch/or1k/crt0.S.ARM (nonexistent)
+++ conts/posix/mm0/mm/arch/or1k/crt0.S.ARM (revision 7)
@@ -0,0 +1,94 @@
+/*
+ * Australian Public Licence B (OZPLB)
+ *
+ * Version 1-0
+ *
+ * Copyright (c) 2004 National ICT Australia
+ *
+ * All rights reserved.
+ *
+ * Developed by: Embedded, Real-time and Operating Systems Program (ERTOS)
+ * National ICT Australia
+ * http://www.ertos.nicta.com.au
+ *
+ * Permission is granted by National ICT Australia, free of charge, to
+ * any person obtaining a copy of this software and any associated
+ * documentation files (the "Software") to deal with the Software without
+ * restriction, including (without limitation) the rights to use, copy,
+ * modify, adapt, merge, publish, distribute, communicate to the public,
+ * sublicense, and/or sell, lend or rent out copies of the Software, and
+ * to permit persons to whom the Software is furnished to do so, subject
+ * to the following conditions:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimers.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimers in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of National ICT Australia, nor the names of its
+ * contributors, may be used to endorse or promote products derived
+ * from this Software without specific prior written permission.
+ *
+ * EXCEPT AS EXPRESSLY STATED IN THIS LICENCE AND TO THE FULL EXTENT
+ * PERMITTED BY APPLICABLE LAW, THE SOFTWARE IS PROVIDED "AS-IS", AND
+ * NATIONAL ICT AUSTRALIA AND ITS CONTRIBUTORS MAKE NO REPRESENTATIONS,
+ * WARRANTIES OR CONDITIONS OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+ * BUT NOT LIMITED TO ANY REPRESENTATIONS, WARRANTIES OR CONDITIONS
+ * REGARDING THE CONTENTS OR ACCURACY OF THE SOFTWARE, OR OF TITLE,
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT,
+ * THE ABSENCE OF LATENT OR OTHER DEFECTS, OR THE PRESENCE OR ABSENCE OF
+ * ERRORS, WHETHER OR NOT DISCOVERABLE.
+ *
+ * TO THE FULL EXTENT PERMITTED BY APPLICABLE LAW, IN NO EVENT SHALL
+ * NATIONAL ICT AUSTRALIA OR ITS CONTRIBUTORS BE LIABLE ON ANY LEGAL
+ * THEORY (INCLUDING, WITHOUT LIMITATION, IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHERWISE) FOR ANY CLAIM, LOSS, DAMAGES OR OTHER
+ * LIABILITY, INCLUDING (WITHOUT LIMITATION) LOSS OF PRODUCTION OR
+ * OPERATION TIME, LOSS, DAMAGE OR CORRUPTION OF DATA OR RECORDS; OR LOSS
+ * OF ANTICIPATED SAVINGS, OPPORTUNITY, REVENUE, PROFIT OR GOODWILL, OR
+ * OTHER ECONOMIC LOSS; OR ANY SPECIAL, INCIDENTAL, INDIRECT,
+ * CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES, ARISING OUT OF OR IN
+ * CONNECTION WITH THIS LICENCE, THE SOFTWARE OR THE USE OF OR OTHER
+ * DEALINGS WITH THE SOFTWARE, EVEN IF NATIONAL ICT AUSTRALIA OR ITS
+ * CONTRIBUTORS HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH CLAIM, LOSS,
+ * DAMAGES OR OTHER LIABILITY.
+ *
+ * If applicable legislation implies representations, warranties, or
+ * conditions, or imposes obligations or liability on National ICT
+ * Australia or one of its contributors in respect of the Software that
+ * cannot be wholly or partly excluded, restricted or modified, the
+ * liability of National ICT Australia or the contributor is limited, to
+ * the full extent permitted by the applicable legislation, at its
+ * option, to:
+ * a. in the case of goods, any one or more of the following:
+ * i. the replacement of the goods or the supply of equivalent goods;
+ * ii. the repair of the goods;
+ * iii. the payment of the cost of replacing the goods or of acquiring
+ * equivalent goods;
+ * iv. the payment of the cost of having the goods repaired; or
+ * b. in the case of services:
+ * i. the supplying of the services again; or
+ * ii. the payment of the cost of having the services supplied again.
+ *
+ * The construction, validity and performance of this licence is governed
+ * by the laws in force in New South Wales, Australia.
+ */
+
+#ifdef __thumb__
+#define bl blx
+#endif
+
+ .section .text.head
+ .code 32
+ .global _start;
+ .align;
+_start:
+ ldr sp, =__stack
+ bl platform_init
+ bl __container_init
+1:
+ b 1b
+
conts/posix/mm0/mm/arch/or1k/crt0.S.ARM
Property changes :
Added: svn:mergeinfo
## -0,0 +0,0 ##
Index: conts/posix/mm0/mm/arch/or1k/debug.c
===================================================================
--- conts/posix/mm0/mm/arch/or1k/debug.c (nonexistent)
+++ conts/posix/mm0/mm/arch/or1k/debug.c (revision 7)
@@ -0,0 +1,11 @@
+/*
+ * Perfmon globals
+ */
+
+
+#if defined(CONFIG_DEBUG_PERFMON_USER)
+
+u64 perfmon_total_cycles;
+u64 current_cycles;
+
+#endif
Index: conts/posix/mm0/mm/arch/or1k/v5/mm.c
===================================================================
--- conts/posix/mm0/mm/arch/or1k/v5/mm.c (nonexistent)
+++ conts/posix/mm0/mm/arch/or1k/v5/mm.c (revision 7)
@@ -0,0 +1,65 @@
+/*
+ * ARMv5 specific functions
+ *
+ * Copyright (C) 2008 - 2010 B Labs Ltd.
+ */
+#include
+#include
+#include
+#include __INC_ARCH(mm.h)
+
+/* Extracts generic protection flags from architecture-specific pte */
+unsigned int vm_prot_flags(pte_t pte)
+{
+ unsigned int vm_prot_flags = 0;
+ unsigned int rw_flags = __MAP_USR_RW & PTE_PROT_MASK;
+ unsigned int ro_flags = __MAP_USR_RO & PTE_PROT_MASK;
+
+ /* Clear non-protection flags */
+ pte &= PTE_PROT_MASK;
+
+ if (pte == ro_flags)
+ vm_prot_flags = VM_READ | VM_EXEC;
+ else if (pte == rw_flags)
+ vm_prot_flags = VM_READ | VM_WRITE | VM_EXEC;
+ else
+ vm_prot_flags = VM_NONE;
+
+ return vm_prot_flags;
+}
+
+/*
+ * PTE STATES:
+ * PTE type field: 00 (Translation fault)
+ * PTE type field correct, AP bits: None (Read or Write access fault)
+ * PTE type field correct, AP bits: RO (Write access fault)
+ */
+
+/*
+ * Extracts arch-specific fault parameters
+ * and puts them into generic format
+ */
+void set_generic_fault_params(struct fault_data *fault)
+{
+ unsigned int prot_flags = vm_prot_flags(fault->kdata->pte);
+
+ fault->reason = 0;
+ fault->pte_flags = prot_flags;
+
+ if (is_prefetch_abort(fault->kdata->fsr)) {
+ fault->reason |= VM_READ;
+ fault->address = fault->kdata->faulty_pc;
+ } else {
+ fault->address = fault->kdata->far;
+
+ /* Always assume read fault first */
+ if (prot_flags & VM_NONE)
+ fault->reason |= VM_READ;
+ else if (prot_flags & VM_READ)
+ fault->reason |= VM_WRITE;
+ else
+ BUG();
+ }
+ arch_print_fault_params(fault);
+}
+
Index: conts/posix/mm0/mm/arch/or1k/mm.c
===================================================================
--- conts/posix/mm0/mm/arch/or1k/mm.c (nonexistent)
+++ conts/posix/mm0/mm/arch/or1k/mm.c (revision 7)
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2007 Bahadir Balban
+ */
+#include
+#include
+#include
+#include __INC_ARCH(mm.h)
+
+#if defined(DEBUG_FAULT_HANDLING)
+void arch_print_fault_params(struct fault_data *fault)
+{
+ printf("%s: Handling %s fault (%s abort) from %d. fault @ 0x%x, generic pte flags: 0x%x\n",
+ __TASKNAME__, (fault->reason & VM_READ) ? "read" :
+ (fault->reason & VM_WRITE) ? "write" : "exec",
+ is_prefetch_abort(fault->kdata->fsr) ? "prefetch" : "data",
+ fault->task->tid, fault->address, fault->pte_flags);
+}
+#else
+void arch_print_fault_params(struct fault_data *fault) { }
+#endif
+
+
+void fault_handle_error(struct fault_data *fault)
+{
+ struct task_ids ids;
+
+ /* Suspend the task */
+ ids.tid = fault->task->tid;
+ BUG_ON(l4_thread_control(THREAD_SUSPEND, &ids) < 0);
+
+ BUG();
+}
+
Index: conts/posix/mm0/mm/arch/or1k/v6/mm.c
===================================================================
--- conts/posix/mm0/mm/arch/or1k/v6/mm.c (nonexistent)
+++ conts/posix/mm0/mm/arch/or1k/v6/mm.c (revision 7)
@@ -0,0 +1,65 @@
+/*
+ * ARMv6 specific functions
+ *
+ * Copyright (C) 2008 - 2010 B Labs Ltd.
+ */
+#include
+#include
+#include
+#include __INC_ARCH(mm.h)
+
+/* Extracts generic protection flags from architecture-specific pte */
+unsigned int vm_prot_flags(pte_t pte)
+{
+ unsigned int vm_prot_flags = 0;
+ unsigned int rw_flags = __MAP_USR_RW & PTE_PROT_MASK;
+ unsigned int ro_flags = __MAP_USR_RO & PTE_PROT_MASK;
+
+ /* Clear non-protection flags */
+ pte &= PTE_PROT_MASK;
+
+ if (pte == ro_flags)
+ vm_prot_flags = VM_READ | VM_EXEC;
+ else if (pte == rw_flags)
+ vm_prot_flags = VM_READ | VM_WRITE | VM_EXEC;
+ else
+ vm_prot_flags = VM_NONE;
+
+ return vm_prot_flags;
+}
+
+/*
+ * PTE STATES:
+ * PTE type field: 00 (Translation fault)
+ * PTE type field correct, AP bits: None (Read or Write access fault)
+ * PTE type field correct, AP bits: RO (Write access fault)
+ */
+
+/*
+ * Extracts arch-specific fault parameters
+ * and puts them into generic format
+ */
+void set_generic_fault_params(struct fault_data *fault)
+{
+ unsigned int prot_flags = vm_prot_flags(fault->kdata->pte);
+
+ fault->reason = 0;
+ fault->pte_flags = prot_flags;
+
+ if (is_prefetch_abort(fault->kdata->fsr)) {
+ fault->reason |= VM_READ;
+ fault->address = fault->kdata->faulty_pc;
+ } else {
+ fault->address = fault->kdata->far;
+
+ /* Always assume read fault first */
+ if (prot_flags & VM_NONE)
+ fault->reason |= VM_READ;
+ else if (prot_flags & VM_READ)
+ fault->reason |= VM_WRITE;
+ else
+ BUG();
+ }
+ arch_print_fault_params(fault);
+}
+
Index: conts/posix/mm0/mm/arch/or1k/v7/mm.c
===================================================================
--- conts/posix/mm0/mm/arch/or1k/v7/mm.c (nonexistent)
+++ conts/posix/mm0/mm/arch/or1k/v7/mm.c (revision 7)
@@ -0,0 +1,77 @@
+/*
+ * ARMv7 specific functions
+ *
+ * Copyright (C) 2008 - 2010 B Labs Ltd.
+ */
+#include
+#include
+#include
+#include __INC_ARCH(mm.h)
+#include INC_SUBARCH(mm.h)
+#include INC_SUBARCH(exception.h)
+
+/* Get simplified access permissions */
+int pte_get_access_simple(pte_t pte)
+{
+ /* Place AP[2] and AP[1] in [1:0] positions and return */
+ return (((pte >> PTE_AP2_BIT) & 1) << 1)
+ | ((pte >> PTE_AP1_BIT) & 1);
+}
+
+int is_translation_fault(u32 fsr)
+{
+ return (fsr & FSR_FS_MASK) == ABORT_TRANSLATION_PAGE;
+}
+
+unsigned int vm_prot_flags(pte_t pte, u32 fsr)
+{
+ unsigned int pte_prot_flags = 0;
+
+ /* Translation fault means no permissions */
+ if (is_translation_fault(fsr))
+ return VM_NONE;
+
+ /* Check simplified permission bits */
+ switch (pte_get_access_simple(pte)) {
+ case AP_SIMPLE_USER_RW_KERN_RW:
+ pte_prot_flags |= VM_WRITE;
+ case AP_SIMPLE_USER_RO_KERN_RO:
+ pte_prot_flags |= VM_READ;
+
+ /* Also, check exec never bit */
+ if (!(pte & (1 << PTE_XN_BIT)))
+ pte_prot_flags |= VM_EXEC;
+ break;
+ case AP_SIMPLE_USER_NONE_KERN_RW:
+ case AP_SIMPLE_USER_NONE_KERN_RO:
+ default:
+ pte_prot_flags = VM_NONE;
+ break;
+ }
+
+ return pte_prot_flags;
+}
+
+void set_generic_fault_params(struct fault_data *fault)
+{
+ fault->pte_flags = vm_prot_flags(fault->kdata->pte, fault->kdata->fsr);
+ fault->reason = 0;
+
+ /*
+ * Prefetch fault denotes exec fault.
+ */
+ if (is_prefetch_abort(fault->kdata->fsr)) {
+ fault->reason |= VM_EXEC;
+ fault->address = fault->kdata->faulty_pc;
+ } else {
+ fault->address = fault->kdata->far;
+
+ /* Write-not-read bit determines fault */
+ if (fault->kdata->fsr & (1 << DFSR_WNR_BIT))
+ fault->reason |= VM_WRITE;
+ else
+ fault->reason |= VM_READ;
+ }
+ arch_print_fault_params(fault);
+}
+
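As a worked check of the decoding above: a pte with AP[2] = 0 and AP[1] = 1 makes pte_get_access_simple() return 0b01, i.e. AP_SIMPLE_USER_RW_KERN_RW; with the XN bit clear and no translation fault, vm_prot_flags() then yields full permissions. A sketch (the fsr value name is an assumption):

    pte_t pte = (0 << PTE_AP2_BIT) | (1 << PTE_AP1_BIT); /* user RW, kern RW */
    u32 fsr = ABORT_PERMISSION_PAGE; /* assumed: permission, not translation, fault */

    /* pte_get_access_simple(pte) == AP_SIMPLE_USER_RW_KERN_RW */
    unsigned int prot = vm_prot_flags(pte, fsr);
    /* prot == (VM_READ | VM_WRITE | VM_EXEC) when the XN bit is clear */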
Index: conts/posix/mm0/mm/arch/or1k
===================================================================
--- conts/posix/mm0/mm/arch/or1k (nonexistent)
+++ conts/posix/mm0/mm/arch/or1k (revision 7)
conts/posix/mm0/mm/arch/or1k
Property changes :
Added: svn:mergeinfo
## -0,0 +0,0 ##
Index: conts/libl4/include/l4lib/arch/or1k/irq.h
===================================================================
--- conts/libl4/include/l4lib/arch/or1k/irq.h (nonexistent)
+++ conts/libl4/include/l4lib/arch/or1k/irq.h (revision 7)
@@ -0,0 +1,11 @@
+#ifndef __L4LIB_ARCH_IRQ_H__
+#define __L4LIB_ARCH_IRQ_H__
+
+/*
+ * Destructive atomic-read.
+ *
+ * Write 0 to byte at @location as its contents are read back.
+ */
+char l4_atomic_dest_readb(void *location);
+
+#endif
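A usage sketch for the destructive read above, draining an irq pending count that the kernel increments; irq_count and the handler are hypothetical:

    extern char irq_count;      /* assumed byte shared with the kernel */
    char pending;

    /* Each call returns the accumulated count and atomically zeroes it */
    while ((pending = l4_atomic_dest_readb(&irq_count)) != 0)
            handle_pending_irqs(pending);   /* hypothetical handler */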
Index: conts/libl4/include/l4lib/arch/or1k/utcb.h
===================================================================
--- conts/libl4/include/l4lib/arch/or1k/utcb.h (nonexistent)
+++ conts/libl4/include/l4lib/arch/or1k/utcb.h (revision 7)
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2009 Bahadir Bilgehan Balban
+ */
+#ifndef __ARM_UTCB_H__
+#define __ARM_UTCB_H__
+
+#define USER_UTCB_REF 0xFF000050
+#define L4_KIP_ADDRESS 0xFF000000
+#define UTCB_KIP_OFFSET 0x50
+
+#ifndef __ASSEMBLY__
+#include
+#include
+#include
+#include INC_GLUE(message.h)
+#include INC_GLUE(memory.h)
+#include
+#include
+#include L4LIB_INC_SUBARCH(utcb.h)
+
+/*
+ * See kernel glue/arch/message.h for utcb details
+ */
+extern struct kip *kip;
+
+/* Functions to read/write utcb registers */
+static inline unsigned int read_mr(int offset)
+{
+ if (offset < MR_TOTAL)
+ return l4_get_utcb()->mr[offset];
+ else
+ return l4_get_utcb()->mr_rest[offset - MR_TOTAL];
+}
+
+static inline void write_mr(unsigned int offset, unsigned int val)
+{
+ if (offset < MR_TOTAL)
+ l4_get_utcb()->mr[offset] = val;
+ else
+ l4_get_utcb()->mr_rest[offset - MR_TOTAL] = val;
+}
+
+
+static inline void *utcb_full_buffer()
+{
+ return &l4_get_utcb()->mr_rest[0];
+}
+
+static inline char *utcb_full_strcpy_from(const char *src)
+{
+ return strncpy((char *)&l4_get_utcb()->mr_rest[0], src,
+ L4_UTCB_FULL_BUFFER_SIZE);
+}
+
+static inline void *utcb_full_memcpy_from(const char *src, int size)
+{
+ return memcpy(&l4_get_utcb()->mr_rest[0], src,
+ min(size, L4_UTCB_FULL_BUFFER_SIZE));
+}
+
+static inline char *utcb_full_strcpy_to(char *dst)
+{
+ return strncpy(dst, (char *)&l4_get_utcb()->mr_rest[0],
+ L4_UTCB_FULL_BUFFER_SIZE);
+}
+
+static inline void *utcb_full_memcpy_to(char *dst, int size)
+{
+ return memcpy(dst, &l4_get_utcb()->mr_rest[0],
+ min(size, L4_UTCB_FULL_BUFFER_SIZE));
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ARM_UTCB_H__ */
Index: conts/libl4/include/l4lib/arch/or1k/syscalls.h
===================================================================
--- conts/libl4/include/l4lib/arch/or1k/syscalls.h (nonexistent)
+++ conts/libl4/include/l4lib/arch/or1k/syscalls.h (revision 7)
@@ -0,0 +1,95 @@
+/*
+ * System call prototypes.
+ *
+ * Copyright (C) 2007 Bahadir Balban
+ */
+#ifndef __ARM_SYSCALLS_H__
+#define __ARM_SYSCALLS_H__
+
+
+#include L4LIB_INC_ARCH(types.h)
+#include L4LIB_INC_ARCH(utcb.h)
+#include
+#include
+#include
+#include
+#include
+
+struct task_ids {
+ l4id_t tid;
+ l4id_t spid;
+ l4id_t tgid;
+};
+
+static inline void *
+l4_kernel_interface(unsigned int *api_version, unsigned int *api_flags,
+ unsigned int *kernel_id)
+{
+ return (void *)L4_KIP_ADDRESS;
+}
+
+typedef unsigned int (*__l4_thread_switch_t)(u32);
+extern __l4_thread_switch_t __l4_thread_switch;
+unsigned int l4_thread_switch (u32 dest);
+
+typedef int (*__l4_getid_t)(struct task_ids *ids);
+extern __l4_getid_t __l4_getid;
+int l4_getid(struct task_ids *ids);
+
+typedef int (*__l4_ipc_t)(l4id_t to, l4id_t from, u32 flags);
+extern __l4_ipc_t __l4_ipc;
+int l4_ipc(l4id_t to, l4id_t from, u32 flags);
+
+typedef int (*__l4_capability_control_t)(unsigned int req, unsigned int flags, void *buf);
+extern __l4_capability_control_t __l4_capability_control;
+int l4_capability_control(unsigned int req, unsigned int flags, void *buf);
+
+typedef int (*__l4_map_t)(void *phys, void *virt,
+ u32 npages, u32 flags, l4id_t tid);
+extern __l4_map_t __l4_map;
+int l4_map(void *p, void *v, u32 npages, u32 flags, l4id_t tid);
+
+typedef int (*__l4_unmap_t)(void *virt, unsigned long npages, l4id_t tid);
+extern __l4_unmap_t __l4_unmap;
+int l4_unmap(void *virtual, unsigned long numpages, l4id_t tid);
+
+typedef int (*__l4_thread_control_t)(unsigned int action, struct task_ids *ids);
+extern __l4_thread_control_t __l4_thread_control;
+int l4_thread_control(unsigned int action, struct task_ids *ids);
+
+typedef int (*__l4_irq_control_t)(unsigned int req, unsigned int flags, l4id_t id);
+extern __l4_irq_control_t __l4_irq_control;
+int l4_irq_control(unsigned int req, unsigned int flags, l4id_t id);
+
+typedef int (*__l4_ipc_control_t)(unsigned int action, l4id_t blocked_sender,
+ u32 blocked_tag);
+extern __l4_ipc_control_t __l4_ipc_control;
+int l4_ipc_control(unsigned int, l4id_t blocked_sender, u32 blocked_tag);
+
+typedef int (*__l4_exchange_registers_t)(void *exregs_struct, l4id_t tid);
+extern __l4_exchange_registers_t __l4_exchange_registers;
+int l4_exchange_registers(void *exregs_struct, l4id_t tid);
+
+typedef int (*__l4_container_control_t)(unsigned int req, unsigned int flags, void *buf);
+extern __l4_container_control_t __l4_container_control;
+int l4_container_control(unsigned int req, unsigned int flags, void *buf);
+
+typedef int (*__l4_time_t)(void *timeval, int set);
+extern __l4_time_t __l4_time;
+int l4_time(void *timeval, int set);
+
+typedef int (*__l4_mutex_control_t)(void *mutex_word, int op);
+extern __l4_mutex_control_t __l4_mutex_control;
+int l4_mutex_control(void *mutex_word, int op);
+
+typedef int (*__l4_cache_control_t)(void *start, void *end, unsigned int flags);
+extern __l4_cache_control_t __l4_cache_control;
+int l4_cache_control(void *start, void *end, unsigned int flags);
+
+/* To be supplied by server tasks. */
+void *virt_to_phys(void *);
+void *phys_to_virt(void *);
+
+
+#endif /* __ARM_SYSCALLS_H__ */
+
Index: conts/libl4/include/l4lib/arch/or1k/asm.h
===================================================================
--- conts/libl4/include/l4lib/arch/or1k/asm.h (nonexistent)
+++ conts/libl4/include/l4lib/arch/or1k/asm.h (revision 7)
@@ -0,0 +1,15 @@
+#ifndef __ARM_ASM_H__
+#define __ARM_ASM_H__
+
+#define BEGIN_PROC(name) \
+ .global name; \
+ .type name,function; \
+ .align; \
+name:
+
+#define END_PROC(name) \
+.fend_##name: \
+ .size name,.fend_##name - name;
+
+#endif /* __ARM_ASM_H__ */
+
Index: conts/libl4/include/l4lib/arch/or1k/v5/utcb.h
===================================================================
--- conts/libl4/include/l4lib/arch/or1k/v5/utcb.h (nonexistent)
+++ conts/libl4/include/l4lib/arch/or1k/v5/utcb.h (revision 7)
@@ -0,0 +1,21 @@
+#ifndef __ARM_V5_UTCB_H__
+#define __ARM_V5_UTCB_H__
+
+/*
+ * Pointer to Kernel Interface Page's UTCB pointer offset.
+ */
+extern struct utcb **kip_utcb_ref;
+
+static inline struct utcb *l4_get_utcb()
+{
+ /*
+ * By double dereferencing, we get the private TLS
+ * (aka UTCB). The first dereference reaches the KIP's
+ * utcb pointer, the second the utcb itself; the kernel
+ * updates the KIP's utcb reference on every context
+ * switch.
+ */
+ return *kip_utcb_ref;
+}
+
+#endif /* __ARM_V5_UTCB_H__ */
Index: conts/libl4/include/l4lib/arch/or1k/v5/perfmon.h
===================================================================
--- conts/libl4/include/l4lib/arch/or1k/v5/perfmon.h (nonexistent)
+++ conts/libl4/include/l4lib/arch/or1k/v5/perfmon.h (revision 7)
@@ -0,0 +1,3 @@
+#ifndef __PERFMON_H__
+#define __PERFMON_H__
+
+#endif
Index: conts/libl4/include/l4lib/arch/or1k/types.h
===================================================================
--- conts/libl4/include/l4lib/arch/or1k/types.h (nonexistent)
+++ conts/libl4/include/l4lib/arch/or1k/types.h (revision 7)
@@ -0,0 +1,8 @@
+#ifndef __L4LIB_ARM_TYPES_H__
+#define __L4LIB_ARM_TYPES_H__
+
+#define TASK_ID_INVALID 0xFFFFFFFF
+
+#include
+
+#endif /* __L4LIB_ARM_TYPES_H__ */
Index: conts/libl4/include/l4lib/arch/or1k/syslib.h
===================================================================
--- conts/libl4/include/l4lib/arch/or1k/syslib.h (nonexistent)
+++ conts/libl4/include/l4lib/arch/or1k/syslib.h (revision 7)
@@ -0,0 +1,366 @@
+/*
+ * Helper functions that wrap raw l4 syscalls.
+ *
+ * Copyright (C) 2007-2009 Bahadir Bilgehan Balban
+ */
+
+#ifndef __L4LIB_SYSLIB_H__
+#define __L4LIB_SYSLIB_H__
+
+#include
+#include
+#include L4LIB_INC_ARCH(syscalls.h)
+
+/*
+ * NOTE:
+ * It's best to use these wrappers because they generalise the way
+ * common ipc data like the sender id, error and ipc tag are passed
+ * between ipc parties.
+ *
+ * The arguments to l4_ipc() are used by the microkernel to initiate
+ * the ipc. Any data passed in message registers may or may not be
+ * a duplicate of this data, but the distinction is that anything
+ * passed via the mrs is meant to be used by the other party
+ * participating in the ipc.
+ */
+
+/* For system call arguments */
+#define L4SYS_ARG0 (MR_UNUSED_START)
+#define L4SYS_ARG1 (MR_UNUSED_START + 1)
+#define L4SYS_ARG2 (MR_UNUSED_START + 2)
+#define L4SYS_ARG3 (MR_UNUSED_START + 3)
+
+
+#define L4_IPC_TAG_MASK 0x00000FFF
+
+
+/*
+ * Servers get sender.
+ */
+static inline l4id_t l4_get_sender(void)
+{
+ return (l4id_t)read_mr(MR_SENDER);
+}
+
+/*
+ * When doing an ipc the sender never has to be explicitly set in
+ * the utcb via this function since this information is found out
+ * by the microkernel by checking the system caller's id. This is
+ * only used for restoring the sender on the utcb in order to
+ * complete an earlier ipc.
+ */
+static inline void l4_set_sender(l4id_t sender)
+{
+ write_mr(MR_SENDER, sender);
+}
+
+static inline unsigned int l4_set_ipc_size(unsigned int word, unsigned int size)
+{
+ word &= ~L4_IPC_FLAGS_SIZE_MASK;
+ word |= ((size << L4_IPC_FLAGS_SIZE_SHIFT) & L4_IPC_FLAGS_SIZE_MASK);
+ return word;
+}
+
+static inline unsigned int l4_get_ipc_size(unsigned int word)
+{
+ return (word & L4_IPC_FLAGS_SIZE_MASK) >> L4_IPC_FLAGS_SIZE_SHIFT;
+}
+
+static inline unsigned int l4_set_ipc_msg_index(unsigned int word, unsigned int index)
+{
+ /* FIXME: Define MR_PRIMARY_TOTAL, MR_TOTAL etc. and use MR_TOTAL HERE! */
+ BUG_ON(index > UTCB_SIZE);
+
+ word &= ~L4_IPC_FLAGS_MSG_INDEX_MASK;
+ word |= (index << L4_IPC_FLAGS_MSG_INDEX_SHIFT) &
+ L4_IPC_FLAGS_MSG_INDEX_MASK;
+ return word;
+}
+
+static inline unsigned int l4_get_ipc_msg_index(unsigned int word)
+{
+ return (word & L4_IPC_FLAGS_MSG_INDEX_MASK)
+ >> L4_IPC_FLAGS_MSG_INDEX_SHIFT;
+}
+
+static inline unsigned int l4_set_ipc_flags(unsigned int word, unsigned int flags)
+{
+ word &= ~L4_IPC_FLAGS_TYPE_MASK;
+ word |= flags & L4_IPC_FLAGS_TYPE_MASK;
+ return word;
+}
+
+static inline unsigned int l4_get_ipc_flags(unsigned int word)
+{
+ return word & L4_IPC_FLAGS_TYPE_MASK;
+}
+
+static inline unsigned int l4_get_tag(void)
+{
+ return read_mr(MR_TAG) & L4_IPC_TAG_MASK;
+}
+
+static inline void l4_set_tag(unsigned int tag)
+{
+ unsigned int tag_flags = read_mr(MR_TAG);
+
+ tag_flags &= ~L4_IPC_TAG_MASK;
+ tag_flags |= tag & L4_IPC_TAG_MASK;
+
+ write_mr(MR_TAG, tag_flags);
+}
+
+/* Servers:
+ * Sets the message register for returning errors back to client task.
+ * These are usually posix error codes.
+ */
+static inline void l4_set_retval(int retval)
+{
+ write_mr(MR_RETURN, retval);
+}
+
+/* Clients:
+ * Learn result of request.
+ */
+static inline int l4_get_retval(void)
+{
+ return read_mr(MR_RETURN);
+}
+
+/*
+ * This is useful for stacked IPC. A stacked IPC happens
+ * when a new IPC is initiated before concluding the current
+ * one.
+ *
+ * This saves the last ipc's parameters such as the sender
+ * and tag information. Any previously saved data in the save
+ * slots is destroyed. This is fine, as IPC stacking is only
+ * useful if done once.
+ */
+static inline void l4_save_ipcregs(void)
+{
+ l4_get_utcb()->saved_sender = l4_get_sender();
+ l4_get_utcb()->saved_tag = l4_get_tag();
+}
+
+static inline void l4_restore_ipcregs(void)
+{
+ l4_set_tag(l4_get_utcb()->saved_tag);
+ l4_set_sender(l4_get_utcb()->saved_sender);
+}
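A sketch of the stacking pattern described above: a server parks the client's ipc state, performs a nested ipc of its own, then restores and replies. The helper id and tag are illustrative:

    void forward_request(l4id_t helper, unsigned int tag)
    {
            l4_save_ipcregs();                  /* park the client's sender/tag */
            l4_sendrecv(helper, helper, tag);   /* nested ipc to a helper task */
            l4_restore_ipcregs();               /* back to the client's context */
            l4_ipc_return(l4_get_retval());     /* conclude the original ipc */
    }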
+
+#define TASK_CID_MASK 0xFF000000
+#define TASK_ID_MASK 0x00FFFFFF
+#define TASK_CID_SHIFT 24
+
+static inline l4id_t __raw_tid(l4id_t tid)
+{
+ return tid & TASK_ID_MASK;
+}
+
+static inline l4id_t __cid(l4id_t tid)
+{
+ return (tid & TASK_CID_MASK) >> TASK_CID_SHIFT;
+}
+
+static inline l4id_t self_tid(void)
+{
+ struct task_ids ids;
+
+ l4_getid(&ids);
+ return ids.tid;
+}
+
+static inline l4id_t __raw_self_tid(void)
+{
+ return __raw_tid(self_tid());
+}
+
+static inline int l4_send_full(l4id_t to, unsigned int tag)
+{
+ l4_set_tag(tag);
+ return l4_ipc(to, L4_NILTHREAD, L4_IPC_FLAGS_FULL);
+}
+
+static inline int l4_receive_full(l4id_t from)
+{
+ return l4_ipc(L4_NILTHREAD, from, L4_IPC_FLAGS_FULL);
+}
+
+static inline int l4_sendrecv_full(l4id_t to, l4id_t from, unsigned int tag)
+{
+ int err;
+
+ BUG_ON(to == L4_NILTHREAD || from == L4_NILTHREAD);
+ l4_set_tag(tag);
+
+ err = l4_ipc(to, from, L4_IPC_FLAGS_FULL);
+
+ return err;
+}
+
+static inline int l4_send_extended(l4id_t to, unsigned int tag,
+ unsigned int size, void *buf)
+{
+ unsigned int flags = 0;
+
+ l4_set_tag(tag);
+
+ /* Set up flags word for extended ipc */
+ flags = l4_set_ipc_flags(flags, L4_IPC_FLAGS_EXTENDED);
+ flags = l4_set_ipc_size(flags, size);
+ flags = l4_set_ipc_msg_index(flags, L4SYS_ARG0);
+
+ /* Write buffer pointer to MR index that we specified */
+ write_mr(L4SYS_ARG0, (unsigned long)buf);
+
+ return l4_ipc(to, L4_NILTHREAD, flags);
+}
+
+static inline int l4_receive_extended(l4id_t from, unsigned int size, void *buf)
+{
+ unsigned int flags = 0;
+
+ /* Indicate extended receive */
+ flags = l4_set_ipc_flags(flags, L4_IPC_FLAGS_EXTENDED);
+
+ /* How much data is accepted */
+ flags = l4_set_ipc_size(flags, size);
+
+ /* Indicate which MR index buffer pointer is stored */
+ flags = l4_set_ipc_msg_index(flags, L4SYS_ARG0);
+
+ /* Set MR with buffer to receive data */
+ write_mr(L4SYS_ARG0, (unsigned long)buf);
+
+ return l4_ipc(L4_NILTHREAD, from, flags);
+}
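A sketch pairing the two extended-ipc calls above; server_id, client_id and REQUEST_TAG are assumptions, and buffers must respect the 2KB extended-copy limit noted further below:

    char reqbuf[256];

    /* Client: push the request buffer to the server */
    l4_send_extended(server_id, REQUEST_TAG, sizeof(reqbuf), reqbuf);

    /* Server: accept at most sizeof(reqbuf) bytes from that client */
    l4_receive_extended(client_id, sizeof(reqbuf), reqbuf);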
+
+/*
+ * Return result value as extended IPC.
+ *
+ * Extended IPC copies up to 2KB user address space buffers.
+ * Along with such an ipc, a return value is sent in the primary
+ * mr that acts as the return register.
+ *
+ * It may not be desirable to return a payload under certain conditions
+ * (such as an error return value), so a nopayload field is provided.
+ */
+static inline int l4_return_extended(int retval, unsigned int size,
+ void *buf, int nopayload)
+{
+ unsigned int flags = 0;
+ l4id_t sender = l4_get_sender();
+
+ l4_set_retval(retval);
+
+ /* Set up flags word for extended ipc */
+ flags = l4_set_ipc_flags(flags, L4_IPC_FLAGS_EXTENDED);
+ flags = l4_set_ipc_msg_index(flags, L4SYS_ARG0);
+
+ /* Write buffer pointer to MR index that we specified */
+ write_mr(L4SYS_ARG0, (unsigned long)buf);
+
+ if (nopayload)
+ flags = l4_set_ipc_size(flags, 0);
+ else
+ flags = l4_set_ipc_size(flags, size);
+
+ return l4_ipc(sender, L4_NILTHREAD, flags);
+}
+
+static inline int l4_sendrecv_extended(l4id_t to, l4id_t from,
+ unsigned int tag, void *buf)
+{
+ /* Need to imitate sendrecv but with extended send/recv flags */
+ return 0;
+}
+
+static inline int l4_send(l4id_t to, unsigned int tag)
+{
+ l4_set_tag(tag);
+
+ return l4_ipc(to, L4_NILTHREAD, 0);
+}
+
+static inline int l4_sendrecv(l4id_t to, l4id_t from, unsigned int tag)
+{
+ int err;
+
+ BUG_ON(to == L4_NILTHREAD || from == L4_NILTHREAD);
+ l4_set_tag(tag);
+
+ err = l4_ipc(to, from, 0);
+
+ return err;
+}
+
+static inline int l4_receive(l4id_t from)
+{
+ return l4_ipc(L4_NILTHREAD, from, 0);
+}
+
+static inline void l4_print_mrs()
+{
+ printf("Message registers: 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n",
+ read_mr(0), read_mr(1), read_mr(2), read_mr(3),
+ read_mr(4), read_mr(5));
+}
+
+/* Servers:
+ * Return the ipc result back to requesting task.
+ */
+static inline int l4_ipc_return(int retval)
+{
+ l4id_t sender = l4_get_sender();
+
+ l4_set_retval(retval);
+
+ /* Setting the tag would overwrite retval, so we send without tagging */
+ return l4_ipc(sender, L4_NILTHREAD, 0);
+}
+
+void *l4_new_virtual(int npages);
+void *l4_del_virtual(void *virt, int npages);
+
+/* A helper that translates and maps a physical address to virtual */
+static inline void *l4_map_helper(void *phys, int npages)
+{
+ struct task_ids ids;
+ int err;
+
+ void *virt = l4_new_virtual(npages);
+
+ l4_getid(&ids);
+
+ if ((err = l4_map(phys, virt, npages,
+ MAP_USR_DEFAULT, ids.tid)) < 0)
+ return PTR_ERR(err);
+
+ return virt;
+}
+
+
+/* A helper that unmaps a virtual address and releases its virtual region */
+static inline void *l4_unmap_helper(void *virt, int npages)
+{
+ struct task_ids ids;
+
+ l4_getid(&ids);
+ l4_unmap(virt, npages, ids.tid);
+ l4_del_virtual(virt, npages);
+ return 0;
+}
+
+#define L4_EXIT_MASK 0xFFFF
+
+static inline void l4_exit(unsigned int exit_code)
+{
+ struct task_ids ids;
+ l4_getid(&ids);
+ l4_thread_control(THREAD_DESTROY |
+ (exit_code & L4_EXIT_MASK),
+ &ids);
+}
+
+#endif /* __L4LIB_SYSLIB_H__ */
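Tying the wrappers together, a minimal single-client server sketch; ECHO_TAG and the demultiplexing policy are assumptions, not part of this header:

    void serve_one(l4id_t client)
    {
            int ret;

            l4_receive(client);         /* block until the client calls in */
            switch (l4_get_tag()) {     /* demultiplex on the ipc tag */
            case ECHO_TAG:              /* assumed request tag */
                    ret = 0;
                    break;
            default:
                    ret = -1;           /* unknown request */
                    break;
            }
            l4_ipc_return(ret);         /* reply to l4_get_sender() */
    }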
Index: conts/libl4/include/l4lib/arch/or1k/v7/utcb.h
===================================================================
--- conts/libl4/include/l4lib/arch/or1k/v7/utcb.h (nonexistent)
+++ conts/libl4/include/l4lib/arch/or1k/v7/utcb.h (revision 7)
@@ -0,0 +1,59 @@
+#ifndef __ARM_V7_UTCB_H__
+#define __ARM_V7_UTCB_H__
+
+/*
+ * NOTE: If you change anything here, you *MUST* also change
+ * the utcb_address() macro in the syscall.S assembler.
+ */
+
+/* Read Thread ID User RW register */
+static inline u32 l4_cp15_read_tid_usr_rw(void)
+{
+ volatile u32 val;
+
+ __asm__ __volatile__ (
+ "mrc p15, 0, %0, c13, c0, 2"
+ : "=r" (val)
+ :
+ );
+
+ return val;
+}
+
+/* Write Thread ID User RW register */
+static inline void l4_cp15_write_tid_usr_rw(volatile u32 val)
+{
+ __asm__ __volatile__ (
+ "mcr p15, 0, %0, c13, c0, 2"
+ :
+ : "r" (val)
+ );
+}
+
+/* Read Thread ID User RO register */
+static inline u32 l4_cp15_read_tid_usr_ro(void)
+{
+ volatile u32 val;
+
+ __asm__ __volatile__ (
+ "mrc p15, 0, %0, c13, c0, 3"
+ : "=r" (val)
+ :
+ );
+
+ return val;
+}
+
+/*
+ * In ARMv7, the utcb pointer resides in the userspace
+ * read-only thread ID register. This avoids dirtying
+ * the cache and, since the register is per-cpu, spares
+ * extra management on smp.
+ */
+static inline struct utcb *l4_get_utcb()
+{
+// printf("%s: UTCB Address: 0x%x\n", __FUNCTION__, l4_cp15_read_tid_usr_ro());
+ return (struct utcb *)l4_cp15_read_tid_usr_ro();
+}
+
+#endif /* __ARM_V7_UTCB_H__ */
Index: conts/libl4/include/l4lib/arch/or1k/v7/perfmon.h
===================================================================
--- conts/libl4/include/l4lib/arch/or1k/v7/perfmon.h (nonexistent)
+++ conts/libl4/include/l4lib/arch/or1k/v7/perfmon.h (revision 7)
@@ -0,0 +1,405 @@
+/*
+ * ARMv7 Performance Monitor operations
+ *
+ * Copyright (C) 2010 B Labs Ltd.
+ *
+ * Author: Bahadir Balban
+ */
+#ifndef __PERFMON_H__
+#define __PERFMON_H__
+
+#include
+
+/* Perfmon control register */
+#define PMCR_DP_BIT 5 /* Disable prohibited */
+#define PMCR_X_BIT 4 /* Export event enable */
+#define PMCR_D_BIT 3 /* 64-cycle granularity */
+#define PMCR_C_BIT 2 /* PMCCNTR reset */
+#define PMCR_P_BIT 1 /* Events all reset */
+#define PMCR_E_BIT 0 /* Enable all */
+
+/* Obtain number of event counters */
+#define PMCR_N_SHIFT 11
+#define PMCR_N_MASK 0x1F
+
+/* Special bit for cycle counter */
+#define PMCCNTR_BIT 31
+
+
+/*
+ * Performance Events
+ */
+
+/* Generic v7 events */
+#define PERFMON_EVENT_SOFTINC 0
+#define PERFMON_EVENT_IFETCH_L1CREFILL 1
+#define PERFMON_EVENT_IFETCH_TLBREFILL 2
+#define PERFMON_EVENT_DFETCH_L1CREFILL 3
+#define PERFMON_EVENT_DFETCH_L1CACCESS 4
+#define PERFMON_EVENT_DFETCH_TLBREFILL 5
+#define PERFMON_EVENT_MEMREAD_INSTR 6
+#define PERFMON_EVENT_MEMWRITE_INSTR 7
+#define PERFMON_EVENT_ALL_INSTR 8
+#define PERFMON_EVENT_EXCEPTION 9
+#define PERFMON_EVENT_EXCEPTION_RETURN 10
+#define PERFMON_EVENT_CONTEXTIDR_CHANGE 11
+#define PERFMON_EVENT_PC_CHANGE 12
+#define PERFMON_EVENT_IMM_BRANCH 13
+#define PERFMON_EVENT_FUNCTION_RETURN 14
+#define PERFMON_EVENT_UNALIGNED_ACCESS 15
+#define PERFMON_EVENT_BRANCH_MISS 16
+#define PERFMON_EVENT_RAW_CYCLE_COUNT 17
+#define PERFMON_EVENT_BRANCH_MAYBEHIT 18
+
+/*
+ * Cortex-A9 events (only relevant ones)
+ * 0x40-2, 0x6E, 0x70, 0x71-4, 0x80-0x81, 0x8A-8B
+ * 0xA0-5 omitted
+ */
+
+/*
+ * Linefill not satisfied from other cpu caches but
+ * has to go to external memory
+ */
+#define PERFMON_EVENT_SMP_LINEFILL_MISS 0x50
+
+/* Linefill satisfied from other cpu caches */
+#define PERFMON_EVENT_SMP_LINEFILL_HIT 0x51
+
+/* Icache refill stall cycles on cpu pipeline */
+#define PERFMON_EVENT_ICACHE_CPU_STALL 0x60
+
+/* Dcache refill stall cycles on cpu pipeline */
+#define PERFMON_EVENT_DCACHE_CPU_STALL 0x61
+
+/* TLB miss stall cycles on cpu pipeline */
+#define PERFMON_EVENT_TLBMISS_CPU_STALL 0x62
+
+#define PERFMON_EVENT_STREX_SUCCESS 0x63
+#define PERFMON_EVENT_STREX_FAIL 0x64
+#define PERFMON_EVENT_DCACHE_EVICTION 0x65
+
+/* Issue stage can't proceed to dispatch any instruction */
+#define PERFMON_EVENT_PIPELINE_CANT_ISSUE 0x66
+
+/* Issue stage empty */
+#define PERFMON_EVENT_PIPELINE_ISSUE_EMPTY 0x67
+
+/* Register renamed instructions */
+#define PERFMON_EVENT_REGRENAMED_INSTR 0x68
+
+#define PERFMON_EVENT_CPUSTALL_ITLB_MISS 0x82
+#define PERFMON_EVENT_CPUSTALL_DTLB_MISS 0x83
+#define PERFMON_EVENT_CPUSTALL_IUTLB_MISS 0x84
+#define PERFMON_EVENT_CPUSTALL_DUTLB_MISS 0x85
+#define PERFMON_EVENT_CPUSTALL_DMB 0x86
+#define PERFMON_EVENT_ISB_COUNT 0x90
+#define PERFMON_EVENT_DSB_COUNT 0x91
+#define PERFMON_EVENT_DMB_COUNT 0x92
+#define PERFMON_EVENT_EXTIRQ_COUNT 0x93
+
+
+static inline u32 __attribute__((always_inline))
+cp15_read_perfmon_ctrl(void)
+{
+ volatile u32 val = 0;
+
+ __asm__ __volatile__ (
+ "mrc p15, 0, %0, c9, c12, 0\n"
+ "isb\n"
+ : "=r" (val)
+ :
+ );
+
+ return val;
+}
+
+static inline void __attribute__((always_inline))
+cp15_write_perfmon_ctrl(volatile u32 word)
+{
+ __asm__ __volatile__ (
+ "mcr p15, 0, %0, c9, c12, 0"
+ :
+ : "r" (word)
+ );
+}
+
+static inline u32 __attribute__((always_inline))
+cp15_read_perfmon_cntenset(void)
+{
+ volatile u32 val = 0;
+
+ __asm__ __volatile__ (
+ "mrc p15, 0, %0, c9, c12, 1\n"
+ "isb\n"
+ : "=r" (val)
+ :
+ );
+
+ return val;
+}
+
+static inline void __attribute__((always_inline))
+cp15_write_perfmon_cntenset(volatile u32 word)
+{
+ __asm__ __volatile__ (
+ "mcr p15, 0, %0, c9, c12, 1"
+ :
+ : "r" (word)
+ );
+}
+
+static inline u32 __attribute__((always_inline))
+cp15_read_perfmon_cntenclr(void)
+{
+ u32 val = 0;
+
+ __asm__ __volatile__ (
+ "mrc p15, 0, %0, c9, c12, 2"
+ : "=r" (val)
+ :
+ );
+
+ return val;
+}
+
+static inline void __attribute__((always_inline))
+cp15_write_perfmon_cntenclr(volatile u32 word)
+{
+ __asm__ __volatile__ (
+ "mcr p15, 0, %0, c9, c12, 2"
+ :
+ : "r" (word)
+ );
+}
+
+
+static inline u32 __attribute__((always_inline))
+cp15_read_perfmon_overflow(void)
+{
+ u32 val = 0;
+
+ __asm__ __volatile__ (
+ "mrc p15, 0, %0, c9, c12, 3"
+ : "=r" (val)
+ :
+ );
+
+ return val;
+}
+
+static inline void __attribute__((always_inline))
+cp15_write_perfmon_overflow(volatile u32 word)
+{
+ __asm__ __volatile__ (
+ "mcr p15, 0, %0, c9, c12, 3"
+ :
+ : "r" (word)
+ );
+}
+
+static inline void __attribute__((always_inline))
+cp15_write_perfmon_softinc(volatile u32 word)
+{
+ __asm__ __volatile__ (
+ "mcr p15, 0, %0, c9, c12, 4"
+ :
+ : "r" (word)
+ );
+}
+
+static inline u32 __attribute__((always_inline))
+cp15_read_perfmon_evcntsel(void)
+{
+ u32 val = 0;
+
+ __asm__ __volatile__ (
+ "mrc p15, 0, %0, c9, c12, 5"
+ : "=r" (val)
+ :
+ );
+
+ return val;
+}
+
+static inline void __attribute__((always_inline))
+cp15_write_perfmon_evcntsel(volatile u32 word)
+{
+ __asm__ __volatile__ (
+ "mcr p15, 0, %0, c9, c12, 5"
+ :
+ : "r" (word)
+ );
+}
+
+static inline u32 __attribute__((always_inline))
+cp15_read_perfmon_cyccnt(void)
+{
+ volatile u32 val = 0;
+
+ __asm__ __volatile__ (
+ "mrc p15, 0, %0, c9, c13, 0\n"
+ "isb\n"
+ : "=r" (val)
+ :
+ );
+
+ return val;
+}
+
+static inline void __attribute__((always_inline))
+cp15_write_perfmon_cyccnt(volatile u32 word)
+{
+ __asm__ __volatile__ (
+ "mcr p15, 0, %0, c9, c13, 0"
+ :
+ : "r" (word)
+ );
+}
+
+static inline u32 __attribute__((always_inline))
+cp15_read_perfmon_evtypesel(void)
+{
+ u32 val = 0;
+
+ __asm__ __volatile__ (
+ "mrc p15, 0, %0, c9, c13, 1"
+ : "=r" (val)
+ :
+ );
+
+ return val;
+}
+
+static inline void __attribute__((always_inline))
+cp15_write_perfmon_evtypesel(volatile u32 word)
+{
+ __asm__ __volatile__ (
+ "mcr p15, 0, %0, c9, c13, 1"
+ :
+ : "r" (word)
+ );
+}
+
+static inline u32 __attribute__((always_inline))
+cp15_read_perfmon_evcnt(void)
+{
+ u32 val = 0;
+
+ __asm__ __volatile__ (
+ "mrc p15, 0, %0, c9, c13, 2"
+ : "=r" (val)
+ :
+ );
+
+ return val;
+}
+
+static inline void __attribute__((always_inline))
+cp15_write_perfmon_evcnt(volatile u32 word)
+{
+ __asm__ __volatile__ (
+ "mcr p15, 0, %0, c9, c13, 2"
+ :
+ : "r" (word)
+ );
+}
+
+
+static inline u32 __attribute__((always_inline))
+cp15_read_perfmon_useren(void)
+{
+ u32 val = 0;
+
+ __asm__ __volatile__ (
+ "mrc p15, 0, %0, c9, c14, 0"
+ : "=r" (val)
+ :
+ );
+
+ return val;
+}
+
+static inline void __attribute__((always_inline))
+cp15_write_perfmon_useren(volatile u32 word)
+{
+ __asm__ __volatile__ (
+ "mcr p15, 0, %0, c9, c14, 0"
+ :
+ : "r" (word)
+ );
+}
+
+static inline u32 __attribute__((always_inline))
+cp15_read_perfmon_intenset(void)
+{
+ u32 val = 0;
+
+ __asm__ __volatile__ (
+ "mrc p15, 0, %0, c9, c14, 1"
+ : "=r" (val)
+ :
+ );
+
+ return val;
+}
+
+static inline void __attribute__((always_inline))
+cp15_write_perfmon_intenset(volatile u32 word)
+{
+ __asm__ __volatile__ (
+ "mcr p15, 0, %0, c9, c14, 1"
+ :
+ : "r" (word)
+ );
+}
+
+static inline u32 __attribute__((always_inline))
+cp15_read_perfmon_intenclr(void)
+{
+ u32 val = 0;
+
+ __asm__ __volatile__ (
+ "mrc p15, 0, %0, c9, c14, 2"
+ : "=r" (val)
+ :
+ );
+
+ return val;
+}
+
+static inline void __attribute__((always_inline))
+cp15_write_perfmon_intenclr(volatile u32 word)
+{
+ __asm__ __volatile__ (
+ "mcr p15, 0, %0, c9, c14, 2"
+ :
+ : "r" (word)
+ );
+}
+
+#include
+
+#if defined (CONFIG_DEBUG_PERFMON_USER)
+static inline
+u32 perfmon_read_cyccnt()
+{
+ u32 cnt = cp15_read_perfmon_cyccnt();
+ u32 ovfl = cp15_read_perfmon_overflow();
+
+ /* Detect overflow and signal something was wrong */
+ if (ovfl & (1 << PMCCNTR_BIT))
+ printf("%s: Overflow.\n", __FUNCTION__);
+ return cnt;
+}
+
+void perfmon_reset_start_cyccnt();
+u32 perfmon_read_reset_start_cyccnt();
+
+#endif
+
+
+void perfmon_init();
+
+#endif /* __PERFMON_H__ */
+
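A sketch of timing a region with the cycle-counter helpers above, assuming perfmon_init() has already configured and enabled the unit; work_under_test() is a placeholder:

    u32 cycles;

    perfmon_reset_start_cyccnt();       /* clear and start PMCCNTR */
    work_under_test();                  /* placeholder for the measured code */
    cycles = perfmon_read_cyccnt();     /* read back; warns on overflow */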
Index: conts/libl4/include/l4lib/arch/or1k
===================================================================
--- conts/libl4/include/l4lib/arch/or1k (nonexistent)
+++ conts/libl4/include/l4lib/arch/or1k (revision 7)
conts/libl4/include/l4lib/arch/or1k
Property changes :
Added: svn:mergeinfo
## -0,0 +0,0 ##
Index: conts/libl4/src/arch/or1k/new_thread.S.ARM
===================================================================
--- conts/libl4/src/arch/or1k/new_thread.S.ARM (nonexistent)
+++ conts/libl4/src/arch/or1k/new_thread.S.ARM (revision 7)
@@ -0,0 +1,21 @@
+/*
+ * Set up new thread's argument and call its function.
+ * On return, thread_exit is entered with the return code.
+ *
+ * Copyright (C) 2010 B Labs Ltd.
+ *
+ * Author: Bahadir Balban
+ */
+#include
+#include L4LIB_INC_ARCH(asm.h)
+
+
+BEGIN_PROC(setup_new_thread)
+ ldr r0, [sp, #-4]! @ Load first argument
+ mov lr, pc @ Save return address
+ ldr pc, [sp, #-4]! @ Load function pointer from stack
+ b thread_exit @ Call l4_thread_exit for cleanup
+1:
+ b 1b @ Never reaches here
+END_PROC(setup_new_thread)
+
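The stub above pops the first argument and then the function pointer with pre-decrement loads, so the parent must lay the words out just below the child's initial stack pointer. A sketch of preparing such a stack (alignment and the exact handoff are assumptions):

    unsigned long *top = (unsigned long *)child_stack_top; /* assumed aligned */

    top[-1] = (unsigned long)arg;   /* popped first, lands in r0 */
    top[-2] = (unsigned long)func;  /* popped second, lands in pc */
    /* the child's initial sp is then set to child_stack_top,
     * e.g. via exregs_set_stack() */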
conts/libl4/src/arch/or1k/new_thread.S.ARM
Property changes :
Added: svn:mergeinfo
## -0,0 +0,0 ##
Index: conts/libl4/src/arch/or1k/v5/mutex.S.ARM
===================================================================
--- conts/libl4/src/arch/or1k/v5/mutex.S.ARM (nonexistent)
+++ conts/libl4/src/arch/or1k/v5/mutex.S.ARM (revision 7)
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2009 Bahadir Balban
+ */
+#include
+#include
+
+/*
+ * These use the same lock word both for gaining exclusive
+ * access to the word and for storing lock values.
+ */
+
+BEGIN_PROC(__l4_mutex_lock)
+ mov r2, #-2
+1:
+ swp r1, r2, [r0]
+ cmp r1, r2
+ beq 1b
+
+ @ Grabbed the lock,
+ add r1, r1, #1 @ now increment its value
+ str r1, [r0] @ Store and finish
+ cmp r1, #L4_MUTEX_LOCKED @ Have we locked it?
+ moveq r0, #L4_MUTEX_SUCCESS
+ movne r0, #L4_MUTEX_CONTENDED
+ mov pc, lr
+END_PROC(__l4_mutex_lock)
+
+
+BEGIN_PROC(__l4_mutex_unlock)
+ mov r2, #-2
+ mov r1, #L4_MUTEX_UNLOCKED
+1:
+ swp r3, r2, [r0]
+ cmp r3, r2
+ beq 1b
+
+ @ Grabbed the lock
+ str r1, [r0] @ Now store unlocked value and finish
+ mov r0, r3 @ Get the value of contenders
+ mov pc, lr
+END_PROC(__l4_mutex_unlock)
+
conts/libl4/src/arch/or1k/v5/mutex.S.ARM
Property changes :
Added: svn:mergeinfo
## -0,0 +0,0 ##
Index: conts/libl4/src/arch/or1k/v5/atomic.S.ARM
===================================================================
--- conts/libl4/src/arch/or1k/v5/atomic.S.ARM (nonexistent)
+++ conts/libl4/src/arch/or1k/v5/atomic.S.ARM (revision 7)
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2010 B Labs
+ *
+ * Author: Bahadir Balban
+ */
+
+#include
+
+/*
+ * Atomically and destructively reads a byte, i.e. the
+ * byte is read and zero is written back. This is
+ * useful for reading irq counts.
+ *
+ * @r0 = byte address
+ */
+BEGIN_PROC(l4_atomic_dest_readb)
+ mov r1, #0
+ swpb r2, r1, [r0]
+ mov r0, r2
+ mov pc, lr
+END_PROC(l4_atomic_dest_readb)
+
conts/libl4/src/arch/or1k/v5/atomic.S.ARM
Property changes :
Added: svn:mergeinfo
## -0,0 +0,0 ##
Index: conts/libl4/src/arch/or1k/v7/mutex.S.ARM
===================================================================
--- conts/libl4/src/arch/or1k/v7/mutex.S.ARM (nonexistent)
+++ conts/libl4/src/arch/or1k/v7/mutex.S.ARM (revision 7)
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2009 Bahadir Balban
+ */
+
+#include
+#include
+
+/*
+ * @r0 = address of mutex word
+ */
+BEGIN_PROC(__l4_mutex_lock)
+1:
+ ldrex r1, [r0] @ Load value
+ add r1, r1, #1 @ Add 1
+ strex r3, r1, [r0] @ Store prospective lock state
+ cmp r3, #0 @ If not successful
+ bne 1b @ Retry and decide again on the prospective lock state. No WFE, as this would be a problem on a single cpu
+ dsb
+
+ cmp r1, #L4_MUTEX_LOCKED @ We succeeded in store, but are we a locker or a contender?
+ movne r2, #L4_MUTEX_CONTENDED
+ moveq r2, #L4_MUTEX_SUCCESS
+ mov r0, r2
+ mov pc, lr
+END_PROC(__l4_mutex_lock)
+
+/*
+ * @r0 = address of mutex word
+ */
+BEGIN_PROC(__l4_mutex_unlock)
+ dsb
+ mov r3, #L4_MUTEX_UNLOCKED
+1:
+ ldrex r1, [r0]
+ strex r2, r3, [r0]
+ cmp r2, #0
+ bne 1b
+ mov r0, r1
+ mov pc, lr
+END_PROC(__l4_mutex_unlock)
+
conts/libl4/src/arch/or1k/v7/mutex.S.ARM
Property changes :
Added: svn:mergeinfo
## -0,0 +0,0 ##
Index: conts/libl4/src/arch/or1k/v7/perfmon.c
===================================================================
--- conts/libl4/src/arch/or1k/v7/perfmon.c (nonexistent)
+++ conts/libl4/src/arch/or1k/v7/perfmon.c (revision 7)
@@ -0,0 +1,45 @@
+/*
+ * Performance monitoring
+ *
+ * Copyright (C) 2010 B Labs Ltd.
+ *
+ * Author: Bahadir Balban
+ */
+#include
+
+#if defined (CONFIG_DEBUG_PERFMON_USER)
+/*
+ * Resets/restarts cycle counter
+ */
+void perfmon_reset_start_cyccnt()
+{
+ volatile u32 pmcctrl;
+
+ /* Disable the cycle counter register */
+ cp15_write_perfmon_cntenclr(1 << PMCCNTR_BIT);
+
+ /* Clear the cycle counter on ctrl register */
+ pmcctrl = cp15_read_perfmon_ctrl();
+ pmcctrl |= (1 << PMCR_C_BIT);
+ cp15_write_perfmon_ctrl(pmcctrl);
+
+ /* Clear overflow register */
+ cp15_write_perfmon_overflow(1 << PMCCNTR_BIT);
+
+ /* Enable the cycle count */
+ cp15_write_perfmon_cntenset(1 << PMCCNTR_BIT);
+}
+
+/*
+ * Reads current counter, clears and restarts it
+ */
+u32 perfmon_read_reset_start_cyccnt()
+{
+ volatile u32 cyccnt = cp15_read_perfmon_cyccnt();
+
+ perfmon_reset_start_cyccnt();
+
+ return cyccnt;
+}
+
+#endif /* End of !CONFIG_DEBUG_PERFMON_USER */
Index: conts/libl4/src/arch/or1k/syscalls.S.ARM
===================================================================
--- conts/libl4/src/arch/or1k/syscalls.S.ARM (nonexistent)
+++ conts/libl4/src/arch/or1k/syscalls.S.ARM (revision 7)
@@ -0,0 +1,235 @@
+/*
+ * Userspace system call interface.
+ *
+ * Copyright (C) 2007 - 2009 Bahadir Balban
+ */
+#include L4LIB_INC_ARCH(asm.h)
+#include L4LIB_INC_ARCH(utcb.h)
+#include
+#include
+#include INC_GLUE(message.h)
+
+
+#if defined (CONFIG_ARCH_ARM) && defined (CONFIG_SUBARCH_V7)
+ /* ARMv7 uses a special per-cpu register to keep thread-local utcb pointer */
+ .macro utcb_address rx
+ mrc p15, 0, \rx, c13, c0, 3 @ Read user-RO thread register TPIDRURO
+ .endm
+#else /* End of ARMv7 */
+ /* Get it from KIP page by double dereference */
+ .macro utcb_address rx
+ ldr \rx, =kip_utcb_ref @ First get pointer to utcb pointer in KIP
+ ldr \rx, [\rx] @ Get pointer to UTCB address from UTCB pointer in KIP
+ ldr \rx, [\rx] @ Get the utcb address
+ .endm
+#endif
+
+BEGIN_PROC(l4_thread_switch)
+ ldr r12, =__l4_thread_switch
+ ldr pc, [r12] @ Jump into the SWI. Kernel returns to LR_USR, which is the caller.
+END_PROC(l4_thread_switch)
+
+/*
+ * The syscall returns process ids. This function saves the returned values in the
+ * arguments passed by reference. @r0 = struct task_ids *
+ */
+BEGIN_PROC(l4_getid)
+ ldr r12, =__l4_getid @ See l4_kdata_read for why it's so simple.
+ ldr pc, [r12] @ Return.
+END_PROC(l4_getid)
+
+/*
+ * For clone() we need special assembler handling
+ * Same signature as ipc(): @r0 = to, @r1 = from @r2 = flags
+ *
+ * NOTE: This breaks the l4 system call interface;
+ * it should be moved elsewhere and reworked using existing l4 mechanisms.
+ */
+BEGIN_PROC(arch_clone)
+ stmfd sp!, {r4-r8,lr} @ Save context.
+ utcb_address r12 @ Get utcb address.
+ ldmia r12!, {r3-r8} @ Load 6 Message registers from utcb. MR0-MR5
+
+ ldr r12, =__l4_ipc
+ mov lr, pc
+ ldr pc, [r12] @ Perform the ipc()
+
+ /*
+ * At this moment:
+ * - MR_RETURN tells us whether we are parent or child (or have failed).
+ * - Child has new SP set, with |func_ptr|arg1|{End of stack}SP<-| on stack.
+ * - Child needs exit logic when its function is finished.
+ */
+ cmp r0, #0 @ Check ipc success
+ blt ipc_failed
+ cmp MR_RETURN_REGISTER, #0 @ Check ipc return register MR_RETURN.
+ blt clone_failed @ Ipc was ok but clone() failed.
+ bgt parent_return @ It has child pid, goto parent return.
+child:
+ ldr r0, [sp, #-4]! @ Load child's first argument.
+ mov lr, pc @ Save return address
+ ldr pc, [sp, #-4]! @ Load function pointer from stack
+child_exit:
+ b child_exit @ We infinitely loop for now.
+
+ @ Return with normal ipc return sequence
+parent_return:
+clone_failed:
+ipc_failed:
+ utcb_address r12 @ Get utcb
+ stmia r12, {r3-r8} @ Store mrs.
+ ldmfd sp!, {r4-r8,pc} @ Return restoring pc and context.
+END_PROC(arch_clone)
+
+/*
+ * Inter-process communication. Loads message registers as arguments before the call,
+ * and stores them as results after the call. @r0 = to, @r1 = from.
+ */
+BEGIN_PROC(l4_ipc)
+ stmfd sp!, {r4-r8,lr} @ Save context.
+ utcb_address r12 @ Get utcb address.
+ ldmia r12!, {r3-r8} @ Load 6 Message registers from utcb. MR0-MR5
+ ldr r12, =__l4_ipc
+ mov lr, pc
+ ldr pc, [r12]
+ utcb_address r12 @ Get utcb address.
+ stmia r12, {r3-r8} @ Store 6 Message registers to utcb. MR0-MR5
+ ldmfd sp!, {r4-r8,pc} @ Return restoring pc, and context.
+END_PROC(l4_ipc)
+
+/*
+ * System call that maps an area of memory into the given address space.
+ * @r0 = physical address, @r1 = virtual address, @r2 = map size in pages,
+ * @r3 = map flags, @r4 = The tgid of the address space to map.
+ */
+BEGIN_PROC(l4_map)
+ stmfd sp!, {r4, lr}
+ ldr r4, [sp, #8] @ FIXME: Is this right?
+ ldr r12, =__l4_map
+ mov lr, pc @ We must return here to restore r4.
+ ldr pc, [r12]
+ ldmfd sp!, {r4, pc}
+END_PROC(l4_map)
+
+/*
+ * Reads/manipulates capabilities of a thread, particularly a pager.
+ * @r0 = request type, @r1 = request flags, @r2 = Capability buffer pointer
+ */
+BEGIN_PROC(l4_capability_control)
+ stmfd sp!, {lr}
+ ldr r12, =__l4_capability_control
+ mov lr, pc
+ ldr pc, [r12]
+ ldmfd sp!, {pc} @ Restore original lr and return.
+END_PROC(l4_capability_control)
+
+/*
+ * System call that unmaps an area of memory into the given address space.
+ * @r0 = virtual, @r1 = pages, @r2 = tid of address space to unmap
+ */
+BEGIN_PROC(l4_unmap)
+ stmfd sp!, {lr}
+ ldr r12, =__l4_unmap
+ mov lr, pc
+ ldr pc, [r12]
+ ldmfd sp!, {pc} @ Restore original lr and return.
+END_PROC(l4_unmap)
+
+/*
+ * System call that controls containers and their parameters.
+ * @r0 = request type, @r1 = request flags, @r2 = io buffer ptr
+ */
+BEGIN_PROC(l4_container_control)
+ stmfd sp!, {lr}
+ ldr r12, =__l4_container_control
+ mov lr, pc
+ ldr pc, [r12]
+ ldmfd sp!, {pc} @ Restore original lr and return.
+END_PROC(l4_container_control)
+
+/*
+ * System call that gets or sets the time info structure.
+ * @r0 = ptr to time structure @r1 = set or get. set = 1, get = 0.
+ */
+BEGIN_PROC(l4_time)
+ stmfd sp!, {lr}
+ ldr r12, =__l4_time
+ mov lr, pc
+ ldr pc, [r12]
+ ldmfd sp!, {pc} @ Restore original lr and return.
+END_PROC(l4_time)
+
+/*
+ * System call that controls thread creation, destruction and modification.
+ * @r0 = thread action, @r1 = &ids, @r2 = utcb address
+ */
+BEGIN_PROC(l4_thread_control)
+ stmfd sp!, {lr}
+ ldr r12, =__l4_thread_control
+ mov lr, pc
+ ldr pc, [r12]
+ ldmfd sp!, {pc} @ Restore original lr and return.
+END_PROC(l4_thread_control)
+
+/*
+ * System call that modifies ipc blocked sender lists of receivers.
+ * @r0 = Action (e.g. block/unblock), @r1 = sender id, @r2 = sender tag
+ */
+BEGIN_PROC(l4_ipc_control)
+ stmfd sp!, {lr}
+ ldr r12, =__l4_ipc_control
+ mov lr, pc
+ ldr pc, [r12]
+ ldmfd sp!, {pc} @ Restore original lr and return.
+END_PROC(l4_ipc_control)
+
+/*
+ * Manipulates address spaces, e.g. sets up shared memory areas between threads
+ * @r0 = operation code, @r1 = operation flags, @r2 = An id (irqnum, or capid)
+ */
+BEGIN_PROC(l4_irq_control)
+ stmfd sp!, {lr}
+ ldr r12, =__l4_irq_control
+ mov lr, pc
+ ldr pc, [r12]
+ ldmfd sp!, {pc} @ Restore original lr and return.
+END_PROC(l4_irq_control)
+
+/*
+ * Locks/unlocks a userspace mutex.
+ * @r0 = mutex virtual address, @r1 = mutex operation code
+ */
+BEGIN_PROC(l4_mutex_control)
+ stmfd sp!, {lr}
+ ldr r12, =__l4_mutex_control
+ mov lr, pc
+ ldr pc, [r12]
+ ldmfd sp!, {pc} @ Restore original lr and return.
+END_PROC(l4_mutex_control)
+
+/*
+ * Sets registers of a thread and its pager.
+ * @r0 = ptr to exregs_data structure, @r1 = tid of thread.
+ */
+BEGIN_PROC(l4_exchange_registers)
+ stmfd sp!, {lr}
+ ldr r12, =__l4_exchange_registers
+ mov lr, pc
+ ldr pc, [r12]
+ ldmfd sp!, {pc} @ Restore original lr and return.
+END_PROC(l4_exchange_registers)
+
+/*
+ * System call that manipulates caches and tlbs.
+ *
+ * @r0 = starting virtual address (inclusive),
+ * @r1 = ending virtual address (exclusive),
+ * @r3 = cache operation
+ */
+BEGIN_PROC(l4_cache_control)
+ stmfd sp!, {lr}
+ ldr r12, =__l4_cache_control
+ mov lr, pc
+ ldr pc, [r12]
+ ldmfd sp!, {pc} @ Restore original lr and return.
+END_PROC(l4_cache_control)
conts/libl4/src/arch/or1k/syscalls.S.ARM
Property changes :
Added: svn:mergeinfo
## -0,0 +0,0 ##
Index: conts/libl4/src/arch/or1k/v6/mutex.c
===================================================================
--- conts/libl4/src/arch/or1k/v6/mutex.c (nonexistent)
+++ conts/libl4/src/arch/or1k/v6/mutex.c (revision 7)
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2010 B Labs Ltd.
+ * Author: Prem Mallappa
+ */
+
+#include
+#include
+#include L4LIB_INC_ARCH(syslib.h) /* for BUG/BUG_ON, */
+#include L4LIB_INC_ARCH(asm.h)
+#include INC_SUBARCH(mmu_ops.h)
+
+int __l4_mutex_lock(void *m, l4id_t tid)
+{
+ int tmp, ret;
+ loop:
+ __asm__ __volatile__(
+ "ldrex %0, [%1]\n"
+ : "=&r"(tmp)
+ : "r"(m)
+ : "memory"
+ );
+
+ if (tmp != L4_MUTEX_UNLOCKED)
+ ret = L4_MUTEX_CONTENDED;
+ else
+ ret = L4_MUTEX_SUCCESS;
+
+ /* Store our 'tid' */
+ __asm__ __volatile__(
+ "strex %0, %1, [%2]\n"
+ :"=&r"(tmp)
+ :"r"(tid), "r"(m)
+ );
+ if (tmp != 0) {
+ /* We couldn't succeed the store, we retry */
+#ifdef CONFIG_SMP
+ /* don't hog the CPU, sleep till an event */
+ __asm__ __volatile__("wfe\n");
+#endif
+ goto loop;
+ }
+
+ dsb();
+
+ return ret;
+}
+
+int __l4_mutex_unlock(void *m, l4id_t tid)
+{
+ int tmp, ret;
+ loop:
+ /* Load and see if the lock had our tid */
+ __asm__ __volatile__(
+ "ldrex %0, [%1]\n"
+ : "=r"(tmp)
+ : "r"(m)
+ );
+
+ if (tmp != tid)
+ ret = L4_MUTEX_CONTENDED;
+ else
+ ret = L4_MUTEX_SUCCESS;
+
+ /* We store unlock value '0' */
+ __asm__ __volatile__(
+ "strex %0, %1, [%2]\n"
+ :"=&r"(tmp)
+ :"rI"(L4_MUTEX_UNLOCKED), "r"(m)
+ );
+ if (tmp != 0) {
+ /* The store wasn't successful, retry */
+ goto loop;
+ }
+
+ dsb();
+
+#ifdef CONFIG_SMP
+ __asm__ __volatile__("sev\n");
+#endif
+ return ret;
+}
+
+u8 l4_atomic_dest_readb(unsigned long *location)
+{
+ unsigned int tmp, res;
+ __asm__ __volatile__ (
+ "1: \n"
+ " ldrex %0, [%2] \n"
+ " strex %1, %3, [%2] \n"
+ " teq %1, #0 \n"
+ " bne 1b \n"
+ : "=&r"(tmp), "=&r"(res)
+ : "r"(location), "r"(0)
+ : "cc", "memory"
+ );
+
+ return (u8)tmp;
+}
Index: conts/libl4/src/arch/or1k/exregs.c
===================================================================
--- conts/libl4/src/arch/or1k/exregs.c (nonexistent)
+++ conts/libl4/src/arch/or1k/exregs.c (revision 7)
@@ -0,0 +1,99 @@
+/*
+ * Generic to arch-specific interface for
+ * exchange_registers()
+ *
+ * Copyright (C) 2008 Bahadir Balban
+ */
+#include
+#include
+#include L4LIB_INC_ARCH(syslib.h)
+#include INC_GLUE(message.h)
+
+void exregs_set_read(struct exregs_data *exregs)
+{
+ exregs->flags |= EXREGS_READ;
+}
+
+void exregs_print_registers(void)
+{
+ struct exregs_data exregs;
+
+ /* Read registers */
+ memset(&exregs, 0, sizeof(exregs));
+ exregs.valid_vect = ~0; /* Set all flags */
+ exregs.flags |= EXREGS_READ;
+ exregs.flags |= EXREGS_SET_UTCB;
+ exregs.flags |= EXREGS_SET_PAGER;
+ BUG_ON(l4_exchange_registers(&exregs, self_tid()) < 0);
+
+ /* Print out registers */
+ printf("Task (%x) register state upon fault:\n", self_tid());
+ printf("R0: 0x%x\n", exregs.context.r0);
+ printf("R1: 0x%x\n", exregs.context.r1);
+ printf("R2: 0x%x\n", exregs.context.r2);
+ printf("R3: 0x%x\n", exregs.context.r3);
+ printf("R4: 0x%x\n", exregs.context.r4);
+ printf("R5: 0x%x\n", exregs.context.r5);
+ printf("R6: 0x%x\n", exregs.context.r6);
+ printf("R7: 0x%x\n", exregs.context.r7);
+ printf("R8: 0x%x\n", exregs.context.r8);
+ printf("R9: 0x%x\n", exregs.context.r9);
+ printf("R10: 0x%x\n", exregs.context.r10);
+ printf("R11: 0x%x\n", exregs.context.r11);
+ printf("R12: 0x%x\n", exregs.context.r12);
+ printf("R13: 0x%x\n", exregs.context.sp);
+ printf("R14: 0x%x\n", exregs.context.lr);
+ printf("R15: 0x%x\n", exregs.context.pc);
+ printf("Pager: 0x%x\n", exregs.pagerid);
+ printf("Utcb @ 0x%lx\n", exregs.utcb_address);
+}
+
+void exregs_set_mr(struct exregs_data *s, int offset, unsigned long val)
+{
+ /* Get MR0 */
+ u32 *mr = &s->context.MR0_REGISTER;
+
+ /* Sanity check */
+ BUG_ON(offset > MR_TOTAL || offset < 0);
+
+ /* Set MR */
+ mr[offset] = val;
+
+ /* Set valid bit for mr register */
+ s->valid_vect |= FIELD_TO_BIT(exregs_context_t, MR0_REGISTER) << offset;
+}
+
+void exregs_set_pager(struct exregs_data *s, l4id_t pagerid)
+{
+ s->pagerid = pagerid;
+ s->flags |= EXREGS_SET_PAGER;
+}
+
+unsigned long exregs_get_utcb(struct exregs_data *s)
+{
+ return s->utcb_address;
+}
+
+unsigned long exregs_get_stack(struct exregs_data *s)
+{
+ return s->context.sp;
+}
+
+void exregs_set_utcb(struct exregs_data *s, unsigned long virt)
+{
+ s->utcb_address = virt;
+ s->flags |= EXREGS_SET_UTCB;
+}
+
+void exregs_set_stack(struct exregs_data *s, unsigned long sp)
+{
+ s->context.sp = sp;
+ s->valid_vect |= FIELD_TO_BIT(exregs_context_t, sp);
+}
+
+void exregs_set_pc(struct exregs_data *s, unsigned long pc)
+{
+ s->context.pc = pc;
+ s->valid_vect |= FIELD_TO_BIT(exregs_context_t, pc);
+}
+
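+/*
+ * Editorial sketch: typical use of the setters above to prepare a new
+ * thread's context and commit it in one l4_exchange_registers() call.
+ * The tid, entry and stack values are illustrative.
+ */
+static void sketch_setup_thread(l4id_t tid, unsigned long entry,
+				unsigned long stack)
+{
+	struct exregs_data exregs;
+
+	memset(&exregs, 0, sizeof(exregs));
+	exregs_set_pc(&exregs, entry);
+	exregs_set_stack(&exregs, stack);
+	BUG_ON(l4_exchange_registers(&exregs, tid) < 0);
+}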
Index: conts/libl4/src/arch/or1k
===================================================================
--- conts/libl4/src/arch/or1k (nonexistent)
+++ conts/libl4/src/arch/or1k (revision 7)
conts/libl4/src/arch/or1k
Property changes :
Added: svn:mergeinfo
## -0,0 +0,0 ##
Index: conts/test_suite0/src/arch/or1k/v5/mm.c
===================================================================
--- conts/test_suite0/src/arch/or1k/v5/mm.c (nonexistent)
+++ conts/test_suite0/src/arch/or1k/v5/mm.c (revision 7)
@@ -0,0 +1,65 @@
+/*
+ * ARMv5 specific functions
+ *
+ * Copyright (C) 2008 - 2010 B Labs Ltd.
+ */
+#include
+#include
+#include
+#include __INC_ARCH(mm.h)
+
+/* Extracts generic protection flags from architecture-specific pte */
+unsigned int vm_prot_flags(pte_t pte)
+{
+ unsigned int vm_prot_flags = 0;
+ unsigned int rw_flags = __MAP_USR_RW & PTE_PROT_MASK;
+ unsigned int ro_flags = __MAP_USR_RO & PTE_PROT_MASK;
+
+ /* Clear non-protection flags */
+ pte &= PTE_PROT_MASK;
+
+ if (pte == ro_flags)
+ vm_prot_flags = VM_READ | VM_EXEC;
+ else if (pte == rw_flags)
+ vm_prot_flags = VM_READ | VM_WRITE | VM_EXEC;
+ else
+ vm_prot_flags = VM_NONE;
+
+ return vm_prot_flags;
+}
+
+/*
+ * PTE STATES:
+ * PTE type field: 00 (Translation fault)
+ * PTE type field correct, AP bits: None (Read or Write access fault)
+ * PTE type field correct, AP bits: RO (Write access fault)
+ */
+
+/*
+ * Extracts arch-specific fault parameters
+ * and puts them into generic format
+ */
+void set_generic_fault_params(struct fault_data *fault)
+{
+ unsigned int prot_flags = vm_prot_flags(fault->kdata->pte);
+
+ fault->reason = 0;
+ fault->pte_flags = prot_flags;
+
+ if (is_prefetch_abort(fault->kdata->fsr)) {
+ fault->reason |= VM_READ;
+ fault->address = fault->kdata->faulty_pc;
+ } else {
+ fault->address = fault->kdata->far;
+
+ /* Always assume read fault first */
+ if (prot_flags & VM_NONE)
+ fault->reason |= VM_READ;
+ else if (prot_flags & VM_READ)
+ fault->reason |= VM_WRITE;
+ else
+ BUG();
+ }
+ arch_print_fault_params(fault);
+}
+
Index: conts/test_suite0/src/arch/or1k/v7/mm.c
===================================================================
--- conts/test_suite0/src/arch/or1k/v7/mm.c (nonexistent)
+++ conts/test_suite0/src/arch/or1k/v7/mm.c (revision 7)
@@ -0,0 +1,75 @@
+/*
+ * ARMv7 specific functions
+ *
+ * Copyright (C) 2008 - 2010 B Labs Ltd.
+ */
+#include
+#include
+#include
+#include INC_SUBARCH(mm.h)
+#include INC_SUBARCH(exception.h)
+
+/* Get simplified access permissions */
+int pte_get_access_simple(pte_t pte)
+{
+ /* Place AP[2] and AP[1] in [1:0] positions and return */
+ return (((pte >> PTE_AP2_BIT) & 1) << 1)
+ | ((pte >> PTE_AP1_BIT) & 1);
+}
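+
+/*
+ * Editorial sketch: under the simplified access-permission model, a pte
+ * is user-writable only in the AP_SIMPLE_USER_RW_KERN_RW case handled
+ * below. This helper is illustrative, not part of this revision.
+ */
+static inline int sketch_pte_user_writable(pte_t pte)
+{
+	return pte_get_access_simple(pte) == AP_SIMPLE_USER_RW_KERN_RW;
+}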
+
+int is_translation_fault(u32 fsr)
+{
+ return (fsr & FSR_FS_MASK) == ABORT_TRANSLATION_PAGE;
+}
+
+unsigned int vm_prot_flags(pte_t pte, u32 fsr)
+{
+ unsigned int pte_prot_flags = 0;
+
+ /* Translation fault means no permissions */
+ if (is_translation_fault(fsr))
+ return VM_NONE;
+
+ /* Check simplified permission bits */
+ switch (pte_get_access_simple(pte)) {
+	case AP_SIMPLE_USER_RW_KERN_RW:
+		pte_prot_flags |= VM_WRITE;
+		/* fall through: writable implies readable */
+	case AP_SIMPLE_USER_RO_KERN_RO:
+		pte_prot_flags |= VM_READ;
+
+ /* Also, check exec never bit */
+ if (!(pte & (1 << PTE_XN_BIT)))
+ pte_prot_flags |= VM_EXEC;
+ break;
+ case AP_SIMPLE_USER_NONE_KERN_RW:
+ case AP_SIMPLE_USER_NONE_KERN_RO:
+ default:
+ pte_prot_flags = VM_NONE;
+ break;
+ }
+
+ return pte_prot_flags;
+}
+
+void set_generic_fault_params(struct fault_data *fault)
+{
+ fault->pte_flags = vm_prot_flags(fault->kdata->pte, fault->kdata->fsr);
+ fault->reason = 0;
+
+ /*
+ * Prefetch fault denotes exec fault.
+ */
+ if (is_prefetch_abort(fault->kdata->fsr)) {
+ fault->reason |= VM_EXEC;
+ fault->address = fault->kdata->faulty_pc;
+ } else {
+ fault->address = fault->kdata->far;
+
+ /* Write-not-read bit determines fault */
+ if (fault->kdata->fsr & (1 << DFSR_WNR_BIT))
+ fault->reason |= VM_WRITE;
+ else
+ fault->reason |= VM_READ;
+ }
+}
+
Index: conts/test_suite0/src/arch/or1k
===================================================================
--- conts/test_suite0/src/arch/or1k (nonexistent)
+++ conts/test_suite0/src/arch/or1k (revision 7)
conts/test_suite0/src/arch/or1k
Property changes :
Added: svn:mergeinfo
## -0,0 +0,0 ##
Index: conts/libc/include/sys-userspace/arch-or1k/arch/stdint.h
===================================================================
--- conts/libc/include/sys-userspace/arch-or1k/arch/stdint.h (nonexistent)
+++ conts/libc/include/sys-userspace/arch-or1k/arch/stdint.h (revision 7)
@@ -0,0 +1,92 @@
+/*
+ * Australian Public Licence B (OZPLB)
+ *
+ * Version 1-0
+ *
+ * Copyright (c) 2004 National ICT Australia
+ *
+ * All rights reserved.
+ *
+ * Developed by: Embedded, Real-time and Operating Systems Program (ERTOS)
+ * National ICT Australia
+ * http://www.ertos.nicta.com.au
+ *
+ * Permission is granted by National ICT Australia, free of charge, to
+ * any person obtaining a copy of this software and any associated
+ * documentation files (the "Software") to deal with the Software without
+ * restriction, including (without limitation) the rights to use, copy,
+ * modify, adapt, merge, publish, distribute, communicate to the public,
+ * sublicense, and/or sell, lend or rent out copies of the Software, and
+ * to permit persons to whom the Software is furnished to do so, subject
+ * to the following conditions:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimers.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimers in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of National ICT Australia, nor the names of its
+ * contributors, may be used to endorse or promote products derived
+ * from this Software without specific prior written permission.
+ *
+ * EXCEPT AS EXPRESSLY STATED IN THIS LICENCE AND TO THE FULL EXTENT
+ * PERMITTED BY APPLICABLE LAW, THE SOFTWARE IS PROVIDED "AS-IS", AND
+ * NATIONAL ICT AUSTRALIA AND ITS CONTRIBUTORS MAKE NO REPRESENTATIONS,
+ * WARRANTIES OR CONDITIONS OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+ * BUT NOT LIMITED TO ANY REPRESENTATIONS, WARRANTIES OR CONDITIONS
+ * REGARDING THE CONTENTS OR ACCURACY OF THE SOFTWARE, OR OF TITLE,
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT,
+ * THE ABSENCE OF LATENT OR OTHER DEFECTS, OR THE PRESENCE OR ABSENCE OF
+ * ERRORS, WHETHER OR NOT DISCOVERABLE.
+ *
+ * TO THE FULL EXTENT PERMITTED BY APPLICABLE LAW, IN NO EVENT SHALL
+ * NATIONAL ICT AUSTRALIA OR ITS CONTRIBUTORS BE LIABLE ON ANY LEGAL
+ * THEORY (INCLUDING, WITHOUT LIMITATION, IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHERWISE) FOR ANY CLAIM, LOSS, DAMAGES OR OTHER
+ * LIABILITY, INCLUDING (WITHOUT LIMITATION) LOSS OF PRODUCTION OR
+ * OPERATION TIME, LOSS, DAMAGE OR CORRUPTION OF DATA OR RECORDS; OR LOSS
+ * OF ANTICIPATED SAVINGS, OPPORTUNITY, REVENUE, PROFIT OR GOODWILL, OR
+ * OTHER ECONOMIC LOSS; OR ANY SPECIAL, INCIDENTAL, INDIRECT,
+ * CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES, ARISING OUT OF OR IN
+ * CONNECTION WITH THIS LICENCE, THE SOFTWARE OR THE USE OF OR OTHER
+ * DEALINGS WITH THE SOFTWARE, EVEN IF NATIONAL ICT AUSTRALIA OR ITS
+ * CONTRIBUTORS HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH CLAIM, LOSS,
+ * DAMAGES OR OTHER LIABILITY.
+ *
+ * If applicable legislation implies representations, warranties, or
+ * conditions, or imposes obligations or liability on National ICT
+ * Australia or one of its contributors in respect of the Software that
+ * cannot be wholly or partly excluded, restricted or modified, the
+ * liability of National ICT Australia or the contributor is limited, to
+ * the full extent permitted by the applicable legislation, at its
+ * option, to:
+ * a. in the case of goods, any one or more of the following:
+ * i. the replacement of the goods or the supply of equivalent goods;
+ * ii. the repair of the goods;
+ * iii. the payment of the cost of replacing the goods or of acquiring
+ * equivalent goods;
+ * iv. the payment of the cost of having the goods repaired; or
+ * b. in the case of services:
+ * i. the supplying of the services again; or
+ * ii. the payment of the cost of having the services supplied again.
+ *
+ * The construction, validity and performance of this licence is governed
+ * by the laws in force in New South Wales, Australia.
+ */
+/*
+ Author: Ben Leslie
+*/
+typedef signed char int8_t;
+typedef short int16_t;
+typedef int int32_t;
+typedef long long int64_t;
+
+typedef unsigned char uint8_t;
+typedef unsigned short uint16_t;
+typedef unsigned int uint32_t;
+typedef unsigned long long uint64_t;
+
+#define __PTR_SIZE 32
Index: conts/libc/include/sys-userspace/arch-or1k
===================================================================
--- conts/libc/include/sys-userspace/arch-or1k (nonexistent)
+++ conts/libc/include/sys-userspace/arch-or1k (revision 7)
conts/libc/include/sys-userspace/arch-or1k
Property changes :
Added: svn:mergeinfo
## -0,0 +0,0 ##
Index: conts/libc/include/sys-baremetal/arch-or1k/arch/stdint.h
===================================================================
--- conts/libc/include/sys-baremetal/arch-or1k/arch/stdint.h (nonexistent)
+++ conts/libc/include/sys-baremetal/arch-or1k/arch/stdint.h (revision 7)
@@ -0,0 +1,92 @@
+/*
+ * Australian Public Licence B (OZPLB)
+ *
+ * Version 1-0
+ *
+ * Copyright (c) 2004 National ICT Australia
+ *
+ * All rights reserved.
+ *
+ * Developed by: Embedded, Real-time and Operating Systems Program (ERTOS)
+ * National ICT Australia
+ * http://www.ertos.nicta.com.au
+ *
+ * Permission is granted by National ICT Australia, free of charge, to
+ * any person obtaining a copy of this software and any associated
+ * documentation files (the "Software") to deal with the Software without
+ * restriction, including (without limitation) the rights to use, copy,
+ * modify, adapt, merge, publish, distribute, communicate to the public,
+ * sublicense, and/or sell, lend or rent out copies of the Software, and
+ * to permit persons to whom the Software is furnished to do so, subject
+ * to the following conditions:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimers.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimers in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of National ICT Australia, nor the names of its
+ * contributors, may be used to endorse or promote products derived
+ * from this Software without specific prior written permission.
+ *
+ * EXCEPT AS EXPRESSLY STATED IN THIS LICENCE AND TO THE FULL EXTENT
+ * PERMITTED BY APPLICABLE LAW, THE SOFTWARE IS PROVIDED "AS-IS", AND
+ * NATIONAL ICT AUSTRALIA AND ITS CONTRIBUTORS MAKE NO REPRESENTATIONS,
+ * WARRANTIES OR CONDITIONS OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+ * BUT NOT LIMITED TO ANY REPRESENTATIONS, WARRANTIES OR CONDITIONS
+ * REGARDING THE CONTENTS OR ACCURACY OF THE SOFTWARE, OR OF TITLE,
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT,
+ * THE ABSENCE OF LATENT OR OTHER DEFECTS, OR THE PRESENCE OR ABSENCE OF
+ * ERRORS, WHETHER OR NOT DISCOVERABLE.
+ *
+ * TO THE FULL EXTENT PERMITTED BY APPLICABLE LAW, IN NO EVENT SHALL
+ * NATIONAL ICT AUSTRALIA OR ITS CONTRIBUTORS BE LIABLE ON ANY LEGAL
+ * THEORY (INCLUDING, WITHOUT LIMITATION, IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHERWISE) FOR ANY CLAIM, LOSS, DAMAGES OR OTHER
+ * LIABILITY, INCLUDING (WITHOUT LIMITATION) LOSS OF PRODUCTION OR
+ * OPERATION TIME, LOSS, DAMAGE OR CORRUPTION OF DATA OR RECORDS; OR LOSS
+ * OF ANTICIPATED SAVINGS, OPPORTUNITY, REVENUE, PROFIT OR GOODWILL, OR
+ * OTHER ECONOMIC LOSS; OR ANY SPECIAL, INCIDENTAL, INDIRECT,
+ * CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES, ARISING OUT OF OR IN
+ * CONNECTION WITH THIS LICENCE, THE SOFTWARE OR THE USE OF OR OTHER
+ * DEALINGS WITH THE SOFTWARE, EVEN IF NATIONAL ICT AUSTRALIA OR ITS
+ * CONTRIBUTORS HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH CLAIM, LOSS,
+ * DAMAGES OR OTHER LIABILITY.
+ *
+ * If applicable legislation implies representations, warranties, or
+ * conditions, or imposes obligations or liability on National ICT
+ * Australia or one of its contributors in respect of the Software that
+ * cannot be wholly or partly excluded, restricted or modified, the
+ * liability of National ICT Australia or the contributor is limited, to
+ * the full extent permitted by the applicable legislation, at its
+ * option, to:
+ * a. in the case of goods, any one or more of the following:
+ * i. the replacement of the goods or the supply of equivalent goods;
+ * ii. the repair of the goods;
+ * iii. the payment of the cost of replacing the goods or of acquiring
+ * equivalent goods;
+ * iv. the payment of the cost of having the goods repaired; or
+ * b. in the case of services:
+ * i. the supplying of the services again; or
+ * ii. the payment of the cost of having the services supplied again.
+ *
+ * The construction, validity and performance of this licence is governed
+ * by the laws in force in New South Wales, Australia.
+ */
+/*
+ Author: Ben Leslie
+*/
+typedef signed char int8_t;
+typedef short int16_t;
+typedef int int32_t;
+typedef long long int64_t;
+
+typedef unsigned char uint8_t;
+typedef unsigned short uint16_t;
+typedef unsigned int uint32_t;
+typedef unsigned long long uint64_t;
+
+#define __PTR_SIZE 32
Index: conts/libc/include/sys-baremetal/arch-or1k
===================================================================
--- conts/libc/include/sys-baremetal/arch-or1k (nonexistent)
+++ conts/libc/include/sys-baremetal/arch-or1k (revision 7)
conts/libc/include/sys-baremetal/arch-or1k
Property changes :
Added: svn:mergeinfo
## -0,0 +0,0 ##
Index: conts/libc/src/arch-or1k/eabi.c
===================================================================
--- conts/libc/src/arch-or1k/eabi.c (nonexistent)
+++ conts/libc/src/arch-or1k/eabi.c (revision 7)
@@ -0,0 +1,11 @@
+
+
+/* Dummies to keep Codesourcery 4.1.1 libgcc division exceptions silent. */
+void raise(void)
+{
+}
+
+void __aeabi_unwind_cpp_pr0(void)
+{
+}
+
Index: conts/libc/src/arch-or1k
===================================================================
--- conts/libc/src/arch-or1k (nonexistent)
+++ conts/libc/src/arch-or1k (revision 7)
conts/libc/src/arch-or1k
Property changes :
Added: svn:mergeinfo
## -0,0 +0,0 ##
Index: conts/libc/src/sys-userspace/arch-or1k/sys_fputc.c
===================================================================
--- conts/libc/src/sys-userspace/arch-or1k/sys_fputc.c (nonexistent)
+++ conts/libc/src/sys-userspace/arch-or1k/sys_fputc.c (revision 7)
@@ -0,0 +1,13 @@
+/*
+ * Ties up platform's uart driver functions with printf
+ *
+ * Copyright (C) 2009 B Labs Ltd.
+ */
+#include
+#include
+
+int __fputc(int c, FILE *stream)
+{
+ uart_tx_char(uart_print_base, c);
+ return 0;
+}
Index: conts/libc/src/sys-userspace/arch-or1k/sys_getc.c
===================================================================
--- conts/libc/src/sys-userspace/arch-or1k/sys_getc.c (nonexistent)
+++ conts/libc/src/sys-userspace/arch-or1k/sys_getc.c (revision 7)
@@ -0,0 +1,31 @@
+/*
+ * Library calls for uart rx.
+ *
+ * Copyright (C) 2009 B Labs Ltd.
+ *
+ */
+#include
+#include
+
+char fgetc(FILE * file)
+{
+ return uart_rx_char(uart_print_base);
+}
+
+#define MAX_LINE_LEN 256
+char data[MAX_LINE_LEN];
+
+char *fgetline(FILE * file)
+{
+ int index = 0;
+
+	/*
+	 * The line ends when either:
+	 * 1. we have received MAX_LINE_LEN chars, or
+	 * 2. we receive EOL: '\n' followed by '\r'
+	 */
+	while (index < MAX_LINE_LEN) {
+		data[index] = fgetc(file);
+		if (index > 0 && data[index - 1] == '\n' && data[index] == '\r')
+			break;
+		index++;
+	}
+
+ return data;
+}
Index: conts/libc/src/sys-userspace/arch-or1k/sys_stdio.c
===================================================================
--- conts/libc/src/sys-userspace/arch-or1k/sys_stdio.c (nonexistent)
+++ conts/libc/src/sys-userspace/arch-or1k/sys_stdio.c (revision 7)
@@ -0,0 +1,67 @@
+#include
+#include
+
+extern int __fputc(int c, FILE *stream);
+
+static int ser_out(int c)
+{
+ __fputc(c, 0);
+ if (c == '\n')
+ ser_out('\r');
+ return 0;
+}
+
+static size_t
+l4kdb_write(void *data, long int position, size_t count, void *handle /*unused*/)
+{
+ size_t i;
+ char *real_data = data;
+ for (i = 0; i < count; i++)
+ ser_out(real_data[i]);
+ return count;
+}
+
+struct __file __stdin = {
+ .handle = NULL,
+ .read_fn = NULL,
+ .write_fn = NULL,
+ .close_fn = NULL,
+ .eof_fn = NULL,
+ .buffering_mode = _IONBF,
+ .buffer = NULL,
+ .unget_pos = 0,
+ .current_pos = 0,
+ .eof = 0
+};
+
+
+struct __file __stdout = {
+ .handle = NULL,
+ .read_fn = NULL,
+ .write_fn = l4kdb_write,
+ .close_fn = NULL,
+ .eof_fn = NULL,
+ .buffering_mode = _IONBF,
+ .buffer = NULL,
+ .unget_pos = 0,
+ .current_pos = 0,
+ .eof = 0
+};
+
+
+struct __file __stderr = {
+ .handle = NULL,
+ .read_fn = NULL,
+ .write_fn = l4kdb_write,
+ .close_fn = NULL,
+ .eof_fn = NULL,
+ .buffering_mode = _IONBF,
+ .buffer = NULL,
+ .unget_pos = 0,
+ .current_pos = 0,
+ .eof = 0
+};
+
+FILE *stdin = &__stdin;
+FILE *stdout = &__stdout;
+FILE *stderr = &__stderr;
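+
+/*
+ * Editorial sketch: with the hooks above, any stdio output on stdout or
+ * stderr funnels through the write_fn hook, i.e. l4kdb_write(), which
+ * feeds the UART one character at a time via ser_out().
+ */
+static void sketch_hello(void)
+{
+	printf("hello\n");	/* reaches l4kdb_write(), then ser_out() */
+}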
Index: conts/libc/src/sys-userspace/arch-or1k
===================================================================
--- conts/libc/src/sys-userspace/arch-or1k (nonexistent)
+++ conts/libc/src/sys-userspace/arch-or1k (revision 7)
conts/libc/src/sys-userspace/arch-or1k
Property changes :
Added: svn:mergeinfo
## -0,0 +0,0 ##
Index: conts/libc/crt/sys-userspace/arch-or1k/crt0.S.ARM
===================================================================
--- conts/libc/crt/sys-userspace/arch-or1k/crt0.S.ARM (nonexistent)
+++ conts/libc/crt/sys-userspace/arch-or1k/crt0.S.ARM (revision 7)
@@ -0,0 +1,93 @@
+/*
+ * Australian Public Licence B (OZPLB)
+ *
+ * Version 1-0
+ *
+ * Copyright (c) 2004 National ICT Australia
+ *
+ * All rights reserved.
+ *
+ * Developed by: Embedded, Real-time and Operating Systems Program (ERTOS)
+ * National ICT Australia
+ * http://www.ertos.nicta.com.au
+ *
+ * Permission is granted by National ICT Australia, free of charge, to
+ * any person obtaining a copy of this software and any associated
+ * documentation files (the "Software") to deal with the Software without
+ * restriction, including (without limitation) the rights to use, copy,
+ * modify, adapt, merge, publish, distribute, communicate to the public,
+ * sublicense, and/or sell, lend or rent out copies of the Software, and
+ * to permit persons to whom the Software is furnished to do so, subject
+ * to the following conditions:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimers.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimers in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of National ICT Australia, nor the names of its
+ * contributors, may be used to endorse or promote products derived
+ * from this Software without specific prior written permission.
+ *
+ * EXCEPT AS EXPRESSLY STATED IN THIS LICENCE AND TO THE FULL EXTENT
+ * PERMITTED BY APPLICABLE LAW, THE SOFTWARE IS PROVIDED "AS-IS", AND
+ * NATIONAL ICT AUSTRALIA AND ITS CONTRIBUTORS MAKE NO REPRESENTATIONS,
+ * WARRANTIES OR CONDITIONS OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+ * BUT NOT LIMITED TO ANY REPRESENTATIONS, WARRANTIES OR CONDITIONS
+ * REGARDING THE CONTENTS OR ACCURACY OF THE SOFTWARE, OR OF TITLE,
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT,
+ * THE ABSENCE OF LATENT OR OTHER DEFECTS, OR THE PRESENCE OR ABSENCE OF
+ * ERRORS, WHETHER OR NOT DISCOVERABLE.
+ *
+ * TO THE FULL EXTENT PERMITTED BY APPLICABLE LAW, IN NO EVENT SHALL
+ * NATIONAL ICT AUSTRALIA OR ITS CONTRIBUTORS BE LIABLE ON ANY LEGAL
+ * THEORY (INCLUDING, WITHOUT LIMITATION, IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHERWISE) FOR ANY CLAIM, LOSS, DAMAGES OR OTHER
+ * LIABILITY, INCLUDING (WITHOUT LIMITATION) LOSS OF PRODUCTION OR
+ * OPERATION TIME, LOSS, DAMAGE OR CORRUPTION OF DATA OR RECORDS; OR LOSS
+ * OF ANTICIPATED SAVINGS, OPPORTUNITY, REVENUE, PROFIT OR GOODWILL, OR
+ * OTHER ECONOMIC LOSS; OR ANY SPECIAL, INCIDENTAL, INDIRECT,
+ * CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES, ARISING OUT OF OR IN
+ * CONNECTION WITH THIS LICENCE, THE SOFTWARE OR THE USE OF OR OTHER
+ * DEALINGS WITH THE SOFTWARE, EVEN IF NATIONAL ICT AUSTRALIA OR ITS
+ * CONTRIBUTORS HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH CLAIM, LOSS,
+ * DAMAGES OR OTHER LIABILITY.
+ *
+ * If applicable legislation implies representations, warranties, or
+ * conditions, or imposes obligations or liability on National ICT
+ * Australia or one of its contributors in respect of the Software that
+ * cannot be wholly or partly excluded, restricted or modified, the
+ * liability of National ICT Australia or the contributor is limited, to
+ * the full extent permitted by the applicable legislation, at its
+ * option, to:
+ * a. in the case of goods, any one or more of the following:
+ * i. the replacement of the goods or the supply of equivalent goods;
+ * ii. the repair of the goods;
+ * iii. the payment of the cost of replacing the goods or of acquiring
+ * equivalent goods;
+ * iv. the payment of the cost of having the goods repaired; or
+ * b. in the case of services:
+ * i. the supplying of the services again; or
+ * ii. the payment of the cost of having the services supplied again.
+ *
+ * The construction, validity and performance of this licence is governed
+ * by the laws in force in New South Wales, Australia.
+ */
+
+#ifdef __thumb__
+#define bl blx
+#endif
+ .section .text.head
+ .code 32
+ .global _start;
+ .align;
+_start:
+ ldr sp, =__stack
+ bl platform_init
+ bl __container_init
+1:
+ b 1b
+
conts/libc/crt/sys-userspace/arch-or1k/crt0.S.ARM
Property changes :
Added: svn:mergeinfo
## -0,0 +0,0 ##
Index: conts/libc/crt/sys-userspace/arch-or1k
===================================================================
--- conts/libc/crt/sys-userspace/arch-or1k (nonexistent)
+++ conts/libc/crt/sys-userspace/arch-or1k (revision 7)
conts/libc/crt/sys-userspace/arch-or1k
Property changes :
Added: svn:mergeinfo
## -0,0 +0,0 ##
Index: conts/baremetal/test_suite/src/arch-or1k/new_thread.S.ARM
===================================================================
--- conts/baremetal/test_suite/src/arch-or1k/new_thread.S.ARM (nonexistent)
+++ conts/baremetal/test_suite/src/arch-or1k/new_thread.S.ARM (revision 7)
@@ -0,0 +1,11 @@
+#include
+#include L4LIB_INC_ARCH(asm.h)
+
+BEGIN_PROC(local_setup_new_thread)
+ ldr r0, [sp, #-4]! @ Load first argument.
+ mov lr, pc @ Save return address
+ ldr pc, [sp, #-4]! @ Load function pointer from stack
+new_thread_exit:
+ b new_thread_exit @ We infinitely loop for now.
+END_PROC(local_setup_new_thread)
+
conts/baremetal/test_suite/src/arch-or1k/new_thread.S.ARM
Property changes :
Added: svn:mergeinfo
## -0,0 +0,0 ##
Index: conts/baremetal/test_suite/src/arch-or1k
===================================================================
--- conts/baremetal/test_suite/src/arch-or1k (nonexistent)
+++ conts/baremetal/test_suite/src/arch-or1k (revision 7)
conts/baremetal/test_suite/src/arch-or1k
Property changes :
Added: svn:mergeinfo
## -0,0 +0,0 ##
Index: src/glue/or1k/smp_test.c
===================================================================
--- src/glue/or1k/smp_test.c (nonexistent)
+++ src/glue/or1k/smp_test.c (revision 7)
@@ -0,0 +1,99 @@
+
+#include
+#include
+
+#include INC_GLUE(smp.h)
+#include INC_SUBARCH(cpu.h)
+
+DECLARE_SPINLOCK(smp_lock);
+
+static unsigned long smp_var = 0;
+static unsigned long signal_finished;
+
+static unsigned long basic_var = 0;
+
+void test_basic_coherent(void)
+{
+ dmb();
+ if (smp_get_cpuid() == 0) {
+ if (basic_var != 5555) {
+ printk("FATAL: variable update not seen. var = %lu\n", basic_var);
+ BUG();
+ }
+ } else {
+ basic_var = 5555;
+ dmb();
+ }
+}
+
+void test_smp_coherent(void)
+{
+ int other;
+
+ if (smp_get_cpuid() == 1)
+ other = 0;
+ else
+ other = 1;
+
+ /* Increment var */
+ for (int i = 0; i < 1000; i++) {
+ spin_lock(&smp_lock);
+ smp_var++;
+ spin_unlock(&smp_lock);
+ }
+
+ /* Signal finished */
+ spin_lock(&smp_lock);
+ signal_finished |= (1 << smp_get_cpuid());
+ spin_unlock(&smp_lock);
+
+ /* Wait for other to finish */
+ while (!(signal_finished & (1 << other))) {
+ dmb();
+ }
+ if (smp_get_cpuid() == 0) {
+ printk("Total result: %lu\n", smp_var);
+ if (smp_var != 2000) {
+ printk("FATAL: Total result not as expected\n");
+ BUG();
+ }
+ printk("%s: Success.\n", __FUNCTION__);
+ }
+
+}
+
+
+static u32 make_mask(int ncpus)
+{
+ u32 mask = 0;
+	while (--ncpus) {
+		mask |= CPUID_TO_MASK(ncpus);
+	}
+ mask |= CPUID_TO_MASK(0);
+
+ return mask;
+}
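+
+/*
+ * Editorial sketch: assuming CPUID_TO_MASK(n) == (1 << n), make_mask()
+ * covers cpus 0 .. ncpus-1:
+ */
+static void sketch_make_mask_check(void)
+{
+	BUG_ON(make_mask(4) != 0xf);	/* cpus 0,1,2,3 */
+	BUG_ON(make_mask(1) != 0x1);	/* cpu 0 only */
+}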
+
+#ifndef MAX_IPIS
+#define MAX_IPIS 15
+#endif
+
+void test_ipi(void)
+{
+ int ipi, cpu;
+ for (ipi = 0; ipi <= MAX_IPIS; ipi++) {
+ for (cpu = 0; cpu < CONFIG_NCPU; cpu++) {
+ if (cpu == smp_get_cpuid())
+ continue;
+ printk("IPI %d from %d to %d\n", ipi, smp_get_cpuid(), cpu);
+ arch_send_ipi(CPUID_TO_MASK(cpu), ipi);
+ }
+ }
+ /* Send IPI to all cores at once */
+ cpu = make_mask(CONFIG_NCPU);
+ printk("IPI from %d to all\n", smp_get_cpuid());
+ arch_send_ipi(cpu, 1);
+
+ printk("IPI from %d to self\n", smp_get_cpuid());
+ arch_send_ipi(0, 1); /* Send IPI to self */
+}
Index: src/glue/or1k/smp.c
===================================================================
--- src/glue/or1k/smp.c (nonexistent)
+++ src/glue/or1k/smp.c (revision 7)
@@ -0,0 +1,136 @@
+/*
+ * Copyright (C) 2010 B Labs Ltd.
+ *
+ * Authors: Prem Mallappa, Bahadir Balban
+ *
+ * SMP Initialization of cores.
+ */
+
+#include
+#include INC_GLUE(smp.h)
+#include INC_GLUE(init.h)
+#include INC_GLUE(mapping.h)
+#include INC_SUBARCH(cpu.h)
+#include INC_SUBARCH(proc.h)
+#include INC_SUBARCH(mmu_ops.h)
+#include INC_ARCH(linker.h)
+#include INC_ARCH(io.h)
+#include
+
+unsigned long secondary_run_signal;
+unsigned long secondary_ready_signal;
+
+void __smp_start(void);
+
+void smp_start_cores(void)
+{
+ void (*smp_start_func)(int) =
+ (void (*)(int))virt_to_phys(__smp_start);
+
+ /* FIXME: Check why this high-level version doesn't work */
+ // v7_up_dcache_op_setway(CACHE_SETWAY_CLEAN);
+ v7_clean_invalidate_setway();
+
+	/* We probably don't need this; it is not listed as a requirement */
+ arm_smp_inval_icache_entirely();
+
+ /* Start other cpus */
+ for (int cpu = 1; cpu < CONFIG_NCPU; cpu++) {
+ printk("%s: Bringing up CPU%d\n", __KERNELNAME__, cpu);
+ if ((platform_smp_start(cpu, smp_start_func)) < 0) {
+ printk("FATAL: Could not start secondary cpu. "
+ "cpu=%d\n", cpu);
+ BUG();
+ }
+
+ /* Wait for this particular secondary to become ready */
+		while (!(secondary_ready_signal & CPUID_TO_MASK(cpu)))
+ dmb();
+ }
+
+ scu_print_state();
+}
+
+void init_smp(void)
+{
+	/* Start secondary cpus */
+ if (CONFIG_NCPU > 1) {
+ /* This sets IPI function pointer at bare minimum */
+ platform_smp_init(CONFIG_NCPU);
+ }
+}
+
+void secondary_setup_idle_task(void)
+{
+ /* This also has its spid allocated by primary */
+ current->space = &init_space;
+ TASK_PGD(current) = &init_pgd;
+
+ /* We need a thread id */
+ current->tid = id_new(&kernel_resources.ktcb_ids);
+}
+
+/*
+ * Idle wait before any tasks become available for running.
+ *
+ * FIXME: This should be changed such that tasks running on other
+ * cpus can be killed and secondaries wait on an idle task.
+ *
+ * Currently the tasks are held in wfi() even if asked to be killed,
+ * until a new task becomes runnable. This may be problematic for a
+ * pager that issued a kill request and is waiting for it to finish.
+ */
+void sched_secondary_start(void)
+{
+ while (!secondary_run_signal)
+ dmb();
+
+ secondary_setup_idle_task();
+
+ setup_idle_caps();
+
+ idle_task();
+
+ BUG();
+}
+
+
+/*
+ * This is where secondary_start() jumps to; it is called from
+ * board_smp_start() so that each core is aligned to start here.
+ */
+
+void smp_secondary_init(void)
+{
+ /* Print early core start message */
+ // print_early("Secondary core started.\n");
+
+ /* Start virtual memory */
+ start_virtual_memory();
+
+ arm_smp_inval_tlb_entirely();
+ arm_smp_inval_bpa_entirely();
+ dsb();
+ isb();
+
+ printk("%s: CPU%d: Virtual memory enabled.\n",
+ __KERNELNAME__, smp_get_cpuid());
+
+ /* Mostly initialize GIC CPU interface */
+ secondary_init_platform();
+
+ printk("%s: CPU%d: Initialized.\n",
+ __KERNELNAME__, smp_get_cpuid());
+
+ sched_init();
+
+ /* Signal primary that we are ready */
+ dmb();
+ secondary_ready_signal |= cpu_mask_self();
+
+ /*
+ * Wait for the first runnable task to become available
+ */
+ sched_secondary_start();
+}
+
Index: src/glue/or1k/ipi.c
===================================================================
--- src/glue/or1k/ipi.c (nonexistent)
+++ src/glue/or1k/ipi.c (revision 7)
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2010 B Labs.Ltd.
+ *
+ * Author: Prem Mallappa
+ *
+ * Description: IPI handler for all ARM SMP cores
+ */
+
+#include INC_GLUE(ipi.h)
+#include INC_GLUE(smp.h)
+#include INC_SUBARCH(cpu.h)
+#include
+#include
+#include
+
+/* This should be in a file something like exception.S */
+int ipi_handler(struct irq_desc *desc)
+{
+	int ipi_event = desc - irq_desc_array; /* pointer difference is already in elements */
+
+//	printk("CPU%d: entered IPI%d\n", smp_get_cpuid(), ipi_event);
+
+ switch (ipi_event) {
+ case IPI_TIMER_EVENT:
+ // printk("CPU%d: Handling timer ipi\n", smp_get_cpuid());
+ secondary_timer_irq();
+ break;
+ default:
+ printk("CPU%d: IPI with no meaning: %d\n",
+ smp_get_cpuid(), ipi_event);
+ break;
+ }
+ return 0;
+}
+
+void smp_send_ipi(unsigned int cpumask, int ipi_num)
+{
+ gic_send_ipi(cpumask, ipi_num);
+}
+
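+/*
+ * Editorial sketch: raising the timer IPI on every core but the current
+ * one, using the handler's IPI_TIMER_EVENT case above. Assumes
+ * CONFIG_NCPU <= 32.
+ */
+static void sketch_broadcast_timer_tick(void)
+{
+	unsigned int mask = 0;
+
+	for (int cpu = 0; cpu < CONFIG_NCPU; cpu++)
+		if (cpu != smp_get_cpuid())
+			mask |= CPUID_TO_MASK(cpu);
+
+	smp_send_ipi(mask, IPI_TIMER_EVENT);
+}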
Index: src/glue/or1k/init.c
===================================================================
--- src/glue/or1k/init.c (nonexistent)
+++ src/glue/or1k/init.c (revision 7)
@@ -0,0 +1,271 @@
+/*
+ * Main initialisation code for the ARM kernel
+ *
+ * Copyright (C) 2007 - 2010 B Labs Ltd.
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include INC_ARCH(linker.h)
+#include INC_ARCH(asm.h)
+#include INC_SUBARCH(mm.h)
+#include INC_SUBARCH(cpu.h)
+#include INC_SUBARCH(mmu_ops.h)
+#include INC_SUBARCH(perfmon.h)
+#include INC_GLUE(memlayout.h)
+#include INC_GLUE(memory.h)
+#include INC_GLUE(mapping.h)
+#include INC_GLUE(message.h)
+#include INC_GLUE(syscall.h)
+#include INC_GLUE(init.h)
+#include INC_GLUE(smp.h)
+#include INC_PLAT(platform.h)
+#include INC_API(syscall.h)
+#include INC_API(kip.h)
+#include INC_API(mutex.h)
+
+unsigned int kernel_mapping_end;
+
+void print_sections(void)
+{
+ dprintk("_start_kernel: ",(unsigned int)_start_kernel);
+ dprintk("_start_text: ",(unsigned int)_start_text);
+ dprintk("_end_text: ", (unsigned int)_end_text);
+ dprintk("_start_data: ", (unsigned int)_start_data);
+ dprintk("_end_data: ", (unsigned int)_end_data);
+ dprintk("_start_vectors: ",(unsigned int)_start_vectors);
+ dprintk("arm_high_vector: ",(unsigned int)arm_high_vector);
+ dprintk("_end_vectors: ",(unsigned int)_end_vectors);
+ dprintk("_start_kip: ", (unsigned int) _start_kip);
+ dprintk("_end_kip: ", (unsigned int) _end_kip);
+ dprintk("_start_syscalls: ", (unsigned int) _start_syscalls);
+ dprintk("_end_syscalls: ", (unsigned int) _end_syscalls);
+ dprintk("_start_bootstack: ", (unsigned int)_start_bootstack);
+ dprintk("_end_bootstack: ", (unsigned int)_end_bootstack);
+ dprintk("_start_bootstack: ", (unsigned int)_start_bootstack);
+ dprintk("_end_bootstack: ", (unsigned int)_end_bootstack);
+ dprintk("_start_init_pgd: ", (unsigned int)_start_init_pgd);
+ dprintk("_end_init_pgd: ", (unsigned int)_end_init_pgd);
+ dprintk("_end_kernel: ", (unsigned int)_end_kernel);
+ dprintk("_start_init: ", (unsigned int)_start_init);
+ dprintk("_end_init: ", (unsigned int)_end_init);
+ dprintk("_end: ", (unsigned int)_end);
+}
+
+/* This calculates what address the kip field would have in userspace. */
+#define KIP_USR_OFFSETOF(kip, field) ((void *)(((unsigned long)&kip.field - \
+ (unsigned long)&kip) + USER_KIP_PAGE))
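+
+/*
+ * Editorial sketch: e.g. the userspace-visible address of kip.time is
+ * simply its byte offset within the kip plus USER_KIP_PAGE:
+ *
+ *	void *time_usr = KIP_USR_OFFSETOF(kip, time);
+ */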
+
+/* The kip is non-standard, using 0xBB to indicate mine for now ;-) */
+void kip_init()
+{
+ struct utcb **utcb_ref;
+
+ /*
+ * TODO: Adding utcb size might be useful
+ */
+ memset(&kip, 0, PAGE_SIZE);
+ memcpy(&kip, "L4\230K", 4); /* Name field = l4uK */
+ kip.api_version = 0xBB;
+ kip.api_subversion = 1;
+ kip.api_flags = 0; /* LE, 32-bit architecture */
+ kip.kdesc.magic = 0xBBB;
+ kip.kdesc.version = CODEZERO_VERSION;
+ kip.kdesc.subversion = CODEZERO_SUBVERSION;
+ strncpy(kip.kdesc.date, __DATE__, KDESC_DATE_SIZE);
+ strncpy(kip.kdesc.time, __TIME__, KDESC_TIME_SIZE);
+
+ kip_init_syscalls();
+
+ /* KIP + 0xFF0 is pointer to UTCB segment start address */
+ utcb_ref = (struct utcb **)((unsigned long)&kip + UTCB_KIP_OFFSET);
+
+ add_boot_mapping(virt_to_phys(&kip), USER_KIP_PAGE, PAGE_SIZE,
+ MAP_USR_RO);
+ printk("%s: Kernel built on %s, %s\n", __KERNELNAME__,
+ kip.kdesc.date, kip.kdesc.time);
+}
+
+void vectors_init()
+{
+ unsigned int size = ((u32)_end_vectors - (u32)arm_high_vector);
+
+ /* Map the vectors in high vector page */
+ add_boot_mapping(virt_to_phys(arm_high_vector),
+ ARM_HIGH_VECTOR, size, MAP_KERN_RWX);
+
+ /* Kernel memory trapping is enabled at this point. */
+}
+
+
+#include
+#include
+#include
+
+/* This is what an idle task needs */
+static DECLARE_PERCPU(struct capability, pmd_cap);
+
+/*
+ * FIXME: Add this when initializing kernel resources
+ * This is a hack.
+ */
+void setup_idle_caps()
+{
+ struct capability *cap = &per_cpu(pmd_cap);
+
+	cap_list_init(&current->cap_list);
+ cap->type = CAP_RTYPE_MAPPOOL | CAP_TYPE_QUANTITY;
+ cap->size = 50;
+
+ link_init(&cap->list);
+	cap_list_insert(cap, &current->cap_list);
+}
+
+/*
+ * Set up current stack's beginning, and initial page tables
+ * as a valid task environment for idle task for current cpu
+ */
+void setup_idle_task()
+{
+ memset(current, 0, sizeof(struct ktcb));
+
+ current->space = &init_space;
+ TASK_PGD(current) = &init_pgd;
+
+ /* Initialize space caps list */
+	cap_list_init(&current->space->cap_list);
+
+ /*
+ * FIXME: This must go to kernel resources init.
+ */
+
+ /* Init scheduler structs */
+ sched_init_task(current, TASK_PRIO_NORMAL);
+
+ /*
+ * If using split page tables, kernel
+ * resources must point at the global pgd
+ * TODO: We may need this for V6, in the future
+ */
+#if defined(CONFIG_SUBARCH_V7)
+ kernel_resources.pgd_global = &init_global_pgd;
+#endif
+}
+
+void remove_initial_mapping(void)
+{
+ /* At this point, execution is on virtual addresses. */
+ remove_section_mapping(virt_to_phys(_start_kernel));
+}
+
+void init_finalize(void)
+{
+ /* Set up idle task capabilities */
+ setup_idle_caps();
+
+ platform_timer_start();
+
+#if defined (CONFIG_SMP)
+ /* Tell other cores to continue */
+ secondary_run_signal = 1;
+ dmb();
+#endif
+
+ idle_task();
+}
+
+void start_kernel(void)
+{
+ print_early("\n"__KERNELNAME__": start kernel...\n");
+
+ // print_sections();
+
+ /* Early cpu initialization */
+ cpu_startup();
+
+ /*
+ * Initialise section mappings
+ * for the kernel area
+ */
+ init_kernel_mappings();
+
+ print_early("\n"__KERNELNAME__": Init kernel mappings...\n");
+
+ /*
+ * Enable virtual memory
+ * and jump to virtual addresses
+ */
+ start_virtual_memory();
+
+ /*
+ * Set up initial page tables and ktcb
+ * as a valid environment for idle task
+ */
+ setup_idle_task();
+
+ /*
+ * Initialise platform-specific
+ * page mappings, and peripherals
+ */
+ platform_init();
+
+ /* Can only print when uart is mapped */
+ printk("%s: Virtual memory enabled.\n",
+ __KERNELNAME__);
+
+ /* Identify CPUs and system */
+ system_identify();
+
+ sched_init();
+
+ /*
+ * Map and enable high vector page.
+ * Faults can be handled after here.
+ */
+ vectors_init();
+
+ /* Try to initialize secondary cores if there are any */
+ smp_start_cores();
+
+ /* Remove one-to-one kernel mapping */
+ remove_initial_mapping();
+
+ /* Remap 1MB kernel sections as 4Kb pages. */
+ remap_as_pages((void *)page_align(_start_kernel),
+ (void *)page_align_up(_end_kernel));
+
+ /*
+ * Initialise kip and map
+ * for userspace access
+ */
+ kip_init();
+
+ /* Initialise system call page */
+ syscall_init();
+
+ /* Init performance monitor, if enabled */
+ perfmon_init();
+
+ /*
+ * Evaluate system resources
+ * and set up resource pools
+ */
+ init_system_resources(&kernel_resources);
+
+ /*
+ * Free boot memory, switch to first
+ * task's stack and start scheduler
+ */
+ init_finalize();
+
+ BUG();
+}
+
Index: src/glue/or1k/cache.c
===================================================================
--- src/glue/or1k/cache.c (nonexistent)
+++ src/glue/or1k/cache.c (revision 7)
@@ -0,0 +1,49 @@
+
+/*
+ * Wrapper for arm specific cache related functions
+ *
+ * Copyright (C) 2009 B Labs Ltd.
+ */
+
+#include INC_GLUE(cache.h)
+
+void invalidate_cache(void)
+{
+}
+
+
+void invalidate_icache(void)
+{
+}
+
+void invalidate_dcache(void)
+{
+}
+
+void clean_dcache(void)
+{
+}
+
+void clean_invalidate_dcache(void)
+{
+}
+
+void clean_invalidate_cache(void)
+{
+}
+
+void drain_writebuffer(void)
+{
+}
+
+void invalidate_tlb(void)
+{
+}
+
+void invalidate_itlb(void)
+{
+}
+
+void invalidate_dtlb(void)
+{
+}
Index: src/glue/or1k/SConscript
===================================================================
--- src/glue/or1k/SConscript (nonexistent)
+++ src/glue/or1k/SConscript (revision 7)
@@ -0,0 +1,22 @@
+
+# Inherit global environment
+import os, sys, glob
+
+PROJRELROOT = '../../'
+
+sys.path.append(PROJRELROOT)
+
+from config.projpaths import *
+from configure import *
+
+Import('env', 'symbols')
+
+# The set of source files associated with this SConscript file.
+src_local = ['init.c', 'memory.c', 'systable.c', 'irq.c', 'cache.c', 'debug.c']
+
+for name, val in symbols:
+ if 'CONFIG_SMP' == name:
+ src_local += ['smp.c', 'ipi.c']
+
+obj = env.Object(src_local)
+Return('obj')
Index: src/glue/or1k/systable.c
===================================================================
--- src/glue/or1k/systable.c (nonexistent)
+++ src/glue/or1k/systable.c (revision 7)
@@ -0,0 +1,203 @@
+/*
+ * System Calls
+ *
+ * Copyright (C) 2007 Bahadir Balban
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include INC_GLUE(memlayout.h)
+#include INC_GLUE(syscall.h)
+#include INC_GLUE(mapping.h)
+#include INC_GLUE(debug.h)
+#include INC_SUBARCH(mm.h)
+#include INC_SUBARCH(perfmon.h)
+#include INC_API(syscall.h)
+#include INC_API(kip.h)
+
+void kip_init_syscalls(void)
+{
+ kip.irq_control = ARM_SYSCALL_PAGE + sys_irq_control_offset;
+ kip.thread_control = ARM_SYSCALL_PAGE + sys_thread_control_offset;
+ kip.ipc_control = ARM_SYSCALL_PAGE + sys_ipc_control_offset;
+ kip.map = ARM_SYSCALL_PAGE + sys_map_offset;
+ kip.ipc = ARM_SYSCALL_PAGE + sys_ipc_offset;
+ kip.capability_control = ARM_SYSCALL_PAGE + sys_capability_control_offset;
+ kip.unmap = ARM_SYSCALL_PAGE + sys_unmap_offset;
+ kip.exchange_registers = ARM_SYSCALL_PAGE + sys_exchange_registers_offset;
+ kip.thread_switch = ARM_SYSCALL_PAGE + sys_thread_switch_offset;
+ kip.schedule = ARM_SYSCALL_PAGE + sys_schedule_offset;
+ kip.getid = ARM_SYSCALL_PAGE + sys_getid_offset;
+ kip.container_control = ARM_SYSCALL_PAGE + sys_container_control_offset;
+ kip.time = ARM_SYSCALL_PAGE + sys_time_offset;
+ kip.mutex_control = ARM_SYSCALL_PAGE + sys_mutex_control_offset;
+ kip.cache_control = ARM_SYSCALL_PAGE + sys_cache_control_offset;
+}
+
+/* Jump table for all system calls. */
+syscall_fn_t syscall_table[SYSCALLS_TOTAL];
+
+
+int arch_sys_ipc(syscall_context_t *regs)
+{
+ return sys_ipc((l4id_t)regs->r0, (l4id_t)regs->r1,
+ (unsigned int)regs->r2);
+}
+
+int arch_sys_thread_switch(syscall_context_t *regs)
+{
+ return sys_thread_switch();
+}
+
+int arch_sys_thread_control(syscall_context_t *regs)
+{
+ return sys_thread_control((unsigned int)regs->r0,
+ (struct task_ids *)regs->r1);
+}
+
+int arch_sys_exchange_registers(syscall_context_t *regs)
+{
+ return sys_exchange_registers((struct exregs_data *)regs->r0,
+ (l4id_t)regs->r1);
+}
+
+int arch_sys_schedule(syscall_context_t *regs)
+{
+ return sys_schedule();
+}
+
+int arch_sys_getid(syscall_context_t *regs)
+{
+ return sys_getid((struct task_ids *)regs->r0);
+}
+
+int arch_sys_unmap(syscall_context_t *regs)
+{
+ return sys_unmap((unsigned long)regs->r0, (unsigned long)regs->r1,
+ (unsigned int)regs->r2);
+}
+
+int arch_sys_irq_control(syscall_context_t *regs)
+{
+ return sys_irq_control((unsigned int)regs->r0,
+ (unsigned int)regs->r1,
+ (l4id_t)regs->r2);
+}
+
+int arch_sys_ipc_control(syscall_context_t *regs)
+{
+ return sys_ipc_control();
+}
+
+int arch_sys_map(syscall_context_t *regs)
+{
+ return sys_map((unsigned long)regs->r0, (unsigned long)regs->r1,
+ (unsigned long)regs->r2, (unsigned long)regs->r3,
+ (l4id_t)regs->r4);
+}
+
+int arch_sys_capability_control(syscall_context_t *regs)
+{
+ return sys_capability_control((unsigned int)regs->r0,
+ (unsigned int)regs->r1,
+ (void *)regs->r2);
+}
+
+int arch_sys_container_control(syscall_context_t *regs)
+{
+ return sys_container_control((unsigned int)regs->r0,
+ (unsigned int)regs->r1,
+ (void *)regs->r2);
+}
+
+int arch_sys_time(syscall_context_t *regs)
+{
+ return sys_time((struct timeval *)regs->r0, (int)regs->r1);
+}
+
+int arch_sys_mutex_control(syscall_context_t *regs)
+{
+ return sys_mutex_control((unsigned long)regs->r0, (int)regs->r1);
+}
+
+int arch_sys_cache_control(syscall_context_t *regs)
+{
+ return sys_cache_control((unsigned long)regs->r0,
+ (unsigned long)regs->r1,
+ (unsigned int)regs->r2);
+}
+
+/*
+ * Initialises the system call jump table, for kernel to use.
+ * Also maps the system call page into userspace.
+ */
+void syscall_init()
+{
+ syscall_table[sys_ipc_offset >> 2] = (syscall_fn_t)arch_sys_ipc;
+ syscall_table[sys_thread_switch_offset >> 2] = (syscall_fn_t)arch_sys_thread_switch;
+ syscall_table[sys_thread_control_offset >> 2] = (syscall_fn_t)arch_sys_thread_control;
+ syscall_table[sys_exchange_registers_offset >> 2] = (syscall_fn_t)arch_sys_exchange_registers;
+ syscall_table[sys_schedule_offset >> 2] = (syscall_fn_t)arch_sys_schedule;
+ syscall_table[sys_getid_offset >> 2] = (syscall_fn_t)arch_sys_getid;
+ syscall_table[sys_unmap_offset >> 2] = (syscall_fn_t)arch_sys_unmap;
+ syscall_table[sys_irq_control_offset >> 2] = (syscall_fn_t)arch_sys_irq_control;
+ syscall_table[sys_ipc_control_offset >> 2] = (syscall_fn_t)arch_sys_ipc_control;
+ syscall_table[sys_map_offset >> 2] = (syscall_fn_t)arch_sys_map;
+ syscall_table[sys_capability_control_offset >> 2] = (syscall_fn_t)arch_sys_capability_control;
+ syscall_table[sys_container_control_offset >> 2] = (syscall_fn_t)arch_sys_container_control;
+ syscall_table[sys_time_offset >> 2] = (syscall_fn_t)arch_sys_time;
+ syscall_table[sys_mutex_control_offset >> 2] = (syscall_fn_t)arch_sys_mutex_control;
+ syscall_table[sys_cache_control_offset >> 2] = (syscall_fn_t)arch_sys_cache_control;
+
+ add_boot_mapping(virt_to_phys(&__syscall_page_start),
+ ARM_SYSCALL_PAGE, PAGE_SIZE, MAP_USR_RX);
+}
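+
+/*
+ * Editorial sketch: each sys_*_offset is the call's byte offset in the
+ * syscall page, and entries are one 4-byte instruction apart, so the
+ * table slot is (offset >> 2), exactly as the dispatcher below computes
+ * from the trapped address:
+ */
+static inline syscall_fn_t sketch_lookup(unsigned long swi_addr)
+{
+	return syscall_table[(swi_addr & 0xFF) >> 2];
+}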
+
+/* Checks a syscall is legitimate and dispatches to appropriate handler. */
+int syscall(syscall_context_t *regs, unsigned long swi_addr)
+{
+ int ret = 0;
+
+ /* Check if genuine system call, coming from the syscall page */
+ if ((swi_addr & ARM_SYSCALL_PAGE) == ARM_SYSCALL_PAGE) {
+ /* Check within syscall offset boundary */
+ if (((swi_addr & syscall_offset_mask) >= 0) &&
+ ((swi_addr & syscall_offset_mask) <= syscalls_end_offset)) {
+
+ /* Do system call accounting, if enabled */
+ system_account_syscall();
+ system_account_syscall_type(swi_addr);
+
+ /* Start measure syscall timing, if enabled */
+ system_measure_syscall_start();
+
+ /* Quick jump, rather than compare each */
+ ret = (*syscall_table[(swi_addr & 0xFF) >> 2])(regs);
+
+ /* End measure syscall timing, if enabled */
+ system_measure_syscall_end(swi_addr);
+
+ } else {
+ printk("System call received from call @ 0x%lx."
+ "Instruction: 0x%lx.\n", swi_addr,
+ *((unsigned long *)swi_addr));
+ return -ENOSYS;
+ }
+ } else {
+ printk("System call exception from unknown location 0x%lx."
+ "Discarding.\n", swi_addr);
+ return -ENOSYS;
+ }
+
+ if (current->flags & TASK_SUSPENDING) {
+ BUG_ON(current->nlocks);
+ sched_suspend_sync();
+ }
+
+ return ret;
+}
+
Index: src/glue/or1k/debug.c
===================================================================
--- src/glue/or1k/debug.c (nonexistent)
+++ src/glue/or1k/debug.c (revision 7)
@@ -0,0 +1,43 @@
+
+#include
+#include
+#include INC_SUBARCH(perfmon.h)
+#include INC_GLUE(debug.h)
+
+#if defined (CONFIG_DEBUG_PERFMON_KERNEL)
+
+#define CYCLES_PER_COUNTER_TICKS 64
+void system_measure_syscall_end(unsigned long swi_address)
+{
+ volatile u64 cnt = perfmon_read_cyccnt() * CYCLES_PER_COUNTER_TICKS;
+ unsigned int call_offset = (swi_address & 0xFF) >> 2;
+
+ /* Number of syscalls */
+ u64 call_count =
+ *(((u64 *)&system_accounting.syscalls) + call_offset);
+
+ /* System call timing structure */
+ struct syscall_timing *st =
+ (struct syscall_timing *)
+ &system_accounting.syscall_timings + call_offset;
+
+ /* Set min */
+ if (st->min == 0)
+ st->min = cnt;
+ else if (st->min > cnt)
+ st->min = cnt;
+
+ /* Set max */
+ if (st->max < cnt)
+ st->max = cnt;
+
+ st->total += cnt;
+
+ /* Average = total timings / total calls */
+ st->avg = st->total / call_count;
+
+ /* Update total */
+ system_accounting.syscall_timings.all_total += cnt;
+}
+
+#endif
Index: src/glue/or1k/memory.c
===================================================================
--- src/glue/or1k/memory.c (nonexistent)
+++ src/glue/or1k/memory.c (revision 7)
@@ -0,0 +1,74 @@
+/*
+ * ARM virtual memory implementation
+ *
+ * Copyright (C) 2007 Bahadir Balban
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include INC_SUBARCH(mm.h)
+#include INC_GLUE(memlayout.h)
+#include INC_GLUE(memory.h)
+#include INC_GLUE(mapping.h)
+#include INC_PLAT(offsets.h)
+#include INC_ARCH(linker.h)
+#include INC_ARCH(asm.h)
+
+/*
+ * Return arch-specific pte flags from generic space flags.
+ */
+unsigned int space_flags_to_ptflags(unsigned int flags)
+{
+ switch (flags) {
+ case MAP_FAULT:
+ return __MAP_FAULT;
+ case MAP_USR_RW:
+ return __MAP_USR_RW;
+ case MAP_USR_RO:
+ return __MAP_USR_RO;
+ case MAP_KERN_RW:
+ return __MAP_KERN_RW;
+ case MAP_USR_IO:
+ return __MAP_USR_IO;
+ case MAP_KERN_IO:
+ return __MAP_KERN_IO;
+ case MAP_USR_RWX:
+ return __MAP_USR_RWX;
+ case MAP_KERN_RWX:
+ return __MAP_KERN_RWX;
+ case MAP_USR_RX:
+ return __MAP_USR_RX;
+ case MAP_KERN_RX:
+ return __MAP_KERN_RX;
+ /*
+ * Don't remove this, if a flag with
+ * same value is introduced, compiler will warn us
+ */
+ case MAP_INVALID_FLAGS:
+ default:
+ return MAP_INVALID_FLAGS;
+ }
+
+ return 0;
+}
+
+void task_init_registers(struct ktcb *task, unsigned long pc)
+{
+ task->context.pc = (u32)pc;
+ task->context.spsr = ARM_MODE_USR;
+}
+
+
+/*
+ * Copies all global kernel entries that a user process
+ * should have in its pgd. In split page table setups
+ * this is a noop.
+ */
+void copy_pgd_kernel_entries(pgd_table_t *to)
+{
+ arch_copy_pgd_kernel_entries(to);
+}
+
Index: src/glue/or1k/irq.c
===================================================================
--- src/glue/or1k/irq.c (nonexistent)
+++ src/glue/or1k/irq.c (revision 7)
@@ -0,0 +1,4 @@
+/*
+ * ARM Generic irq handler
+ */
+
Index: src/glue/or1k
===================================================================
--- src/glue/or1k (nonexistent)
+++ src/glue/or1k (revision 7)
src/glue/or1k
Property changes :
Added: svn:mergeinfo
## -0,0 +0,0 ##
Index: src/arch/or1k/head.S.ARM
===================================================================
--- src/arch/or1k/head.S.ARM (nonexistent)
+++ src/arch/or1k/head.S.ARM (revision 7)
@@ -0,0 +1,72 @@
+/*
+ * ARM Kernel entry point
+ *
+ * Copyright (C) 2007 Bahadir Balban
+ */
+
+#include INC_ARCH(asm.h)
+
+#define C15_C0_M 0x0001 /* MMU */
+#define C15_C0_A 0x0002 /* Alignment */
+#define C15_C0_C 0x0004 /* (D) Cache */
+#define C15_C0_W 0x0008 /* Write buffer */
+#define C15_C0_B 0x0080 /* Endianness */
+#define C15_C0_S 0x0100 /* System */
+#define C15_C0_R 0x0200 /* ROM */
+#define C15_C0_Z 0x0800 /* Branch Prediction */
+#define C15_C0_I 0x1000 /* I cache */
+#define C15_C0_V 0x2000 /* High vectors */
+
+
+/*
+ * This is the entry point of the L4 ARM architecture.
+ * The boot loader must call _start with the processor in privileged
+ * mode and mmu disabled.
+ */
+ .section .text.head
+BEGIN_PROC(_start)
+ /* Setup status register for supervisor mode, interrupts disabled */
+ msr cpsr_fcxs, #ARM_MODE_SVC
+
+ /* Disable mmu if it is enabled */
+ mrc p15, 0, r0, c1, c0, 0
+ bic r0, r0, #C15_C0_M @ Disable MMU
+ bic r0, r0, #C15_C0_C @ Disable (D) Cache
+ bic r0, r0, #C15_C0_I @ Disable I cache
+ bic r0, r0, #C15_C0_W @ Disable Write buffer
+ bic r0, r0, #C15_C0_Z @ Disable Branch prediction
+ mcr p15, 0, r0, c1, c0, 0
+
+ /* Setup boot stack (physical address) */
+ ldr sp, _kernel_init_stack
+
+ /* Exception stacks are defined in vector page */
+ msr cpsr_fc, #ARM_NOIRQ_ABT
+ ldr sp, _kernel_abt_stack
+ msr cpsr_fc, #ARM_NOIRQ_IRQ
+ ldr sp, _kernel_irq_stack
+ msr cpsr_fc, #ARM_NOIRQ_FIQ
+ ldr sp, _kernel_fiq_stack
+ msr cpsr_fc, #ARM_NOIRQ_UND
+ ldr sp, _kernel_und_stack
+ msr cpsr_fc, #ARM_NOIRQ_SVC
+
+ /* Jump to start_kernel */
+ bl start_kernel
+
+ /* Never reached */
+1:
+ b 1b
+
+_kernel_init_stack:
+ .word _bootstack_physical
+
+/* Exception stacks are defined in vector page */
+_kernel_abt_stack:
+ .word __abt_stack_high
+_kernel_irq_stack:
+ .word __irq_stack_high
+_kernel_fiq_stack:
+ .word __fiq_stack_high
+_kernel_und_stack:
+ .word __und_stack_high
src/arch/or1k/head.S.ARM
Property changes :
Added: svn:mergeinfo
## -0,0 +0,0 ##
Index: src/arch/or1k/vectors.S.ARM
===================================================================
--- src/arch/or1k/vectors.S.ARM (nonexistent)
+++ src/arch/or1k/vectors.S.ARM (revision 7)
@@ -0,0 +1,898 @@
+/*
+ * The vectors page. Includes all exception handlers.
+ *
+ * Copyright (C) 2007 Bahadir Balban
+ */
+
+#include INC_ARCH(asm.h)
+#include INC_ARCH(asm-macros.S)
+
+.section .data.vectors
+__vector_vaddr:
+
+BEGIN_PROC(arm_high_vector)
+ b arm_reset_exception
+ b arm_undef_exception_reentrant
+ b arm_swi_exception
+ b arm_prefetch_abort_exception_reentrant
+ b arm_data_abort_exception_reentrant
+ nop
+ b arm_irq_exception_reentrant_with_schedule
+ b arm_fiq_exception
+END_PROC(arm_high_vector)
+
+.balign 4
+
+/*
+ * vect_reset
+ *
+ * Upon Entry:
+ * - All registers are undefined and insignificant,
+ * - FIQ/IRQs are disabled.
+ * - PC: 0x00000000
+ *
+ *
+ * PURPOSE:
+ * CPU always starts executing from this vector
+ * upon a HW reset. It may also be used as a SW reset.
+ */
+BEGIN_PROC(arm_reset_exception)
+END_PROC(arm_reset_exception)
+
+
+#if defined(CONFIG_SUBARCH_V5)
+ .macro disable_irqs rx
+ mrs \rx, cpsr_fc
+ orr \rx, #ARM_IRQ_BIT
+ msr cpsr_fc, \rx
+ .endm
+ .macro enable_irqs rx
+ mrs \rx, cpsr_fc
+ bic \rx, #ARM_IRQ_BIT
+ msr cpsr_fc, \rx
+ .endm
+#endif
+
+#if defined (CONFIG_SUBARCH_V7) || defined(CONFIG_SUBARCH_V6)
+ .macro disable_irqs rx
+ cpsid ia
+ .endm
+ .macro enable_irqs rx
+ cpsie ia
+ .endm
+#endif
+
+#if defined (CONFIG_SUBARCH_V7)
+ .macro clear_exclusive
+ clrex
+ .endm
+#else
+ .macro clear_exclusive
+ .endm
+#endif
+
+ /* Only works in SVC MODE. Know what you are doing! */
+ .macro get_current rx
+ bic \rx, sp, #0xFF0
+ bic \rx, \rx, #0xF
+ .endm
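+	/*
+	 * Editorial note: the ktcb sits at the base of an aligned kernel
+	 * stack region, so clearing the low 12 bits of SP_SVC yields the
+	 * current ktcb. C equivalent (sketch):
+	 *
+	 *	current = (struct ktcb *)((unsigned long)sp & ~0xFFF);
+	 */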
+ /* Saves the address of system call argument registers pushed to stack
+ * to the current task's ktcb. */
+ .macro ktcb_ref_saved_regs regs_addr, ktcb, regs_off
+ get_current \ktcb
+ ldr \regs_off, =syscall_regs_offset
+ ldr \regs_off, [\regs_off]
+ str \regs_addr, [\ktcb, \regs_off]
+ .endm
+	/* Determines from the SPSR whether irqs should be enabled during
+	 * abort handling. If the abort occurred in userspace, irqs are to
+	 * be enabled. If it came from kernel mode, irqs are enabled only
+	 * if they were already enabled before the abort. */
+ .macro can_abort_enable_irqs temp1, r_spsr
+ and \temp1, \r_spsr, #ARM_MODE_MASK
+ cmp \temp1, #ARM_MODE_USR @ Usermode indicates irqs can be enabled.
+ beq 1f @ Z flag set. Which indicates "can enable"
+	and \temp1, \r_spsr, #ARM_IRQ_BIT @ A clear irq bit indicates irqs were enabled
+	cmp \temp1, #0 @ before the abort, so they can safely be enabled.
+ 1: @ Z flag must be set for "can enable" here.
+ .endm
+
+ /* Pushes the user sp and lr to stack, updates the stack pointer */
+ .macro push_user_sp_lr sp
+ @ stack state: (Low) |..|..|->(Original)| (High)
+ stmfd \sp, {sp, lr}^ @ Push USR banked regs to stack.
+ nop @ Need a NOOP after push/popping user registers.
+ @ stack state: (Low) |SP_USR|LR_USR|->(Original)| (High)
+	sub \sp, \sp, #8 @ Adjust SP, since stack ops on banked regs have no writeback.
+ @ stack state: (Low) |->SP_USR|LR_USR|(Original)| (High)
+ .endm
+
+ .macro is_psr_usr rx
+ and \rx, \rx, #ARM_MODE_MASK
+ cmp \rx, #ARM_MODE_USR
+ .endm
+
+/* These really both read the same unified FSR and FAR registers */
+#if defined (CONFIG_SUBARCH_V5)
+	.macro cp15_read_ifsr rx
+		mrc p15, 0, \rx, c5, c0, 0 @ Read FSR (Tells why the fault occurred)
+	.endm
+	.macro cp15_read_ifar rx
+		mrc p15, 0, \rx, c6, c0, 0 @ Read FAR (Contains the faulting address)
+	.endm
+	.macro cp15_read_dfsr rx
+		mrc p15, 0, \rx, c5, c0, 0 @ Read FSR (Tells why the fault occurred)
+	.endm
+	.macro cp15_read_dfar rx
+		mrc p15, 0, \rx, c6, c0, 0 @ Read FAR (Contains the faulted data address)
+	.endm
+#endif
+
+/* These read the distinguished IFSR, IFAR, DFSR and DFAR registers */
+#if defined (CONFIG_SUBARCH_V6) || defined (CONFIG_SUBARCH_V7)
+	.macro cp15_read_ifsr rx
+		mrc p15, 0, \rx, c5, c0, 1 @ Read IFSR (Tells why the fault occurred)
+	.endm
+	.macro cp15_read_ifar rx
+		mrc p15, 0, \rx, c6, c0, 2 @ Read IFAR (Contains the faulting instruction address)
+	.endm
+	.macro cp15_read_dfsr rx
+		mrc p15, 0, \rx, c5, c0, 0 @ Read DFSR (Tells why the fault occurred)
+	.endm
+	.macro cp15_read_dfar rx
+		mrc p15, 0, \rx, c6, c0, 0 @ Read DFAR (Contains the faulted data address)
+	.endm
+#endif
+
+#define UNDEF_R0 0
+#define UNDEF_SPSR -4
+#define UNDEF_R14 -8
+
+/*
+ * vect_undef
+ *
+ * Upon Entry:
+ * - R14: Address of next instruction after undefined instruction
+ * - PC: 0x00000004
+ * - IRQs are disabled (CPSR[7] = 1)
+ *
+ *
+ * PURPOSE:
+ * A co-processor instruction not supported by the core can be
+ * emulated here. Also unrecognised/invalid instructions are handled.
+ */
+BEGIN_PROC(arm_undef_exception_reentrant)
+ clear_exclusive
+ str lr, [sp, #UNDEF_R14] @ Store undef address
+ mrs lr, spsr @ Get SPSR
+ str lr, [sp, #UNDEF_SPSR] @ Store SPSR
+ str r0, [sp, #UNDEF_R0] @ Store r0
+ @ NOTE: Can increase undef nest here.
+	mov r0, sp @ Keep the current sp in r0
+ mrs lr, cpsr @ Change to SVC mode.
+ bic lr, #ARM_MODE_MASK
+ orr lr, lr, #ARM_MODE_SVC
+ msr cpsr_fc, r14
+ @ FIXME: Ensure 8-byte stack here.
+ str lr, [sp, #-8]! @ Save lr_svc 2 words down from interrupted SP_SVC
+ @ Transfer Undef state to SVC
+ ldr lr, [r0, #UNDEF_R14]
+ str lr, [sp, #4]
+ @ Stack state: |LR_SVC<-|LR_UNDEF|{original SP_SVC}|
+ ldr lr, [r0, #UNDEF_SPSR]
+ ldr r0, [r0, #UNDEF_R0]
+ stmfd sp!, {r0-r3,r12,lr}
+	@ Stack state: |R0<-|R1|R2|R3|R12|UNDEF_SPSR|LR_SVC|LR_UNDEF|{original SP_SVC}|
+	push_user_sp_lr sp @ NOTE: These must be pushed to avoid trashing them if preempted
+	@ Stack state: |SP_USR<-|LR_USR|R0|R1|R2|R3|R12|UNDEF_SPSR|LR_SVC|LR_UNDEF|{original SP_SVC}|
+
+ @ All undef state saved. Can safely enable irqs here, if need be.
+ ldr r3, [sp, #28] @ Load UNDEF_SPSR
+ can_abort_enable_irqs r0, r3 @ Judge if irqs can be enabled depending on prev state.
+ bne 1f @ Branch here based on previous irq judgement.
+ enable_irqs r3
+1:
+	/* Now check in what mode the exception occurred, and return that mode's LR in r2.
+	 * Also populate the r0,r1,r2 parameters for undefined_instr_handler.
+	 */
+	ldr r1, [sp, #28] @ Load UNDEF_SPSR
+	is_psr_usr r0 @ Test if UNDEF_SPSR was user mode.
+	ldrne r2, [sp, #32] @ Abort occurred in kernel, load LR_SVC
+	ldreq r2, [sp, #4] @ Abort occurred in user, load LR_USR
+	ldr r0, [sp, #36] @ Load LR_UNDEF saved previously.
+ mov lr, pc
+ ldr pc, =undefined_instr_handler @ Jump to function outside this page.
+ disable_irqs r0 @ Disable irqs to avoid corrupting spsr.
+ @ (i.e. an interrupt could overwrite spsr with current psr)
+ ldmfd sp, {sp, lr}^ @ Restore user sp and lr which might have been corrupt on preemption
+ nop @ User reg mod requires nop
+ add sp, sp, #8 @ Update SP.
+ ldmfd sp!, {r0-r3,r12,lr} @ Restore previous context. (note, lr has spsr)
+ msr spsr_cxsf, r14 @ Restore spsr register from lr.
+ @ Stack state: |LR_SVC<-|LR_PREV(UNDEF)|{original SP_SVC}|
+ ldmfd sp!, {r14, pc}^ @ Return, restoring cpsr. Note r14 gets r14_svc,
+ @ and pc gets lr_undef. Saved at #4 and #8 offsets
+ @ down from where svc stack had left.
+END_PROC(arm_undef_exception_reentrant)
+
+/*
+ * vect_swi
+ *
+ * Upon Entry:
+ * - R14: Address of next instruction after the SWI
+ * - PC: 0x00000008
+ * - R0-R12: Depending on the system call some of them contain
+ * indicators of what the exception means.
+ * - IRQs are disabled (CPSR[7] = 1)
+ * - SWI instruction's bits [7:0] may contain SWI indicator
+ *
+ * PURPOSE:
+ * Used for trapping into a debugger or OS kernel via system calls.
+ * Argument registers from R0 up to R12 and bits [7:0] of the causing SWI
+ * instruction contain hints of what to do with this exception. What
+ * R0-R12 contain depends on what userspace has put in them. Note this
+ * is the only exception userspace can generate, and thus the only one
+ * where userspace controls what goes into r0-rx.
+ *
+ * RECAP:
+ * Normally across a function call, only r0-r3 are used for passing parameters.
+ * Why r0-r3 only but not r4, r5...? See the APCS (ARM procedure call standard).
+ * Short answer: r4-r12 must be preserved across procedures but r0-r3 can be
+ * trashed because they're set aside for argument passing. Arguments beyond the
+ * first four go on the stack. Note the APCS is a *suggestion* rather than an
+ * enforcement. So if a userspace stub library is created that, say, preserves
+ * and uses r0-r9 for a system call, and the system call handler (this) knows
+ * about it, that is a perfectly valid setup. In fact this is what we do here:
+ * we don't strictly use r0-r3. Depending on the system call, the set of input
+ * registers (and output registers used to return results from the system call)
+ * may be redefined. These are documented for each system call in the reference
+ * manual. Another caveat: rather than bits [7:0] of the SWI, we use the address
+ * offset of the SWI instruction within the system call vector to determine
+ * which system call it is.
+ */
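+/*
+ * Illustrative sketch (not the kernel's actual code): with the trapped
+ * SWI's address in hand (the handler below passes it to syscall() in r1),
+ * the dispatcher could recover the syscall index from its offset within
+ * the syscall page, since each slot there is one 4-byte SWI.
+ * syscall_page_base stands for the kernel virtual address of that page:
+ *
+ *	static inline int syscall_index(unsigned long swi_addr)
+ *	{
+ *		/* Each syscall slot is a single 4-byte SWI instruction *​/
+ *		return (swi_addr - syscall_page_base) >> 2;
+ *	}
+ */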
+BEGIN_PROC(arm_swi_exception)
+ clear_exclusive
+ sub lr, lr, #4 @ Get address of swi instruction user executed.
+ stmfd sp, {r0-r12,sp,lr}^ @ Push arguments, LR_USR and SP_USR to stack.
+ nop
+
+	@ Future optimisation 1:
+	@ For all syscalls we need not push any more than r8, but we push up to
+	@ r12 because upon a fork, a child's easiest way to restore its user
+	@ registers is to pop them from the stack during return_from_syscall. In
+	@ future, fork could return back to here, save all context into the child
+	@ from the actual registers instead of reading from the stack, and then return.
+
+ @ Future optimisation 2:
+ @ SP_USR MUST be pushed here, otherwise a kernel preemption could
+ @ cause user mode of another process to overwrite SP_USR. The reason we
+ @ save it here is because the preemption path does not currently save it
+ @ if it is a kernel preemption. User SP can also be used here, as the
+ @ user might have pushed data to its stack to be used by system calls.
+	@ But we don't plan to pass data to the kernel in this way, so saving of
+ @ SP_USR can be done in preemption path as an optimisation.
+
+ /*
+ * The LR_usr is important here, because the user application uses a BL
+ * to jump to the system call SWI, so the LR_usr contains the return
+ * address, i.e. the next instruction after the *jumping* instruction to
+ * the system call SWI (not the one after the swi itself, which is in
+ * LR_svc).
+ */
+
+ sub sp, sp, #60 @ stmfd on user registers can't writeback the SP. We do it manually.
+	mrs r0, spsr_fc	@ The psr also needs saving in case this context is interrupted.
+ stmfd sp!, {r0}
+ enable_irqs r0
+ mov r0, sp @ Current SP has pointer to all saved context.
+ ktcb_ref_saved_regs r0, r1, r2 @ Save syscall context pointer in ktcb
+ mov r1, lr @ Pass swi instruction address in LR as arg1
+ mov lr, pc
+ ldr pc, =syscall
+
+.global return_from_syscall; @ Newly created threads use this path to return,
+return_from_syscall: @ if they duplicated another thread's address space.
+	disable_irqs r1	@ Not disabling irqs at this point lets SP_USR and the spsr
+			@ get corrupted, causing havoc.
+ ldmfd sp!, {r1}
+ msr spsr, r1
+	add sp, sp, #4	@ Skip r0's location, since r0 already has the returned result.
+	@ Note we're obliged to preserve at least r3-r8 because they're MRs.
+	ldmfd sp!, {r1-r12}	@ Restore r1-r12 pushed to stack earlier. r0 already has return result.
+ ldmfd sp, {sp}^ @ Restore user stack pointer, which might have been corrupt on preemption
+ nop
+ add sp, sp, #4 @ Update sp.
+ ldmfd sp!, {lr} @ Load userspace return address
+ movs pc, lr
+END_PROC(arm_swi_exception)
+
+/* Minimal abort state saved on the abort mode stack right after the
+ * data or prefetch abort vector enters: */
+#define ABT_R0		0
+#define ABT_SPSR	-4
+#define ABT_R14		-8
+
+/*
+ * vect_pabt
+ *
+ * Upon Entry:
+ * - R14_abt: Address of next instruction after aborted instruction
+ * - R14_usr: Address of return instruction in last function call**
+ * - PC: 0x0000000c
+ * - IRQs are disabled (CPSR[7] = 1)
+ *
+ *
+ * PURPOSE:
+ * Used for handling instructions that caused *memory aborts* during
+ * the *prefetching* of the instruction. The instruction is also marked
+ * as invalid by the core. It handles the cause for the memory abort.
+ *
+ * (One reason why a memory abort would occur is when we were entering
+ * into a new page region that contained executable code and was not
+ * present in memory, or its physical-to-virtual translation was not
+ * present in the page tables. See other causes for memory aborts)
+ *
+ * **In case the abort occurred in userspace. This is useful if the abort
+ * was due to a null/invalid function pointer call. Since R14_abt
+ * includes the aborting instruction itself, R14_usr gives the clue to
+ * where this call came from.
+ */
+BEGIN_PROC(arm_prefetch_abort_exception_reentrant)
+ clear_exclusive
+ sub lr, lr, #4 @ lr-4 points at aborted instruction
+ str lr, [r13, #ABT_R14] @ Store abort address.
+ mrs lr, spsr @ Get SPSR
+ str lr, [r13, #ABT_SPSR] @ Store SPSR
+ str r0, [r13, #ABT_R0] @ Store R0 to use as temp register.
+ mov r0, r13 @ SP to R0
+ mrs lr, cpsr @ Change to SVC mode.
+ bic lr, #ARM_MODE_MASK
+ orr lr, lr, #ARM_MODE_SVC
+ msr cpsr_fc, r14
+ @ FIXME: Ensure 8-byte stack here.
+ str lr, [sp, #-8]! @ NOTE: Switched mode! Save LR_SVC 2 words down from SP_SVC.
+transfer_pabt_state_to_svc: @ Move data saved on PABT stack to SVC stack.
+ ldr lr, [r0, #ABT_R14]
+ str lr, [sp, #4]
+ @ Stack state: |LR_SVC<-|LR_PABT|{original SP_SVC}|
+ ldr lr, [r0, #ABT_SPSR]
+ ldr r0, [r0, #ABT_R0]
+ stmfd sp!, {r0-r3,r12,lr}
+ @ Stack state: |R0<-|R1|R2|R3|R12|PABT_SPSR|LR_SVC|LR_PABT|{original SP_SVC}|
+ push_user_sp_lr sp @ NOTE: These must be pushed to avoid trashing if preempted
+ @ Stack state: |SP_USR<-|LR_USR|R0|R1|R2|R3|R12|PABT_SPSR|LR_SVC|LR_PABT|{original SP_SVC}|
+read_pabt_state:
+ cp15_read_ifsr r1 @ Reads FSR on ARMv5, IFSR on ARMv6-v7. Fault status information
+ cp15_read_ifar r2 @ Reads FAR on ARMv5, IFAR on ARMv6-v7. Fault address information
+ @ All abort state and (FAR/FSR) saved. Can safely enable irqs here, if need be.
+ ldr r3, [sp, #28] @ Load PABT_SPSR
+ can_abort_enable_irqs r0, r3 @ Judge if irqs can be enabled depending on prev state.
+	bne 1f	@ Skip enabling irqs if the previous state disallows it.
+ enable_irqs r3
+1:
+ ldr r3, [sp, #28] @ Load PABT_SPSR to r3, the spsr for the aborted mode
+ ldr r0, [sp, #36] @ Load LR_PABT - 4 saved previously. (Address that aborted)
+ mov lr, pc
+ ldr pc, =prefetch_abort_handler @ Jump to function outside this page.
+ disable_irqs r0 @ Disable irqs to avoid corrupting spsr.
+ @ (i.e. an interrupt could overwrite spsr with current psr)
+ ldmfd sp, {sp, lr}^ @ Restore user sp and lr which might have been corrupt on preemption
+ nop @ User reg mod requires nop
+ add sp, sp, #8 @ Update SP.
+ ldmfd sp!, {r0-r3,r12,lr} @ Restore previous context. (note, lr has spsr)
+ msr spsr_cxsf, r14 @ Restore spsr register from lr.
+ @ Stack state: |LR_SVC<-|LR_PREV(PABT)|{original SP_SVC}|
+ ldmfd r13!, {r14, pc}^ @ Return, restoring cpsr. Note r14 gets r14_svc,
+				@ and pc gets lr_pabt. Saved at #4 and #8 offsets
+ @ down from where svc stack had left.
+END_PROC(arm_prefetch_abort_exception_reentrant)
+
+/*
+ * vect_dabt
+ *
+ * Upon Entry:
+ * - R14_abt: Address of next instruction after aborted instruction
+ * - PC: 0x00000010
+ * - IRQs are disabled (CPSR[7] = 1)
+ *
+ *
+ * PURPOSE:
+ * Used for handling instructions that caused *memory aborts* during
+ * the *execution* of the current instruction. This may happen if the
+ * instruction accessed a memory address (e.g. LDR/STR) that is not
+ * defined as part of the currently executing process (aka illegal
+ * access). Another possibility is the address is within the address
+ * space of the process, but it is not mapped, i.e. does not have
+ * physical-to-virtual translation entry in the page tables.
+ */
+BEGIN_PROC(arm_data_abort_exception)
+ sub lr, lr, #8 @ lr-8 points at aborted instruction
+ mrc p15, 0, r2, c5, c0, 0 @ Read FSR
+ mrc p15, 0, r1, c6, c0, 0 @ Read FAR
+ mov r0, lr @ Get data abort address
+ mov r5, lr @ Save it in r5 in case r0 will get trashed
+ mov lr, pc @ Save return address
+ ldr pc, =data_abort_handler @ Jump to function outside this page.
+1:
+ b 1b
+END_PROC(arm_data_abort_exception)
+
+/*
+ * The method of saving abort state to the svc stack is identical to that
+ * of the reentrant irq vector; naturally, restoring the previous state
+ * is identical as well.
+ */
+BEGIN_PROC(arm_data_abort_exception_reentrant)
+ clear_exclusive
+ sub lr, lr, #8 @ Get abort address
+ str lr, [r13, #ABT_R14] @ Store abort address
+ mrs lr, spsr @ Get SPSR
+ str lr, [r13, #ABT_SPSR] @ Store SPSR
+ str r0, [r13, #ABT_R0] @ Store r0
+ @ NOTE: Can increase data abort nest here.
+ mov r0, r13 @ Keep current sp point in R0
+ mrs lr, cpsr @ Change to SVC mode.
+ bic lr, #ARM_MODE_MASK
+ orr lr, lr, #ARM_MODE_SVC
+ msr cpsr_fc, r14
+ @ FIXME: Ensure 8-byte stack here.
+ str lr, [sp, #-8]! @ Save lr_svc 2 words down from interrupted SP_SVC
+transfer_dabt_state_to_svc:
+ ldr lr, [r0, #ABT_R14]
+ str lr, [sp, #4]
+ @ Stack state: |LR_SVC<-|LR_DABT|{original SP_SVC}|
+ ldr lr, [r0, #ABT_SPSR]
+ ldr r0, [r0, #ABT_R0]
+ stmfd sp!, {r0-r3,r12,lr}
+ @ Stack state: |R0<-|R1|R2|R3|R12|DABT_SPSR|LR_SVC|LR_DABT|{original SP_SVC}|
+ push_user_sp_lr sp
+ @ Stack state: |SP_USR<-|LR_USR|R0|R1|R2|R3|R12|DABT_SPSR|LR_SVC|LR_DABT|{original SP_SVC}|
+read_dabt_state:
+	cp15_read_dfsr r1	@ Read DFSR (Tells why the fault occurred)
+ cp15_read_dfar r2 @ Read DFAR (Contains the faulted data address)
+ @ All abort state and (FAR/FSR) saved. Can safely enable irqs here, if need be.
+ ldr r3, [sp, #28] @ Load DABT_SPSR
+ can_abort_enable_irqs r0, r3 @ Judge if irqs can be enabled depending on prev state.
+	bne 1f	@ Skip enabling irqs if the previous state disallows it.
+ enable_irqs r3
+1:
+ ldr r0, [sp, #36] @ Load LR_DABT saved previously.
+ mov lr, pc
+ ldr pc, =data_abort_handler @ Jump to function outside this page.
+ disable_irqs r0 @ Disable irqs to avoid corrupting spsr.
+ ldmfd sp, {sp, lr}^ @ Restore user sp and lr which might have been corrupt on preemption
+ nop @ User reg mod requires nop
+ add sp, sp, #8 @ Update SP.
+ ldmfd sp!, {r0-r3,r12,lr} @ Restore previous context. (note, lr has spsr)
+ msr spsr_cxsf, r14 @ Restore spsr register from lr.
+ @ Stack state: |LR_SVC<-|LR_PREV(DABT)|{original SP_SVC}|
+ ldmfd r13!, {r14, pc}^ @ Return, restoring cpsr. Note r14 gets r14_svc,
+ @ and pc gets lr_dabt. Saved at #4 and #8 offsets
+ @ down from where svc stack had left.
+END_PROC(arm_data_abort_exception_reentrant)
+
+/*
+ * vect_irq
+ *
+ * Upon Entry:
+ * - R14: Address of next instruction after interrupted instruction.
+ * - PC: 0x00000018
+ * - IRQs are disabled (CPSR[7] = 1)
+ * - A vectored interrupt controller would also provide where to jump in
+ * order to handle the interrupt, or an irq controller in general would
+ *   provide registers that indicate what kind of interrupt has occurred.
+ *
+ *
+ * PURPOSE:
+ * Used for handling IRQs. IRQs have lower priority compared to other
+ * types of exceptions.
+ */
+
+/* The most basic handler where neither context switching nor re-entry can occur. */
+BEGIN_PROC(arm_irq_exception_basic)
+ sub lr, lr, #4
+ stmfd sp!, {r0-r3,lr}
+ mov lr, pc
+ ldr pc, =do_irq
+ ldmfd sp!, {r0-r3, pc}^
+END_PROC(arm_irq_exception_basic)
+
+/* Minimal IRQ state saved on irq stack right after irq vector enters: */
+#define IRQ_R0 0
+#define IRQ_SPSR -4
+#define IRQ_R14 -8
+
+/* A reentrant handler that uses svc mode stack to prevent banked lr_irq corruption. */
+BEGIN_PROC(arm_irq_exception_reentrant)
+ sub lr, lr, #4
+@ Save minimal state to irq stack:
+ str r14, [r13, #IRQ_R14] @ Save lr_irq
+ mrs r14, spsr @ Copy spsr
+ str r14, [r13, #IRQ_SPSR] @ Save spsr on irq stack
+ str r0, [r13, #IRQ_R0] @ Save r0.
+ mov r0, r13 @ Using r0 to keep banked sp_irq when mode is switched.
+ mrs r14, cpsr @ Get current psr (irq)
+ bic r14, #ARM_MODE_MASK @ Clear mode part from psr
+ orr r14, r14, #ARM_MODE_SVC @ Write SVC mode bits.
+ msr cpsr_fc, r14 @ Change to SVC mode.
+ str r14, [r13, #-8]! @ Save lr_svc 2 words down from where svc stack left.
+@ Transfer minimal irq state saved to svc stack:
+ ldr r14, [r0, #IRQ_R14] @ Load lr_irq to lr using r0 that contains sp_irq.
+ str r14, [r13, #4] @ Save lr_irq 1 word down from where svc stack left.
+ ldr r14, [r0, #IRQ_SPSR] @ Load irq spsr.
+ ldr r0, [r0, #IRQ_R0] @ Restore r0.
+ stmfd sp!, {r0-r3,r12,lr} @ Save all of rest of irq context to svc stack.
+	mov lr, pc		@ Call do_irq via an absolute address; a BL would be
+	ldr pc, =do_irq		@ page-relative (see the NOTE at the end of this file).
+	ldmfd sp!, {r0-r3,r12,lr}	@ Restore previous context. (note, lr has spsr)
+ msr spsr_cxsf, lr @ Restore spsr register from lr.
+ ldmfd r13!, {r14, pc}^ @ Return, restoring cpsr. Note r14 gets r14_svc,
+ @ and pc gets lr_irq. Saved at #4 and #8 offsets
+ @ down from where svc stack had left.
+END_PROC(arm_irq_exception_reentrant)
+
+ .macro need_resched rx, ry
+ get_current \rx
+ ldr \ry, =need_resched_offset
+ ldr \ry, [\ry]
+ ldr \ry, [\rx, \ry]
+ cmp \ry, #1
+ .endm
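+	/*
+	 * C equivalent (sketch): the macro leaves the flags set as if by
+	 * `current->need_resched == 1', with the field offset read from
+	 * the kernel-exported need_resched_offset word.
+	 */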
+
+/*
+ * Keeps the PSR of the last pre-empted process. This helps to tell
+ * what mode the process was in when it was preempted.
+ */
+.global preempted_psr;
+preempted_psr:
+.word 0
+.word 0
+.word 0
+.word 0
+
+/* Keeps track of how many nests of irqs have happened. */
+.global current_irq_nest_count;
+current_irq_nest_count:
+.word 0
+.word 0
+.word 0
+.word 0
+
+#if defined (CONFIG_SMP)
+ @ Rx contains the address of per cpu variable
+ .macro per_cpu adr, temp, varname
+ get_cpuid \temp
+ ldr \adr, =\varname
+ add \adr, \adr, \temp, lsl #2
+ .endm
+#else
+ .macro per_cpu adr, temp, varname
+ ldr \adr, =\varname
+ .endm
+#endif
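+/*
+ * C equivalent (sketch): per-cpu slots are one word each, indexed by
+ * cpu id, i.e. roughly `&varname[cpu_id]' on SMP builds and
+ * `&varname[0]' on uniprocessor builds.
+ */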
+
+/*
+ * FIXME: current_irq_nest_count also counts for any preempt_disable() calls.
+ * However this nesting check assumes all nests come from real irqs.
+ * We should make this check just the real ones.
+ */
+#define IRQ_NESTING_MAX 32
+ .macro inc_irq_cnt_with_overnest_check rx, ry
+ per_cpu \rx, \ry, current_irq_nest_count @ Get per-cpu address of variable
+ ldr \ry, [\rx]
+ add \ry, \ry, #1 @ No need for atomic inc since irqs are disabled.
+ str \ry, [\rx]
+ cmp \ry, #IRQ_NESTING_MAX @ Check no more than max nests, and die miserably if so.
+ ldrge pc, =irq_overnest_error
+ .endm
+
+	@ This decrement need not be atomic: if we are decrementing, preemption is
+	@ already *disabled*. With preemption ruled out, the only possible race is
+	@ against irqs. If an irq preempts us during the decrement and modifies the
+	@ count, it is responsible for restoring it to the value it read before it
+	@ returns. So effectively nothing that runs during the decrement affects the
+	@ value of the count.
+ .macro dec_irq_nest_cnt rx, ry
+ per_cpu \ry, \rx, current_irq_nest_count
+ ldr \rx, [\ry]
+ sub \rx, \rx, #1
+ str \rx, [\ry]
+ .endm
+ .macro in_process_context rx, ry
+ per_cpu \rx, \ry, current_irq_nest_count
+ ldr \rx, [\rx]
+ cmp \rx, #0
+ .endm
+ /* If interrupted a process (as opposed to another irq), saves spsr value to preempted_psr */
+ .macro cmp_and_save_process_psr rx, ry
+ in_process_context \rx, \ry @ If nest count is 0, a running process is preempted.
+ bne 9999f @ Branch ahead if not a process
+ per_cpu \rx, \ry, preempted_psr @ Get per-cpu preempted psr
+	mrs \ry, spsr	@ Re-read spsr since the register was trashed
+ str \ry, [\rx] @ Store it in per-cpu preempted psr
+ 9999:
+ .endm
+
+ /*
+ * Clear irq bits on register.
+ *
+ * If ARMv5, only I-bit is cleared, but if ARMv6-v7,
+ * A-bit is also cleared.
+ */
+ .macro clr_irq_bits_on_reg rx
+ bic \rx, #ARM_IRQ_BIT
+#if defined (CONFIG_SUBARCH_V6) || defined (CONFIG_SUBARCH_V7)
+ bic \rx, #ARM_A_BIT
+#endif
+ .endm
+
+#define CONTEXT_PSR 0
+#define CONTEXT_R0 4
+#define CONTEXT_R1 8
+#define CONTEXT_R2 12
+#define CONTEXT_R3 16
+#define CONTEXT_R4 20
+#define CONTEXT_R5 24
+#define CONTEXT_R6 28
+#define CONTEXT_R7 32
+#define CONTEXT_R8 36
+#define CONTEXT_R9	40
+#define CONTEXT_R10 44
+#define CONTEXT_R11 48
+#define CONTEXT_R12 52
+#define CONTEXT_R13 56
+#define CONTEXT_R14 60
+#define CONTEXT_PC 64
+
+/*
+ * TODO: Optimization:
+ * May use SRS/RFE on irq exception _only_. But not
+ * yet aware of its implications. Only irq handler can
+ * do it because RFE enables interrupts unconditionally.
+ */
+BEGIN_PROC(arm_irq_exception_reentrant_with_schedule)
+ clear_exclusive
+ sub lr, lr, #4
+ str lr, [r13, #IRQ_R14] @ Save lr_irq
+ mrs r14, spsr @ Copy spsr
+ str r14, [r13, #IRQ_SPSR] @ Save spsr on irq stack
+ str r0, [r13, #IRQ_R0] @ Save r0.
+ cmp_and_save_process_psr r0, r14 @ R14 should have spsr here.
+ inc_irq_cnt_with_overnest_check r0, r14
+ mov r0, r13 @ Using r0 to keep banked sp_irq when mode is switched.
+ mrs r14, cpsr @ Get current psr (irq)
+ bic r14, #ARM_MODE_MASK @ Clear mode part from psr
+ orr r14, r14, #ARM_MODE_SVC @ Write SVC mode bits.
+ msr cpsr_fc, r14 @ Change to SVC mode.
+ @ FIXME: Ensure 8-byte aligned stack here! Make sure to restore original state later!
+ str r14, [r13, #-8]! @ Save lr_svc 2 words down from where svc stack left. SP updated.
+@ Transfer minimal irq state to svc stack:
+ ldr r14, [r0, #IRQ_R14] @ Load lr_irq to lr using r0 that contains sp_irq.
+ str r14, [r13, #4] @ Save lr_irq 1 word down from where svc stack left.
+ ldr r14, [r0, #IRQ_SPSR] @ Load irq spsr.
+ ldr r0, [r0, #IRQ_R0] @ Restore r0.
+ stmfd sp!, {r0-r3,r12,lr} @ Save all of rest of irq context to svc stack.
+ mov lr, pc
+ ldr pc, =do_irq @ Read irq number etc. Free to re-enable irqs here.
+ @ stack state: (Low) r0|r1|r2|r3|r12|SPSR|LR_SVC|LR_IRQ| (High)
+
+/*
+ * Decision point for taking the preemption path
+ */
+#if !defined(CONFIG_PREEMPT_DISABLE)
+ per_cpu r0, r1, current_irq_nest_count
+ ldr r0, [r0]
+	cmp r0, #1	@ Expect 1 as lowest since each irq increases preempt cnt by 1.
+	bgt return_to_prev_context	@ if (irq_nest > 1) return_to_prev_context();
+	need_resched r0, r1	@ Z set if current->need_resched == 1
+	beq preemption_path	@ if (irq_nest == 1 && need_resched) take the preemption
+				@ path; else fall through to return_to_prev_context.
+#endif
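+/*
+ * C sketch of the decision above (control flow only):
+ *
+ *	if (irq_nest_count > 1)
+ *		return_to_prev_context();	// nested irq: plain return
+ *	else if (need_resched)
+ *		preemption_path();		// irq_nest == 1: may schedule
+ *	else
+ *		return_to_prev_context();
+ */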
+
+/*
+ * Return to previous context path
+ */
+return_to_prev_context:
+ dec_irq_nest_cnt r0, r1
+ disable_irqs r0 @ Disable irqs to avoid corrupting spsr.
+ ldmfd sp!, {r0-r3,r12,lr} @ Restore previous context. (note, lr has spsr)
+ msr spsr_cxsf, r14 @ Restore spsr register from lr.
+ @ stack state: (Low) |LR_SVC<-|LR_PREV(IRQ)|{original SP_SVC}| (High)
+ ldmfd r13!, {r14, pc}^ @ Return, restoring cpsr. Note r14 gets r14_svc,
+ @ and pc gets lr_irq. Saved at #4 and #8 offsets
+ @ down from where svc stack had left.
+
+/*
+ * Preemption path
+ */
+#if !defined(CONFIG_PREEMPT_DISABLE)
+preemption_path:
+ disable_irqs r0 @ Interrupts can corrupt stack state.
+ get_current r0 @ Get the interrupted process
+	@ stack state: (Low) |->r0|r1|r2|r3|r12|SPSR|LR_SVC|LR_IRQ| (High)
+save_interrupted_context:
+	add sp, sp, #4
+	@ stack state: (Low) |r0|->r1|r2|r3|r12|SPSR|LR_SVC|LR_IRQ| (High)
+	ldmfd sp!, {r1-r3, r12, lr}
+	@ stack state: (Low) |r0|..|..|..|..|..|->LR_SVC|LR_IRQ| (High)
+ str lr, [r0, #CONTEXT_PSR]
+ is_psr_usr lr
+ add r0, r0, #CONTEXT_R1 @ Points at register save location for #CONTEXT_R1
+ stmia r0!, {r1-r12}
+ ldmfd sp!, {r1-r2} @ At this point SP_SVC is at its original svc location.
+ @ stack state: (Low) |r0|..|..|..|..|..|..|..|->(Original)| (High)
+ @ register state: r0 = (register save loc for #CONTEXT_R13) r1 = LR_SVC, r2 = LR_IRQ
+ beq save_usr_context
+save_svc_context:
+	stmib r0, {r1-r2}	@ Save LR_SVC and LR_IRQ (the return address) in advancing locations.
+ str sp, [r0] @ Current sp is where sp_svc has left, and r0 at #CONTEXT_SP loc.
+ sub r0, r0, #CONTEXT_R13 @ Go back to first word from SP position.
+ ldr r1, [sp, #-32] @ Load r0 from stack
+ str r1, [r0, #CONTEXT_R0] @ Save r0
+ b prepare_schedule @ All registers saved.
+save_usr_context:
+ sub r0, r0, #CONTEXT_R13
+ str r2, [r0, #CONTEXT_PC] @ Save Program counter
+	@ LR_SVC needs restoring because it won't be pushed to the context frame. SP_SVC is already up-to-date.
+ mov lr, r1
+ stmfd sp, {sp, lr}^ @ Push USR banked regs to stack.
+ @ stack state: (Low) |r0|..|..|..|..|..|SP_USR|LR_USR|->(Original)| (High)
+ nop @ Need a NOP after twiddling with usr registers.
+ sub sp, sp, #8 @ Adjust SP, since stack op on banked regs is no writeback.
+ @ stack state: (Low) |r0|..|..|..|..|..|->SP_USR|LR_USR|(Original)| (High)
+ ldmfd sp!, {r1-r2} @ Pop USR Banked regs.
+ @ stack state: (Low) |r0|..|..|..|..|..|..|..|->(Original)| (High)
+ str r1, [r0, #CONTEXT_R13] @ Save SP_USR to context frame.
+ str r2, [r0, #CONTEXT_R14] @ Save LR_USR to context frame.
+ ldr r1, [sp, #-32]
+ str r1, [r0, #CONTEXT_R0]
+ @ stack state: (Low) |..|..|..|..|..|..|..|..|->(Original)| (High)
+prepare_schedule:
+ mov lr, pc
+ ldr pc, =schedule
+1:
+ b 1b /* To catch if schedule returns in irq mode */
+#endif /* End of !CONFIG_PREEMPT_DISABLE */
+
+END_PROC(arm_irq_exception_reentrant_with_schedule)
+
+/*
+ * Context switch implementation.
+ *
+ * Upon entry:
+ *
+ * - r0 = current ktcb ptr, r1 = next ktcb ptr. r2 and r3 = insignificant.
+ * - The current mode is always SVC, but the call may be coming from interrupt
+ * or process context.
+ * - If coming from interrupt, the interrupted context is already copied to current
+ * ktcb in the irq handler, before coming here. Interrupted context can be SVC or USR.
+ *
+ * PURPOSE: Handles all paths from irq exception, thread_switch system call,
+ * and sleeping in the kernel.
+ *
+ * NOTES:
+ * - If coming from a process context, the current process context needs saving here.
+ * - From irq contexts, preemption is disabled, i.e. the preemption count is 1, because
+ *   irqs naturally increase the preemption count. From process context the preemption
+ *   count is 0. Process context disables preemption during schedule(), but re-enables it
+ *   before calling switch_to(). Irq and process contexts are thus distinguished by
+ *   preemption_count. Furthermore, irqs are also disabled shortly before calling
+ *   switch_to() from both contexts. This happens at points where the stack state
+ *   would be irrecoverable if an irq occurred.
+ */
+BEGIN_PROC(arch_context_switch)
+ clear_exclusive
+ in_process_context r2, r3 @ Note this depends on preempt count being 0.
+ beq save_process_context @ Voluntary switch needs explicit saving of current state.
+ dec_irq_nest_cnt r2, r3 @ Soon leaving irq context, so reduce preempt count here.
+ b load_next_context @ Interrupted context already saved by irq handler.
+save_process_context: @ Voluntary process schedules enter here:
+ mrs r2, cpsr_fc
+ str r2, [r0]
+ stmib r0, {r0-r14} @ Voluntary scheduling always in SVC mode, so using svc regs.
+	str r14, [r0, #CONTEXT_PC]	@ Store R14 into the PC slot. R14 has the return address for switch_to().
+load_next_context:
+ @ stack state: (Low) |..|..|..|..|..|..|..|..|..|->(Original)| (High)
+ mov sp, r1
+ ldr r0, [sp, #CONTEXT_PSR] @ Load r0 with SPSR
+ clr_irq_bits_on_reg r0 @ Enable irqs on will-be-restored context.
+ msr spsr_fcxs, r0 @ Restore spsr from r0.
+ is_psr_usr r0
+ bne load_next_context_svc @ Loading user context is different than svc.
+load_next_context_usr:
+ ldmib sp, {r0-r14}^ @ Load all including banked user regs.
+ ldr lr, [sp, #CONTEXT_PC] @ Load value of PC to r14
+ orr sp, sp, #0xFF0
+ orr sp, sp, #0x8 @ 8-byte aligned.
+ movs pc, lr @ Jump to user changing modes.
+load_next_context_svc:
+ ldmib sp, {r0-r15}^ @ Switch to svc context and jump, loading R13 and R14 from stack.
+ @ This is OK since the jump is to current context.
+END_PROC(arch_context_switch)
+
+
+/*
+ * vect_fiq
+ *
+ * Upon Entry:
+ * - R14: Address of next instruction after interrupted instruction.
+ * - PC: 0x00000014
+ * - FIQs are disabled (CPSR[6] = 1)
+ * - IRQs are disabled (CPSR[7] = 1)
+ * - As in IRQ, the irq controller would provide registers that indicate
+ *   what kind of interrupt has occurred.
+ *
+ * PURPOSE:
+ * Handling of high-priority interrupts. FIQs have highest priority after
+ * reset and data abort exceptions. They're mainly used for achieving
+ * low-latency interrupts, e.g. for DMA.
+ */
+BEGIN_PROC(arm_fiq_exception)
+END_PROC(arm_fiq_exception)
+
+/* * * * * * * * * * * * * * * * * * * * * * * *
+ * External functions with absolute addresses *
+ * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * NOTE: Notes on relative and absolute symbols on this file:
+ *
+ * Note that branches (B and BL) are *RELATIVE* on ARM. So no need to take any
+ * special action to access symbols within this file, even though this page
+ * (in virtual memory) is relocated to another address at run-time (high or low
+ * vectors) - this is an address other than where it is linked at, at
+ * compile-time.
+ *
+ * To access external symbols from this file (e.g. calling some function in the
+ * kernel) one needs to use the `LDR pc, =external_symbol' pseudo-instruction
+ * (note the "=") and use absolute addressing. This automatically generates an
+ * inline data word within the current module and indirectly loads the value in
+ * that word to resolve the undefined reference. All other methods (LDR, B
+ * instructions, or the ADR pseudo-instruction) generate relative addresses, and
+ * they will complain for external symbols because a relative offset cannot be
+ * calculated for an unknown distance. In conclusion, relative branches are
+ * useful for accessing symbols on this page, but they mean nothing outside it,
+ * because the page is relocated at run-time. So any *relative* access to a
+ * target outside this page resolves relative to wherever this page happens to
+ * be at that moment.
+ */
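+/*
+ * Example: both forms below assemble, but only the literal-pool form
+ * keeps working after this page is relocated at run-time:
+ *
+ *	b	1f			@ PC-relative: fine within this page
+ *	ldr	pc, =data_abort_handler	@ Absolute via literal pool: fine anywhere
+ */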
+
+/* * * * * * * * * * * * * * * * *
+ * Stacks for Exception Vectors *
+ * * * * * * * * * * * * * * * * */
+.global __stacks_end;
+.global __abt_stack_high;
+.global __irq_stack_high;
+.global __fiq_stack_high;
+.global __und_stack_high;
+
+/*
+ * These are also linked at high vectors, just as any other symbol
+ * on this page.
+ */
+.balign 4
+.equ __abt_stack_high, (__abt_stack - __vector_vaddr + 0xFFFF0000);
+.equ __irq_stack_high, (__irq_stack - __vector_vaddr + 0xFFFF0000);
+.equ __fiq_stack_high, (__fiq_stack - __vector_vaddr + 0xFFFF0000);
+.equ __und_stack_high, (__und_stack - __vector_vaddr + 0xFFFF0000);
+
+/*
+ * NOTE: This could be cache line aligned.
+ * (use a macro, e.g. ____arm_asm_cache_aligned)
+ */
+.balign 4
+
+/* 16 bytes each per-cpu, up to 8 cpus */
+__stacks_end: .space 128
+__abt_stack: .space 128
+__irq_stack: .space 128
+__fiq_stack: .space 128
+__und_stack: .space 128
+
+
src/arch/or1k/vectors.S.ARM
Property changes :
Added: svn:mergeinfo
## -0,0 +0,0 ##
Index: src/arch/or1k/memcpy.S.ARM
===================================================================
--- src/arch/or1k/memcpy.S.ARM (nonexistent)
+++ src/arch/or1k/memcpy.S.ARM (revision 7)
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2010 B Labs Ltd.
+ *
+ * Author: Prem Mallappa
+ *
+ * Description: Optimized memcpy for ARM
+ *
+ */
+
+#include INC_ARCH(asm.h)
+/*
+void*
+_memcpy(void *dst, const void *src, register uint len)
+*/
+BEGIN_PROC(_memcpy)
+ push {r0, r4-r11, lr}
+ loop32:
+ cmp r2, #32
+ blt loop16
+ ldmia r1!, {r4 - r11}
+ stmia r0!, {r4 - r11}
+ sub r2, r2, #32
+ b loop32
+
+ loop16:
+ cmp r2, #16
+ blt loop8
+ ldmia r1!, {r4 - r7}
+ stmia r0!, {r4 - r7}
+ sub r2, r2, #16
+ b loop16
+
+ loop8:
+ cmp r2, #8
+ blt loop4
+ ldmia r1!, {r4, r5}
+ stmia r0!, {r4, r5}
+ sub r2, r2, #8
+ b loop8
+
+ loop4:
+ cmp r2, #4
+ blt end
+ ldmia r1!, {r4}
+ stmia r0!, {r4}
+ sub r2, r2, #4
+ b loop4
+	end:
+		teq r2, #0		@ Copy the remaining 1-3 trailing bytes...
+		ldrneb r4, [r1], #1
+		strneb r4, [r0], #1
+		subne r2, r2, #1
+		bne end			@ ...one byte at a time.
+		pop {r0, r4-r11, pc}	@ Restore regs and return; r0 = original dst.
+END_PROC(_memcpy)
src/arch/or1k/memcpy.S.ARM
Property changes :
Added: svn:mergeinfo
## -0,0 +0,0 ##
Index: src/arch/or1k/memset.S.ARM
===================================================================
--- src/arch/or1k/memset.S.ARM (nonexistent)
+++ src/arch/or1k/memset.S.ARM (revision 7)
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2010 (C) B Labs.
+ * Author: Prem Mallappa
+ * Description: Optimized memset for ARM
+ */
+
+#include INC_ARCH(asm.h)
+
+/*
+void *
+_memset(void *dst, int c, int len)
+*/
+BEGIN_PROC(_memset)
+ stmfd sp!, {r4 - r11, lr}
+
+ and r1, r1, #255 /* c &= 0xff */
+	orr r1, r1, r1, lsl #8		/* c |= c<<8 */
+	orr r1, r1, r1, lsl #16		/* c |= c<<16 */
+ mov r4, r1
+ cmp r2, #8
+ blt 4f
+ movge r5, r4
+ cmpge r2, #16
+ blt 8f
+ movge r6, r4
+ movge r7, r4
+ cmpge r2, #32
+ blt 16f
+ movge r8, r4
+ movge r9, r4
+ movge r10, r4
+ movge r11, r4
+ 32:
+ cmp r2, #32
+ blt 16f
+ stmia r0!, {r4 - r11}
+ sub r2, r2, #32
+ b 32b
+
+ 16:
+ cmp r2, #16
+ blt 8f
+ stmia r0!, {r4 - r7}
+ sub r2, r2, #16
+ b 16b
+
+ 8:
+ cmp r2, #8
+ blt 4f
+ stmia r0!, {r4, r5}
+ sub r2, r2, #8
+ b 8b
+
+ 4:
+ cmp r2, #4
+ blt end
+ stmia r0!, {r4}
+ sub r2, r2, #4
+ b 4b
+ end:
+ teq r2, #0
+ strneb r4, [r0, #0]
+ subne r2, r2, #1
+ addne r0, r0, #1
+ bne end
+
+ ldmfd sp!, {r4 - r11, pc}
+END_PROC(_memset)
src/arch/or1k/memset.S.ARM
Property changes :
Added: svn:mergeinfo
## -0,0 +0,0 ##
Index: src/arch/or1k/syscall.S.ARM
===================================================================
--- src/arch/or1k/syscall.S.ARM (nonexistent)
+++ src/arch/or1k/syscall.S.ARM (revision 7)
@@ -0,0 +1,36 @@
+/*
+ * The syscall page.
+ *
+ * Exported to userspace, used merely for entering the kernel.
+ * Actual handling happens elsewhere.
+ *
+ * Copyright (C) 2007 Bahadir Balban
+ */
+
+#include INC_ARCH(asm.h)
+
+.balign 4096
+.section .data.syscalls
+
+.global __syscall_page_start;
+__syscall_page_start:
+
+/* LR_USR is inspected to find out which system call was invoked. */
+BEGIN_PROC(arm_system_calls)
+ swi 0x14 @ ipc /* 0x0 */
+ swi 0x14 @ thread_switch /* 0x4 */
+ swi 0x14 @ thread_control /* 0x8 */
+ swi 0x14 @ exchange_registers /* 0xc */
+ swi 0x14 @ schedule /* 0x10 */
+ swi 0x14 @ unmap /* 0x14 */
+ swi 0x14 @ space_control /* 0x18 */
+ swi 0x14 @ processor_control /* 0x1c */
+ swi 0x14 @ memory_control /* 0x20 */
+ swi 0x14 @ getid /* 0x24 */
+ swi 0x14 @ kread /* 0x28 */
+ swi 0x14 @ kmem_control /* 0x2C */
+ swi 0x14 @ time /* 0x30 */
+ swi 0x14 @ mutex_control /* 0x34 */
+ swi 0x14 @ cache_control /* 0x38 */
+END_PROC(arm_system_calls)
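+/*
+ * Userspace stub sketch (hypothetical): a wrapper BLs into the fixed
+ * slot for its call, e.g. thread_switch at offset 0x4 of the syscall
+ * page whose address was discovered from the KIP:
+ *
+ *	ldr ip, =syscall_page_addr	@ hypothetical word set up from the KIP
+ *	ldr ip, [ip]
+ *	mov lr, pc			@ emulate BL: set the return address...
+ *	add pc, ip, #0x4		@ ...and jump to the thread_switch slot
+ */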
+
src/arch/or1k/syscall.S.ARM
Property changes :
Added: svn:mergeinfo
## -0,0 +0,0 ##
Index: src/arch/or1k/v5/atomic.S.ARM
===================================================================
--- src/arch/or1k/v5/atomic.S.ARM (nonexistent)
+++ src/arch/or1k/v5/atomic.S.ARM (revision 7)
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2010 B Labs
+ *
+ * Author: Bahadir Balban
+ */
+
+#include INC_ARCH(asm.h)
+
+/*
+ * Atomically and destructively reads a byte, i.e.
+ * the byte is read and zero is written back. This is
+ * useful for reading irq counts.
+ *
+ * @r0 = byte address
+ */
+BEGIN_PROC(l4_atomic_dest_readb)
+ mov r1, #0
+ swpb r2, r1, [r0]
+ mov r0, r2
+ mov pc, lr
+END_PROC(l4_atomic_dest_readb)
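+/*
+ * Usage sketch from C (prototype assumed, not declared here):
+ *
+ *	extern unsigned int l4_atomic_dest_readb(void *byte_addr);
+ *
+ *	pending = l4_atomic_dest_readb(&irq_pending_count);
+ */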
+
+
+
+
src/arch/or1k/v5/atomic.S.ARM
Property changes :
Added: svn:mergeinfo
## -0,0 +0,0 ##
Index: src/arch/or1k/v5/mmu_ops.S.ARM
===================================================================
--- src/arch/or1k/v5/mmu_ops.S.ARM (nonexistent)
+++ src/arch/or1k/v5/mmu_ops.S.ARM (revision 7)
@@ -0,0 +1,155 @@
+/*
+ * low-level mmu operations
+ *
+ * Copyright (C) 2007 Bahadir Balban
+ */
+
+#include INC_ARCH(asm.h)
+
+#define C15_id c0
+#define C15_control c1
+#define C15_ttb c2
+#define C15_dom c3
+#define C15_fsr c5
+#define C15_far c6
+#define C15_tlb c8
+
+#define C15_C0_M 0x0001 /* MMU */
+#define C15_C0_A 0x0002 /* Alignment */
+#define C15_C0_C 0x0004 /* (D) Cache */
+#define C15_C0_W 0x0008 /* Write buffer */
+#define C15_C0_B 0x0080 /* Endianness */
+#define C15_C0_S 0x0100 /* System */
+#define C15_C0_R 0x0200 /* ROM */
+#define C15_C0_Z 0x0800 /* Branch Prediction */
+#define C15_C0_I 0x1000 /* I cache */
+#define C15_C0_V 0x2000 /* High vectors */
+
+/* FIXME: Make sure the ops that need r0 don't trash r0, or if they do,
+ * save it on stack before these operations.
+ */
+
+/*
+ * In ARM terminology, flushing the cache means invalidating its contents.
+ * Cleaning the cache means, writing the contents of the cache back to
+ * main memory. In write-back caches the cache must be cleaned before
+ * flushing otherwise in-cache data is lost.
+ */
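+/*
+ * For example, to safely discard a write-back D-cache one would first
+ * clean and then flush (invalidate), or use the combined operation:
+ *
+ *	arm_clean_dcache();		// write dirty lines back first
+ *	arm_invalidate_dcache();	// then discard the contents
+ *	// or in one step: arm_clean_invalidate_dcache();
+ */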
+
+BEGIN_PROC(arm_set_ttb)
+ mcr p15, 0, r0, C15_ttb, c0, 0
+ mov pc, lr
+END_PROC(arm_set_ttb)
+
+BEGIN_PROC(arm_get_domain)
+ mrc p15, 0, r0, C15_dom, c0, 0
+ mov pc, lr
+END_PROC(arm_get_domain)
+
+BEGIN_PROC(arm_set_domain)
+ mcr p15, 0, r0, C15_dom, c0, 0
+ mov pc, lr
+END_PROC(arm_set_domain)
+
+BEGIN_PROC(arm_enable_mmu)
+ mrc p15, 0, r0, C15_control, c0, 0
+ orr r0, r0, #C15_C0_M
+ mcr p15, 0, r0, C15_control, c0, 0
+ mov pc, lr
+END_PROC(arm_enable_mmu)
+
+BEGIN_PROC(arm_enable_icache)
+ mrc p15, 0, r0, C15_control, c0, 0
+ orr r0, r0, #C15_C0_I
+ mcr p15, 0, r0, C15_control, c0, 0
+ mov pc, lr
+END_PROC(arm_enable_icache)
+
+BEGIN_PROC(arm_enable_dcache)
+ mrc p15, 0, r0, C15_control, c0, 0
+ orr r0, r0, #C15_C0_C
+ mcr p15, 0, r0, C15_control, c0, 0
+ mov pc, lr
+END_PROC(arm_enable_dcache)
+
+BEGIN_PROC(arm_enable_wbuffer)
+ mrc p15, 0, r0, C15_control, c0, 0
+ orr r0, r0, #C15_C0_W
+ mcr p15, 0, r0, C15_control, c0, 0
+ mov pc, lr
+END_PROC(arm_enable_wbuffer)
+
+BEGIN_PROC(arm_enable_high_vectors)
+ mrc p15, 0, r0, C15_control, c0, 0
+ orr r0, r0, #C15_C0_V
+ mcr p15, 0, r0, C15_control, c0, 0
+ mov pc, lr
+END_PROC(arm_enable_high_vectors)
+
+BEGIN_PROC(arm_invalidate_cache)
+ mov r0, #0 @ FIX THIS
+ mcr p15, 0, r0, c7, c7, 0 @ Flush I cache and D cache
+ mov pc, lr
+END_PROC(arm_invalidate_cache)
+
+BEGIN_PROC(arm_invalidate_icache)
+ mov r0, #0 @ FIX THIS
+ mcr p15, 0, r0, c7, c5, 0 @ Flush I cache
+ mov pc, lr
+END_PROC(arm_invalidate_icache)
+
+BEGIN_PROC(arm_invalidate_dcache)
+ mov r0, #0 @ FIX THIS
+ mcr p15, 0, r0, c7, c6, 0 @ Flush D cache
+ mov pc, lr
+END_PROC(arm_invalidate_dcache)
+
+BEGIN_PROC(arm_clean_dcache)
+	mrc p15, 0, r15, c7, c10, 3	@ Test/clean dcache line
+	bne arm_clean_dcache		@ Loop until the whole dcache is clean (Z set)
+ mcr p15, 0, ip, c7, c10, 4 @ Drain WB
+ mov pc, lr
+END_PROC(arm_clean_dcache)
+
+BEGIN_PROC(arm_clean_invalidate_dcache)
+1:
+ mrc p15, 0, r15, c7, c14, 3 @ Test/clean/flush dcache line
+	@ Rd == PC makes this MRC load its result into the CPSR flags,
+	@ so the BNE below loops until the whole dcache is clean.
+ bne 1b
+ mcr p15, 0, ip, c7, c10, 4 @ Drain WB
+ mov pc, lr
+END_PROC(arm_clean_invalidate_dcache)
+
+BEGIN_PROC(arm_clean_invalidate_cache)
+1:
+ mrc p15, 0, r15, c7, c14, 3 @ Test/clean/flush dcache line
+	@ Rd == PC makes this MRC load its result into the CPSR flags,
+	@ so the BNE below loops until the whole dcache is clean.
+ bne 1b
+ mcr p15, 0, ip, c7, c5, 0 @ Flush icache
+ mcr p15, 0, ip, c7, c10, 4 @ Drain WB
+ mov pc, lr
+END_PROC(arm_clean_invalidate_cache)
+
+BEGIN_PROC(arm_drain_writebuffer)
+ mov r0, #0 @ FIX THIS
+ mcr p15, 0, r0, c7, c10, 4
+ mov pc, lr
+END_PROC(arm_drain_writebuffer)
+
+BEGIN_PROC(arm_invalidate_tlb)
+	mcr p15, 0, ip, c8, c7, 0	@ Invalidate entire unified TLB
+ mov pc, lr
+END_PROC(arm_invalidate_tlb)
+
+BEGIN_PROC(arm_invalidate_itlb)
+ mov r0, #0 @ FIX THIS
+ mcr p15, 0, r0, c8, c5, 0
+ mov pc, lr
+END_PROC(arm_invalidate_itlb)
+
+BEGIN_PROC(arm_invalidate_dtlb)
+ mov r0, #0 @ FIX THIS
+ mcr p15, 0, r0, c8, c6, 0
+ mov pc, lr
+END_PROC(arm_invalidate_dtlb)
+
src/arch/or1k/v5/mmu_ops.S.ARM
Property changes :
Added: svn:mergeinfo
## -0,0 +0,0 ##
Index: src/arch/or1k/v5/exception.c
===================================================================
--- src/arch/or1k/v5/exception.c (nonexistent)
+++ src/arch/or1k/v5/exception.c (revision 7)
@@ -0,0 +1,246 @@
+/*
+ * Memory exception handling in process context.
+ *
+ * Copyright (C) 2007, 2008 Bahadir Balban
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include INC_ARCH(exception.h)
+#include INC_GLUE(memlayout.h)
+#include INC_GLUE(memory.h)
+#include INC_GLUE(mapping.h)
+#include INC_GLUE(message.h)
+#include INC_GLUE(ipc.h)
+#include INC_SUBARCH(mm.h)
+
+int check_abort_type(u32 faulted_pc, u32 fsr, u32 far, u32 spsr)
+{
+ int ret = 0;
+
+	/*
+	 * On ARMv5, prefetch aborts don't have distinguishing
+	 * status values. We validate them here and return.
+	 */
+ if (is_prefetch_abort(fsr)) {
+ dbg_abort("Prefetch abort: 0x%x\n", faulted_pc);
+
+ /* Happened in any mode other than user */
+ if (!is_user_mode(spsr)) {
+ dprintk("Unhandled kernel prefetch "
+ "abort at address ", far);
+ return -EABORT;
+ }
+ return 0;
+ }
+
+ switch (fsr & FSR_FS_MASK) {
+ /* Aborts that are expected on page faults: */
+ case DABT_PERM_PAGE:
+ dbg_abort("Page permission fault 0x%x\n", far);
+ ret = 0;
+ break;
+ case DABT_XLATE_PAGE:
+ dbg_abort("Page translation fault 0x%x\n", far);
+ ret = 0;
+ break;
+ case DABT_XLATE_SECT:
+ dbg_abort("Section translation fault 0x%x\n", far);
+ ret = 0;
+ break;
+
+ /* Aborts that can't be handled by a pager yet: */
+ case DABT_TERMINAL:
+ dprintk("Terminal fault dabt ", far);
+ ret = -EABORT;
+ break;
+ case DABT_VECTOR:
+ dprintk("Vector abort (obsolete!) ", far);
+ ret = -EABORT;
+ break;
+ case DABT_ALIGN:
+ dprintk("Alignment fault dabt ", far);
+ ret = -EABORT;
+ break;
+ case DABT_EXT_XLATE_LEVEL1:
+ dprintk("External LVL1 translation fault ", far);
+ ret = -EABORT;
+ break;
+ case DABT_EXT_XLATE_LEVEL2:
+ dprintk("External LVL2 translation fault ", far);
+ ret = -EABORT;
+ break;
+ case DABT_DOMAIN_SECT:
+ dprintk("Section domain fault dabt ", far);
+ ret = -EABORT;
+ break;
+ case DABT_DOMAIN_PAGE:
+ dprintk("Page domain fault dabt ", far);
+ ret = -EABORT;
+ break;
+ case DABT_PERM_SECT:
+ dprintk("Section permission fault dabt ", far);
+ ret = -EABORT;
+ break;
+ case DABT_EXT_LFETCH_SECT:
+ dprintk("External section linefetch "
+ "fault dabt ", far);
+ ret = -EABORT;
+ break;
+ case DABT_EXT_LFETCH_PAGE:
+ dprintk("Page perm fault dabt ", far);
+ ret = -EABORT;
+ break;
+ case DABT_EXT_NON_LFETCH_SECT:
+ dprintk("External section non-linefetch "
+ "fault dabt ", far);
+ ret = -EABORT;
+ break;
+ case DABT_EXT_NON_LFETCH_PAGE:
+ dprintk("External page non-linefetch "
+ "fault dabt ", far);
+ ret = -EABORT;
+ break;
+ default:
+ dprintk("FATAL: Unrecognised/Unknown "
+ "data abort ", far);
+ dprintk("FATAL: FSR code: ", fsr);
+ ret = -EABORT;
+ }
+
+ /*
+ * Check validity of data abort's source.
+ *
+ * FIXME: Why not use spsr to do this?
+ */
+ if (is_kernel_address(faulted_pc)) {
+ dprintk("Unhandled kernel data "
+ "abort at address ",
+ faulted_pc);
+ ret = -EABORT;
+ }
+
+ return ret;
+}
+
+#if 0
+void data_abort_handler(u32 faulted_pc, u32 fsr, u32 far)
+{
+ set_abort_type(fsr, ARM_DABT);
+
+ dbg_abort("Data abort @ PC: ", faulted_pc);
+
+ //printk("Data abort: %d, PC: 0x%x\n",
+ //current->tid, faulted_pc);
+
+ /* Check for more details */
+ if (check_aborts(faulted_pc, fsr, far) < 0) {
+ printascii("This abort can't be handled by "
+ "any pager.\n");
+ goto error;
+ }
+
+ /* This notifies the pager */
+ fault_ipc_to_pager(faulted_pc, fsr, far, L4_IPC_TAG_PFAULT);
+
+ if (current->flags & TASK_SUSPENDING) {
+ BUG_ON(current->nlocks);
+ sched_suspend_sync();
+ } else if (current->flags & TASK_EXITING) {
+ BUG_ON(current->nlocks);
+ sched_exit_sync();
+ }
+
+ return;
+
+error:
+ disable_irqs();
+ dprintk("Unhandled data abort @ PC address: ", faulted_pc);
+ dprintk("FAR:", far);
+ dprintk("FSR:", fsr);
+ printascii("Kernel panic.\n");
+ printascii("Halting system...\n");
+ while (1)
+ ;
+}
+
+void prefetch_abort_handler(u32 faulted_pc, u32 fsr, u32 far, u32 spsr)
+{
+ set_abort_type(fsr, ARM_PABT);
+
+ if (check_aborts(faulted_pc, fsr, far) < 0) {
+ printascii("This abort can't be handled by any pager.\n");
+ goto error;
+ }
+
+ /* Did the abort occur in kernel mode? */
+ if ((spsr & ARM_MODE_MASK) == ARM_MODE_SVC)
+ goto error;
+
+ fault_ipc_to_pager(faulted_pc, fsr, far, L4_IPC_TAG_PFAULT);
+
+ if (current->flags & TASK_SUSPENDING) {
+ BUG_ON(current->nlocks);
+ sched_suspend_sync();
+ } else if (current->flags & TASK_EXITING) {
+ BUG_ON(current->nlocks);
+ sched_exit_sync();
+ }
+
+ return;
+
+error:
+ disable_irqs();
+ dprintk("Unhandled prefetch abort @ address: ", faulted_pc);
+ dprintk("FAR:", far);
+ dprintk("FSR:", fsr);
+ dprintk("Aborted PSR:", spsr);
+ printascii("Kernel panic.\n");
+ printascii("Halting system...\n");
+ while (1)
+ ;
+}
+
+void undef_handler(u32 undef_addr, u32 spsr, u32 lr)
+{
+ dbg_abort("Undefined instruction @ PC: ", undef_addr);
+
+ //printk("Undefined instruction: tid: %d, PC: 0x%x, Mode: %s\n",
+ // current->tid, undef_addr,
+ // (spsr & ARM_MODE_MASK) == ARM_MODE_SVC ? "SVC" : "User");
+
+ if ((spsr & ARM_MODE_MASK) == ARM_MODE_SVC) {
+ printk("Panic: Undef in Kernel\n");
+ goto error;
+ }
+
+ fault_ipc_to_pager(undef_addr, 0, undef_addr, L4_IPC_TAG_UNDEF_FAULT);
+
+ if (current->flags & TASK_SUSPENDING) {
+ BUG_ON(current->nlocks);
+ sched_suspend_sync();
+ } else if (current->flags & TASK_EXITING) {
+ BUG_ON(current->nlocks);
+ sched_exit_sync();
+ }
+
+ return;
+
+error:
+ disable_irqs();
+ dprintk("SPSR:", spsr);
+ dprintk("LR:", lr);
+ printascii("Kernel panic.\n");
+ printascii("Halting system...\n");
+ while(1)
+ ;
+}
+#endif
+
Index: src/arch/or1k/v5/mutex.c
===================================================================
--- src/arch/or1k/v5/mutex.c (nonexistent)
+++ src/arch/or1k/v5/mutex.c (revision 7)
@@ -0,0 +1,75 @@
+/*
+ * ARM v5 Binary semaphore (mutex) implementation.
+ *
+ * Copyright (C) 2007-2010 B Labs Ltd.
+ * Author: Prem Mallappa
+ */
+
+#include
+
+/* Recap on swp:
+ * swp rx, ry, [rz]
+ * In one instruction:
+ * 1) Stores the value in ry into location pointed by rz.
+ * 2) Loads the value in the location of rz into rx.
+ * By doing so, in one instruction one can attempt to lock
+ * a word, and discover whether it was already locked.
+ */
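+/*
+ * Illustrative alternative (not used on v5): ARMv6 and later would
+ * express the same atomic acquire with exclusive monitors instead of
+ * the deprecated swp, roughly:
+ *
+ *	1: ldrex r2, [r0]	@ read lock word, mark it exclusive
+ *	teq r2, #0		@ free?
+ *	strexeq r2, r1, [r0]	@ try to claim it (r1 = MUTEX_LOCKED)
+ *	teqeq r2, #0		@ did the store succeed?
+ *	bne 1b
+ */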
+
+#define MUTEX_UNLOCKED 0
+#define MUTEX_LOCKED 1
+
+/* Spinlock convention matches the mutex: 0 = unlocked, 1 = locked. */
+void __spin_lock(unsigned long *s)
+{
+	int tmp = MUTEX_LOCKED, tmp2;
+	__asm__ __volatile__(
+		"1:			\n"
+		"swp %0, %1, [%2]	\n" /* Atomically store LOCKED, read old value */
+		"teq %0, %3		\n" /* Was it UNLOCKED before we swapped? */
+		"bne 1b			\n" /* No - someone else holds it, spin. */
+		: "=&r" (tmp2)
+		: "r" (tmp), "r"(s), "r"(MUTEX_UNLOCKED)
+		: "cc", "memory"
+	);
+}
+
+void __spin_unlock(unsigned long *s)
+{
+	int tmp = MUTEX_UNLOCKED, tmp2;
+	__asm__ __volatile__(
+		"swp %0, %1, [%2]	\n" /* Atomically store UNLOCKED */
+		: "=&r" (tmp2)
+		: "r" (tmp), "r"(s)
+		: "cc", "memory"
+	);
+}
+
+int __mutex_lock(unsigned long *s)
+{
+ int tmp = MUTEX_LOCKED, tmp2;
+ __asm__ __volatile__(
+ "swp %0, %1, [%2] \n"
+ : "=&r"(tmp2)
+ : "r"(tmp), "r"(s)
+ : "cc", "memory"
+ );
+ if (tmp2 == MUTEX_UNLOCKED)
+ return 1;
+
+ return 0;
+}
+
+void __mutex_unlock(unsigned long *s)
+{
+ int tmp, tmp2=MUTEX_UNLOCKED;
+ __asm__ __volatile__(
+ "swp %0, %1, [%2] \n"
+ : "=&r"(tmp)
+ : "r"(tmp2), "r"(s)
+ : "cc", "memory"
+ );
+ BUG_ON(tmp != MUTEX_LOCKED);
+}
Index: src/arch/or1k/v5/cache.c
===================================================================
--- src/arch/or1k/v5/cache.c (nonexistent)
+++ src/arch/or1k/v5/cache.c (revision 7)
@@ -0,0 +1,32 @@
+/*
+ * Generic layer over ARMv5-specific cache calls
+ *
+ * Copyright B-Labs Ltd 2010.
+ */
+
+#include INC_SUBARCH(mmu_ops.h)
+
+void arch_invalidate_dcache(unsigned long start, unsigned long end)
+{
+ arm_invalidate_dcache();
+}
+
+void arch_clean_invalidate_dcache(unsigned long start, unsigned long end)
+{
+ arm_clean_invalidate_dcache();
+}
+
+void arch_invalidate_icache(unsigned long start, unsigned long end)
+{
+ arm_invalidate_icache();
+}
+
+void arch_clean_dcache(unsigned long start, unsigned long end)
+{
+ arm_clean_dcache();
+}
+
+void arch_invalidate_tlb(unsigned long start, unsigned long end)
+{
+ arm_invalidate_tlb();
+}
Index: src/arch/or1k/v5/init.c
===================================================================
--- src/arch/or1k/v5/init.c (nonexistent)
+++ src/arch/or1k/v5/init.c (revision 7)
@@ -0,0 +1,146 @@
+/*
+ * ARM v5 specific init routines
+ *
+ * Copyright (C) 2007 - 2010 B Labs Ltd.
+ */
+
+#include
+#include
+#include
+#include INC_SUBARCH(mm.h)
+#include INC_SUBARCH(mmu_ops.h)
+#include INC_GLUE(memory.h)
+#include INC_GLUE(mapping.h)
+#include INC_ARCH(linker.h)
+
+SECTION(".data.pgd") ALIGN(PGD_SIZE) pgd_table_t init_pgd;
+struct address_space init_space;
+
+void system_identify(void)
+{
+
+}
+
+
+void jump(struct ktcb *task)
+{
+ __asm__ __volatile__ (
+ "mov lr, %0\n" /* Load pointer to context area */
+ "ldr r0, [lr]\n" /* Load spsr value to r0 */
+ "msr spsr, r0\n" /* Set SPSR as ARM_MODE_USR */
+ "add sp, lr, %1\n" /* Reset SVC stack */
+ "sub sp, sp, %2\n" /* Align to stack alignment */
+ "ldmib lr, {r0-r14}^\n" /* Load all USR registers */
+
+ "nop \n" /* Spec says dont touch banked registers
+ * right after LDM {no-pc}^ for one instruction */
+ "add lr, lr, #64\n" /* Manually move to PC location. */
+ "ldr lr, [lr]\n" /* Load the PC_USR to LR */
+ "movs pc, lr\n" /* Jump to userspace, also switching SPSR/CPSR */
+ :
+ : "r" (task), "r" (PAGE_SIZE), "r" (STACK_ALIGNMENT)
+ );
+}
+
+void switch_to_user(struct ktcb *task)
+{
+ arm_clean_invalidate_cache();
+ arm_invalidate_tlb();
+ arm_set_ttb(virt_to_phys(TASK_PGD(task)));
+ arm_invalidate_tlb();
+ jump(task);
+}
+
+/* Maps the early memory regions needed to bootstrap the system */
+void init_kernel_mappings(void)
+{
+ //memset((void *)virt_to_phys(&init_pgd), 0, sizeof(pgd_table_t));
+
+ /* Map kernel area to its virtual region */
+ add_section_mapping_init(align(virt_to_phys(_start_text), SZ_1MB),
+ align((unsigned int)_start_text, SZ_1MB), 1,
+ cacheable | bufferable);
+
+ /* Map kernel one-to-one to its physical region */
+ add_section_mapping_init(align(virt_to_phys(_start_text), SZ_1MB),
+ align(virt_to_phys(_start_text), SZ_1MB),
+ 1, 0);
+}
+
+/*
+ * Enable virtual memory using kernel's pgd
+ * and continue execution on virtual addresses.
+ */
+void start_virtual_memory()
+{
+ /*
+ * TTB must be 16K aligned. This is because first level tables are
+ * sized 16K.
+ */
+ if ((unsigned int)&init_pgd & 0x3FFF)
+ dprintk("kspace not properly aligned for ttb:",
+ (u32)&init_pgd);
+ // memset((void *)&kspace, 0, sizeof(pgd_table_t));
+ arm_set_ttb(virt_to_phys(&init_pgd));
+
+ /*
+ * This sets all 16 domains to zero and domain 0 to 1. The outcome
+ * is that page table access permissions are in effect for domain 0.
+ * All other domains have no access whatsoever.
+ */
+ arm_set_domain(1);
+
+ /* Enable everything before mmu permissions are in place */
+ arm_enable_caches();
+ arm_enable_wbuffer();
+
+ arm_enable_high_vectors();
+
+ /*
+	 * Leave the past behind. TLBs are invalidated, the write buffer is drained.
+ * The whole of I + D caches are invalidated unconditionally. This is
+ * important to ensure that the cache is free of previously loaded
+ * values. Otherwise unpredictable data aborts may occur at arbitrary
+ * times, each time a load/store operation hits one of the invalid
+ * entries and those entries are cleaned to main memory.
+ */
+ arm_invalidate_cache();
+ arm_drain_writebuffer();
+ arm_invalidate_tlb();
+ arm_enable_mmu();
+
+ /* Jump to virtual memory addresses */
+ __asm__ __volatile__ (
+ "add sp, sp, %0 \n" /* Update stack pointer */
+ "add fp, fp, %0 \n" /* Update frame pointer */
+	/* On the next instruction below, r0 gets the address of the
+	 * instruction 2 after itself (PC reads 8 bytes ahead), plus KOFFSET. */
+ "add r0, pc, %0 \n"
+ /* Special symbol that is extracted and included in the loader.
+ * Debuggers can break on it to load the virtual symbol table */
+ ".global break_virtual;\n"
+ "break_virtual:\n"
+ "mov pc, r0 \n" /* (r0 has next instruction) */
+ :
+ : "r" (KERNEL_OFFSET)
+ : "r0"
+ );
+
+ /*
+ * Restore link register (LR) for this function.
+ *
+ * NOTE: LR values are pushed onto the stack at each function call,
+ * which means the restored return values will be physical for all
+ * functions in the call stack except this function. So the caller
+ * of this function must never return but initiate scheduling etc.
+ */
+ __asm__ __volatile__ (
+ "add %0, %0, %1 \n"
+ "mov pc, %0 \n"
+ :: "r" (__builtin_return_address(0)), "r" (KERNEL_OFFSET)
+ );
+
+ /* should never come here */
+ while(1);
+}
+
Index: src/arch/or1k/v5/SConscript
===================================================================
--- src/arch/or1k/v5/SConscript (nonexistent)
+++ src/arch/or1k/v5/SConscript (revision 7)
@@ -0,0 +1,10 @@
+
+
+# Inherit global environment
+Import('env')
+
+# The set of source files associated with this SConscript file.
+src_local = ['mapping.c', 'exception.c', 'mmu_ops.S', 'cache.c', 'mutex.c', 'irq.c', 'init.c', 'atomic.S']
+
+obj = env.Object(src_local)
+Return('obj')
Index: src/arch/or1k/v5/irq.c
===================================================================
--- src/arch/or1k/v5/irq.c (nonexistent)
+++ src/arch/or1k/v5/irq.c (revision 7)
@@ -0,0 +1,45 @@
+/*
+ * Low-level irq routines.
+ *
+ * Copyright (C) 2010 B Labs Ltd.
+ * Written by Bahadir Balban
+ * Prem Mallappa
+ */
+
+void irq_local_disable_save(unsigned long *state)
+{
+ unsigned int tmp, tmp2;
+ __asm__ __volatile__ (
+ "mrs %0, cpsr_fc \n"
+ "orr %1, %0, #0x80 \n"
+ "msr cpsr_fc, %1 \n"
+ : "=&r"(tmp), "=r"(tmp2)
+ :
+ : "cc"
+ );
+ *state = tmp;
+}
+
+void irq_local_restore(unsigned long state)
+{
+ __asm__ __volatile__ (
+ "msr cpsr_fc, %0\n"
+ :
+ : "r"(state)
+ : "cc"
+ );
+}
+
+int irqs_enabled(void)
+{
+ int tmp;
+ __asm__ __volatile__ (
+ "mrs %0, cpsr_fc\n"
+ : "=r"(tmp)
+ );
+
+ if (tmp & 0x80)
+ return 0;
+
+ return 1;
+}
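+
+/*
+ * Usage sketch: the usual critical-section pattern with the helpers
+ * above:
+ *
+ *	unsigned long flags;
+ *
+ *	irq_local_disable_save(&flags);
+ *	... code that must not race with local irqs ...
+ *	irq_local_restore(flags);
+ */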
Index: src/arch/or1k/v5/mapping.c
===================================================================
--- src/arch/or1k/v5/mapping.c (nonexistent)
+++ src/arch/or1k/v5/mapping.c (revision 7)
@@ -0,0 +1,381 @@
+/*
+ * Copyright (C) 2007 Bahadir Balban
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include INC_SUBARCH(mm.h)
+#include INC_SUBARCH(mmu_ops.h)
+#include INC_GLUE(memory.h)
+#include INC_GLUE(mapping.h)
+#include INC_GLUE(memlayout.h)
+#include INC_ARCH(linker.h)
+#include INC_ARCH(asm.h)
+#include INC_API(kip.h)
+#include INC_ARCH(io.h)
+
+/*
+ * Removes initial mappings needed for transition to virtual memory.
+ * Used one-time only.
+ */
+void remove_section_mapping(unsigned long vaddr)
+{
+ pgd_table_t *pgd = &init_pgd;
+ pmd_t pgd_i = PGD_INDEX(vaddr);
+ if (!((pgd->entry[pgd_i] & PMD_TYPE_MASK)
+ & PMD_TYPE_SECTION))
+ while(1);
+ pgd->entry[pgd_i] = 0;
+ pgd->entry[pgd_i] |= PMD_TYPE_FAULT;
+ arm_invalidate_tlb();
+}
+
+/*
+ * Maps the given section-aligned @paddr to @vaddr using @size section-sized
+ * units. Note this overwrites any mapping already present at the same
+ * virtual address.
+ */
+void __add_section_mapping_init(unsigned int paddr,
+ unsigned int vaddr,
+ unsigned int size,
+ unsigned int flags)
+{
+ pte_t *ppte;
+ unsigned int l1_ptab;
+ unsigned int l1_offset;
+
+ /* 1st level page table address */
+ l1_ptab = virt_to_phys(&init_pgd);
+
+ /* Get the section offset for this vaddr */
+ l1_offset = (vaddr >> 18) & 0x3FFC;
+
+ /* The beginning entry for mapping */
+ ppte = (unsigned int *)(l1_ptab + l1_offset);
+ for(int i = 0; i < size; i++) {
+ *ppte = 0; /* Clear out old value */
+ *ppte |= paddr; /* Assign physical address */
+ *ppte |= PMD_TYPE_SECTION; /* Assign translation type */
+ /* Domain is 0, therefore no writes. */
+ /* Only kernel access allowed */
+ *ppte |= (SVC_RW_USR_NONE << SECTION_AP0);
+ /* Cacheability/Bufferability flags */
+ *ppte |= flags;
+ ppte++; /* Next section entry */
+ paddr += SECTION_SIZE; /* Next physical section */
+ }
+ return;
+}
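+/*
+ * Note on the offset math above: the section index is vaddr >> 20 and
+ * each first-level entry is 4 bytes, so the byte offset into the table
+ * is (vaddr >> 20) << 2. The expression (vaddr >> 18) & 0x3FFC computes
+ * this in a single shift while masking to the 16KB table size.
+ */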
+
+void add_section_mapping_init(unsigned int paddr, unsigned int vaddr,
+ unsigned int size, unsigned int flags)
+{
+ unsigned int psection;
+ unsigned int vsection;
+
+	/* Align each address to the section it resides in */
+ psection = paddr & ~SECTION_MASK;
+ vsection = vaddr & ~SECTION_MASK;
+
+ if (size == 0)
+ return;
+
+ __add_section_mapping_init(psection, vsection, size, flags);
+
+ return;
+}
+
+void arch_prepare_pte(u32 paddr, u32 vaddr, unsigned int flags,
+ pte_t *ptep)
+{
+ /* They must be aligned at this stage */
+ if (!is_page_aligned(paddr) || !is_page_aligned(vaddr)) {
+ printk("address not aligned, phys address %x"
+ " virtual address %x\n", paddr, vaddr);
+ BUG();
+ }
+
+ /*
+ * NOTE: In v5, the flags converted from generic
+ * by space_flags_to_ptflags() can be directly
+ * written to the pte. No further conversion is needed.
+ * Therefore this function doesn't do much on flags. In
+ * contrast in ARMv7 the flags need an extra level of
+ * processing.
+ */
+ if (flags == __MAP_FAULT)
+ *ptep = paddr | flags | PTE_TYPE_FAULT;
+ else
+ *ptep = paddr | flags | PTE_TYPE_SMALL;
+}
+
+void arch_write_pte(pte_t *ptep, pte_t pte, u32 vaddr)
+{
+ /* FIXME:
+ * Clean the dcache and invalidate the icache
+ * for the old translation first?
+ *
+ * The dcache is virtual, therefore the data
+ * in those entries should be cleaned first,
+ * before the translation of that virtual
+ * address is changed to a new physical address.
+ *
+ * Check that the entry was not faulty first.
+ */
+ arm_clean_invalidate_cache();
+
+ *ptep = pte;
+
+ /* FIXME: Fix this!
+ * - Use vaddr to clean the dcache pte by MVA.
+ * - Use mapped area to invalidate the icache
+ * - Invalidate the tlb for mapped area
+ */
+ arm_clean_invalidate_cache();
+ arm_invalidate_tlb();
+}
+
+
+void arch_prepare_write_pte(u32 paddr, u32 vaddr,
+ unsigned int flags, pte_t *ptep)
+{
+ pte_t pte = 0;
+
+ /* They must be aligned at this stage */
+ BUG_ON(!is_page_aligned(paddr));
+ BUG_ON(!is_page_aligned(vaddr));
+
+ arch_prepare_pte(paddr, vaddr, flags, &pte);
+
+ arch_write_pte(ptep, pte, vaddr);
+}
+
+pmd_t *
+arch_pick_pmd(pgd_table_t *pgd, unsigned long vaddr)
+{
+ return &pgd->entry[PGD_INDEX(vaddr)];
+}
+
+/*
+ * v5 pmd writes
+ */
+void arch_write_pmd(pmd_t *pmd_entry, u32 pmd_phys, u32 vaddr)
+{
+ /* FIXME: Clean the dcache if there was a valid entry */
+ *pmd_entry = (pmd_t)(pmd_phys | PMD_TYPE_PMD);
+ arm_clean_invalidate_cache(); /*FIXME: Write these properly! */
+ arm_invalidate_tlb();
+}
+
+
+int arch_check_pte_access_perms(pte_t pte, unsigned int flags)
+{
+ if ((pte & PTE_PROT_MASK) >= (flags & PTE_PROT_MASK))
+ return 1;
+ else
+ return 0;
+}
+
+/*
+ * Tell if a pgd index is a common kernel index.
+ * This is used to distinguish common kernel entries
+ * in a pgd, when copying page tables.
+ */
+int is_global_pgdi(int i)
+{
+ if ((i >= PGD_INDEX(KERNEL_AREA_START) &&
+ i < PGD_INDEX(KERNEL_AREA_END)) ||
+ (i >= PGD_INDEX(IO_AREA_START) &&
+ i < PGD_INDEX(IO_AREA_END)) ||
+ (i == PGD_INDEX(USER_KIP_PAGE)) ||
+ (i == PGD_INDEX(ARM_HIGH_VECTOR)) ||
+ (i == PGD_INDEX(ARM_SYSCALL_VECTOR)) ||
+ (i == PGD_INDEX(USERSPACE_CONSOLE_VBASE)))
+ return 1;
+ else
+ return 0;
+}
+
+extern pmd_table_t *pmd_array;
+
+void remove_mapping_pgd_all_user(pgd_table_t *pgd)
+{
+ pmd_table_t *pmd;
+
+ /* Traverse through all pgd entries. */
+ for (int i = 0; i < PGD_ENTRY_TOTAL; i++) {
+ if (!is_global_pgdi(i)) {
+ /* Detect a pmd entry */
+ if (((pgd->entry[i] & PMD_TYPE_MASK)
+ == PMD_TYPE_PMD)) {
+
+ /* Obtain the user pmd handle */
+ pmd = (pmd_table_t *)
+ phys_to_virt((pgd->entry[i] &
+ PMD_ALIGN_MASK));
+ /* Free it */
+ free_pmd(pmd);
+ }
+
+ /* Clear the pgd entry */
+ pgd->entry[i] = PMD_TYPE_FAULT;
+ }
+ }
+}
+
+
+int pgd_count_boot_pmds()
+{
+ int npmd = 0;
+ pgd_table_t *pgd = &init_pgd;
+
+ for (int i = 0; i < PGD_ENTRY_TOTAL; i++)
+ if ((pgd->entry[i] & PMD_TYPE_MASK) == PMD_TYPE_PMD)
+ npmd++;
+ return npmd;
+}
+
+
+/*
+ * Jumps from boot pmd/pgd page tables to tables allocated from the cache.
+ */
+pgd_table_t *arch_realloc_page_tables(void)
+{
+ pgd_table_t *pgd_new = alloc_pgd();
+ pgd_table_t *pgd_old = &init_pgd;
+ pmd_table_t *orig, *pmd;
+
+ /* Copy whole pgd entries */
+ memcpy(pgd_new, pgd_old, sizeof(pgd_table_t));
+
+ /* Allocate and copy all pmds */
+ for (int i = 0; i < PGD_ENTRY_TOTAL; i++) {
+ /* Detect a pmd entry */
+ if ((pgd_old->entry[i] & PMD_TYPE_MASK) == PMD_TYPE_PMD) {
+ /* Allocate new pmd */
+ if (!(pmd = alloc_pmd())) {
+ printk("FATAL: PMD allocation "
+ "failed during system initialization\n");
+ BUG();
+ }
+
+ /* Find original pmd */
+ orig = (pmd_table_t *)
+ phys_to_virt((pgd_old->entry[i] &
+ PMD_ALIGN_MASK));
+
+ /* Copy original to new */
+ memcpy(pmd, orig, sizeof(pmd_table_t));
+
+ /* Replace original pmd entry in pgd with new */
+ pgd_new->entry[i] = (pmd_t)virt_to_phys(pmd);
+ pgd_new->entry[i] |= PMD_TYPE_PMD;
+ }
+ }
+
+ /* Switch the virtual memory system into new area */
+ arm_clean_invalidate_cache();
+ arm_drain_writebuffer();
+ arm_invalidate_tlb();
+ arm_set_ttb(virt_to_phys(pgd_new));
+ arm_invalidate_tlb();
+
+ printk("%s: Initial page tables moved from 0x%x to 0x%x physical\n",
+ __KERNELNAME__, virt_to_phys(pgd_old),
+ virt_to_phys(pgd_new));
+
+ return pgd_new;
+}
+
+/*
+ * Copies global kernel entries into another pgd. Even for
+ * sub-pmd ranges the associated pmd entries are copied,
+ * assuming any pmds copied are applicable to all tasks in
+ * the system.
+ */
+void copy_pgd_global_by_vrange(pgd_table_t *to, pgd_table_t *from,
+ unsigned long start, unsigned long end)
+{
+ /* Extend sub-pmd ranges to their respective pmd boundaries */
+ start = align(start, PMD_MAP_SIZE);
+
+ if (end < start)
+ end = 0;
+
+ /* Aligning would overflow if mapping the last virtual pmd */
+ if (end < align(~0, PMD_MAP_SIZE) ||
+	    start > end) /* end may have already overflowed on input */
+ end = align_up(end, PMD_MAP_SIZE);
+ else
+ end = 0;
+
+ copy_pgds_by_vrange(to, from, start, end);
+}
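+
+/*
+ * Worked example of the overflow guard above, assuming 32-bit
+ * addresses and a PMD_MAP_SIZE of 1MB: for end = 0xFFFFF000, inside
+ * the last pmd, align_up(end, 1MB) would wrap around to 0x0. end is
+ * therefore forced to 0 explicitly, which copy_pgds_by_vrange()
+ * below interprets as "copy through the last pgd entry".
+ */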
+
+void copy_pgds_by_vrange(pgd_table_t *to, pgd_table_t *from,
+ unsigned long start, unsigned long end)
+{
+ unsigned long start_i = PGD_INDEX(start);
+ unsigned long end_i = PGD_INDEX(end);
+ unsigned long irange = (end_i != 0) ? (end_i - start_i)
+ : (PGD_ENTRY_TOTAL - start_i);
+
+ memcpy(&to->entry[start_i], &from->entry[start_i],
+ irange * sizeof(pmd_t));
+}
+
+void arch_copy_pgd_kernel_entries(pgd_table_t *to)
+{
+ pgd_table_t *from = TASK_PGD(current);
+
+ copy_pgd_global_by_vrange(to, from, KERNEL_AREA_START,
+ KERNEL_AREA_END);
+ copy_pgd_global_by_vrange(to, from, IO_AREA_START, IO_AREA_END);
+ copy_pgd_global_by_vrange(to, from, USER_KIP_PAGE,
+ USER_KIP_PAGE + PAGE_SIZE);
+ copy_pgd_global_by_vrange(to, from, ARM_HIGH_VECTOR,
+ ARM_HIGH_VECTOR + PAGE_SIZE);
+ copy_pgd_global_by_vrange(to, from, ARM_SYSCALL_VECTOR,
+ ARM_SYSCALL_VECTOR + PAGE_SIZE);
+
+ /* We temporarily map uart registers to every process */
+ copy_pgd_global_by_vrange(to, from, USERSPACE_CONSOLE_VBASE,
+ USERSPACE_CONSOLE_VBASE + PAGE_SIZE);
+}
+
+void arch_update_utcb(unsigned long utcb_address)
+{
+ /* Update the KIP pointer */
+ kip.utcb = utcb_address;
+}
+
+/* Scheduler uses this to switch context */
+void arch_space_switch(struct ktcb *to)
+{
+ pgd_table_t *pgd = TASK_PGD(to);
+
+ system_account_space_switch();
+
+ arm_clean_invalidate_cache();
+ arm_invalidate_tlb();
+ arm_set_ttb(virt_to_phys(pgd));
+ arm_invalidate_tlb();
+}
+
+void idle_task(void)
+{
+ while(1) {
+ /* Do maintenance */
+ tcb_delete_zombies();
+
+ schedule();
+ }
+}
+
Index: src/arch/or1k/v6/mutex.S.ARM
===================================================================
--- src/arch/or1k/v6/mutex.S.ARM (nonexistent)
+++ src/arch/or1k/v6/mutex.S.ARM (revision 7)
@@ -0,0 +1,90 @@
+/*
+ * ARM v5 Binary semaphore (mutex) implementation.
+ *
+ * Copyright (C) 2007 Bahadir Balban
+ *
+ */
+
+#include INC_ARCH(asm.h)
+
+/* Recap on swp:
+ * swp rx, ry, [rz]
+ * Atomically, in one instruction:
+ * 1) Loads the old value at the location pointed to by rz into rx.
+ * 2) Stores the value in ry to the location pointed to by rz.
+ * By doing so, in one instruction one can attempt to lock
+ * a word and discover whether it was already locked.
+ */
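+
+/* An equivalent C sketch of the swp-based lock below, assuming a
+ * GCC-style atomic-exchange builtin (illustrative only; the kernel
+ * uses the hand-written assembly that follows):
+ *
+ *	void spin_lock(unsigned int *lock)
+ *	{
+ *		// Exchange in 1; retry while the old value was
+ *		// non-zero, i.e. the lock was already held.
+ *		while (__sync_lock_test_and_set(lock, 1) != 0)
+ *			;
+ *	}
+ */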
+
+#define MUTEX_UNLOCKED 0
+#define MUTEX_LOCKED 1
+
+BEGIN_PROC(__spin_lock)
+ mov r1, #1
+__spin:
+ swp r2, r1, [r0]
+ cmp r2, #0
+ bne __spin
+ mov pc, lr
+END_PROC(__spin_lock)
+
+BEGIN_PROC(__spin_unlock)
+ mov r1, #0
+ swp r2, r1, [r0]
+ cmp r2, #1 @ Debug check.
+1:
+ bne 1b
+ mov pc, lr
+END_PROC(__spin_unlock)
+
+
+/*
+ * @r0: Address of mutex location.
+ */
+BEGIN_PROC(__mutex_lock)
+ mov r1, #1
+ swp r2, r1, [r0]
+ cmp r2, #0
+ movne r0, #0
+ moveq r0, #1
+ mov pc, lr
+END_PROC(__mutex_lock)
+
+/*
+ * @r0: Address of mutex location.
+ */
+BEGIN_PROC(__mutex_unlock)
+ mov r1, #0
+ swp r2, r1, [r0]
+ cmp r2, #1
+1: @ Debug check.
+ bne 1b
+ mov pc, lr
+END_PROC(__mutex_unlock)
+
+/*
+ * @r0: Address of mutex location.
+ */
+BEGIN_PROC(__mutex_inc)
+	mov r1, #1
+	swp r2, r1, [r0]
+ cmp r2, #0
+ movne r0, #0
+ moveq r0, #1
+ mov pc, lr
+END_PROC(__mutex_inc)
+
+/*
+ * @r0: Address of mutex location.
+ */
+BEGIN_PROC(__mutex_dec)
+ mov r1, #0
+ swp r2, r1, [r0]
+ cmp r2, #1
+1: @ Debug check.
+ bne 1b
+ mov pc, lr
+END_PROC(__mutex_dec)
+
+
src/arch/or1k/v6/mutex.S.ARM
Property changes :
Added: svn:mergeinfo
## -0,0 +0,0 ##
Index: src/arch/or1k/v6/mmu_ops.S.ARM
===================================================================
--- src/arch/or1k/v6/mmu_ops.S.ARM (nonexistent)
+++ src/arch/or1k/v6/mmu_ops.S.ARM (revision 7)
@@ -0,0 +1,155 @@
+/*
+ * low-level mmu operations
+ *
+ * Copyright (C) 2007 Bahadir Balban
+ */
+
+#include INC_ARCH(asm.h)
+
+#define C15_id c0
+#define C15_control c1
+#define C15_ttb c2
+#define C15_dom c3
+#define C15_fsr c5
+#define C15_far c6
+#define C15_tlb c8
+
+#define C15_C0_M 0x0001 /* MMU */
+#define C15_C0_A 0x0002 /* Alignment */
+#define C15_C0_C 0x0004 /* (D) Cache */
+#define C15_C0_W 0x0008 /* Write buffer */
+#define C15_C0_B 0x0080 /* Endianness */
+#define C15_C0_S 0x0100 /* System */
+#define C15_C0_R 0x0200 /* ROM */
+#define C15_C0_Z 0x0800 /* Branch Prediction */
+#define C15_C0_I 0x1000 /* I cache */
+#define C15_C0_V 0x2000 /* High vectors */
+
+/* FIXME: Make sure the ops that need r0 don't trash r0, or if they do,
+ * save it on stack before these operations.
+ */
+
+/*
+ * In ARM terminology, flushing the cache means invalidating its contents.
+ * Cleaning the cache means writing the contents of the cache back to
+ * main memory. In write-back caches the cache must be cleaned before
+ * flushing, otherwise in-cache data is lost.
+ */
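+
+/* Sketch of the safe ordering for a write-back dcache, using the
+ * routines defined below (illustrative only):
+ *
+ *	arm_clean_dcache();		// write dirty lines back to memory
+ *	arm_invalidate_dcache();	// only now safe to discard contents
+ *
+ * Inverting the two steps would silently drop dirty data; this is
+ * what arm_clean_invalidate_dcache() avoids by doing both per line
+ * in a single test/clean/flush loop.
+ */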
+
+BEGIN_PROC(arm_set_ttb)
+ mcr p15, 0, r0, C15_ttb, c0, 0
+ mov pc, lr
+END_PROC(arm_set_ttb)
+
+BEGIN_PROC(arm_get_domain)
+ mrc p15, 0, r0, C15_dom, c0, 0
+ mov pc, lr
+END_PROC(arm_get_domain)
+
+BEGIN_PROC(arm_set_domain)
+ mcr p15, 0, r0, C15_dom, c0, 0
+ mov pc, lr
+END_PROC(arm_set_domain)
+
+BEGIN_PROC(arm_enable_mmu)
+ mrc p15, 0, r0, C15_control, c0, 0
+ orr r0, r0, #C15_C0_M
+ mcr p15, 0, r0, C15_control, c0, 0
+ mov pc, lr
+END_PROC(arm_enable_mmu)
+
+BEGIN_PROC(arm_enable_icache)
+ mrc p15, 0, r0, C15_control, c0, 0
+ orr r0, r0, #C15_C0_I
+ mcr p15, 0, r0, C15_control, c0, 0
+ mov pc, lr
+END_PROC(arm_enable_icache)
+
+BEGIN_PROC(arm_enable_dcache)
+ mrc p15, 0, r0, C15_control, c0, 0
+ orr r0, r0, #C15_C0_C
+ mcr p15, 0, r0, C15_control, c0, 0
+ mov pc, lr
+END_PROC(arm_enable_dcache)
+
+BEGIN_PROC(arm_enable_wbuffer)
+ mrc p15, 0, r0, C15_control, c0, 0
+ orr r0, r0, #C15_C0_W
+ mcr p15, 0, r0, C15_control, c0, 0
+ mov pc, lr
+END_PROC(arm_enable_wbuffer)
+
+BEGIN_PROC(arm_enable_high_vectors)
+ mrc p15, 0, r0, C15_control, c0, 0
+ orr r0, r0, #C15_C0_V
+ mcr p15, 0, r0, C15_control, c0, 0
+ mov pc, lr
+END_PROC(arm_enable_high_vectors)
+
+BEGIN_PROC(arm_invalidate_cache)
+ mov r0, #0 @ FIX THIS
+ mcr p15, 0, r0, c7, c7 @ Flush I cache and D cache
+ mov pc, lr
+END_PROC(arm_invalidate_cache)
+
+BEGIN_PROC(arm_invalidate_icache)
+ mov r0, #0 @ FIX THIS
+ mcr p15, 0, r0, c7, c5, 0 @ Flush I cache
+ mov pc, lr
+END_PROC(arm_invalidate_icache)
+
+BEGIN_PROC(arm_invalidate_dcache)
+ mov r0, #0 @ FIX THIS
+ mcr p15, 0, r0, c7, c6, 0 @ Flush D cache
+ mov pc, lr
+END_PROC(arm_invalidate_dcache)
+
+BEGIN_PROC(arm_clean_dcache)
+1:
+	mrc p15, 0, pc, c7, c10, 3	@ Test/clean dcache line
+	bne 1b
+	mcr p15, 0, ip, c7, c10, 4	@ Drain WB
+	mov pc, lr
+END_PROC(arm_clean_dcache)
+
+BEGIN_PROC(arm_clean_invalidate_dcache)
+1:
+ mrc p15, 0, pc, c7, c14, 3 @ Test/clean/flush dcache line
+ @ COMMENT: Why use PC?
+ bne 1b
+ mcr p15, 0, ip, c7, c10, 4 @ Drain WB
+ mov pc, lr
+END_PROC(arm_clean_invalidate_dcache)
+
+BEGIN_PROC(arm_clean_invalidate_cache)
+1:
+ mrc p15, 0, r15, c7, c14, 3 @ Test/clean/flush dcache line
+ @ COMMENT: Why use PC?
+ bne 1b
+ mcr p15, 0, ip, c7, c5, 0 @ Flush icache
+ mcr p15, 0, ip, c7, c10, 4 @ Drain WB
+ mov pc, lr
+END_PROC(arm_clean_invalidate_cache)
+
+BEGIN_PROC(arm_drain_writebuffer)
+ mov r0, #0 @ FIX THIS
+ mcr p15, 0, r0, c7, c10, 4
+ mov pc, lr
+END_PROC(arm_drain_writebuffer)
+
+BEGIN_PROC(arm_invalidate_tlb)
+ mcr p15, 0, ip, c8, c7
+ mov pc, lr
+END_PROC(arm_invalidate_tlb)
+
+BEGIN_PROC(arm_invalidate_itlb)
+ mov r0, #0 @ FIX THIS
+ mcr p15, 0, r0, c8, c5, 0
+ mov pc, lr
+END_PROC(arm_invalidate_itlb)
+
+BEGIN_PROC(arm_invalidate_dtlb)
+ mov r0, #0 @ FIX THIS
+ mcr p15, 0, r0, c8, c6, 0
+ mov pc, lr
+END_PROC(arm_invalidate_dtlb)
+
src/arch/or1k/v6/mmu_ops.S.ARM
Property changes :
Added: svn:mergeinfo
## -0,0 +0,0 ##
Index: src/arch/or1k/v6/exception.c
===================================================================
--- src/arch/or1k/v6/exception.c (nonexistent)
+++ src/arch/or1k/v6/exception.c (revision 7)
@@ -0,0 +1,246 @@
+/*
+ * Memory exception handling in process context.
+ *
+ * Copyright (C) 2007, 2008 Bahadir Balban
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include INC_ARCH(exception.h)
+#include INC_GLUE(memlayout.h)
+#include INC_GLUE(memory.h)
+#include INC_GLUE(mapping.h)
+#include INC_GLUE(message.h)
+#include INC_GLUE(ipc.h)
+#include INC_SUBARCH(mm.h)
+
+int check_abort_type(u32 faulted_pc, u32 fsr, u32 far, u32 spsr)
+{
+ int ret = 0;
+
+ /*
+	 * On ARMv5, prefetch aborts don't have different
+ * status values. We validate them here and return.
+ */
+ if (is_prefetch_abort(fsr)) {
+ dbg_abort("Prefetch abort @ ", faulted_pc);
+
+ /* Happened in any mode other than user */
+ if (!is_user_mode(spsr)) {
+ dprintk("Unhandled kernel prefetch "
+ "abort at address ", far);
+ return -EABORT;
+ }
+ return 0;
+ }
+
+ switch (fsr & FSR_FS_MASK) {
+ /* Aborts that are expected on page faults: */
+ case DABT_PERM_PAGE:
+ dbg_abort("Page permission fault @ ", far);
+ ret = 0;
+ break;
+ case DABT_XLATE_PAGE:
+ dbg_abort("Page translation fault @ ", far);
+ ret = 0;
+ break;
+ case DABT_XLATE_SECT:
+ dbg_abort("Section translation fault @ ", far);
+ ret = 0;
+ break;
+
+ /* Aborts that can't be handled by a pager yet: */
+ case DABT_TERMINAL:
+ dprintk("Terminal fault dabt @ ", far);
+ ret = -EABORT;
+ break;
+ case DABT_VECTOR:
+ dprintk("Vector abort (obsolete!) @ ", far);
+ ret = -EABORT;
+ break;
+ case DABT_ALIGN:
+ dprintk("Alignment fault dabt @ ", far);
+ ret = -EABORT;
+ break;
+ case DABT_EXT_XLATE_LEVEL1:
+ dprintk("External LVL1 translation fault @ ", far);
+ ret = -EABORT;
+ break;
+ case DABT_EXT_XLATE_LEVEL2:
+ dprintk("External LVL2 translation fault @ ", far);
+ ret = -EABORT;
+ break;
+ case DABT_DOMAIN_SECT:
+ dprintk("Section domain fault dabt @ ", far);
+ ret = -EABORT;
+ break;
+ case DABT_DOMAIN_PAGE:
+ dprintk("Page domain fault dabt @ ", far);
+ ret = -EABORT;
+ break;
+ case DABT_PERM_SECT:
+ dprintk("Section permission fault dabt @ ", far);
+ ret = -EABORT;
+ break;
+ case DABT_EXT_LFETCH_SECT:
+ dprintk("External section linefetch "
+ "fault dabt @ ", far);
+ ret = -EABORT;
+ break;
+ case DABT_EXT_LFETCH_PAGE:
+		dprintk("External page linefetch "
+			"fault dabt @ ", far);
+ ret = -EABORT;
+ break;
+ case DABT_EXT_NON_LFETCH_SECT:
+ dprintk("External section non-linefetch "
+ "fault dabt @ ", far);
+ ret = -EABORT;
+ break;
+ case DABT_EXT_NON_LFETCH_PAGE:
+ dprintk("External page non-linefetch "
+ "fault dabt @ ", far);
+ ret = -EABORT;
+ break;
+ default:
+ dprintk("FATAL: Unrecognised/Unknown "
+ "data abort @ ", far);
+ dprintk("FATAL: FSR code: ", fsr);
+ ret = -EABORT;
+ }
+
+ /*
+ * Check validity of data abort's source.
+ *
+ * FIXME: Why not use spsr to do this?
+ */
+ if (is_kernel_address(faulted_pc)) {
+ dprintk("Unhandled kernel data "
+ "abort at address ",
+ faulted_pc);
+ ret = -EABORT;
+ }
+
+ return ret;
+}
+
+#if 0
+void data_abort_handler(u32 faulted_pc, u32 fsr, u32 far)
+{
+ set_abort_type(fsr, ARM_DABT);
+
+ dbg_abort("Data abort @ PC: ", faulted_pc);
+
+ //printk("Data abort: %d, PC: 0x%x\n",
+ //current->tid, faulted_pc);
+
+ /* Check for more details */
+ if (check_aborts(faulted_pc, fsr, far) < 0) {
+ printascii("This abort can't be handled by "
+ "any pager.\n");
+ goto error;
+ }
+
+ /* This notifies the pager */
+ fault_ipc_to_pager(faulted_pc, fsr, far, L4_IPC_TAG_PFAULT);
+
+ if (current->flags & TASK_SUSPENDING) {
+ BUG_ON(current->nlocks);
+ sched_suspend_sync();
+ } else if (current->flags & TASK_EXITING) {
+ BUG_ON(current->nlocks);
+ sched_exit_sync();
+ }
+
+ return;
+
+error:
+ disable_irqs();
+ dprintk("Unhandled data abort @ PC address: ", faulted_pc);
+ dprintk("FAR:", far);
+ dprintk("FSR:", fsr);
+ printascii("Kernel panic.\n");
+ printascii("Halting system...\n");
+ while (1)
+ ;
+}
+
+void prefetch_abort_handler(u32 faulted_pc, u32 fsr, u32 far, u32 spsr)
+{
+ set_abort_type(fsr, ARM_PABT);
+
+ if (check_aborts(faulted_pc, fsr, far) < 0) {
+ printascii("This abort can't be handled by any pager.\n");
+ goto error;
+ }
+
+ /* Did the abort occur in kernel mode? */
+ if ((spsr & ARM_MODE_MASK) == ARM_MODE_SVC)
+ goto error;
+
+ fault_ipc_to_pager(faulted_pc, fsr, far, L4_IPC_TAG_PFAULT);
+
+ if (current->flags & TASK_SUSPENDING) {
+ BUG_ON(current->nlocks);
+ sched_suspend_sync();
+ } else if (current->flags & TASK_EXITING) {
+ BUG_ON(current->nlocks);
+ sched_exit_sync();
+ }
+
+ return;
+
+error:
+ disable_irqs();
+ dprintk("Unhandled prefetch abort @ address: ", faulted_pc);
+ dprintk("FAR:", far);
+ dprintk("FSR:", fsr);
+ dprintk("Aborted PSR:", spsr);
+ printascii("Kernel panic.\n");
+ printascii("Halting system...\n");
+ while (1)
+ ;
+}
+
+void undef_handler(u32 undef_addr, u32 spsr, u32 lr)
+{
+ dbg_abort("Undefined instruction @ PC: ", undef_addr);
+
+ //printk("Undefined instruction: tid: %d, PC: 0x%x, Mode: %s\n",
+ // current->tid, undef_addr,
+ // (spsr & ARM_MODE_MASK) == ARM_MODE_SVC ? "SVC" : "User");
+
+ if ((spsr & ARM_MODE_MASK) == ARM_MODE_SVC) {
+ printk("Panic: Undef in Kernel\n");
+ goto error;
+ }
+
+ fault_ipc_to_pager(undef_addr, 0, undef_addr, L4_IPC_TAG_UNDEF_FAULT);
+
+ if (current->flags & TASK_SUSPENDING) {
+ BUG_ON(current->nlocks);
+ sched_suspend_sync();
+ } else if (current->flags & TASK_EXITING) {
+ BUG_ON(current->nlocks);
+ sched_exit_sync();
+ }
+
+ return;
+
+error:
+ disable_irqs();
+ dprintk("SPSR:", spsr);
+ dprintk("LR:", lr);
+ printascii("Kernel panic.\n");
+ printascii("Halting system...\n");
+ while(1)
+ ;
+}
+#endif
+
Index: src/arch/or1k/v6/mutex.c
===================================================================
--- src/arch/or1k/v6/mutex.c (nonexistent)
+++ src/arch/or1k/v6/mutex.c (revision 7)
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2009 B Labs Ltd.
+ */
+
+#include
+#include
+#include
+#include INC_SUBARCH(mmu_ops.h)
+
+#define MUTEX_UNLOCKED 0
+#define MUTEX_LOCKED 1
+
+
+/* Notes on ldrex/strex:
+ * ldrex rD, [rN, #imm] : loads rD with the contents at address (rN + imm)
+ * strex rD, rS, [rN, #imm]: stores the contents of rS to memory at (rN + imm);
+ * rD is 0 if the store succeeded, 1 otherwise
+ */
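+
+/* A C-level sketch of the ldrex/strex retry idiom described above,
+ * written with GCC's generic builtin instead of hand-coded assembly
+ * (illustrative only):
+ *
+ *	unsigned int expected = 0;
+ *	while (!__atomic_compare_exchange_n(s, &expected, 1, 0,
+ *					    __ATOMIC_ACQUIRE,
+ *					    __ATOMIC_RELAXED))
+ *		expected = 0;	// value reloaded; retry from scratch
+ */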
+
+void __spin_lock(unsigned int *s)
+{
+ unsigned int tmp;
+ __asm__ __volatile__ (
+ "1:\n"
+ "ldrex %0, [%2]\n"
+ "teq %0, #0\n"
+ "strexeq %0, %1, [%2]\n"
+ "teq %0, #0\n"
+#ifdef CONFIG_SMP
+ "wfene\n"
+#endif
+ "bne 1b\n"
+ : "=&r" (tmp)
+ : "r"(1), "r"(s)
+ : "cc", "memory"
+ );
+
+ dsb();
+}
+
+void __spin_unlock(unsigned int *s)
+{
+ __asm__ __volatile__ (
+ "str %0, [%1]\n"
+ :
+ : "r"(0), "r"(s)
+ : "memory"
+ );
+
+#ifdef CONFIG_SMP
+ dsb();
+ __asm__ __volatile__ ("sev\n");
+#endif
+}
+
+
+/*
+ * Current implementation uses __mutex_(un)lock within a protected
+ * spinlock, needs to be revisited in the future
+ */
+unsigned int __mutex_lock(unsigned int *m)
+{
+ unsigned int tmp, res;
+ __asm__ __volatile__ (
+ "1:\n"
+ "ldrex %0, [%3]\n"
+ "tst %0, #0\n"
+ "strexeq %1, %2, [%3]\n"
+ "tsteq %1, #0\n"
+ "bne 1b\n"
+ : "=&r" (tmp), "=&r"(res)
+ : "r"(1), "r"(m)
+ : "cc", "memory"
+ );
+
+ if ((tmp | res) != 0)
+ return 0;
+ return 1;
+}
+
+void __mutex_unlock(unsigned int *m)
+{
+ __asm__ __volatile__ (
+ "str %0, [%1] \n"
+ :
+ : "r"(0), "r"(m)
+ : "memory"
+ );
+
+}
+
Index: src/arch/or1k/v6/init.c
===================================================================
--- src/arch/or1k/v6/init.c (nonexistent)
+++ src/arch/or1k/v6/init.c (revision 7)
@@ -0,0 +1,139 @@
+/*
+ * ARM v5 specific init routines
+ *
+ * Copyright (C) 2007 - 2010 B Labs Ltd.
+ */
+
+#include
+#include
+#include
+#include INC_SUBARCH(mm.h)
+#include INC_SUBARCH(mmu_ops.h)
+#include INC_GLUE(memory.h)
+#include INC_GLUE(mapping.h)
+#include INC_ARCH(linker.h)
+
+SECTION(".init.pgd") ALIGN(PGD_SIZE) pgd_table_t init_pgd;
+
+
+void jump(struct ktcb *task)
+{
+ __asm__ __volatile__ (
+ "mov lr, %0\n" /* Load pointer to context area */
+ "ldr r0, [lr]\n" /* Load spsr value to r0 */
+ "msr spsr, r0\n" /* Set SPSR as ARM_MODE_USR */
+ "add sp, lr, %1\n" /* Reset SVC stack */
+ "sub sp, sp, %2\n" /* Align to stack alignment */
+ "ldmib lr, {r0-r14}^\n" /* Load all USR registers */
+
+	"nop \n"		/* Spec says don't touch banked registers
+				 * right after LDM {no-pc}^ for one instruction */
+ "add lr, lr, #64\n" /* Manually move to PC location. */
+ "ldr lr, [lr]\n" /* Load the PC_USR to LR */
+ "movs pc, lr\n" /* Jump to userspace, also switching SPSR/CPSR */
+ :
+ : "r" (task), "r" (PAGE_SIZE), "r" (STACK_ALIGNMENT)
+ );
+}
+
+void switch_to_user(struct ktcb *task)
+{
+ arm_clean_invalidate_cache();
+ arm_invalidate_tlb();
+ arm_set_ttb(virt_to_phys(TASK_PGD(task)));
+ arm_invalidate_tlb();
+ jump(task);
+}
+
+/* Maps the early memory regions needed to bootstrap the system */
+void init_kernel_mappings(void)
+{
+ memset((void *)virt_to_phys(&init_pgd), 0, sizeof(pgd_table_t));
+
+ /* Map kernel area to its virtual region */
+ add_section_mapping_init(align(virt_to_phys(_start_text), SZ_1MB),
+ align((unsigned int)_start_text, SZ_1MB), 1,
+ cacheable | bufferable);
+
+ /* Map kernel one-to-one to its physical region */
+ add_section_mapping_init(align(virt_to_phys(_start_text), SZ_1MB),
+ align(virt_to_phys(_start_text), SZ_1MB),
+ 1, 0);
+}
+
+/*
+ * Enable virtual memory using kernel's pgd
+ * and continue execution on virtual addresses.
+ */
+void start_virtual_memory()
+{
+ /*
+ * TTB must be 16K aligned. This is because first level tables are
+ * sized 16K.
+ */
+ if ((unsigned int)&init_pgd & 0x3FFF)
+ dprintk("kspace not properly aligned for ttb:",
+ (u32)&init_pgd);
+ // memset((void *)&kspace, 0, sizeof(pgd_table_t));
+ arm_set_ttb(virt_to_phys(&init_pgd));
+
+ /*
+ * This sets all 16 domains to zero and domain 0 to 1. The outcome
+ * is that page table access permissions are in effect for domain 0.
+ * All other domains have no access whatsoever.
+ */
+ arm_set_domain(1);
+
+ /* Enable everything before mmu permissions are in place */
+ arm_enable_caches();
+ arm_enable_wbuffer();
+
+ arm_enable_high_vectors();
+
+ /*
+ * Leave the past behind. Tlbs are invalidated, write buffer is drained.
+ * The whole of I + D caches are invalidated unconditionally. This is
+ * important to ensure that the cache is free of previously loaded
+ * values. Otherwise unpredictable data aborts may occur at arbitrary
+ * times, each time a load/store operation hits one of the invalid
+ * entries and those entries are cleaned to main memory.
+ */
+ arm_invalidate_cache();
+ arm_drain_writebuffer();
+ arm_invalidate_tlb();
+ arm_enable_mmu();
+
+ /* Jump to virtual memory addresses */
+ __asm__ __volatile__ (
+ "add sp, sp, %0 \n" /* Update stack pointer */
+ "add fp, fp, %0 \n" /* Update frame pointer */
+ /* On the next instruction below, r0 gets
+	 * current PC + KERNEL_OFFSET + 2 instructions after itself. */
+ "add r0, pc, %0 \n"
+ /* Special symbol that is extracted and included in the loader.
+ * Debuggers can break on it to load the virtual symbol table */
+ ".global break_virtual;\n"
+ "break_virtual:\n"
+ "mov pc, r0 \n" /* (r0 has next instruction) */
+ :
+ : "r" (KERNEL_OFFSET)
+ : "r0"
+ );
+
+ /*
+ * Restore link register (LR) for this function.
+ *
+ * NOTE: LR values are pushed onto the stack at each function call,
+ * which means the restored return values will be physical for all
+ * functions in the call stack except this function. So the caller
+ * of this function must never return but initiate scheduling etc.
+ */
+ __asm__ __volatile__ (
+ "add %0, %0, %1 \n"
+ "mov pc, %0 \n"
+ :: "r" (__builtin_return_address(0)), "r" (KERNEL_OFFSET)
+ );
+
+ /* should never come here */
+ while(1);
+}
Index: src/arch/or1k/v6/SConscript
===================================================================
--- src/arch/or1k/v6/SConscript (nonexistent)
+++ src/arch/or1k/v6/SConscript (revision 7)
@@ -0,0 +1,10 @@
+
+
+# Inherit global environment
+Import('env')
+
+# The set of source files associated with this SConscript file.
+src_local = ['mapping.c', 'exception.c', 'mmu_ops.S', 'mutex.c', 'irq.c', 'init.c', 'cpu_startup.c']
+
+obj = env.Object(src_local)
+Return('obj')
Index: src/arch/or1k/v6/mm.c
===================================================================
--- src/arch/or1k/v6/mm.c (nonexistent)
+++ src/arch/or1k/v6/mm.c (revision 7)
@@ -0,0 +1,726 @@
+/*
+ * Copyright (C) 2007 Bahadir Balban
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include INC_SUBARCH(mm.h)
+#include INC_SUBARCH(mmu_ops.h)
+#include INC_GLUE(memory.h)
+#include INC_PLAT(printascii.h)
+#include INC_GLUE(memlayout.h)
+#include INC_ARCH(linker.h)
+#include INC_ARCH(asm.h)
+#include INC_API(kip.h)
+
+/*
+ * These are indices into arrays with pgd_t or pmd_t sized elements,
+ * therefore the index must be divided by appropriate element size
+ */
+#define PGD_INDEX(x) (((((unsigned long)(x)) >> 18) & 0x3FFC) / sizeof(pgd_t))
+/* Strip out the page offset in this megabyte from a total of 256 pages. */
+#define PMD_INDEX(x) (((((unsigned long)(x)) >> 10) & 0x3FC) / sizeof (pmd_t))
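+
+/*
+ * Worked example, assuming 4-byte table entries: for vaddr 0xC0123456,
+ * (vaddr >> 18) & 0x3FFC = 0x3004 is the byte offset of the pgd entry,
+ * so PGD_INDEX = 0x3004 / 4 = 0xC01, i.e. simply vaddr >> 20 (one entry
+ * per 1MB section). Likewise (vaddr >> 10) & 0x3FC = 0x8C, so
+ * PMD_INDEX = 0x8C / 4 = 0x23, i.e. (vaddr >> 12) & 0xFF (one entry
+ * per 4K page, 256 pages per section).
+ */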
+
+/*
+ * Removes initial mappings needed for transition to virtual memory.
+ * Used one-time only.
+ */
+void remove_section_mapping(unsigned long vaddr)
+{
+	pgd_table_t *pgd = &init_pgd;
+	pgd_t pgd_i = PGD_INDEX(vaddr);
+	if ((pgd->entry[pgd_i] & PGD_TYPE_MASK)
+	    != PGD_TYPE_SECTION)
+		while(1);
+ pgd->entry[pgd_i] = 0;
+ pgd->entry[pgd_i] |= PGD_TYPE_FAULT;
+ arm_invalidate_tlb();
+}
+
+/*
+ * Maps given section-aligned @paddr to @vaddr using enough number
+ * of section-units to fulfill @size in sections. Note this overwrites
+ * a mapping if same virtual address was already mapped.
+ */
+void __add_section_mapping_init(unsigned int paddr,
+ unsigned int vaddr,
+ unsigned int size,
+ unsigned int flags)
+{
+ pte_t *ppte;
+ unsigned int l1_ptab;
+ unsigned int l1_offset;
+
+ /* 1st level page table address */
+ l1_ptab = virt_to_phys(&init_pgd);
+
+ /* Get the section offset for this vaddr */
+ l1_offset = (vaddr >> 18) & 0x3FFC;
+
+ /* The beginning entry for mapping */
+ ppte = (unsigned int *)(l1_ptab + l1_offset);
+ for(int i = 0; i < size; i++) {
+ *ppte = 0; /* Clear out old value */
+ *ppte |= paddr; /* Assign physical address */
+ *ppte |= PGD_TYPE_SECTION; /* Assign translation type */
+ /* Domain is 0, therefore no writes. */
+ /* Only kernel access allowed */
+ *ppte |= (SVC_RW_USR_NONE << SECTION_AP0);
+ /* Cacheability/Bufferability flags */
+ *ppte |= flags;
+ ppte++; /* Next section entry */
+ paddr += ARM_SECTION_SIZE; /* Next physical section */
+ }
+ return;
+}
+
+void add_section_mapping_init(unsigned int paddr, unsigned int vaddr,
+ unsigned int size, unsigned int flags)
+{
+ unsigned int psection;
+ unsigned int vsection;
+
+	/* Align each address to the section they reside in */
+ psection = paddr & ~ARM_SECTION_MASK;
+ vsection = vaddr & ~ARM_SECTION_MASK;
+
+	if (size == 0)
+ return;
+
+ __add_section_mapping_init(psection, vsection, size, flags);
+
+ return;
+}
+
+/* TODO: Make sure to flush tlb entry and caches */
+void __add_mapping(unsigned int paddr, unsigned int vaddr,
+ unsigned int flags, pmd_table_t *pmd)
+{
+ unsigned int pmd_i = PMD_INDEX(vaddr);
+ pmd->entry[pmd_i] = paddr;
+ pmd->entry[pmd_i] |= PMD_TYPE_SMALL; /* Small page type */
+ pmd->entry[pmd_i] |= flags;
+
+	/* TODO: Are both required? Investigate */
+
+ /* TEST:
+ * I think cleaning or invalidating the cache is not required,
+ * because the entries in the cache aren't for the new mapping anyway.
+ * It's required if a mapping is removed, but not when newly added.
+ */
+ arm_clean_invalidate_cache();
+
+ /* TEST: tlb must be flushed because a new mapping is present in page
+ * tables, and tlb is inconsistent with the page tables */
+ arm_invalidate_tlb();
+}
+
+/* Return whether a pmd associated with @vaddr is mapped on a pgd or not. */
+pmd_table_t *pmd_exists(pgd_table_t *pgd, unsigned long vaddr)
+{
+ unsigned int pgd_i = PGD_INDEX(vaddr);
+
+ /* Return true if non-zero pgd entry */
+ switch (pgd->entry[pgd_i] & PGD_TYPE_MASK) {
+ case PGD_TYPE_COARSE:
+ return (pmd_table_t *)
+ phys_to_virt((pgd->entry[pgd_i] &
+ PGD_COARSE_ALIGN_MASK));
+ break;
+
+ case PGD_TYPE_FAULT:
+ return 0;
+ break;
+
+ case PGD_TYPE_SECTION:
+ dprintk("Warning, a section is already mapped "
+ "where a coarse page mapping is attempted:",
+ (u32)(pgd->entry[pgd_i]
+ & PGD_SECTION_ALIGN_MASK));
+ BUG();
+ break;
+
+ case PGD_TYPE_FINE:
+ dprintk("Warning, a fine page table is already mapped "
+ "where a coarse page mapping is attempted:",
+ (u32)(pgd->entry[pgd_i]
+ & PGD_FINE_ALIGN_MASK));
+ printk("Fine tables are unsupported. ");
+ printk("What is this doing here?");
+ BUG();
+ break;
+
+ default:
+ dprintk("Unrecognised pmd type @ pgd index:", pgd_i);
+ BUG();
+ break;
+ }
+ return 0;
+}
+
+/* Convert a virtual address to a pte if it exists in the page tables. */
+pte_t virt_to_pte_from_pgd(unsigned long virtual, pgd_table_t *pgd)
+{
+ pmd_table_t *pmd = pmd_exists(pgd, virtual);
+
+ if (pmd)
+ return (pte_t)pmd->entry[PMD_INDEX(virtual)];
+ else
+ return (pte_t)0;
+}
+
+/* Convert a virtual address to a pte if it exists in the page tables. */
+pte_t virt_to_pte(unsigned long virtual)
+{
+ return virt_to_pte_from_pgd(virtual, TASK_PGD(current));
+}
+
+unsigned long virt_to_phys_by_pgd(unsigned long vaddr, pgd_table_t *pgd)
+{
+ pte_t pte = virt_to_pte_from_pgd(vaddr, pgd);
+ return pte & ~PAGE_MASK;
+}
+
+unsigned long virt_to_phys_by_task(unsigned long vaddr, struct ktcb *task)
+{
+ return virt_to_phys_by_pgd(vaddr, TASK_PGD(task));
+}
+
+void attach_pmd(pgd_table_t *pgd, pmd_table_t *pmd, unsigned int vaddr)
+{
+ u32 pgd_i = PGD_INDEX(vaddr);
+ u32 pmd_phys = virt_to_phys(pmd);
+
+ /* Domain is 0, therefore no writes. */
+ pgd->entry[pgd_i] = (pgd_t)pmd_phys;
+ pgd->entry[pgd_i] |= PGD_TYPE_COARSE;
+}
+
+/*
+ * Same as normal mapping but with some boot tweaks.
+ */
+void add_boot_mapping(unsigned int paddr, unsigned int vaddr,
+ unsigned int size, unsigned int flags)
+{
+ pmd_table_t *pmd;
+ pgd_table_t *pgd = &init_pgd;
+ unsigned int numpages = (size >> PAGE_BITS);
+
+ if (size < PAGE_SIZE) {
+ printascii("Error: Mapping size must be in bytes not pages.\n");
+ while(1);
+ }
+ if (size & PAGE_MASK)
+ numpages++;
+
+ /* Convert generic map flags to pagetable-specific */
+ BUG_ON(!(flags = space_flags_to_ptflags(flags)));
+
+ /* Map all consecutive pages that cover given size */
+ for (int i = 0; i < numpages; i++) {
+ /* Check if another mapping already has a pmd attached. */
+ pmd = pmd_exists(pgd, vaddr);
+ if (!pmd) {
+ /*
+ * If this is the first vaddr in
+ * this pmd, allocate new pmd
+ */
+ pmd = alloc_boot_pmd();
+
+ /* Attach pmd to its entry in pgd */
+ attach_pmd(pgd, pmd, vaddr);
+ }
+
+ /* Attach paddr to this pmd */
+ __add_mapping(page_align(paddr),
+ page_align(vaddr), flags, pmd);
+
+ /* Go to the next page to be mapped */
+ paddr += PAGE_SIZE;
+ vaddr += PAGE_SIZE;
+ }
+}
+
+/*
+ * Maps @paddr to @vaddr, covering @size bytes also allocates new pmd if
+ * necessary. This flavor explicitly supplies the pgd to modify. This is useful
+ * when modifying userspace of processes that are not currently running. (Only
+ * makes sense for userspace mappings since kernel mappings are common.)
+ */
+void add_mapping_pgd(unsigned int paddr, unsigned int vaddr,
+ unsigned int size, unsigned int flags,
+ pgd_table_t *pgd)
+{
+ pmd_table_t *pmd;
+ unsigned int numpages = (size >> PAGE_BITS);
+
+
+ if (size < PAGE_SIZE) {
+ printascii("Error: Mapping size must be in bytes not pages.\n");
+ while(1);
+ }
+ if (size & PAGE_MASK)
+ numpages++;
+
+ /* Convert generic map flags to pagetable-specific */
+ BUG_ON(!(flags = space_flags_to_ptflags(flags)));
+
+ /* Map all consecutive pages that cover given size */
+ for (int i = 0; i < numpages; i++) {
+ /* Check if another mapping already has a pmd attached. */
+ pmd = pmd_exists(pgd, vaddr);
+ if (!pmd) {
+ /*
+ * If this is the first vaddr in
+ * this pmd, allocate new pmd
+ */
+ pmd = alloc_pmd();
+
+ /* Attach pmd to its entry in pgd */
+ attach_pmd(pgd, pmd, vaddr);
+ }
+
+ /* Attach paddr to this pmd */
+ __add_mapping(page_align(paddr),
+ page_align(vaddr), flags, pmd);
+
+ /* Go to the next page to be mapped */
+ paddr += PAGE_SIZE;
+ vaddr += PAGE_SIZE;
+ }
+}
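+
+/*
+ * Hypothetical usage (the flag name is illustrative): map four pages
+ * into a suspended task's address space on its behalf:
+ *
+ *	add_mapping_pgd(paddr, vaddr, PAGE_SIZE * 4,
+ *			MAP_USR_DEFAULT_FLAGS, TASK_PGD(task));
+ */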
+
+void add_mapping(unsigned int paddr, unsigned int vaddr,
+ unsigned int size, unsigned int flags)
+{
+ add_mapping_pgd(paddr, vaddr, size, flags, TASK_PGD(current));
+}
+
+/*
+ * Checks if a virtual address range has same or more permissive
+ * flags than the given ones, returns 0 if not, and 1 if OK.
+ */
+int check_mapping_pgd(unsigned long vaddr, unsigned long size,
+ unsigned int flags, pgd_table_t *pgd)
+{
+ unsigned int npages = __pfn(align_up(size, PAGE_SIZE));
+ pte_t pte;
+
+ /* Convert generic map flags to pagetable-specific */
+ BUG_ON(!(flags = space_flags_to_ptflags(flags)));
+
+ for (int i = 0; i < npages; i++) {
+ pte = virt_to_pte_from_pgd(vaddr + i * PAGE_SIZE, pgd);
+
+		/* Check if pte perms are equal to or greater than given flags */
+ if ((pte & PTE_PROT_MASK) >= (flags & PTE_PROT_MASK))
+ continue;
+ else
+ return 0;
+ }
+
+ return 1;
+}
+
+int check_mapping(unsigned long vaddr, unsigned long size,
+ unsigned int flags)
+{
+ return check_mapping_pgd(vaddr, size, flags, TASK_PGD(current));
+}
+
+/* FIXME: Empty PMDs should be returned here !!! */
+int __remove_mapping(pmd_table_t *pmd, unsigned long vaddr)
+{
+ pmd_t pmd_i = PMD_INDEX(vaddr);
+ int ret;
+
+ switch (pmd->entry[pmd_i] & PMD_TYPE_MASK) {
+ case PMD_TYPE_FAULT:
+ ret = -ENOENT;
+ break;
+ case PMD_TYPE_LARGE:
+ pmd->entry[pmd_i] = 0;
+ pmd->entry[pmd_i] |= PMD_TYPE_FAULT;
+ ret = 0;
+ break;
+ case PMD_TYPE_SMALL:
+ pmd->entry[pmd_i] = 0;
+ pmd->entry[pmd_i] |= PMD_TYPE_FAULT;
+ ret = 0;
+ break;
+ default:
+ printk("Unknown page mapping in pmd. Assuming bug.\n");
+ BUG();
+ }
+ return ret;
+}
+
+/*
+ * Tell if a pgd index is a common kernel index. This is used to distinguish
+ * common kernel entries in a pgd, when copying page tables.
+ */
+int is_kern_pgdi(int i)
+{
+ if ((i >= PGD_INDEX(KERNEL_AREA_START) && i < PGD_INDEX(KERNEL_AREA_END)) ||
+ (i >= PGD_INDEX(IO_AREA_START) && i < PGD_INDEX(IO_AREA_END)) ||
+ (i == PGD_INDEX(USER_KIP_PAGE)) ||
+ (i == PGD_INDEX(ARM_HIGH_VECTOR)) ||
+ (i == PGD_INDEX(ARM_SYSCALL_VECTOR)) ||
+ (i == PGD_INDEX(USERSPACE_UART_BASE)))
+ return 1;
+ else
+ return 0;
+}
+
+/*
+ * Removes all userspace mappings from a pgd. Frees any pmds that it
+ * detects to be user pmds
+ */
+int remove_mapping_pgd_all_user(pgd_table_t *pgd)
+{
+ pmd_table_t *pmd;
+
+ /* Traverse through all pgd entries */
+ for (int i = 0; i < PGD_ENTRY_TOTAL; i++) {
+
+ /* Detect a pgd entry that is not a kernel entry */
+ if (!is_kern_pgdi(i)) {
+
+ /* Detect a pmd entry */
+ if (((pgd->entry[i] & PGD_TYPE_MASK)
+ == PGD_TYPE_COARSE)) {
+
+ /* Obtain the user pmd handle */
+ pmd = (pmd_table_t *)
+ phys_to_virt((pgd->entry[i] &
+ PGD_COARSE_ALIGN_MASK));
+ /* Free it */
+ free_pmd(pmd);
+ }
+
+ /* Clear the pgd entry */
+ pgd->entry[i] = PGD_TYPE_FAULT;
+ }
+ }
+
+ return 0;
+}
+
+int remove_mapping_pgd(unsigned long vaddr, pgd_table_t *pgd)
+{
+ pgd_t pgd_i = PGD_INDEX(vaddr);
+ pmd_table_t *pmd;
+ pmd_t pmd_i;
+ int ret;
+
+ /*
+ * Clean the cache to main memory before removing the mapping. Otherwise
+	 * entries in the cache for this mapping will cause translation faults
+ * if they're cleaned to main memory after the mapping is removed.
+ */
+ arm_clean_invalidate_cache();
+
+ /* TEST:
+ * Can't think of a valid reason to flush tlbs here, but keeping it just
+ * to be safe. REMOVE: Remove it if it's unnecessary.
+ */
+ arm_invalidate_tlb();
+
+ /* Return true if non-zero pgd entry */
+ switch (pgd->entry[pgd_i] & PGD_TYPE_MASK) {
+ case PGD_TYPE_COARSE:
+ // printk("Removing coarse mapping @ 0x%x\n", vaddr);
+ pmd = (pmd_table_t *)
+ phys_to_virt((pgd->entry[pgd_i]
+ & PGD_COARSE_ALIGN_MASK));
+ pmd_i = PMD_INDEX(vaddr);
+ ret = __remove_mapping(pmd, vaddr);
+ break;
+
+ case PGD_TYPE_FAULT:
+ ret = -1;
+ break;
+
+ case PGD_TYPE_SECTION:
+		printk("Removing section mapping for 0x%lx\n",
+		       vaddr);
+ pgd->entry[pgd_i] = 0;
+ pgd->entry[pgd_i] |= PGD_TYPE_FAULT;
+ ret = 0;
+ break;
+
+ case PGD_TYPE_FINE:
+ printk("Table mapped is a fine page table.\n"
+ "Fine tables are unsupported. Assuming bug.\n");
+ BUG();
+ break;
+
+ default:
+ dprintk("Unrecognised pmd type @ pgd index:", pgd_i);
+ printk("Assuming bug.\n");
+ BUG();
+ break;
+ }
+ /* The tlb must be invalidated here because it might have cached the
+ * old translation for this mapping. */
+ arm_invalidate_tlb();
+
+ return ret;
+}
+
+int remove_mapping(unsigned long vaddr)
+{
+ return remove_mapping_pgd(vaddr, TASK_PGD(current));
+}
+
+int delete_page_tables(struct address_space *space)
+{
+ remove_mapping_pgd_all_user(space->pgd);
+ free_pgd(space->pgd);
+ return 0;
+}
+
+/*
+ * Copies userspace entries of one task to another. In order to do that,
+ * it allocates new pmds and copies the original values into new ones.
+ */
+int copy_user_tables(struct address_space *new, struct address_space *orig_space)
+{
+ pgd_table_t *to = new->pgd, *from = orig_space->pgd;
+ pmd_table_t *pmd, *orig;
+
+ /* Allocate and copy all pmds that will be exclusive to new task. */
+ for (int i = 0; i < PGD_ENTRY_TOTAL; i++) {
+		/* Detect a pmd entry that is not a kernel pmd */
+ if (!is_kern_pgdi(i) &&
+ ((from->entry[i] & PGD_TYPE_MASK) == PGD_TYPE_COARSE)) {
+ /* Allocate new pmd */
+ if (!(pmd = alloc_pmd()))
+ goto out_error;
+
+ /* Find original pmd */
+ orig = (pmd_table_t *)
+ phys_to_virt((from->entry[i] &
+ PGD_COARSE_ALIGN_MASK));
+
+ /* Copy original to new */
+ memcpy(pmd, orig, sizeof(pmd_table_t));
+
+ /* Replace original pmd entry in pgd with new */
+ to->entry[i] = (pgd_t)virt_to_phys(pmd);
+ to->entry[i] |= PGD_TYPE_COARSE;
+ }
+ }
+
+ return 0;
+
+out_error:
+ /* Find all non-kernel pmds we have just allocated and free them */
+ for (int i = 0; i < PGD_ENTRY_TOTAL; i++) {
+ /* Non-kernel pmd that has just been allocated. */
+ if (!is_kern_pgdi(i) &&
+ (to->entry[i] & PGD_TYPE_MASK) == PGD_TYPE_COARSE) {
+ /* Obtain the pmd handle */
+ pmd = (pmd_table_t *)
+ phys_to_virt((to->entry[i] &
+ PGD_COARSE_ALIGN_MASK));
+ /* Free pmd */
+ free_pmd(pmd);
+ }
+ }
+ return -ENOMEM;
+}
+
+int pgd_count_pmds(pgd_table_t *pgd)
+{
+ int npmd = 0;
+
+ for (int i = 0; i < PGD_ENTRY_TOTAL; i++)
+ if ((pgd->entry[i] & PGD_TYPE_MASK) == PGD_TYPE_COARSE)
+ npmd++;
+ return npmd;
+}
+
+/*
+ * Allocates and copies all levels of page tables from one task to another.
+ * Useful when forking.
+ *
+ * The copied page tables end up having shared pmds for kernel entries
+ * and private copies of same pmds for user entries.
+ */
+pgd_table_t *copy_page_tables(pgd_table_t *from)
+{
+ pmd_table_t *pmd, *orig;
+ pgd_table_t *pgd;
+
+ /* Allocate and copy pgd. This includes all kernel entries */
+ if (!(pgd = alloc_pgd()))
+ return PTR_ERR(-ENOMEM);
+
+ /* First copy whole pgd entries */
+ memcpy(pgd, from, sizeof(pgd_table_t));
+
+ /* Allocate and copy all pmds that will be exclusive to new task. */
+ for (int i = 0; i < PGD_ENTRY_TOTAL; i++) {
+		/* Detect a pmd entry that is not a kernel pmd */
+ if (!is_kern_pgdi(i) &&
+ ((pgd->entry[i] & PGD_TYPE_MASK) == PGD_TYPE_COARSE)) {
+ /* Allocate new pmd */
+ if (!(pmd = alloc_pmd()))
+ goto out_error;
+
+ /* Find original pmd */
+ orig = (pmd_table_t *)
+ phys_to_virt((pgd->entry[i] &
+ PGD_COARSE_ALIGN_MASK));
+
+ /* Copy original to new */
+ memcpy(pmd, orig, sizeof(pmd_table_t));
+
+ /* Replace original pmd entry in pgd with new */
+ pgd->entry[i] = (pgd_t)virt_to_phys(pmd);
+ pgd->entry[i] |= PGD_TYPE_COARSE;
+ }
+ }
+
+ return pgd;
+
+out_error:
+ /* Find all allocated non-kernel pmds and free them */
+ for (int i = 0; i < PGD_ENTRY_TOTAL; i++) {
+ /* Non-kernel pmd that has just been allocated. */
+ if (!is_kern_pgdi(i) &&
+ (pgd->entry[i] & PGD_TYPE_MASK) == PGD_TYPE_COARSE) {
+ /* Obtain the pmd handle */
+ pmd = (pmd_table_t *)
+ phys_to_virt((pgd->entry[i] &
+ PGD_COARSE_ALIGN_MASK));
+ /* Free pmd */
+ free_pmd(pmd);
+ }
+ }
+ /* Free the pgd */
+ free_pgd(pgd);
+ return PTR_ERR(-ENOMEM);
+}
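+
+/*
+ * Sketch of the intended use when forking (illustrative; the error
+ * check mirrors the PTR_ERR() convention used above):
+ *
+ *	pgd_table_t *child_pgd = copy_page_tables(TASK_PGD(parent));
+ *	if (child_pgd == PTR_ERR(-ENOMEM))
+ *		return -ENOMEM;
+ */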
+
+extern pmd_table_t *pmd_array;
+
+/*
+ * Jumps from boot pmd/pgd page tables to tables allocated from the cache.
+ */
+pgd_table_t *realloc_page_tables(void)
+{
+ pgd_table_t *pgd_new = alloc_pgd();
+ pgd_table_t *pgd_old = &init_pgd;
+ pmd_table_t *orig, *pmd;
+
+ /* Copy whole pgd entries */
+ memcpy(pgd_new, pgd_old, sizeof(pgd_table_t));
+
+ /* Allocate and copy all pmds */
+ for (int i = 0; i < PGD_ENTRY_TOTAL; i++) {
+ /* Detect a pmd entry */
+ if ((pgd_old->entry[i] & PGD_TYPE_MASK) == PGD_TYPE_COARSE) {
+ /* Allocate new pmd */
+ if (!(pmd = alloc_pmd())) {
+ printk("FATAL: PMD allocation "
+ "failed during system initialization\n");
+ BUG();
+ }
+
+ /* Find original pmd */
+ orig = (pmd_table_t *)
+ phys_to_virt((pgd_old->entry[i] &
+ PGD_COARSE_ALIGN_MASK));
+
+ /* Copy original to new */
+ memcpy(pmd, orig, sizeof(pmd_table_t));
+
+ /* Replace original pmd entry in pgd with new */
+ pgd_new->entry[i] = (pgd_t)virt_to_phys(pmd);
+ pgd_new->entry[i] |= PGD_TYPE_COARSE;
+ }
+ }
+
+ /* Switch the virtual memory system into new area */
+ arm_clean_invalidate_cache();
+ arm_drain_writebuffer();
+ arm_invalidate_tlb();
+ arm_set_ttb(virt_to_phys(pgd_new));
+ arm_invalidate_tlb();
+
+ printk("%s: Initial page tables moved from 0x%x to 0x%x physical\n",
+ __KERNELNAME__, virt_to_phys(pgd_old),
+ virt_to_phys(pgd_new));
+
+ return pgd_new;
+}
+
+/*
+ * Useful for upgrading to page-grained control over a section mapping:
+ * Remaps a section mapping in pages. It always allocates a new pmd
+ * (there can't be an already existing pmd for a section mapping), fills
+ * in the page information, and replaces the direct section physical
+ * translation with the address of the pmd. Flushes the caches/tlbs.
+ */
+void remap_as_pages(void *vstart, void *vend)
+{
+ unsigned long pstart = virt_to_phys(vstart);
+ unsigned long pend = virt_to_phys(vend);
+ unsigned long paddr = pstart;
+ pgd_t pgd_i = PGD_INDEX(vstart);
+ pmd_t pmd_i = PMD_INDEX(vstart);
+ pgd_table_t *pgd = &init_pgd;
+ pmd_table_t *pmd = alloc_boot_pmd();
+ u32 pmd_phys = virt_to_phys(pmd);
+ int numpages = __pfn(pend - pstart);
+
+ /* Fill in the pmd first */
+ for (int n = 0; n < numpages; n++) {
+ pmd->entry[pmd_i + n] = paddr;
+ pmd->entry[pmd_i + n] |= PMD_TYPE_SMALL; /* Small page type */
+ pmd->entry[pmd_i + n] |= space_flags_to_ptflags(MAP_SVC_DEFAULT_FLAGS);
+ paddr += PAGE_SIZE;
+ }
+
+	/* Fill in the type to complete the pmd translation information */
+ pmd_phys |= PGD_TYPE_COARSE;
+
+ /* Make sure memory is coherent first. */
+ arm_clean_invalidate_cache();
+ arm_invalidate_tlb();
+
+ /* Replace the direct section physical address with pmd's address */
+ pgd->entry[pgd_i] = (pgd_t)pmd_phys;
+ printk("%s: Kernel area 0x%lx - 0x%lx remapped as %d pages\n", __KERNELNAME__,
+ (unsigned long)vstart, (unsigned long)vend, numpages);
+}
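+
+/*
+ * Hypothetical usage, assuming _start_data/_end_data linker symbols:
+ * upgrade a kernel section to page-grained mappings so individual
+ * pages can later be remapped or protected separately:
+ *
+ *	remap_as_pages(_start_data, _end_data);
+ */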
+
+void copy_pgds_by_vrange(pgd_table_t *to, pgd_table_t *from,
+ unsigned long start, unsigned long end)
+{
+ unsigned long start_i = PGD_INDEX(start);
+ unsigned long end_i = PGD_INDEX(end);
+ unsigned long irange = (end_i != 0) ? (end_i - start_i)
+ : (PGD_ENTRY_TOTAL - start_i);
+
+ memcpy(&to->entry[start_i], &from->entry[start_i],
+ irange * sizeof(pgd_t));
+}
+
+
+/* Scheduler uses this to switch context */
+void arch_hardware_flush(pgd_table_t *pgd)
+{
+ arm_clean_invalidate_cache();
+ arm_invalidate_tlb();
+ arm_set_ttb(virt_to_phys(pgd));
+ arm_invalidate_tlb();
+}
+
Index: src/arch/or1k/v6/cpu_startup.c
===================================================================
--- src/arch/or1k/v6/cpu_startup.c (nonexistent)
+++ src/arch/or1k/v6/cpu_startup.c (revision 7)
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2010 B Labs Ltd.
+ * Author: Prem Mallappa
+ */
+
+#include INC_CPU(cpu.h)
+//#include INC_SUBARCH(cpu.h)
+//#include INC_ARCH(cpu.h)
+
+
+/* This code is guaranteed to be executed before MMU is enabled */
+
+void cpu_startup(void)
+{
+ /* For now this should have
+ * cache disabling
+ * branch prediction disabling
+ */
+
+ /* Here enable the common bits
+ * cache
+ * branch prediction
+ * write buffers
+ */
+
+ /* Enable V6 page tables */
+ //unsigned int val = arm_get_cp15_cr() | 1<<23;
+ //arm_set_cp15_cr(val);
+
+
+#if defined (CONFIG_SMP)
+ /* Enable SCU*/
+ /* Enable SMP bit in CP15 */
+#endif
+
+}
Index: src/arch/or1k/v6/irq.c
===================================================================
--- src/arch/or1k/v6/irq.c (nonexistent)
+++ src/arch/or1k/v6/irq.c (revision 7)
@@ -0,0 +1,60 @@
+/*
+ * Low-level irq routines.
+ *
+ * Copyright (C) 2010 B Labs Ltd.
+ * Written by Bahadir Balban
+ * Prem Mallappa
+ */
+
+void irq_local_disable_save(unsigned long *state)
+{
+ unsigned int tmp;
+ __asm__ __volatile__ (
+ "mrs %0, cpsr_fc \n"
+ "cpsid ia \n"
+ : "=r"(tmp)
+ :
+ : "cc"
+ );
+ *state = tmp;
+}
+
+void irq_local_restore(unsigned long state)
+{
+ __asm__ __volatile__ (
+ "msr cpsr_fc, %0\n"
+ :
+ : "r"(state)
+ : "cc"
+ );
+}
+
+u8 l4_atomic_dest_readb(u8 *location)
+{
+ unsigned int tmp, res;
+ __asm__ __volatile__ (
+ "1: \n"
+ "ldrex %0, [%2] \n"
+ "strex %1, %3, [%2] \n"
+ "teq %1, #0 \n"
+ "bne 1b \n"
+ : "=&r"(tmp), "=&r"(res)
+ : "r"(location), "r"(0)
+ : "cc", "memory"
+ );
+
+ return (u8)tmp;
+}
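+
+/* The read above is destructive: the byte is atomically read and reset
+ * to 0 in a single ldrex/strex pass. A sketch of a typical use, where
+ * pending_flag is a hypothetical byte set by an irq handler:
+ *
+ *	if (l4_atomic_dest_readb(&pending_flag))
+ *		handle_pending_event();	// hypothetical consumer
+ */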
+
+int irqs_enabled(void)
+{
+ int tmp;
+ __asm__ __volatile__ (
+ "mrs %0, cpsr_fc\n"
+ : "=r"(tmp)
+ );
+ if (tmp & 0x80)
+ return 0;
+
+ return 1;
+}
Index: src/arch/or1k/v6/mapping.c
===================================================================
--- src/arch/or1k/v6/mapping.c (nonexistent)
+++ src/arch/or1k/v6/mapping.c (revision 7)
@@ -0,0 +1,366 @@
+/*
+ * Copyright (C) 2007 Bahadir Balban
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include INC_SUBARCH(mm.h)
+#include INC_SUBARCH(mmu_ops.h)
+#include INC_GLUE(memory.h)
+#include INC_GLUE(mapping.h)
+#include INC_GLUE(memlayout.h)
+#include INC_ARCH(linker.h)
+#include INC_ARCH(asm.h)
+#include INC_API(kip.h)
+#include INC_ARCH(io.h)
+
+/*
+ * Removes initial mappings needed for transition to virtual memory.
+ * Used one-time only.
+ */
+void remove_section_mapping(unsigned long vaddr)
+{
+ pgd_table_t *pgd = &init_pgd;
+ pmd_t pgd_i = PGD_INDEX(vaddr);
+	if ((pgd->entry[pgd_i] & PMD_TYPE_MASK)
+	    != PMD_TYPE_SECTION)
+ while(1);
+ pgd->entry[pgd_i] = 0;
+ pgd->entry[pgd_i] |= PMD_TYPE_FAULT;
+ arm_invalidate_tlb();
+}
+
+/*
+ * Maps the given section-aligned @paddr to @vaddr using as many
+ * section-units as needed to fulfill @size in sections. Note this
+ * overwrites an existing mapping if the same virtual address
+ * was already mapped.
+ */
+void __add_section_mapping_init(unsigned int paddr,
+ unsigned int vaddr,
+ unsigned int size,
+ unsigned int flags)
+{
+ pte_t *ppte;
+ unsigned int l1_ptab;
+ unsigned int l1_offset;
+
+ /* 1st level page table address */
+ l1_ptab = virt_to_phys(&init_pgd);
+
+ /* Get the section offset for this vaddr */
+ l1_offset = (vaddr >> 18) & 0x3FFC;
+
+ /* The beginning entry for mapping */
+ ppte = (unsigned int *)(l1_ptab + l1_offset);
+ for(int i = 0; i < size; i++) {
+ *ppte = 0; /* Clear out old value */
+ *ppte |= paddr; /* Assign physical address */
+ *ppte |= PMD_TYPE_SECTION; /* Assign translation type */
+ /* Domain is 0, therefore no writes. */
+ /* Only kernel access allowed */
+ *ppte |= (SVC_RW_USR_NONE << SECTION_AP0);
+ /* Cacheability/Bufferability flags */
+ *ppte |= flags;
+ ppte++; /* Next section entry */
+ paddr += SECTION_SIZE; /* Next physical section */
+ }
+ return;
+}
+
+void add_section_mapping_init(unsigned int paddr, unsigned int vaddr,
+ unsigned int size, unsigned int flags)
+{
+ unsigned int psection;
+ unsigned int vsection;
+
+	/* Align each address to the section they reside in */
+ psection = paddr & ~SECTION_MASK;
+ vsection = vaddr & ~SECTION_MASK;
+
+ if (size == 0)
+ return;
+
+ __add_section_mapping_init(psection, vsection, size, flags);
+
+ return;
+}
+
+void arch_prepare_pte(u32 paddr, u32 vaddr, unsigned int flags,
+ pte_t *ptep)
+{
+ /* They must be aligned at this stage */
+ BUG_ON(!is_page_aligned(paddr));
+ BUG_ON(!is_page_aligned(vaddr));
+
+ /*
+ * NOTE: In v5, the flags converted from generic
+ * by space_flags_to_ptflags() can be directly
+ * written to the pte. No further conversion is needed.
+ * Therefore this function doesn't do much on flags. In
+ * contrast in ARMv7 the flags need an extra level of
+ * processing.
+ */
+ if (flags == __MAP_FAULT)
+ *ptep = paddr | flags | PTE_TYPE_FAULT;
+ else
+ *ptep = paddr | flags | PTE_TYPE_SMALL;
+}
+
+void arch_write_pte(pte_t *ptep, pte_t pte, u32 vaddr)
+{
+ /* FIXME:
+ * Clean the dcache and invalidate the icache
+ * for the old translation first?
+ *
+ * The dcache is virtual, therefore the data
+ * in those entries should be cleaned first,
+ * before the translation of that virtual
+ * address is changed to a new physical address.
+ *
+ * Check that the entry was not faulty first.
+ */
+ arm_clean_invalidate_cache();
+
+ *ptep = pte;
+
+ /* FIXME: Fix this!
+ * - Use vaddr to clean the dcache pte by MVA.
+ * - Use mapped area to invalidate the icache
+ * - Invalidate the tlb for mapped area
+ */
+ arm_clean_invalidate_cache();
+ arm_invalidate_tlb();
+}
+
+
+void arch_prepare_write_pte(u32 paddr, u32 vaddr,
+ unsigned int flags, pte_t *ptep)
+{
+ pte_t pte = 0;
+
+ /* They must be aligned at this stage */
+ BUG_ON(!is_page_aligned(paddr));
+ BUG_ON(!is_page_aligned(vaddr));
+
+ arch_prepare_pte(paddr, vaddr, flags, &pte);
+
+ arch_write_pte(ptep, pte, vaddr);
+}
+
+pmd_t *
+arch_pick_pmd(pgd_table_t *pgd, unsigned long vaddr)
+{
+ return &pgd->entry[PGD_INDEX(vaddr)];
+}
+
+/*
+ * v5 pmd writes
+ */
+void arch_write_pmd(pmd_t *pmd_entry, u32 pmd_phys, u32 vaddr)
+{
+ /* FIXME: Clean the dcache if there was a valid entry */
+ *pmd_entry = (pmd_t)(pmd_phys | PMD_TYPE_PMD);
+ arm_clean_invalidate_cache(); /*FIXME: Write these properly! */
+ arm_invalidate_tlb();
+}
+
+
+int arch_check_pte_access_perms(pte_t pte, unsigned int flags)
+{
+ if ((pte & PTE_PROT_MASK) >= (flags & PTE_PROT_MASK))
+ return 1;
+ else
+ return 0;
+}
+
+/*
+ * Tell if a pgd index is a common kernel index.
+ * This is used to distinguish common kernel entries
+ * in a pgd, when copying page tables.
+ */
+int is_global_pgdi(int i)
+{
+ if ((i >= PGD_INDEX(KERNEL_AREA_START) &&
+ i < PGD_INDEX(KERNEL_AREA_END)) ||
+ (i >= PGD_INDEX(IO_AREA_START) &&
+ i < PGD_INDEX(IO_AREA_END)) ||
+ (i == PGD_INDEX(USER_KIP_PAGE)) ||
+ (i == PGD_INDEX(ARM_HIGH_VECTOR)) ||
+ (i == PGD_INDEX(ARM_SYSCALL_VECTOR)) ||
+ (i == PGD_INDEX(USERSPACE_CONSOLE_VBASE)))
+ return 1;
+ else
+ return 0;
+}
+
+extern pmd_table_t *pmd_array;
+
+void remove_mapping_pgd_all_user(pgd_table_t *pgd)
+{
+ pmd_table_t *pmd;
+
+ /* Traverse through all pgd entries. */
+ for (int i = 0; i < PGD_ENTRY_TOTAL; i++) {
+ if (!is_global_pgdi(i)) {
+ /* Detect a pmd entry */
+ if (((pgd->entry[i] & PMD_TYPE_MASK)
+ == PMD_TYPE_PMD)) {
+
+ /* Obtain the user pmd handle */
+ pmd = (pmd_table_t *)
+ phys_to_virt((pgd->entry[i] &
+ PMD_ALIGN_MASK));
+ /* Free it */
+ free_pmd(pmd);
+ }
+
+ /* Clear the pgd entry */
+ pgd->entry[i] = PMD_TYPE_FAULT;
+ }
+ }
+}
+
+
+int pgd_count_boot_pmds()
+{
+ int npmd = 0;
+ pgd_table_t *pgd = &init_pgd;
+
+ for (int i = 0; i < PGD_ENTRY_TOTAL; i++)
+ if ((pgd->entry[i] & PMD_TYPE_MASK) == PMD_TYPE_PMD)
+ npmd++;
+ return npmd;
+}
+
+
+/*
+ * Jumps from boot pmd/pgd page tables to tables allocated from the cache.
+ */
+pgd_table_t *arch_realloc_page_tables(void)
+{
+ pgd_table_t *pgd_new = alloc_pgd();
+ pgd_table_t *pgd_old = &init_pgd;
+ pmd_table_t *orig, *pmd;
+
+ /* Copy whole pgd entries */
+ memcpy(pgd_new, pgd_old, sizeof(pgd_table_t));
+
+ /* Allocate and copy all pmds */
+ for (int i = 0; i < PGD_ENTRY_TOTAL; i++) {
+ /* Detect a pmd entry */
+ if ((pgd_old->entry[i] & PMD_TYPE_MASK) == PMD_TYPE_PMD) {
+ /* Allocate new pmd */
+ if (!(pmd = alloc_pmd())) {
+ printk("FATAL: PMD allocation "
+ "failed during system initialization\n");
+ BUG();
+ }
+
+ /* Find original pmd */
+ orig = (pmd_table_t *)
+ phys_to_virt((pgd_old->entry[i] &
+ PMD_ALIGN_MASK));
+
+ /* Copy original to new */
+ memcpy(pmd, orig, sizeof(pmd_table_t));
+
+ /* Replace original pmd entry in pgd with new */
+ pgd_new->entry[i] = (pmd_t)virt_to_phys(pmd);
+ pgd_new->entry[i] |= PMD_TYPE_PMD;
+ }
+ }
+
+ /* Switch the virtual memory system into new area */
+ arm_clean_invalidate_cache();
+ arm_drain_writebuffer();
+ arm_invalidate_tlb();
+ arm_set_ttb(virt_to_phys(pgd_new));
+ arm_invalidate_tlb();
+
+ printk("%s: Initial page tables moved from 0x%x to 0x%x physical\n",
+ __KERNELNAME__, virt_to_phys(pgd_old),
+ virt_to_phys(pgd_new));
+
+ return pgd_new;
+}
+
+/*
+ * Copies global kernel entries into another pgd. Even for
+ * sub-pmd ranges the associated pmd entries are copied,
+ * assuming any pmds copied are applicable to all tasks in
+ * the system.
+ */
+void copy_pgd_global_by_vrange(pgd_table_t *to, pgd_table_t *from,
+ unsigned long start, unsigned long end)
+{
+ /* Extend sub-pmd ranges to their respective pmd boundaries */
+ start = align(start, PMD_MAP_SIZE);
+
+ if (end < start)
+ end = 0;
+
+ /* Aligning would overflow if mapping the last virtual pmd */
+ if (end < align(~0, PMD_MAP_SIZE) ||
+	    start > end) /* end may have already overflowed on input */
+ end = align_up(end, PMD_MAP_SIZE);
+ else
+ end = 0;
+
+ copy_pgds_by_vrange(to, from, start, end);
+}
+
+void copy_pgds_by_vrange(pgd_table_t *to, pgd_table_t *from,
+ unsigned long start, unsigned long end)
+{
+ unsigned long start_i = PGD_INDEX(start);
+ unsigned long end_i = PGD_INDEX(end);
+ unsigned long irange = (end_i != 0) ? (end_i - start_i)
+ : (PGD_ENTRY_TOTAL - start_i);
+
+ memcpy(&to->entry[start_i], &from->entry[start_i],
+ irange * sizeof(pmd_t));
+}
+
+void arch_copy_pgd_kernel_entries(pgd_table_t *to)
+{
+ pgd_table_t *from = TASK_PGD(current);
+
+ copy_pgd_global_by_vrange(to, from, KERNEL_AREA_START,
+ KERNEL_AREA_END);
+ copy_pgd_global_by_vrange(to, from, IO_AREA_START, IO_AREA_END);
+ copy_pgd_global_by_vrange(to, from, USER_KIP_PAGE,
+ USER_KIP_PAGE + PAGE_SIZE);
+ copy_pgd_global_by_vrange(to, from, ARM_HIGH_VECTOR,
+ ARM_HIGH_VECTOR + PAGE_SIZE);
+ copy_pgd_global_by_vrange(to, from, ARM_SYSCALL_VECTOR,
+ ARM_SYSCALL_VECTOR + PAGE_SIZE);
+
+ /* We temporarily map uart registers to every process */
+ copy_pgd_global_by_vrange(to, from, USERSPACE_CONSOLE_VBASE,
+ USERSPACE_CONSOLE_VBASE + PAGE_SIZE);
+}
+
+/* Scheduler uses this to switch context */
+void arch_space_switch(struct ktcb *to)
+{
+ pgd_table_t *pgd = TASK_PGD(to);
+
+ arm_clean_invalidate_cache();
+ arm_invalidate_tlb();
+ arm_set_ttb(virt_to_phys(pgd));
+ arm_invalidate_tlb();
+}
+
+void idle_task(void)
+{
+ printk("Idle task.\n");
+
+ while(1);
+}
+
Index: src/arch/or1k/head-smp.S.ARM
===================================================================
--- src/arch/or1k/head-smp.S.ARM (nonexistent)
+++ src/arch/or1k/head-smp.S.ARM (revision 7)
@@ -0,0 +1,114 @@
+/*
+ * Kernel Entry point for secondary cpus
+ *
+ * Copyright (C) 2010 B Labs Ltd.
+ * Author: Prem Mallappa
+ */
+
+#include INC_ARCH(asm.h)
+#include INC_PLAT(offsets.h)
+#include INC_ARCH(asm-macros.S)
+
+#define C15_C0_M 0x0001 /* MMU */
+#define C15_C0_A 0x0002 /* Alignment */
+#define C15_C0_C 0x0004 /* (D) Cache */
+#define C15_C0_W 0x0008 /* Write buffer */
+#define C15_C0_B 0x0080 /* Endianness */
+#define C15_C0_S 0x0100 /* System */
+#define C15_C0_R 0x0200 /* ROM */
+#define C15_C0_Z 0x0800 /* Branch Prediction */
+#define C15_C0_I 0x1000 /* I cache */
+#define C15_C0_V 0x2000 /* High vectors */
+
+ .section .text.head
+
+BEGIN_PROC(__smp_start)
+ msr cpsr_fxsc, #ARM_NOIRQ_SVC
+
+ /* Disable mmu if it is enabled */
+ mrc p15, 0, r0, c1, c0, 0
+ bic r0, r0, #C15_C0_M @ Disable MMU
+ bic r0, r0, #C15_C0_C @ Disable (D) Cache
+ bic r0, r0, #C15_C0_I @ Disable I cache
+ bic r0, r0, #C15_C0_W @ Disable Write buffer
+ mcr p15, 0, r0, c1, c0, 0
+
+ /* Setup boot stack (physical address) */
+
+ /*
+	 * Each processor gets a unique 4 KB stack.
+	 * This stack is used until the first task becomes
+	 * runnable, so there needs to be one for each core.
+ *
+ * +----------+
+ * |CPU3 Stack|
+ * +----------+
+ * |CPU2 Stack|
+ * +----------+
+ * |CPU1 Stack|
+ * +----------+
+ * |CPU0 Stack|
+ * +----------+ _bootstack_physical
+ */
+ get_cpuid r0
+ mov r0, r0, lsl #12 /* 4 KB stack per-cpu */
+ ldr sp, _secondary_cpu_stack
+ sub sp, sp, r0
+
+ /*
+	 * Each processor will get its own irq/fiq/abt/und stack
+	 * of size 16 bytes per mode. Each mode would have 64 bytes
+	 * of stack used in total for 4 cores.
+	 *
+	 * Note that, unlike SVC mode, the exception-mode stacks also
+	 * include the stack for the primary core, i.e. CPU0. There's
+	 * no separation of primary and secondary stack regions.
+ *
+ * +------------------+ __abt_stack_high
+ * | CPU0 ABT Stack |
+ * +------------------+ __abt_stack_high - 0x10
+ * | CPU1 ABT Stack |
+ * +------------------+ __abt_stack_high - 0x20
+ * | CPU2 ABT Stack |
+ * +------------------+ __abt_stack_high - 0x30
+ * | CPU3 ABT Stack |
+ * +------------------+ __abt_stack_high - 0x40
+ *
+ */
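+	/*
+	 * A C model of the carving above and the code below
+	 * (illustrative only):
+	 *
+	 *	u32 abt_sp(int cpu) { return __abt_stack_high - cpu * 16; }
+	 *
+	 * e.g. CPU2 takes aborts with sp = __abt_stack_high - 0x20.
+	 */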
+ get_cpuid r0
+ mov r0, r0, lsl #4 /* 16 byte stack for each core */
+
+ /* Exception stacks are defined in vector page */
+ msr cpsr_fcx, #ARM_NOIRQ_ABT
+ ldr sp, _sec_kern_abt_stack
+ sub sp, sp, r0
+ msr cpsr_fcx, #ARM_NOIRQ_IRQ
+ ldr sp, _sec_kern_irq_stack
+ sub sp, sp, r0
+ msr cpsr_fcx, #ARM_NOIRQ_FIQ
+ ldr sp, _sec_kern_fiq_stack
+ sub sp, sp, r0
+ msr cpsr_fcx, #ARM_NOIRQ_UND
+ ldr sp, _sec_kern_und_stack
+ sub sp, sp, r0
+ msr cpsr_fcx, #ARM_NOIRQ_SVC
+
+ /* Jump to start_kernel */
+ bl smp_secondary_init
+
+ /* Never reached */
+1:
+ b 1b
+
+_secondary_cpu_stack:
+ .word _bootstack_physical
+
+/* Exception stacks are defined in vector page */
+_sec_kern_abt_stack:
+ .word __abt_stack_high
+_sec_kern_irq_stack:
+ .word __irq_stack_high
+_sec_kern_fiq_stack:
+ .word __fiq_stack_high
+_sec_kern_und_stack:
+ .word __und_stack_high
src/arch/or1k/head-smp.S.ARM
Property changes :
Added: svn:mergeinfo
## -0,0 +0,0 ##
Index: src/arch/or1k/mapping-common.c
===================================================================
--- src/arch/or1k/mapping-common.c (nonexistent)
+++ src/arch/or1k/mapping-common.c (revision 7)
@@ -0,0 +1,418 @@
+/*
+ * Low-level page table functions that are common to,
+ * and abstracted across, the ARMv5-v7 architectures
+ *
+ * Copyright (C) 2007 - 2010 B Labs Ltd.
+ * Written by Bahadir Balban
+ */
+
+#include INC_SUBARCH(mm.h)
+#include INC_SUBARCH(mmu_ops.h)
+#include INC_GLUE(memory.h)
+#include INC_GLUE(memlayout.h)
+#include INC_ARCH(linker.h)
+#include INC_GLUE(mapping.h)
+#include
+#include
+#include
+#include
+#include
+#include
+
+/* Return the pmd table for vaddr if one exists, or 0 if not */
+pmd_table_t *pmd_exists(pgd_table_t *task_pgd, unsigned long vaddr)
+{
+ pmd_t *pmd = arch_pick_pmd(task_pgd, vaddr);
+
+ /*
+ * Check that it has a valid pmd
+ * (i.e. not a fault, not a section)
+ */
+ if ((*pmd & PMD_TYPE_MASK) == PMD_TYPE_PMD)
+ return (pmd_table_t *)
+ phys_to_virt(*pmd & PMD_ALIGN_MASK);
+ else if ((*pmd & PMD_TYPE_MASK) == 0)
+ return 0;
+ else
+ BUG(); /* Anything that's not a pmd or fault is a bug */
+ return 0;
+}
+
+/*
+ * Convert a virtual address to a pte from a task-specific pgd.
+ * FIXME: Remove this in favour of the ptep version; kept for
+ * now because too many things would need retesting.
+ */
+pte_t virt_to_pte_from_pgd(pgd_table_t *task_pgd,
+ unsigned long virtual)
+{
+ pmd_table_t *pmd = pmd_exists(task_pgd, virtual);
+
+ if (pmd)
+ return (pte_t)pmd->entry[PMD_INDEX(virtual)];
+ else
+ return (pte_t)0;
+}
+
+/* Convert virtual address to a pte from a task-specific pgd */
+pte_t *virt_to_ptep_from_pgd(pgd_table_t *task_pgd,
+ unsigned long virtual)
+{
+ pmd_table_t *pmd = pmd_exists(task_pgd, virtual);
+
+ if (pmd)
+ return (pte_t *)&pmd->entry[PMD_INDEX(virtual)];
+ else
+ return (pte_t *)0;
+}
+
+/*
+ * Convert a virtual address to a pte if it
+ * exists in the page tables.
+ */
+pte_t virt_to_pte(unsigned long virtual)
+{
+ return virt_to_pte_from_pgd(TASK_PGD(current), virtual);
+}
+
+pte_t *virt_to_ptep(unsigned long virtual)
+{
+ return virt_to_ptep_from_pgd(TASK_PGD(current), virtual);
+}
+
+unsigned long virt_to_phys_by_pgd(pgd_table_t *pgd, unsigned long vaddr)
+{
+ pte_t pte = virt_to_pte_from_pgd(pgd, vaddr);
+ return pte & ~PAGE_MASK;
+}
+
+static inline unsigned long
+virt_to_phys_by_task(struct ktcb *task, unsigned long vaddr)
+{
+ return virt_to_phys_by_pgd(TASK_PGD(task), vaddr);
+}
+
+/*
+ * Attaches a pmd to either a task or the global pgd
+ * depending on the virtual address passed.
+ */
+void attach_pmd(pgd_table_t *task_pgd, pmd_table_t *pmd_table,
+ unsigned long vaddr)
+{
+ u32 pmd_phys = virt_to_phys(pmd_table);
+ pmd_t *pmd;
+
+ BUG_ON(!is_aligned(pmd_phys, PMD_SIZE));
+
+ /*
+ * Pick the right pmd from the right pgd.
+ * It makes a difference if split tables are used.
+ */
+ pmd = arch_pick_pmd(task_pgd, vaddr);
+
+ /* Write the pmd into hardware pgd */
+ arch_write_pmd(pmd, pmd_phys, vaddr);
+}
+
+void add_mapping_pgd(unsigned long physical, unsigned long virtual,
+ unsigned int sz_bytes, unsigned int flags,
+ pgd_table_t *task_pgd)
+{
+ unsigned long npages = (sz_bytes >> PFN_SHIFT);
+ pmd_table_t *pmd_table;
+
+ if (sz_bytes < PAGE_SIZE) {
+ print_early("Error: Mapping size less than PAGE_SIZE. "
+ "Mapping size is in bytes not pages.\n");
+ BUG();
+ }
+
+ if (sz_bytes & PAGE_MASK)
+ npages++;
+
+ /* Convert generic map flags to arch specific flags */
+ BUG_ON(!(flags = space_flags_to_ptflags(flags)));
+
+ /* Map all pages that cover given size */
+ for (int i = 0; i < npages; i++) {
+ /* Check if a pmd was attached previously */
+ if (!(pmd_table = pmd_exists(task_pgd, virtual))) {
+
+ /* First mapping in pmd, allocate it */
+ pmd_table = alloc_pmd();
+
+ /* Prepare the pte but don't sync */
+ arch_prepare_pte(physical, virtual, flags,
+ &pmd_table->entry[PMD_INDEX(virtual)]);
+
+ /* Attach pmd to its pgd and sync it */
+ attach_pmd(task_pgd, pmd_table, virtual);
+ } else {
+ /* Prepare, write the pte and sync */
+ arch_prepare_write_pte(physical, virtual,
+ flags, &pmd_table->entry[PMD_INDEX(virtual)]);
+ }
+
+ /* Move on to the next page */
+ physical += PAGE_SIZE;
+ virtual += PAGE_SIZE;
+ }
+}
+
+void add_boot_mapping(unsigned long physical, unsigned long virtual,
+ unsigned int sz_bytes, unsigned int flags)
+{
+ unsigned long npages = (sz_bytes >> PFN_SHIFT);
+ pmd_table_t *pmd_table;
+
+ if (sz_bytes < PAGE_SIZE) {
+ print_early("Error: Mapping size less than PAGE_SIZE. "
+ "Mapping size should be in _bytes_ "
+ "not pages.\n");
+ BUG();
+ }
+
+ if (sz_bytes & PAGE_MASK)
+ npages++;
+
+ /* Convert generic map flags to arch specific flags */
+ BUG_ON(!(flags = space_flags_to_ptflags(flags)));
+
+ /* Map all pages that cover given size */
+ for (int i = 0; i < npages; i++) {
+ /* Check if a pmd was attached previously */
+ if (!(pmd_table = pmd_exists(&init_pgd, virtual))) {
+
+ /* First mapping in pmd, allocate it */
+ pmd_table = alloc_boot_pmd();
+
+ /* Prepare the pte but don't sync */
+ arch_prepare_pte(physical, virtual, flags,
+ &pmd_table->entry[PMD_INDEX(virtual)]);
+
+ /* Attach pmd to its pgd and sync it */
+ attach_pmd(&init_pgd, pmd_table, virtual);
+ } else {
+ /* Prepare, write the pte and sync */
+ arch_prepare_write_pte(physical, virtual,
+ flags, &pmd_table->entry[PMD_INDEX(virtual)]);
+ }
+
+ /* Move on to the next page */
+ physical += PAGE_SIZE;
+ virtual += PAGE_SIZE;
+ }
+}
+
+void add_mapping(unsigned long paddr, unsigned long vaddr,
+ unsigned int size, unsigned int flags)
+{
+ add_mapping_pgd(paddr, vaddr, size, flags, TASK_PGD(current));
+}
+
+/*
+ * Checks whether a virtual address range has the same or more
+ * permissive flags than the given ones; returns 1 if so, 0 if not.
+ */
+int check_mapping_pgd(unsigned long vaddr, unsigned long size,
+ unsigned int flags, pgd_table_t *pgd)
+{
+ unsigned int npages = __pfn(align_up(size, PAGE_SIZE));
+ pte_t pte;
+
+ /* Convert generic map flags to pagetable-specific */
+ BUG_ON(!(flags = space_flags_to_ptflags(flags)));
+
+ for (int i = 0; i < npages; i++) {
+ pte = virt_to_pte_from_pgd(pgd, vaddr + i * PAGE_SIZE);
+
+ /* Fail unless pte perms are equal to or greater than given flags */
+ if (!arch_check_pte_access_perms(pte, flags))
+ return 0;
+ }
+
+ return 1;
+}
+
+int check_mapping(unsigned long vaddr, unsigned long size,
+ unsigned int flags)
+{
+ return check_mapping_pgd(vaddr, size, flags,
+ TASK_PGD(current));
+}
+
+/*
+ * This can be made common for v5/v7, keeping split/page table
+ * and cache flush parts in arch-specific files.
+ */
+int remove_mapping_pgd(pgd_table_t *task_pgd, unsigned long vaddr)
+{
+ pmd_table_t *pmd_table;
+ int pgd_i, pmd_i;
+ pmd_t *pmd;
+ unsigned int pmd_type, pte_type;
+
+ vaddr = page_align(vaddr);
+ pgd_i = PGD_INDEX(vaddr);
+ pmd_i = PMD_INDEX(vaddr);
+
+ /*
+ * Get the right pgd's pmd according to whether
+ * the address is global or task-specific.
+ */
+ pmd = arch_pick_pmd(task_pgd, vaddr);
+
+ pmd_type = *pmd & PMD_TYPE_MASK;
+
+ if (pmd_type == PMD_TYPE_FAULT)
+ return -ENOMAP;
+
+ /* Anything else must be a proper pmd */
+ BUG_ON(pmd_type != PMD_TYPE_PMD);
+
+ /* Get the 2nd level pmd table */
+ pmd_table = (pmd_table_t *)
+ phys_to_virt((unsigned long)*pmd
+ & PMD_ALIGN_MASK);
+
+ /* Get the pte type already there */
+ pte_type = pmd_table->entry[pmd_i] & PTE_TYPE_MASK;
+
+ /* If it's a fault we're done */
+ if (pte_type == PTE_TYPE_FAULT)
+ return -ENOMAP;
+ /* It must be a small pte if not fault */
+ else if (pte_type != PTE_TYPE_SMALL)
+ BUG();
+
+ /* Write to pte, also syncing it as required by arch */
+ arch_prepare_write_pte(0, vaddr,
+ space_flags_to_ptflags(MAP_FAULT),
+ (pte_t *)&pmd_table->entry[pmd_i]);
+ return 0;
+}
+
+int remove_mapping(unsigned long vaddr)
+{
+ return remove_mapping_pgd(TASK_PGD(current), vaddr);
+}
+
+
+int delete_page_tables(struct address_space *space)
+{
+ remove_mapping_pgd_all_user(space->pgd);
+ free_pgd(space->pgd);
+ return 0;
+}
+
+/*
+ * Copies userspace entries of one task to another.
+ * In order to do that, it allocates new pmds and
+ * copies the original values into new ones.
+ */
+int copy_user_tables(struct address_space *new,
+ struct address_space *orig_space)
+{
+ pgd_table_t *to = new->pgd, *from = orig_space->pgd;
+ pmd_table_t *pmd, *orig;
+
+ /* Allocate and copy all pmds that will be exclusive to new task. */
+ for (int i = 0; i < PGD_ENTRY_TOTAL; i++) {
+ /* Detect a pmd entry that is not a global pmd */
+ if (!is_global_pgdi(i) &&
+ ((from->entry[i] & PMD_TYPE_MASK)
+ == PMD_TYPE_PMD)) {
+ /* Allocate new pmd */
+ if (!(pmd = alloc_pmd()))
+ goto out_error;
+
+ /* Find original pmd */
+ orig = (pmd_table_t *)
+ phys_to_virt((from->entry[i] &
+ PMD_ALIGN_MASK));
+
+ /* Copy original to new */
+ memcpy(pmd, orig, sizeof(pmd_table_t));
+
+ /* Replace original pmd entry in pgd with new */
+ to->entry[i] = (pmd_t)(virt_to_phys(pmd)
+ | PMD_TYPE_PMD);
+ }
+ }
+
+ /* In case the new table is written to a ttbr after this
+ * point, make sure all writes to it have completed. */
+ dmb();
+
+ return 0;
+
+out_error:
+ /* Find all non-kernel pmds we have just allocated and free them */
+ for (int i = 0; i < PGD_ENTRY_TOTAL; i++) {
+ /* Non-kernel pmd that has just been allocated. */
+ if (!is_global_pgdi(i) &&
+ (to->entry[i] & PMD_TYPE_MASK) == PMD_TYPE_PMD) {
+ /* Obtain the pmd handle */
+ pmd = (pmd_table_t *)
+ phys_to_virt((to->entry[i] &
+ PMD_ALIGN_MASK));
+ /* Free pmd */
+ free_pmd(pmd);
+ }
+ }
+ return -ENOMEM;
+}
+
+
+
+/*
+ * Useful for upgrading to page-grained control
+ * over the kernel section mapping.
+ *
+ * Remaps a section mapping in pages. It allocates a pmd,
+ * fills in the page information, and replaces the direct
+ * section physical translation with the address of the
+ * pmd. Syncs the caches.
+ *
+ * NOTE: Assumes only a single pmd is enough.
+ */
+void remap_as_pages(void *vstart, void *vend)
+{
+ unsigned long pstart = virt_to_phys(vstart);
+ unsigned long pend = virt_to_phys(vend);
+ unsigned long paddr = pstart;
+ unsigned long vaddr = (unsigned long)vstart;
+ int pmd_i = PMD_INDEX(vstart);
+ pgd_table_t *pgd = &init_pgd;
+ pmd_table_t *pmd = alloc_boot_pmd();
+ int npages = __pfn(pend - pstart);
+ int map_flags;
+
+ /* Map the whole kernel into the pmd first */
+ for (int n = 0; n < npages; n++) {
+ /* Map text pages as executable */
+ if ((vaddr >= (unsigned long)_start_text &&
+ vaddr < page_align_up(_end_text)) ||
+ (vaddr >= (unsigned long)_start_vectors &&
+ vaddr < page_align_up(_end_vectors)))
+ map_flags = MAP_KERN_RWX;
+ else
+ map_flags = MAP_KERN_RW;
+
+ arch_prepare_pte(paddr, vaddr,
+ space_flags_to_ptflags(map_flags),
+ &pmd->entry[pmd_i + n]);
+ paddr += PAGE_SIZE;
+ vaddr += PAGE_SIZE;
+ }
+
+ attach_pmd(pgd, pmd, (unsigned long)vstart);
+
+ printk("%s: Kernel area 0x%lx - 0x%lx "
+ "remapped as %d pages\n", __KERNELNAME__,
+ (unsigned long)vstart, (unsigned long)vend,
+ npages);
+}
+
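Both mapping loops above derive their page count the same way: shift the byte size down by PFN_SHIFT, then round up if any low bits remain. A quick standalone check of that rounding follows, under the assumption (implied by the `sz_bytes & PAGE_MASK` test in the code) that PAGE_MASK is the low-bits mask PAGE_SIZE - 1 and pages are 4 KB.

/* Standalone check of the size-to-page-count rounding used by
 * add_mapping_pgd() and add_boot_mapping(). Assumes 4 KB pages and
 * PAGE_MASK == PAGE_SIZE - 1, matching the sz_bytes & PAGE_MASK test. */
#include <assert.h>

#define PAGE_SIZE 0x1000UL
#define PAGE_MASK (PAGE_SIZE - 1)
#define PFN_SHIFT 12

static unsigned long map_npages(unsigned long sz_bytes)
{
        unsigned long npages = sz_bytes >> PFN_SHIFT;

        /* A partial trailing page still needs a pte of its own */
        if (sz_bytes & PAGE_MASK)
                npages++;
        return npages;
}

int main(void)
{
        assert(map_npages(PAGE_SIZE) == 1);
        assert(map_npages(PAGE_SIZE + 1) == 2);
        assert(map_npages(3 * PAGE_SIZE - 1) == 3);
        return 0;
}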
Index: src/arch/or1k/exception-common.c
===================================================================
--- src/arch/or1k/exception-common.c (nonexistent)
+++ src/arch/or1k/exception-common.c (revision 7)
@@ -0,0 +1,334 @@
+/*
+ * Common exception handling code
+ *
+ * Copyright (C) 2008 - 2010 B Labs Ltd.
+ * Written by Bahadir Balban
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include INC_ARCH(exception.h)
+#include INC_GLUE(memlayout.h)
+#include INC_GLUE(memory.h)
+#include INC_GLUE(mapping.h)
+#include INC_GLUE(message.h)
+#include INC_GLUE(ipc.h)
+#include INC_SUBARCH(mm.h)
+
+
+void abort_die(void)
+{
+ disable_irqs();
+ print_early("Unhandled kernel abort.\n");
+ print_early("Kernel panic.\n");
+ print_early("Halting system...\n");
+ while (1)
+ ;
+}
+
+struct ipc_state {
+ u32 mr[MR_TOTAL];
+ unsigned int flags;
+};
+
+void ipc_save_state(struct ipc_state *state)
+{
+ unsigned int *mr0_current = KTCB_REF_MR0(current);
+
+ BUG_ON(!mr0_current);
+
+ /* Save primary message registers */
+ for (int i = 0; i < MR_TOTAL; i++)
+ state->mr[i] = mr0_current[i];
+
+ /* Save ipc flags */
+ state->flags = tcb_get_ipc_flags(current);
+}
+
+void ipc_restore_state(struct ipc_state *state)
+{
+ unsigned int *mr0_current = KTCB_REF_MR0(current);
+
+ BUG_ON(!mr0_current);
+
+ /* Restore primary message registers */
+ for (int i = 0; i < MR_TOTAL; i++)
+ mr0_current[i] = state->mr[i];
+
+ /* Restore ipc flags */
+ tcb_set_ipc_flags(current, state->flags);
+}
+
+/* Send data fault ipc to the faulty task's pager */
+int __attribute__((optimize("O0")))
+fault_ipc_to_pager(u32 faulty_pc, u32 fsr, u32 far, u32 ipc_tag)
+{
+ int err;
+
+ /* mr[0] has the fault tag. The rest is the fault structure */
+ u32 mr[MR_TOTAL] = {
+ [MR_TAG] = ipc_tag,
+ [MR_SENDER] = current->tid
+ };
+
+ fault_kdata_t *fault = (fault_kdata_t *)&mr[MR_UNUSED_START];
+
+ /* Fill in fault information to pass over during ipc */
+ fault->faulty_pc = faulty_pc;
+ fault->fsr = fsr;
+ fault->far = far;
+
+ /*
+ * Write pte of the abort address,
+ * which is different on pabt/dabt
+ */
+ if (is_prefetch_abort(fsr))
+ fault->pte = virt_to_pte(faulty_pc);
+ else
+ fault->pte = virt_to_pte(far);
+
+ /*
+ * System calls save their arguments (and message registers)
+ * on the kernel stack, and these are then referenced from
+ * the caller's ktcb. Here, we forge a fault structure as if
+ * an ipc syscall had occurred, and set the reference in the
+ * ktcb such that the fault data lies at the mr0 offset when
+ * referenced as the syscall context.
+ */
+
+ /*
+ * Assign fault such that it overlaps
+ * as the MR0 reference in ktcb.
+ */
+ current->syscall_regs = (syscall_context_t *)
+ ((unsigned long)&mr[0] -
+ offsetof(syscall_context_t, r3));
+
+ /* Set current flags to short ipc */
+ tcb_set_ipc_flags(current, IPC_FLAGS_SHORT);
+
+ /* Detect if a pager is self-faulting */
+ if (current->tid == current->pagerid) {
+ printk("Pager (%d) faulted on itself. "
+ "FSR: 0x%x, FAR: 0x%x, PC: 0x%x pte: 0x%x CPU%d Exiting.\n",
+ current->tid, fault->fsr, fault->far,
+ fault->faulty_pc, fault->pte, smp_get_cpuid());
+ thread_destroy(current);
+ }
+
+ /* Send ipc to the task's pager */
+ if ((err = ipc_sendrecv(current->pagerid,
+ current->pagerid, 0)) < 0) {
+ BUG_ON(current->nlocks);
+
+ /* Return on interrupt */
+ if (err == -EINTR) {
+ printk("Thread (%d) page-faulted "
+ "and got interrupted by its pager.\n",
+ current->tid);
+ return err;
+ } else { /* Suspend on any other error */
+ printk("Thread (%d) faulted in kernel "
+ "and an error occured during "
+ "page-fault ipc. err=%d. "
+ "Suspending task.\n",
+ current->tid, err);
+ current->flags |= TASK_SUSPENDING;
+ sched_suspend_sync();
+ }
+ }
+ return 0;
+}
+
+/*
+ * When a task calls the kernel and the supplied user buffer is
+ * not mapped, the kernel generates a page fault to the task's
+ * pager so that the pager can make the decision on mapping the
+ * buffer. Remember that if a task maps its own user buffer to
+ * itself this way, the kernel can access it, since it shares
+ * that task's page table.
+ */
+int pager_pagein_request(unsigned long addr, unsigned long size,
+ unsigned int flags)
+{
+ int err;
+ u32 abort = 0;
+ unsigned long npages = __pfn(align_up(size, PAGE_SIZE));
+ struct ipc_state ipc_state;
+
+ set_abort_type(abort, ABORT_TYPE_DATA);
+
+ /* Save current ipc state */
+ ipc_save_state(&ipc_state);
+
+ /* For every page to be used by the
+ * kernel send a page-in request */
+ for (int i = 0; i < npages; i++)
+ if ((err = fault_ipc_to_pager(0, abort,
+ addr + (i * PAGE_SIZE),
+ L4_IPC_TAG_PFAULT)) < 0)
+ return err;
+
+ /* Restore ipc state */
+ ipc_restore_state(&ipc_state);
+
+ return 0;
+}
+
+/*
+ * @faulted_pc: the program counter value at the time of the fault
+ * @dfsr: the data fault status register
+ * @dfar: the data fault address register
+ * @spsr: the saved program status register of the faulting mode
+ */
+void data_abort_handler(u32 faulted_pc, u32 dfsr, u32 dfar, u32 spsr)
+{
+ int ret;
+
+ system_account_dabort();
+
+ /* Indicate abort type on dfsr */
+ set_abort_type(dfsr, ABORT_TYPE_DATA);
+
+ dbg_abort("Data abort PC:0x%x, FAR: 0x%x, FSR: 0x%x, CPU%d\n",
+ faulted_pc, dfar, dfsr, smp_get_cpuid());
+
+ /*
+ * Check abort type and tell
+ * if it's an irrecoverable fault
+ */
+ if ((ret = check_abort_type(faulted_pc, dfsr, dfar, spsr)) < 0)
+ goto die; /* Die if irrecoverable */
+ else if (ret == ABORT_HANDLED)
+ return;
+
+ /* Notify the pager */
+ fault_ipc_to_pager(faulted_pc, dfsr, dfar, L4_IPC_TAG_PFAULT);
+
+ /*
+ * FIXME:
+ * Check the return value of the pager, and also make a record
+ * of the fault that has occurred. We ought to expect progress
+ * from the pager; if the same fault occurs a number of times
+ * consecutively, we might want to kill the pager.
+ */
+
+ /* See if current task has various flags set by its pager */
+ if (current->flags & TASK_SUSPENDING) {
+ BUG_ON(current->nlocks);
+ sched_suspend_sync();
+ }
+
+ return;
+die:
+ dprintk("FAR:", dfar);
+ dprintk("PC:", faulted_pc);
+ abort_die();
+}
+
+void prefetch_abort_handler(u32 faulted_pc, u32 ifsr, u32 ifar, u32 spsr)
+{
+ int ret;
+
+ system_account_pabort();
+
+ /* Indicate abort type on ifsr */
+ set_abort_type(ifsr, ABORT_TYPE_PREFETCH);
+
+ dbg_abort("Prefetch abort PC:0x%x, FAR: 0x%x, FSR: 0x%x, CPU%d\n",
+ faulted_pc, ifar, ifsr, smp_get_cpuid());
+
+ /*
+ * Check abort type and tell
+ * if it's an irrecoverable fault
+ */
+
+ if ((ret = check_abort_type(0, ifsr, ifar, spsr)) < 0)
+ goto die; /* Die if irrecoverable */
+ else if (ret == ABORT_HANDLED)
+ return; /* Return if handled internally */
+
+ /* Notify the pager */
+ fault_ipc_to_pager(faulted_pc, ifsr, ifar, L4_IPC_TAG_PFAULT);
+
+ /*
+ * FIXME:
+ * Check the return value of the pager, and also make a record
+ * of the fault that has occurred. We ought to expect progress
+ * from the pager; if the same fault occurs a number of times
+ * consecutively, we might want to kill the pager.
+ */
+
+ /* See if current task has various flags set by its pager */
+ if (current->flags & TASK_SUSPENDING) {
+ BUG_ON(current->nlocks);
+ sched_suspend_sync();
+ }
+
+ return;
+die:
+ dprintk("FAR:", ifar);
+ abort_die();
+
+}
+
+void undefined_instr_handler(u32 undefined_address, u32 spsr, u32 lr)
+{
+ dbg_abort("Undefined instruction. PC:0x%x", undefined_address);
+
+ system_account_undef_abort();
+
+ fault_ipc_to_pager(undefined_address, 0, undefined_address,
+ L4_IPC_TAG_UNDEF_FAULT);
+
+ if (!is_user_mode(spsr)) {
+ dprintk("Undefined instruction occured in "
+ "non-user mode. addr=", undefined_address);
+ goto die;
+ }
+
+ /* See if current task has various flags set by its pager */
+ if (current->flags & TASK_SUSPENDING) {
+ BUG_ON(current->nlocks);
+ sched_suspend_sync();
+ }
+
+ return;
+
+die:
+ abort_die();
+}
+
+extern int current_irq_nest_count;
+
+/*
+ * This is called right where the nest count is increased,
+ * in case the nesting goes beyond the predefined maximum.
+ * It is another matter whether this limit is enough to
+ * guarantee that the kernel stack does not overflow.
+ *
+ * FIXME: Take measures to recover. (E.g. disable irqs etc.)
+ *
+ * Note that this is called in irq context, and it *also*
+ * trashes the designated irq stack, which is only 12 bytes.
+ *
+ * It really is assumed the system has come to a halt when
+ * this happens.
+ */
+void irq_overnest_error(void)
+{
+ printk("Irqs nested beyond limit. Current count: %d",
+ current_irq_nest_count);
+ print_early("System halted...\n");
+ while (1)
+ ;
+}
+
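The most delicate step above is how fault_ipc_to_pager() forges a syscall context: it uses offsetof() to back-compute a struct pointer from the address of a stack-local mr[] array, so that mr[0] lands exactly on the r3/MR0 slot. The standalone sketch below demonstrates the same overlay trick; the cut-down context layout is illustrative, not the kernel's full structure.

/* Standalone sketch of the overlay trick in fault_ipc_to_pager():
 * offsetof() back-computes a struct pointer from a member address,
 * so the local mr[] array lines up with the r3/MR0 slot of a forged
 * context. Layout below is a cut-down illustration. */
#include <assert.h>
#include <stddef.h>

typedef unsigned int u32;

typedef struct ctx {
        u32 spsr;
        u32 r0, r1, r2;
        u32 r3; /* MR0 would live here */
        u32 r4, r5, r6, r7, r8;
} ctx_t;

int main(void)
{
        u32 mr[6] = { 0xdeadbeef };

        /* Forge a context whose r3 member overlays mr[0] */
        ctx_t *regs = (ctx_t *)((unsigned long)&mr[0]
                                - offsetof(ctx_t, r3));

        assert(&regs->r3 == &mr[0]);
        assert(regs->r3 == 0xdeadbeef);
        return 0;
}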
Index: src/arch/or1k/linker.c
===================================================================
--- src/arch/or1k/linker.c (nonexistent)
+++ src/arch/or1k/linker.c (revision 7)
@@ -0,0 +1,12 @@
+/*
+ * Link-related marker variables that are updated at runtime are listed here.
+ *
+ * Copyright (C) 2007 Bahadir Balban
+ */
+
+/* The first free address after the last image loaded in physical memory */
+unsigned long __svc_images_end;
+
+/* The new boundaries of page tables after they're relocated */
+unsigned long __pt_start;
+unsigned long __pt_end;
Index: src/arch/or1k/SConscript
===================================================================
--- src/arch/or1k/SConscript (nonexistent)
+++ src/arch/or1k/SConscript (revision 7)
@@ -0,0 +1,23 @@
+# Inherit global environment
+import os, sys, glob
+
+PROJRELROOT = '../../'
+
+sys.path.append(PROJRELROOT)
+
+from config.projpaths import *
+from configure import *
+
+Import('env', 'symbols')
+
+
+# The set of source files associated with this SConscript file.
+src_local = ['head.S', 'vectors.S', 'syscall.S', 'exception-common.c', 'mapping-common.c', 'memset.S', 'memcpy.S']
+
+for name, val in symbols:
+ if 'CONFIG_SMP' == name:
+ src_local += ['head-smp.S']
+
+obj = env.Object(src_local)
+
+Return('obj')
Index: src/arch/or1k
===================================================================
--- src/arch/or1k (nonexistent)
+++ src/arch/or1k (revision 7)
src/arch/or1k
Property changes :
Added: svn:mergeinfo
## -0,0 +0,0 ##
Index: loader/libs/c/src/sys-baremetal/arch-or1k/sys_fputc.c
===================================================================
--- loader/libs/c/src/sys-baremetal/arch-or1k/sys_fputc.c (nonexistent)
+++ loader/libs/c/src/sys-baremetal/arch-or1k/sys_fputc.c (revision 7)
@@ -0,0 +1,16 @@
+/*
+ * Ties the platform's UART driver functions to printf.
+ *
+ * Copyright (C) 2009 B Labs Ltd.
+ */
+#include
+#include
+#include
+
+int __fputc(int c, FILE *stream)
+{
+ (void)stream; /* single UART; the stream is ignored */
+ uart_tx_char(uart_print_base, c);
+
+ return c; /* fputc contract: return the character written */
+}
+
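For reference, the standard fputc() contract is to return the character written (EOF on error). A hedged sketch of that contract follows; emit() is a stand-in for the platform's uart_tx_char(uart_print_base, c), not a real driver call.

/* Sketch of the standard fputc() contract followed above; emit()
 * stands in for the UART transmit routine. */
#include <stdio.h>

static void emit(int c)
{
        putchar(c); /* placeholder for the UART transmit routine */
}

static int uart_fputc(int c, FILE *stream)
{
        (void)stream; /* single UART; the stream is ignored */
        emit(c);
        return c; /* fputc() returns the character on success */
}

int main(void)
{
        uart_fputc('o', stdout);
        uart_fputc('k', stdout);
        uart_fputc('\n', stdout);
        return 0;
}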
Index: loader/libs/c/src/sys-baremetal/arch-or1k/sys_stdio.c
===================================================================
--- loader/libs/c/src/sys-baremetal/arch-or1k/sys_stdio.c (nonexistent)
+++ loader/libs/c/src/sys-baremetal/arch-or1k/sys_stdio.c (revision 7)
@@ -0,0 +1,67 @@
+#include
+#include
+
+extern int __fputc(int c, FILE *stream);
+
+static int ser_out(int c)
+{
+ __fputc(c, 0);
+ if (c == '\n')
+ ser_out('\r');
+ return 0;
+}
+
+static size_t
+l4kdb_write(void *data, long int position, size_t count, void *handle /*unused*/)
+{
+ size_t i;
+ char *real_data = data;
+ for (i = 0; i < count; i++)
+ ser_out(real_data[i]);
+ return count;
+}
+
+struct __file __stdin = {
+ .handle = NULL,
+ .read_fn = NULL,
+ .write_fn = NULL,
+ .close_fn = NULL,
+ .eof_fn = NULL,
+ .buffering_mode = _IONBF,
+ .buffer = NULL,
+ .unget_pos = 0,
+ .current_pos = 0,
+ .eof = 0
+};
+
+
+struct __file __stdout = {
+ .handle = NULL,
+ .read_fn = NULL,
+ .write_fn = l4kdb_write,
+ .close_fn = NULL,
+ .eof_fn = NULL,
+ .buffering_mode = _IONBF,
+ .buffer = NULL,
+ .unget_pos = 0,
+ .current_pos = 0,
+ .eof = 0
+};
+
+
+struct __file __stderr = {
+ .handle = NULL,
+ .read_fn = NULL,
+ .write_fn = l4kdb_write,
+ .close_fn = NULL,
+ .eof_fn = NULL,
+ .buffering_mode = _IONBF,
+ .buffer = NULL,
+ .unget_pos = 0,
+ .current_pos = 0,
+ .eof = 0
+};
+
+FILE *stdin = &__stdin;
+FILE *stdout = &__stdout;
+FILE *stderr = &__stderr;
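The FILE structures above wire all output through the write_fn hook, so anything printed to stdout or stderr funnels into l4kdb_write() and out over the serial line. Below is a minimal sketch of that dispatch; the two-field mini_file and the helper names are stand-ins for the loader's own __file definition, not its real API.

/* Minimal sketch of how output reaches l4kdb_write() above: the
 * printing layer funnels bytes through stream->write_fn. */
#include <stddef.h>
#include <stdio.h>

struct mini_file {
        void *handle;
        size_t (*write_fn)(void *data, long pos, size_t count,
                           void *handle);
};

static size_t mini_write(struct mini_file *f, const void *buf,
                         size_t count)
{
        if (!f->write_fn)
                return 0; /* e.g. stdin has no writer */
        return f->write_fn((void *)buf, 0, count, f->handle);
}

static size_t count_only(void *data, long pos, size_t count, void *handle)
{
        (void)data; (void)pos; (void)handle;
        return count; /* pretend every byte was transmitted */
}

int main(void)
{
        struct mini_file out = { .handle = NULL, .write_fn = count_only };

        printf("wrote %zu bytes\n", mini_write(&out, "hi\n", 3));
        return 0;
}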
Index: loader/libs/c/src/sys-baremetal/arch-or1k
===================================================================
--- loader/libs/c/src/sys-baremetal/arch-or1k (nonexistent)
+++ loader/libs/c/src/sys-baremetal/arch-or1k (revision 7)
loader/libs/c/src/sys-baremetal/arch-or1k
Property changes :
Added: svn:mergeinfo
## -0,0 +0,0 ##
Index: loader/libs/c/crt/sys-userspace/arch-or1k/crt0.S.ARM
===================================================================
--- loader/libs/c/crt/sys-userspace/arch-or1k/crt0.S.ARM (nonexistent)
+++ loader/libs/c/crt/sys-userspace/arch-or1k/crt0.S.ARM (revision 7)
@@ -0,0 +1,93 @@
+/*
+ * Australian Public Licence B (OZPLB)
+ *
+ * Version 1-0
+ *
+ * Copyright (c) 2004 National ICT Australia
+ *
+ * All rights reserved.
+ *
+ * Developed by: Embedded, Real-time and Operating Systems Program (ERTOS)
+ * National ICT Australia
+ * http://www.ertos.nicta.com.au
+ *
+ * Permission is granted by National ICT Australia, free of charge, to
+ * any person obtaining a copy of this software and any associated
+ * documentation files (the "Software") to deal with the Software without
+ * restriction, including (without limitation) the rights to use, copy,
+ * modify, adapt, merge, publish, distribute, communicate to the public,
+ * sublicense, and/or sell, lend or rent out copies of the Software, and
+ * to permit persons to whom the Software is furnished to do so, subject
+ * to the following conditions:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimers.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimers in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of National ICT Australia, nor the names of its
+ * contributors, may be used to endorse or promote products derived
+ * from this Software without specific prior written permission.
+ *
+ * EXCEPT AS EXPRESSLY STATED IN THIS LICENCE AND TO THE FULL EXTENT
+ * PERMITTED BY APPLICABLE LAW, THE SOFTWARE IS PROVIDED "AS-IS", AND
+ * NATIONAL ICT AUSTRALIA AND ITS CONTRIBUTORS MAKE NO REPRESENTATIONS,
+ * WARRANTIES OR CONDITIONS OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+ * BUT NOT LIMITED TO ANY REPRESENTATIONS, WARRANTIES OR CONDITIONS
+ * REGARDING THE CONTENTS OR ACCURACY OF THE SOFTWARE, OR OF TITLE,
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT,
+ * THE ABSENCE OF LATENT OR OTHER DEFECTS, OR THE PRESENCE OR ABSENCE OF
+ * ERRORS, WHETHER OR NOT DISCOVERABLE.
+ *
+ * TO THE FULL EXTENT PERMITTED BY APPLICABLE LAW, IN NO EVENT SHALL
+ * NATIONAL ICT AUSTRALIA OR ITS CONTRIBUTORS BE LIABLE ON ANY LEGAL
+ * THEORY (INCLUDING, WITHOUT LIMITATION, IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHERWISE) FOR ANY CLAIM, LOSS, DAMAGES OR OTHER
+ * LIABILITY, INCLUDING (WITHOUT LIMITATION) LOSS OF PRODUCTION OR
+ * OPERATION TIME, LOSS, DAMAGE OR CORRUPTION OF DATA OR RECORDS; OR LOSS
+ * OF ANTICIPATED SAVINGS, OPPORTUNITY, REVENUE, PROFIT OR GOODWILL, OR
+ * OTHER ECONOMIC LOSS; OR ANY SPECIAL, INCIDENTAL, INDIRECT,
+ * CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES, ARISING OUT OF OR IN
+ * CONNECTION WITH THIS LICENCE, THE SOFTWARE OR THE USE OF OR OTHER
+ * DEALINGS WITH THE SOFTWARE, EVEN IF NATIONAL ICT AUSTRALIA OR ITS
+ * CONTRIBUTORS HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH CLAIM, LOSS,
+ * DAMAGES OR OTHER LIABILITY.
+ *
+ * If applicable legislation implies representations, warranties, or
+ * conditions, or imposes obligations or liability on National ICT
+ * Australia or one of its contributors in respect of the Software that
+ * cannot be wholly or partly excluded, restricted or modified, the
+ * liability of National ICT Australia or the contributor is limited, to
+ * the full extent permitted by the applicable legislation, at its
+ * option, to:
+ * a. in the case of goods, any one or more of the following:
+ * i. the replacement of the goods or the supply of equivalent goods;
+ * ii. the repair of the goods;
+ * iii. the payment of the cost of replacing the goods or of acquiring
+ * equivalent goods;
+ * iv. the payment of the cost of having the goods repaired; or
+ * b. in the case of services:
+ * i. the supplying of the services again; or
+ * ii. the payment of the cost of having the services supplied again.
+ *
+ * The construction, validity and performance of this licence is governed
+ * by the laws in force in New South Wales, Australia.
+ */
+
+#ifdef __thumb__
+#define bl blx
+#endif
+
+ .code 32
+ .global _start;
+ .align;
+_start:
+ ldr sp, =__stack
+ bl platform_init
+ bl __container_init
+1:
+ b 1b
+
loader/libs/c/crt/sys-userspace/arch-or1k/crt0.S.ARM
Property changes :
Added: svn:mergeinfo
## -0,0 +0,0 ##
Index: loader/libs/c/crt/sys-userspace/arch-or1k
===================================================================
--- loader/libs/c/crt/sys-userspace/arch-or1k (nonexistent)
+++ loader/libs/c/crt/sys-userspace/arch-or1k (revision 7)
loader/libs/c/crt/sys-userspace/arch-or1k
Property changes :
Added: svn:mergeinfo
## -0,0 +0,0 ##
Index: loader/libs/c/crt/sys-baremetal/arch-or1k/crt0.S.ARM
===================================================================
--- loader/libs/c/crt/sys-baremetal/arch-or1k/crt0.S.ARM (nonexistent)
+++ loader/libs/c/crt/sys-baremetal/arch-or1k/crt0.S.ARM (revision 7)
@@ -0,0 +1,116 @@
+/*
+ * Australian Public Licence B (OZPLB)
+ *
+ * Version 1-0
+ *
+ * Copyright (c) 2004 National ICT Australia
+ *
+ * All rights reserved.
+ *
+ * Developed by: Embedded, Real-time and Operating Systems Program (ERTOS)
+ * National ICT Australia
+ * http://www.ertos.nicta.com.au
+ *
+ * Permission is granted by National ICT Australia, free of charge, to
+ * any person obtaining a copy of this software and any associated
+ * documentation files (the "Software") to deal with the Software without
+ * restriction, including (without limitation) the rights to use, copy,
+ * modify, adapt, merge, publish, distribute, communicate to the public,
+ * sublicense, and/or sell, lend or rent out copies of the Software, and
+ * to permit persons to whom the Software is furnished to do so, subject
+ * to the following conditions:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimers.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimers in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of National ICT Australia, nor the names of its
+ * contributors, may be used to endorse or promote products derived
+ * from this Software without specific prior written permission.
+ *
+ * EXCEPT AS EXPRESSLY STATED IN THIS LICENCE AND TO THE FULL EXTENT
+ * PERMITTED BY APPLICABLE LAW, THE SOFTWARE IS PROVIDED "AS-IS", AND
+ * NATIONAL ICT AUSTRALIA AND ITS CONTRIBUTORS MAKE NO REPRESENTATIONS,
+ * WARRANTIES OR CONDITIONS OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+ * BUT NOT LIMITED TO ANY REPRESENTATIONS, WARRANTIES OR CONDITIONS
+ * REGARDING THE CONTENTS OR ACCURACY OF THE SOFTWARE, OR OF TITLE,
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT,
+ * THE ABSENCE OF LATENT OR OTHER DEFECTS, OR THE PRESENCE OR ABSENCE OF
+ * ERRORS, WHETHER OR NOT DISCOVERABLE.
+ *
+ * TO THE FULL EXTENT PERMITTED BY APPLICABLE LAW, IN NO EVENT SHALL
+ * NATIONAL ICT AUSTRALIA OR ITS CONTRIBUTORS BE LIABLE ON ANY LEGAL
+ * THEORY (INCLUDING, WITHOUT LIMITATION, IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHERWISE) FOR ANY CLAIM, LOSS, DAMAGES OR OTHER
+ * LIABILITY, INCLUDING (WITHOUT LIMITATION) LOSS OF PRODUCTION OR
+ * OPERATION TIME, LOSS, DAMAGE OR CORRUPTION OF DATA OR RECORDS; OR LOSS
+ * OF ANTICIPATED SAVINGS, OPPORTUNITY, REVENUE, PROFIT OR GOODWILL, OR
+ * OTHER ECONOMIC LOSS; OR ANY SPECIAL, INCIDENTAL, INDIRECT,
+ * CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES, ARISING OUT OF OR IN
+ * CONNECTION WITH THIS LICENCE, THE SOFTWARE OR THE USE OF OR OTHER
+ * DEALINGS WITH THE SOFTWARE, EVEN IF NATIONAL ICT AUSTRALIA OR ITS
+ * CONTRIBUTORS HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH CLAIM, LOSS,
+ * DAMAGES OR OTHER LIABILITY.
+ *
+ * If applicable legislation implies representations, warranties, or
+ * conditions, or imposes obligations or liability on National ICT
+ * Australia or one of its contributors in respect of the Software that
+ * cannot be wholly or partly excluded, restricted or modified, the
+ * liability of National ICT Australia or the contributor is limited, to
+ * the full extent permitted by the applicable legislation, at its
+ * option, to:
+ * a. in the case of goods, any one or more of the following:
+ * i. the replacement of the goods or the supply of equivalent goods;
+ * ii. the repair of the goods;
+ * iii. the payment of the cost of replacing the goods or of acquiring
+ * equivalent goods;
+ * iv. the payment of the cost of having the goods repaired; or
+ * b. in the case of services:
+ * i. the supplying of the services again; or
+ * ii. the payment of the cost of having the services supplied again.
+ *
+ * The construction, validity and performance of this licence is governed
+ * by the laws in force in New South Wales, Australia.
+ */
+#include INC_PLAT(offsets.h)
+#include INC_ARCH(scu.h)
+#include INC_ARCH(asm.h)
+#include INC_ARCH(asm-macros.S)
+
+
+ .section .text
+ .code 32
+ .global _start;
+ .align;
+_start:
+ ldr sp, 1f
+
+#if defined(CONFIG_SMP)
+ /* In case all cores start executing at _start */
+ get_cpuid r0
+ teq r0, #0
+ beq core0
+wfiloop: /* Secondary cores wait here */
+ mov r0, #0x10000000 /* System Controller base */
+ orr r0, r0, #0x30
+ ldr r1, [r0]
+ teq r1, #0
+ wfeeq
+ beq wfiloop
+ mov pc, r1 /* Jump to the address specified */
+#endif
+
+core0:
+ bl platform_init
+ bl main
+1: .word _stack_top
+
+ .bss
+ .align
+_stack:
+ .space 1024
+_stack_top:
loader/libs/c/crt/sys-baremetal/arch-or1k/crt0.S.ARM
Property changes :
Added: svn:mergeinfo
## -0,0 +0,0 ##
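The CONFIG_SMP path above parks the secondary cores in a wfe loop until the word at System Controller base + 0x30 becomes nonzero, then jumps to that value. A sketch of the primary-core side of this handshake follows; the function name, register macros, and the use of dsb/sev are illustrative assumptions about how the release would be coded, not taken from this revision.

/* Sketch of the primary-core side of the wait loop above: publish
 * the secondary entry address in the word the loop polls
 * (0x10000000 + 0x30 in the assembly), then wake the parked cores.
 * Illustrative only; barriers are open-coded ARM instructions. */
typedef volatile unsigned long *ioreg_t;

#define SYSCTRL_BASE 0x10000000UL
#define MAILBOX_OFFSET 0x30UL

static inline void dsb(void)
{
        __asm__ __volatile__("dsb" ::: "memory"); /* order the store */
}

static inline void sev(void)
{
        __asm__ __volatile__("sev"); /* signal event to wfe'd cores */
}

void release_secondaries(unsigned long entry_addr)
{
        ioreg_t mailbox = (ioreg_t)(SYSCTRL_BASE + MAILBOX_OFFSET);

        *mailbox = entry_addr; /* loop exits when this goes nonzero */
        dsb();
        sev();
}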
Index: loader/libs/c/crt/sys-baremetal/arch-or1k
===================================================================
--- loader/libs/c/crt/sys-baremetal/arch-or1k (nonexistent)
+++ loader/libs/c/crt/sys-baremetal/arch-or1k (revision 7)
loader/libs/c/crt/sys-baremetal/arch-or1k
Property changes :
Added: svn:mergeinfo
## -0,0 +0,0 ##