OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

Compare Revisions

  • This comparison shows the changes necessary to convert path
    /
    from Rev 1716 to Rev 1717
    Reverse comparison

Rev 1716 → Rev 1717

/trunk/or1ksim/sim-config.h
38,23 → 38,6
} tick;
struct {
int enabled; /* Whether IMMU is enabled */
int nways; /* Number of ITLB ways */
int nsets; /* Number of ITLB sets */
int pagesize; /* ITLB page size */
int pagesize_log2; /* ITLB page size (log2(pagesize)) */
oraddr_t page_offset_mask; /* Address mask to get page offset */
oraddr_t page_mask; /* Page number mask (diff. from vpn) */
oraddr_t vpn_mask; /* Address mask to get vpn */
int lru_reload; /* What to reload the lru value to */
oraddr_t set_mask; /* Mask to get set of an address */
int entrysize; /* ITLB entry size */
int ustates; /* number of ITLB usage states */
int missdelay; /* How many cycles the miss costs */
int hitdelay; /* How many cycles the hit costs */
} immu;
struct {
int enabled; /* Whether DMMU is enabled */
int nways; /* Number of DTLB ways */
int nsets; /* Number of DTLB sets */
/trunk/or1ksim/cpu/or32/op_support.c
104,7 → 104,7
/* Wrapper around analysis() that contains all the recompiler specific stuff */
void op_support_analysis(void)
{
oraddr_t off = (cpu_state.pc & config.immu.page_offset_mask) >> 2;
oraddr_t off = (cpu_state.pc & immu_state->page_offset_mask) >> 2;
runtime.cpu.instructions++;
cpu_state.iqueue.insn_index = cpu_state.curr_page->insn_indexs[off];
cpu_state.iqueue.insn = cpu_state.curr_page->insns[off];
/trunk/or1ksim/cpu/or32/op.c
38,6 → 38,7
#include "execute.h"
#include "sprs.h"
#include "sched.h"
#include "immu.h"
 
#include "op_support.h"
 
94,7 → 95,7
{
int reg;
 
pc = (pc & config.immu.page_offset_mask) / 4;
pc = (pc & immu_state->page_offset_mask) / 4;
reg = env->curr_page->ts_bound[pc];
 
if(reg & 0x1f)
137,7 → 138,7
uint32_t t0_reg = t0, t1_reg = t1, t2_reg = t2;
struct cpu_state *cpu_reg = env;
 
addr &= config.immu.pagesize - 1;
addr &= immu_state->page_offset_mask;
addr >>= 2;
 
if(addr)
/trunk/or1ksim/cpu/or32/dyn_rec.c
126,7 → 126,7
sigsegv_state++;
case 1:
/* Run through the recompiled pages, dumping them to disk as we go */
for(i = 0; i < (2 << (32 - config.dmmu.pagesize_log2)); i++) {
for(i = 0; i < (2 << (32 - immu_state->pagesize_log2)); i++) {
dp = cpu_state.dyn_pages[i];
if(!dp)
continue;
158,19 → 158,18
struct dyn_page *dp = malloc(sizeof(struct dyn_page));
dp->or_page = IADDR_PAGE(page);
 
dp->locs = malloc(sizeof(void *) * (config.immu.pagesize / 4));
dp->locs = malloc(sizeof(void *) * (immu_state->pagesize / 4));
 
dp->host_len = 0;
dp->host_page = NULL;
dp->dirty = 1;
 
cpu_state.dyn_pages[dp->or_page >> config.immu.pagesize_log2] = dp;
 
if(do_stats) {
dp->insns = malloc(config.immu.pagesize);
dp->insn_indexs = malloc(sizeof(unsigned int) * (config.immu.pagesize / 4));
dp->insns = malloc(immu_state->pagesize);
dp->insn_indexs = malloc(sizeof(unsigned int) * (immu_state->pagesize / 4));
}
 
cpu_state.dyn_pages[dp->or_page >> immu_state->pagesize_log2] = dp;
return dp;
}
 
193,7 → 192,7
* now it will produce wrong results */
runtime.sim.mem_cycles = 0;
 
target_dp = cpu_state.dyn_pages[phys_page >> config.immu.pagesize_log2];
target_dp = cpu_state.dyn_pages[phys_page >> immu_state->pagesize_log2];
 
if(!target_dp)
target_dp = new_dp(phys_page);
213,7 → 212,7
cpu_state.cycles_dec = target_dp->delayr;
if(cpu_state.sprs[SPR_SR] & SPR_SR_IME)
/* Add the mmu hit delay to the cycle counter */
cpu_state.cycles_dec -= config.immu.hitdelay;
cpu_state.cycles_dec -= immu_state->hitdelay;
 
/* FIXME: ebp, ebx, esi and edi are expected to be preserved across function
* calls but the recompiled code trashes them... */
230,7 → 229,7
/* Only update the cycle decrementer if the mmu got enabled or disabled */
if(got_en_dis == IMMU_GOT_ENABLED)
/* Add the mmu hit delay to the cycle counter */
cpu_state.cycles_dec = cpu_state.curr_page->delayr - config.immu.hitdelay;
cpu_state.cycles_dec = cpu_state.curr_page->delayr - immu_state->hitdelay;
else if(got_en_dis == IMMU_GOT_DISABLED)
cpu_state.cycles_dec = cpu_state.curr_page->delayr;
}
255,7 → 254,7
/* Runs the scheduler. Called from except_handler (and dirtyfy_page below) */
void run_sched_out_of_line(void)
{
oraddr_t off = (cpu_state.pc & config.immu.page_offset_mask) >> 2;
oraddr_t off = (cpu_state.pc & immu_state->page_offset_mask) >> 2;
 
if(do_stats) {
cpu_state.iqueue.insn_addr = cpu_state.pc;
302,7 → 301,7
void dyn_checkwrite(oraddr_t addr)
{
/* FIXME: Do this with mprotect() */
struct dyn_page *dp = cpu_state.dyn_pages[addr >> config.immu.pagesize_log2];
struct dyn_page *dp = cpu_state.dyn_pages[addr >> immu_state->pagesize_log2];
 
/* Since the locations 0x0-0xff are nearly always written to in an exception
* handler, ignore any writes to these locations. If code ends up jumping
463,7 → 462,7
cpu_state.opqs = NULL;
 
/* Allocate the operation queue list (+1 for the page chaining) */
for(i = 0; i < (config.immu.pagesize / 4) + 1; i++) {
for(i = 0; i < (immu_state->pagesize / 4) + 1; i++) {
if(!(opq = malloc(sizeof(struct op_queue)))) {
fprintf(stderr, "OOM\n");
exit(1);
487,12 → 486,12
 
cpu_state.curr_page = NULL;
if(!(cpu_state.dyn_pages = malloc(sizeof(void *) * (2 << (32 -
config.immu.pagesize_log2))))) {
immu_state->pagesize_log2))))) {
fprintf(stderr, "OOM\n");
exit(1);
}
memset(cpu_state.dyn_pages, 0,
sizeof(void *) * (2 << (32 - config.immu.pagesize_log2)));
sizeof(void *) * (2 << (32 - immu_state->pagesize_log2)));
 
/* Register our segmentation fault handler */
sigact.sa_sigaction = dyn_sigsegv_debug;
682,7 → 681,7
void **loc;
 
/* The start of the next page */
rec_page += config.immu.pagesize;
rec_page += immu_state->pagesize;
 
printf("Recompiling page %"PRIxADDR"\n", rec_addr);
fflush(stdout);
762,7 → 761,7
gen_code(cpu_state.opqs, dyn);
 
/* Fix up the locations */
for(loc = dyn->locs; loc < &dyn->locs[config.immu.pagesize / 4]; loc++)
for(loc = dyn->locs; loc < &dyn->locs[immu_state->pagesize / 4]; loc++)
*loc += (unsigned int)dyn->host_page;
 
cpu_state.opqs->ops_param[0] += (unsigned int)dyn->host_page;
769,7 → 768,7
 
/* Search for page-local jumps */
opq = cpu_state.opqs;
for(j = 0; j < (config.immu.pagesize / 4); opq = opq->next, j++) {
for(j = 0; j < (immu_state->pagesize / 4); opq = opq->next, j++) {
if(opq->jump_local != -1)
opq->ops_param[opq->jump_local] =
(unsigned int)dyn->locs[opq->jump_local_loc >> 2];
795,7 → 794,7
 
if(do_stats) {
opq = cpu_state.opqs;
for(j = 0; j < (config.immu.pagesize / 4); j++, opq = opq->next) {
for(j = 0; j < (immu_state->pagesize / 4); j++, opq = opq->next) {
dyn->insns[j] = opq->insn;
dyn->insn_indexs[j] = opq->insn_index;
}
922,7 → 921,7
if(jump_local) {
gen_op_jmp_imm(opq, 1, 0);
opq->jump_local = opq->num_ops_param - 1;
opq->jump_local_loc = (opq->insn_addr + (orreg_t)off) & config.immu.page_offset_mask;
opq->jump_local_loc = (opq->insn_addr + (orreg_t)off) & immu_state->page_offset_mask;
} else
gen_op_do_jump(opq, 1);
}
/trunk/or1ksim/cpu/common/abstract.h
164,7 → 164,6
#endif /* ! LONGEST */
 
/* Returns the page that addr belongs to */
#define IADDR_PAGE(addr) ((addr) & config.immu.page_mask)
#define DADDR_PAGE(addr) ((addr) & config.dmmu.page_mask)
/* Endianness convenience macros */
#define le16_(x) bswap_16(x)
/trunk/or1ksim/sim-cmd.c
479,7 → 479,6
sprs_status();
PRINTF ("\n");
memory_table_status ();
if (config.immu.enabled) itlb_status(-1);
if (config.dmmu.enabled) dtlb_status(-1);
if (config.ic.enabled) ic_info();
if (config.dc.enabled) dc_info();
/trunk/or1ksim/mmu/immu.c
41,10 → 41,13
 
DEFAULT_DEBUG_CHANNEL(immu);
 
struct immu *immu_state;
 
/* Insn MMU */
 
 
static inline uorreg_t *immu_find_tlbmr(oraddr_t virtaddr, uorreg_t **itlbmr_lru)
static inline uorreg_t *immu_find_tlbmr(oraddr_t virtaddr,
uorreg_t **itlbmr_lru,
struct immu *immu)
{
int set;
int i;
52,9 → 55,9
uorreg_t *itlbmr;
 
/* Which set to check out? */
set = IADDR_PAGE(virtaddr) >> config.immu.pagesize_log2;
set &= config.immu.set_mask;
vpn = virtaddr & config.immu.vpn_mask;
set = IADDR_PAGE(virtaddr) >> immu->pagesize_log2;
set &= immu->set_mask;
vpn = virtaddr & immu->vpn_mask;
 
itlbmr = &cpu_state.sprs[SPR_ITLBMR_BASE(0) + set];
*itlbmr_lru = itlbmr;
61,8 → 64,8
 
/* Scan all ways and try to find a matching way. */
/* FIXME: Should this be reversed? */
for(i = config.immu.nways; i; i--, itlbmr += (128 * 2)) {
if(((*itlbmr & config.immu.vpn_mask) == vpn) && (*itlbmr & SPR_ITLBMR_V))
for(i = immu->nways; i; i--, itlbmr += (128 * 2)) {
if(((*itlbmr & immu->vpn_mask) == vpn) && (*itlbmr & SPR_ITLBMR_V))
return itlbmr;
}
 
75,6 → 78,7
uorreg_t *itlbmr;
uorreg_t *itlbtr;
uorreg_t *itlbmr_lru;
struct immu *immu = immu_state;
 
if (!(cpu_state.sprs[SPR_SR] & SPR_SR_IME) ||
!(cpu_state.sprs[SPR_UPR] & SPR_UPR_IMP)) {
82,7 → 86,7
return virtaddr;
}
 
itlbmr = immu_find_tlbmr(virtaddr, &itlbmr_lru);
itlbmr = immu_find_tlbmr(virtaddr, &itlbmr_lru, immu);
 
/* Did we find our tlb entry? */
if(itlbmr) { /* Yes, we did. */
92,7 → 96,7
itlbtr = itlbmr + 128;
/* Set LRUs */
for(i = 0; i < config.immu.nways; i++, itlbmr_lru += (128 * 2)) {
for(i = 0; i < immu->nways; i++, itlbmr_lru += (128 * 2)) {
if(*itlbmr_lru & SPR_ITLBMR_LRU)
*itlbmr_lru = (*itlbmr_lru & ~SPR_ITLBMR_LRU) |
((*itlbmr_lru & SPR_ITLBMR_LRU) - 0x40);
102,12 → 106,12
* is always decremented and the number of sets is always a power of two and
* as such lru_reload has all bits set that get touched during decrementing
* SPR_DTLBMR_LRU */
*itlbmr |= config.immu.lru_reload;
*itlbmr |= immu->lru_reload;
 
/* Check if page is cache inhibited */
insn_ci = *itlbtr & SPR_ITLBTR_CI;
 
runtime.sim.mem_cycles += config.immu.hitdelay;
runtime.sim.mem_cycles += immu->hitdelay;
 
/* Test for page fault */
if (cpu_state.sprs[SPR_SR] & SPR_SR_SM) {
119,22 → 123,20
}
 
TRACE("Returning physical address %"PRIxADDR"\n",
(*itlbtr & SPR_ITLBTR_PPN) | (virtaddr &
(config.immu.page_offset_mask)));
return (*itlbtr & SPR_ITLBTR_PPN) | (virtaddr &
(config.immu.page_offset_mask));
(*itlbtr & SPR_ITLBTR_PPN) | (virtaddr & immu->page_offset_mask));
return (*itlbtr & SPR_ITLBTR_PPN) | (virtaddr & immu->page_offset_mask);
}
 
/* No, we didn't. */
immu_stats.fetch_tlbmiss++;
#if 0
for (i = 0; i < config.immu.nways; i++)
for (i = 0; i < immu->nways; i++)
if (((cpu_state.sprs[SPR_ITLBMR_BASE(i) + set] & SPR_ITLBMR_LRU) >> 6) < minlru)
minway = i;
cpu_state.sprs[SPR_ITLBMR_BASE(minway) + set] &= ~SPR_ITLBMR_VPN;
cpu_state.sprs[SPR_ITLBMR_BASE(minway) + set] |= vpn << 12;
for (i = 0; i < config.immu.nways; i++) {
for (i = 0; i < immu->nways; i++) {
uorreg_t lru = cpu_state.sprs[SPR_ITLBMR_BASE(i) + set];
if (lru & SPR_ITLBMR_LRU) {
lru = (lru & ~SPR_ITLBMR_LRU) | ((lru & SPR_ITLBMR_LRU) - 0x40);
142,7 → 144,7
}
}
cpu_state.sprs[SPR_ITLBMR_BASE(way) + set] &= ~SPR_ITLBMR_LRU;
cpu_state.sprs[SPR_ITLBMR_BASE(way) + set] |= (config.immu.nsets - 1) << 6;
cpu_state.sprs[SPR_ITLBMR_BASE(way) + set] |= (immu->nsets - 1) << 6;
 
/* 1 to 1 mapping */
cpu_state.sprs[SPR_ITLBTR_BASE(minway) + set] &= ~SPR_ITLBTR_PPN;
152,8 → 154,8
#endif
 
/* if tlb refill implemented in HW */
/* return ((cpu_state.sprs[SPR_ITLBTR_BASE(minway) + set] & SPR_ITLBTR_PPN) >> 12) * config.immu.pagesize + (virtaddr % config.immu.pagesize); */
runtime.sim.mem_cycles += config.immu.missdelay;
/* return ((cpu_state.sprs[SPR_ITLBTR_BASE(minway) + set] & SPR_ITLBTR_PPN) >> 12) * immu->pagesize + (virtaddr % immu->pagesize); */
runtime.sim.mem_cycles += immu->missdelay;
 
except_handle(EXCEPT_ITLBMISS, virtaddr);
return 0;
174,6 → 176,7
uorreg_t *itlbmr;
uorreg_t *itlbtr;
uorreg_t *itlbmr_lru;
struct immu *immu = immu_state;
 
if (!(cpu_state.sprs[SPR_SR] & SPR_SR_IME) ||
!(cpu_state.sprs[SPR_UPR] & SPR_UPR_IMP)) {
180,7 → 183,7
return(virtaddr);
}
 
itlbmr = immu_find_tlbmr(virtaddr, &itlbmr_lru);
itlbmr = immu_find_tlbmr(virtaddr, &itlbmr_lru, immu);
 
/* Did we find our tlb entry? */
if(itlbmr) { /* Yes, we did. */
199,8 → 202,7
}
}
 
return (*itlbtr & SPR_ITLBTR_PPN) | (virtaddr &
(config.immu.page_offset_mask));
return (*itlbtr & SPR_ITLBTR_PPN) | (virtaddr & immu->page_offset_mask);
}
 
return(0);
207,20 → 209,10
}
 
 
void itlb_info(void)
{
if (!(cpu_state.sprs[SPR_UPR] & SPR_UPR_IMP)) {
PRINTF("IMMU not implemented. Set UPR[IMP].\n");
return;
}
 
PRINTF("Insn MMU %dKB: ", config.immu.nsets * config.immu.entrysize * config.immu.nways / 1024);
PRINTF("%d ways, %d sets, entry size %d bytes\n", config.immu.nways, config.immu.nsets, config.immu.entrysize);
}
 
/* FIXME: Check validity */
/* First check if virtual address is covered by ITLB and if it is:
- increment ITLB read hit stats,
- set 'lru' at this way to config.immu.ustates - 1 and
- set 'lru' at this way to immu->ustates - 1 and
decrement 'lru' of other ways unless they have reached 0,
- check page access attributes and invoke IMMU page fault exception
handler if necessary
227,15 → 219,16
and if not:
- increment ITLB read miss stats
- find lru way and entry and invoke ITLB miss exception handler
- set 'lru' with config.immu.ustates - 1 and decrement 'lru' of other
- set 'lru' with immu->ustates - 1 and decrement 'lru' of other
ways unless they have reached 0
*/
 
void itlb_status(int start_set)
static void itlb_status(void *dat)
{
struct immu *immu = dat;
int set;
int way;
int end_set = config.immu.nsets;
int end_set = immu->nsets;
 
if (!(cpu_state.sprs[SPR_UPR] & SPR_UPR_IMP)) {
PRINTF("IMMU not implemented. Set UPR[IMP].\n");
242,17 → 235,10
return;
}
 
if ((start_set >= 0) && (start_set < end_set))
end_set = start_set + 1;
else
start_set = 0;
 
if (start_set < end_set) PRINTF("\nIMMU: ");
if (0 < end_set) PRINTF("\nIMMU: ");
/* Scan set(s) and way(s). */
for (set = start_set; set < end_set; set++) {
PRINTF("\nSet %x: ", set);
for (way = 0; way < config.immu.nways; way++) {
PRINTF(" way %d: ", way);
for (set = 0; set < end_set; set++) {
for (way = 0; way < immu->nways; way++) {
PRINTF("%s\n", dump_spr(SPR_ITLBMR_BASE(way) + set,
cpu_state.sprs[SPR_ITLBMR_BASE(way) + set]));
PRINTF("%s\n", dump_spr(SPR_ITLBTR_BASE(way) + set,
259,23 → 245,27
cpu_state.sprs[SPR_ITLBTR_BASE(way) + set]));
}
}
if (start_set < end_set) PRINTF("\n");
if (0 < end_set) PRINTF("\n");
}
 
/*---------------------------------------------------[ IMMU configuration ]---*/
static void immu_enabled(union param_val val, void *dat)
{
struct immu *immu = dat;
 
if(val.int_val)
cpu_state.sprs[SPR_UPR] |= SPR_UPR_IMP;
else
cpu_state.sprs[SPR_UPR] &= ~SPR_UPR_IMP;
config.immu.enabled = val.int_val;
immu->enabled = val.int_val;
}
 
static void immu_nsets(union param_val val, void *dat)
{
struct immu *immu = dat;
 
if (is_power2(val.int_val) && val.int_val <= 256) {
config.immu.nsets = val.int_val;
immu->nsets = val.int_val;
cpu_state.sprs[SPR_IMMUCFGR] &= ~SPR_IMMUCFGR_NTS;
cpu_state.sprs[SPR_IMMUCFGR] |= log2_int(val.int_val) << 3;
}
285,8 → 275,10
 
static void immu_nways(union param_val val, void *dat)
{
struct immu *immu = dat;
 
if (val.int_val >= 1 && val.int_val <= 4) {
config.immu.nways = val.int_val;
immu->nways = val.int_val;
cpu_state.sprs[SPR_IMMUCFGR] &= ~SPR_IMMUCFGR_NTW;
cpu_state.sprs[SPR_IMMUCFGR] |= val.int_val - 1;
}
296,8 → 288,10
 
static void immu_pagesize(union param_val val, void *dat)
{
struct immu *immu = dat;
 
if (is_power2(val.int_val))
config.immu.pagesize = val.int_val;
immu->pagesize = val.int_val;
else
CONFIG_ERROR("value of power of two expected.");
}
304,8 → 298,10
 
static void immu_entrysize(union param_val val, void *dat)
{
struct immu *immu = dat;
 
if (is_power2(val.int_val))
config.immu.entrysize = val.int_val;
immu->entrysize = val.int_val;
else
CONFIG_ERROR("value of power of two expected.");
}
312,8 → 308,10
 
static void immu_ustates(union param_val val, void *dat)
{
struct immu *immu = dat;
 
if (val.int_val >= 2 && val.int_val <= 4)
config.immu.ustates = val.int_val;
immu->ustates = val.int_val;
else
CONFIG_ERROR("invalid USTATE.");
}
320,28 → 318,57
 
static void immu_missdelay(union param_val val, void *dat)
{
config.immu.missdelay = val.int_val;
struct immu *immu = dat;
 
immu->missdelay = val.int_val;
}
 
static void immu_hitdelay(union param_val val, void *dat)
{
config.immu.hitdelay = val.int_val;
struct immu *immu = dat;
 
immu->hitdelay = val.int_val;
}
 
static void *immu_start_sec(void)
{
return NULL;
struct immu *immu;
 
if(!(immu = malloc(sizeof(struct immu)))) {
fprintf(stderr, "OOM\n");
exit(1);
}
 
immu->enabled = 0;
immu->hitdelay = 1;
immu->missdelay = 1;
immu->pagesize = 8192;
/* FIXME: Something sane */
immu->entrysize = 0;
 
immu_state = immu;
 
return immu;
}
 
static void immu_end_sec(void *dat)
{
struct immu *immu = dat;
 
/* Precalculate some values for use during address translation */
config.immu.pagesize_log2 = log2_int(config.immu.pagesize);
config.immu.page_offset_mask = config.immu.pagesize - 1;
config.immu.page_mask = ~config.immu.page_offset_mask;
config.immu.vpn_mask = ~((config.immu.pagesize * config.immu.nsets) - 1);
config.immu.set_mask = config.immu.nsets - 1;
config.immu.lru_reload = (config.immu.set_mask << 6) & SPR_ITLBMR_LRU;
immu->pagesize_log2 = log2_int(immu->pagesize);
immu->page_offset_mask = immu->pagesize - 1;
immu->page_mask = ~immu->page_offset_mask;
immu->vpn_mask = ~((immu->pagesize * immu->nsets) - 1);
immu->set_mask = immu->nsets - 1;
immu->lru_reload = (immu->set_mask << 6) & SPR_ITLBMR_LRU;
 
if(immu->enabled) {
PRINTF("Insn MMU %dKB: %d ways, %d sets, entry size %d bytes\n",
immu->nsets * immu->entrysize * immu->nways / 1024, immu->nways,
immu->nsets, immu->entrysize);
reg_sim_stat(itlb_status, immu);
}
}
 
void reg_immu_sec(void)
/trunk/or1ksim/mmu/immu.h
17,8 → 17,27
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
 
/* FIXME: Move to immu.c once the dust settles */
struct immu {
int enabled; /* Whether IMMU is enabled */
int nways; /* Number of ITLB ways */
int nsets; /* Number of ITLB sets */
oraddr_t pagesize; /* ITLB page size */
int pagesize_log2; /* ITLB page size (log2(pagesize)) */
oraddr_t page_offset_mask; /* Address mask to get page offset */
oraddr_t page_mask; /* Page number mask (diff. from vpn) */
oraddr_t vpn_mask; /* Address mask to get vpn */
int lru_reload; /* What to reload the lru value to */
oraddr_t set_mask; /* Mask to get set of an address */
int entrysize; /* ITLB entry size */
int ustates; /* number of ITLB usage states */
int missdelay; /* How many cycles the miss costs */
int hitdelay; /* How many cycles the hit costs */
};
#define IADDR_PAGE(addr) ((addr) & immu_state->page_mask)
/* FIXME: Remove the need for this global */
extern struct immu *immu_state;
 
oraddr_t immu_translate(oraddr_t virtaddr);
oraddr_t immu_simulate_tlb(oraddr_t virtaddr);
oraddr_t peek_into_itlb(oraddr_t virtaddr);
void itlb_status(int start_set);
void init_immu(void);
/trunk/or1ksim/sim-config.c
88,11 → 88,6
else config.sim.system_kfreq = INT_MAX;
if (config.sim.system_kfreq <= 0) config.sim.system_kfreq = 1;
/* IMMU & DMMU*/
config.immu.enabled = 0;
config.immu.hitdelay = 1;
config.immu.missdelay = 1;
config.immu.pagesize = 8192;
config.dmmu.enabled = 0;
config.dmmu.hitdelay = 1;
config.dmmu.missdelay = 1;

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.