OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

Compare Revisions

  • This comparison shows the changes necessary to convert path
    /
    from Rev 1538 to Rev 1539
    Reverse comparison

Rev 1538 → Rev 1539

/trunk/or1ksim/sim-config.h
59,6 → 59,12
int nways; /* Number of DTLB ways */
int nsets; /* Number of DTLB sets */
int pagesize; /* DTLB page size */
int pagesize_log2; /* DTLB page size (log2(pagesize)) */
oraddr_t page_offset_mask; /* Address mask to get page offset */
oraddr_t page_mask; /* Page number mask (diff. from vpn) */
oraddr_t vpn_mask; /* Address mask to get vpn */
int lru_reload; /* What to reload the lru value to */
oraddr_t set_mask; /* Mask to get set of an address */
int entrysize; /* DTLB entry size */
int ustates; /* number of DTLB usage states */
int missdelay; /* How many cycles the miss costs */
/trunk/or1ksim/cpu/common/abstract.h
161,6 → 161,7
 
/* Returns the page that addr belongs to */
#define IADDR_PAGE(addr) ((addr) & config.immu.page_mask)
#define DADDR_PAGE(addr) ((addr) & config.dmmu.page_mask)
 
/* History of execution */
#define HISTEXEC_LEN 200
/trunk/or1ksim/mmu/dmmu.c
17,7 → 17,7
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
 
/* DMMU model (not functional yet, currently just copy of data cache). */
/* DMMU model, perfectly functional. */
 
#include "config.h"
 
40,17 → 40,50
 
DEFAULT_DEBUG_CHANNEL(dmmu);
 
extern int cont_run;
 
/* Data MMU */
 
inline oraddr_t dmmu_simulate_tlb(oraddr_t virtaddr, int write_access)
/* Precalculates some values for use during address translation */
void init_dmmu(void)
{
int set, way = -1;
config.dmmu.pagesize_log2 = log2(config.dmmu.pagesize);
config.dmmu.page_offset_mask = config.dmmu.pagesize - 1;
config.dmmu.page_mask = ~config.dmmu.page_offset_mask;
config.dmmu.vpn_mask = ~((config.dmmu.pagesize * config.dmmu.nsets) - 1);
config.dmmu.set_mask = config.dmmu.nsets - 1;
config.dmmu.lru_reload = (config.dmmu.set_mask << 6) & SPR_DTLBMR_LRU;
}
 
inline uorreg_t *dmmu_find_tlbmr(oraddr_t virtaddr, uorreg_t **dtlbmr_lru)
{
int set;
int i;
oraddr_t tagaddr;
oraddr_t vpn, ppn;
oraddr_t vpn;
uorreg_t *dtlbmr;
 
/* Which set to check out? */
set = DADDR_PAGE(virtaddr) >> config.dmmu.pagesize_log2;
set &= config.dmmu.set_mask;
vpn = virtaddr & config.dmmu.vpn_mask;
 
dtlbmr = &cpu_state.sprs[SPR_DTLBMR_BASE(0) + set];
*dtlbmr_lru = dtlbmr;
 
/* FIXME: Should this be reversed? */
for(i = config.dmmu.nways; i; i--, dtlbmr += (128 * 2)) {
if(((*dtlbmr & config.dmmu.vpn_mask) == vpn) && (*dtlbmr & SPR_DTLBMR_V))
return dtlbmr;
}
 
return NULL;
}
 
oraddr_t dmmu_translate(oraddr_t virtaddr, int write_access)
{
int i;
uorreg_t *dtlbmr;
uorreg_t *dtlbtr;
uorreg_t *dtlbmr_lru;
 
if (!(cpu_state.sprs[SPR_SR] & SPR_SR_DME) ||
!(cpu_state.sprs[SPR_UPR] & SPR_UPR_DMP)) {
data_ci = (virtaddr >= 0x80000000);
57,86 → 90,86
return virtaddr;
}
 
/* Which set to check out? */
set = (virtaddr / config.dmmu.pagesize) % config.dmmu.nsets;
tagaddr = (virtaddr / config.dmmu.pagesize) / config.dmmu.nsets;
vpn = virtaddr / (config.dmmu.pagesize * config.dmmu.nsets);
/* Scan all ways and try to find a matching way. */
for (i = 0; i < config.dmmu.nways; i++)
if (((cpu_state.sprs[SPR_DTLBMR_BASE(i) + set] / (config.dmmu.pagesize * config.dmmu.nsets)) == vpn) &&
(cpu_state.sprs[SPR_DTLBMR_BASE(i) + set] & SPR_DTLBMR_V))
way = i;
dtlbmr = dmmu_find_tlbmr(virtaddr, &dtlbmr_lru);
 
/* Did we find our tlb entry? */
if (way >= 0) { /* Yes, we did. */
/* Did we find our tlb entry? */
if(dtlbmr) { /* Yes, we did. */
dmmu_stats.loads_tlbhit++;
 
dtlbtr = dtlbmr + 128;
TRACE("DTLB hit (virtaddr=%"PRIxADDR") at %lli.\n", virtaddr,
runtime.sim.cycles);
/* Set LRUs */
for (i = 0; i < config.dmmu.nways; i++) {
uorreg_t lru = cpu_state.sprs[SPR_DTLBMR_BASE(i) + set];
if (lru & SPR_DTLBMR_LRU) {
lru = (lru & ~SPR_DTLBMR_LRU) | ((lru & SPR_DTLBMR_LRU) - 0x40);
cpu_state.sprs[SPR_DTLBMR_BASE(i) + set] = lru;
}
for(i = 0; i < config.dmmu.nways; i++, dtlbmr_lru += (128 * 2)) {
if(*dtlbmr_lru & SPR_DTLBMR_LRU)
*dtlbmr_lru = (*dtlbmr_lru & ~SPR_DTLBMR_LRU) |
((*dtlbmr_lru & SPR_DTLBMR_LRU) - 0x40);
}
cpu_state.sprs[SPR_DTLBMR_BASE(way) + set] &= ~SPR_DTLBMR_LRU;
cpu_state.sprs[SPR_DTLBMR_BASE(way) + set] |= (config.dmmu.nsets - 1) << 6;
 
/* This is not necessary `*dtlbmr &= ~SPR_DTLBMR_LRU;' since SPR_DTLBMR_LRU
* is always decremented and the number of sets is always a power of two and
* as such lru_reload has all bits set that get touched during decrementing
* SPR_DTLBMR_LRU */
*dtlbmr |= config.dmmu.lru_reload;
 
/* Check if page is cache inhibited */
data_ci = (cpu_state.sprs[SPR_DTLBTR_BASE(way) + set] & SPR_DTLBTR_CI) == SPR_DTLBTR_CI;
data_ci = *dtlbtr & SPR_DTLBTR_CI;
 
runtime.sim.mem_cycles += config.dmmu.hitdelay;
ppn = cpu_state.sprs[SPR_DTLBTR_BASE(way) + set] / config.dmmu.pagesize;
 
/* Test for page fault */
if (cpu_state.sprs[SPR_SR] & SPR_SR_SM) {
if ( write_access && !(cpu_state.sprs[SPR_DTLBTR_BASE(way) + set] & SPR_DTLBTR_SWE)
|| !write_access && !(cpu_state.sprs[SPR_DTLBTR_BASE(way) + set] & SPR_DTLBTR_SRE))
if ( (write_access && !(*dtlbtr & SPR_DTLBTR_SWE))
|| (!write_access && !(*dtlbtr & SPR_DTLBTR_SRE)))
except_handle(EXCEPT_DPF, virtaddr);
} else {
if ( write_access && !(cpu_state.sprs[SPR_DTLBTR_BASE(way) + set] & SPR_DTLBTR_UWE)
|| !write_access && !(cpu_state.sprs[SPR_DTLBTR_BASE(way) + set] & SPR_DTLBTR_URE))
if ( (write_access && !(*dtlbtr & SPR_DTLBTR_UWE))
|| (!write_access && !(*dtlbtr & SPR_DTLBTR_URE)))
except_handle(EXCEPT_DPF, virtaddr);
}
 
return (ppn * config.dmmu.pagesize) + (virtaddr % config.dmmu.pagesize);
TRACE("Returning physical address %"PRIxADDR"\n",
(*dtlbtr & SPR_DTLBTR_PPN) | (virtaddr &
(config.dmmu.page_offset_mask)));
return (*dtlbtr & SPR_DTLBTR_PPN) | (virtaddr &
(config.dmmu.page_offset_mask));
}
else { /* No, we didn't. */
dmmu_stats.loads_tlbmiss++;
 
/* No, we didn't. */
dmmu_stats.loads_tlbmiss++;
#if 0
for (i = 0; i < config.dmmu.nways; i++)
if (((cpu_state.sprs[SPR_DTLBMR_BASE(i) + set] & SPR_DTLBMR_LRU) >> 6) < minlru)
minway = i;
cpu_state.sprs[SPR_DTLBMR_BASE(minway) + set] &= ~SPR_DTLBMR_VPN;
cpu_state.sprs[SPR_DTLBMR_BASE(minway) + set] |= vpn << 12;
for (i = 0; i < config.dmmu.nways; i++) {
uorreg_t lru = cpu_state.sprs[SPR_DTLBMR_BASE(i) + set];
if (lru & SPR_DTLBMR_LRU) {
lru = (lru & ~SPR_DTLBMR_LRU) | ((lru & SPR_DTLBMR_LRU) - 0x40);
cpu_state.sprs[SPR_DTLBMR_BASE(i) + set] = lru;
}
for (i = 0; i < config.dmmu.nways; i++)
if (((cpu_state.sprs[SPR_DTLBMR_BASE(i) + set] & SPR_DTLBMR_LRU) >> 6) < minlru)
minway = i;
cpu_state.sprs[SPR_DTLBMR_BASE(minway) + set] &= ~SPR_DTLBMR_VPN;
cpu_state.sprs[SPR_DTLBMR_BASE(minway) + set] |= vpn << 12;
for (i = 0; i < config.dmmu.nways; i++) {
uorreg_t lru = cpu_state.sprs[SPR_DTLBMR_BASE(i) + set];
if (lru & SPR_DTLBMR_LRU) {
lru = (lru & ~SPR_DTLBMR_LRU) | ((lru & SPR_DTLBMR_LRU) - 0x40);
cpu_state.sprs[SPR_DTLBMR_BASE(i) + set] = lru;
}
cpu_state.sprs[SPR_DTLBMR_BASE(way) + set] &= ~SPR_DTLBMR_LRU;
cpu_state.sprs[SPR_DTLBMR_BASE(way) + set] |= (config.dmmu.nsets - 1) << 6;
}
cpu_state.sprs[SPR_DTLBMR_BASE(way) + set] &= ~SPR_DTLBMR_LRU;
cpu_state.sprs[SPR_DTLBMR_BASE(way) + set] |= (config.dmmu.nsets - 1) << 6;
 
/* 1 to 1 mapping */
cpu_state.sprs[SPR_DTLBTR_BASE(minway) + set] &= ~SPR_DTLBTR_PPN;
cpu_state.sprs[SPR_DTLBTR_BASE(minway) + set] |= vpn << 12;
/* 1 to 1 mapping */
cpu_state.sprs[SPR_DTLBTR_BASE(minway) + set] &= ~SPR_DTLBTR_PPN;
cpu_state.sprs[SPR_DTLBTR_BASE(minway) + set] |= vpn << 12;
 
cpu_state.sprs[SPR_DTLBMR_BASE(minway) + set] |= SPR_DTLBMR_V;
cpu_state.sprs[SPR_DTLBMR_BASE(minway) + set] |= SPR_DTLBMR_V;
#endif
TRACE("DTLB miss (virtaddr=%"PRIxADDR") at %lli.\n", virtaddr,
runtime.sim.cycles);
runtime.sim.mem_cycles += config.dmmu.missdelay;
/* if tlb refill implemented in HW */
/* return ((cpu_state.sprs[SPR_DTLBTR_BASE(minway) + set] & SPR_DTLBTR_PPN) >> 12) * config.dmmu.pagesize + (virtaddr % config.dmmu.pagesize); */
except_handle(EXCEPT_DTLBMISS, virtaddr);
return 0;
}
TRACE("DTLB miss (virtaddr=%"PRIxADDR") at %lli.\n", virtaddr,
runtime.sim.cycles);
runtime.sim.mem_cycles += config.dmmu.missdelay;
/* if tlb refill implemented in HW */
/* return ((cpu_state.sprs[SPR_DTLBTR_BASE(minway) + set] & SPR_DTLBTR_PPN) >> 12) * config.dmmu.pagesize + (virtaddr % config.dmmu.pagesize); */
 
except_handle(EXCEPT_DTLBMISS, virtaddr);
return 0;
}
 
/* DESC: try to find EA -> PA translation without changing
158,10 → 191,9
*/
oraddr_t peek_into_dtlb(oraddr_t virtaddr, int write_access, int through_dc)
{
int set, way = -1;
int i;
oraddr_t tagaddr;
oraddr_t vpn, ppn;
uorreg_t *dtlbmr;
uorreg_t *dtlbtr;
uorreg_t *dtlbmr_lru;
 
if (!(cpu_state.sprs[SPR_SR] & SPR_SR_DME) ||
!(cpu_state.sprs[SPR_UPR] & SPR_UPR_DMP)) {
170,33 → 202,27
return virtaddr;
}
 
/* Which set to check out? */
set = (virtaddr / config.dmmu.pagesize) % config.dmmu.nsets;
tagaddr = (virtaddr / config.dmmu.pagesize) / config.dmmu.nsets;
vpn = virtaddr / (config.dmmu.pagesize * config.dmmu.nsets);
/* Scan all ways and try to find a matching way. */
for (i = 0; i < config.dmmu.nways; i++)
if (((cpu_state.sprs[SPR_DTLBMR_BASE(i) + set] / (config.dmmu.pagesize * config.dmmu.nsets)) == vpn) &&
(cpu_state.sprs[SPR_DTLBMR_BASE(i) + set] & SPR_DTLBMR_V))
way = i;
dtlbmr = dmmu_find_tlbmr(virtaddr, &dtlbmr_lru);
 
/* Did we find our tlb entry? */
if (way >= 0) { /* Yes, we did. */
/* Did we find our tlb entry? */
if (dtlbmr) { /* Yes, we did. */
dmmu_stats.loads_tlbhit++;
 
dtlbtr = dtlbmr + 128;
TRACE("DTLB hit (virtaddr=%"PRIxADDR") at %lli.\n", virtaddr,
runtime.sim.cycles);
/* Test for page fault */
if (cpu_state.sprs[SPR_SR] & SPR_SR_SM) {
if ( write_access && !(cpu_state.sprs[SPR_DTLBTR_BASE(way) + set] & SPR_DTLBTR_SWE)
|| !write_access && !(cpu_state.sprs[SPR_DTLBTR_BASE(way) + set] & SPR_DTLBTR_SRE))
if((write_access && !(*dtlbtr & SPR_DTLBTR_SWE)) ||
(!write_access && !(*dtlbtr & SPR_DTLBTR_SRE)))
/* otherwise exception DPF would be raised */
return(0);
} else {
if ( write_access && !(cpu_state.sprs[SPR_DTLBTR_BASE(way) + set] & SPR_DTLBTR_UWE)
|| !write_access && !(cpu_state.sprs[SPR_DTLBTR_BASE(way) + set] & SPR_DTLBTR_URE))
if((write_access && !(*dtlbtr & SPR_DTLBTR_UWE)) ||
(!write_access && !(*dtlbtr & SPR_DTLBTR_URE)))
/* otherwise exception DPF would be raised */
return(0);
204,30 → 230,17
 
if (through_dc) {
/* Check if page is cache inhibited */
data_ci = (cpu_state.sprs[SPR_DTLBTR_BASE(way) + set] & SPR_DTLBTR_CI) == SPR_DTLBTR_CI;
data_ci = *dtlbtr & SPR_DTLBTR_CI;
}
 
ppn = cpu_state.sprs[SPR_DTLBTR_BASE(way) + set] / config.dmmu.pagesize;
return (ppn * config.dmmu.pagesize) + (virtaddr % config.dmmu.pagesize);
return (*dtlbtr & SPR_DTLBTR_PPN) | (virtaddr &
(config.dmmu.page_offset_mask));
}
else { /* No, we didn't. */
return(0);
}
ERR("ERR, should never have happened\n");
 
return(0);
}
 
 
oraddr_t dmmu_translate(oraddr_t virtaddr, int write_access)
{
oraddr_t phyaddr = dmmu_simulate_tlb(virtaddr, write_access);
/* PRINTF("DMMU translate(%"PRIxADDR") = %"PRIxADDR"\n", virtaddr, phyaddr);*/
return phyaddr;
}
 
 
void dtlb_info(void)
{
if (!(cpu_state.sprs[SPR_UPR] & SPR_UPR_DMP)) {
/trunk/or1ksim/mmu/dmmu.h
21,3 → 21,4
oraddr_t dmmu_simulate_tlb(oraddr_t virtaddr, int write_access);
oraddr_t peek_into_dtlb(oraddr_t virtaddr, int write_access, int through_dc);
void dtlb_status(int start_set);
void init_dmmu(void);
/trunk/or1ksim/toplevel.c
68,7 → 68,7
#include "cuc.h"
 
/* CVS revision number. */
const char rcsrev[] = "$Revision: 1.130 $";
const char rcsrev[] = "$Revision: 1.131 $";
 
inline void debug(int level, const char *format, ...)
{
199,6 → 199,7
/* Initalizes all devices and sim */
void sim_init (void)
{
init_dmmu();
init_immu();
init_labels();
init_breakpoints();

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.