OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

Compare Revisions

  • This comparison shows the changes necessary to convert path
    /
    from Rev 1717 to Rev 1718
    Reverse comparison

Rev 1717 → Rev 1718

/trunk/or1ksim/sim-config.h
36,25 → 36,8
struct {
int enabled; /* Is tick timer enabled? */
} tick;
 
struct {
int enabled; /* Whether DMMU is enabled */
int nways; /* Number of DTLB ways */
int nsets; /* Number of DTLB sets */
int pagesize; /* DTLB page size */
int pagesize_log2; /* DTLB page size (log2(pagesize)) */
oraddr_t page_offset_mask; /* Address mask to get page offset */
oraddr_t page_mask; /* Page number mask (diff. from vpn) */
oraddr_t vpn_mask; /* Address mask to get vpn */
int lru_reload; /* What to reload the lru value to */
oraddr_t set_mask; /* Mask to get set of an address */
int entrysize; /* DTLB entry size */
int ustates; /* number of DTLB usage states */
int missdelay; /* How many cycles the miss costs */
int hitdelay; /* How many cycles the hit costs */
} dmmu;
struct {
int enabled; /* Whether instruction cache is enabled */
int nways; /* Number of IC ways */
int nsets; /* Number of IC sets */
/trunk/or1ksim/cpu/common/abstract.h
163,8 → 163,6
#define ULONGEST unsigned long long
#endif /* ! LONGEST */
 
/* Returns the page that addr belongs to */
#define DADDR_PAGE(addr) ((addr) & config.dmmu.page_mask)
/* Endianness convenience macros */
#define le16_(x) bswap_16(x)
 
/trunk/or1ksim/sim-cmd.c
479,7 → 479,6
sprs_status();
PRINTF ("\n");
memory_table_status ();
if (config.dmmu.enabled) dtlb_status(-1);
if (config.ic.enabled) ic_info();
if (config.dc.enabled) dc_info();
/trunk/or1ksim/mmu/dmmu.c
41,10 → 41,13
 
DEFAULT_DEBUG_CHANNEL(dmmu);
 
struct dmmu *dmmu_state;
 
/* Data MMU */
 
 
inline uorreg_t *dmmu_find_tlbmr(oraddr_t virtaddr, uorreg_t **dtlbmr_lru)
static inline uorreg_t *dmmu_find_tlbmr(oraddr_t virtaddr,
uorreg_t **dtlbmr_lru,
struct dmmu *dmmu)
{
int set;
int i;
52,16 → 55,16
uorreg_t *dtlbmr;
 
/* Which set to check out? */
set = DADDR_PAGE(virtaddr) >> config.dmmu.pagesize_log2;
set &= config.dmmu.set_mask;
vpn = virtaddr & config.dmmu.vpn_mask;
set = DADDR_PAGE(virtaddr) >> dmmu->pagesize_log2;
set &= dmmu->set_mask;
vpn = virtaddr & dmmu->vpn_mask;
 
dtlbmr = &cpu_state.sprs[SPR_DTLBMR_BASE(0) + set];
*dtlbmr_lru = dtlbmr;
 
/* FIXME: Should this be reversed? */
for(i = config.dmmu.nways; i; i--, dtlbmr += (128 * 2)) {
if(((*dtlbmr & config.dmmu.vpn_mask) == vpn) && (*dtlbmr & SPR_DTLBMR_V))
for(i = dmmu->nways; i; i--, dtlbmr += (128 * 2)) {
if(((*dtlbmr & dmmu->vpn_mask) == vpn) && (*dtlbmr & SPR_DTLBMR_V))
return dtlbmr;
}
 
74,6 → 77,7
uorreg_t *dtlbmr;
uorreg_t *dtlbtr;
uorreg_t *dtlbmr_lru;
struct dmmu *dmmu = dmmu_state;
 
if (!(cpu_state.sprs[SPR_SR] & SPR_SR_DME) ||
!(cpu_state.sprs[SPR_UPR] & SPR_UPR_DMP)) {
81,7 → 85,7
return virtaddr;
}
 
dtlbmr = dmmu_find_tlbmr(virtaddr, &dtlbmr_lru);
dtlbmr = dmmu_find_tlbmr(virtaddr, &dtlbmr_lru, dmmu);
 
/* Did we find our tlb entry? */
if(dtlbmr) { /* Yes, we did. */
93,7 → 97,7
runtime.sim.cycles);
/* Set LRUs */
for(i = 0; i < config.dmmu.nways; i++, dtlbmr_lru += (128 * 2)) {
for(i = 0; i < dmmu->nways; i++, dtlbmr_lru += (128 * 2)) {
if(*dtlbmr_lru & SPR_DTLBMR_LRU)
*dtlbmr_lru = (*dtlbmr_lru & ~SPR_DTLBMR_LRU) |
((*dtlbmr_lru & SPR_DTLBMR_LRU) - 0x40);
103,12 → 107,12
* is always decremented and the number of sets is always a power of two and
* as such lru_reload has all bits set that get touched during decrementing
* SPR_DTLBMR_LRU */
*dtlbmr |= config.dmmu.lru_reload;
*dtlbmr |= dmmu->lru_reload;
 
/* Check if page is cache inhibited */
data_ci = *dtlbtr & SPR_DTLBTR_CI;
 
runtime.sim.mem_cycles += config.dmmu.hitdelay;
runtime.sim.mem_cycles += dmmu->hitdelay;
 
/* Test for page fault */
if (cpu_state.sprs[SPR_SR] & SPR_SR_SM) {
122,22 → 126,20
}
 
TRACE("Returning physical address %"PRIxADDR"\n",
(*dtlbtr & SPR_DTLBTR_PPN) | (virtaddr &
(config.dmmu.page_offset_mask)));
return (*dtlbtr & SPR_DTLBTR_PPN) | (virtaddr &
(config.dmmu.page_offset_mask));
(*dtlbtr & SPR_DTLBTR_PPN) | (virtaddr & (dmmu->page_offset_mask)));
return (*dtlbtr & SPR_DTLBTR_PPN) | (virtaddr & (dmmu->page_offset_mask));
}
 
/* No, we didn't. */
dmmu_stats.loads_tlbmiss++;
#if 0
for (i = 0; i < config.dmmu.nways; i++)
for (i = 0; i < dmmu->nways; i++)
if (((cpu_state.sprs[SPR_DTLBMR_BASE(i) + set] & SPR_DTLBMR_LRU) >> 6) < minlru)
minway = i;
cpu_state.sprs[SPR_DTLBMR_BASE(minway) + set] &= ~SPR_DTLBMR_VPN;
cpu_state.sprs[SPR_DTLBMR_BASE(minway) + set] |= vpn << 12;
for (i = 0; i < config.dmmu.nways; i++) {
for (i = 0; i < dmmu->nways; i++) {
uorreg_t lru = cpu_state.sprs[SPR_DTLBMR_BASE(i) + set];
if (lru & SPR_DTLBMR_LRU) {
lru = (lru & ~SPR_DTLBMR_LRU) | ((lru & SPR_DTLBMR_LRU) - 0x40);
145,7 → 147,7
}
}
cpu_state.sprs[SPR_DTLBMR_BASE(way) + set] &= ~SPR_DTLBMR_LRU;
cpu_state.sprs[SPR_DTLBMR_BASE(way) + set] |= (config.dmmu.nsets - 1) << 6;
cpu_state.sprs[SPR_DTLBMR_BASE(way) + set] |= (dmmu->nsets - 1) << 6;
 
/* 1 to 1 mapping */
cpu_state.sprs[SPR_DTLBTR_BASE(minway) + set] &= ~SPR_DTLBTR_PPN;
155,9 → 157,9
#endif
TRACE("DTLB miss (virtaddr=%"PRIxADDR") at %lli.\n", virtaddr,
runtime.sim.cycles);
runtime.sim.mem_cycles += config.dmmu.missdelay;
runtime.sim.mem_cycles += dmmu->missdelay;
/* if tlb refill implemented in HW */
/* return ((cpu_state.sprs[SPR_DTLBTR_BASE(minway) + set] & SPR_DTLBTR_PPN) >> 12) * config.dmmu.pagesize + (virtaddr % config.dmmu.pagesize); */
/* return ((cpu_state.sprs[SPR_DTLBTR_BASE(minway) + set] & SPR_DTLBTR_PPN) >> 12) * dmmu->pagesize + (virtaddr % dmmu->pagesize); */
 
except_handle(EXCEPT_DTLBMISS, virtaddr);
return 0;
185,6 → 187,7
uorreg_t *dtlbmr;
uorreg_t *dtlbtr;
uorreg_t *dtlbmr_lru;
struct dmmu *dmmu = dmmu_state;
 
if (!(cpu_state.sprs[SPR_SR] & SPR_SR_DME) ||
!(cpu_state.sprs[SPR_UPR] & SPR_UPR_DMP)) {
193,7 → 196,7
return virtaddr;
}
 
dtlbmr = dmmu_find_tlbmr(virtaddr, &dtlbmr_lru);
dtlbmr = dmmu_find_tlbmr(virtaddr, &dtlbmr_lru, dmmu);
 
/* Did we find our tlb entry? */
if (dtlbmr) { /* Yes, we did. */
224,28 → 227,16
data_ci = *dtlbtr & SPR_DTLBTR_CI;
}
 
return (*dtlbtr & SPR_DTLBTR_PPN) | (virtaddr &
(config.dmmu.page_offset_mask));
return (*dtlbtr & SPR_DTLBTR_PPN) | (virtaddr & (dmmu->page_offset_mask));
}
 
return(0);
}
 
 
void dtlb_info(void)
{
if (!(cpu_state.sprs[SPR_UPR] & SPR_UPR_DMP)) {
PRINTF("DMMU not implemented. Set UPR[DMP].\n");
return;
}
PRINTF("Data MMU %dKB: ", config.dmmu.nsets * config.dmmu.entrysize * config.dmmu.nways / 1024);
PRINTF("%d ways, %d sets, entry size %d bytes\n", config.dmmu.nways, config.dmmu.nsets, config.dmmu.entrysize);
}
 
/* FIXME: Is this comment valid? */
/* First check if virtual address is covered by DTLB and if it is:
- increment DTLB read hit stats,
- set 'lru' at this way to config.dmmu.ustates - 1 and
- set 'lru' at this way to dmmu->ustates - 1 and
decrement 'lru' of other ways unless they have reached 0,
- check page access attributes and invoke DMMU page fault exception
handler if necessary
252,15 → 243,16
and if not:
- increment DTLB read miss stats
- find lru way and entry and invoke DTLB miss exception handler
- set 'lru' with config.dmmu.ustates - 1 and decrement 'lru' of other
- set 'lru' with dmmu->ustates - 1 and decrement 'lru' of other
ways unless they have reached 0
*/
 
void dtlb_status(int start_set)
static void dtlb_status(void *dat)
{
struct dmmu *dmmu = dat;
int set;
int way;
int end_set = config.dmmu.nsets;
int end_set = dmmu->nsets;
 
if (!(cpu_state.sprs[SPR_UPR] & SPR_UPR_DMP)) {
PRINTF("DMMU not implemented. Set UPR[DMP].\n");
267,17 → 259,10
return;
}
 
if ((start_set >= 0) && (start_set < end_set))
end_set = start_set + 1;
else
start_set = 0;
 
if (start_set < end_set) PRINTF("\nDMMU: ");
if (0 < end_set) PRINTF("\nDMMU: ");
/* Scan set(s) and way(s). */
for (set = start_set; set < end_set; set++) {
PRINTF("\nSet %x: ", set);
for (way = 0; way < config.dmmu.nways; way++) {
PRINTF(" way %d: ", way);
for (set = 0; set < end_set; set++) {
for (way = 0; way < dmmu->nways; way++) {
PRINTF("%s\n", dump_spr(SPR_DTLBMR_BASE(way) + set,
cpu_state.sprs[SPR_DTLBMR_BASE(way) + set]));
PRINTF("%s\n", dump_spr(SPR_DTLBTR_BASE(way) + set,
284,23 → 269,27
cpu_state.sprs[SPR_DTLBTR_BASE(way) + set]));
}
}
if (start_set < end_set) PRINTF("\n");
if (0 < end_set) PRINTF("\n");
}
 
/*---------------------------------------------------[ DMMU configuration ]---*/
static void dmmu_enabled(union param_val val, void *dat)
{
struct dmmu *dmmu = dat;
 
if(val.int_val)
cpu_state.sprs[SPR_UPR] |= SPR_UPR_DMP;
else
cpu_state.sprs[SPR_UPR] &= ~SPR_UPR_DMP;
config.dmmu.enabled = val.int_val;
dmmu->enabled = val.int_val;
}
 
static void dmmu_nsets(union param_val val, void *dat)
{
struct dmmu *dmmu = dat;
 
if (is_power2(val.int_val) && val.int_val <= 256) {
config.dmmu.nsets = val.int_val;
dmmu->nsets = val.int_val;
cpu_state.sprs[SPR_DMMUCFGR] &= ~SPR_DMMUCFGR_NTS;
cpu_state.sprs[SPR_DMMUCFGR] |= log2_int(val.int_val) << 3;
} else
309,8 → 298,10
 
static void dmmu_nways(union param_val val, void *dat)
{
struct dmmu *dmmu = dat;
 
if (val.int_val >= 1 && val.int_val <= 4) {
config.dmmu.nways = val.int_val;
dmmu->nways = val.int_val;
cpu_state.sprs[SPR_DMMUCFGR] &= ~SPR_DMMUCFGR_NTW;
cpu_state.sprs[SPR_DMMUCFGR] |= val.int_val - 1;
}
320,8 → 311,10
 
static void dmmu_pagesize(union param_val val, void *dat)
{
struct dmmu *dmmu = dat;
 
if (is_power2(val.int_val))
config.dmmu.pagesize = val.int_val;
dmmu->pagesize = val.int_val;
else
CONFIG_ERROR("value of power of two expected.");
}
328,8 → 321,10
 
static void dmmu_entrysize(union param_val val, void *dat)
{
struct dmmu *dmmu = dat;
 
if (is_power2(val.int_val))
config.dmmu.entrysize = val.int_val;
dmmu->entrysize = val.int_val;
else
CONFIG_ERROR("value of power of two expected.");
}
336,8 → 331,10
 
static void dmmu_ustates(union param_val val, void *dat)
{
struct dmmu *dmmu = dat;
 
if (val.int_val >= 2 && val.int_val <= 4)
config.dmmu.ustates = val.int_val;
dmmu->ustates = val.int_val;
else
CONFIG_ERROR("invalid USTATE.");
}
344,17 → 341,37
 
static void dmmu_missdelay(union param_val val, void *dat)
{
config.dmmu.missdelay = val.int_val;
struct dmmu *dmmu = dat;
 
dmmu->missdelay = val.int_val;
}
 
static void dmmu_hitdelay(union param_val val, void *dat)
{
config.dmmu.hitdelay = val.int_val;
struct dmmu *dmmu = dat;
 
dmmu->hitdelay = val.int_val;
}
 
static void *dmmu_start_sec(void)
{
return NULL;
struct dmmu *dmmu;
 
if(!(dmmu = malloc(sizeof(struct dmmu)))) {
fprintf(stderr, "OOM\n");
exit(1);
}
 
dmmu->enabled = 0;
dmmu->hitdelay = 1;
dmmu->missdelay = 1;
dmmu->pagesize = 8192;
/* FIXME: Something sane */
dmmu->entrysize = 0;
 
dmmu_state = dmmu;
 
return dmmu;
}
 
static void dmmu_end_sec(void *dat)
362,12 → 379,19
struct dmmu *dmmu = dat;
 
/* Precalculate some values for use during address translation */
config.dmmu.pagesize_log2 = log2_int(config.dmmu.pagesize);
config.dmmu.page_offset_mask = config.dmmu.pagesize - 1;
config.dmmu.page_mask = ~config.dmmu.page_offset_mask;
config.dmmu.vpn_mask = ~((config.dmmu.pagesize * config.dmmu.nsets) - 1);
config.dmmu.set_mask = config.dmmu.nsets - 1;
config.dmmu.lru_reload = (config.dmmu.set_mask << 6) & SPR_DTLBMR_LRU;
dmmu->pagesize_log2 = log2_int(dmmu->pagesize);
dmmu->page_offset_mask = dmmu->pagesize - 1;
dmmu->page_mask = ~dmmu->page_offset_mask;
dmmu->vpn_mask = ~((dmmu->pagesize * dmmu->nsets) - 1);
dmmu->set_mask = dmmu->nsets - 1;
dmmu->lru_reload = (dmmu->set_mask << 6) & SPR_DTLBMR_LRU;
 
if(dmmu->enabled) {
PRINTF("Data MMU %dKB: %d ways, %d sets, entry size %d bytes\n",
dmmu->nsets * dmmu->entrysize * dmmu->nways / 1024, dmmu->nways,
dmmu->nsets, dmmu->entrysize);
reg_sim_stat(dtlb_status, dmmu);
}
}
 
void reg_dmmu_sec(void)
/trunk/or1ksim/mmu/dmmu.h
16,9 → 16,28
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
 
/* FIXME: Move to dmmu.c once the dust settles */
struct dmmu {
int enabled; /* Whether DMMU is enabled */
int nways; /* Number of DTLB ways */
int nsets; /* Number of DTLB sets */
int pagesize; /* DTLB page size */
int pagesize_log2; /* DTLB page size (log2(pagesize)) */
oraddr_t page_offset_mask; /* Address mask to get page offset */
oraddr_t page_mask; /* Page number mask (diff. from vpn) */
oraddr_t vpn_mask; /* Address mask to get vpn */
int lru_reload; /* What to reload the lru value to */
oraddr_t set_mask; /* Mask to get set of an address */
int entrysize; /* DTLB entry size */
int ustates; /* number of DTLB usage states */
int missdelay; /* How many cycles the miss costs */
int hitdelay; /* How many cycles the hit costs */
};
#define DADDR_PAGE(addr) ((addr) & dmmu_state->page_mask)
/* FIXME: Remove the need for this global */
extern struct dmmu *dmmu_state;
 
oraddr_t dmmu_translate(oraddr_t virtaddr, int write_access);
oraddr_t dmmu_simulate_tlb(oraddr_t virtaddr, int write_access);
oraddr_t peek_into_dtlb(oraddr_t virtaddr, int write_access, int through_dc);
void dtlb_status(int start_set);
void init_dmmu(void);
/trunk/or1ksim/sim-config.c
88,11 → 88,6
else config.sim.system_kfreq = INT_MAX;
if (config.sim.system_kfreq <= 0) config.sim.system_kfreq = 1;
config.dmmu.enabled = 0;
config.dmmu.hitdelay = 1;
config.dmmu.missdelay = 1;
config.dmmu.pagesize = 8192;
/* IC & DC */
config.ic.enabled = 0;
config.ic.hitdelay = 1;

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.