   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */

/* DMMU model, perfectly functional. */
#include "config.h"

#ifdef HAVE_INTTYPES_H
#include <inttypes.h>
#endif

#include "sim-config.h"
#include "debug.h"
|
|
/* Debug channel used by TRACE()/ERR() in this file. */
DEFAULT_DEBUG_CHANNEL(dmmu);

/* NOTE(review): cont_run is not referenced anywhere in this file's visible
 * code -- candidate for removal; verify against the rest of the file. */
extern int cont_run;
|
/* Data MMU */
|
/* Data MMU */
|
|
|
inline oraddr_t dmmu_simulate_tlb(oraddr_t virtaddr, int write_access)
|
/* Precalculates some values for use during address translation */
|
|
void init_dmmu(void)
|
|
{
|
|
config.dmmu.pagesize_log2 = log2(config.dmmu.pagesize);
|
|
config.dmmu.page_offset_mask = config.dmmu.pagesize - 1;
|
|
config.dmmu.page_mask = ~config.dmmu.page_offset_mask;
|
|
config.dmmu.vpn_mask = ~((config.dmmu.pagesize * config.dmmu.nsets) - 1);
|
|
config.dmmu.set_mask = config.dmmu.nsets - 1;
|
|
config.dmmu.lru_reload = (config.dmmu.set_mask << 6) & SPR_DTLBMR_LRU;
|
|
}
|
|
|
|
/* Finds the DTLB match register that maps virtaddr, if any.
 *
 * virtaddr   - effective address to look up
 * dtlbmr_lru - receives a pointer to the way-0 match register of the
 *              selected set (the caller walks it to update LRU counters)
 *
 * Returns a pointer to the matching, valid DTLBMR, or NULL on a miss.
 * Ways are spaced 128 * 2 SPRs apart in cpu_state.sprs (one 128-entry
 * DTLBMR block plus one 128-entry DTLBTR block per way). */
inline uorreg_t *dmmu_find_tlbmr(oraddr_t virtaddr, uorreg_t **dtlbmr_lru)
{
  int set;
  int i;
  oraddr_t vpn;
  uorreg_t *dtlbmr;

  /* Which set to check out? */
  set = DADDR_PAGE(virtaddr) >> config.dmmu.pagesize_log2;
  set &= config.dmmu.set_mask;
  vpn = virtaddr & config.dmmu.vpn_mask;

  dtlbmr = &cpu_state.sprs[SPR_DTLBMR_BASE(0) + set];
  *dtlbmr_lru = dtlbmr;

  /* FIXME: Should this be reversed? */
  for (i = config.dmmu.nways; i; i--, dtlbmr += (128 * 2)) {
    if (((*dtlbmr & config.dmmu.vpn_mask) == vpn) && (*dtlbmr & SPR_DTLBMR_V))
      return dtlbmr;
  }

  return NULL;
}
|
|
|
|
/* Translates a data effective address to a physical address.
 *
 * virtaddr     - effective (virtual) address to translate
 * write_access - non-zero for a store, zero for a load
 *
 * Returns the physical address on a hit.  On a protection violation it
 * raises EXCEPT_DPF; on a TLB miss it raises EXCEPT_DTLBMISS and returns 0.
 * Side effects: updates DTLBMR LRU fields, the global `data_ci'
 * cache-inhibit flag, dmmu_stats counters and runtime.sim.mem_cycles. */
oraddr_t dmmu_translate(oraddr_t virtaddr, int write_access)
{
  int i;
  uorreg_t *dtlbmr;
  uorreg_t *dtlbtr;
  uorreg_t *dtlbmr_lru;

  /* With the DMMU disabled or not present, translation is 1:1; the upper
   * half of the address space is treated as cache inhibited. */
  if (!(cpu_state.sprs[SPR_SR] & SPR_SR_DME) ||
      !(cpu_state.sprs[SPR_UPR] & SPR_UPR_DMP)) {
    data_ci = (virtaddr >= 0x80000000);
    return virtaddr;
  }

  dtlbmr = dmmu_find_tlbmr(virtaddr, &dtlbmr_lru);

  /* Did we find our tlb entry? */
  if (dtlbmr) { /* Yes, we did. */
    dmmu_stats.loads_tlbhit++;

    /* The translate register sits a fixed 128 SPRs after its match register. */
    dtlbtr = dtlbmr + 128;

    TRACE("DTLB hit (virtaddr=%"PRIxADDR") at %lli.\n", virtaddr,
          runtime.sim.cycles);

    /* Set LRUs: age every way of this set whose LRU counter is non-zero. */
    for (i = 0; i < config.dmmu.nways; i++, dtlbmr_lru += (128 * 2)) {
      if (*dtlbmr_lru & SPR_DTLBMR_LRU)
        *dtlbmr_lru = (*dtlbmr_lru & ~SPR_DTLBMR_LRU) |
                      ((*dtlbmr_lru & SPR_DTLBMR_LRU) - 0x40);
    }

    /* This is not necessary `*dtlbmr &= ~SPR_DTLBMR_LRU;' since SPR_DTLBMR_LRU
     * is always decremented and the number of sets is always a power of two and
     * as such lru_reload has all bits set that get touched during decrementing
     * SPR_DTLBMR_LRU */
    *dtlbmr |= config.dmmu.lru_reload;

    /* Check if page is cache inhibited */
    data_ci = *dtlbtr & SPR_DTLBTR_CI;

    runtime.sim.mem_cycles += config.dmmu.hitdelay;

    /* Test for page fault */
    if (cpu_state.sprs[SPR_SR] & SPR_SR_SM) {
      if ((write_access && !(*dtlbtr & SPR_DTLBTR_SWE))
          || (!write_access && !(*dtlbtr & SPR_DTLBTR_SRE)))
        except_handle(EXCEPT_DPF, virtaddr);
    } else {
      if ((write_access && !(*dtlbtr & SPR_DTLBTR_UWE))
          || (!write_access && !(*dtlbtr & SPR_DTLBTR_URE)))
        except_handle(EXCEPT_DPF, virtaddr);
    }

    TRACE("Returning physical address %"PRIxADDR"\n",
          (*dtlbtr & SPR_DTLBTR_PPN) | (virtaddr &
                                        (config.dmmu.page_offset_mask)));
    return (*dtlbtr & SPR_DTLBTR_PPN) | (virtaddr &
                                         (config.dmmu.page_offset_mask));
  }

  /* No, we didn't. */
  dmmu_stats.loads_tlbmiss++;
#if 0
  for (i = 0; i < config.dmmu.nways; i++)
    if (((cpu_state.sprs[SPR_DTLBMR_BASE(i) + set] & SPR_DTLBMR_LRU) >> 6) < minlru)
      minway = i;
#endif

  TRACE("DTLB miss (virtaddr=%"PRIxADDR") at %lli.\n", virtaddr,
        runtime.sim.cycles);
  runtime.sim.mem_cycles += config.dmmu.missdelay;
  /* if tlb refill implemented in HW */
  /* return ((cpu_state.sprs[SPR_DTLBTR_BASE(minway) + set] & SPR_DTLBTR_PPN) >> 12) * config.dmmu.pagesize + (virtaddr % config.dmmu.pagesize); */

  except_handle(EXCEPT_DTLBMISS, virtaddr);
  return 0;
}
|
|
|
|
/* DESC: try to find EA -> PA translation without changing
 *       any of processor states. if this is not possible gives up
 *       (without triggering exceptions)
 *
 * RET:  0 - no translation found
 *       else - appropriate PA (note if DMMU is not present
 *              PA === EA)
 */
|
/* Non-faulting DTLB lookup: translates virtaddr without raising exceptions
 * or charging simulated memory cycles.
 *
 * virtaddr     - effective address to look up
 * write_access - non-zero to check write permission, zero for read
 * through_dc   - non-zero to also update the global `data_ci' flag, as a
 *                real access through the data cache would
 *
 * Returns 0 when no permitted translation exists, otherwise the PA
 * (EA itself when the DMMU is disabled or not present).
 * NOTE(review): this still bumps dmmu_stats.loads_tlbhit on a hit, and
 * dmmu_find_tlbmr does not touch LRU state, so only statistics are
 * perturbed -- confirm this is intended for a "peek". */
oraddr_t peek_into_dtlb(oraddr_t virtaddr, int write_access, int through_dc)
{
  uorreg_t *dtlbmr;
  uorreg_t *dtlbtr;
  uorreg_t *dtlbmr_lru;

  if (!(cpu_state.sprs[SPR_SR] & SPR_SR_DME) ||
      !(cpu_state.sprs[SPR_UPR] & SPR_UPR_DMP)) {
    if (through_dc)
      data_ci = (virtaddr >= 0x80000000);
    return virtaddr;
  }

  dtlbmr = dmmu_find_tlbmr(virtaddr, &dtlbmr_lru);

  /* Did we find our tlb entry? */
  if (dtlbmr) { /* Yes, we did. */
    dmmu_stats.loads_tlbhit++;

    /* The translate register sits a fixed 128 SPRs after its match register. */
    dtlbtr = dtlbmr + 128;

    TRACE("DTLB hit (virtaddr=%"PRIxADDR") at %lli.\n", virtaddr,
          runtime.sim.cycles);

    /* Test for page fault */
    if (cpu_state.sprs[SPR_SR] & SPR_SR_SM) {
      if ((write_access && !(*dtlbtr & SPR_DTLBTR_SWE)) ||
          (!write_access && !(*dtlbtr & SPR_DTLBTR_SRE)))
        /* otherwise exception DPF would be raised */
        return(0);
    } else {
      if ((write_access && !(*dtlbtr & SPR_DTLBTR_UWE)) ||
          (!write_access && !(*dtlbtr & SPR_DTLBTR_URE)))
        /* otherwise exception DPF would be raised */
        return(0);
    }

    if (through_dc) {
      /* Check if page is cache inhibited */
      data_ci = *dtlbtr & SPR_DTLBTR_CI;
    }

    return (*dtlbtr & SPR_DTLBTR_PPN) | (virtaddr &
                                         (config.dmmu.page_offset_mask));
  }

  /* No matching, valid entry: give up without faulting. */
  return(0);
}
|
|
|
|
|
/* NOTE(review): the old dmmu_translate() wrapper that forwarded to
 * dmmu_simulate_tlb() was removed -- dmmu_translate() is now implemented
 * directly above and dmmu_simulate_tlb() no longer exists.  Keeping the
 * wrapper here would be a duplicate definition of dmmu_translate(). */
|
|
|
|
|
|
void dtlb_info(void)
|
void dtlb_info(void)
|
{
|
{
|
if (!(cpu_state.sprs[SPR_UPR] & SPR_UPR_DMP)) {
|
if (!(cpu_state.sprs[SPR_UPR] & SPR_UPR_DMP)) {
|
PRINTF("DMMU not implemented. Set UPR[DMP].\n");
|
PRINTF("DMMU not implemented. Set UPR[DMP].\n");
|
return;
|
return;
|