/* dmmu.c -- Data MMU simulation

   Copyright (C) 1999 Damjan Lampret, lampret@opencores.org
   Copyright (C) 2008 Embecosm Limited

   Contributor Jeremy Bennett <jeremy.bennett@embecosm.com>

   This file is part of Or1ksim, the OpenRISC 1000 Architectural Simulator.

   This program is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3 of the License, or (at your option)
   any later version.

   This program is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   You should have received a copy of the GNU General Public License along
   with this program.  If not, see <http://www.gnu.org/licenses/>.  */

/* This program is commented throughout in a fashion suitable for processing
   with Doxygen. */

/* DMMU model, perfectly functional. */


/* Autoconf and/or portability configuration */
#include "config.h"
#include "port.h"

/* System includes */
#include <stdlib.h>

/* Package includes */
#include "dmmu.h"
#include "sim-config.h"
#include "arch.h"
#include "execute.h"
#include "spr-defs.h"
#include "stats.h"
#include "except.h"
#include "sprs.h"
#include "misc.h"
#include "sim-cmd.h"


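/*!Global pointer to the DMMU state

   Allocated and given its default configuration by dmmu_start_sec() when the
   "dmmu" configuration section is opened; the derived masks used during
   address translation are filled in by dmmu_end_sec().                     */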
struct dmmu *dmmu_state;

/* Data MMU */

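/*---------------------------------------------------------------------------*/
/*!Find the DTLB match register for a virtual address

   The set is selected from the virtual page number, and the match register
   of every way in that set is checked for a valid entry whose VPN matches
   the address.  For illustration, with an 8192 byte page size and 64 sets
   the set index is (virtaddr >> 13) & 0x3f and the VPN comparison masks off
   the low 19 bits.  Ways are 128 * 2 SPRs apart in the SPR array, which is
   the stride used in the loop below (the translate register of an entry lies
   128 SPRs above its match register; see dmmu_translate).

   @param[in]  virtaddr    The virtual (effective) address to look up
   @param[out] dtlbmr_lru  Set to the way 0 match register of the selected
                           set, used by callers when updating LRU state
   @param[in]  dmmu        The DMMU state

   @return  Pointer to the matching valid DTLB match register, or NULL on a
            DTLB miss                                                        */
/*---------------------------------------------------------------------------*/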
static uorreg_t *
dmmu_find_tlbmr (oraddr_t virtaddr, uorreg_t ** dtlbmr_lru, struct dmmu *dmmu)
{
  int set;
  int i;
  oraddr_t vpn;
  uorreg_t *dtlbmr;

  /* Which set to check out? */
  set = DADDR_PAGE (virtaddr) >> dmmu->pagesize_log2;
  set &= dmmu->set_mask;
  vpn = virtaddr & dmmu->vpn_mask;

  dtlbmr = &cpu_state.sprs[SPR_DTLBMR_BASE (0) + set];
  *dtlbmr_lru = dtlbmr;

  /* FIXME: Should this be reversed? */
  for (i = dmmu->nways; i; i--, dtlbmr += (128 * 2))
    {
      if (((*dtlbmr & dmmu->vpn_mask) == vpn) && (*dtlbmr & SPR_DTLBMR_V))
        return dtlbmr;
    }

  return NULL;
}

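/*---------------------------------------------------------------------------*/
/*!Translate a data access virtual address to a physical address

   If the DMMU is not present or not enabled, the address is returned
   unchanged (a 1:1 mapping) and accesses at 0x80000000 and above are marked
   cache inhibited.

   On a DTLB hit the LRU state of the set is updated, the cache inhibit flag
   is taken from the translate register, protection is checked (raising a
   data page fault exception on a violation) and the physical address is
   formed from the PPN field of the translate register and the page offset of
   the virtual address.  On a DTLB miss a DTLB miss exception is raised and 0
   is returned.  The configured hit or miss delay is added to the simulated
   memory cycle count.

   @param[in] virtaddr      The virtual (effective) address to translate
   @param[in] write_access  Non-zero for a write access, zero for a read

   @return  The translated physical address, or 0 on a DTLB miss            */
/*---------------------------------------------------------------------------*/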
oraddr_t
dmmu_translate (oraddr_t virtaddr, int write_access)
{
  int i;
  uorreg_t *dtlbmr;
  uorreg_t *dtlbtr;
  uorreg_t *dtlbmr_lru;
  struct dmmu *dmmu = dmmu_state;

  if (!(cpu_state.sprs[SPR_SR] & SPR_SR_DME) ||
      !(cpu_state.sprs[SPR_UPR] & SPR_UPR_DMP))
    {
      data_ci = (virtaddr >= 0x80000000);
      return virtaddr;
    }

  dtlbmr = dmmu_find_tlbmr (virtaddr, &dtlbmr_lru, dmmu);

  /* Did we find our tlb entry? */
  if (dtlbmr)
    {                           /* Yes, we did. */
      dmmu_stats.loads_tlbhit++;

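      /* The translate register for this entry is 128 SPRs above its match
         register in the SPR array (ways are spaced 128 * 2 SPRs apart; see
         dmmu_find_tlbmr).  */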
      dtlbtr = dtlbmr + 128;

      /* Set LRUs */
      for (i = 0; i < dmmu->nways; i++, dtlbmr_lru += (128 * 2))
        {
          if (*dtlbmr_lru & SPR_DTLBMR_LRU)
            *dtlbmr_lru = (*dtlbmr_lru & ~SPR_DTLBMR_LRU) |
              ((*dtlbmr_lru & SPR_DTLBMR_LRU) - 0x40);
        }

      /* There is no need to do `*dtlbmr &= ~SPR_DTLBMR_LRU;' here, since
       * SPR_DTLBMR_LRU is always decremented and the number of sets is always
       * a power of two, so lru_reload has all the bits set that get touched
       * while decrementing SPR_DTLBMR_LRU */
      *dtlbmr |= dmmu->lru_reload;

      /* Check if page is cache inhibited */
      data_ci = *dtlbtr & SPR_DTLBTR_CI;

      runtime.sim.mem_cycles += dmmu->hitdelay;

      /* Test for page fault */
      if (cpu_state.sprs[SPR_SR] & SPR_SR_SM)
        {
          if ((write_access && !(*dtlbtr & SPR_DTLBTR_SWE))
              || (!write_access && !(*dtlbtr & SPR_DTLBTR_SRE)))
            except_handle (EXCEPT_DPF, virtaddr);
        }
      else
        {
          if ((write_access && !(*dtlbtr & SPR_DTLBTR_UWE))
              || (!write_access && !(*dtlbtr & SPR_DTLBTR_URE)))
            except_handle (EXCEPT_DPF, virtaddr);
        }

      return (*dtlbtr & SPR_DTLBTR_PPN) | (virtaddr &
                                           (dmmu->page_offset_mask));
    }

  /* No, we didn't. */
  dmmu_stats.loads_tlbmiss++;
#if 0
  for (i = 0; i < dmmu->nways; i++)
    if (((cpu_state.sprs[SPR_DTLBMR_BASE (i) + set] & SPR_DTLBMR_LRU) >> 6) <
        minlru)
      minway = i;

  cpu_state.sprs[SPR_DTLBMR_BASE (minway) + set] &= ~SPR_DTLBMR_VPN;
  cpu_state.sprs[SPR_DTLBMR_BASE (minway) + set] |= vpn << 12;
  for (i = 0; i < dmmu->nways; i++)
    {
      uorreg_t lru = cpu_state.sprs[SPR_DTLBMR_BASE (i) + set];
      if (lru & SPR_DTLBMR_LRU)
        {
          lru = (lru & ~SPR_DTLBMR_LRU) | ((lru & SPR_DTLBMR_LRU) - 0x40);
          cpu_state.sprs[SPR_DTLBMR_BASE (i) + set] = lru;
        }
    }
  cpu_state.sprs[SPR_DTLBMR_BASE (way) + set] &= ~SPR_DTLBMR_LRU;
  cpu_state.sprs[SPR_DTLBMR_BASE (way) + set] |= (dmmu->nsets - 1) << 6;

  /* 1 to 1 mapping */
  cpu_state.sprs[SPR_DTLBTR_BASE (minway) + set] &= ~SPR_DTLBTR_PPN;
  cpu_state.sprs[SPR_DTLBTR_BASE (minway) + set] |= vpn << 12;

  cpu_state.sprs[SPR_DTLBMR_BASE (minway) + set] |= SPR_DTLBMR_V;
#endif
  runtime.sim.mem_cycles += dmmu->missdelay;
  /* if tlb refill implemented in HW */
  /* return ((cpu_state.sprs[SPR_DTLBTR_BASE(minway) + set] & SPR_DTLBTR_PPN) >> 12) * dmmu->pagesize + (virtaddr % dmmu->pagesize); */

  except_handle (EXCEPT_DTLBMISS, virtaddr);
  return 0;
}

/* DESC: try to find an EA -> PA translation without changing
 *       any processor state. If this is not possible, give up
 *       (without triggering exceptions).
 *
 * PRMS: virtaddr     - EA for which to find translation
 *
 *       write_access - 0 ignore testing for write access
 *                      1 test for write access; if it fails
 *                        do not return translation
 *
 *       through_dc   - 1 go through data cache
 *                      0 ignore data cache
 *
 * RTRN: 0            - DTLB miss, or insufficient access rights
 *       else         - appropriate PA (note that if the DMMU is not present
 *                      or disabled, PA === EA)
 */
oraddr_t
peek_into_dtlb (oraddr_t virtaddr, int write_access, int through_dc)
{
  uorreg_t *dtlbmr;
  uorreg_t *dtlbtr;
  uorreg_t *dtlbmr_lru;
  struct dmmu *dmmu = dmmu_state;

  if (!(cpu_state.sprs[SPR_SR] & SPR_SR_DME) ||
      !(cpu_state.sprs[SPR_UPR] & SPR_UPR_DMP))
    {
      if (through_dc)
        data_ci = (virtaddr >= 0x80000000);
      return virtaddr;
    }

  dtlbmr = dmmu_find_tlbmr (virtaddr, &dtlbmr_lru, dmmu);

  /* Did we find our tlb entry? */
  if (dtlbmr)
    {                           /* Yes, we did. */
      dmmu_stats.loads_tlbhit++;

      dtlbtr = dtlbmr + 128;

      /* Test for page fault */
      if (cpu_state.sprs[SPR_SR] & SPR_SR_SM)
        {
          if ((write_access && !(*dtlbtr & SPR_DTLBTR_SWE)) ||
              (!write_access && !(*dtlbtr & SPR_DTLBTR_SRE)))

            /* otherwise exception DPF would be raised */
            return (0);
        }
      else
        {
          if ((write_access && !(*dtlbtr & SPR_DTLBTR_UWE)) ||
              (!write_access && !(*dtlbtr & SPR_DTLBTR_URE)))

            /* otherwise exception DPF would be raised */
            return (0);
        }

      if (through_dc)
        {
          /* Check if page is cache inhibited */
          data_ci = *dtlbtr & SPR_DTLBTR_CI;
        }

      return (*dtlbtr & SPR_DTLBTR_PPN) | (virtaddr &
                                           (dmmu->page_offset_mask));
    }

  return (0);
}

/* FIXME: Is this comment valid? */
/* First check if virtual address is covered by DTLB and if it is:
    - increment DTLB read hit stats,
    - set 'lru' at this way to dmmu->ustates - 1 and
      decrement 'lru' of other ways unless they have reached 0,
    - check page access attributes and invoke DMMU page fault exception
      handler if necessary
   and if not:
    - increment DTLB read miss stats
    - find lru way and entry and invoke DTLB miss exception handler
    - set 'lru' with dmmu->ustates - 1 and decrement 'lru' of other
      ways unless they have reached 0
*/

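/*---------------------------------------------------------------------------*/
/*!Dump the DTLB contents

   Print the match and translate registers of every DTLB set and way, or a
   warning if no DMMU is present.  Registered as a simulator statistics
   callback by dmmu_end_sec() when the DMMU is enabled.

   @param[in] dat  The DMMU state structure                                  */
/*---------------------------------------------------------------------------*/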
static void
dtlb_status (void *dat)
{
  struct dmmu *dmmu = dat;
  int set;
  int way;
  int end_set = dmmu->nsets;

  if (!(cpu_state.sprs[SPR_UPR] & SPR_UPR_DMP))
    {
      PRINTF ("DMMU not implemented. Set UPR[DMP].\n");
      return;
    }

  if (0 < end_set)
    PRINTF ("\nDMMU: ");
  /* Scan set(s) and way(s). */
  for (set = 0; set < end_set; set++)
    {
      for (way = 0; way < dmmu->nways; way++)
        {
          PRINTF ("%s\n", dump_spr (SPR_DTLBMR_BASE (way) + set,
                                    cpu_state.sprs[SPR_DTLBMR_BASE (way) +
                                                   set]));
          PRINTF ("%s\n",
                  dump_spr (SPR_DTLBTR_BASE (way) + set,
                            cpu_state.sprs[SPR_DTLBTR_BASE (way) + set]));
        }
    }
  if (0 < end_set)
    PRINTF ("\n");
}

/*---------------------------------------------------[ DMMU configuration ]---*/

/*---------------------------------------------------------------------------*/
/*!Enable or disable the DMMU

   Set the corresponding field in the UPR

   @param[in] val  The value to use
   @param[in] dat  The config data structure                                 */
/*---------------------------------------------------------------------------*/
static void
dmmu_enabled (union param_val val, void *dat)
{
  struct dmmu *dmmu = dat;

  if (val.int_val)
    {
      cpu_state.sprs[SPR_UPR] |= SPR_UPR_DMP;
    }
  else
    {
      cpu_state.sprs[SPR_UPR] &= ~SPR_UPR_DMP;
    }

  dmmu->enabled = val.int_val;

}       /* dmmu_enabled() */


/*---------------------------------------------------------------------------*/
/*!Set the number of DMMU sets

   Value must be a power of 2 <= 128. Ignore any other values with a
   warning. Set the corresponding DMMU configuration flags.

   @param[in] val  The value to use
   @param[in] dat  The config data structure                                 */
/*---------------------------------------------------------------------------*/
static void
dmmu_nsets (union param_val  val,
            void            *dat)
{
  struct dmmu *dmmu = dat;

  if (is_power2 (val.int_val) && val.int_val <= 128)
    {
      int  set_bits = log2_int (val.int_val);

      dmmu->nsets = val.int_val;

      cpu_state.sprs[SPR_DMMUCFGR] &= ~SPR_DMMUCFGR_NTS;
      cpu_state.sprs[SPR_DMMUCFGR] |= set_bits << SPR_DMMUCFGR_NTS_OFF;
    }
  else
    {
      fprintf (stderr, "Warning DMMU nsets not a power of 2 <= 128: ignored\n");
    }
}       /* dmmu_nsets() */


/*---------------------------------------------------------------------------*/
/*!Set the number of DMMU ways

   Value must be in the range 1-4. Ignore other values with a warning. Set the
   corresponding DMMU configuration flags.

   @param[in] val  The value to use
   @param[in] dat  The config data structure                                 */
/*---------------------------------------------------------------------------*/
static void
dmmu_nways (union param_val  val,
            void            *dat)
{
  struct dmmu *dmmu = dat;

  if (val.int_val >= 1 && val.int_val <= 4)
    {
      int  way_bits = val.int_val - 1;

      dmmu->nways = val.int_val;

      cpu_state.sprs[SPR_DMMUCFGR] &= ~SPR_DMMUCFGR_NTW;
      cpu_state.sprs[SPR_DMMUCFGR] |= way_bits << SPR_DMMUCFGR_NTW_OFF;
    }
  else
    {
      fprintf (stderr, "Warning DMMU nways not in range 1-4: ignored\n");
    }
}       /* dmmu_nways() */


/*---------------------------------------------------------------------------*/
/*!Set the DMMU page size

   Value must be a power of 2. Ignore other values with a warning

   @param[in] val  The value to use
   @param[in] dat  The config data structure                                 */
/*---------------------------------------------------------------------------*/
static void
dmmu_pagesize (union param_val  val,
               void            *dat)
{
  struct dmmu *dmmu = dat;

  if (is_power2 (val.int_val))
    {
      dmmu->pagesize = val.int_val;
    }
  else
    {
      fprintf (stderr, "Warning DMMU page size must be power of 2: ignored\n");
    }
}       /* dmmu_pagesize() */


/*---------------------------------------------------------------------------*/
/*!Set the DMMU entry size

   Value must be a power of 2. Ignore other values with a warning

   @param[in] val  The value to use
   @param[in] dat  The config data structure                                 */
/*---------------------------------------------------------------------------*/
static void
dmmu_entrysize (union param_val  val,
                void            *dat)
{
  struct dmmu *dmmu = dat;

  if (is_power2 (val.int_val))
    {
      dmmu->entrysize = val.int_val;
    }
  else
    {
      fprintf (stderr, "Warning DMMU entry size must be power of 2: ignored\n");
    }
}       /* dmmu_entrysize() */


/*---------------------------------------------------------------------------*/
/*!Set the number of DMMU usage states

   Value must be 2, 3 or 4. Ignore other values with a warning

   @param[in] val  The value to use
   @param[in] dat  The config data structure                                 */
/*---------------------------------------------------------------------------*/
static void
dmmu_ustates (union param_val  val,
              void            *dat)
{
  struct dmmu *dmmu = dat;

  if ((val.int_val >= 2) && (val.int_val <= 4))
    {
      dmmu->ustates = val.int_val;
    }
  else
    {
      fprintf (stderr, "Warning number of DMMU usage states must be 2, 3 or 4:"
               " ignored\n");
    }
}       /* dmmu_ustates() */


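/*---------------------------------------------------------------------------*/
/*!Set the delay (in cycles) charged for a DTLB miss

   @param[in] val  The value to use
   @param[in] dat  The config data structure                                 */
/*---------------------------------------------------------------------------*/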
static void
dmmu_missdelay (union param_val val, void *dat)
{
  struct dmmu *dmmu = dat;

  dmmu->missdelay = val.int_val;
}

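/*---------------------------------------------------------------------------*/
/*!Set the delay (in cycles) charged for a DTLB hit

   @param[in] val  The value to use
   @param[in] dat  The config data structure                                 */
/*---------------------------------------------------------------------------*/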
static void
dmmu_hitdelay (union param_val val, void *dat)
{
  struct dmmu *dmmu = dat;

  dmmu->hitdelay = val.int_val;
}

/*---------------------------------------------------------------------------*/
/*!Initialize a new DMMU configuration

   ALL parameters are set explicitly to default values. Corresponding SPR
   flags are set as appropriate.

   @return  The new DMMU configuration data structure                        */
/*---------------------------------------------------------------------------*/
static void *
dmmu_start_sec ()
{
  struct dmmu *dmmu;
  int          set_bits;
  int          way_bits;

  if (NULL == (dmmu = malloc (sizeof (struct dmmu))))
    {
      fprintf (stderr, "OOM\n");
      exit (1);
    }

  dmmu->enabled   = 0;
  dmmu->nsets     = 1;
  dmmu->nways     = 1;
  dmmu->pagesize  = 8192;
  dmmu->entrysize = 1;          /* Not currently used */
  dmmu->ustates   = 2;
  dmmu->hitdelay  = 1;
  dmmu->missdelay = 1;

  if (dmmu->enabled)
    {
      cpu_state.sprs[SPR_UPR] |= SPR_UPR_DMP;
    }
  else
    {
      cpu_state.sprs[SPR_UPR] &= ~SPR_UPR_DMP;
    }

  set_bits = log2_int (dmmu->nsets);
  cpu_state.sprs[SPR_DMMUCFGR] &= ~SPR_DMMUCFGR_NTS;
  cpu_state.sprs[SPR_DMMUCFGR] |= set_bits << SPR_DMMUCFGR_NTS_OFF;

  way_bits = dmmu->nways - 1;
  cpu_state.sprs[SPR_DMMUCFGR] &= ~SPR_DMMUCFGR_NTW;
  cpu_state.sprs[SPR_DMMUCFGR] |= way_bits << SPR_DMMUCFGR_NTW_OFF;

  dmmu_state = dmmu;
  return dmmu;

}       /* dmmu_start_sec() */


static void
dmmu_end_sec (void *dat)
{
  struct dmmu *dmmu = dat;

  /* Precalculate some values for use during address translation */
  dmmu->pagesize_log2 = log2_int (dmmu->pagesize);
  dmmu->page_offset_mask = dmmu->pagesize - 1;
  dmmu->page_mask = ~dmmu->page_offset_mask;
  dmmu->vpn_mask = ~((dmmu->pagesize * dmmu->nsets) - 1);
  dmmu->set_mask = dmmu->nsets - 1;
  dmmu->lru_reload = (dmmu->set_mask << 6) & SPR_DTLBMR_LRU;

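  /* For example, with an 8192 byte page size and 64 sets these become (for
     32-bit addresses): pagesize_log2 = 13, page_offset_mask = 0x1fff,
     page_mask = 0xffffe000, vpn_mask = 0xfff80000 and set_mask = 0x3f.  */
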
  if (dmmu->enabled)
    {
      PRINTF ("Data MMU %dKB: %d ways, %d sets, entry size %d bytes\n",
              dmmu->nsets * dmmu->entrysize * dmmu->nways / 1024, dmmu->nways,
              dmmu->nsets, dmmu->entrysize);
      reg_sim_stat (dtlb_status, dmmu);
    }
}

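/*---------------------------------------------------------------------------*/
/*!Register the DMMU configuration section

   Registers the "dmmu" section with the simulator configuration handler,
   together with its start and end functions and the parameters recognized
   within the section.                                                       */
/*---------------------------------------------------------------------------*/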
void
reg_dmmu_sec (void)
{
  struct config_section *sec = reg_config_sec ("dmmu", dmmu_start_sec,
                                               dmmu_end_sec);

  reg_config_param (sec, "enabled",   PARAMT_INT, dmmu_enabled);
  reg_config_param (sec, "nsets",     PARAMT_INT, dmmu_nsets);
  reg_config_param (sec, "nways",     PARAMT_INT, dmmu_nways);
  reg_config_param (sec, "pagesize",  PARAMT_INT, dmmu_pagesize);
  reg_config_param (sec, "entrysize", PARAMT_INT, dmmu_entrysize);
  reg_config_param (sec, "ustates",   PARAMT_INT, dmmu_ustates);
  reg_config_param (sec, "hitdelay",  PARAMT_INT, dmmu_hitdelay);
  reg_config_param (sec, "missdelay", PARAMT_INT, dmmu_missdelay);
}
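
/* The parameters registered above are set from the "dmmu" section of the
   simulator configuration file.  An illustrative sketch (the values shown
   are arbitrary examples, not recommendations; see sim-config for the exact
   file syntax):

       section dmmu
         enabled   = 1
         nsets     = 64
         nways     = 1
         pagesize  = 8192
         hitdelay  = 1
         missdelay = 1
       end
*/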
