OpenCores
URL https://opencores.org/ocsvn/openrisc/openrisc/trunk

Subversion Repositories openrisc

openrisc/trunk/or1ksim/mmu/immu.c - Blame information for rev 197

/* immu.c -- Instruction MMU simulation

   Copyright (C) 1999 Damjan Lampret, lampret@opencores.org
   Copyright (C) 2008 Embecosm Limited

   Contributor Jeremy Bennett <jeremy.bennett@embecosm.com>

   This file is part of Or1ksim, the OpenRISC 1000 Architectural Simulator.

   This program is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3 of the License, or (at your option)
   any later version.

   This program is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   You should have received a copy of the GNU General Public License along
   with this program.  If not, see <http://www.gnu.org/licenses/>.  */

/* This program is commented throughout in a fashion suitable for processing
   with Doxygen. */


/* Autoconf and/or portability configuration */
#include "config.h"

/* System includes */
#include <stdlib.h>

/* Package includes */
#include "immu.h"
#include "sim-config.h"
#include "execute.h"
#include "stats.h"
#include "except.h"
#include "spr-dump.h"
#include "misc.h"
#include "sim-cmd.h"


struct immu *immu_state;

/* Insn MMU */

static uorreg_t *
immu_find_tlbmr (oraddr_t virtaddr, uorreg_t ** itlbmr_lru, struct immu *immu)
{
  int set;
  int i;
  oraddr_t vpn;
  uorreg_t *itlbmr;

  /* Which set to check out? */
  set = IADDR_PAGE (virtaddr) >> immu->pagesize_log2;
  set &= immu->set_mask;
  vpn = virtaddr & immu->vpn_mask;

  itlbmr = &cpu_state.sprs[SPR_ITLBMR_BASE (0) + set];
  *itlbmr_lru = itlbmr;

  /* Scan all ways and try to find a matching way. */
  /* FIXME: Should this be reversed? */
  for (i = immu->nways; i; i--, itlbmr += (128 * 2))
    {
      if (((*itlbmr & immu->vpn_mask) == vpn) && (*itlbmr & SPR_ITLBMR_V))
        return itlbmr;
    }

  return NULL;
}
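
/* Illustrative note (added, not part of the original source): with an
   assumed configuration of pagesize = 8192 (so pagesize_log2 = 13) and
   nsets = 64, and assuming IADDR_PAGE() simply masks off the in-page
   offset, a fetch from virtual address 0x12345678 selects

       set = (0x12344000 >> 13) & 0x3f = 34

   and matches against

       vpn = 0x12345678 & 0xfff80000 = 0x12300000,

   i.e. the set index comes from the bits just above the page offset and the
   VPN comparison uses the bits above the set index.  */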

oraddr_t
immu_translate (oraddr_t virtaddr)
{
  int i;
  uorreg_t *itlbmr;
  uorreg_t *itlbtr;
  uorreg_t *itlbmr_lru;
  struct immu *immu = immu_state;

  if (!(cpu_state.sprs[SPR_SR] & SPR_SR_IME) ||
      !(cpu_state.sprs[SPR_UPR] & SPR_UPR_IMP))
    {
      insn_ci = (virtaddr >= 0x80000000);
      return virtaddr;
    }

  itlbmr = immu_find_tlbmr (virtaddr, &itlbmr_lru, immu);

  /* Did we find our tlb entry? */
  if (itlbmr)
    {                           /* Yes, we did. */
      immu_stats.fetch_tlbhit++;
      itlbtr = itlbmr + 128;

      /* Set LRUs */
      for (i = 0; i < immu->nways; i++, itlbmr_lru += (128 * 2))
        {
          if (*itlbmr_lru & SPR_ITLBMR_LRU)
            *itlbmr_lru = (*itlbmr_lru & ~SPR_ITLBMR_LRU) |
              ((*itlbmr_lru & SPR_ITLBMR_LRU) - 0x40);
        }

      /* It is not necessary to do `*itlbmr &= ~SPR_ITLBMR_LRU;' here, since
       * SPR_ITLBMR_LRU is always decremented and the number of sets is always
       * a power of two; as a result lru_reload has all the bits set that get
       * touched while decrementing SPR_ITLBMR_LRU. */
      *itlbmr |= immu->lru_reload;

      /* Check if page is cache inhibited */
      insn_ci = *itlbtr & SPR_ITLBTR_CI;

      runtime.sim.mem_cycles += immu->hitdelay;

      /* Test for page fault */
      if (cpu_state.sprs[SPR_SR] & SPR_SR_SM)
        {
          if (!(*itlbtr & SPR_ITLBTR_SXE))
            except_handle (EXCEPT_IPF, virtaddr);
        }
      else
        {
          if (!(*itlbtr & SPR_ITLBTR_UXE))
            except_handle (EXCEPT_IPF, virtaddr);
        }

      return (*itlbtr & SPR_ITLBTR_PPN) | (virtaddr & immu->page_offset_mask);
    }

  /* No, we didn't. */
  immu_stats.fetch_tlbmiss++;
#if 0
  for (i = 0; i < immu->nways; i++)
    if (((cpu_state.sprs[SPR_ITLBMR_BASE (i) + set] & SPR_ITLBMR_LRU) >> 6) <
        minlru)
      minway = i;

  cpu_state.sprs[SPR_ITLBMR_BASE (minway) + set] &= ~SPR_ITLBMR_VPN;
  cpu_state.sprs[SPR_ITLBMR_BASE (minway) + set] |= vpn << 12;
  for (i = 0; i < immu->nways; i++)
    {
      uorreg_t lru = cpu_state.sprs[SPR_ITLBMR_BASE (i) + set];
      if (lru & SPR_ITLBMR_LRU)
        {
          lru = (lru & ~SPR_ITLBMR_LRU) | ((lru & SPR_ITLBMR_LRU) - 0x40);
          cpu_state.sprs[SPR_ITLBMR_BASE (i) + set] = lru;
        }
    }
  cpu_state.sprs[SPR_ITLBMR_BASE (way) + set] &= ~SPR_ITLBMR_LRU;
  cpu_state.sprs[SPR_ITLBMR_BASE (way) + set] |= (immu->nsets - 1) << 6;

  /* 1 to 1 mapping */
  cpu_state.sprs[SPR_ITLBTR_BASE (minway) + set] &= ~SPR_ITLBTR_PPN;
  cpu_state.sprs[SPR_ITLBTR_BASE (minway) + set] |= vpn << 12;

  cpu_state.sprs[SPR_ITLBMR_BASE (minway) + set] |= SPR_ITLBMR_V;
#endif

  /* if tlb refill implemented in HW */
  /* return ((cpu_state.sprs[SPR_ITLBTR_BASE(minway) + set] & SPR_ITLBTR_PPN) >> 12) * immu->pagesize + (virtaddr % immu->pagesize); */
  runtime.sim.mem_cycles += immu->missdelay;

  except_handle (EXCEPT_ITLBMISS, virtaddr);
  return 0;
}
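
/* Illustrative note (added, not part of the original source): on a hit the
   physical address is simply the PPN field of the matching ITLBTR with the
   in-page offset of the effective address OR'ed back in.  For example, with
   an assumed 8 KB page size (page_offset_mask = 0x1fff), an effective
   address of 0x12345678 whose page is mapped (hypothetically) to physical
   page base 0x00040000 yields 0x00040000 | 0x1678 = 0x00041678.  The hit and
   miss paths also charge immu->hitdelay and immu->missdelay cycles
   respectively to runtime.sim.mem_cycles.  */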

/* DESC: Try to find the EA -> PA translation without changing any
 *       processor state. If this is not possible, give up (without
 *       triggering exceptions).
 *
 * PRMS: virtaddr  - EA for which to find the translation
 *
 * RTRN: 0         - no IMMU, IMMU disabled or ITLB miss
 *       else      - appropriate PA (note that if the IMMU is not present,
 *                   PA === EA)
 */
oraddr_t
peek_into_itlb (oraddr_t virtaddr)
{
  uorreg_t *itlbmr;
  uorreg_t *itlbtr;
  uorreg_t *itlbmr_lru;
  struct immu *immu = immu_state;

  if (!(cpu_state.sprs[SPR_SR] & SPR_SR_IME) ||
      !(cpu_state.sprs[SPR_UPR] & SPR_UPR_IMP))
    {
      return (virtaddr);
    }

  itlbmr = immu_find_tlbmr (virtaddr, &itlbmr_lru, immu);

  /* Did we find our tlb entry? */
  if (itlbmr)
    {                           /* Yes, we did. */
      itlbtr = itlbmr + 128;

      /* Test for page fault */
      if (cpu_state.sprs[SPR_SR] & SPR_SR_SM)
        {
          if (!(*itlbtr & SPR_ITLBTR_SXE))
            {
              /* no luck, giving up */
              return (0);
            }
        }
      else
        {
          if (!(*itlbtr & SPR_ITLBTR_UXE))
            {
              /* no luck, giving up */
              return (0);
            }
        }

      return (*itlbtr & SPR_ITLBTR_PPN) | (virtaddr & immu->page_offset_mask);
    }

  return (0);
}
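
/* Usage sketch (illustrative only, not part of the original source): a
   debugger or disassembler front end can call peek_into_itlb() to map an
   effective address without perturbing LRU state, statistics or exceptions.
   The consumer below is hypothetical.  */
#if 0
  oraddr_t phys = peek_into_itlb (ea);

  if (0 == phys && (cpu_state.sprs[SPR_SR] & SPR_SR_IME))
    {
      /* No translation currently available: give up rather than fault. */
    }
  else
    {
      show_instruction_at (phys);       /* hypothetical consumer */
    }
#endif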


/* FIXME: Check validity */
/* First check if virtual address is covered by ITLB and if it is:
    - increment ITLB read hit stats,
    - set 'lru' at this way to immu->ustates - 1 and
      decrement 'lru' of other ways unless they have reached 0,
    - check page access attributes and invoke IMMU page fault exception
      handler if necessary
   and if not:
    - increment ITLB read miss stats
    - find lru way and entry and invoke ITLB miss exception handler
    - set 'lru' with immu->ustates - 1 and decrement 'lru' of other
      ways unless they have reached 0
*/
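
/* Note (added for clarity, not in the original source): the per-way LRU
   counter lives at bit offset 6 of the xTLBMR register, which is why
   immu_translate() ages a way by subtracting 0x40.  For example, an LRU
   value of 3 is held as 0xc0; one aging step leaves 0x80, i.e. LRU 2; a way
   whose LRU bits are already 0 is left untouched.  */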

static void
itlb_status (void *dat)
{
  struct immu *immu = dat;
  int set;
  int way;
  int end_set = immu->nsets;

  if (!(cpu_state.sprs[SPR_UPR] & SPR_UPR_IMP))
    {
      PRINTF ("IMMU not implemented. Set UPR[IMP].\n");
      return;
    }

  if (0 < end_set)
    PRINTF ("\nIMMU: ");
  /* Scan set(s) and way(s). */
  for (set = 0; set < end_set; set++)
    {
      for (way = 0; way < immu->nways; way++)
        {
          PRINTF ("%s\n", dump_spr (SPR_ITLBMR_BASE (way) + set,
                                    cpu_state.sprs[SPR_ITLBMR_BASE (way) +
                                                   set]));
          PRINTF ("%s\n",
                  dump_spr (SPR_ITLBTR_BASE (way) + set,
                            cpu_state.sprs[SPR_ITLBTR_BASE (way) + set]));
        }
    }
  if (0 < end_set)
    PRINTF ("\n");
}

/*---------------------------------------------------[ IMMU configuration ]---*/

/*---------------------------------------------------------------------------*/
/*!Enable or disable the IMMU

   Set the corresponding field in the UPR

   @param[in] val  The value to use
   @param[in] dat  The config data structure                                 */
/*---------------------------------------------------------------------------*/
static void
immu_enabled (union param_val val, void *dat)
{
  struct immu *immu = dat;

  if (val.int_val)
    {
      cpu_state.sprs[SPR_UPR] |= SPR_UPR_IMP;
    }
  else
    {
      cpu_state.sprs[SPR_UPR] &= ~SPR_UPR_IMP;
    }

  immu->enabled = val.int_val;
}


/*---------------------------------------------------------------------------*/
/*!Set the number of IMMU sets

   Value must be a power of 2 <= 128. Ignore any other values with a
   warning. Set the corresponding IMMU configuration flags.

   @param[in] val  The value to use
   @param[in] dat  The config data structure                                 */
/*---------------------------------------------------------------------------*/
static void
immu_nsets (union param_val  val,
            void            *dat)
{
  struct immu *immu = dat;

  if (is_power2 (val.int_val) && (val.int_val <= 128))
    {
      int  set_bits = log2_int (val.int_val);

      immu->nsets = val.int_val;

      cpu_state.sprs[SPR_IMMUCFGR] &= ~SPR_IMMUCFGR_NTS;
      cpu_state.sprs[SPR_IMMUCFGR] |= set_bits << SPR_IMMUCFGR_NTS_OFF;
    }
  else
    {
      fprintf (stderr, "Warning IMMU nsets not a power of 2 <= 128: ignored\n");
    }
}       /* immu_nsets() */
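
/* Worked example (added, not in the original source): the NTS field of
   IMMUCFGR holds log2 of the number of sets, so a configured nsets of 64
   stores set_bits = 6, and the maximum accepted value of 128 stores 7.  */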


/*---------------------------------------------------------------------------*/
/*!Set the number of IMMU ways

   Value must be in the range 1-4. Ignore other values with a warning.  Set
   the corresponding IMMU configuration flags.

   @param[in] val  The value to use
   @param[in] dat  The config data structure                                 */
/*---------------------------------------------------------------------------*/
static void
immu_nways (union param_val  val,
            void            *dat)
{
  struct immu *immu = dat;

  if (val.int_val >= 1 && val.int_val <= 4)
    {
      int  way_bits = val.int_val - 1;

      immu->nways = val.int_val;

      cpu_state.sprs[SPR_IMMUCFGR] &= ~SPR_IMMUCFGR_NTW;
      cpu_state.sprs[SPR_IMMUCFGR] |= way_bits << SPR_IMMUCFGR_NTW_OFF;
    }
  else
    {
      fprintf (stderr, "Warning IMMU nways not in range 1-4: ignored\n");
    }
}       /* immu_nways() */


/*---------------------------------------------------------------------------*/
/*!Set the IMMU page size

   Value must be a power of 2. Ignore other values with a warning

   @param[in] val  The value to use
   @param[in] dat  The config data structure                                 */
/*---------------------------------------------------------------------------*/
static void
immu_pagesize (union param_val  val,
               void            *dat)
{
  struct immu *immu = dat;

  if (is_power2 (val.int_val))
    {
      immu->pagesize = val.int_val;
    }
  else
    {
      fprintf (stderr, "Warning IMMU page size must be power of 2: ignored\n");
    }
}       /* immu_pagesize() */


/*---------------------------------------------------------------------------*/
/*!Set the IMMU entry size

   Value must be a power of 2. Ignore other values with a warning

   @param[in] val  The value to use
   @param[in] dat  The config data structure                                 */
/*---------------------------------------------------------------------------*/
static void
immu_entrysize (union param_val  val,
                void            *dat)
{
  struct immu *immu = dat;

  if (is_power2 (val.int_val))
    {
      immu->entrysize = val.int_val;
    }
  else
    {
      fprintf (stderr, "Warning IMMU entry size must be power of 2: ignored\n");
    }
}       /* immu_entrysize() */


/*---------------------------------------------------------------------------*/
/*!Set the number of IMMU usage states

   Value must be 2, 3 or 4. Ignore other values with a warning

   @param[in] val  The value to use
   @param[in] dat  The config data structure                                 */
/*---------------------------------------------------------------------------*/
static void
immu_ustates (union param_val  val,
              void            *dat)
{
  struct immu *immu = dat;

  if ((val.int_val >= 2) && (val.int_val <= 4))
    {
      immu->ustates = val.int_val;
    }
  else
    {
      fprintf (stderr, "Warning number of IMMU usage states must be 2, 3 or 4: "
               "ignored\n");
    }
}       /* immu_ustates() */


/*---------------------------------------------------------------------------*/
/*!Set the ITLB miss penalty in cycles

   @param[in] val  The value to use
   @param[in] dat  The config data structure                                 */
/*---------------------------------------------------------------------------*/
static void
immu_missdelay (union param_val val, void *dat)
{
  struct immu *immu = dat;

  immu->missdelay = val.int_val;
}

/*---------------------------------------------------------------------------*/
/*!Set the ITLB hit delay in cycles

   @param[in] val  The value to use
   @param[in] dat  The config data structure                                 */
/*---------------------------------------------------------------------------*/
static void
immu_hitdelay (union param_val val, void *dat)
{
  struct immu *immu = dat;

  immu->hitdelay = val.int_val;
}

/*---------------------------------------------------------------------------*/
/*!Initialize a new IMMU configuration

   ALL parameters are set explicitly to default values.                      */
/*---------------------------------------------------------------------------*/
static void *
immu_start_sec ()
{
  struct immu *immu;
  int          set_bits;
  int          way_bits;

  if (NULL == (immu = malloc (sizeof (struct immu))))
    {
      fprintf (stderr, "OOM\n");
      exit (1);
    }

  immu->enabled   = 0;
  immu->nsets     = 1;
  immu->nways     = 1;
  immu->pagesize  = 8192;
  immu->entrysize = 1;          /* Not currently used */
  immu->ustates   = 2;
  immu->hitdelay  = 1;
  immu->missdelay = 1;

  if (immu->enabled)
    {
      cpu_state.sprs[SPR_UPR] |= SPR_UPR_IMP;
    }
  else
    {
      cpu_state.sprs[SPR_UPR] &= ~SPR_UPR_IMP;
    }

  set_bits = log2_int (immu->nsets);
  cpu_state.sprs[SPR_IMMUCFGR] &= ~SPR_IMMUCFGR_NTS;
  cpu_state.sprs[SPR_IMMUCFGR] |= set_bits << SPR_IMMUCFGR_NTS_OFF;

  way_bits = immu->nways - 1;
  cpu_state.sprs[SPR_IMMUCFGR] &= ~SPR_IMMUCFGR_NTW;
  cpu_state.sprs[SPR_IMMUCFGR] |= way_bits << SPR_IMMUCFGR_NTW_OFF;

  immu_state = immu;
  return immu;

}       /* immu_start_sec() */


static void
immu_end_sec (void *dat)
{
  struct immu *immu = dat;

  /* Precalculate some values for use during address translation */
  immu->pagesize_log2 = log2_int (immu->pagesize);
  immu->page_offset_mask = immu->pagesize - 1;
  immu->page_mask = ~immu->page_offset_mask;
  immu->vpn_mask = ~((immu->pagesize * immu->nsets) - 1);
  immu->set_mask = immu->nsets - 1;
  immu->lru_reload = (immu->set_mask << 6) & SPR_ITLBMR_LRU;

  if (immu->enabled)
    {
      PRINTF ("Insn MMU %dKB: %d ways, %d sets, entry size %d bytes\n",
              immu->nsets * immu->entrysize * immu->nways / 1024, immu->nways,
              immu->nsets, immu->entrysize);
      reg_sim_stat (itlb_status, immu);
    }
}
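
/* Worked example (added, not in the original source; values shown for a
   32-bit oraddr_t): with the defaults set in immu_start_sec() (pagesize =
   8192, nsets = 1) the precalculated values are pagesize_log2 = 13,
   page_offset_mask = 0x1fff, page_mask = 0xffffe000, vpn_mask = 0xffffe000,
   set_mask = 0 and lru_reload = 0.  With an assumed nsets of 64 instead,
   vpn_mask widens to 0xfff80000, set_mask becomes 0x3f and lru_reload
   becomes (0x3f << 6) masked by SPR_ITLBMR_LRU.  */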

void
reg_immu_sec (void)
{
  struct config_section *sec = reg_config_sec ("immu", immu_start_sec,
                                               immu_end_sec);

  reg_config_param (sec, "enabled", paramt_int, immu_enabled);
  reg_config_param (sec, "nsets", paramt_int, immu_nsets);
  reg_config_param (sec, "nways", paramt_int, immu_nways);
  reg_config_param (sec, "pagesize", paramt_int, immu_pagesize);
  reg_config_param (sec, "entrysize", paramt_int, immu_entrysize);
  reg_config_param (sec, "ustates", paramt_int, immu_ustates);
  reg_config_param (sec, "missdelay", paramt_int, immu_missdelay);
  reg_config_param (sec, "hitdelay", paramt_int, immu_hitdelay);
}
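
/* Configuration sketch (added, not in the original source): the parameters
   registered above are read from the "immu" section of the simulator
   configuration file.  Assuming the usual or1ksim section syntax, a typical
   stanza might look like this (values illustrative only):

       section immu
         enabled   = 1
         nsets     = 64
         nways     = 1
         pagesize  = 8192
         hitdelay  = 1
         missdelay = 1
       end
*/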
