OpenCores
URL https://opencores.org/ocsvn/aor3000/aor3000/trunk

Subversion Repositories aor3000

[/] [aor3000/] [trunk/] [sim/] [vmips/] [vmips_emulator.cpp] - Blame information for rev 2

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 2 alfik
/*
2
 * This file is subject to the terms and conditions of the GPL License. See
3
 * the file "LICENSE" in the main directory of this archive for more details.
4
 *
5
 * Copyright (C) 2014 Aleksander Osman
6
 */
7
 
8
#include <cstdio>
9
#include <cstdlib>
10
#include <cstring>
11
 
12
#include "shared_mem.h"
13
#include "vmips_emulator.h"
14
 
15
//------------------------------------------------------------------------------ Code from vmips-1.4.1 project under the GPL license
16
//------------------------------------------------------------------------------
17
//------------------------------------------------------------------------------
18
 
19
/* MIPS R3000 CPU emulation.
20
   Copyright 2001, 2002, 2003, 2004 Brian R. Gaeke.
21
 
22
This file is part of VMIPS.
23
 
24
VMIPS is free software; you can redistribute it and/or modify it
25
under the terms of the GNU General Public License as published by the
26
Free Software Foundation; either version 2 of the License, or (at your
27
option) any later version.
28
 
29
VMIPS is distributed in the hope that it will be useful, but
30
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
31
or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
32
for more details.
33
 
34
You should have received a copy of the GNU General Public License along
35
with VMIPS; if not, write to the Free Software Foundation, Inc.,
36
59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */
37
 
38
//------------------------------------------------------------------------------ cpzero.cc
39
 
40
/* Per-register masks of the CP0 bits that are readable via mfc0.
 * Indexed by CP0 register number (0..31); a zero entry means the
 * register is not implemented and reads as zero (see read_reg). */
static uint32 read_masks[] = {
    Index_MASK, Random_MASK, EntryLo_MASK, 0, Context_MASK,
    PageMask_MASK, Wired_MASK, Error_MASK, BadVAddr_MASK, Count_MASK,
    EntryHi_MASK, Compare_MASK, Status_MASK, Cause_MASK, EPC_MASK,
    PRId_MASK, Config_MASK, LLAddr_MASK, WatchLo_MASK, WatchHi_MASK,
    0, 0, 0, 0, 0, 0, ECC_MASK, CacheErr_MASK, TagLo_MASK, TagHi_MASK,
    ErrorEPC_MASK, 0
};
48
 
49
/* Per-register masks of the CP0 bits that are writable via mtc0
 * (see write_reg).  Differences from read_masks encode read-only
 * registers/fields: Random, BadVAddr, EPC and PRId are not writable,
 * and the BadVPN field of Context and external-IP bits of Cause are
 * masked out of software writes. */
static uint32 write_masks[] = {
    Index_MASK, 0, EntryLo_MASK, 0, Context_MASK & ~Context_BadVPN_MASK,
    PageMask_MASK, Wired_MASK, Error_MASK, 0, Count_MASK,
    EntryHi_MASK, Compare_MASK, Status_MASK,
    Cause_MASK & ~Cause_IP_Ext_MASK, 0, 0, Config_MASK, LLAddr_MASK,
    WatchLo_MASK, WatchHi_MASK, 0, 0, 0, 0, 0, 0, ECC_MASK,
    CacheErr_MASK, TagLo_MASK, TagHi_MASK, ErrorEPC_MASK, 0
};
57
 
58
/* Construct CP0 bound to the CPU core M that owns it. */
CPZero::CPZero(CPU *m) : cpu (m) { }
59
 
60
/* Reset (warm or cold): clear the first 16 CP0 registers, then set the
 * architecturally-defined reset state of Random, Status and PRId. */
void
CPZero::reset(void)
{
    int r;
    /* Only CP0 registers 0..15 exist on the R3000; the rest of reg[]
     * is left untouched here. */
    for (r = 0; r < 16; r++) {
        reg[r] = 0;
    }
    /* Turn off any randomly-set pending-interrupt bits, as these
     * can impact correctness. */
    reg[Cause] &= ~Cause_IP_MASK;
    /* Reset Random register to upper bound (8<=Random<=63).
     * The Random index field lives in bits 8..13, hence the shift. */
    reg[Random] = Random_UPPER_BOUND << 8;
    /* Reset Status register: clear KUc, IEc, SwC (i.e., caches are not
     * switched), TS (TLB shutdown has not occurred), and set
     * BEV (Bootstrap exception vectors ARE in effect).
     */
    reg[Status] = (reg[Status] | Status_DS_BEV_MASK) &
        ~(Status_KUc_MASK | Status_IEc_MASK | Status_DS_SwC_MASK |
          Status_DS_TS_MASK);
    reg[PRId] = 0x00000230; /* MIPS R3000A */
}
82
 
83
/* Yow!! Are we in KERNEL MODE yet?? ...Read the Status register. */
84
bool
85
CPZero::kernel_mode(void) const
86
{
87
    return !(reg[Status] & Status_KUc_MASK);
88
}
89
 
90
/* Request for address translation (possibly using the TLB).
 *
 * VADDR is the virtual address, MODE is INSTFETCH/DATALOAD/DATASTORE.
 * On return *CACHEABLE says whether the access may be cached and
 * *CACHE_ISOLATED reflects the Status IsC bit.  Returns the physical
 * address, or 0xffffffff after raising an address-error exception. */
uint32
CPZero::address_trans(uint32 vaddr, int mode, bool *cacheable, bool *cache_isolated)
{
    (*cache_isolated) = caches_isolated();

    if (kernel_mode()) {
        /* Kernel mode: dispatch on the kseg selector bits.  kseg0/kseg1
         * are fixed windows onto physical memory; kseg2 and kuseg are
         * TLB-mapped. */
        switch(vaddr & KSEG_SELECT_MASK) {
        case KSEG0:
            *cacheable = true;
            return vaddr - KSEG0_CONST_TRANSLATION;
        case KSEG1:
            *cacheable = false;
            return vaddr - KSEG1_CONST_TRANSLATION;
        case KSEG2:
        case KSEG2_top:
            return tlb_translate(KSEG2, vaddr, mode, cacheable);
        default: /* KUSEG */
            return tlb_translate(KUSEG, vaddr, mode, cacheable);
        }
    }

    /* user mode */
    if (vaddr & KERNEL_SPACE_MASK) {
        /* Can't go there: kernel-space access from user mode is an
         * address error (AdES on stores, AdEL otherwise). */
        cpu->exception(mode == DATASTORE ? AdES : AdEL, mode);
        return 0xffffffff;
    } else /* user space address */ {
        return tlb_translate(KUSEG, vaddr, mode, cacheable);
    }
}
121
 
122
/* Record translation-failure state in CP0 before raising a TLB/address
 * exception: BadVAddr gets the faulting address, the BadVPN field of
 * Context and the VPN field of EntryHi are refreshed from it.
 * NOTE(review): the VPN and MATCH parameters are currently unused. */
void
CPZero::load_addr_trans_excp_info(uint32 va, uint32 vpn, TLBEntry *match)
{
    reg[BadVAddr] = va;
    /* BadVPN occupies bits 2..20 of Context: VA bits 12..30 shifted
     * down by 10. */
    reg[Context] = (reg[Context] & ~Context_BadVPN_MASK) | ((va & 0x7ffff000) >> 10);
    reg[EntryHi] = (va & EntryHi_VPN_MASK) | (reg[EntryHi] & ~EntryHi_VPN_MASK);
}
129
 
130
int
131
CPZero::find_matching_tlb_entry(uint32 vpn, uint32 asid)
132
{
133
    for (uint16 x = 0; x < TLB_ENTRIES; x++)
134
        if (tlb[x].vpn() == vpn && (tlb[x].global() || tlb[x].asid() == asid))
135
            return x;
136
    return -1;
137
}
138
 
139
/* Translate VADDR through the TLB for segment SEG (KUSEG or KSEG2).
 * MODE is INSTFETCH/DATALOAD/DATASTORE.  Sets *CACHEABLE on a hit.
 * Returns the physical address, or 0xffffffff after raising Mod (write
 * to a clean page), TLBS (store miss) or TLBL (load/fetch miss).
 * Also maintains tlb_miss_user, which CPU::exception() uses to select
 * the special user-refill vector. */
uint32
CPZero::tlb_translate(uint32 seg, uint32 vaddr, int mode, bool *cacheable)
{
    uint32 asid = reg[EntryHi] & EntryHi_ASID_MASK;
    uint32 vpn = vaddr & EntryHi_VPN_MASK;
    int index = find_matching_tlb_entry(vpn, asid);
    TLBEntry *match = (index == -1) ? 0 : &tlb[index];
    tlb_miss_user = false;
    if (match && match->valid()) {
        if (mode == DATASTORE && !match->dirty()) {
            /* TLB Mod exception - write to page not marked "dirty" */
            load_addr_trans_excp_info(vaddr,vpn,match);
            cpu->exception(Mod, DATASTORE);
            return 0xffffffff;
        } else {
            /* We have a matching TLB entry which is valid. */
            *cacheable = !match->noncacheable();
            /* Physical address = PFN from the entry, page offset from
             * the virtual address. */
            return match->pfn() | (vaddr & ~EntryHi_VPN_MASK);
        }
    }
    // If we got here, then there was no matching tlb entry, or it wasn't valid.
    // Use special refill handler vector for user TLB miss.
    tlb_miss_user = (seg == KUSEG && !match);
    load_addr_trans_excp_info(vaddr,vpn,match);
    //fprintf(stderr, "TLB: Miss for vaddr=%x (vpn=%x)\n", vaddr, (vaddr>>12));
    cpu->exception(mode == DATASTORE ? TLBS : TLBL, mode);
    return 0xffffffff;
}
167
 
168
/* Read CP0 register R as seen by mfc0. */
uint32 CPZero::read_reg(const uint16 r) {
    // This ensures that non-existent CP0 registers read as zero.
    return reg[r] & read_masks[r];
}
172
 
173
/* Write DATA to CP0 register R as done by mtc0: writable bits take the
 * new data, readable-but-read-only bits keep their old value. */
void CPZero::write_reg(const uint16 r, const uint32 data) {
    // This preserves the bits which are readable but not writable, and writes
    // the bits which are writable with new data, thus making it suitable
    // for mtc0-type operations.  If you want to write all the bits which
    // are _connected_, use: reg[r] = new_data & write_masks[r]; .
    reg[r] = (reg[r] & (read_masks[r] & ~write_masks[r]))
             | (data & write_masks[r]);
}
181
 
182
/* MFC0: move CP0 register rd into general register rt. */
void
CPZero::mfc0_emulate(uint32 instr, uint32 pc)
{
    cpu->put_reg (CPU::rt (instr), read_reg (CPU::rd (instr)));
}
187
 
188
/* MTC0: move general register rt into CP0 register rd. */
void
CPZero::mtc0_emulate(uint32 instr, uint32 pc)
{
    write_reg (CPU::rd (instr), cpu->get_reg (CPU::rt (instr)));
}
193
 
194
void
195
CPZero::bc0x_emulate(uint32 instr, uint32 pc)
196
{
197
    uint16 condition = CPU::rt (instr);
198
    switch (condition) {
199
    case 0: /* bc0f */ if (! cpCond ()) { cpu->branch (instr, pc); } break;
200
    case 1: /* bc0t */ if (cpCond ()) { cpu->branch (instr, pc); } break;
201
    case 2: /* bc0fl - not valid, but not reserved(A-17, H&K) - no-op. */ break;
202
    case 3: /* bc0tl - not valid, but not reserved(A-21, H&K) - no-op. */ break;
203
    default: cpu->exception (RI); break; /* reserved */
204
    }
205
}
206
 
207
/* TLBR: read the TLB entry selected by the Index register (index field
 * in bits 8..13) back into EntryHi/EntryLo, masked to writable bits. */
void
CPZero::tlbr_emulate(uint32 instr, uint32 pc)
{
    reg[EntryHi] = (tlb[(reg[Index] & Index_Index_MASK) >> 8].entryHi) &
        write_masks[EntryHi];
    reg[EntryLo] = (tlb[(reg[Index] & Index_Index_MASK) >> 8].entryLo) &
        write_masks[EntryLo];
}
215
 
216
/* Copy EntryHi/EntryLo (readable bits only) into TLB entry INDEX.
 * Common helper for tlbwi/tlbwr. */
void
CPZero::tlb_write(unsigned index)
{
    tlb[index].entryHi = read_reg(EntryHi);
    tlb[index].entryLo = read_reg(EntryLo);
}
222
 
223
/* TLBWI: write EntryHi/EntryLo to the TLB slot named by Index
 * (index field in bits 8..13). */
void
CPZero::tlbwi_emulate(uint32 instr, uint32 pc)
{
    tlb_write ((reg[Index] & Index_Index_MASK) >> 8);
}
228
 
229
/* TLBWR: write EntryHi/EntryLo to the TLB slot named by the Random
 * register (bits 8..13), then step Random to its next value. */
void
CPZero::tlbwr_emulate(uint32 instr, uint32 pc)
{
    tlb_write ((reg[Random] & Random_Random_MASK) >> 8);

    adjust_random();
}
236
 
237
void
238
CPZero::tlbp_emulate(uint32 instr, uint32 pc)
239
{
240
    uint32 vpn = reg[EntryHi] & EntryHi_VPN_MASK;
241
    uint32 asid = reg[EntryHi] & EntryHi_ASID_MASK;
242
    int idx = find_matching_tlb_entry (vpn, asid);
243
    if (idx != -1)
244
      reg[Index] = (idx << 8);
245
    else
246
      reg[Index] = (1 << 31);
247
}
248
 
249
/* RFE: restore from exception by popping the 3-deep KU/IE stack in
 * Status -- the "previous" and "old" mode bits shift down two places
 * into "current" and "previous". */
void
CPZero::rfe_emulate(uint32 instr, uint32 pc)
{
    reg[Status] = (reg[Status] & 0xfffffff0) | ((reg[Status] >> 2) & 0x0f);
}
254
 
255
void
256
CPZero::cpzero_emulate(uint32 instr, uint32 pc)
257
{
258
    uint16 rs = CPU::rs (instr);
259
    if (CPU::rs (instr) > 15) {
260
        switch (CPU::funct (instr)) {
261
        case 1: tlbr_emulate (instr, pc); break;
262
        case 2: tlbwi_emulate (instr, pc); break;
263
        case 6: tlbwr_emulate (instr, pc); break;
264
        case 8: tlbp_emulate (instr, pc); break;
265
        case 16: rfe_emulate (instr, pc); break;
266
        default: cpu->exception (RI, ANY, 0); break;
267
        }
268
    } else {
269
        switch (rs) {
270
        case 0: mfc0_emulate (instr, pc); break;
271
        case 2: cpu->exception (RI, ANY, 0); break; /* cfc0 - reserved */
272
        case 4: mtc0_emulate (instr, pc); break;
273
        case 6: cpu->exception (RI, ANY, 0); break; /* ctc0 - reserved */
274
        case 8: bc0x_emulate (instr,pc); break;
275
        default: cpu->exception (RI, ANY, 0); break;
276
        }
277
    }
278
}
279
 
280
/* Step the Random register: decrement its index field (bits 8..13),
 * wrapping from the lower bound (8) back up to 63. */
void
CPZero::adjust_random(void)
{
//ao modified
    int32 r = (int32) (reg[Random] >> 8);
    if(r <= 8) r = 63; else r--;
    reg[Random] = (uint32) (r << 8);
}
288
 
289
/* Build the live IP (interrupt pending) field: software interrupt bits
 * from Cause combined with the current external interrupt lines. */
uint32
CPZero::getIP(void)
{
    return (reg[Cause] & Cause_IP_SW_MASK) | ao_interrupts();
}
294
 
295
void
296
CPZero::enter_exception(uint32 pc, uint32 excCode, uint32 ce, bool dly)
297
{
298
    /* Save exception PC in EPC. */
299
    reg[EPC] = pc;
300
    /* Disable interrupts and enter Kernel mode. */
301
    reg[Status] = (reg[Status] & ~Status_KU_IE_MASK) |
302
        ((reg[Status] & Status_KU_IE_MASK) << 2);
303
    /* Clear Cause register BD, CE, and ExcCode fields. */
304
    reg[Cause] &= ~(Cause_BD_MASK|Cause_CE_MASK|Cause_ExcCode_MASK);
305
    /* Set Cause register CE field if this is a Coprocessor
306
     * Unusable exception. (If we are passed ce=-1 we don't want
307
     * to toggle bits in Cause.) */
308
    if (excCode == CpU) {
309
        reg[Cause] |= ((ce & 0x3) << 28);
310
    }
311
    /* Update IP, BD, ExcCode fields of Cause register. */
312
    reg[Cause] &= ~Cause_IP_MASK;
313
    reg[Cause] |= getIP () | (dly << 31) | (excCode << 2);
314
}
315
 
316
bool
317
CPZero::use_boot_excp_address(void)
318
{
319
    return (reg[Status] & Status_DS_BEV_MASK);
320
}
321
 
322
bool
323
CPZero::caches_isolated(void)
324
{
325
    return (reg[Status] & Status_DS_IsC_MASK);
326
}
327
 
328
bool
329
CPZero::caches_swapped(void)
330
{
331
    return (reg[Status] & Status_DS_SwC_MASK);
332
}
333
 
334
/* Return whether coprocessor COPROCNO (0..3) is marked usable via the
 * corresponding CU bit in Status.  Any other number is a programming
 * error; fatal_error presumably aborts and does not return -- confirm,
 * since the fall-through path returns no value. */
bool
CPZero::cop_usable(int coprocno)
{
    switch (coprocno) {
    case 3: return (reg[Status] & Status_CU3_MASK);
    case 2: return (reg[Status] & Status_CU2_MASK);
    case 1: return (reg[Status] & Status_CU1_MASK);
    case 0: return (reg[Status] & Status_CU0_MASK);
    default: fatal_error ("Bad coprocno passed to CPZero::cop_usable()");
    };
}
345
 
346
bool
347
CPZero::interrupts_enabled(void) const
348
{
349
    return (reg[Status] & Status_IEc_MASK);
350
}
351
 
352
bool
353
CPZero::interrupt_pending(void)
354
{
355
    if (! interrupts_enabled())
356
        return false;   /* Can't very well argue with IEc == 0... */
357
    /* Mask IP with the interrupt mask, and return true if nonzero: */
358
    return ((getIP () & (reg[Status] & Status_IM_MASK)) != 0);
359
}
360
 
361
//------------------------------------------------------------------------------ cpu.cc
362
 
363
/* certain fixed register numbers which are handy to know */
static const int reg_zero = 0;  /* always zero */
static const int reg_sp = 29;   /* stack pointer */
static const int reg_ra = 31;   /* return address (written by jal) */

/* pointer to CPU method returning void and taking two uint32's
 * (the common signature of all the *_emulate handlers below) */
typedef void (CPU::*emulate_funptr)(uint32, uint32);
370
 
371
/* Construct a CPU with the exception-priority bookkeeping cleared and
 * a fresh CP0 coprocessor bound back to this core; $zero is pinned. */
CPU::CPU () : last_epc (0), last_prio (0),
             cpzero (new CPZero (this)), delay_state (NORMAL)
{
    reg[reg_zero] = 0;
}
376
 
377
/* NOTE(review): cpzero is allocated with new in the constructor but
 * never deleted here -- presumably the CPU lives for the whole
 * simulation run; confirm before reusing this class. */
CPU::~CPU() {
}
379
 
380
/* Reset the core: re-pin $zero, start fetching at the MIPS reset
 * vector 0xbfc00000 (uncached kseg1 boot ROM), and reset CP0. */
void CPU::reset () {
    reg[reg_zero] = 0;
    pc = 0xbfc00000;
    cpzero->reset();
}
385
 
386
/* Map an (exception code, access mode) pair to its architectural
 * priority (higher number = higher priority); 0 means "not found".
 * Used by exception() to decide which of two exceptions raised for the
 * same instruction wins. */
int
CPU::exception_priority(uint16 excCode, int mode) const
{
    /* See doc/excprio for an explanation of this table. */
    static const struct excPriority prio[] = {
        {1, AdEL, INSTFETCH},
        {2, TLBL, INSTFETCH}, {2, TLBS, INSTFETCH},
        {3, IBE, ANY},
        {4, Ov, ANY}, {4, Tr, ANY}, {4, Sys, ANY},
        {4, Bp, ANY}, {4, RI, ANY}, {4, CpU, ANY},
        {5, AdEL, DATALOAD}, {5, AdES, ANY},
        {6, TLBL, DATALOAD}, {6, TLBS, DATALOAD},
        {6, TLBL, DATASTORE}, {6, TLBS, DATASTORE},
        {7, Mod, ANY},
        {8, DBE, ANY},
        {9, Int, ANY},
        {0, ANY, ANY} /* catch-all */
    };
    const struct excPriority *p;

    /* First row matching both code and mode (ANY is a wildcard) wins. */
    for (p = prio; p->priority != 0; p++) {
        if (excCode == p->excCode || p->excCode == ANY) {
            if (mode == p->mode || p->mode == ANY) {
                return p->priority;
            }
        }
    }
    return 0;
}
415
 
416
/* Raise exception EXCCODE (access mode MODE, coprocessor COPROCNO for
 * CpU exceptions): resolve priority against any exception already
 * raised for the same instruction, update CP0 state, and redirect the
 * PC to the appropriate exception vector. */
void
CPU::exception(uint16 excCode, int mode /* = ANY */, int coprocno /* = -1 */)
{
/* debug trace added by the aor3000 port ("ao"); prints on every exception */
printf("Exception: code: 0x%x, mode: %x, coprocno: %x\n", (uint32)excCode, mode, coprocno);
    int prio;
    uint32 base, vector, epc;
    bool delaying = (delay_state == DELAYSLOT);

    /* step() ensures that next_epc will always contain the correct
     * EPC whenever exception() is called.
     */
    epc = next_epc;

    /* Prioritize exception -- if the last exception to occur _also_ was
     * caused by this EPC, only report this exception if it has a higher
     * priority.  Otherwise, exception handling terminates here,
     * because only one exception will be reported per instruction
     * (as per MIPS RISC Architecture, p. 6-35). Note that this only
     * applies IFF the previous exception was caught during the current
     * _execution_ of the instruction at this EPC, so we check that
     * EXCEPTION_PENDING is true before aborting exception handling.
     * (This flag is reset by each call to step().)
     */
    prio = exception_priority(excCode, mode);
    if (epc == last_epc) {
        if (prio <= last_prio && exception_pending) {
            return;
        } else {
            last_prio = prio;
        }
    }
    last_epc = epc;

    /* Set processor to Kernel mode, disable interrupts, and save
     * exception PC.
     */
    cpzero->enter_exception(epc,excCode,coprocno,delaying);

    /* Calculate the exception handler address; this is of the form BASE +
     * VECTOR. The BASE is determined by whether we're using boot-time
     * exception vectors, according to the BEV bit in the CP0 Status register.
     */
    if (cpzero->use_boot_excp_address()) {
        base = 0xbfc00100;
    } else {
        base = 0x80000000;
    }

    /* Do we have a User TLB Miss exception? If so, jump to the
     * User TLB Miss exception vector, otherwise jump to the
     * common exception vector.
     */
    if ((excCode == TLBL || excCode == TLBS) && (cpzero->tlb_miss_user)) {
        vector = 0x000;
    } else {
        vector = 0x080;
    }

    pc = base + vector;
    exception_pending = true;
}
477
 
478
/* emulation of instructions */

/* COP0 opcode: forward to the coprocessor-0 decoder. */
void
CPU::cpzero_emulate(uint32 instr, uint32 pc)
{
    cpzero->cpzero_emulate(instr, pc);
}
484
 
485
/* Called when the program wants to use coprocessor COPROCNO, and there
 * isn't any implementation for that coprocessor.
 * Results in a Coprocessor Unusable exception, along with an error
 * message being printed if the coprocessor is marked usable in the
 * CP0 Status register.
 */
void
CPU::cop_unimpl (int coprocno, uint32 instr, uint32 pc)
{
    exception (CpU, ANY, coprocno);
}
496
 
497
/* COP1 opcode (floating point): there is no FPU in this model. */
void
CPU::cpone_emulate(uint32 instr, uint32 pc)
{
    /* If it's a cfc1 <reg>, $0 then we copy 0 into reg,
        * which is supposed to mean there is NO cp1...
        * for now, though, ANYTHING else asked of cp1 results
        * in the default "unimplemented" behavior. */
    if (cpzero->cop_usable (1) && rs (instr) == 2
                && rd (instr) == 0) {
        reg[rt (instr)] = 0; /* No cp1. */
    } else {
        cop_unimpl (1, instr, pc);
    }
}
511
 
512
/* COP2 opcode: no coprocessor 2 implemented -> CpU exception. */
void
CPU::cptwo_emulate(uint32 instr, uint32 pc)
{
    cop_unimpl (2, instr, pc);
}
517
 
518
/* COP3 opcode: no coprocessor 3 implemented -> CpU exception. */
void
CPU::cpthree_emulate(uint32 instr, uint32 pc)
{
    cop_unimpl (3, instr, pc);
}
523
 
524
/* Schedule a control transfer to NEW_PC: the instruction in the delay
 * slot still executes before the PC actually changes. */
void
CPU::control_transfer (uint32 new_pc)
{
    delay_state = DELAYING;
    delay_pc = new_pc;
}
530
 
531
/// calc_jump_target - Calculate the address to jump to as a result of
532
/// the J-format (jump) instruction INSTR at address PC.  (PC is the address
533
/// of the jump instruction, and INSTR is the jump instruction word.)
534
///
535
uint32
536
CPU::calc_jump_target (uint32 instr, uint32 pc)
537
{
538
    // Must use address of delay slot (pc + 4) to calculate.
539
    return ((pc + 4) & 0xf0000000) | (jumptarg(instr) << 2);
540
}
541
 
542
/* Common body of j/jal: schedule the transfer to the J-format target. */
void
CPU::jump(uint32 instr, uint32 pc)
{
    control_transfer (calc_jump_target (instr, pc));
}
547
 
548
/* J: unconditional jump. */
void
CPU::j_emulate(uint32 instr, uint32 pc)
{
    jump (instr, pc);
}
553
 
554
/* JAL: jump and link -- like J, but also stores the return address. */
void
CPU::jal_emulate(uint32 instr, uint32 pc)
{
    jump (instr, pc);
    // RA gets addr of instr after delay slot (2 words after this one).
    reg[reg_ra] = pc + 8;
}
561
 
562
/// calc_branch_target - Calculate the address to jump to for the
/// PC-relative branch for which the offset is specified by the immediate field
/// of the branch instruction word INSTR, with the program counter equal to PC.
/// 
uint32
CPU::calc_branch_target(uint32 instr, uint32 pc)
{
    // Offset is relative to the delay slot (pc + 4), in words.
    return (pc + 4) + (s_immed(instr) << 2);
}
571
 
572
/* Common body of taken branches: schedule the PC-relative transfer. */
void
CPU::branch(uint32 instr, uint32 pc)
{
    control_transfer (calc_branch_target (instr, pc));
}
577
 
578
void
579
CPU::beq_emulate(uint32 instr, uint32 pc)
580
{
581
    if (reg[rs(instr)] == reg[rt(instr)])
582
        branch (instr, pc);
583
}
584
 
585
void
586
CPU::bne_emulate(uint32 instr, uint32 pc)
587
{
588
    if (reg[rs(instr)] != reg[rt(instr)])
589
        branch (instr, pc);
590
}
591
 
592
void
593
CPU::blez_emulate(uint32 instr, uint32 pc)
594
{
595
    if (rt(instr) != 0) {
596
        exception(RI);
597
        return;
598
    }
599
    if (reg[rs(instr)] == 0 || (reg[rs(instr)] & 0x80000000))
600
        branch(instr, pc);
601
}
602
 
603
void
604
CPU::bgtz_emulate(uint32 instr, uint32 pc)
605
{
606
    if (rt(instr) != 0) {
607
        exception(RI);
608
        return;
609
    }
610
    if (reg[rs(instr)] != 0 && (reg[rs(instr)] & 0x80000000) == 0)
611
        branch(instr, pc);
612
}
613
 
614
void
615
CPU::addi_emulate(uint32 instr, uint32 pc)
616
{
617
    int32 a, b, sum;
618
 
619
    a = (int32)reg[rs(instr)];
620
    b = s_immed(instr);
621
    sum = a + b;
622
    if ((a < 0 && b < 0 && !(sum < 0)) || (a >= 0 && b >= 0 && !(sum >= 0))) {
623
        exception(Ov);
624
        return;
625
    } else {
626
        reg[rt(instr)] = (uint32)sum;
627
    }
628
}
629
 
630
void
631
CPU::addiu_emulate(uint32 instr, uint32 pc)
632
{
633
    int32 a, b, sum;
634
 
635
    a = (int32)reg[rs(instr)];
636
    b = s_immed(instr);
637
    sum = a + b;
638
    reg[rt(instr)] = (uint32)sum;
639
}
640
 
641
void
642
CPU::slti_emulate(uint32 instr, uint32 pc)
643
{
644
    int32 s_rs = reg[rs(instr)];
645
 
646
    if (s_rs < s_immed(instr)) {
647
        reg[rt(instr)] = 1;
648
    } else {
649
        reg[rt(instr)] = 0;
650
    }
651
}
652
 
653
void
654
CPU::sltiu_emulate(uint32 instr, uint32 pc)
655
{
656
    if (reg[rs(instr)] < (uint32)(int32)s_immed(instr)) {
657
        reg[rt(instr)] = 1;
658
    } else {
659
        reg[rt(instr)] = 0;
660
    }
661
}
662
 
663
/* ANDI: rt = rs & zero-extended immediate.  The extra 0xffff mask on
 * rs looks redundant given the AND with a 16-bit immediate -- but it
 * is harmless and preserved as-is. */
void
CPU::andi_emulate(uint32 instr, uint32 pc)
{
    reg[rt(instr)] = (reg[rs(instr)] & 0x0ffff) & immed(instr);
}
668
 
669
/* ORI: rt = rs | zero-extended immediate. */
void
CPU::ori_emulate(uint32 instr, uint32 pc)
{
    reg[rt(instr)] = reg[rs(instr)] | immed(instr);
}
674
 
675
/* XORI: rt = rs ^ zero-extended immediate. */
void
CPU::xori_emulate(uint32 instr, uint32 pc)
{
    reg[rt(instr)] = reg[rs(instr)] ^ immed(instr);
}
680
 
681
/* LUI: rt = immediate << 16 (low half cleared). */
void
CPU::lui_emulate(uint32 instr, uint32 pc)
{
    reg[rt(instr)] = immed(instr) << 16;
}
686
 
687
/* LB: load byte, sign-extended into rt. */
void
CPU::lb_emulate(uint32 instr, uint32 pc)
{
    uint32 phys, virt, base;
    int8 byte;
    int32 offset;
    bool cacheable, isolated;

    /* Calculate virtual address. */
    base = reg[rs(instr)];
    offset = s_immed(instr);
    virt = base + offset;

    /* Translate virtual address to physical address. */
    phys = cpzero->address_trans(virt, DATALOAD, &cacheable, &isolated);
    if (exception_pending) return;

    /* Fetch byte.
     * Because it is assigned to a signed variable (int8 byte)
     * it will be sign-extended when widened to the register below.
     */
    byte = ao_fetch_byte(phys, cacheable, isolated);
    if (exception_pending) return;

    /* Load target register with data. */
    reg[rt(instr)] = byte;
}
714
 
715
/* LH: load halfword, sign-extended into rt; unaligned address raises
 * AdEL. */
void
CPU::lh_emulate(uint32 instr, uint32 pc)
{
    uint32 phys, virt, base;
    int16 halfword;
    int32 offset;
    bool cacheable, isolated;

    /* Calculate virtual address. */
    base = reg[rs(instr)];
    offset = s_immed(instr);
    virt = base + offset;

    /* This virtual address must be halfword-aligned. */
    if (virt % 2 != 0) {
        exception(AdEL,DATALOAD);
        return;
    }

    /* Translate virtual address to physical address. */
    phys = cpzero->address_trans(virt, DATALOAD, &cacheable, &isolated);
    if (exception_pending) return;

    /* Fetch halfword.
     * Because it is assigned to a signed variable (int16 halfword)
     * it will be sign-extended when widened to the register below.
     */
    halfword = ao_fetch_halfword(phys, cacheable, isolated);
    if (exception_pending) return;

    /* Load target register with data. */
    reg[rt(instr)] = halfword;
}
748
 
749
/* The lwr and lwl algorithms here are taken from SPIM 6.0,
750
 * since I didn't manage to come up with a better way to write them.
751
 * Improvements are welcome.
752
 */
753
uint32
754
CPU::lwr(uint32 regval, uint32 memval, uint8 offset)
755
{
756
    switch (offset)
757
    {
758
        /* The SPIM source claims that "The description of the
759
            * little-endian case in Kane is totally wrong." The fact
760
            * that I ripped off the LWR algorithm from them could be
761
            * viewed as a sort of passive assumption that their claim
762
            * is correct.
763
            */
764
        case 0: /* 3 in book */
765
            return memval;
766
        case 1: /* 0 in book */
767
            return (regval & 0xff000000) | ((memval & 0xffffff00) >> 8);
768
        case 2: /* 1 in book */
769
            return (regval & 0xffff0000) | ((memval & 0xffff0000) >> 16);
770
        case 3: /* 2 in book */
771
            return (regval & 0xffffff00) | ((memval & 0xff000000) >> 24);
772
    }
773
    fatal_error("Invalid offset %x passed to lwr\n", offset);
774
}
775
 
776
/* LWL merge: combine the bytes of MEMVAL at and below byte OFFSET into
 * the high end of REGVAL (little-endian byte numbering). */
uint32
CPU::lwl(uint32 regval, uint32 memval, uint8 offset)
{
    switch (offset)
    {
        case 0: return (memval & 0xff) << 24 | (regval & 0xffffff);
        case 1: return (memval & 0xffff) << 16 | (regval & 0xffff);
        case 2: return (memval & 0xffffff) << 8 | (regval & 0xff);
        case 3: return memval;
    }
    /* offset > 3 is impossible for a byte-in-word index. */
    fatal_error("Invalid offset %x passed to lwl\n", offset);
}
788
 
789
/* LWL: load word left -- unaligned-load helper that fills the high
 * (left) bytes of rt from memory, leaving the rest of rt intact. */
void
CPU::lwl_emulate(uint32 instr, uint32 pc)
{
    uint32 phys, virt, wordvirt, base, memword;
    uint8 which_byte;
    int32 offset;
    bool cacheable, isolated;

    /* Calculate virtual address. */
    base = reg[rs(instr)];
    offset = s_immed(instr);
    virt = base + offset;
    /* We request the word containing the byte-address requested. */
    wordvirt = virt & ~0x03UL;

    /* Translate virtual address to physical address. */
    phys = cpzero->address_trans(wordvirt, DATALOAD, &cacheable, &isolated);
    if (exception_pending) return;

    /* Fetch word. */
    memword = ao_fetch_word(phys, DATALOAD, cacheable, isolated);
    if (exception_pending) return;

    /* Insert bytes into the left side of the register. */
    which_byte = virt & 0x03;
    reg[rt(instr)] = lwl(reg[rt(instr)], memword, which_byte);
}
816
 
817
/* LW: load word into rt; unaligned address raises AdEL. */
void
CPU::lw_emulate(uint32 instr, uint32 pc)
{
    uint32 phys, virt, base, word;
    int32 offset;
    bool cacheable, isolated;

    /* Calculate virtual address. */
    base = reg[rs(instr)];
    offset = s_immed(instr);
    virt = base + offset;

    /* This virtual address must be word-aligned. */
    if (virt % 4 != 0) {
        exception(AdEL,DATALOAD);
        return;
    }

    /* Translate virtual address to physical address. */
    phys = cpzero->address_trans(virt, DATALOAD, &cacheable, &isolated);
    if (exception_pending) return;

    /* Fetch word. */
    word = ao_fetch_word(phys, DATALOAD, cacheable, isolated);
    if (exception_pending) return;

    /* Load target register with data. */
    reg[rt(instr)] = word;
}
846
 
847
/* LBU: load byte, zero-extended into rt. */
void
CPU::lbu_emulate(uint32 instr, uint32 pc)
{
    uint32 phys, virt, base, byte;
    int32 offset;
    bool cacheable, isolated;

    /* Calculate virtual address. */
    base = reg[rs(instr)];
    offset = s_immed(instr);
    virt = base + offset;

    /* Translate virtual address to physical address. */
    phys = cpzero->address_trans(virt, DATALOAD, &cacheable, &isolated);
    if (exception_pending) return;

    /* Fetch byte; the 0xff mask zero-extends it. */
    byte = ao_fetch_byte(phys, cacheable, isolated) & 0x000000ff;
    if (exception_pending) return;

    /* Load target register with data. */
    reg[rt(instr)] = byte;
}
870
 
871
/* LHU: load halfword, zero-extended into rt; unaligned address raises
 * AdEL. */
void
CPU::lhu_emulate(uint32 instr, uint32 pc)
{
    uint32 phys, virt, base, halfword;
    int32 offset;
    bool cacheable, isolated;

    /* Calculate virtual address. */
    base = reg[rs(instr)];
    offset = s_immed(instr);
    virt = base + offset;

    /* This virtual address must be halfword-aligned. */
    if (virt % 2 != 0) {
        exception(AdEL,DATALOAD);
        return;
    }

    /* Translate virtual address to physical address. */
    phys = cpzero->address_trans(virt, DATALOAD, &cacheable, &isolated);
    if (exception_pending) return;

    /* Fetch halfword; the 0xffff mask zero-extends it. */
    halfword = ao_fetch_halfword(phys, cacheable, isolated) & 0x0000ffff;
    if (exception_pending) return;

    /* Load target register with data. */
    reg[rt(instr)] = halfword;
}
900
 
901
/* LWR: load word right -- unaligned-load helper that fills the low
 * (right) bytes of rt from memory, leaving the rest of rt intact. */
void
CPU::lwr_emulate(uint32 instr, uint32 pc)
{
    uint32 phys, virt, wordvirt, base, memword;
    uint8 which_byte;
    int32 offset;
    bool cacheable, isolated;

    /* Calculate virtual address. */
    base = reg[rs(instr)];
    offset = s_immed(instr);
    virt = base + offset;
    /* We request the word containing the byte-address requested. */
    wordvirt = virt & ~0x03UL;

    /* Translate virtual address to physical address. */
    phys = cpzero->address_trans(wordvirt, DATALOAD, &cacheable, &isolated);
    if (exception_pending) return;

    /* Fetch word. */
    memword = ao_fetch_word(phys, DATALOAD, cacheable, isolated);
    if (exception_pending) return;

    /* Insert bytes into the right side of the register. */
    which_byte = virt & 0x03;
    reg[rt(instr)] = lwr(reg[rt(instr)], memword, which_byte);
}
928
 
929
/* SB: store the low byte of rt to memory. */
void
CPU::sb_emulate(uint32 instr, uint32 pc)
{
    uint32 phys, virt, base;
    uint8 data;
    int32 offset;
    bool cacheable, isolated;

    /* Load data from register. */
    data = reg[rt(instr)] & 0x0ff;

    /* Calculate virtual address. */
    base = reg[rs(instr)];
    offset = s_immed(instr);
    virt = base + offset;

    /* Translate virtual address to physical address. */
    phys = cpzero->address_trans(virt, DATASTORE, &cacheable, &isolated);
    if (exception_pending) return;

    /* Store byte. */
    ao_store_byte(phys, data, cacheable, isolated);
}
952
 
953
/* SH: store the low halfword of rt to memory; unaligned address raises
 * AdES. */
void
CPU::sh_emulate(uint32 instr, uint32 pc)
{
    uint32 phys, virt, base;
    uint16 data;
    int32 offset;
    bool cacheable, isolated;

    /* Load data from register. */
    data = reg[rt(instr)] & 0x0ffff;

    /* Calculate virtual address. */
    base = reg[rs(instr)];
    offset = s_immed(instr);
    virt = base + offset;

    /* This virtual address must be halfword-aligned. */
    if (virt % 2 != 0) {
        exception(AdES,DATASTORE);
        return;
    }

    /* Translate virtual address to physical address. */
    phys = cpzero->address_trans(virt, DATASTORE, &cacheable, &isolated);
    if (exception_pending) return;

    /* Store halfword. */
    ao_store_halfword(phys, data, cacheable, isolated);
}
982
 
983
/* SWL merge: write the high (left) bytes of REGVAL into MEMVAL at and
 * below byte OFFSET, preserving the other memory bytes. */
uint32
CPU::swl(uint32 regval, uint32 memval, uint8 offset)
{
    switch (offset) {
        case 0: return (memval & 0xffffff00) | (regval >> 24 & 0xff);
        case 1: return (memval & 0xffff0000) | (regval >> 16 & 0xffff);
        case 2: return (memval & 0xff000000) | (regval >> 8 & 0xffffff);
        case 3: return regval;
    }
    /* offset > 3 is impossible for a byte-in-word index. */
    fatal_error("Invalid offset %x passed to swl\n", offset);
}
994
 
995
/* Merge regval into the "right" (least-significant-first) part of memval
 * for SWR, where offset is the byte offset of the target address within
 * its word (0..3).
 * NOTE(review): no longer called -- swr_emulate() now hands the memory
 * system a store value plus a byte-enable mask instead of doing a
 * read-modify-write here.  As with swl(), control falls off the end
 * after fatal_error(); this assumes fatal_error() never returns. */
uint32
CPU::swr(uint32 regval, uint32 memval, uint8 offset)
{
    switch (offset) {
        case 0: return regval;
        case 1: return ((regval << 8) & 0xffffff00) | (memval & 0xff);
        case 2: return ((regval << 16) & 0xffff0000) | (memval & 0xffff);
        case 3: return ((regval << 24) & 0xff000000) | (memval & 0xffffff);
    }
    fatal_error("Invalid offset %x passed to swr\n", offset);
}
1006
 
1007
/* SWL: store the left (most-significant) part of rt into the word that
 * contains the target byte address.  Unlike stock vmips, this port does
 * not read-modify-write the memory word: it passes ao_store_word() a
 * right-aligned store value plus a per-byte write-enable mask so the
 * memory model can merge the bytes itself. */
void
CPU::swl_emulate(uint32 instr, uint32 pc)
{
    uint32 phys, virt, wordvirt, base, regdata, memdata;
    int32 offset;
    uint8 which_byte;
    bool cacheable, isolated;

    /* Load data from register. */
    regdata = reg[rt(instr)];

    /* Calculate virtual address. */
    base = reg[rs(instr)];
    offset = s_immed(instr);
    virt = base + offset;
    /* We request the word containing the byte-address requested. */
    wordvirt = virt & ~0x03UL;

    /* Translate virtual address to physical address. */
    phys = cpzero->address_trans(wordvirt, DATASTORE, &cacheable, &isolated);
    if (exception_pending) return;

    /* Read data from memory.  (Read-modify-write path retained from
     * stock vmips but disabled in this port.) */
    //memdata = ao_fetch_word(phys, DATASTORE, cacheable);
    //if (exception_pending) return;

    /* Write back the left side of the register. */
    which_byte = virt & 0x03UL;
    //ao_store_word(phys, swl(regdata, memdata, which_byte), cacheable);
    /* Store value: the high (4 - which_byte) bytes of rt, shifted down
     * so they occupy the low-order byte lanes. */
    uint32 store_value =
        (which_byte == 0)?  (regdata >> 24 & 0xff) :
        (which_byte == 1)?  (regdata >> 16 & 0xffff) :
        (which_byte == 2)?  (regdata >> 8 & 0xffffff) :
                            regdata;
    /* Byte-enable mask: one bit per byte lane of the word; the set bits
     * select the (which_byte + 1) lanes written by this SWL.
     * NOTE(review): lane<->bit ordering is defined by ao_store_word()
     * and the aoR3000 RTL -- confirm there before changing. */
    uint32 store_byteena =
        (which_byte == 0)?  0b0001 :
        (which_byte == 1)?  0b0011 :
        (which_byte == 2)?  0b0111 :
                            0b1111;
    ao_store_word(phys, store_value, cacheable, isolated, store_byteena);
}
1048
 
1049
/* SW: store the full word in rt at virtual address base(rs) +
 * sign-extended immediate.  The address must be word-aligned; a
 * misaligned address raises AdES before translation is attempted. */
void
CPU::sw_emulate(uint32 instr, uint32 pc)
{
    uint32 phys, virt, base, data;
    int32 offset;
    bool cacheable, isolated;

    /* Load data from register. */
    data = reg[rt(instr)];

    /* Calculate virtual address. */
    base = reg[rs(instr)];
    offset = s_immed(instr);
    virt = base + offset;

    /* This virtual address must be word-aligned. */
    if (virt % 4 != 0) {
        exception(AdES,DATASTORE);
        return;
    }

    /* Translate virtual address to physical address. */
    phys = cpzero->address_trans(virt, DATASTORE, &cacheable, &isolated);
    if (exception_pending) return;

    /* Store word. */
    ao_store_word(phys, data, cacheable, isolated);
}
1077
 
1078
/* SWR: store the right (least-significant) part of rt into the word
 * that contains the target byte address.  Mirror image of SWL: the
 * store value is shifted up toward the high byte lanes, and the
 * byte-enable mask selects the (4 - which_byte) high-order lanes. */
void
CPU::swr_emulate(uint32 instr, uint32 pc)
{
    uint32 phys, virt, wordvirt, base, regdata, memdata;
    int32 offset;
    uint8 which_byte;
    bool cacheable, isolated;

    /* Load data from register. */
    regdata = reg[rt(instr)];

    /* Calculate virtual address. */
    base = reg[rs(instr)];
    offset = s_immed(instr);
    virt = base + offset;
    /* We request the word containing the byte-address requested. */
    wordvirt = virt & ~0x03UL;

    /* Translate virtual address to physical address. */
    phys = cpzero->address_trans(wordvirt, DATASTORE, &cacheable, &isolated);
    if (exception_pending) return;

    /* Read data from memory.  (Read-modify-write path retained from
     * stock vmips but disabled in this port.) */
    //memdata = ao_fetch_word(phys, DATASTORE, cacheable);
    //if (exception_pending) return;

    /* Write back the right side of the register. */
    which_byte = virt & 0x03UL;
    //ao_store_word(phys, swr(regdata, memdata, which_byte), cacheable);

    /* Store value: the low (4 - which_byte) bytes of rt, shifted up to
     * the high-order byte lanes. */
    uint32 store_value =
        (which_byte == 0)?  regdata :
        (which_byte == 1)?  ((regdata << 8) & 0xffffff00) :
        (which_byte == 2)?  ((regdata << 16) & 0xffff0000) :
                            ((regdata << 24) & 0xff000000);
    /* Byte-enable mask: set bits select the lanes written by this SWR.
     * NOTE(review): lane<->bit ordering is defined by ao_store_word()
     * and the aoR3000 RTL -- confirm there before changing. */
    uint32 store_byteena =
        (which_byte == 0)?  0b1111 :
        (which_byte == 1)?  0b1110 :
        (which_byte == 2)?  0b1100 :
                            0b1000;
    ao_store_word(phys, store_value, cacheable, isolated, store_byteena);
}
1120
 
1121
/* Coprocessor load/store opcodes (LWC1-LWC3, SWC1-SWC3).  None of
 * coprocessors 1-3 is implemented here, so each handler just forwards
 * to cop_unimpl(), which presumably raises a Coprocessor Unusable
 * exception -- see cop_unimpl() for the exact behavior. */
void
CPU::lwc1_emulate(uint32 instr, uint32 pc)
{
    cop_unimpl (1, instr, pc);
}

void
CPU::lwc2_emulate(uint32 instr, uint32 pc)
{
    cop_unimpl (2, instr, pc);
}

void
CPU::lwc3_emulate(uint32 instr, uint32 pc)
{
    cop_unimpl (3, instr, pc);
}

void
CPU::swc1_emulate(uint32 instr, uint32 pc)
{
    cop_unimpl (1, instr, pc);
}

void
CPU::swc2_emulate(uint32 instr, uint32 pc)
{
    cop_unimpl (2, instr, pc);
}

void
CPU::swc3_emulate(uint32 instr, uint32 pc)
{
    cop_unimpl (3, instr, pc);
}
1156
 
1157
void
1158
CPU::sll_emulate(uint32 instr, uint32 pc)
1159
{
1160
    reg[rd(instr)] = reg[rt(instr)] << shamt(instr);
1161
}
1162
 
1163
/* Logical (zero-fill) right shift of a by b bits, for b in [0, 32].
 *
 * Rewritten to avoid undefined/implementation-defined behavior in the
 * original: it right-shifted a signed value (implementation-defined for
 * negative a) and computed (1 << (32 - b)), which overflows a signed
 * int when b == 1.  Shifting as unsigned produces the same zero-filled
 * result for b in [1, 31]; the b == 0 and b == 32 special cases are
 * unchanged. */
int32
srl(int32 a, int32 b)
{
    if (b == 0) {
        return a;
    } else if (b == 32) {
        return 0;
    } else {
        return (int32)(((uint32)a) >> b);
    }
}
1174
 
1175
/* Arithmetic (sign-fill) right shift of a by b bits, for b in [0, 31].
 *
 * Rewritten to avoid undefined/implementation-defined behavior in the
 * original: (a >> b) on a negative signed value is implementation-
 * defined, and ((1 << b) - 1) overflows a signed int when b == 31.
 * The unsigned shift plus explicit sign-fill computes the identical
 * result on all conforming compilers. */
int32
sra(int32 a, int32 b)
{
    if (b == 0) {
        return a;
    } else {
        uint32 result = ((uint32)a) >> b;
        if (a < 0)
            result |= ~(((uint32)0xffffffff) >> b); /* replicate the sign bit */
        return (int32)result;
    }
}
1184
 
1185
/* SRL: rd <- rt logical-shifted right by the immediate shift amount. */
void
CPU::srl_emulate(uint32 instr, uint32 pc)
{
    reg[rd(instr)] = srl(reg[rt(instr)], shamt(instr));
}

/* SRA: rd <- rt arithmetic-shifted right by the immediate shift amount. */
void
CPU::sra_emulate(uint32 instr, uint32 pc)
{
    reg[rd(instr)] = sra(reg[rt(instr)], shamt(instr));
}

/* SLLV: rd <- rt shifted left by the low 5 bits of rs. */
void
CPU::sllv_emulate(uint32 instr, uint32 pc)
{
    reg[rd(instr)] = reg[rt(instr)] << (reg[rs(instr)] & 0x01f);
}

/* SRLV: rd <- rt logical-shifted right by the low 5 bits of rs. */
void
CPU::srlv_emulate(uint32 instr, uint32 pc)
{
    reg[rd(instr)] = srl(reg[rt(instr)], reg[rs(instr)] & 0x01f);
}

/* SRAV: rd <- rt arithmetic-shifted right by the low 5 bits of rs. */
void
CPU::srav_emulate(uint32 instr, uint32 pc)
{
    reg[rd(instr)] = sra(reg[rt(instr)], reg[rs(instr)] & 0x01f);
}
1214
 
1215
void
1216
CPU::jr_emulate(uint32 instr, uint32 pc)
1217
{
1218
    if (reg[rd(instr)] != 0) {
1219
        exception(RI);
1220
        return;
1221
    }
1222
    control_transfer (reg[rs(instr)]);
1223
}
1224
 
1225
/* JALR: jump to the address in rs and link the return address into rd.
 * rs is read BEFORE rd is written, so rd == rs uses the old value of
 * the register as the jump target. */
void
CPU::jalr_emulate(uint32 instr, uint32 pc)
{
    control_transfer (reg[rs(instr)]);
    /* RA gets addr of instr after delay slot (2 words after this one). */
    reg[rd(instr)] = pc + 8;
}
1232
 
1233
/* SYSCALL: unconditionally raise the System Call exception. */
void
CPU::syscall_emulate(uint32 instr, uint32 pc)
{
    exception(Sys);
}

/* BREAK: unconditionally raise the Breakpoint exception. */
void
CPU::break_emulate(uint32 instr, uint32 pc)
{
    exception(Bp);
}
1244
 
1245
/* MFHI: rd <- HI. */
void
CPU::mfhi_emulate(uint32 instr, uint32 pc)
{
    reg[rd(instr)] = hi;
}

/* MTHI: HI <- rs.  A valid encoding has a zero rd field; anything else
 * is treated as a reserved instruction. */
void
CPU::mthi_emulate(uint32 instr, uint32 pc)
{
    if (rd(instr) != 0) {
        exception(RI);
        return;
    }
    hi = reg[rs(instr)];
}

/* MFLO: rd <- LO. */
void
CPU::mflo_emulate(uint32 instr, uint32 pc)
{
    reg[rd(instr)] = lo;
}

/* MTLO: LO <- rs.  Same zero-rd-field encoding check as MTHI. */
void
CPU::mtlo_emulate(uint32 instr, uint32 pc)
{
    if (rd(instr) != 0) {
        exception(RI);
        return;
    }
    lo = reg[rs(instr)];
}
1276
 
1277
/* MULT: HI:LO <- rs * rt (signed 64-bit product).  A valid encoding
 * has a zero rd field; anything else raises a reserved-instruction
 * exception. */
void
CPU::mult_emulate(uint32 instr, uint32 pc)
{
    if (rd(instr) != 0) {
        exception(RI);
        return;
    }
    mult64s(&hi, &lo, reg[rs(instr)], reg[rt(instr)]);
}
1286
 
1287
void
1288
CPU::mult64(uint32 *hi, uint32 *lo, uint32 n, uint32 m)
1289
{
1290
    uint64 result;
1291
    result = ((uint64)n) * ((uint64)m);
1292
    *hi = (uint32) (result >> 32);
1293
    *lo = (uint32) result;
1294
}
1295
 
1296
void
1297
CPU::mult64s(uint32 *hi, uint32 *lo, int32 n, int32 m)
1298
{
1299
    int64 result;
1300
    result = ((int64)n) * ((int64)m);
1301
    *hi = (uint32) (result >> 32);
1302
    *lo = (uint32) result;
1303
}
1304
 
1305
/* MULTU: HI:LO <- rs * rt (unsigned 64-bit product).  A valid encoding
 * has a zero rd field; anything else raises a reserved-instruction
 * exception. */
void
CPU::multu_emulate(uint32 instr, uint32 pc)
{
    if (rd(instr) != 0) {
        exception(RI);
        return;
    }
    mult64(&hi, &lo, reg[rs(instr)], reg[rt(instr)]);
}
1314
 
1315
void
1316
CPU::div_emulate(uint32 instr, uint32 pc)
1317
{
1318
    int32 signed_rs = (int32)reg[rs(instr)];
1319
    int32 signed_rt = (int32)reg[rt(instr)];
1320
 
1321
    if(signed_rt == 0) {
1322
        lo = (signed_rs >= 0)? 0xFFFFFFFF : 0x00000001;
1323
        hi = signed_rs;
1324
    }
1325
    else {
1326
        lo = signed_rs / signed_rt;
1327
        hi = signed_rs % signed_rt;
1328
    }
1329
}
1330
 
1331
void
1332
CPU::divu_emulate(uint32 instr, uint32 pc)
1333
{
1334
    if(reg[rt(instr)] == 0) {
1335
        lo = 0xFFFFFFFF;
1336
        hi = reg[rs(instr)];
1337
    }
1338
    else {
1339
        lo = reg[rs(instr)] / reg[rt(instr)];
1340
        hi = reg[rs(instr)] % reg[rt(instr)];
1341
    }
1342
}
1343
 
1344
void
1345
CPU::add_emulate(uint32 instr, uint32 pc)
1346
{
1347
    int32 a, b, sum;
1348
    a = (int32)reg[rs(instr)];
1349
    b = (int32)reg[rt(instr)];
1350
    sum = a + b;
1351
    if ((a < 0 && b < 0 && !(sum < 0)) || (a >= 0 && b >= 0 && !(sum >= 0))) {
1352
        exception(Ov);
1353
        return;
1354
    } else {
1355
        reg[rd(instr)] = (uint32)sum;
1356
    }
1357
}
1358
 
1359
void
1360
CPU::addu_emulate(uint32 instr, uint32 pc)
1361
{
1362
    int32 a, b, sum;
1363
    a = (int32)reg[rs(instr)];
1364
    b = (int32)reg[rt(instr)];
1365
    sum = a + b;
1366
    reg[rd(instr)] = (uint32)sum;
1367
}
1368
 
1369
void
1370
CPU::sub_emulate(uint32 instr, uint32 pc)
1371
{
1372
    int32 a, b, diff;
1373
    a = (int32)reg[rs(instr)];
1374
    b = (int32)reg[rt(instr)];
1375
    diff = a - b;
1376
    if ((a < 0 && !(b < 0) && !(diff < 0)) || (!(a < 0) && b < 0 && diff < 0)) {
1377
        exception(Ov);
1378
        return;
1379
    } else {
1380
        reg[rd(instr)] = (uint32)diff;
1381
    }
1382
}
1383
 
1384
void
1385
CPU::subu_emulate(uint32 instr, uint32 pc)
1386
{
1387
    int32 a, b, diff;
1388
    a = (int32)reg[rs(instr)];
1389
    b = (int32)reg[rt(instr)];
1390
    diff = a - b;
1391
    reg[rd(instr)] = (uint32)diff;
1392
}
1393
 
1394
void
1395
CPU::and_emulate(uint32 instr, uint32 pc)
1396
{
1397
    reg[rd(instr)] = reg[rs(instr)] & reg[rt(instr)];
1398
}
1399
 
1400
void
1401
CPU::or_emulate(uint32 instr, uint32 pc)
1402
{
1403
    reg[rd(instr)] = reg[rs(instr)] | reg[rt(instr)];
1404
}
1405
 
1406
void
1407
CPU::xor_emulate(uint32 instr, uint32 pc)
1408
{
1409
    reg[rd(instr)] = reg[rs(instr)] ^ reg[rt(instr)];
1410
}
1411
 
1412
void
1413
CPU::nor_emulate(uint32 instr, uint32 pc)
1414
{
1415
    reg[rd(instr)] = ~(reg[rs(instr)] | reg[rt(instr)]);
1416
}
1417
 
1418
void
1419
CPU::slt_emulate(uint32 instr, uint32 pc)
1420
{
1421
    int32 s_rs = (int32)reg[rs(instr)];
1422
    int32 s_rt = (int32)reg[rt(instr)];
1423
    if (s_rs < s_rt) {
1424
        reg[rd(instr)] = 1;
1425
    } else {
1426
        reg[rd(instr)] = 0;
1427
    }
1428
}
1429
 
1430
void
1431
CPU::sltu_emulate(uint32 instr, uint32 pc)
1432
{
1433
    if (reg[rs(instr)] < reg[rt(instr)]) {
1434
        reg[rd(instr)] = 1;
1435
    } else {
1436
        reg[rd(instr)] = 0;
1437
    }
1438
}
1439
 
1440
/* BLTZ: branch (taken after the delay slot) if rs is negative. */
void
CPU::bltz_emulate(uint32 instr, uint32 pc)
{
    if ((int32)reg[rs(instr)] < 0)
        branch(instr, pc);
}

/* BGEZ: branch (taken after the delay slot) if rs is non-negative. */
void
CPU::bgez_emulate(uint32 instr, uint32 pc)
{
    if ((int32)reg[rs(instr)] >= 0)
        branch(instr, pc);
}
1453
 
1454
/* As with JAL, BLTZAL and BGEZAL cause RA to get the address of the
 * instruction two words after the current one (pc + 8).  RA is written
 * unconditionally, BEFORE the branch condition is evaluated, so using
 * ra as the source register tests the freshly written link value.
 * NOTE(review): rs == ra is architecturally "unpredictable" on MIPS;
 * this is one valid modeling of it.
 */
void
CPU::bltzal_emulate(uint32 instr, uint32 pc)
{
    reg[reg_ra] = pc + 8;
    if ((int32)reg[rs(instr)] < 0)
        branch(instr, pc);
}

void
CPU::bgezal_emulate(uint32 instr, uint32 pc)
{
    reg[reg_ra] = pc + 8;
    if ((int32)reg[rs(instr)] >= 0)
        branch(instr, pc);
}
1472
 
1473
/* Reserved instruction: the common handler installed in the dispatch
 * tables for every opcode/funct combination that does not decode to an
 * implemented instruction.  Raises the RI exception. */
void
CPU::RI_emulate(uint32 instr, uint32 pc)
{
    exception(RI);
}
1479
 
1480
/* dispatching */
1481
/* Execute a single instruction and advance the CPU state machine.
 * Returns 0 on normal completion, 1 if the instruction raised an
 * exception, and 2 if a pending interrupt was taken afterwards.
 * When debug is true, a one-line trace plus the register file is
 * printed for each instruction. */
int
CPU::step(bool debug)
{
    // Table of emulation functions, indexed by the 6-bit major opcode.
    // Opcode 0 (SPECIAL) and opcode 1 (REGIMM) dispatch further on the
    // funct and rt fields respectively.
    static const emulate_funptr opcodeJumpTable[] = {
        &CPU::funct_emulate, &CPU::regimm_emulate,  &CPU::j_emulate,
        &CPU::jal_emulate,   &CPU::beq_emulate,     &CPU::bne_emulate,
        &CPU::blez_emulate,  &CPU::bgtz_emulate,    &CPU::addi_emulate,
        &CPU::addiu_emulate, &CPU::slti_emulate,    &CPU::sltiu_emulate,
        &CPU::andi_emulate,  &CPU::ori_emulate,     &CPU::xori_emulate,
        &CPU::lui_emulate,   &CPU::cpzero_emulate,  &CPU::cpone_emulate,
        &CPU::cptwo_emulate, &CPU::cpthree_emulate, &CPU::RI_emulate,
        &CPU::RI_emulate,    &CPU::RI_emulate,      &CPU::RI_emulate,
        &CPU::RI_emulate,    &CPU::RI_emulate,      &CPU::RI_emulate,
        &CPU::RI_emulate,    &CPU::RI_emulate,      &CPU::RI_emulate,
        &CPU::RI_emulate,    &CPU::RI_emulate,      &CPU::lb_emulate,
        &CPU::lh_emulate,    &CPU::lwl_emulate,     &CPU::lw_emulate,
        &CPU::lbu_emulate,   &CPU::lhu_emulate,     &CPU::lwr_emulate,
        &CPU::RI_emulate,    &CPU::sb_emulate,      &CPU::sh_emulate,
        &CPU::swl_emulate,   &CPU::sw_emulate,      &CPU::RI_emulate,
        &CPU::RI_emulate,    &CPU::swr_emulate,     &CPU::RI_emulate,
        &CPU::RI_emulate,    &CPU::lwc1_emulate,    &CPU::lwc2_emulate,
        &CPU::lwc3_emulate,  &CPU::RI_emulate,      &CPU::RI_emulate,
        &CPU::RI_emulate,    &CPU::RI_emulate,      &CPU::RI_emulate,
        &CPU::swc1_emulate,  &CPU::swc2_emulate,    &CPU::swc3_emulate,
        &CPU::RI_emulate,    &CPU::RI_emulate,      &CPU::RI_emulate,
        &CPU::RI_emulate
    };

    // Clear exception_pending flag if it was set by a prior instruction.
    exception_pending = false;

    // Decrement Random register every clock cycle.
    // Disabled in this port: the aoR3000 model changes Random only
    // after tlbwr.
    //cpzero->adjust_random();

    // Save address of instruction responsible for exceptions which may occur.
    // (A delay-slot instruction's EPC is the branch, saved previously.)
    if (delay_state != DELAYSLOT)
        next_epc = pc;

    bool cacheable, isolated;
    uint32 real_pc;

    // AdE: instruction fetches must be word-aligned.
    if (pc % 4 != 0) {
        exception(AdEL,INSTFETCH);
        goto out;
    }

    // Get physical address of next instruction.
    real_pc = cpzero->address_trans(pc,INSTFETCH,&cacheable,&isolated);
    if (exception_pending) {
        goto out;
    }

    // Fetch next instruction.
    instr = ao_fetch_word(real_pc,INSTFETCH,cacheable,isolated);
    if (exception_pending) {
        goto out;
    }

    // Interrupt check moved below: interrupts are sampled after the
    // instruction completes, not before it executes.

    // Emulate the instruction by jumping to the appropriate emulation method.

// Debug trace: instruction count, major-opcode table index, raw
// instruction word, PC, then registers r1..r31 (r0 is always zero).
static uint32 instr_cnt = 0;
if(debug) {
    printf("[%d] table: %d instr: %08x pc: %08x\n", instr_cnt, opcode(instr), instr, pc);
    for(int i=1; i<32; i++) printf("%08x ", reg[i]); printf("\n");
}
instr_cnt++;

    (this->*opcodeJumpTable[opcode(instr)])(instr, pc);

out:
    // Force register zero to contain zero.
    reg[reg_zero] = 0;

    // If an exception is pending, then the PC has already been changed to
    // contain the exception vector.  Return now, so that we don't clobber it.
    if (exception_pending) {
        // Instruction at beginning of exception handler is NOT in delay slot,
        // no matter what the last instruction was.
        delay_state = NORMAL;
        return 1;
    }

    // Recall the delay_state values: 0=NORMAL, 1=DELAYING, 2=DELAYSLOT.
    // This is what the delay_state values mean (at this point in the code):
    // DELAYING: The last instruction caused a branch to be taken.
    //  The next instruction is in the delay slot.
    //  The next instruction EPC will be PC - 4.
    // DELAYSLOT: The last instruction was executed in a delay slot.
    //  The next instruction is on the other end of the branch.
    //  The next instruction EPC will be PC.
    // NORMAL: No branch was executed; next instruction is at PC + 4.
    //  Next instruction EPC is PC.

    // Update the pc and delay_state values.
    pc += 4;
    was_delayed_transfer = false;
    if (delay_state == DELAYSLOT) {
        // Delay slot just executed: redirect to the branch target and
        // remember the fall-through address for the testbench.
        was_delayed_transfer = true;
        was_delayed_pc = pc;

        pc = delay_pc;
    }
    delay_state = (delay_state << 1) & 0x03; // 0->0, 1->2, 2->0

    // Check for a (hardware or software) interrupt.
    if (cpzero->interrupt_pending()) {
        if(delay_state != DELAYSLOT) next_epc = pc;

        exception(Int);
        delay_state = NORMAL;
        return 2;
    }

    return 0;
}
1601
 
1602
/* SPECIAL (major opcode 0): dispatch on the 6-bit funct field.
 * Unassigned funct values route to RI_emulate. */
void
CPU::funct_emulate(uint32 instr, uint32 pc)
{
    static const emulate_funptr functJumpTable[] = {
        &CPU::sll_emulate,     &CPU::RI_emulate,
        &CPU::srl_emulate,     &CPU::sra_emulate,
        &CPU::sllv_emulate,    &CPU::RI_emulate,
        &CPU::srlv_emulate,    &CPU::srav_emulate,
        &CPU::jr_emulate,      &CPU::jalr_emulate,
        &CPU::RI_emulate,      &CPU::RI_emulate,
        &CPU::syscall_emulate, &CPU::break_emulate,
        &CPU::RI_emulate,      &CPU::RI_emulate,
        &CPU::mfhi_emulate,    &CPU::mthi_emulate,
        &CPU::mflo_emulate,    &CPU::mtlo_emulate,
        &CPU::RI_emulate,      &CPU::RI_emulate,
        &CPU::RI_emulate,      &CPU::RI_emulate,
        &CPU::mult_emulate,    &CPU::multu_emulate,
        &CPU::div_emulate,     &CPU::divu_emulate,
        &CPU::RI_emulate,      &CPU::RI_emulate,
        &CPU::RI_emulate,      &CPU::RI_emulate,
        &CPU::add_emulate,     &CPU::addu_emulate,
        &CPU::sub_emulate,     &CPU::subu_emulate,
        &CPU::and_emulate,     &CPU::or_emulate,
        &CPU::xor_emulate,     &CPU::nor_emulate,
        &CPU::RI_emulate,      &CPU::RI_emulate,
        &CPU::slt_emulate,     &CPU::sltu_emulate,
        &CPU::RI_emulate,      &CPU::RI_emulate,
        &CPU::RI_emulate,      &CPU::RI_emulate,
        &CPU::RI_emulate,      &CPU::RI_emulate,
        &CPU::RI_emulate,      &CPU::RI_emulate,
        &CPU::RI_emulate,      &CPU::RI_emulate,
        &CPU::RI_emulate,      &CPU::RI_emulate,
        &CPU::RI_emulate,      &CPU::RI_emulate,
        &CPU::RI_emulate,      &CPU::RI_emulate,
        &CPU::RI_emulate,      &CPU::RI_emulate,
        &CPU::RI_emulate,      &CPU::RI_emulate
    };
    (this->*functJumpTable[funct(instr)])(instr, pc);
}
1641
 
1642
void
1643
CPU::regimm_emulate(uint32 instr, uint32 pc)
1644
{
1645
    switch(rt(instr))
1646
    {
1647
        case 0: bltz_emulate(instr, pc); break;
1648
        case 1: bgez_emulate(instr, pc); break;
1649
        case 16: bltzal_emulate(instr, pc); break;
1650
        case 17: bgezal_emulate(instr, pc); break;
1651
        default: exception(RI); break; /* reserved instruction */
1652
    }
1653
}
1654
 
1655
//------------------------------------------------------------------------------
1656
//------------------------------------------------------------------------------
1657
//------------------------------------------------------------------------------

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.