OpenCores Subversion repository or1k: or1ksim/cpu/or32/dyn-rec.c (tag rel-0-3-0-rc1, rev 1765)
https://opencores.org/ocsvn/or1k/or1k/trunk

/* dyn-rec.c -- Dynamic recompiler implementation for or32
   Copyright (C) 2005 György `nog' Jeney, nog@sdf.lonestar.org

This file is part of OpenRISC 1000 Architectural Simulator.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <signal.h>
#include <errno.h>
#include <execinfo.h>

#include "config.h"

#ifdef HAVE_INTTYPES_H
#include <inttypes.h>
#endif

#include "port.h"
#include "arch.h"
#include "immu.h"
#include "abstract.h"
#include "opcode/or32.h"
#include "spr-defs.h"
#include "execute.h"
#include "except.h"
#include "sim-config.h"
#include "sched.h"

#include "i386-regs.h"

#include "def-op-t.h"
#include "dyn-rec.h"
#include "gen-ops.h"

#include "op-support.h"

/* NOTE: All openrisc (or) addresses in this file are *PHYSICAL* addresses */

/* FIXME: Optimise sorted list adding */

typedef void (*generic_gen_op)(struct op_queue *opq, int end);
typedef void (*imm_gen_op)(struct op_queue *opq, int end, uorreg_t imm);

void gen_l_invalid(struct op_queue *opq, int param_t[3], int delay_slot);

/* ttg->temporary to gpr */
DEF_GPR_OP(generic_gen_op, gen_op_move_gpr_t, gen_op_ttg_gpr);
/* gtt->gpr to temporary */
DEF_GPR_OP(generic_gen_op, gen_op_move_t_gpr, gen_op_gtt_gpr);

DEF_1T_OP(imm_gen_op, calc_insn_ea_table, gen_op_calc_insn_ea);

/* Linker stubs.  These allow the linker to link in op.o.  The relocations
 * that the linker does for these will be irrelevant anyway, since we patch
 * the relocations during recompilation. */
uorreg_t __op_param1;
uorreg_t __op_param2;
uorreg_t __op_param3;

/* The number of bytes that a dynamically recompiled page should be enlarged by */
#define RECED_PAGE_ENLARGE_BY 51200

/* The number of entries that the micro operations array in op_queue should be
 * enlarged by */
#define OPS_ENLARGE_BY 5

#define T_NONE (-1)

/* Temporary is used as a source operand */
#define TFLAG_SRC 1
/* Temporary is used as a destination operand */
#define TFLAG_DST 2
/* Temporary has been saved to permanent storage */
#define TFLAG_SAVED 4
/* Temporary contains the value of the register before the instruction
 * executes (either by an explicit reg->t move or implicitly by being left
 * over from a previous instruction) */
#define TFLAG_SOURCED 8
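/* Note on the temporary-register bookkeeping used below (a summary of how the
 * code in this file behaves): each op_queue entry tracks NUM_T_REGS host
 * temporaries in reg_t[]/tflags[].  reg_t[t] holds the number of the OR1K GPR
 * currently cached in temporary t, with the out-of-range value 32 meaning
 * "no GPR cached"; the TFLAG_* bits record how that temporary was used. */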
 
/* FIXME: Put this into some header */
extern int do_stats;

static int sigsegv_state = 0;
static void *sigsegv_addr = NULL;

void dyn_ret_stack_prot(void);

void dyn_sigsegv_debug(int u, siginfo_t *siginf, void *dat)
{
  struct dyn_page *dp;
  FILE *f;
  char filen[18]; /* 18 == strlen("or_page.%08x") + 1 */
  int i;
  struct sigcontext *sigc = dat;

  if(!sigsegv_state) {
    sigsegv_addr = siginf->si_addr;
  } else {
    fprintf(stderr, "Nested SIGSEGV occurred, dumping next chunk of info\n");
    sigsegv_state++;
  }

  /* First dump all the data that does not need dereferencing to get at */
  switch(sigsegv_state) {
  case 0:
    fflush(stderr);
    fprintf(stderr, "Segmentation fault on access to %p at 0x%08lx, (or address: 0x%"PRIxADDR")\n\n",
            sigsegv_addr, sigc->eip, cpu_state.pc);
    sigsegv_state++;
  case 1:
    /* Run through the recompiled pages, dumping them to disk as we go */
    for(i = 0; i < (2 << (32 - immu_state->pagesize_log2)); i++) {
      dp = cpu_state.dyn_pages[i];
      if(!dp)
        continue;
      fprintf(stderr, "Dumping%s page 0x%"PRIxADDR" recompiled to %p (len: %u) to disk\n",
             dp->dirty ? " dirty" : "", dp->or_page, dp->host_page,
             dp->host_len);
      fflush(stdout);

      sprintf(filen, "or_page.%"PRIxADDR, dp->or_page);
      if(!(f = fopen(filen, "w"))) {
        fprintf(stderr, "Unable to open %s to dump the recompiled page to: %s\n",
                filen, strerror(errno));
        continue;
      }
      if(fwrite(dp->host_page, dp->host_len, 1, f) < 1)
        fprintf(stderr, "Unable to write recompiled data to file: %s\n",
                strerror(errno));

      fclose(f);
    }
    sigsegv_state++;
  case 2:
    sim_done();
  }
}

struct dyn_page *new_dp(oraddr_t page)
{
  struct dyn_page *dp = malloc(sizeof(struct dyn_page));
  dp->or_page = IADDR_PAGE(page);

  dp->locs = malloc(sizeof(void *) * (immu_state->pagesize / 4));

  dp->host_len = 0;
  dp->host_page = NULL;
  dp->dirty = 1;

  if(do_stats) {
    dp->insns = malloc(immu_state->pagesize);
    dp->insn_indexs = malloc(sizeof(unsigned int) * (immu_state->pagesize / 4));
  }

  cpu_state.dyn_pages[dp->or_page >> immu_state->pagesize_log2] = dp;
  return dp;
}
 
void dyn_main(void)
{
  struct dyn_page *target_dp;
  oraddr_t phys_page;

  setjmp(cpu_state.excpt_loc);
  for(;;) {
    phys_page = immu_translate(cpu_state.pc);

/*
    printf("Recompiled code jumping out to %"PRIxADDR" from %"PRIxADDR"\n",
           phys_page, cpu_state.sprs[SPR_PPC] - 4);
*/

    /* immu_translate() adds the hit delay to runtime.sim.mem_cycles but we add
     * it to the cycles when the instruction is executed, so if we don't reset
     * it now it will produce wrong results */
    runtime.sim.mem_cycles = 0;

    target_dp = cpu_state.dyn_pages[phys_page >> immu_state->pagesize_log2];

    if(!target_dp)
      target_dp = new_dp(phys_page);

    /* Since writes to the 0x0-0xff range do not mark a page dirty, recompile
     * the 0x0 page if the jump is to that location */
    if(phys_page < 0x100)
      target_dp->dirty = 1;

    if(target_dp->dirty)
      recompile_page(target_dp);

    cpu_state.curr_page = target_dp;

    /* FIXME: If the page is backed by more than one type of memory, this will
     * produce wrong results */
    cpu_state.cycles_dec = target_dp->delayr;
    if(cpu_state.sprs[SPR_SR] & SPR_SR_IME)
      /* Add the mmu hit delay to the cycle counter */
      cpu_state.cycles_dec -= immu_state->hitdelay;

    /* FIXME: ebp, ebx, esi and edi are expected to be preserved across function
     * calls but the recompiled code trashes them... */
    enter_dyn_code(phys_page, target_dp);
  }
}
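/* In short, dyn_main() is the recompiler's dispatch loop: translate the
 * current PC, look up (or allocate) the dyn_page for that physical page,
 * recompile it if it is dirty, then jump into the generated host code via
 * enter_dyn_code().  Exception handling appears to longjmp() back to the
 * setjmp(cpu_state.excpt_loc) above. */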
 
static void immu_retranslate(void *dat)
{
  int got_en_dis = (int)dat;
  immu_translate(cpu_state.pc);
  runtime.sim.mem_cycles = 0;

  /* Only update the cycle decrementer if the mmu got enabled or disabled */
  if(got_en_dis == IMMU_GOT_ENABLED)
    /* Add the mmu hit delay to the cycle counter */
    cpu_state.cycles_dec = cpu_state.curr_page->delayr - immu_state->hitdelay;
  else if(got_en_dis == IMMU_GOT_DISABLED)
    cpu_state.cycles_dec = cpu_state.curr_page->delayr;
}

/* This is called whenever the immu is either enabled/disabled or reconfigured
 * while enabled.  It checks if an itlb miss would occur and updates the immu
 * hit delay counter */
void recheck_immu(int got_en_dis)
{
  oraddr_t addr;

  if(cpu_state.delay_insn)
    addr = cpu_state.pc_delay;
  else
    addr = cpu_state.pc + 4;

  if(IADDR_PAGE(cpu_state.pc) == IADDR_PAGE(addr))
    /* Schedule a job to do immu_translate() */
    SCHED_ADD(immu_retranslate, (void *)got_en_dis, 0);
}

/* Runs the scheduler.  Called from except_handler (and dirtyfy_page below) */
void run_sched_out_of_line(void)
{
  oraddr_t off = (cpu_state.pc & immu_state->page_offset_mask) >> 2;

  if(do_stats) {
    cpu_state.iqueue.insn_addr = cpu_state.pc;
    cpu_state.iqueue.insn = cpu_state.curr_page->insns[off];
    cpu_state.iqueue.insn_index = cpu_state.curr_page->insn_indexs[off];
    runtime.cpu.instructions++;
    analysis(&cpu_state.iqueue);
  }

  /* Run the scheduler */
  scheduler.job_queue->time += cpu_state.cycles_dec;
  runtime.sim.cycles -= cpu_state.cycles_dec;

  op_join_mem_cycles();
  if(scheduler.job_queue->time <= 0)
    do_scheduler();
}

/* Marks a page as dirty */
static void dirtyfy_page(struct dyn_page *dp)
{
  oraddr_t check;

  printf("Dirtying page 0x%"PRIxADDR"\n", dp->or_page);

  dp->dirty = 1;

  /* If the execution is currently in the page that was touched then recompile
   * it now and jump back to the point of execution */
  check = cpu_state.delay_insn ? cpu_state.pc_delay : cpu_state.pc + 4;
  if(IADDR_PAGE(check) == dp->or_page) {
    run_sched_out_of_line();
    recompile_page(dp);

    cpu_state.delay_insn = 0;

    /* Jump out to the next instruction */
    do_jump(check);
  }
}

/* Checks to see if a write happened to a recompiled page.  If so, marks it as
 * dirty */
void dyn_checkwrite(oraddr_t addr)
{
  /* FIXME: Do this with mprotect() */
  struct dyn_page *dp = cpu_state.dyn_pages[addr >> immu_state->pagesize_log2];

  /* Since the locations 0x0-0xff are nearly always written to in an exception
   * handler, ignore any writes to these locations.  If code ends up jumping
   * out there, we'll recompile when the jump actually happens. */
  if((addr > 0x100) && dp && !dp->dirty)
    dirtyfy_page(dp);
}
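/* dyn_checkwrite() is the hook that catches self-modifying code: writes to
 * simulated memory are checked against the recompiled-page table and the
 * affected page is re-marked dirty, so it gets recompiled before it is next
 * executed (see dirtyfy_page() above). */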
 
/* Moves the temporary t to its permanent storage if it has been used as a
 * destination register */
static void ship_t_out(struct op_queue *opq, unsigned int t)
{
  unsigned int gpr = opq->reg_t[t];

  for(; opq; opq = opq->prev) {
    if(opq->reg_t[t] != gpr)
      return;
    if((opq->tflags[t] & TFLAG_DST) && !(opq->tflags[t] & TFLAG_SAVED)) {
      opq->tflags[t] |= TFLAG_SAVED;

      /* FIXME: Check if this is still necessary */
      /* Before taking the temporaries out, temporarily remove the op_do_sched
       * operation such that dyn_page->ts_bound shall be correct before the
       * scheduler runs */
      if(opq->num_ops && (opq->ops[opq->num_ops - 1] == op_do_sched_indx)) {
        opq->num_ops--;
        gen_op_move_gpr_t[t][gpr](opq, 1);
        gen_op_do_sched(opq, 1);
        return;
      }

      gen_op_move_gpr_t[t][gpr](opq, 1);

      return;
    }
  }
}

static void ship_gprs_out_t(struct op_queue *opq)
{
  int i;

  if(!opq)
    return;

  for(i = 0; i < NUM_T_REGS; i++) {
    if(opq->reg_t[i] < 32)
      /* Ship each temporary out in the last opq that actually touched it */
      ship_t_out(opq, i);
  }
}

/* FIXME: Look at the following instructions to make a better guess at which
 * temporary to return */
static int find_t(struct op_queue *opq, unsigned int reg)
{
  int i, j, t = -1;

  for(i = 0; i < NUM_T_REGS; i++) {
    if(opq->reg_t[i] == reg)
      return i;

    /* Ok, we have found an as-yet unused temporary; check if it is needed
     * later in this instruction */
    for(j = 0; j < opq->param_num; j++) {
      if((opq->param_type[j] & OPTYPE_REG) && (opq->param[j] == opq->reg_t[i]))
        break;
    }

    if(j != opq->param_num)
      continue;

    /* We have found a temporary (temporarily:) fit for use */
    if((t == -1) || (opq->reg_t[i] == 32))
      t = i;
  }

  return t;
}
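/* find_t() selection policy, as implemented above: reuse the temporary that
 * already caches the requested GPR if there is one; otherwise pick a
 * temporary whose cached GPR is not needed later in this instruction,
 * preferring one that caches nothing (reg_t == 32). */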
 
/* Checks if there is enough space in dp->host_page; if not, grows it */
void *enough_host_page(struct dyn_page *dp, void *cur, unsigned int *len,
                       unsigned int amount)
{
  unsigned int used = cur - dp->host_page;

  /* The array is long enough */
  if((used + amount) <= *len)
    return cur;

  /* Reallocate */
  *len += RECED_PAGE_ENLARGE_BY;

  if(!(dp->host_page = realloc(dp->host_page, *len))) {
    fprintf(stderr, "OOM\n");
    exit(1);
  }

  return dp->host_page + used;
}

/* Adds an operation to the opq */
void add_to_opq(struct op_queue *opq, int end, int op)
{
  if(opq->num_ops == opq->ops_len) {
    opq->ops_len += OPS_ENLARGE_BY;
    if(!(opq->ops = realloc(opq->ops, opq->ops_len * sizeof(int)))) {
      fprintf(stderr, "OOM\n");
      exit(1);
    }
  }

  if(end)
    opq->ops[opq->num_ops] = op;
  else {
    /* Shift everything over by one */
    memmove(opq->ops + 1, opq->ops, opq->num_ops * sizeof(int));
    opq->ops[0] = op;
  }

  opq->num_ops++;
}

static void gen_op_mark_loc(struct op_queue *opq, int end)
{
  add_to_opq(opq, end, op_mark_loc_indx);
}

/* Adds a parameter to the opq */
void add_to_op_params(struct op_queue *opq, int end, unsigned long param)
{
  if(opq->num_ops_param == opq->ops_param_len) {
    opq->ops_param_len += OPS_ENLARGE_BY;
    if(!(opq->ops_param = realloc(opq->ops_param, opq->ops_param_len * sizeof(int)))) {
      fprintf(stderr, "OOM\n");
      exit(1);
    }
  }

  if(end)
    opq->ops_param[opq->num_ops_param] = param;
  else {
    /* Shift everything over by one (element size matches the allocation above) */
    memmove(opq->ops_param + 1, opq->ops_param, opq->num_ops_param * sizeof(int));
    opq->ops_param[0] = param;
  }

  opq->num_ops_param++;
}
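/* For both add_to_opq() and add_to_op_params() the `end' argument selects
 * where the new entry goes: non-zero appends it at the end of the queue,
 * zero shifts the existing entries up and prepends it at index 0. */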
 
/* Initialises the recompiler */
void init_dyn_recomp(void)
{
  struct sigaction sigact;
  struct op_queue *opq = NULL;
  unsigned int i;

  cpu_state.opqs = NULL;

  /* Allocate the operation queue list (+1 for the page chaining) */
  for(i = 0; i < (immu_state->pagesize / 4) + 1; i++) {
    if(!(opq = malloc(sizeof(struct op_queue)))) {
      fprintf(stderr, "OOM\n");
      exit(1);
    }

    /* Initialise some fields */
    opq->ops_len = 0;
    opq->ops = NULL;
    opq->ops_param_len = 0;
    opq->ops_param = NULL;
    opq->xref = 0;

    if(cpu_state.opqs)
      cpu_state.opqs->prev = opq;

    opq->next = cpu_state.opqs;
    cpu_state.opqs = opq;
  }

  opq->prev = NULL;

  cpu_state.curr_page = NULL;
  if(!(cpu_state.dyn_pages = malloc(sizeof(void *) * (2 << (32 -
                                                immu_state->pagesize_log2))))) {
    fprintf(stderr, "OOM\n");
    exit(1);
  }
  memset(cpu_state.dyn_pages, 0,
         sizeof(void *) * (2 << (32 - immu_state->pagesize_log2)));

  /* Register our segmentation fault handler */
  sigact.sa_sigaction = dyn_sigsegv_debug;
  memset(&sigact.sa_mask, 0, sizeof(sigact.sa_mask));
  sigact.sa_flags = SA_SIGINFO | SA_NOMASK;
  if(sigaction(SIGSEGV, &sigact, NULL))
    printf("WARN: Unable to install SIGSEGV handler! Don't expect to be able to debug the recompiler.\n");

  /* FIXME: Find a better place for this */
    { /* Needed by execution */
      extern int do_stats;
      do_stats = config.cpu.dependstats || config.cpu.superscalar
              || config.sim.history || config.sim.exe_log;
    }

  printf("Recompile engine up and running\n");
}

/* Parses instructions and their operands and populates opq with them */
static void eval_insn_ops(struct op_queue *opq, oraddr_t addr)
{
  int breakp;
  struct insn_op_struct *opd;

  for(; opq->next; opq = opq->next, addr += 4) {
    opq->param_num = 0;
    breakp = 0;
    opq->insn = eval_insn(addr, &breakp);

    /* FIXME: If a breakpoint is set at this location, insert exception code */
    if(breakp) {
      fprintf(stderr, "FIXME: Insert breakpoint code\n");
    }

    opq->insn_index = insn_decode(opq->insn);

    if(opq->insn_index == -1)
      continue;

    opd = op_start[opq->insn_index];

    do {
      opq->param[opq->param_num] = eval_operand_val(opq->insn, opd);
      opq->param_type[opq->param_num] = opd->type;

      opq->param_num++;
      while(!(opd->type & OPTYPE_OP)) opd++;
    } while(!(opd++->type & OPTYPE_LAST));
  }
}
 
/* Adds code to the opq for the instruction pointed to by addr */
static void recompile_insn(struct op_queue *opq, int delay_insn)
{
  int j, k;
  int param_t[5]; /* Which temporary the parameters reside in */

  /* Check if we have an illegal instruction */
  if(opq->insn_index == -1) {
    gen_l_invalid(opq, NULL, delay_insn);
    return;
  }

  /* If we are recompiling an instruction that has a delay slot and is itself
   * in a delay slot, ignore it.  This is undefined behaviour. */
  if(delay_insn && (or32_opcodes[opq->insn_index].flags & OR32_IF_DELAY))
    return;

  param_t[0] = T_NONE;
  param_t[1] = T_NONE;
  param_t[2] = T_NONE;
  param_t[3] = T_NONE;
  param_t[4] = T_NONE;

  /* Jump instructions are special since they have a delay slot and thus they
   * need to control the exact operation sequence.  Special case these here to
   * avoid having loads of if(!(... & OR32_IF_DELAY)) below */
  if(or32_opcodes[opq->insn_index].flags & OR32_IF_DELAY) {
    /* Jump instructions don't have a disposition */
    or32_opcodes[opq->insn_index].exec(opq, param_t, delay_insn);

    /* Analysis is done by the individual jump instructions */
    /* Jump instructions don't touch runtime.sim.mem_cycles */
    /* Jump instructions run their own scheduler */
    return;
  }

  /* Before an exception takes place, all registers must be stored. */
  if(or32_opcodes[opq->insn_index].func_unit == it_exception) {
    ship_gprs_out_t(opq);

    or32_opcodes[opq->insn_index].exec(opq, param_t, delay_insn);
    return;
  }

  for(j = 0; j < opq->param_num; j++) {
    if(!(opq->param_type[j] & OPTYPE_REG))
      continue;

    /* Never, ever, move r0 into a temporary */
    if(!opq->param[j])
      continue;

    k = find_t(opq, opq->param[j]);

    param_t[j] = k;

    if(opq->reg_t[k] == opq->param[j]) {
      if(!(opq->param_type[j] & OPTYPE_DST) &&
         !(opq->tflags[k] & TFLAG_SOURCED)) {
        gen_op_move_t_gpr[k][opq->reg_t[k]](opq, 0);
        opq->tflags[k] |= TFLAG_SOURCED;
      }

      if(opq->param_type[j] & OPTYPE_DST)
        opq->tflags[k] |= TFLAG_DST;
      else
        opq->tflags[k] |= TFLAG_SRC;

      continue;
    }

    if(opq->reg_t[k] < 32) {
      /* Only ship the temporary out if it has been used as a destination
       * register */
      ship_t_out(opq, k);
    }

    if(opq->param_type[j] & OPTYPE_DST)
      opq->tflags[k] = TFLAG_DST;
    else
      opq->tflags[k] = TFLAG_SRC;

    opq->reg_t[k] = opq->param[j];

    /* Only generate code to move the register into a temporary if it is used as
     * a source operand */
    if(!(opq->param_type[j] & OPTYPE_DST)) {
      gen_op_move_t_gpr[k][opq->reg_t[k]](opq, 0);
      opq->tflags[k] |= TFLAG_SOURCED;
    }
  }

  /* To get the execution log correct for instructions like l.lwz r4,0(r4) the
   * effective address needs to be calculated before the instruction is
   * simulated */
  if(do_stats) {
    for(j = 0; j < opq->param_num; j++) {
      if(!(opq->param_type[j] & OPTYPE_DIS))
        continue;

      if(!opq->param[j + 1])
        gen_op_store_insn_ea(opq, 1, opq->param[j]);
      else
        calc_insn_ea_table[param_t[j + 1]](opq, 1, opq->param[j]);
    }
  }

  or32_opcodes[opq->insn_index].exec(opq, param_t, delay_insn);

  if(do_stats) {
    ship_gprs_out_t(opq);
    gen_op_analysis(opq, 1);
  }

  /* The call to join_mem_cycles() could be put into the individual operations
   * that emulate the load/store instructions, but then it would be added to
   * the cycle counter before analysis() is called, which is not how the complex
   * execution model does it. */
  if((or32_opcodes[opq->insn_index].func_unit == it_load) ||
     (or32_opcodes[opq->insn_index].func_unit == it_store))
    gen_op_join_mem_cycles(opq, 1);

  /* Delay slot instructions get a special scheduler, thus don't generate it
   * here */
  if(!delay_insn)
    gen_op_do_sched(opq, 1);
}
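/* recompile_insn() thus handles one OR1K instruction in roughly four steps:
 * dispatch jump/exception instructions to their special-case generators,
 * map the register operands onto temporaries (loading sources from the GPR
 * file as needed), emit the instruction's own micro-operations via
 * or32_opcodes[].exec(), and finally append the cycle/scheduler bookkeeping
 * operations. */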
 
/* Recompiles the page associated with *dyn */
void recompile_page(struct dyn_page *dyn)
{
  unsigned int j;
  struct op_queue *opq = cpu_state.opqs;
  oraddr_t rec_addr = dyn->or_page;
  oraddr_t rec_page = dyn->or_page;
  void **loc;

  /* The start of the next page */
  rec_page += immu_state->pagesize;

  printf("Recompiling page %"PRIxADDR"\n", rec_addr);
  fflush(stdout);

  /* Mark all temporaries as not containing a register */
  for(j = 0; j < NUM_T_REGS; j++) {
    opq->reg_t[j] = 32; /* Out-of-range register number */
    opq->tflags[j] = 0;
  }

  dyn->delayr = -verify_memoryarea(rec_addr)->ops.delayr;

  opq->num_ops = 0;
  opq->num_ops_param = 0;

  eval_insn_ops(opq, rec_addr);

  /* Insert code to check if the first instruction is executed in a delay slot */
  gen_op_check_delay_slot(opq, 1, 0);
  recompile_insn(opq, 1);
  ship_gprs_out_t(opq);
  gen_op_do_sched_delay(opq, 1);
  gen_op_clear_delay_insn(opq, 1);
  gen_op_do_jump_delay(opq, 1);
  gen_op_do_jump(opq, 1);
  gen_op_mark_loc(opq, 1);

  for(j = 0; j < NUM_T_REGS; j++)
    opq->reg_t[j] = 32; /* Out-of-range register number */

  for(; rec_addr < rec_page; rec_addr += 4, opq = opq->next) {
    if(opq->prev) {
      opq->num_ops = 0;
      opq->num_ops_param = 0;
    }
    opq->jump_local = -1;
    opq->not_jump_loc = -1;

    opq->insn_addr = rec_addr;

    for(j = 0; j < NUM_T_REGS; j++)
      opq->tflags[j] = TFLAG_SOURCED;

    /* Check if this location is cross-referenced */
    if(opq->xref) {
      /* If the current address is cross-referenced, the temporaries shall be
       * in an undefined state, so we must assume that no registers reside in
       * them */
      /* Ship out the current set of registers from the temporaries */
      if(opq->prev) {
        ship_gprs_out_t(opq->prev);
        for(j = 0; j < NUM_T_REGS; j++) {
          opq->reg_t[j] = 32;
          opq->prev->reg_t[j] = 32;
        }
      }
    }

    recompile_insn(opq, 0);

    /* Store the state of the temporaries */
    memcpy(opq->next->reg_t, opq->reg_t, sizeof(opq->reg_t));
  }

  dyn->dirty = 0;

  /* Ship temporaries out to the corresponding registers */
  ship_gprs_out_t(opq->prev);

  opq->num_ops = 0;
  opq->num_ops_param = 0;
  opq->not_jump_loc = -1;
  opq->jump_local = -1;

  /* Insert code to jump to the next page */
  gen_op_do_jump(opq, 1);

  /* Generate the code */
  gen_code(cpu_state.opqs, dyn);

  /* Fix up the locations */
  for(loc = dyn->locs; loc < &dyn->locs[immu_state->pagesize / 4]; loc++)
    *loc += (unsigned int)dyn->host_page;

  cpu_state.opqs->ops_param[0] += (unsigned int)dyn->host_page;

  /* Search for page-local jumps */
  opq = cpu_state.opqs;
  for(j = 0; j < (immu_state->pagesize / 4); opq = opq->next, j++) {
    if(opq->jump_local != -1)
      opq->ops_param[opq->jump_local] =
                              (unsigned int)dyn->locs[opq->jump_local_loc >> 2];

    if(opq->not_jump_loc != -1)
      opq->ops_param[opq->not_jump_loc] = (unsigned int)dyn->locs[j + 1];

    /* Store the state of the temporaries into dyn->ts_bound */
    dyn->ts_bound[j] = 0;
    if(opq->reg_t[0] < 32)
      dyn->ts_bound[j] = opq->reg_t[0];
    if(opq->reg_t[1] < 32)
      dyn->ts_bound[j] |= opq->reg_t[1] << 5;
    if(opq->reg_t[2] < 32)
      dyn->ts_bound[j] |= opq->reg_t[2] << 10;

    /* Reset for the next page to be recompiled */
    opq->xref = 0;
  }

  /* Patch the relocations */
  patch_relocs(cpu_state.opqs, dyn->host_page);

  if(do_stats) {
    opq = cpu_state.opqs;
    for(j = 0; j < (immu_state->pagesize / 4); j++, opq = opq->next) {
      dyn->insns[j] = opq->insn;
      dyn->insn_indexs[j] = opq->insn_index;
    }
  }

  /* FIXME: Fix the issue below in a more elegant way */
  /* Since eval_insn is called to get the instruction, runtime.sim.mem_cycles is
   * updated, but the recompiler expects it to start at 0, so reset it */
  runtime.sim.mem_cycles = 0;
}
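/* dyn->ts_bound[] above packs the temporary-to-GPR bindings at each
 * instruction boundary into one word, 5 bits per temporary: temporary 0 in
 * bits 0-4, temporary 1 in bits 5-9, temporary 2 in bits 10-14.  A temporary
 * that caches nothing contributes 0 (r0 is never cached in a temporary, so
 * the value is unambiguous). */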
 
/* Recompiles a delay-slot instruction (opq is the opq of the instruction
 * having the delay slot) */
static void recompile_delay_insn(struct op_queue *opq)
{
  struct op_queue delay_opq;
  int i;

  /* Set up a fake opq that looks very much like the delay slot instruction */
  memcpy(&delay_opq, opq, sizeof(struct op_queue));
  /* `Fix' a couple of bits */
  for(i = 0; i < NUM_T_REGS; i++)
    delay_opq.tflags[i] = TFLAG_SOURCED;
  delay_opq.insn_index = opq->next->insn_index;
  memcpy(delay_opq.param_type, opq->next->param_type, sizeof(delay_opq.param_type));
  memcpy(delay_opq.param, opq->next->param, sizeof(delay_opq.param));
  delay_opq.param_num = opq->next->param_num;
  delay_opq.insn = opq->next->insn;

  delay_opq.xref = 0;
  delay_opq.insn_addr = opq->insn_addr + 4;
  delay_opq.prev = opq->prev;
  delay_opq.next = NULL;

  /* Generate the delay slot instruction */
  recompile_insn(&delay_opq, 1);

  ship_gprs_out_t(&delay_opq);

  opq->num_ops = delay_opq.num_ops;
  opq->ops_len = delay_opq.ops_len;
  opq->ops = delay_opq.ops;
  opq->num_ops_param = delay_opq.num_ops_param;
  opq->ops_param_len = delay_opq.ops_param_len;
  opq->ops_param = delay_opq.ops_param;

  for(i = 0; i < NUM_T_REGS; i++)
    opq->reg_t[i] = 32;
}

/* Returns non-zero if the jump is into this page, 0 otherwise */
static int find_jump_loc(oraddr_t j_ea, struct op_queue *opq)
{
  int i;

  /* Mark the jump as non page-local if the delay slot instruction is on the
   * page following the jump instruction.  This should not be needed */
  if(IADDR_PAGE(j_ea) != IADDR_PAGE(opq->insn_addr))
    /* We can't do anything, as j_ea (as passed to find_jump_loc) is a
     * VIRTUAL offset and the next physical page may not be the next VIRTUAL
     * page */
    return 0;

  /* The jump is into the page currently undergoing dynamic recompilation */

  /* If we haven't got to the location of the jump yet, everything is ok */
  if(j_ea > opq->insn_addr) {
    /* Find the corresponding opq and mark it as cross-referenced */
    for(i = (j_ea - opq->insn_addr) / 4; i; i--)
      opq = opq->next;
    opq->xref = 1;
    return 1;
  }

  /* Insert temporary -> register code before the jump ea and register ->
   * temporary code at the x-ref address */
  for(i = (opq->insn_addr - j_ea) / 4; i; i--)
    opq = opq->prev;

  if(!opq->prev)
    /* We're at the beginning of a page, no need to do anything */
    return 1;

  /* Found the location, insert code */

  ship_gprs_out_t(opq->prev);

  for(i = 0; i < NUM_T_REGS; i++) {
    if(opq->prev->reg_t[i] < 32)
      /* FIXME: Ship temporaries in at the beginning of the opq that needs it */
      gen_op_move_t_gpr[i][opq->prev->reg_t[i]](opq, 0);
  }

  opq->xref = 1;

  return 1;
}
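/* To summarise find_jump_loc(): a forward page-local jump only needs the
 * target opq marked as cross-referenced; for a backward jump the temporaries
 * are written back to the GPR file just before the target and reloaded at
 * the cross-referenced location, so the register state is consistent no
 * matter which path reaches it. */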
 
static void gen_j_imm(struct op_queue *opq, oraddr_t off)
{
  int jump_local;

  off <<= 2;

  if(IADDR_PAGE(opq->insn_addr) != IADDR_PAGE(opq->insn_addr + 4)) {
    gen_op_set_pc_delay_imm(opq, 1, off);
    gen_op_do_sched(opq, 1);
    return;
  }

  jump_local = find_jump_loc(opq->insn_addr + off, opq);

  gen_op_set_delay_insn(opq, 1);
  gen_op_do_sched(opq, 1);

  recompile_delay_insn(opq);

  gen_op_add_pc(opq, 1, (orreg_t)off - 8);
  gen_op_clear_delay_insn(opq, 1);
  gen_op_do_sched_delay(opq, 1);

  if(jump_local) {
    gen_op_jmp_imm(opq, 1, 0);
    opq->jump_local = opq->num_ops_param - 1;
    opq->jump_local_loc = (opq->insn_addr + (orreg_t)off) & immu_state->page_offset_mask;
  } else
    gen_op_do_jump(opq, 1);
}

static const generic_gen_op set_pc_delay_gpr[32] = {
 NULL,
 gen_op_move_gpr1_pc_delay,
 gen_op_move_gpr2_pc_delay,
 gen_op_move_gpr3_pc_delay,
 gen_op_move_gpr4_pc_delay,
 gen_op_move_gpr5_pc_delay,
 gen_op_move_gpr6_pc_delay,
 gen_op_move_gpr7_pc_delay,
 gen_op_move_gpr8_pc_delay,
 gen_op_move_gpr9_pc_delay,
 gen_op_move_gpr10_pc_delay,
 gen_op_move_gpr11_pc_delay,
 gen_op_move_gpr12_pc_delay,
 gen_op_move_gpr13_pc_delay,
 gen_op_move_gpr14_pc_delay,
 gen_op_move_gpr15_pc_delay,
 gen_op_move_gpr16_pc_delay,
 gen_op_move_gpr17_pc_delay,
 gen_op_move_gpr18_pc_delay,
 gen_op_move_gpr19_pc_delay,
 gen_op_move_gpr20_pc_delay,
 gen_op_move_gpr21_pc_delay,
 gen_op_move_gpr22_pc_delay,
 gen_op_move_gpr23_pc_delay,
 gen_op_move_gpr24_pc_delay,
 gen_op_move_gpr25_pc_delay,
 gen_op_move_gpr26_pc_delay,
 gen_op_move_gpr27_pc_delay,
 gen_op_move_gpr28_pc_delay,
 gen_op_move_gpr29_pc_delay,
 gen_op_move_gpr30_pc_delay,
 gen_op_move_gpr31_pc_delay };

static void gen_j_reg(struct op_queue *opq, unsigned int gpr)
{
  int i;

  /* Ship the jump-to register out (if it exists).  It requires special
   * handling */
  for(i = 0; i < NUM_T_REGS; i++) {
    if(opq->reg_t[i] == opq->param[0])
      /* Ship the temporary out in the last opq that used it */
      ship_t_out(opq, i);
  }

  if(do_stats)
    gen_op_analysis(opq, 1);

  if(!gpr)
    gen_op_clear_pc_delay(opq, 1);
  else
    set_pc_delay_gpr[gpr](opq, 1);

  gen_op_do_sched(opq, 1);

  if(IADDR_PAGE(opq->insn_addr) != IADDR_PAGE(opq->insn_addr + 4))
    return;

  recompile_delay_insn(opq);

  gen_op_set_pc_pc_delay(opq, 1);
  gen_op_clear_delay_insn(opq, 1);
  gen_op_do_sched_delay(opq, 1);

  gen_op_do_jump_delay(opq, 1);
  gen_op_do_jump(opq, 1);
}
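/* Both gen_j_imm() and gen_j_reg() emit the same overall shape: record the
 * jump target (an immediate offset or pc_delay taken from a GPR), run the
 * scheduler, recompile the delay-slot instruction inline, and then either
 * branch to a page-local label or leave the recompiled page via op_do_jump. */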
 
/*------------------------------[ Operation generation for an instruction ]---*/
/* FIXME: Flag setting is not done in any instruction */
/* FIXME: Since r0 is not moved into a temporary, check all arguments below! */

DEF_1T_OP(generic_gen_op, clear_t, gen_op_clear);
DEF_2T_OP_NEQ(generic_gen_op, move_t_t, gen_op_move);
DEF_1T_OP(imm_gen_op, mov_t_imm, gen_op_imm);

DEF_2T_OP(imm_gen_op, l_add_imm_t_table, gen_op_add_imm);
DEF_3T_OP(generic_gen_op, l_add_t_table, gen_op_add);

void gen_l_add(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0])
    /* The destination is r0, so the operation shall do nothing */
    return;

  if(!opq->param[1] && !opq->param[2]) {
    /* Just clear param_t[0] */
    clear_t[param_t[0]](opq, 1);
    return;
  }

  if(!opq->param[2]) {
    if(opq->param[0] != opq->param[1])
      /* This just moves a register */
      move_t_t[param_t[0]][param_t[1]](opq, 1);
    return;
  }

  if(!opq->param[1]) {
    /* Check if we are moving an immediate */
    if(param_t[2] == T_NONE) {
      /* Yep, an immediate */
      mov_t_imm[param_t[0]](opq, 1, opq->param[2]);
      return;
    }
    /* Just another move */
    if(opq->param[0] != opq->param[2])
      move_t_t[param_t[0]][param_t[2]](opq, 1);
    return;
  }

  /* Ok, this _IS_ an add... */
  if(param_t[2] == T_NONE)
    /* immediate */
    l_add_imm_t_table[param_t[0]][param_t[1]](opq, 1, opq->param[2]);
  else
    l_add_t_table[param_t[0]][param_t[1]][param_t[2]](opq, 1);
}
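/* The gen_l_* handlers below all follow the same conventions, visible in
 * gen_l_add() above: opq->param[] holds the decoded operands, param_t[]
 * holds the temporaries they were mapped to (T_NONE for an immediate),
 * writes to r0 are dropped, and reads of r0 are treated as the constant 0,
 * so many cases collapse into clears, moves or immediate loads. */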
 
DEF_3T_OP(generic_gen_op, l_addc_t_table, gen_op_addc);

void gen_l_addc(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0])
    /* The destination is r0, so the operation shall do nothing */
    return;

  /* FIXME: More optimisations!! (...and immediate...) */
  l_addc_t_table[param_t[0]][param_t[1]][param_t[2]](opq, 1);
}

DEF_2T_OP(imm_gen_op, l_and_imm_t_table, gen_op_and_imm);
DEF_3T_OP_NEQ(generic_gen_op, l_and_t_table, gen_op_and);

void gen_l_and(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0])
    /* The destination is r0, so the operation shall do nothing */
    return;

  if(!opq->param[1] || !opq->param[2]) {
    /* Just clear param_t[0] */
    clear_t[param_t[0]](opq, 1);
    return;
  }

  if((opq->param[0] == opq->param[1]) && (opq->param[1] == opq->param[2]) &&
     (param_t[2] != T_NONE))
    return;

  if(param_t[2] == T_NONE)
    l_and_imm_t_table[param_t[0]][param_t[1]](opq, 1, opq->param[2]);
  else
    l_and_t_table[param_t[0]][param_t[1]][param_t[2]](opq, 1);
}
 
void gen_l_bf(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(do_stats)
    /* All gprs are current since this insn doesn't touch any reg */
    gen_op_analysis(opq, 1);

  /* The temporaries are expected to be shipped out after the execution of the
   * branch instruction whether it branched or not */
  ship_gprs_out_t(opq->prev);

  if(IADDR_PAGE(opq->insn_addr) != IADDR_PAGE(opq->insn_addr + 4)) {
    gen_op_check_flag_delay(opq, 1, opq->param[0] << 2);
    gen_op_do_sched(opq, 1);
    opq->not_jump_loc = -1;
    return;
  }

  gen_op_check_flag(opq, 1, 0);
  opq->not_jump_loc = opq->num_ops_param - 1;

  gen_j_imm(opq, opq->param[0]);
}

void gen_l_bnf(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(do_stats)
    /* All gprs are current since this insn doesn't touch any reg */
    gen_op_analysis(opq, 1);

  /* The temporaries are expected to be shipped out after the execution of the
   * branch instruction whether it branched or not */
  ship_gprs_out_t(opq->prev);

  if(IADDR_PAGE(opq->insn_addr) != IADDR_PAGE(opq->insn_addr + 4)) {
    gen_op_check_not_flag_delay(opq, 1, opq->param[0] << 2);
    gen_op_do_sched(opq, 1);
    opq->not_jump_loc = -1;
    return;
  }

  gen_op_check_not_flag(opq, 1, 0);
  opq->not_jump_loc = opq->num_ops_param - 1;

  gen_j_imm(opq, opq->param[0]);
}
 
DEF_3T_OP_NEQ(generic_gen_op, l_cmov_t_table, gen_op_cmov);

/* FIXME: Check if either operand 1 or 2 is r0 */
void gen_l_cmov(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0])
    return;

  if(!opq->param[1] && !opq->param[2]) {
    clear_t[param_t[0]](opq, 1);
    return;
  }

  if((opq->param[1] == opq->param[2]) && (opq->param[0] == opq->param[1]))
    return;

  if(opq->param[1] == opq->param[2]) {
    move_t_t[param_t[0]][param_t[1]](opq, 1);
    return;
  }

  l_cmov_t_table[param_t[0]][param_t[1]][param_t[2]](opq, 1);
}

void gen_l_cust1(struct op_queue *opq, int param_t[3], int delay_slot)
{
}

void gen_l_cust2(struct op_queue *opq, int param_t[3], int delay_slot)
{
}

void gen_l_cust3(struct op_queue *opq, int param_t[3], int delay_slot)
{
}

void gen_l_cust4(struct op_queue *opq, int param_t[3], int delay_slot)
{
}

void gen_l_cust5(struct op_queue *opq, int param_t[3], int delay_slot)
{
}

void gen_l_cust6(struct op_queue *opq, int param_t[3], int delay_slot)
{
}

void gen_l_cust7(struct op_queue *opq, int param_t[3], int delay_slot)
{
}

void gen_l_cust8(struct op_queue *opq, int param_t[3], int delay_slot)
{
}
 
/* FIXME: All registers need to be stored before the div instructions as they
 * have the potential to cause an exception */

DEF_1T_OP(generic_gen_op, check_null_excpt, gen_op_check_null_except);
DEF_1T_OP(generic_gen_op, check_null_excpt_delay, gen_op_check_null_except_delay);
DEF_3T_OP(generic_gen_op, l_div_t_table, gen_op_div);

void gen_l_div(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[2]) {
    /* There is no option.  This _will_ cause an illegal exception */
    if(!delay_slot) {
      gen_op_illegal(opq, 1);
      gen_op_do_jump(opq, 1);
    } else {
      gen_op_illegal(opq, 1);
      gen_op_do_jump(opq, 1);
    }
    return;
  }

  if(!delay_slot)
    check_null_excpt[param_t[2]](opq, 1);
  else
    check_null_excpt_delay[param_t[2]](opq, 1);

  if(!opq->param[0])
    return;

  if(!opq->param[1]) {
    /* Clear param_t[0] */
    clear_t[param_t[0]](opq, 1);
    return;
  }

  l_div_t_table[param_t[0]][param_t[1]][param_t[2]](opq, 1);
}

DEF_3T_OP(generic_gen_op, l_divu_t_table, gen_op_divu);

void gen_l_divu(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[2]) {
    /* There is no option.  This _will_ cause an illegal exception */
    if(!delay_slot) {
      gen_op_illegal(opq, 1);
      gen_op_do_jump(opq, 1);
    } else {
      gen_op_illegal(opq, 1);
      gen_op_do_jump(opq, 1);
    }
    return;
  }

  if(!delay_slot)
    check_null_excpt[param_t[2]](opq, 1);
  else
    check_null_excpt_delay[param_t[2]](opq, 1);

  if(!opq->param[0])
    return;

  if(!opq->param[1]) {
    /* Clear param_t[0] */
    clear_t[param_t[0]](opq, 1);
    return;
  }

  l_divu_t_table[param_t[0]][param_t[1]][param_t[2]](opq, 1);
}
 
DEF_2T_OP(generic_gen_op, l_extbs_t_table, gen_op_extbs);

void gen_l_extbs(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0])
    return;

  if(!opq->param[1]) {
    clear_t[param_t[0]](opq, 1);
    return;
  }

  l_extbs_t_table[param_t[0]][param_t[1]](opq, 1);
}

DEF_2T_OP(generic_gen_op, l_extbz_t_table, gen_op_extbz);

void gen_l_extbz(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0])
    return;

  if(!opq->param[1]) {
    clear_t[param_t[0]](opq, 1);
    return;
  }

  l_extbz_t_table[param_t[0]][param_t[1]](opq, 1);
}

DEF_2T_OP(generic_gen_op, l_exths_t_table, gen_op_exths);

void gen_l_exths(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0])
    return;

  if(!opq->param[1]) {
    clear_t[param_t[0]](opq, 1);
    return;
  }

  l_exths_t_table[param_t[0]][param_t[1]](opq, 1);
}

DEF_2T_OP(generic_gen_op, l_exthz_t_table, gen_op_exthz);

void gen_l_exthz(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0])
    return;

  if(!opq->param[1]) {
    clear_t[param_t[0]](opq, 1);
    return;
  }

  l_exthz_t_table[param_t[0]][param_t[1]](opq, 1);
}

void gen_l_extws(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0])
    return;

  if(!opq->param[1]) {
    clear_t[param_t[0]](opq, 1);
    return;
  }

  if(opq->param[0] == opq->param[1])
    return;

  /* On the 32-bit architecture this instruction reduces to a move */
  move_t_t[param_t[0]][param_t[1]](opq, 1);
}

void gen_l_extwz(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0])
    return;

  if(!opq->param[1]) {
    clear_t[param_t[0]](opq, 1);
    return;
  }

  if(opq->param[0] == opq->param[1])
    return;

  /* On the 32-bit architecture this instruction reduces to a move */
  move_t_t[param_t[0]][param_t[1]](opq, 1);
}

DEF_2T_OP(generic_gen_op, l_ff1_t_table, gen_op_ff1);

void gen_l_ff1(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0])
    return;

  if(!opq->param[1]) {
    clear_t[param_t[0]](opq, 1);
    return;
  }

  l_ff1_t_table[param_t[0]][param_t[1]](opq, 1);
}
 
void gen_l_j(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(do_stats)
    /* All gprs are current since this insn doesn't touch any reg */
    gen_op_analysis(opq, 1);

  gen_j_imm(opq, opq->param[0]);
}

void gen_l_jal(struct op_queue *opq, int param_t[3], int delay_slot)
{
  int i;

  /* In the case of a l.jal instruction, make sure that LINK_REGNO is not in
   * a temporary.  The problem is that the l.jal(r) instruction stores the
   * `return address' in LINK_REGNO.  The temporaries are shipped out only
   * after the delay slot instruction has executed, so shipping them out would
   * overwrite the `return address'. */
  for(i = 0; i < NUM_T_REGS; i++) {
    if(opq->reg_t[i] == LINK_REGNO) {
      /* Don't bother storing the register, it is going to get clobbered in
       * this instruction anyway */
      opq->reg_t[i] = 32;
      break;
    }
  }

  /* Store the return address */
  gen_op_store_link_addr_gpr(opq, 1);

  if(do_stats)
    /* All gprs are current since this insn doesn't touch any reg */
    gen_op_analysis(opq, 1);

  gen_j_imm(opq, opq->param[0]);
}

void gen_l_jr(struct op_queue *opq, int param_t[3], int delay_slot)
{
  gen_j_reg(opq, opq->param[0]);
}

void gen_l_jalr(struct op_queue *opq, int param_t[3], int delay_slot)
{
  int i;

  /* In the case of a l.jal instruction, make sure that LINK_REGNO is not in
   * a temporary.  The problem is that the l.jal(r) instruction stores the
   * `return address' in LINK_REGNO.  The temporaries are shipped out only
   * after the delay slot instruction has executed, so shipping them out would
   * overwrite the `return address'. */
  for(i = 0; i < NUM_T_REGS; i++) {
    if(opq->reg_t[i] == LINK_REGNO) {
      /* Don't bother storing the register, it is going to get clobbered in
       * this instruction anyway */
      opq->reg_t[i] = 32;
      break;
    }
  }

  /* Store the return address */
  gen_op_store_link_addr_gpr(opq, 1);

  gen_j_reg(opq, opq->param[0]);
}

/* FIXME: Optimise all load instructions when the disposition == 0 */

DEF_1T_OP(imm_gen_op, l_lbs_imm_t_table, gen_op_lbs_imm);
DEF_2T_OP(imm_gen_op, l_lbs_t_table, gen_op_lbs);

void gen_l_lbs(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0]) {
    /* FIXME: This will work, but the statistics need to be updated... */
    return;
  }

  /* Just in case an exception happens */
  ship_gprs_out_t(opq->prev);

  if(!opq->param[2]) {
    /* Load the data from the immediate */
    l_lbs_imm_t_table[param_t[0]](opq, 1, opq->param[1]);
    return;
  }

  l_lbs_t_table[param_t[0]][param_t[2]](opq, 1, opq->param[1]);
}
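/* The remaining load handlers (l.lbz/l.lhs/l.lhz/l.lws/l.lwz below) are
 * structurally identical to gen_l_lbs() above: ship the temporaries out in
 * case the access faults, then index the operation table either by the
 * immediate-only form (base register r0) or by the temporary holding the
 * base register. */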
 
DEF_1T_OP(imm_gen_op, l_lbz_imm_t_table, gen_op_lbz_imm);
DEF_2T_OP(imm_gen_op, l_lbz_t_table, gen_op_lbz);

void gen_l_lbz(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0]) {
    /* FIXME: This will work, but the statistics need to be updated... */
    return;
  }

  /* Just in case an exception happens */
  ship_gprs_out_t(opq->prev);

  if(!opq->param[2]) {
    /* Load the data from the immediate */
    l_lbz_imm_t_table[param_t[0]](opq, 1, opq->param[1]);
    return;
  }

  l_lbz_t_table[param_t[0]][param_t[2]](opq, 1, opq->param[1]);
}

DEF_1T_OP(imm_gen_op, l_lhs_imm_t_table, gen_op_lhs_imm);
DEF_2T_OP(imm_gen_op, l_lhs_t_table, gen_op_lhs);

void gen_l_lhs(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0]) {
    /* FIXME: This will work, but the statistics need to be updated... */
    return;
  }

  /* Just in case an exception happens */
  ship_gprs_out_t(opq->prev);

  if(!opq->param[2]) {
    /* Load the data from the immediate */
    l_lhs_imm_t_table[param_t[0]](opq, 1, opq->param[1]);
    return;
  }

  l_lhs_t_table[param_t[0]][param_t[2]](opq, 1, opq->param[1]);
}

DEF_1T_OP(imm_gen_op, l_lhz_imm_t_table, gen_op_lhz_imm);
DEF_2T_OP(imm_gen_op, l_lhz_t_table, gen_op_lhz);

void gen_l_lhz(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0]) {
    /* FIXME: This will work, but the statistics need to be updated... */
    return;
  }

  /* Just in case an exception happens */
  ship_gprs_out_t(opq->prev);

  if(!opq->param[2]) {
    /* Load the data from the immediate */
    l_lhz_imm_t_table[param_t[0]](opq, 1, opq->param[1]);
    return;
  }

  l_lhz_t_table[param_t[0]][param_t[2]](opq, 1, opq->param[1]);
}

DEF_1T_OP(imm_gen_op, l_lws_imm_t_table, gen_op_lws_imm);
DEF_2T_OP(imm_gen_op, l_lws_t_table, gen_op_lws);

void gen_l_lws(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0]) {
    /* FIXME: This will work, but the statistics need to be updated... */
    return;
  }

  /* Just in case an exception happens */
  ship_gprs_out_t(opq->prev);

  if(!opq->param[2]) {
    /* Load the data from the immediate */
    l_lws_imm_t_table[param_t[0]](opq, 1, opq->param[1]);
    return;
  }

  l_lws_t_table[param_t[0]][param_t[2]](opq, 1, opq->param[1]);
}

DEF_1T_OP(imm_gen_op, l_lwz_imm_t_table, gen_op_lwz_imm);
DEF_2T_OP(imm_gen_op, l_lwz_t_table, gen_op_lwz);

void gen_l_lwz(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0]) {
    /* FIXME: This will work, but the statistics need to be updated... */
    return;
  }

  /* Just in case an exception happens */
  ship_gprs_out_t(opq->prev);

  if(!opq->param[2]) {
    /* Load the data from the immediate */
    l_lwz_imm_t_table[param_t[0]](opq, 1, opq->param[1]);
    return;
  }

  l_lwz_t_table[param_t[0]][param_t[2]](opq, 1, opq->param[1]);
}
 
DEF_1T_OP(imm_gen_op, l_mac_imm_t_table, gen_op_mac_imm);
DEF_2T_OP(generic_gen_op, l_mac_t_table, gen_op_mac);

void gen_l_mac(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0] || !opq->param[1])
    return;

  if(param_t[1] == T_NONE)
    l_mac_imm_t_table[param_t[0]](opq, 1, opq->param[1]);
  else
    l_mac_t_table[param_t[0]][param_t[1]](opq, 1);
}

DEF_1T_OP(generic_gen_op, l_macrc_t_table, gen_op_macrc);

void gen_l_macrc(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0]) {
    gen_op_macc(opq, 1);
    return;
  }

  l_macrc_t_table[param_t[0]](opq, 1);
}

DEF_1T_OP(imm_gen_op, l_mfspr_imm_t_table, gen_op_mfspr_imm);
DEF_2T_OP(imm_gen_op, l_mfspr_t_table, gen_op_mfspr);

void gen_l_mfspr(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0])
    return;

  if(!opq->param[1]) {
    l_mfspr_imm_t_table[param_t[0]](opq, 1, opq->param[2]);
    return;
  }

  l_mfspr_t_table[param_t[0]][param_t[1]](opq, 1, opq->param[2]);
}

void gen_l_movhi(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0])
    return;

  if(!opq->param[1]) {
    clear_t[param_t[0]](opq, 1);
    return;
  }

  mov_t_imm[param_t[0]](opq, 1, opq->param[1] << 16);
}

DEF_2T_OP(generic_gen_op, l_msb_t_table, gen_op_msb);

void gen_l_msb(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0] || !opq->param[1])
    return;

  l_msb_t_table[param_t[0]][param_t[1]](opq, 1);
}

DEF_1T_OP(imm_gen_op, l_mtspr_clear_t_table, gen_op_mtspr_clear);
DEF_1T_OP(imm_gen_op, l_mtspr_imm_t_table, gen_op_mtspr_imm);
DEF_2T_OP(imm_gen_op, l_mtspr_t_table, gen_op_mtspr);

void gen_l_mtspr(struct op_queue *opq, int param_t[3], int delay_slot)
{
  /* Just in case an exception happens */
  ship_gprs_out_t(opq->prev);

  if(!opq->param[0]) {
    if(!opq->param[1]) {
      /* Clear the immediate SPR */
      gen_op_mtspr_imm_clear(opq, 1, opq->param[2]);
      return;
    }
    l_mtspr_imm_t_table[param_t[1]](opq, 1, opq->param[2]);
    return;
  }

  if(!opq->param[1]) {
    l_mtspr_clear_t_table[param_t[0]](opq, 1, opq->param[2]);
    return;
  }

  l_mtspr_t_table[param_t[0]][param_t[1]](opq, 1, opq->param[2]);
}
 
DEF_2T_OP(imm_gen_op, l_mul_imm_t_table, gen_op_mul_imm);
DEF_3T_OP(generic_gen_op, l_mul_t_table, gen_op_mul);

void gen_l_mul(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0])
    return;

  if(!opq->param[1] || !opq->param[2]) {
    clear_t[param_t[0]](opq, 1);
    return;
  }

  if(param_t[2] == T_NONE)
    l_mul_imm_t_table[param_t[0]][param_t[1]](opq, 1, opq->param[2]);
  else
    l_mul_t_table[param_t[0]][param_t[1]][param_t[2]](opq, 1);
}

DEF_3T_OP(generic_gen_op, l_mulu_t_table, gen_op_mulu);

void gen_l_mulu(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0])
    return;

  if(!opq->param[1] || !opq->param[2]) {
    clear_t[param_t[0]](opq, 1);
    return;
  }

  l_mulu_t_table[param_t[0]][param_t[1]][param_t[2]](opq, 1);
}

void gen_l_nop(struct op_queue *opq, int param_t[3], int delay_slot)
{
  /* Do parameter switch now */
  switch(opq->param[0]) {
  case NOP_NOP:
    break;
  case NOP_EXIT:
    ship_gprs_out_t(opq->prev);
    gen_op_nop_exit(opq, 1);
    break;
  case NOP_CNT_RESET:
    gen_op_nop_reset(opq, 1);
    break;
  case NOP_PRINTF:
    ship_gprs_out_t(opq->prev);
    gen_op_nop_printf(opq, 1);
    break;
  case NOP_REPORT:
    ship_gprs_out_t(opq->prev);
    gen_op_nop_report(opq, 1);
    break;
  default:
    if((opq->param[0] >= NOP_REPORT_FIRST) && (opq->param[0] <= NOP_REPORT_LAST)) {
      ship_gprs_out_t(opq->prev);
      gen_op_nop_report_imm(opq, 1, opq->param[0] - NOP_REPORT_FIRST);
    }
    break;
  }
}
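/* As the switch above shows, l.nop doubles as the simulator's escape hatch:
 * the immediate selects a simulator action (exit, counter reset, printf,
 * report), and the cases that inspect or print guest registers first flush
 * the temporaries so the GPR file is up to date. */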
 
DEF_2T_OP(imm_gen_op, l_or_imm_t_table, gen_op_or_imm);
DEF_3T_OP_NEQ(generic_gen_op, l_or_t_table, gen_op_or);

void gen_l_or(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0])
    return;

  if((opq->param[0] == opq->param[1]) && (opq->param[1] == opq->param[2]) &&
     (param_t[2] != T_NONE))
    return;

  if(!opq->param[1] && !opq->param[2]) {
    clear_t[param_t[0]](opq, 1);
    return;
  }

  if(!opq->param[2]) {
    if((param_t[2] == T_NONE) && (opq->param[0] == opq->param[1]))
      return;
    move_t_t[param_t[0]][param_t[1]](opq, 1);
    return;
  }

  if(!opq->param[1]) {
    /* Check if we are moving an immediate */
    if(param_t[2] == T_NONE) {
      /* Yep, an immediate */
      mov_t_imm[param_t[0]](opq, 1, opq->param[2]);
      return;
    }
    /* Just another move */
    move_t_t[param_t[0]][param_t[2]](opq, 1);
    return;
  }

  if(param_t[2] == T_NONE)
    l_or_imm_t_table[param_t[0]][param_t[1]](opq, 1, opq->param[2]);
  else
    l_or_t_table[param_t[0]][param_t[1]][param_t[2]](opq, 1);
}
 
void gen_l_rfe(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(do_stats)
    /* All gprs are current since this insn doesn't touch any reg */
    gen_op_analysis(opq, 1);

  gen_op_prep_rfe(opq, 1);
  /* FIXME: rename op_do_sched_delay */
  gen_op_do_sched_delay(opq, 1);
  gen_op_do_jump(opq, 1);
}

/* FIXME: All store instructions should be optimised when the disposition == 0 */

DEF_1T_OP(imm_gen_op, l_sb_clear_table, gen_op_sb_clear);
DEF_1T_OP(imm_gen_op, l_sb_imm_t_table, gen_op_sb_imm);
DEF_2T_OP(imm_gen_op, l_sb_t_table, gen_op_sb);

void gen_l_sb(struct op_queue *opq, int param_t[3], int delay_slot)
{
  /* Just in case an exception happens */
  ship_gprs_out_t(opq->prev);

  if(!opq->param[2]) {
    if(!opq->param[1]) {
      gen_op_sb_clear_imm(opq, 1, opq->param[0]);
      return;
    }
    l_sb_clear_table[param_t[1]](opq, 1, opq->param[0]);
    return;
  }

  if(!opq->param[1]) {
    /* Store the data to the immediate */
    l_sb_imm_t_table[param_t[2]](opq, 1, opq->param[0]);
    return;
  }

  l_sb_t_table[param_t[1]][param_t[2]](opq, 1, opq->param[0]);
}

DEF_1T_OP(imm_gen_op, l_sh_clear_table, gen_op_sh_clear);
DEF_1T_OP(imm_gen_op, l_sh_imm_t_table, gen_op_sh_imm);
DEF_2T_OP(imm_gen_op, l_sh_t_table, gen_op_sh);

void gen_l_sh(struct op_queue *opq, int param_t[3], int delay_slot)
{
  /* Just in case an exception happens */
  ship_gprs_out_t(opq->prev);

  if(!opq->param[2]) {
    if(!opq->param[1]) {
      gen_op_sh_clear_imm(opq, 1, opq->param[0]);
      return;
    }
    l_sh_clear_table[param_t[1]](opq, 1, opq->param[0]);
    return;
  }

  if(!opq->param[1]) {
    /* Store the data to the immediate */
    l_sh_imm_t_table[param_t[2]](opq, 1, opq->param[0]);
    return;
  }

  l_sh_t_table[param_t[1]][param_t[2]](opq, 1, opq->param[0]);
}

DEF_1T_OP(imm_gen_op, l_sw_clear_table, gen_op_sw_clear);
DEF_1T_OP(imm_gen_op, l_sw_imm_t_table, gen_op_sw_imm);
DEF_2T_OP(imm_gen_op, l_sw_t_table, gen_op_sw);

void gen_l_sw(struct op_queue *opq, int param_t[3], int delay_slot)
{
  /* Just in case an exception happens */
  ship_gprs_out_t(opq->prev);

  if(!opq->param[2]) {
    if(!opq->param[1]) {
      gen_op_sw_clear_imm(opq, 1, opq->param[0]);
      return;
    }
    l_sw_clear_table[param_t[1]](opq, 1, opq->param[0]);
    return;
  }

  if(!opq->param[1]) {
    /* Store the data to the immediate */
    l_sw_imm_t_table[param_t[2]](opq, 1, opq->param[0]);
    return;
  }

  l_sw_t_table[param_t[1]][param_t[2]](opq, 1, opq->param[0]);
}
 
1859
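/* Set-flag generators (l.sfXX).  Both operands are sources: param[0] is rA,
 * param[1] is rB or an immediate (param_t[1] == T_NONE).  When one operand is
 * r0 the comparison collapses either to a compile-time constant flag or to a
 * single-operand "_null" op that compares the remaining temporary against
 * zero; with rA == r0 the sense of the comparison flips, which is why e.g.
 * gen_l_sfges dispatches into the sfles "_null" table. */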
DEF_1T_OP(generic_gen_op, l_sfeq_null_t_table, gen_op_sfeq_null);
DEF_1T_OP(imm_gen_op, l_sfeq_imm_t_table, gen_op_sfeq_imm);
DEF_2T_OP(generic_gen_op, l_sfeq_t_table, gen_op_sfeq);

void gen_l_sfeq(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0] && !opq->param[1]) {
    gen_op_set_flag(opq, 1);
    return;
  }

  if(!opq->param[0]) {
    if(param_t[1] == T_NONE) {
      if(!opq->param[1])
        gen_op_set_flag(opq, 1);
      else
        gen_op_clear_flag(opq, 1);
    } else
      l_sfeq_null_t_table[param_t[1]](opq, 1);
    return;
  }

  if(!opq->param[1]) {
    l_sfeq_null_t_table[param_t[0]](opq, 1);
    return;
  }

  if(param_t[1] == T_NONE)
    l_sfeq_imm_t_table[param_t[0]](opq, 1, opq->param[1]);
  else
    l_sfeq_t_table[param_t[0]][param_t[1]](opq, 1);
}

DEF_1T_OP(generic_gen_op, l_sfges_null_t_table, gen_op_sfges_null);
DEF_1T_OP(generic_gen_op, l_sfles_null_t_table, gen_op_sfles_null);
DEF_1T_OP(imm_gen_op, l_sfges_imm_t_table, gen_op_sfges_imm);
DEF_2T_OP(generic_gen_op, l_sfges_t_table, gen_op_sfges);

void gen_l_sfges(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0] && !opq->param[1]) {
    gen_op_set_flag(opq, 1);
    return;
  }

  if(!opq->param[0]) {
    /* sfles IS correct */
    if(param_t[1] == T_NONE) {
      if(0 >= (orreg_t)opq->param[1])
        gen_op_set_flag(opq, 1);
      else
        gen_op_clear_flag(opq, 1);
    } else
      l_sfles_null_t_table[param_t[1]](opq, 1);
    return;
  }

  if(!opq->param[1]) {
    l_sfges_null_t_table[param_t[0]](opq, 1);
    return;
  }

  if(param_t[1] == T_NONE)
    l_sfges_imm_t_table[param_t[0]](opq, 1, opq->param[1]);
  else
    l_sfges_t_table[param_t[0]][param_t[1]](opq, 1);
}

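/* For the unsigned comparisons the rA == r0 immediate cases fold to
 * constants: 0 >= imm only when imm == 0, 0 > imm never, 0 <= imm always,
 * 0 < imm whenever imm != 0.  That is why the branches below compare the
 * (unsigned) immediate directly and emit set_flag/clear_flag. */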
DEF_1T_OP(generic_gen_op, l_sfgeu_null_t_table, gen_op_sfgeu_null);
DEF_1T_OP(generic_gen_op, l_sfleu_null_t_table, gen_op_sfleu_null);
DEF_1T_OP(imm_gen_op, l_sfgeu_imm_t_table, gen_op_sfgeu_imm);
DEF_2T_OP(generic_gen_op, l_sfgeu_t_table, gen_op_sfgeu);

void gen_l_sfgeu(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0] && !opq->param[1]) {
    gen_op_set_flag(opq, 1);
    return;
  }

  if(!opq->param[0]) {
    /* sfleu IS correct */
    if(param_t[1] == T_NONE) {
      if(0 >= opq->param[1])
        gen_op_set_flag(opq, 1);
      else
        gen_op_clear_flag(opq, 1);
    } else
      l_sfleu_null_t_table[param_t[1]](opq, 1);
    return;
  }

  if(!opq->param[1]) {
    l_sfgeu_null_t_table[param_t[0]](opq, 1);
    return;
  }
  if(param_t[1] == T_NONE)
    l_sfgeu_imm_t_table[param_t[0]](opq, 1, opq->param[1]);
  else
    l_sfgeu_t_table[param_t[0]][param_t[1]](opq, 1);
}

DEF_1T_OP(generic_gen_op, l_sfgts_null_t_table, gen_op_sfgts_null);
DEF_1T_OP(generic_gen_op, l_sflts_null_t_table, gen_op_sflts_null);
DEF_1T_OP(imm_gen_op, l_sfgts_imm_t_table, gen_op_sfgts_imm);
DEF_2T_OP(generic_gen_op, l_sfgts_t_table, gen_op_sfgts);

void gen_l_sfgts(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0] && !opq->param[1]) {
    gen_op_clear_flag(opq, 1);
    return;
  }

  if(!opq->param[0]) {
    /* sflts IS correct */
    if(param_t[1] == T_NONE) {
      if(0 > (orreg_t)opq->param[1])
        gen_op_set_flag(opq, 1);
      else
        gen_op_clear_flag(opq, 1);
    } else
      l_sflts_null_t_table[param_t[1]](opq, 1);
    return;
  }

  if(!opq->param[1]) {
    l_sfgts_null_t_table[param_t[0]](opq, 1);
    return;
  }

  if(param_t[1] == T_NONE)
    l_sfgts_imm_t_table[param_t[0]](opq, 1, opq->param[1]);
  else
    l_sfgts_t_table[param_t[0]][param_t[1]](opq, 1);
}

DEF_1T_OP(generic_gen_op, l_sfgtu_null_t_table, gen_op_sfgtu_null);
DEF_1T_OP(generic_gen_op, l_sfltu_null_t_table, gen_op_sfltu_null);
DEF_1T_OP(imm_gen_op, l_sfgtu_imm_t_table, gen_op_sfgtu_imm);
DEF_2T_OP(generic_gen_op, l_sfgtu_t_table, gen_op_sfgtu);

void gen_l_sfgtu(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0] && !opq->param[1]) {
    gen_op_clear_flag(opq, 1);
    return;
  }

  if(!opq->param[0]) {
    /* sfltu IS correct */
    if(param_t[1] == T_NONE) {
      if(0 > opq->param[1])
        gen_op_set_flag(opq, 1);
      else
        gen_op_clear_flag(opq, 1);
    } else
      l_sfltu_null_t_table[param_t[1]](opq, 1);
    return;
  }

  if(!opq->param[1]) {
    l_sfgtu_null_t_table[param_t[0]](opq, 1);
    return;
  }

  if(param_t[1] == T_NONE)
    l_sfgtu_imm_t_table[param_t[0]](opq, 1, opq->param[1]);
  else
    l_sfgtu_t_table[param_t[0]][param_t[1]](opq, 1);
}

DEF_1T_OP(imm_gen_op, l_sfles_imm_t_table, gen_op_sfles_imm);
DEF_2T_OP(generic_gen_op, l_sfles_t_table, gen_op_sfles);

void gen_l_sfles(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0] && !opq->param[1]) {
    gen_op_set_flag(opq, 1);
    return;
  }

  if(!opq->param[0]) {
    /* sfges IS correct */
    if(param_t[1] == T_NONE) {
      if(0 <= (orreg_t)opq->param[1])
        gen_op_set_flag(opq, 1);
      else
        gen_op_clear_flag(opq, 1);
    } else
      l_sfges_null_t_table[param_t[1]](opq, 1);
    return;
  }

  if(!opq->param[1]) {
    l_sfles_null_t_table[param_t[0]](opq, 1);
    return;
  }

  if(param_t[1] == T_NONE)
    l_sfles_imm_t_table[param_t[0]](opq, 1, opq->param[1]);
  else
    l_sfles_t_table[param_t[0]][param_t[1]](opq, 1);
}

DEF_1T_OP(imm_gen_op, l_sfleu_imm_t_table, gen_op_sfleu_imm);
DEF_2T_OP(generic_gen_op, l_sfleu_t_table, gen_op_sfleu);

void gen_l_sfleu(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0] && !opq->param[1]) {
    gen_op_set_flag(opq, 1);
    return;
  }

  if(!opq->param[0]) {
    /* sfgeu IS correct */
    if(param_t[1] == T_NONE) {
      if(0 <= opq->param[1])
        gen_op_set_flag(opq, 1);
      else
        gen_op_clear_flag(opq, 1);
    } else
      l_sfgeu_null_t_table[param_t[1]](opq, 1);
    return;
  }

  if(!opq->param[1]) {
    l_sfleu_null_t_table[param_t[0]](opq, 1);
    return;
  }

  if(param_t[1] == T_NONE)
    l_sfleu_imm_t_table[param_t[0]](opq, 1, opq->param[1]);
  else
    l_sfleu_t_table[param_t[0]][param_t[1]](opq, 1);
}

DEF_1T_OP(imm_gen_op, l_sflts_imm_t_table, gen_op_sflts_imm);
DEF_2T_OP(generic_gen_op, l_sflts_t_table, gen_op_sflts);

void gen_l_sflts(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0] && !opq->param[1]) {
    gen_op_clear_flag(opq, 1);
    return;
  }

  if(!opq->param[0]) {
    /* sfgts IS correct */
    if(param_t[1] == T_NONE) {
      if(0 < (orreg_t)opq->param[1])
        gen_op_set_flag(opq, 1);
      else
        gen_op_clear_flag(opq, 1);
    } else
      l_sfgts_null_t_table[param_t[1]](opq, 1);
    return;
  }

  if(!opq->param[1]) {
    l_sflts_null_t_table[param_t[0]](opq, 1);
    return;
  }

  if(param_t[1] == T_NONE)
    l_sflts_imm_t_table[param_t[0]](opq, 1, opq->param[1]);
  else
    l_sflts_t_table[param_t[0]][param_t[1]](opq, 1);
}

DEF_1T_OP(imm_gen_op, l_sfltu_imm_t_table, gen_op_sfltu_imm);
DEF_2T_OP(generic_gen_op, l_sfltu_t_table, gen_op_sfltu);

void gen_l_sfltu(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0] && !opq->param[1]) {
    gen_op_clear_flag(opq, 1);
    return;
  }

  if(!opq->param[0]) {
    /* sfgtu IS correct */
    if(param_t[1] == T_NONE) {
      if(0 < opq->param[1])
        gen_op_set_flag(opq, 1);
      else
        gen_op_clear_flag(opq, 1);
    } else
      l_sfgtu_null_t_table[param_t[1]](opq, 1);
    return;
  }

  if(!opq->param[1]) {
    l_sfltu_null_t_table[param_t[0]](opq, 1);
    return;
  }

  if(param_t[1] == T_NONE)
    l_sfltu_imm_t_table[param_t[0]](opq, 1, opq->param[1]);
  else
    l_sfltu_t_table[param_t[0]][param_t[1]](opq, 1);
}

DEF_1T_OP(generic_gen_op, l_sfne_null_t_table, gen_op_sfne_null);
DEF_1T_OP(imm_gen_op, l_sfne_imm_t_table, gen_op_sfne_imm);
DEF_2T_OP(generic_gen_op, l_sfne_t_table, gen_op_sfne);

void gen_l_sfne(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0] && !opq->param[1]) {
    /* r0 != r0 never holds */
    gen_op_clear_flag(opq, 1);
    return;
  }

  if(!opq->param[0]) {
    if(param_t[1] == T_NONE) {
      if(opq->param[1])
        gen_op_set_flag(opq, 1);
      else
        gen_op_clear_flag(opq, 1);
    } else
      l_sfne_null_t_table[param_t[1]](opq, 1);
    return;
  }

  if(!opq->param[1]) {
    l_sfne_null_t_table[param_t[0]](opq, 1);
    return;
  }

  if(param_t[1] == T_NONE)
    l_sfne_imm_t_table[param_t[0]](opq, 1, opq->param[1]);
  else
    l_sfne_t_table[param_t[0]][param_t[1]](opq, 1);
}

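/* Shift generators (l.sll/l.sra/l.srl): rD == r0 drops the instruction,
 * rA == r0 clears the destination temporary, a shift amount of r0 (or an
 * immediate of 0) degenerates to a plain move, and otherwise the dispatch
 * depends on whether the shift count is an immediate or a temporary. */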
DEF_2T_OP(imm_gen_op, l_sll_imm_t_table, gen_op_sll_imm);
DEF_3T_OP(generic_gen_op, l_sll_t_table, gen_op_sll);

void gen_l_sll(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0])
    return;

  if(!opq->param[1]) {
    clear_t[param_t[0]](opq, 1);
    return;
  }

  if(!opq->param[2]) {
    move_t_t[param_t[0]][param_t[1]](opq, 1);
    return;
  }

  if(param_t[2] == T_NONE)
    l_sll_imm_t_table[param_t[0]][param_t[1]](opq, 1, opq->param[2]);
  else
    l_sll_t_table[param_t[0]][param_t[1]][param_t[2]](opq, 1);
}

DEF_2T_OP(imm_gen_op, l_sra_imm_t_table, gen_op_sra_imm);
DEF_3T_OP(generic_gen_op, l_sra_t_table, gen_op_sra);

void gen_l_sra(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0])
    return;

  if(!opq->param[1]) {
    clear_t[param_t[0]](opq, 1);
    return;
  }

  if(!opq->param[2]) {
    move_t_t[param_t[0]][param_t[1]](opq, 1);
    return;
  }

  if(param_t[2] == T_NONE)
    l_sra_imm_t_table[param_t[0]][param_t[1]](opq, 1, opq->param[2]);
  else
    l_sra_t_table[param_t[0]][param_t[1]][param_t[2]](opq, 1);
}

DEF_2T_OP(imm_gen_op, l_srl_imm_t_table, gen_op_srl_imm);
DEF_3T_OP(generic_gen_op, l_srl_t_table, gen_op_srl);

void gen_l_srl(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0])
    return;

  if(!opq->param[1]) {
    clear_t[param_t[0]](opq, 1);
    return;
  }

  if(!opq->param[2]) {
    move_t_t[param_t[0]][param_t[1]](opq, 1);
    return;
  }

  if(param_t[2] == T_NONE)
    l_srl_imm_t_table[param_t[0]][param_t[1]](opq, 1, opq->param[2]);
  else
    l_srl_t_table[param_t[0]][param_t[1]][param_t[2]](opq, 1);
}

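/* l.sub special cases: rA == rB gives zero, rA == r0 negates the second
 * operand (or loads -imm directly), and a second operand of r0 or zero is a
 * plain move of rA into the destination temporary. */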
DEF_2T_OP(generic_gen_op, l_neg_t_table, gen_op_neg);
DEF_3T_OP(generic_gen_op, l_sub_t_table, gen_op_sub);

void gen_l_sub(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0])
    return;

  if((param_t[2] != T_NONE) && (opq->param[1] == opq->param[2])) {
    clear_t[param_t[0]](opq, 1);
    return;
  }

  if(!opq->param[1] && !opq->param[2]) {
    clear_t[param_t[0]](opq, 1);
    return;
  }

  if(!opq->param[1]) {
    if(param_t[2] == T_NONE)
      mov_t_imm[param_t[0]](opq, 1, -opq->param[2]);
    else
      l_neg_t_table[param_t[0]][param_t[2]](opq, 1);
    return;
  }

  if(!opq->param[2]) {
    move_t_t[param_t[0]][param_t[1]](opq, 1);
    return;
  }

  l_sub_t_table[param_t[0]][param_t[1]][param_t[2]](opq, 1);
}

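/* l.sys and l.trap both hand control back to the exception machinery: the
 * "prep" op appears to set up the corresponding exception (with a delay-slot
 * variant), after which l.sys also runs the scheduler and jumps out of the
 * translated block. */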
/* FIXME: This will not work if the l.sys is in a delay slot */
void gen_l_sys(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(do_stats)
    /* All gprs are current since this insn doesn't touch any reg */
    gen_op_analysis(opq, 1);

  if(!delay_slot)
    gen_op_prep_sys(opq, 1);
  else
    gen_op_prep_sys_delay(opq, 1);

  gen_op_do_sched(opq, 1);
  gen_op_do_jump(opq, 1);
}

/* FIXME: This will not work if the l.trap is in a delay slot */
void gen_l_trap(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(do_stats)
    /* All gprs are current since this insn doesn't touch any reg */
    gen_op_analysis(opq, 1);

  if(!delay_slot)
    gen_op_prep_trap(opq, 1);
  else
    gen_op_prep_trap_delay(opq, 1);
}

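/* l.xor special cases: rA == rB yields zero, a zero operand reduces to a
 * move (or an immediate load), otherwise dispatch on immediate vs. register
 * form. */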
DEF_2T_OP(imm_gen_op, l_xor_imm_t_table, gen_op_xor_imm);
/* FIXME: Make unused elements NULL */
DEF_3T_OP_NEQ(generic_gen_op, l_xor_t_table, gen_op_xor);

void gen_l_xor(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0])
    return;

  if((param_t[2] != T_NONE) && (opq->param[1] == opq->param[2])) {
    clear_t[param_t[0]](opq, 1);
    return;
  }

  if(!opq->param[2]) {
    if((param_t[2] == T_NONE) && (opq->param[0] == opq->param[1]))
      return;
    move_t_t[param_t[0]][param_t[1]](opq, 1);
    return;
  }

  if(!opq->param[1]) {
    if(param_t[2] == T_NONE) {
      mov_t_imm[param_t[0]](opq, 1, opq->param[2]);
      return;
    }
    move_t_t[param_t[0]][param_t[2]](opq, 1);
    return;
  }

  if(param_t[2] == T_NONE)
    l_xor_imm_t_table[param_t[0]][param_t[1]](opq, 1, opq->param[2]);
  else
    l_xor_t_table[param_t[0]][param_t[1]][param_t[2]](opq, 1);
}

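/* Any unrecognised or unimplemented opcode lands here: queue an
 * illegal-instruction exception (delay-slot variant if needed) and jump out
 * of the recompiled block. */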
void gen_l_invalid(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!delay_slot) {
    gen_op_illegal(opq, 1);
    gen_op_do_jump(opq, 1);
  } else {
    gen_op_illegal_delay(opq, 1);
    gen_op_do_jump(opq, 1);
  }
}

/*----------------------------------[ Floating point instructions (stubs) ]---*/
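/* None of the single-precision lf.* instructions are recompiled; every stub
 * below reuses gen_l_invalid, so executing one raises an illegal-instruction
 * exception. */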
void gen_lf_add_s(struct op_queue *opq, int param_t[3], int delay_slot)
{
  gen_l_invalid(opq, param_t, delay_slot);
}

void gen_lf_div_s(struct op_queue *opq, int param_t[3], int delay_slot)
{
  gen_l_invalid(opq, param_t, delay_slot);
}

void gen_lf_ftoi_s(struct op_queue *opq, int param_t[3], int delay_slot)
{
  gen_l_invalid(opq, param_t, delay_slot);
}

void gen_lf_itof_s(struct op_queue *opq, int param_t[3], int delay_slot)
{
  gen_l_invalid(opq, param_t, delay_slot);
}

void gen_lf_madd_s(struct op_queue *opq, int param_t[3], int delay_slot)
{
  gen_l_invalid(opq, param_t, delay_slot);
}

void gen_lf_mul_s(struct op_queue *opq, int param_t[3], int delay_slot)
{
  gen_l_invalid(opq, param_t, delay_slot);
}

void gen_lf_rem_s(struct op_queue *opq, int param_t[3], int delay_slot)
{
  gen_l_invalid(opq, param_t, delay_slot);
}

void gen_lf_sfeq_s(struct op_queue *opq, int param_t[3], int delay_slot)
{
  gen_l_invalid(opq, param_t, delay_slot);
}

void gen_lf_sfge_s(struct op_queue *opq, int param_t[3], int delay_slot)
{
  gen_l_invalid(opq, param_t, delay_slot);
}

void gen_lf_sfgt_s(struct op_queue *opq, int param_t[3], int delay_slot)
{
  gen_l_invalid(opq, param_t, delay_slot);
}

void gen_lf_sfle_s(struct op_queue *opq, int param_t[3], int delay_slot)
{
  gen_l_invalid(opq, param_t, delay_slot);
}

void gen_lf_sflt_s(struct op_queue *opq, int param_t[3], int delay_slot)
{
  gen_l_invalid(opq, param_t, delay_slot);
}

void gen_lf_sfne_s(struct op_queue *opq, int param_t[3], int delay_slot)
{
  gen_l_invalid(opq, param_t, delay_slot);
}

void gen_lf_sub_s(struct op_queue *opq, int param_t[3], int delay_slot)
{
  gen_l_invalid(opq, param_t, delay_slot);
}
