OpenCores
URL https://opencores.org/ocsvn/openrisc/openrisc/trunk

Subversion Repositories openrisc

openrisc/tags/or1ksim/or1ksim-0.4.0rc1/cpu/or32/dyn-rec.c (blame information for rev 403)
/* dyn-rec.c -- Dynamic recompiler implementation for or32

   Copyright (C) 2005 György `nog' Jeney, nog@sdf.lonestar.org

This file is part of OpenRISC 1000 Architectural Simulator.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <signal.h>
#include <errno.h>
#include <execinfo.h>

#include "config.h"

#ifdef HAVE_INTTYPES_H
#include <inttypes.h>
#endif

#include "port.h"
#include "arch.h"
#include "immu.h"
#include "abstract.h"
#include "opcode/or32.h"
#include "spr-defs.h"
#include "execute.h"
#include "except.h"
#include "spr-defs.h"
#include "sim-config.h"
#include "sched.h"
#include "i386-regs.h"
#include "def-op-t.h"
#include "dyn-rec.h"
#include "gen-ops.h"
#include "op-support.h"
#include "toplevel-support.h"


/* NOTE: All openrisc (or) addresses in this file are *PHYSICAL* addresses */

/* FIXME: Optimise sorted list adding */

typedef void (*generic_gen_op)(struct op_queue *opq, int end);
typedef void (*imm_gen_op)(struct op_queue *opq, int end, uorreg_t imm);

void gen_l_invalid(struct op_queue *opq, int param_t[3], int delay_slot);

/* ttg->temporary to gpr */
DEF_GPR_OP(generic_gen_op, gen_op_move_gpr_t, gen_op_ttg_gpr);
/* gtt->gpr to temporary */
DEF_GPR_OP(generic_gen_op, gen_op_move_t_gpr, gen_op_gtt_gpr);

DEF_1T_OP(imm_gen_op, calc_insn_ea_table, gen_op_calc_insn_ea);

/* Linker stubs.  This will allow the linker to link in op.o.  The relocations
 * that the linker does for these will be irrelevant anyway, since we patch the
 * relocations during recompilation. */
uorreg_t __op_param1;
uorreg_t __op_param2;
uorreg_t __op_param3;

/* The number of bytes that a dynamically recompiled page should be enlarged by */
#define RECED_PAGE_ENLARGE_BY 51200

/* The number of entries that the micro operations array in op_queue should be
 * enlarged by */
#define OPS_ENLARGE_BY 5

#define T_NONE (-1)

/* Temporary is used as a source operand */
#define TFLAG_SRC 1
/* Temporary is used as a destination operand */
#define TFLAG_DST 2
/* Temporary has been saved to permanent storage */
#define TFLAG_SAVED 4
/* Temporary contains the value of the register before the instruction execution
 * occurs (either by an explicit reg->t move or implicitly being left over from
 * a previous instruction) */
#define TFLAG_SOURCED 8
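
/* Descriptive note: each op_queue entry tracks NUM_T_REGS host-register
 * temporaries.  reg_t[] names the OpenRISC GPR currently cached in each
 * temporary (32 is used as an "empty" sentinel, see recompile_page()), and
 * tflags[] combines the TFLAG_* bits above to record how that temporary has
 * been used by the instruction being recompiled. */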
 
/* FIXME: Put this into some header */
extern int do_stats;

static int sigsegv_state = 0;
static void *sigsegv_addr = NULL;

void dyn_ret_stack_prot(void);

void dyn_sigsegv_debug(int u, siginfo_t *siginf, void *dat)
{
  struct dyn_page *dp;
  FILE *f;
  char filen[18]; /* 18 == strlen("or_page.%08x") + 1 */
  int i;
  struct sigcontext *sigc = dat;

  if(!sigsegv_state) {
    sigsegv_addr = siginf->si_addr;
  } else {
    fprintf(stderr, "Nested SIGSEGV occurred, dumping next chunk of info\n");
    sigsegv_state++;
  }

  /* First dump all the data that does not need dereferencing to get */
  switch(sigsegv_state) {
  case 0:
    fflush(stderr);
    fprintf(stderr, "Segmentation fault on access to %p at 0x%08lx, (or address: 0x%"PRIxADDR")\n\n",
            sigsegv_addr, sigc->eip, cpu_state.pc);
    sigsegv_state++;
  case 1:
    /* Run through the recompiled pages, dumping them to disk as we go */
    for(i = 0; i < (2 << (32 - immu_state->pagesize_log2)); i++) {
      dp = cpu_state.dyn_pages[i];
      if(!dp)
        continue;
      fprintf(stderr, "Dumping%s page 0x%"PRIxADDR" recompiled to %p (len: %u) to disk\n",
             dp->dirty ? " dirty" : "", dp->or_page, dp->host_page,
             dp->host_len);
      fflush(stdout);

      sprintf(filen, "or_page.%"PRIxADDR, dp->or_page);
      if(!(f = fopen(filen, "w"))) {
        fprintf(stderr, "Unable to open %s to dump the recompiled page to: %s\n",
                filen, strerror(errno));
        continue;
      }
      if(fwrite(dp->host_page, dp->host_len, 1, f) < 1)
        fprintf(stderr, "Unable to write recompiled data to file: %s\n",
                strerror(errno));

      fclose(f);
    }
    sigsegv_state++;
  case 2:
    sim_done();
  }
}
 
155
struct dyn_page *new_dp(oraddr_t page)
156
{
157
  struct dyn_page *dp = malloc(sizeof(struct dyn_page));
158
  dp->or_page = IADDR_PAGE(page);
159
 
160
  dp->locs = malloc(sizeof(void *) * (immu_state->pagesize / 4));
161
 
162
  dp->host_len = 0;
163
  dp->host_page = NULL;
164
  dp->dirty = 1;
165
 
166
  if(do_stats) {
167
    dp->insns = malloc(immu_state->pagesize);
168
    dp->insn_indexs = malloc(sizeof(unsigned int) * (immu_state->pagesize / 4));
169
  }
170
 
171
  cpu_state.dyn_pages[dp->or_page >> immu_state->pagesize_log2] = dp;
172
  return dp;
173
}
174
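
/* A freshly allocated page starts out dirty so that dyn_main() recompiles it
 * before it is entered for the first time. */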
 
175
void dyn_main(void)
176
{
177
  struct dyn_page *target_dp;
178
  oraddr_t phys_page;
179
 
180
  setjmp(cpu_state.excpt_loc);
181
  for(;;) {
182
    phys_page = immu_translate(cpu_state.pc);
183
 
184
/*
185
    printf("Recompiled code jumping out to %"PRIxADDR" from %"PRIxADDR"\n",
186
           phys_page, cpu_state.sprs[SPR_PPC] - 4);
187
*/
188
 
189
    /* immu_translate() adds the hit delay to runtime.sim.mem_cycles but we add
190
     * it to the cycles when the instruction is executed so if we don't reset it
191
     * now it will produce wrong results */
192
    runtime.sim.mem_cycles = 0;
193
 
194
    target_dp = cpu_state.dyn_pages[phys_page >> immu_state->pagesize_log2];
195
 
196
    if(!target_dp)
197
      target_dp = new_dp(phys_page);
198
 
199
    /* Since writes to the 0x0-0xff range do not mark a page as dirty, recompile
     * the 0x0 page if the jump is to that location */
201
    if(phys_page < 0x100)
202
      target_dp->dirty = 1;
203
 
204
    if(target_dp->dirty)
205
      recompile_page(target_dp);
206
 
207
    cpu_state.curr_page = target_dp;
208
 
209
    /* FIXME: If the page is backed by more than one type of memory, this will
210
     * produce wrong results */
211
    cpu_state.cycles_dec = target_dp->delayr;
212
    if(cpu_state.sprs[SPR_SR] & SPR_SR_IME)
213
      /* Add the mmu hit delay to the cycle counter */
214
      cpu_state.cycles_dec -= immu_state->hitdelay;
215
 
216
    /* FIXME: ebp, ebx, esi and edi are expected to be preserved across function
217
     * calls but the recompiled code trashes them... */
218
    enter_dyn_code(phys_page, target_dp);
219
  }
220
}
221
 
222
static void immu_retranslate(void *dat)
223
{
224
  int got_en_dis = (int)dat;
225
  immu_translate(cpu_state.pc);
226
  runtime.sim.mem_cycles = 0;
227
 
228
  /* Only update the cycle decrementer if the mmu got enabled or disabled */
229
  if(got_en_dis == IMMU_GOT_ENABLED)
230
    /* Add the mmu hit delay to the cycle counter */
231
    cpu_state.cycles_dec = cpu_state.curr_page->delayr - immu_state->hitdelay;
232
  else if(got_en_dis == IMMU_GOT_DISABLED)
233
    cpu_state.cycles_dec = cpu_state.curr_page->delayr;
234
}
235
 
236
/* This is called whenever the immu is either enabled/disabled or reconfigured
 * while enabled.  This checks if an itlb miss would occur and updates the immu
 * hit delay counter */
239
void recheck_immu(int got_en_dis)
240
{
241
  oraddr_t addr;
242
 
243
  if(cpu_state.delay_insn)
244
    addr = cpu_state.pc_delay;
245
  else
246
    addr = cpu_state.pc + 4;
247
 
248
  if(IADDR_PAGE(cpu_state.pc) == IADDR_PAGE(addr))
249
    /* Schedule a job to do immu_translate() */
250
    SCHED_ADD(immu_retranslate, (void *)got_en_dis, 0);
251
}
252
 
253
/* Runs the scheduler.  Called from except_handler (and dirtyfy_page below) */
254
void run_sched_out_of_line(void)
255
{
256
  oraddr_t off = (cpu_state.pc & immu_state->page_offset_mask) >> 2;
257
 
258
  if(do_stats) {
259
    cpu_state.iqueue.insn_addr = cpu_state.pc;
260
    cpu_state.iqueue.insn = cpu_state.curr_page->insns[off];
261
    cpu_state.iqueue.insn_index = cpu_state.curr_page->insn_indexs[off];
262
    runtime.cpu.instructions++;
263
    analysis(&cpu_state.iqueue);
264
  }
265
 
266
  /* Run the scheduler */
267
  scheduler.job_queue->time += cpu_state.cycles_dec;
268
  runtime.sim.cycles -= cpu_state.cycles_dec;
269
 
270
  op_join_mem_cycles();
271
  if(scheduler.job_queue->time <= 0)
272
    do_scheduler();
273
}
274
 
275
/* Signals a page as dirty */
276
static void dirtyfy_page(struct dyn_page *dp)
277
{
278
  oraddr_t check;
279
 
280
  printf("Dirtyfying page 0x%"PRIxADDR"\n", dp->or_page);
281
 
282
  dp->dirty = 1;
283
 
284
  /* If the execution is currently in the page that was touched then recompile
285
   * it now and jump back to the point of execution */
286
  check = cpu_state.delay_insn ? cpu_state.pc_delay : cpu_state.pc + 4;
287
  if(IADDR_PAGE(check) == dp->or_page) {
288
    run_sched_out_of_line();
289
    recompile_page(dp);
290
 
291
    cpu_state.delay_insn = 0;
292
 
293
    /* Jump out to the next instruction */
294
    do_jump(check);
295
  }
296
}
297
 
298
/* Checks to see if a write happened to a recompiled page.  If so marks it as
299
 * dirty */
300
void dyn_checkwrite(oraddr_t addr)
301
{
302
  /* FIXME: Do this with mprotect() */
303
  struct dyn_page *dp = cpu_state.dyn_pages[addr >> immu_state->pagesize_log2];
304
 
305
  /* Since the locations 0x0-0xff are nearly always written to in an exception
306
   * handler, ignore any writes to these locations.  If code ends up jumping
307
   * out there, we'll recompile when the jump actually happens. */
308
  if((addr > 0x100) && dp && !dp->dirty)
309
    dirtyfy_page(dp);
310
}
311
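
/* One possible shape for the mprotect() approach mentioned in the FIXME above
 * (a sketch only, not wired in): after recompile_page(), mprotect() the
 * simulated page's backing store read-only, let the SIGSEGV handler identify
 * the faulting page, call dirtyfy_page() on it and restore
 * PROT_READ|PROT_WRITE, which would replace the per-store dyn_checkwrite()
 * call. */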
 
312
/* Moves the temporary t to its permanent storage if it has been used as a
 * destination register */
314
static void ship_t_out(struct op_queue *opq, unsigned int t)
315
{
316
  unsigned int gpr = opq->reg_t[t];
317
 
318
  for(; opq; opq = opq->prev) {
319
    if(opq->reg_t[t] != gpr)
320
      return;
321
    if((opq->tflags[t] & TFLAG_DST) && !(opq->tflags[t] & TFLAG_SAVED)) {
322
      opq->tflags[t] |= TFLAG_SAVED;
323
 
324
      /* FIXME: Check if this is still necessary */
      /* Before taking the temporaries out, temporarily remove the op_do_sched
       * operation such that dyn_page->ts_bound shall be correct before the
       * scheduler runs */
328
      if(opq->num_ops && (opq->ops[opq->num_ops - 1] == op_do_sched_indx)) {
329
        opq->num_ops--;
330
        gen_op_move_gpr_t[t][gpr](opq, 1);
331
        gen_op_do_sched(opq, 1);
332
        return;
333
      }
334
 
335
      gen_op_move_gpr_t[t][gpr](opq, 1);
336
 
337
      return;
338
    }
339
  }
340
}
341
 
342
static void ship_gprs_out_t(struct op_queue *opq)
343
{
344
  int i;
345
 
346
  if(!opq)
347
    return;
348
 
349
  for(i = 0; i < NUM_T_REGS; i++) {
350
    if(opq->reg_t[i] < 32)
351
      /* Ship temporaries out in the last opq that actually touched it */
352
      ship_t_out(opq, i);
353
  }
354
}
355
 
356
/* FIXME: Look at the following instructions to make a better guess at which
357
 * temporary to return */
358
static int find_t(struct op_queue *opq, unsigned int reg)
359
{
360
  int i, j, t = -1;
361
 
362
  for(i = 0; i < NUM_T_REGS; i++) {
363
    if(opq->reg_t[i] == reg)
364
      return i;
365
 
366
    /* Ok, we have found an as-yet unused temporary, check if it is needed
367
     * later in this instruction */
368
    for(j = 0; j < opq->param_num; j++) {
369
      if((opq->param_type[j] & OPTYPE_REG) && (opq->param[j] == opq->reg_t[i]))
370
        break;
371
    }
372
 
373
    if(j != opq->param_num)
374
      continue;
375
 
376
    /* We have found the temporary (temporarily:) fit for use */
377
    if((t == -1) || (opq->reg_t[i] == 32))
378
      t = i;
379
  }
380
 
381
  return t;
382
}
383
 
384
/* Checks if there is enough space in dp->host_page, if not grow it */
385
void *enough_host_page(struct dyn_page *dp, void *cur, unsigned int *len,
386
                       unsigned int amount)
387
{
388
  unsigned int used = cur - dp->host_page;
389
 
390
  /* The array is long enough */
391
  if((used + amount) <= *len)
392
    return cur;
393
 
394
  /* Reallocate */
395
  *len += RECED_PAGE_ENLARGE_BY;
396
 
397
  if(!(dp->host_page = realloc(dp->host_page, *len))) {
398
    fprintf(stderr, "OOM\n");
399
    exit(1);
400
  }
401
 
402
  return dp->host_page + used;
403
}
404
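
/* Note that the realloc() above may move dp->host_page; callers continue
 * writing through the returned pointer, which is already rebased onto the
 * (possibly new) block. */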
 
405
/* Adds an operation to the opq */
406
void add_to_opq(struct op_queue *opq, int end, int op)
407
{
408
  if(opq->num_ops == opq->ops_len) {
409
    opq->ops_len += OPS_ENLARGE_BY;
410
    if(!(opq->ops = realloc(opq->ops, opq->ops_len * sizeof(int)))) {
411
      fprintf(stderr, "OOM\n");
412
      exit(1);
413
    }
414
  }
415
 
416
  if(end)
417
    opq->ops[opq->num_ops] = op;
418
  else {
419
    /* Shift everything over by one */
420
    memmove(opq->ops + 1, opq->ops, opq->num_ops* sizeof(int));
421
    opq->ops[0] = op;
422
  }
423
 
424
  opq->num_ops++;
425
}
426
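
/* Typical use, as in gen_op_mark_loc() below: add_to_opq(opq, 1, op_mark_loc_indx)
 * appends the micro-operation; passing end == 0 instead prepends it, shifting
 * the existing operations up by one slot. */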
 
427
static void gen_op_mark_loc(struct op_queue *opq, int end)
428
{
429
  add_to_opq(opq, end, op_mark_loc_indx);
430
}
431
 
432
/* Adds a parameter to the opq */
433
void add_to_op_params(struct op_queue *opq, int end, unsigned long param)
434
{
435
  if(opq->num_ops_param == opq->ops_param_len) {
436
    opq->ops_param_len += OPS_ENLARGE_BY;
437
    if(!(opq->ops_param = realloc(opq->ops_param, opq->ops_param_len * sizeof(int)))) {
438
      fprintf(stderr, "OOM\n");
439
      exit(1);
440
    }
441
  }
442
 
443
  if(end)
444
    opq->ops_param[opq->num_ops_param] = param;
445
  else {
446
    /* Shift everything over by one */
447
    memmove(opq->ops_param + 1, opq->ops_param, opq->num_ops_param * sizeof(int));
448
    opq->ops_param[0] = param;
449
  }
450
 
451
  opq->num_ops_param++;
452
}
453
 
454
/* Initialises the recompiler */
455
void init_dyn_recomp(void)
456
{
457
  struct sigaction sigact;
458
  struct op_queue *opq = NULL;
459
  unsigned int i;
460
 
461
  cpu_state.opqs = NULL;
462
 
463
  /* Allocate the operation queue list (+1 for the page chaining) */
464
  for(i = 0; i < (immu_state->pagesize / 4) + 1; i++) {
465
    if(!(opq = malloc(sizeof(struct op_queue)))) {
466
      fprintf(stderr, "OOM\n");
467
      exit(1);
468
    }
469
 
470
    /* initialise some fields */
471
    opq->ops_len = 0;
472
    opq->ops = NULL;
473
    opq->ops_param_len = 0;
474
    opq->ops_param = NULL;
475
    opq->xref = 0;
476
 
477
    if(cpu_state.opqs)
478
      cpu_state.opqs->prev = opq;
479
 
480
    opq->next = cpu_state.opqs;
481
    cpu_state.opqs = opq;
482
  }
483
 
484
  opq->prev = NULL;
485
 
486
  cpu_state.curr_page = NULL;
487
  if(!(cpu_state.dyn_pages = malloc(sizeof(void *) * (2 << (32 -
488
                                                immu_state->pagesize_log2))))) {
489
    fprintf(stderr, "OOM\n");
490
    exit(1);
491
  }
492
  memset(cpu_state.dyn_pages, 0,
493
         sizeof(void *) * (2 << (32 - immu_state->pagesize_log2)));
494
 
495
  /* Register our segmentation fault handler */
496
  sigact.sa_sigaction = dyn_sigsegv_debug;
497
  memset(&sigact.sa_mask, 0, sizeof(sigact.sa_mask));
498
  sigact.sa_flags = SA_SIGINFO | SA_NOMASK;
499
  if(sigaction(SIGSEGV, &sigact, NULL))
500
    printf("WARN: Unable to install SIGSEGV handler! Don't expect to be able to debug the recompiler.\n");
501
 
502
  /* FIXME: Find a better place for this */
503
    { /* Needed by execution */
504
      extern int do_stats;
505
      do_stats = config.cpu.dependstats || config.cpu.superscalar || config.cpu.dependstats
506
              || config.sim.history || config.sim.exe_log;
507
    }
508
 
509
  printf("Recompile engine up and running\n");
510
}
511
 
512
/* Parses instructions and their operands and populates opq with them */
513
static void eval_insn_ops(struct op_queue *opq, oraddr_t addr)
514
{
515
  int breakp;
516
  struct insn_op_struct *opd;
517
 
518
  for(; opq->next; opq = opq->next, addr += 4) {
519
    opq->param_num = 0;
520
    breakp = 0;
521
    opq->insn = eval_insn(addr, &breakp);
522
 
523
    /* FIXME: If a breakpoint is set at this location, insert exception code */
524
    if(breakp) {
525
      fprintf(stderr, "FIXME: Insert breakpoint code\n");
526
    }
527
 
528
    opq->insn_index = insn_decode(opq->insn);
529
 
530
    if(opq->insn_index == -1)
531
      continue;
532
 
533
    opd = op_start[opq->insn_index];
534
 
535
    do {
536
      opq->param[opq->param_num] = eval_operand_val(opq->insn, opd);
537
      opq->param_type[opq->param_num] = opd->type;
538
 
539
      opq->param_num++;
540
      while(!(opd->type & OPTYPE_OP)) opd++;
541
    } while(!(opd++->type & OPTYPE_LAST));
542
  }
543
}
544
 
545
/* Adds code to the opq for the instruction pointed to by addr */
546
static void recompile_insn(struct op_queue *opq, int delay_insn)
547
{
548
  int j, k;
549
  int param_t[5]; /* Which temporary the parameters reside in */
550
 
551
  /* Check if we have an illegal instruction */
552
  if(opq->insn_index == -1) {
553
    gen_l_invalid(opq, NULL, delay_insn);
554
    return;
555
  }
556
 
557
  /* If we are recompiling an instruction that has a delay slot and is itself in
   * a delay slot, ignore it.  This is undefined behaviour. */
559
  if(delay_insn && (or32_opcodes[opq->insn_index].flags & OR32_IF_DELAY))
560
    return;
561
 
562
  param_t[0] = T_NONE;
563
  param_t[1] = T_NONE;
564
  param_t[2] = T_NONE;
565
  param_t[3] = T_NONE;
566
  param_t[4] = T_NONE;
567
 
568
  /* Jump instructions are special since they have a delay slot and thus they
   * need to control the exact operation sequence.  Special case these here to
   * avoid having loads of if(!(. & OR32_IF_DELAY)) below */
571
  if(or32_opcodes[opq->insn_index].flags & OR32_IF_DELAY) {
572
    /* Jump instructions don't have a disposition */
573
    or32_opcodes[opq->insn_index].exec(opq, param_t, delay_insn);
574
 
575
    /* Analysis is done by the individual jump instructions */
576
    /* Jump instructions don't touch runtime.sim.mem_cycles */
577
    /* Jump instructions run their own scheduler */
578
    return;
579
  }
580
 
581
  /* Before an exception takes place, all registers must be stored. */
582
  if((or32_opcodes[opq->insn_index].func_unit == it_exception)) {
583
    ship_gprs_out_t(opq);
584
 
585
    or32_opcodes[opq->insn_index].exec(opq, param_t, delay_insn);
586
    return;
587
  }
588
 
589
  for(j = 0; j < opq->param_num; j++) {
590
    if(!(opq->param_type[j] & OPTYPE_REG))
591
      continue;
592
 
593
    /* Never, ever, move r0 into a temporary */
594
    if(!opq->param[j])
595
      continue;
596
 
597
    k = find_t(opq, opq->param[j]);
598
 
599
    param_t[j] = k;
600
 
601
    if(opq->reg_t[k] == opq->param[j]) {
602
      if(!(opq->param_type[j] & OPTYPE_DST) &&
603
         !(opq->tflags[k] & TFLAG_SOURCED)) {
604
        gen_op_move_t_gpr[k][opq->reg_t[k]](opq, 0);
605
        opq->tflags[k] |= TFLAG_SOURCED;
606
      }
607
 
608
      if(opq->param_type[j] & OPTYPE_DST)
609
        opq->tflags[k] |= TFLAG_DST;
610
      else
611
        opq->tflags[k] |= TFLAG_SRC;
612
 
613
      continue;
614
    }
615
 
616
    if(opq->reg_t[k] < 32) {
617
      /* Only ship the temporary out if it has been used as a destination
618
       * register */
619
      ship_t_out(opq, k);
620
    }
621
 
622
    if(opq->param_type[j] & OPTYPE_DST)
623
      opq->tflags[k] = TFLAG_DST;
624
    else
625
      opq->tflags[k] = TFLAG_SRC;
626
 
627
    opq->reg_t[k] = opq->param[j];
628
 
629
    /* Only generate code to move the register into a temporary if it is used as
630
     * a source operand */
631
    if(!(opq->param_type[j] & OPTYPE_DST)) {
632
      gen_op_move_t_gpr[k][opq->reg_t[k]](opq, 0);
633
      opq->tflags[k] |= TFLAG_SOURCED;
634
    }
635
  }
636
 
637
  /* To get the execution log correct for instructions like l.lwz r4,0(r4) the
638
   * effective address needs to be calculated before the instruction is
639
   * simulated */
640
  if(do_stats) {
641
    for(j = 0; j < opq->param_num; j++) {
642
      if(!(opq->param_type[j] & OPTYPE_DIS))
643
        continue;
644
 
645
      if(!opq->param[j + 1])
646
        gen_op_store_insn_ea(opq, 1, opq->param[j]);
647
      else
648
        calc_insn_ea_table[param_t[j + 1]](opq, 1, opq->param[j]);
649
    }
650
  }
651
 
652
  or32_opcodes[opq->insn_index].exec(opq, param_t, delay_insn);
653
 
654
  if(do_stats) {
655
    ship_gprs_out_t(opq);
656
    gen_op_analysis(opq, 1);
657
  }
658
 
659
  /* The call to join_mem_cycles() could be put into the individual operations
660
   * that emulate the load/store instructions, but then it would be added to
661
   * the cycle counter before analysis() is called, which is not how the complex
662
   * execution model does it. */
663
  if((or32_opcodes[opq->insn_index].func_unit == it_load) ||
664
     (or32_opcodes[opq->insn_index].func_unit == it_store))
665
    gen_op_join_mem_cycles(opq, 1);
666
 
667
  /* Delay slot instructions get a special scheduler, thus don't generate it
668
   * here */
669
  if(!delay_insn)
670
    gen_op_do_sched(opq, 1);
671
}
672
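
/* In summary, recompile_insn() emits per instruction: gpr->temporary moves for
 * the source operands, the instruction's own micro-operations via
 * or32_opcodes[].exec, the optional statistics/memory-cycle bookkeeping, and
 * (outside delay slots) the scheduler operation. */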
 
673
/* Recompiles the page associated with *dyn */
674
void recompile_page(struct dyn_page *dyn)
675
{
676
  unsigned int j;
677
  struct op_queue *opq = cpu_state.opqs;
678
  oraddr_t rec_addr = dyn->or_page;
679
  oraddr_t rec_page = dyn->or_page;
680
  void **loc;
681
 
682
  /* The start of the next page */
683
  rec_page += immu_state->pagesize;
684
 
685
  printf("Recompileing page %"PRIxADDR"\n", rec_addr);
686
  fflush(stdout);
687
 
688
  /* Mark all temporaries as not containing a register */
689
  for(j = 0; j < NUM_T_REGS; j++) {
690
    opq->reg_t[j] = 32; /* Out-of-range registers */
691
    opq->tflags[j] = 0;
692
  }
693
 
694
  dyn->delayr = -verify_memoryarea(rec_addr)->ops.delayr;
695
 
696
  opq->num_ops = 0;
697
  opq->num_ops_param = 0;
698
 
699
  eval_insn_ops(opq, rec_addr);
700
 
701
  /* Insert code to check if the first instruction is executed in a delay slot */
702
  gen_op_check_delay_slot(opq, 1, 0);
703
  recompile_insn(opq, 1);
704
  ship_gprs_out_t(opq);
705
  gen_op_do_sched_delay(opq, 1);
706
  gen_op_clear_delay_insn(opq, 1);
707
  gen_op_do_jump_delay(opq, 1);
708
  gen_op_do_jump(opq, 1);
709
  gen_op_mark_loc(opq, 1);
710
 
711
  for(j = 0; j < NUM_T_REGS; j++)
712
    opq->reg_t[j] = 32; /* Out-of-range registers */
713
 
714
  for(; rec_addr < rec_page; rec_addr += 4, opq = opq->next) {
715
    if(opq->prev) {
716
      opq->num_ops = 0;
717
      opq->num_ops_param = 0;
718
    }
719
    opq->jump_local = -1;
720
    opq->not_jump_loc = -1;
721
 
722
    opq->insn_addr = rec_addr;
723
 
724
    for(j = 0; j < NUM_T_REGS; j++)
725
      opq->tflags[j] = TFLAG_SOURCED;
726
 
727
    /* Check if this location is cross referenced */
728
    if(opq->xref) {
729
      /* If the current address is cross-referenced, the temporaries shall be
730
       * in an undefined state, so we must assume that no registers reside in
731
       * them */
732
      /* Ship out the current set of registers from the temporaries */
733
      if(opq->prev) {
734
        ship_gprs_out_t(opq->prev);
735
        for(j = 0; j < NUM_T_REGS; j++) {
736
          opq->reg_t[j] = 32;
737
          opq->prev->reg_t[j] = 32;
738
        }
739
      }
740
    }
741
 
742
    recompile_insn(opq, 0);
743
 
744
    /* Store the state of the temporaries */
745
    memcpy(opq->next->reg_t, opq->reg_t, sizeof(opq->reg_t));
746
  }
747
 
748
  dyn->dirty = 0;
749
 
750
  /* Ship temporaries out to the corresponding registers */
751
  ship_gprs_out_t(opq->prev);
752
 
753
  opq->num_ops = 0;
754
  opq->num_ops_param = 0;
755
  opq->not_jump_loc = -1;
756
  opq->jump_local = -1;
757
 
758
  /* Insert code to jump to the next page */
759
  gen_op_do_jump(opq, 1);
760
 
761
  /* Generate the code */
762
  gen_code(cpu_state.opqs, dyn);
763
 
764
  /* Fix up the locations */
765
  for(loc = dyn->locs; loc < &dyn->locs[immu_state->pagesize / 4]; loc++)
766
    *loc += (unsigned int)dyn->host_page;
767
 
768
  cpu_state.opqs->ops_param[0] += (unsigned int)dyn->host_page;
769
 
770
  /* Search for page-local jumps */
771
  opq = cpu_state.opqs;
772
  for(j = 0; j < (immu_state->pagesize / 4); opq = opq->next, j++) {
773
    if(opq->jump_local != -1)
774
      opq->ops_param[opq->jump_local] =
775
                              (unsigned int)dyn->locs[opq->jump_local_loc >> 2];
776
 
777
    if(opq->not_jump_loc != -1)
778
      opq->ops_param[opq->not_jump_loc] = (unsigned int)dyn->locs[j + 1];
779
 
780
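    /* ts_bound[j] packs the bindings as three 5-bit fields: bits 0-4 hold the
     * GPR cached in temporary 0, bits 5-9 temporary 1 and bits 10-14
     * temporary 2 (0 doubles as "unbound", since r0 is never kept in a
     * temporary). */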
    /* Store the state of the temporaries into dyn->ts_bound */
781
    dyn->ts_bound[j] = 0;
782
    if(opq->reg_t[0] < 32)
783
      dyn->ts_bound[j] = opq->reg_t[0];
784
    if(opq->reg_t[1] < 32)
785
      dyn->ts_bound[j] |= opq->reg_t[1] << 5;
786
    if(opq->reg_t[2] < 32)
787
      dyn->ts_bound[j] |= opq->reg_t[2] << 10;
788
 
789
    /* Reset for the next page to be recompiled */
790
    opq->xref = 0;
791
  }
792
 
793
  /* Patch the relocations */
794
  patch_relocs(cpu_state.opqs, dyn->host_page);
795
 
796
  if(do_stats) {
797
    opq = cpu_state.opqs;
798
    for(j = 0; j < (immu_state->pagesize / 4); j++, opq = opq->next) {
799
      dyn->insns[j] = opq->insn;
800
      dyn->insn_indexs[j] = opq->insn_index;
801
    }
802
  }
803
 
804
  /* FIXME: Fix the issue below in a more elegant way */
  /* Since eval_insn is called to get the instruction, runtime.sim.mem_cycles is
   * updated, but the recompiler expects it to start at 0, so reset it */
807
  runtime.sim.mem_cycles = 0;
808
}
809
 
810
/* Recompiles a delay-slot instruction (opq is the opq of the instruction
 * having the delay slot) */
812
static void recompile_delay_insn(struct op_queue *opq)
813
{
814
  struct op_queue delay_opq;
815
  int i;
816
 
817
  /* Setup a fake opq that looks very much like the delay slot instruction */
818
  memcpy(&delay_opq, opq, sizeof(struct op_queue));
819
  /* `Fix' a couple of bits */
820
  for(i = 0; i < NUM_T_REGS; i++)
821
    delay_opq.tflags[i] = TFLAG_SOURCED;
822
  delay_opq.insn_index = opq->next->insn_index;
823
  memcpy(delay_opq.param_type, opq->next->param_type, sizeof(delay_opq.param_type));
824
  memcpy(delay_opq.param, opq->next->param, sizeof(delay_opq.param));
825
  delay_opq.param_num = opq->next->param_num;
826
  delay_opq.insn = opq->next->insn;
827
 
828
  delay_opq.xref = 0;
829
  delay_opq.insn_addr = opq->insn_addr + 4;
830
  delay_opq.prev = opq->prev;
831
  delay_opq.next = NULL;
832
 
833
  /* Generate the delay slot instruction */
834
  recompile_insn(&delay_opq, 1);
835
 
836
  ship_gprs_out_t(&delay_opq);
837
 
838
  opq->num_ops = delay_opq.num_ops;
839
  opq->ops_len = delay_opq.ops_len;
840
  opq->ops = delay_opq.ops;
841
  opq->num_ops_param = delay_opq.num_ops_param;
842
  opq->ops_param_len = delay_opq.ops_param_len;
843
  opq->ops_param = delay_opq.ops_param;
844
 
845
  for(i = 0; i < NUM_T_REGS; i++)
846
    opq->reg_t[i] = 32;
847
}
848
 
849
/* Returns non-zero if the jump is into this page, 0 otherwise */
850
static int find_jump_loc(oraddr_t j_ea, struct op_queue *opq)
851
{
852
  int i;
853
 
854
  /* Mark the jump as non page local if the delay slot instruction is on the
855
   * next page to the jump instruction.  This should not be needed */
856
  if(IADDR_PAGE(j_ea) != IADDR_PAGE(opq->insn_addr))
857
    /* We can't do anything as the j_ea (as passed to find_jump_loc) is a
858
     * VIRTUAL offset and the next physical page may not be the next VIRTUAL
859
     * page */
860
    return 0;
861
 
862
  /* The jump is into the page currently undergoing dynamic recompilation */
863
 
864
  /* If we haven't got to the location of the jump, everything is ok */
865
  if(j_ea > opq->insn_addr) {
866
    /* Find the corresponding opq and mark it as cross-referenced */
867
    for(i = (j_ea - opq->insn_addr) / 4; i; i--)
868
      opq = opq->next;
869
    opq->xref = 1;
870
    return 1;
871
  }
872
 
873
  /* Insert temporary -> register code before the jump ea and register ->
874
   * temporary at the x-ref address */
875
  for(i = (opq->insn_addr - j_ea) / 4; i; i--)
876
    opq = opq->prev;
877
 
878
  if(!opq->prev)
879
    /* We're at the beginning of a page, no need to do anything */
880
    return 1;
881
 
882
  /* Found location, insert code */
883
 
884
  ship_gprs_out_t(opq->prev);
885
 
886
  for(i = 0; i < NUM_T_REGS; i++) {
887
    if(opq->prev->reg_t[i] < 32)
888
      /* FIXME: Ship the temporaries in at the beginning of the opq that needs them */
889
      gen_op_move_t_gpr[i][opq->prev->reg_t[i]](opq, 0);
890
  }
891
 
892
  opq->xref = 1;
893
 
894
  return 1;
895
}
896
 
897
static void gen_j_imm(struct op_queue *opq, oraddr_t off)
898
{
899
  int jump_local;
900
 
901
  off <<= 2;
902
 
903
  if(IADDR_PAGE(opq->insn_addr) != IADDR_PAGE(opq->insn_addr + 4)) {
904
    gen_op_set_pc_delay_imm(opq, 1, off);
905
    gen_op_do_sched(opq, 1);
906
    return;
907
  }
908
 
909
  jump_local = find_jump_loc(opq->insn_addr + off, opq);
910
 
911
  gen_op_set_delay_insn(opq, 1);
912
  gen_op_do_sched(opq, 1);
913
 
914
  recompile_delay_insn(opq);
915
 
916
  gen_op_add_pc(opq, 1, (orreg_t)off - 8);
917
  gen_op_clear_delay_insn(opq, 1);
918
  gen_op_do_sched_delay(opq, 1);
919
 
920
  if(jump_local) {
921
    gen_op_jmp_imm(opq, 1, 0);
922
    opq->jump_local = opq->num_ops_param - 1;
923
    opq->jump_local_loc = (opq->insn_addr + (orreg_t)off) & immu_state->page_offset_mask;
924
  } else
925
    gen_op_do_jump(opq, 1);
926
}
927
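
/* When jump_local is set above, recompile_page() later overwrites that
 * parameter with the host address from dyn->locs[] (see its "Search for
 * page-local jumps" pass); jumps that leave the page go through
 * gen_op_do_jump() instead. */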
 
928
static const generic_gen_op set_pc_delay_gpr[32] = {
929
 NULL,
930
 gen_op_move_gpr1_pc_delay,
931
 gen_op_move_gpr2_pc_delay,
932
 gen_op_move_gpr3_pc_delay,
933
 gen_op_move_gpr4_pc_delay,
934
 gen_op_move_gpr5_pc_delay,
935
 gen_op_move_gpr6_pc_delay,
936
 gen_op_move_gpr7_pc_delay,
937
 gen_op_move_gpr8_pc_delay,
938
 gen_op_move_gpr9_pc_delay,
939
 gen_op_move_gpr10_pc_delay,
940
 gen_op_move_gpr11_pc_delay,
941
 gen_op_move_gpr12_pc_delay,
942
 gen_op_move_gpr13_pc_delay,
943
 gen_op_move_gpr14_pc_delay,
944
 gen_op_move_gpr15_pc_delay,
945
 gen_op_move_gpr16_pc_delay,
946
 gen_op_move_gpr17_pc_delay,
947
 gen_op_move_gpr18_pc_delay,
948
 gen_op_move_gpr19_pc_delay,
949
 gen_op_move_gpr20_pc_delay,
950
 gen_op_move_gpr21_pc_delay,
951
 gen_op_move_gpr22_pc_delay,
952
 gen_op_move_gpr23_pc_delay,
953
 gen_op_move_gpr24_pc_delay,
954
 gen_op_move_gpr25_pc_delay,
955
 gen_op_move_gpr26_pc_delay,
956
 gen_op_move_gpr27_pc_delay,
957
 gen_op_move_gpr28_pc_delay,
958
 gen_op_move_gpr29_pc_delay,
959
 gen_op_move_gpr30_pc_delay,
960
 gen_op_move_gpr31_pc_delay };
961
 
962
static void gen_j_reg(struct op_queue *opq, unsigned int gpr)
963
{
964
  int i;
965
 
966
  /* Ship the jump-to register out (if it exists).  It requires special
   * handling */
968
  for(i = 0; i < NUM_T_REGS; i++) {
969
    if(opq->reg_t[i] == opq->param[0])
970
      /* Ship temporary out in the last opq that used it */
971
      ship_t_out(opq, i);
972
  }
973
 
974
  if(do_stats)
975
    gen_op_analysis(opq, 1);
976
 
977
  if(!gpr)
978
    gen_op_clear_pc_delay(opq, 1);
979
  else
980
    set_pc_delay_gpr[gpr](opq, 1);
981
 
982
  gen_op_do_sched(opq, 1);
983
 
984
  if(IADDR_PAGE(opq->insn_addr) != IADDR_PAGE(opq->insn_addr + 4))
985
    return;
986
 
987
  recompile_delay_insn(opq);
988
 
989
  gen_op_set_pc_pc_delay(opq, 1);
990
  gen_op_clear_delay_insn(opq, 1);
991
  gen_op_do_sched_delay(opq, 1);
992
 
993
  gen_op_do_jump_delay(opq, 1);
994
  gen_op_do_jump(opq, 1);
995
}
996
 
997
/*------------------------------[ Operation generation for an instruction ]---*/
998
/* FIXME: Flag setting is not done in any instruction */
999
/* FIXME: Since r0 is not moved into a temporary, check all arguments below! */
1000
 
1001
DEF_1T_OP(generic_gen_op, clear_t, gen_op_clear);
1002
DEF_2T_OP_NEQ(generic_gen_op, move_t_t, gen_op_move);
1003
DEF_1T_OP(imm_gen_op, mov_t_imm, gen_op_imm);
1004
 
1005
DEF_2T_OP(imm_gen_op, l_add_imm_t_table, gen_op_add_imm);
1006
DEF_3T_OP(generic_gen_op, l_add_t_table, gen_op_add);
1007
 
1008
void gen_l_add(struct op_queue *opq, int param_t[3], int delay_slot)
1009
{
1010
  if(!opq->param[0])
1011
    /* Screw this, the operation shall do nothing */
1012
    return;
1013
 
1014
  if(!opq->param[1] && !opq->param[2]) {
1015
    /* Just clear param_t[0] */
1016
    clear_t[param_t[0]](opq, 1);
1017
    return;
1018
  }
1019
 
1020
  if(!opq->param[2]) {
1021
    if(opq->param[0] != opq->param[1])
1022
      /* This just moves a register */
1023
      move_t_t[param_t[0]][param_t[1]](opq, 1);
1024
    return;
1025
  }
1026
 
1027
  if(!opq->param[1]) {
1028
    /* Check if we are moving an immediate */
1029
    if(param_t[2] == T_NONE) {
1030
      /* Yep, an immediate */
1031
      mov_t_imm[param_t[0]](opq, 1, opq->param[2]);
1032
      return;
1033
    }
1034
    /* Just another move */
1035
    if(opq->param[0] != opq->param[2])
1036
      move_t_t[param_t[0]][param_t[2]](opq, 1);
1037
    return;
1038
  }
1039
 
1040
  /* Ok, This _IS_ an add... */
1041
  if(param_t[2] == T_NONE)
1042
    /* immediate */
1043
    l_add_imm_t_table[param_t[0]][param_t[1]](opq, 1, opq->param[2]);
1044
  else
1045
    l_add_t_table[param_t[0]][param_t[1]][param_t[2]](opq, 1);
1046
}
1047
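
/* The r0 special cases above mean that, for example, l.addi rD,r0,imm is
 * emitted as a plain immediate load and l.add rD,rA,r0 as (at most) a register
 * move; only the general case emits a real add operation. */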
 
1048
DEF_3T_OP(generic_gen_op, l_addc_t_table, gen_op_addc);
1049
 
1050
void gen_l_addc(struct op_queue *opq, int param_t[3], int delay_slot)
1051
{
1052
  if(!opq->param[0])
1053
    /* Screw this, the operation shall do nothing */
1054
    return;
1055
 
1056
  /* FIXME: More optimisations !! (...and immediate...) */
1057
  l_addc_t_table[param_t[0]][param_t[1]][param_t[2]](opq, 1);
1058
}
1059
 
1060
DEF_2T_OP(imm_gen_op, l_and_imm_t_table, gen_op_and_imm);
1061
DEF_3T_OP_NEQ(generic_gen_op, l_and_t_table, gen_op_and);
1062
 
1063
void gen_l_and(struct op_queue *opq, int param_t[3], int delay_slot)
1064
{
1065
  if(!opq->param[0])
1066
    /* Screw this, the operation shall do nothing */
1067
    return;
1068
 
1069
  if(!opq->param[1] || !opq->param[2]) {
1070
    /* Just clear param_t[0] */
1071
    clear_t[param_t[0]](opq, 1);
1072
    return;
1073
  }
1074
 
1075
  if((opq->param[0] == opq->param[1]) &&
1076
     (opq->param[0] == opq->param[2]) &&
1077
     (param_t[2] != T_NONE))
1078
    return;
1079
 
1080
  if(param_t[2] == T_NONE)
1081
    l_and_imm_t_table[param_t[0]][param_t[1]](opq, 1, opq->param[2]);
1082
  else
1083
    l_and_t_table[param_t[0]][param_t[1]][param_t[2]](opq, 1);
1084
}
1085
 
1086
void gen_l_bf(struct op_queue *opq, int param_t[3], int delay_slot)
1087
{
1088
  if(do_stats)
1089
    /* All gprs are current since this insn doesn't touch any reg */
1090
    gen_op_analysis(opq, 1);
1091
 
1092
  /* The temporaries are expected to be shipped out after the execution of the
   * branch instruction whether it branched or not */
1094
  ship_gprs_out_t(opq->prev);
1095
 
1096
  if(IADDR_PAGE(opq->insn_addr) != IADDR_PAGE(opq->insn_addr + 4)) {
1097
    gen_op_check_flag_delay(opq, 1, opq->param[0] << 2);
1098
    gen_op_do_sched(opq, 1);
1099
    opq->not_jump_loc = -1;
1100
    return;
1101
  }
1102
 
1103
  gen_op_check_flag(opq, 1, 0);
1104
  opq->not_jump_loc = opq->num_ops_param - 1;
1105
 
1106
  gen_j_imm(opq, opq->param[0]);
1107
}
1108
 
1109
void gen_l_bnf(struct op_queue *opq, int param_t[3], int delay_slot)
1110
{
1111
  if(do_stats)
1112
    /* All gprs are current since this insn doesn't touch any reg */
1113
    gen_op_analysis(opq, 1);
1114
 
1115
  /* The temporaries are expected to be shipped out after the execution of the
   * branch instruction whether it branched or not */
1117
  ship_gprs_out_t(opq->prev);
1118
 
1119
  if(IADDR_PAGE(opq->insn_addr) != IADDR_PAGE(opq->insn_addr + 4)) {
1120
    gen_op_check_not_flag_delay(opq, 1, opq->param[0] << 2);
1121
    gen_op_do_sched(opq, 1);
1122
    opq->not_jump_loc = -1;
1123
    return;
1124
  }
1125
 
1126
  gen_op_check_not_flag(opq, 1, 0);
1127
  opq->not_jump_loc = opq->num_ops_param - 1;
1128
 
1129
  gen_j_imm(opq, opq->param[0]);
1130
}
1131
 
1132
DEF_3T_OP_NEQ(generic_gen_op, l_cmov_t_table, gen_op_cmov);
1133
 
1134
/* FIXME: Check if either operand 1 or 2 is r0 */
1135
void gen_l_cmov(struct op_queue *opq, int param_t[3], int delay_slot)
1136
{
1137
  if(!opq->param[0])
1138
    return;
1139
 
1140
  if(!opq->param[1] && !opq->param[2]) {
1141
    clear_t[param_t[0]](opq, 1);
1142
    return;
1143
  }
1144
 
1145
  if((opq->param[1] == opq->param[2]) && (opq->param[0] == opq->param[1]))
1146
    return;
1147
 
1148
  if(opq->param[1] == opq->param[2]) {
1149
    move_t_t[param_t[0]][param_t[1]](opq, 1);
1150
    return;
1151
  }
1152
 
1153
  l_cmov_t_table[param_t[0]][param_t[1]][param_t[2]](opq, 1);
1154
}
1155
 
1156
void gen_l_cust1(struct op_queue *opq, int param_t[3], int delay_slot)
1157
{
1158
}
1159
 
1160
void gen_l_cust2(struct op_queue *opq, int param_t[3], int delay_slot)
1161
{
1162
}
1163
 
1164
void gen_l_cust3(struct op_queue *opq, int param_t[3], int delay_slot)
1165
{
1166
}
1167
 
1168
void gen_l_cust4(struct op_queue *opq, int param_t[3], int delay_slot)
1169
{
1170
}
1171
 
1172
void gen_l_cust5(struct op_queue *opq, int param_t[3], int delay_slot)
1173
{
1174
}
1175
 
1176
void gen_l_cust6(struct op_queue *opq, int param_t[3], int delay_slot)
1177
{
1178
}
1179
 
1180
void gen_l_cust7(struct op_queue *opq, int param_t[3], int delay_slot)
1181
{
1182
}
1183
 
1184
void gen_l_cust8(struct op_queue *opq, int param_t[3], int delay_slot)
1185
{
1186
}
1187
 
1188
/* FIXME: All registers need to be stored before the div instructions as they
 * have the potential to cause an exception */
1190
 
1191
DEF_1T_OP(generic_gen_op, check_null_excpt, gen_op_check_null_except);
1192
DEF_1T_OP(generic_gen_op, check_null_excpt_delay, gen_op_check_null_except_delay);
1193
DEF_3T_OP(generic_gen_op, l_div_t_table, gen_op_div);
1194
 
1195
void gen_l_div(struct op_queue *opq, int param_t[3], int delay_slot)
1196
{
1197
  if(!opq->param[2]) {
1198
    /* There is no option.  This _will_ cause an illegal exception */
1199
    if(!delay_slot) {
1200
      gen_op_illegal(opq, 1);
1201
      gen_op_do_jump(opq, 1);
1202
    } else {
1203
      gen_op_illegal(opq, 1);
1204
      gen_op_do_jump(opq, 1);
1205
    }
1206
    return;
1207
  }
1208
 
1209
  if(!delay_slot)
1210
    check_null_excpt[param_t[2]](opq, 1);
1211
  else
1212
    check_null_excpt_delay[param_t[2]](opq, 1);
1213
 
1214
  if(!opq->param[0])
1215
    return;
1216
 
1217
  if(!opq->param[1]) {
1218
    /* Clear param_t[0] */
1219
    clear_t[param_t[0]](opq, 1);
1220
    return;
1221
  }
1222
 
1223
  l_div_t_table[param_t[0]][param_t[1]][param_t[2]](opq, 1);
1224
}
1225
 
1226
DEF_3T_OP(generic_gen_op, l_divu_t_table, gen_op_divu);
1227
 
1228
void gen_l_divu(struct op_queue *opq, int param_t[3], int delay_slot)
1229
{
1230
  if(!opq->param[2]) {
1231
    /* There is no option.  This _will_ cause an illegal exception */
1232
    if(!delay_slot) {
1233
      gen_op_illegal(opq, 1);
1234
      gen_op_do_jump(opq, 1);
1235
    } else {
1236
      gen_op_illegal(opq, 1);
1237
      gen_op_do_jump(opq, 1);
1238
    }
1239
    return;
1240
  }
1241
 
1242
  if(!delay_slot)
1243
    check_null_excpt[param_t[2]](opq, 1);
1244
  else
1245
    check_null_excpt_delay[param_t[2]](opq, 1);
1246
 
1247
  if(!opq->param[0])
1248
    return;
1249
 
1250
  if(!opq->param[1]) {
1251
    /* Clear param_t[0] */
1252
    clear_t[param_t[0]](opq, 1);
1253
    return;
1254
  }
1255
 
1256
  l_divu_t_table[param_t[0]][param_t[1]][param_t[2]](opq, 1);
1257
}
1258
 
1259
DEF_2T_OP(generic_gen_op, l_extbs_t_table, gen_op_extbs);
1260
 
1261
void gen_l_extbs(struct op_queue *opq, int param_t[3], int delay_slot)
1262
{
1263
  if(!opq->param[0])
1264
    return;
1265
 
1266
  if(!opq->param[1]) {
1267
    clear_t[param_t[0]](opq, 1);
1268
    return;
1269
  }
1270
 
1271
  l_extbs_t_table[param_t[0]][param_t[1]](opq, 1);
1272
}
1273
 
1274
DEF_2T_OP(generic_gen_op, l_extbz_t_table, gen_op_extbz);
1275
 
1276
void gen_l_extbz(struct op_queue *opq, int param_t[3], int delay_slot)
1277
{
1278
  if(!opq->param[0])
1279
    return;
1280
 
1281
  if(!opq->param[1]) {
1282
    clear_t[param_t[0]](opq, 1);
1283
    return;
1284
  }
1285
 
1286
  l_extbz_t_table[param_t[0]][param_t[1]](opq, 1);
1287
}
1288
 
1289
DEF_2T_OP(generic_gen_op, l_exths_t_table, gen_op_exths);
1290
 
1291
void gen_l_exths(struct op_queue *opq, int param_t[3], int delay_slot)
1292
{
1293
  if(!opq->param[0])
1294
    return;
1295
 
1296
  if(!opq->param[1]) {
1297
    clear_t[param_t[0]](opq, 1);
1298
    return;
1299
  }
1300
 
1301
  l_exths_t_table[param_t[0]][param_t[1]](opq, 1);
1302
}
1303
 
1304
DEF_2T_OP(generic_gen_op, l_exthz_t_table, gen_op_exthz);
1305
 
1306
void gen_l_exthz(struct op_queue *opq, int param_t[3], int delay_slot)
1307
{
1308
  if(!opq->param[0])
1309
    return;
1310
 
1311
  if(!opq->param[1]) {
1312
    clear_t[param_t[0]](opq, 1);
1313
    return;
1314
  }
1315
 
1316
  l_exthz_t_table[param_t[0]][param_t[1]](opq, 1);
1317
}
1318
 
1319
void gen_l_extws(struct op_queue *opq, int param_t[3], int delay_slot)
1320
{
1321
  if(!opq->param[0])
1322
    return;
1323
 
1324
  if(!opq->param[1]) {
1325
    clear_t[param_t[0]](opq, 1);
1326
    return;
1327
  }
1328
 
1329
  if(opq->param[0] == opq->param[1])
1330
    return;
1331
 
1332
  /* In the 32-bit architecture this instruction reduces to a move */
1333
  move_t_t[param_t[0]][param_t[1]](opq, 1);
1334
}
1335
 
1336
void gen_l_extwz(struct op_queue *opq, int param_t[3], int delay_slot)
1337
{
1338
  if(!opq->param[0])
1339
    return;
1340
 
1341
  if(!opq->param[1]) {
1342
    clear_t[param_t[0]](opq, 1);
1343
    return;
1344
  }
1345
 
1346
  if(opq->param[0] == opq->param[1])
1347
    return;
1348
 
1349
  /* In the 32-bit architecture this instruction reduces to a move */
1350
  move_t_t[param_t[0]][param_t[1]](opq, 1);
1351
}
1352
 
1353
DEF_2T_OP(generic_gen_op, l_ff1_t_table, gen_op_ff1);
1354
 
1355
void gen_l_ff1(struct op_queue *opq, int param_t[3], int delay_slot)
1356
{
1357
  if(!opq->param[0])
1358
    return;
1359
 
1360
  if(!opq->param[1]) {
1361
    clear_t[param_t[0]](opq, 1);
1362
    return;
1363
  }
1364
 
1365
  l_ff1_t_table[param_t[0]][param_t[1]](opq, 1);
1366
}
1367
 
1368
void gen_l_j(struct op_queue *opq, int param_t[3], int delay_slot)
1369
{
1370
  if(do_stats)
1371
    /* All gprs are current since this insn doesn't touch any reg */
1372
    gen_op_analysis(opq, 1);
1373
 
1374
  gen_j_imm(opq, opq->param[0]);
1375
}
1376
 
1377
void gen_l_jal(struct op_queue *opq, int param_t[3], int delay_slot)
1378
{
1379
  int i;
1380
 
1381
  /* In the case of an l.jal instruction, make sure that LINK_REGNO is not in
   * a temporary.  The problem is that the l.jal(r) instruction stores the
   * `return address' in LINK_REGNO.  The temporaries are shipped out only
   * after the delay slot instruction has executed and so it would overwrite the
   * `return address'. */
1386
  for(i = 0; i < NUM_T_REGS; i++) {
1387
    if(opq->reg_t[i] == LINK_REGNO) {
1388
      /* Don't bother storing the register, it is going to get clobbered in this
       * instruction anyway */
1390
      opq->reg_t[i] = 32;
1391
      break;
1392
    }
1393
  }
1394
 
1395
  /* Store the return address */
1396
  gen_op_store_link_addr_gpr(opq, 1);
1397
 
1398
  if(do_stats)
1399
    /* All gprs are current since this insn doesn't touch any reg */
1400
    gen_op_analysis(opq, 1);
1401
 
1402
  gen_j_imm(opq, opq->param[0]);
1403
}
1404
 
1405
void gen_l_jr(struct op_queue *opq, int param_t[3], int delay_slot)
1406
{
1407
  gen_j_reg(opq, opq->param[0]);
1408
}
1409
 
1410
void gen_l_jalr(struct op_queue *opq, int param_t[3], int delay_slot)
1411
{
1412
  int i;
1413
 
1414
  /* In the case of an l.jal instruction, make sure that LINK_REGNO is not in
   * a temporary.  The problem is that the l.jal(r) instruction stores the
   * `return address' in LINK_REGNO.  The temporaries are shipped out only
   * after the delay slot instruction has executed and so it would overwrite the
   * `return address'. */
1419
  for(i = 0; i < NUM_T_REGS; i++) {
1420
    if(opq->reg_t[i] == LINK_REGNO) {
1421
      /* Don't bother storing the register, it is going to get clobbered in this
       * instruction anyway */
1423
      opq->reg_t[i] = 32;
1424
      break;
1425
    }
1426
  }
1427
 
1428
  /* Store the return address */
1429
  gen_op_store_link_addr_gpr(opq, 1);
1430
 
1431
  gen_j_reg(opq, opq->param[0]);
1432
}
1433
 
1434
/* FIXME: Optimise all load instruction when the disposition == 0 */
1435
 
1436
DEF_1T_OP(imm_gen_op, l_lbs_imm_t_table, gen_op_lbs_imm);
1437
DEF_2T_OP(imm_gen_op, l_lbs_t_table, gen_op_lbs);
1438
 
1439
void gen_l_lbs(struct op_queue *opq, int param_t[3], int delay_slot)
1440
{
1441
  if(!opq->param[0]) {
1442
    /* FIXME: This will work, but the statistics need to be updated... */
1443
    return;
1444
  }
1445
 
1446
  /* Just in case an exception happens */
1447
  ship_gprs_out_t(opq->prev);
1448
 
1449
  if(!opq->param[2]) {
1450
    /* Load the data from the immediate */
1451
    l_lbs_imm_t_table[param_t[0]](opq, 1, opq->param[1]);
1452
    return;
1453
  }
1454
 
1455
  l_lbs_t_table[param_t[0]][param_t[2]](opq, 1, opq->param[1]);
1456
}
1457
 
1458
DEF_1T_OP(imm_gen_op, l_lbz_imm_t_table, gen_op_lbz_imm);
1459
DEF_2T_OP(imm_gen_op, l_lbz_t_table, gen_op_lbz);
1460
 
1461
void gen_l_lbz(struct op_queue *opq, int param_t[3], int delay_slot)
1462
{
1463
  if(!opq->param[0]) {
1464
    /* FIXME: This will work, but the statistics need to be updated... */
1465
    return;
1466
  }
1467
 
1468
  /* Just in case an exception happens */
1469
  ship_gprs_out_t(opq->prev);
1470
 
1471
  if(!opq->param[2]) {
1472
    /* Load the data from the immediate */
1473
    l_lbz_imm_t_table[param_t[0]](opq, 1, opq->param[1]);
1474
    return;
1475
  }
1476
 
1477
  l_lbz_t_table[param_t[0]][param_t[2]](opq, 1, opq->param[1]);
1478
}
1479
 
1480
DEF_1T_OP(imm_gen_op, l_lhs_imm_t_table, gen_op_lhs_imm);
1481
DEF_2T_OP(imm_gen_op, l_lhs_t_table, gen_op_lhs);
1482
 
1483
void gen_l_lhs(struct op_queue *opq, int param_t[3], int delay_slot)
1484
{
1485
  if(!opq->param[0]) {
1486
    /* FIXME: This will work, but the statistics need to be updated... */
1487
    return;
1488
  }
1489
 
1490
  /* Just in case an exception happens */
1491
  ship_gprs_out_t(opq->prev);
1492
 
1493
  if(!opq->param[2]) {
1494
    /* Load the data from the immediate */
1495
    l_lhs_imm_t_table[param_t[0]](opq, 1, opq->param[1]);
1496
    return;
1497
  }
1498
 
1499
  l_lhs_t_table[param_t[0]][param_t[2]](opq, 1, opq->param[1]);
1500
}
1501
 
1502
DEF_1T_OP(imm_gen_op, l_lhz_imm_t_table, gen_op_lhz_imm);
1503
DEF_2T_OP(imm_gen_op, l_lhz_t_table, gen_op_lhz);
1504
 
1505
void gen_l_lhz(struct op_queue *opq, int param_t[3], int delay_slot)
1506
{
1507
  if(!opq->param[0]) {
1508
    /* FIXME: This will work, but the statistics need to be updated... */
1509
    return;
1510
  }
1511
 
1512
  /* Just in case an exception happens */
1513
  ship_gprs_out_t(opq->prev);
1514
 
1515
  if(!opq->param[2]) {
1516
    /* Load the data from the immediate */
1517
    l_lhz_imm_t_table[param_t[0]](opq, 1, opq->param[1]);
1518
    return;
1519
  }
1520
 
1521
  l_lhz_t_table[param_t[0]][param_t[2]](opq, 1, opq->param[1]);
1522
}
1523
 
1524
DEF_1T_OP(imm_gen_op, l_lws_imm_t_table, gen_op_lws_imm);
1525
DEF_2T_OP(imm_gen_op, l_lws_t_table, gen_op_lws);
1526
 
1527
void gen_l_lws(struct op_queue *opq, int param_t[3], int delay_slot)
1528
{
1529
  if(!opq->param[0]) {
1530
    /* FIXME: This will work, but the statistics need to be updated... */
1531
    return;
1532
  }
1533
 
1534
  /* Just in case an exception happens */
1535
  ship_gprs_out_t(opq->prev);
1536
 
1537
  if(!opq->param[2]) {
1538
    /* Load the data from the immediate */
1539
    l_lws_imm_t_table[param_t[0]](opq, 1, opq->param[1]);
1540
    return;
1541
  }
1542
 
1543
  l_lws_t_table[param_t[0]][param_t[2]](opq, 1, opq->param[1]);
1544
}
1545
 
1546
DEF_1T_OP(imm_gen_op, l_lwz_imm_t_table, gen_op_lwz_imm);
1547
DEF_2T_OP(imm_gen_op, l_lwz_t_table, gen_op_lwz);
1548
 
1549
void gen_l_lwz(struct op_queue *opq, int param_t[3], int delay_slot)
1550
{
1551
  if(!opq->param[0]) {
1552
    /* FIXME: This will work, but the statistics need to be updated... */
1553
    return;
1554
  }
1555
 
1556
  /* Just in case an exception happens */
1557
  ship_gprs_out_t(opq->prev);
1558
 
1559
  if(!opq->param[2]) {
1560
    /* Load the data from the immediate */
1561
    l_lwz_imm_t_table[param_t[0]](opq, 1, opq->param[1]);
1562
    return;
1563
  }
1564
 
1565
  l_lwz_t_table[param_t[0]][param_t[2]](opq, 1, opq->param[1]);
1566
}
1567
 
1568
DEF_1T_OP(imm_gen_op, l_mac_imm_t_table, gen_op_mac_imm);
1569
DEF_2T_OP(generic_gen_op, l_mac_t_table, gen_op_mac);
1570
 
1571
void gen_l_mac(struct op_queue *opq, int param_t[3], int delay_slot)
1572
{
1573
  if(!opq->param[0] || !opq->param[1])
1574
    return;
1575
 
1576
  if(param_t[1] == T_NONE)
1577
    l_mac_imm_t_table[param_t[0]](opq, 1, opq->param[1]);
1578
  else
1579
    l_mac_t_table[param_t[0]][param_t[1]](opq, 1);
1580
}
1581
 
1582
DEF_1T_OP(generic_gen_op, l_macrc_t_table, gen_op_macrc);
1583
 
1584
void gen_l_macrc(struct op_queue *opq, int param_t[3], int delay_slot)
1585
{
1586
  if(!opq->param[0]) {
1587
    gen_op_macc(opq, 1);
1588
    return;
1589
  }
1590
 
1591
  l_macrc_t_table[param_t[0]](opq, 1);
1592
}
1593
 
1594
DEF_1T_OP(imm_gen_op, l_mfspr_imm_t_table, gen_op_mfspr_imm);
1595
DEF_2T_OP(imm_gen_op, l_mfspr_t_table, gen_op_mfspr);
1596
 
1597
void gen_l_mfspr(struct op_queue *opq, int param_t[3], int delay_slot)
1598
{
1599
  if(!opq->param[0])
1600
    return;
1601
 
1602
  if(!opq->param[1]) {
1603
    l_mfspr_imm_t_table[param_t[0]](opq, 1, opq->param[2]);
1604
    return;
1605
  }
1606
 
1607
  l_mfspr_t_table[param_t[0]][param_t[1]](opq, 1, opq->param[2]);
1608
}
1609
 
1610
void gen_l_movhi(struct op_queue *opq, int param_t[3], int delay_slot)
1611
{
1612
  if(!opq->param[0])
1613
    return;
1614
 
1615
  if(!opq->param[1]) {
1616
    clear_t[param_t[0]](opq, 1);
1617
    return;
1618
  }
1619
 
1620
  mov_t_imm[param_t[0]](opq, 1, opq->param[1] << 16);
1621
}
1622
 
1623
DEF_2T_OP(generic_gen_op, l_msb_t_table, gen_op_msb);
1624
 
1625
void gen_l_msb(struct op_queue *opq, int param_t[3], int delay_slot)
1626
{
1627
  if(!opq->param[0] || !opq->param[1])
1628
    return;
1629
 
1630
  l_msb_t_table[param_t[0]][param_t[1]](opq, 1);
1631
}
1632
 
1633
DEF_1T_OP(imm_gen_op, l_mtspr_clear_t_table, gen_op_mtspr_clear);
1634
DEF_1T_OP(imm_gen_op, l_mtspr_imm_t_table, gen_op_mtspr_imm);
1635
DEF_2T_OP(imm_gen_op, l_mtspr_t_table, gen_op_mtspr);
1636
 
1637
void gen_l_mtspr(struct op_queue *opq, int param_t[3], int delay_slot)
1638
{
1639
  /* Just in case an exception happens */
1640
  ship_gprs_out_t(opq->prev);
1641
 
1642
  if(!opq->param[0]) {
1643
    if(!opq->param[1]) {
1644
      /* Clear the immediate SPR */
1645
      gen_op_mtspr_imm_clear(opq, 1, opq->param[2]);
1646
      return;
1647
    }
1648
    l_mtspr_imm_t_table[param_t[1]](opq, 1, opq->param[2]);
1649
    return;
1650
  }
1651
 
1652
  if(!opq->param[1]) {
1653
    l_mtspr_clear_t_table[param_t[0]](opq, 1, opq->param[2]);
1654
    return;
1655
  }
1656
 
1657
  l_mtspr_t_table[param_t[0]][param_t[1]](opq, 1, opq->param[2]);
1658
}
1659
 
1660
DEF_2T_OP(imm_gen_op, l_mul_imm_t_table, gen_op_mul_imm);
1661
DEF_3T_OP(generic_gen_op, l_mul_t_table, gen_op_mul);
1662
 
1663
void gen_l_mul(struct op_queue *opq, int param_t[3], int delay_slot)
1664
{
1665
  if(!opq->param[0])
1666
    return;
1667
 
1668
  if(!opq->param[1] || !opq->param[2]) {
1669
    clear_t[param_t[0]](opq, 1);
1670
    return;
1671
  }
1672
 
1673
  if(param_t[2] == T_NONE)
1674
    l_mul_imm_t_table[param_t[0]][param_t[1]](opq, 1, opq->param[2]);
1675
  else
1676
    l_mul_t_table[param_t[0]][param_t[1]][param_t[2]](opq, 1);
1677
}
1678
 
1679
DEF_3T_OP(generic_gen_op, l_mulu_t_table, gen_op_mulu);
1680
 
1681
void gen_l_mulu(struct op_queue *opq, int param_t[3], int delay_slot)
1682
{
1683
  if(!opq->param[0])
1684
    return;
1685
 
1686
  if(!opq->param[1] || !opq->param[2]) {
1687
    clear_t[param_t[0]](opq, 1);
1688
    return;
1689
  }
1690
 
1691
  l_mulu_t_table[param_t[0]][param_t[1]][param_t[2]](opq, 1);
1692
}
1693
 
1694
void gen_l_nop(struct op_queue *opq, int param_t[3], int delay_slot)
1695
{
1696
  /* Do parameter switch now */
1697
  switch(opq->param[0]) {
1698
  case NOP_NOP:
1699
    break;
1700
  case NOP_EXIT:
1701
    ship_gprs_out_t(opq->prev);
1702
    gen_op_nop_exit(opq, 1);
1703
    break;
1704
  case NOP_CNT_RESET:
1705
    gen_op_nop_reset(opq, 1);
1706
    break;
1707
  case NOP_PRINTF:
1708
    ship_gprs_out_t(opq->prev);
1709
    gen_op_nop_printf(opq, 1);
1710
    break;
1711
  case NOP_REPORT:
1712
    ship_gprs_out_t(opq->prev);
1713
    gen_op_nop_report(opq, 1);
1714
    break;
1715
  default:
1716
    if((opq->param[0] >= NOP_REPORT_FIRST) && (opq->param[0] <= NOP_REPORT_LAST)) {
1717
      ship_gprs_out_t(opq->prev);
1718
      gen_op_nop_report_imm(opq, 1, opq->param[0] - NOP_REPORT_FIRST);
1719
    }
1720
    break;
1721
  }
1722
}
1723
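
/* The l.nop immediate selects a simulator service (NOP_EXIT, NOP_CNT_RESET,
 * NOP_PRINTF, NOP_REPORT, ...); services that inspect guest registers flush
 * the temporaries first with ship_gprs_out_t(). */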
 
1724
DEF_2T_OP(imm_gen_op, l_or_imm_t_table, gen_op_or_imm);
1725
DEF_3T_OP_NEQ(generic_gen_op, l_or_t_table, gen_op_or);
1726
 
1727
void gen_l_or(struct op_queue *opq, int param_t[3], int delay_slot)
1728
{
1729
  if(!opq->param[0])
1730
    return;
1731
 
1732
  if((opq->param[0] == opq->param[1]) &&
1733
     (opq->param[0] == opq->param[2]) &&
1734
     (param_t[2] != T_NONE))
1735
    return;
1736
 
1737
  if(!opq->param[1] && !opq->param[2]) {
1738
    clear_t[param_t[0]](opq, 1);
1739
    return;
1740
  }
1741
 
1742
  if(!opq->param[2]) {
1743
    if((param_t[2] == T_NONE) && (opq->param[0] == opq->param[1]))
1744
      return;
1745
    move_t_t[param_t[0]][param_t[1]](opq, 1);
1746
    return;
1747
  }
1748
 
1749
  if(!opq->param[1]) {
1750
    /* Check if we are moving an immediate */
1751
    if(param_t[2] == T_NONE) {
1752
      /* Yep, an immediate */
1753
      mov_t_imm[param_t[0]](opq, 1, opq->param[2]);
1754
      return;
1755
    }
1756
    /* Just another move */
1757
    move_t_t[param_t[0]][param_t[2]](opq, 1);
1758
    return;
1759
  }
1760
 
1761
  if(param_t[2] == T_NONE)
1762
    l_or_imm_t_table[param_t[0]][param_t[1]](opq, 1, opq->param[2]);
1763
  else
1764
    l_or_t_table[param_t[0]][param_t[1]][param_t[2]](opq, 1);
1765
}
1766
 
1767
void gen_l_rfe(struct op_queue *opq, int param_t[3], int delay_slot)
1768
{
1769
  if(do_stats)
1770
    /* All gprs are current since this insn doesn't touch any reg */
1771
    gen_op_analysis(opq, 1);
1772
 
1773
  gen_op_prep_rfe(opq, 1);
1774
  /* FIXME: rename op_do_sched_delay */
1775
  gen_op_do_sched_delay(opq, 1);
1776
  gen_op_do_jump(opq, 1);
1777
}
1778
 
1779
/* FIXME: All store instructions should be optimised when the disposition = 0 */
1780
 
1781
DEF_1T_OP(imm_gen_op, l_sb_clear_table, gen_op_sb_clear);
DEF_1T_OP(imm_gen_op, l_sb_imm_t_table, gen_op_sb_imm);
DEF_2T_OP(imm_gen_op, l_sb_t_table, gen_op_sb);

void gen_l_sb(struct op_queue *opq, int param_t[3], int delay_slot)
{
  /* Just in case an exception happens */
  ship_gprs_out_t(opq->prev);

  if(!opq->param[2]) {
    if(!opq->param[1]) {
      gen_op_sb_clear_imm(opq, 1, opq->param[0]);
      return;
    }
    l_sb_clear_table[param_t[1]](opq, 1, opq->param[0]);
    return;
  }

  if(!opq->param[1]) {
    /* Store the data at the immediate address */
    l_sb_imm_t_table[param_t[2]](opq, 1, opq->param[0]);
    return;
  }

  l_sb_t_table[param_t[1]][param_t[2]](opq, 1, opq->param[0]);
}

DEF_1T_OP(imm_gen_op, l_sh_clear_table, gen_op_sh_clear);
DEF_1T_OP(imm_gen_op, l_sh_imm_t_table, gen_op_sh_imm);
DEF_2T_OP(imm_gen_op, l_sh_t_table, gen_op_sh);

void gen_l_sh(struct op_queue *opq, int param_t[3], int delay_slot)
{
  /* Just in case an exception happens */
  ship_gprs_out_t(opq->prev);

  if(!opq->param[2]) {
    if(!opq->param[1]) {
      gen_op_sh_clear_imm(opq, 1, opq->param[0]);
      return;
    }
    l_sh_clear_table[param_t[1]](opq, 1, opq->param[0]);
    return;
  }

  if(!opq->param[1]) {
    /* Store the data at the immediate address */
    l_sh_imm_t_table[param_t[2]](opq, 1, opq->param[0]);
    return;
  }

  l_sh_t_table[param_t[1]][param_t[2]](opq, 1, opq->param[0]);
}

DEF_1T_OP(imm_gen_op, l_sw_clear_table, gen_op_sw_clear);
DEF_1T_OP(imm_gen_op, l_sw_imm_t_table, gen_op_sw_imm);
DEF_2T_OP(imm_gen_op, l_sw_t_table, gen_op_sw);

void gen_l_sw(struct op_queue *opq, int param_t[3], int delay_slot)
{
  /* Just in case an exception happens */
  ship_gprs_out_t(opq->prev);

  if(!opq->param[2]) {
    if(!opq->param[1]) {
      gen_op_sw_clear_imm(opq, 1, opq->param[0]);
      return;
    }
    l_sw_clear_table[param_t[1]](opq, 1, opq->param[0]);
    return;
  }

  if(!opq->param[1]) {
    /* Store the data at the immediate address */
    l_sw_imm_t_table[param_t[2]](opq, 1, opq->param[0]);
    return;
  }

  l_sw_t_table[param_t[1]][param_t[2]](opq, 1, opq->param[0]);
}

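/* Every l.sfXX generator below follows the same pattern: if both operands
 * are zero the flag is known at recompile time and is simply set or
 * cleared; if exactly one operand is r0 the comparison collapses to a test
 * against zero through a *_null table (using the mirrored comparison when
 * the zero operand comes first); otherwise the immediate or
 * register-register table entry is emitted. */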
DEF_1T_OP(generic_gen_op, l_sfeq_null_t_table, gen_op_sfeq_null);
DEF_1T_OP(imm_gen_op, l_sfeq_imm_t_table, gen_op_sfeq_imm);
DEF_2T_OP(generic_gen_op, l_sfeq_t_table, gen_op_sfeq);

void gen_l_sfeq(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0] && !opq->param[1]) {
    gen_op_set_flag(opq, 1);
    return;
  }

  if(!opq->param[0]) {
    if(param_t[1] == T_NONE) {
      if(!opq->param[1])
        gen_op_set_flag(opq, 1);
      else
        gen_op_clear_flag(opq, 1);
    } else
      l_sfeq_null_t_table[param_t[1]](opq, 1);
    return;
  }

  if(!opq->param[1]) {
    l_sfeq_null_t_table[param_t[0]](opq, 1);
    return;
  }

  if(param_t[1] == T_NONE)
    l_sfeq_imm_t_table[param_t[0]](opq, 1, opq->param[1]);
  else
    l_sfeq_t_table[param_t[0]][param_t[1]](opq, 1);
}

DEF_1T_OP(generic_gen_op, l_sfges_null_t_table, gen_op_sfges_null);
DEF_1T_OP(generic_gen_op, l_sfles_null_t_table, gen_op_sfles_null);
DEF_1T_OP(imm_gen_op, l_sfges_imm_t_table, gen_op_sfges_imm);
DEF_2T_OP(generic_gen_op, l_sfges_t_table, gen_op_sfges);

void gen_l_sfges(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0] && !opq->param[1]) {
    gen_op_set_flag(opq, 1);
    return;
  }

  if(!opq->param[0]) {
    /* sfles IS correct */
    if(param_t[1] == T_NONE) {
      if(0 >= (orreg_t)opq->param[1])
        gen_op_set_flag(opq, 1);
      else
        gen_op_clear_flag(opq, 1);
    } else
      l_sfles_null_t_table[param_t[1]](opq, 1);
    return;
  }

  if(!opq->param[1]) {
    l_sfges_null_t_table[param_t[0]](opq, 1);
    return;
  }

  if(param_t[1] == T_NONE)
    l_sfges_imm_t_table[param_t[0]](opq, 1, opq->param[1]);
  else
    l_sfges_t_table[param_t[0]][param_t[1]](opq, 1);
}

DEF_1T_OP(generic_gen_op, l_sfgeu_null_t_table, gen_op_sfgeu_null);
DEF_1T_OP(generic_gen_op, l_sfleu_null_t_table, gen_op_sfleu_null);
DEF_1T_OP(imm_gen_op, l_sfgeu_imm_t_table, gen_op_sfgeu_imm);
DEF_2T_OP(generic_gen_op, l_sfgeu_t_table, gen_op_sfgeu);

void gen_l_sfgeu(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0] && !opq->param[1]) {
    gen_op_set_flag(opq, 1);
    return;
  }

  if(!opq->param[0]) {
    /* sfleu IS correct */
    if(param_t[1] == T_NONE) {
      if(0 >= opq->param[1])
        gen_op_set_flag(opq, 1);
      else
        gen_op_clear_flag(opq, 1);
    } else
      l_sfleu_null_t_table[param_t[1]](opq, 1);
    return;
  }

  if(!opq->param[1]) {
    l_sfgeu_null_t_table[param_t[0]](opq, 1);
    return;
  }

  if(param_t[1] == T_NONE)
    l_sfgeu_imm_t_table[param_t[0]](opq, 1, opq->param[1]);
  else
    l_sfgeu_t_table[param_t[0]][param_t[1]](opq, 1);
}

DEF_1T_OP(generic_gen_op, l_sfgts_null_t_table, gen_op_sfgts_null);
DEF_1T_OP(generic_gen_op, l_sflts_null_t_table, gen_op_sflts_null);
DEF_1T_OP(imm_gen_op, l_sfgts_imm_t_table, gen_op_sfgts_imm);
DEF_2T_OP(generic_gen_op, l_sfgts_t_table, gen_op_sfgts);

void gen_l_sfgts(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0] && !opq->param[1]) {
    gen_op_clear_flag(opq, 1);
    return;
  }

  if(!opq->param[0]) {
    /* sflts IS correct */
    if(param_t[1] == T_NONE) {
      if(0 > (orreg_t)opq->param[1])
        gen_op_set_flag(opq, 1);
      else
        gen_op_clear_flag(opq, 1);
    } else
      l_sflts_null_t_table[param_t[1]](opq, 1);
    return;
  }

  if(!opq->param[1]) {
    l_sfgts_null_t_table[param_t[0]](opq, 1);
    return;
  }

  if(param_t[1] == T_NONE)
    l_sfgts_imm_t_table[param_t[0]](opq, 1, opq->param[1]);
  else
    l_sfgts_t_table[param_t[0]][param_t[1]](opq, 1);
}

DEF_1T_OP(generic_gen_op, l_sfgtu_null_t_table, gen_op_sfgtu_null);
DEF_1T_OP(generic_gen_op, l_sfltu_null_t_table, gen_op_sfltu_null);
DEF_1T_OP(imm_gen_op, l_sfgtu_imm_t_table, gen_op_sfgtu_imm);
DEF_2T_OP(generic_gen_op, l_sfgtu_t_table, gen_op_sfgtu);

void gen_l_sfgtu(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0] && !opq->param[1]) {
    gen_op_clear_flag(opq, 1);
    return;
  }

  if(!opq->param[0]) {
    /* sfltu IS correct */
    if(param_t[1] == T_NONE) {
      if(0 > opq->param[1])
        gen_op_set_flag(opq, 1);
      else
        gen_op_clear_flag(opq, 1);
    } else
      l_sfltu_null_t_table[param_t[1]](opq, 1);
    return;
  }

  if(!opq->param[1]) {
    l_sfgtu_null_t_table[param_t[0]](opq, 1);
    return;
  }

  if(param_t[1] == T_NONE)
    l_sfgtu_imm_t_table[param_t[0]](opq, 1, opq->param[1]);
  else
    l_sfgtu_t_table[param_t[0]][param_t[1]](opq, 1);
}

DEF_1T_OP(imm_gen_op, l_sfles_imm_t_table, gen_op_sfles_imm);
DEF_2T_OP(generic_gen_op, l_sfles_t_table, gen_op_sfles);

void gen_l_sfles(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0] && !opq->param[1]) {
    gen_op_set_flag(opq, 1);
    return;
  }

  if(!opq->param[0]) {
    /* sfges IS correct */
    if(param_t[1] == T_NONE) {
      if(0 <= (orreg_t)opq->param[1])
        gen_op_set_flag(opq, 1);
      else
        gen_op_clear_flag(opq, 1);
    } else
      l_sfges_null_t_table[param_t[1]](opq, 1);
    return;
  }

  if(!opq->param[1]) {
    l_sfles_null_t_table[param_t[0]](opq, 1);
    return;
  }

  if(param_t[1] == T_NONE)
    l_sfles_imm_t_table[param_t[0]](opq, 1, opq->param[1]);
  else
    l_sfles_t_table[param_t[0]][param_t[1]](opq, 1);
}

DEF_1T_OP(imm_gen_op, l_sfleu_imm_t_table, gen_op_sfleu_imm);
DEF_2T_OP(generic_gen_op, l_sfleu_t_table, gen_op_sfleu);

void gen_l_sfleu(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0] && !opq->param[1]) {
    gen_op_set_flag(opq, 1);
    return;
  }

  if(!opq->param[0]) {
    /* sfgeu IS correct */
    if(param_t[1] == T_NONE) {
      if(0 <= opq->param[1])
        gen_op_set_flag(opq, 1);
      else
        gen_op_clear_flag(opq, 1);
    } else
      l_sfgeu_null_t_table[param_t[1]](opq, 1);
    return;
  }

  if(!opq->param[1]) {
    l_sfleu_null_t_table[param_t[0]](opq, 1);
    return;
  }

  if(param_t[1] == T_NONE)
    l_sfleu_imm_t_table[param_t[0]](opq, 1, opq->param[1]);
  else
    l_sfleu_t_table[param_t[0]][param_t[1]](opq, 1);
}

DEF_1T_OP(imm_gen_op, l_sflts_imm_t_table, gen_op_sflts_imm);
DEF_2T_OP(generic_gen_op, l_sflts_t_table, gen_op_sflts);

void gen_l_sflts(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0] && !opq->param[1]) {
    gen_op_clear_flag(opq, 1);
    return;
  }

  if(!opq->param[0]) {
    /* sfgts IS correct */
    if(param_t[1] == T_NONE) {
      if(0 < (orreg_t)opq->param[1])
        gen_op_set_flag(opq, 1);
      else
        gen_op_clear_flag(opq, 1);
    } else
      l_sfgts_null_t_table[param_t[1]](opq, 1);
    return;
  }

  if(!opq->param[1]) {
    l_sflts_null_t_table[param_t[0]](opq, 1);
    return;
  }

  if(param_t[1] == T_NONE)
    l_sflts_imm_t_table[param_t[0]](opq, 1, opq->param[1]);
  else
    l_sflts_t_table[param_t[0]][param_t[1]](opq, 1);
}

DEF_1T_OP(imm_gen_op, l_sfltu_imm_t_table, gen_op_sfltu_imm);
DEF_2T_OP(generic_gen_op, l_sfltu_t_table, gen_op_sfltu);

void gen_l_sfltu(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0] && !opq->param[1]) {
    gen_op_clear_flag(opq, 1);
    return;
  }

  if(!opq->param[0]) {
    /* sfgtu IS correct */
    if(param_t[1] == T_NONE) {
      if(0 < opq->param[1])
        gen_op_set_flag(opq, 1);
      else
        gen_op_clear_flag(opq, 1);
    } else
      l_sfgtu_null_t_table[param_t[1]](opq, 1);
    return;
  }

  if(!opq->param[1]) {
    l_sfltu_null_t_table[param_t[0]](opq, 1);
    return;
  }

  if(param_t[1] == T_NONE)
    l_sfltu_imm_t_table[param_t[0]](opq, 1, opq->param[1]);
  else
    l_sfltu_t_table[param_t[0]][param_t[1]](opq, 1);
}

DEF_1T_OP(generic_gen_op, l_sfne_null_t_table, gen_op_sfne_null);
DEF_1T_OP(imm_gen_op, l_sfne_imm_t_table, gen_op_sfne_imm);
DEF_2T_OP(generic_gen_op, l_sfne_t_table, gen_op_sfne);

void gen_l_sfne(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0] && !opq->param[1]) {
    /* Both operands are zero: 0 != 0 is false, so the flag is cleared */
    gen_op_clear_flag(opq, 1);
    return;
  }

  if(!opq->param[0]) {
    if(param_t[1] == T_NONE) {
      if(opq->param[1])
        gen_op_set_flag(opq, 1);
      else
        gen_op_clear_flag(opq, 1);
    } else
      l_sfne_null_t_table[param_t[1]](opq, 1);
    return;
  }

  if(!opq->param[1]) {
    l_sfne_null_t_table[param_t[0]](opq, 1);
    return;
  }

  if(param_t[1] == T_NONE)
    l_sfne_imm_t_table[param_t[0]](opq, 1, opq->param[1]);
  else
    l_sfne_t_table[param_t[0]][param_t[1]](opq, 1);
}

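/* The shift generators (l.sll, l.sra, l.srl): a zero source clears the
 * destination, a zero shift amount is a plain move, and the general case
 * uses the immediate or three-temporary table. */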
DEF_2T_OP(imm_gen_op, l_sll_imm_t_table, gen_op_sll_imm);
DEF_3T_OP(generic_gen_op, l_sll_t_table, gen_op_sll);

void gen_l_sll(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0])
    return;

  if(!opq->param[1]) {
    clear_t[param_t[0]](opq, 1);
    return;
  }

  if(!opq->param[2]) {
    move_t_t[param_t[0]][param_t[1]](opq, 1);
    return;
  }

  if(param_t[2] == T_NONE)
    l_sll_imm_t_table[param_t[0]][param_t[1]](opq, 1, opq->param[2]);
  else
    l_sll_t_table[param_t[0]][param_t[1]][param_t[2]](opq, 1);
}

DEF_2T_OP(imm_gen_op, l_sra_imm_t_table, gen_op_sra_imm);
DEF_3T_OP(generic_gen_op, l_sra_t_table, gen_op_sra);

void gen_l_sra(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0])
    return;

  if(!opq->param[1]) {
    clear_t[param_t[0]](opq, 1);
    return;
  }

  if(!opq->param[2]) {
    move_t_t[param_t[0]][param_t[1]](opq, 1);
    return;
  }

  if(param_t[2] == T_NONE)
    l_sra_imm_t_table[param_t[0]][param_t[1]](opq, 1, opq->param[2]);
  else
    l_sra_t_table[param_t[0]][param_t[1]][param_t[2]](opq, 1);
}

DEF_2T_OP(imm_gen_op, l_srl_imm_t_table, gen_op_srl_imm);
DEF_3T_OP(generic_gen_op, l_srl_t_table, gen_op_srl);

void gen_l_srl(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0])
    return;

  if(!opq->param[1]) {
    clear_t[param_t[0]](opq, 1);
    return;
  }

  if(!opq->param[2]) {
    move_t_t[param_t[0]][param_t[1]](opq, 1);
    return;
  }

  if(param_t[2] == T_NONE)
    l_srl_imm_t_table[param_t[0]][param_t[1]](opq, 1, opq->param[2]);
  else
    l_srl_t_table[param_t[0]][param_t[1]][param_t[2]](opq, 1);
}

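/* l.sub: rA - rA (or two zero operands) clears rD, a zero rA negates the
 * second operand (or loads the negated immediate), and a zero second
 * operand degenerates to a move. */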
DEF_2T_OP(generic_gen_op, l_neg_t_table, gen_op_neg);
DEF_3T_OP(generic_gen_op, l_sub_t_table, gen_op_sub);

void gen_l_sub(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0])
    return;

  if((param_t[2] != T_NONE) && (opq->param[1] == opq->param[2])) {
    clear_t[param_t[0]](opq, 1);
    return;
  }

  if(!opq->param[1] && !opq->param[2]) {
    clear_t[param_t[0]](opq, 1);
    return;
  }

  if(!opq->param[1]) {
    if(param_t[2] == T_NONE)
      mov_t_imm[param_t[0]](opq, 1, -opq->param[2]);
    else
      l_neg_t_table[param_t[0]][param_t[2]](opq, 1);
    return;
  }

  if(!opq->param[2]) {
    move_t_t[param_t[0]][param_t[1]](opq, 1);
    return;
  }

  l_sub_t_table[param_t[0]][param_t[1]][param_t[2]](opq, 1);
}

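/* l.sys and l.trap enter the simulator's exception handling: a different
 * prep op is used when the instruction sits in a delay slot, and l.sys
 * additionally runs the scheduler and emits the jump. */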
/* FIXME: This will not work if the l.sys is in a delay slot */
void gen_l_sys(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(do_stats)
    /* All gprs are current since this insn doesn't touch any reg */
    gen_op_analysis(opq, 1);

  if(!delay_slot)
    gen_op_prep_sys(opq, 1);
  else
    gen_op_prep_sys_delay(opq, 1);

  gen_op_do_sched(opq, 1);
  gen_op_do_jump(opq, 1);
}

/* FIXME: This will not work if the l.trap is in a delay slot */
void gen_l_trap(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(do_stats)
    /* All gprs are current since this insn doesn't touch any reg */
    gen_op_analysis(opq, 1);

  if(!delay_slot)
    gen_op_prep_trap(opq, 1);
  else
    gen_op_prep_trap_delay(opq, 1);
}

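/* l.xor mirrors gen_l_or above, with the extra rule that rA ^ rA (register
 * form) clears the destination. */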
DEF_2T_OP(imm_gen_op, l_xor_imm_t_table, gen_op_xor_imm);
/* FIXME: Make unused elements NULL */
DEF_3T_OP_NEQ(generic_gen_op, l_xor_t_table, gen_op_xor);

void gen_l_xor(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!opq->param[0])
    return;

  if((param_t[2] != T_NONE) && (opq->param[1] == opq->param[2])) {
    clear_t[param_t[0]](opq, 1);
    return;
  }

  if(!opq->param[2]) {
    if((param_t[2] == T_NONE) && (opq->param[0] == opq->param[1]))
      return;
    move_t_t[param_t[0]][param_t[1]](opq, 1);
    return;
  }

  if(!opq->param[1]) {
    if(param_t[2] == T_NONE) {
      mov_t_imm[param_t[0]](opq, 1, opq->param[2]);
      return;
    }
    move_t_t[param_t[0]][param_t[2]](opq, 1);
    return;
  }

  if(param_t[2] == T_NONE)
    l_xor_imm_t_table[param_t[0]][param_t[1]](opq, 1, opq->param[2]);
  else
    l_xor_t_table[param_t[0]][param_t[1]][param_t[2]](opq, 1);
}

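/* Any opcode the recompiler cannot handle is turned into an illegal
 * instruction exception, with a separate op for the delay-slot case. */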
void gen_l_invalid(struct op_queue *opq, int param_t[3], int delay_slot)
{
  if(!delay_slot) {
    gen_op_illegal(opq, 1);
    gen_op_do_jump(opq, 1);
  } else {
    gen_op_illegal_delay(opq, 1);
    gen_op_do_jump(opq, 1);
  }
}

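/* The recompiler does not implement the floating point instructions; each
 * lf.* stub below simply falls through to gen_l_invalid() and raises an
 * illegal instruction exception. */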
/*----------------------------------[ Floating point instructions (stubs) ]---*/
void gen_lf_add_s(struct op_queue *opq, int param_t[3], int delay_slot)
{
  gen_l_invalid(opq, param_t, delay_slot);
}

void gen_lf_div_s(struct op_queue *opq, int param_t[3], int delay_slot)
{
  gen_l_invalid(opq, param_t, delay_slot);
}

void gen_lf_ftoi_s(struct op_queue *opq, int param_t[3], int delay_slot)
{
  gen_l_invalid(opq, param_t, delay_slot);
}

void gen_lf_itof_s(struct op_queue *opq, int param_t[3], int delay_slot)
{
  gen_l_invalid(opq, param_t, delay_slot);
}

void gen_lf_madd_s(struct op_queue *opq, int param_t[3], int delay_slot)
{
  gen_l_invalid(opq, param_t, delay_slot);
}

void gen_lf_mul_s(struct op_queue *opq, int param_t[3], int delay_slot)
{
  gen_l_invalid(opq, param_t, delay_slot);
}

void gen_lf_rem_s(struct op_queue *opq, int param_t[3], int delay_slot)
{
  gen_l_invalid(opq, param_t, delay_slot);
}

void gen_lf_sfeq_s(struct op_queue *opq, int param_t[3], int delay_slot)
{
  gen_l_invalid(opq, param_t, delay_slot);
}

void gen_lf_sfge_s(struct op_queue *opq, int param_t[3], int delay_slot)
{
  gen_l_invalid(opq, param_t, delay_slot);
}

void gen_lf_sfgt_s(struct op_queue *opq, int param_t[3], int delay_slot)
{
  gen_l_invalid(opq, param_t, delay_slot);
}

void gen_lf_sfle_s(struct op_queue *opq, int param_t[3], int delay_slot)
{
  gen_l_invalid(opq, param_t, delay_slot);
}

void gen_lf_sflt_s(struct op_queue *opq, int param_t[3], int delay_slot)
{
  gen_l_invalid(opq, param_t, delay_slot);
}

void gen_lf_sfne_s(struct op_queue *opq, int param_t[3], int delay_slot)
{
  gen_l_invalid(opq, param_t, delay_slot);
}

void gen_lf_sub_s(struct op_queue *opq, int param_t[3], int delay_slot)
{
  gen_l_invalid(opq, param_t, delay_slot);
}
 
