openrisc/trunk/gnu-old/gcc-4.2.2/gcc/config/score/score-mdaux.c (rev 820)

/* score-mdaux.c for Sunplus S+CORE processor
   Copyright (C) 2005, 2007 Free Software Foundation, Inc.
   Contributed by Sunnorth

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include <signal.h>
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "recog.h"
#include "toplev.h"
#include "output.h"
#include "tree.h"
#include "function.h"
#include "expr.h"
#include "optabs.h"
#include "flags.h"
#include "reload.h"
#include "tm_p.h"
#include "ggc.h"
#include "gstab.h"
#include "hashtab.h"
#include "debug.h"
#include "target.h"
#include "target-def.h"
#include "integrate.h"
#include "langhooks.h"
#include "cfglayout.h"
#include "score-mdaux.h"

#define BITSET_P(VALUE, BIT)      (((VALUE) & (1L << (BIT))) != 0)
#define INS_BUF_SZ                100

/* Define the information needed to generate branch insns.  This is
   stored from the compare operation.  */
rtx cmp_op0, cmp_op1;

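/* Buffer used to build the asm output templates returned by the mdp_*
   routines below.  */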
static char ins[INS_BUF_SZ + 8];

/* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
   to the same object as SYMBOL.  */
static int
score_offset_within_object_p (rtx symbol, HOST_WIDE_INT offset)
{
  if (GET_CODE (symbol) != SYMBOL_REF)
    return 0;

  if (CONSTANT_POOL_ADDRESS_P (symbol)
      && offset >= 0
      && offset < (int)GET_MODE_SIZE (get_pool_mode (symbol)))
    return 1;

  if (SYMBOL_REF_DECL (symbol) != 0
      && offset >= 0
      && offset < int_size_in_bytes (TREE_TYPE (SYMBOL_REF_DECL (symbol))))
    return 1;

  return 0;
}

/* Split X into a base and a constant offset, storing them in *BASE
   and *OFFSET respectively.  */
static void
score_split_const (rtx x, rtx *base, HOST_WIDE_INT *offset)
{
  *offset = 0;

  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
    {
      *offset += INTVAL (XEXP (x, 1));
      x = XEXP (x, 0);
    }

  *base = x;
}

/* Classify symbol X, which must be a SYMBOL_REF or a LABEL_REF.  */
static enum score_symbol_type
score_classify_symbol (rtx x)
{
  if (GET_CODE (x) == LABEL_REF)
    return SYMBOL_GENERAL;

  gcc_assert (GET_CODE (x) == SYMBOL_REF);

  if (CONSTANT_POOL_ADDRESS_P (x))
    {
      if (GET_MODE_SIZE (get_pool_mode (x)) <= SCORE_SDATA_MAX)
        return SYMBOL_SMALL_DATA;
      return SYMBOL_GENERAL;
    }
  if (SYMBOL_REF_SMALL_P (x))
    return SYMBOL_SMALL_DATA;
  return SYMBOL_GENERAL;
}

/* Return true if the current function must save REGNO.  */
static int
score_save_reg_p (unsigned int regno)
{
  /* Check call-saved registers.  */
  if (regs_ever_live[regno] && !call_used_regs[regno])
    return 1;

  /* We need to save the old frame pointer before setting up a new one.  */
  if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
    return 1;

  /* We need to save the incoming return address if it is ever clobbered
     within the function.  */
  if (regno == RA_REGNUM && regs_ever_live[regno])
    return 1;

  return 0;
}

/* Return one word of double-word value OP, taking into account the fixed
   endianness of certain registers.  HIGH_P is true to select the high part,
   false to select the low part.  */
static rtx
subw (rtx op, int high_p)
{
  unsigned int byte;
  enum machine_mode mode = GET_MODE (op);

  if (mode == VOIDmode)
    mode = DImode;

  byte = (TARGET_LITTLE_ENDIAN ? high_p : !high_p) ? UNITS_PER_WORD : 0;

  if (GET_CODE (op) == REG && REGNO (op) == HI_REGNUM)
    return gen_rtx_REG (SImode, high_p ? HI_REGNUM : LO_REGNUM);

  if (GET_CODE (op) == MEM)
    return adjust_address (op, SImode, byte);

  return simplify_gen_subreg (SImode, op, mode, byte);
}

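/* Return a pointer to the single, statically allocated record that holds
   the frame information computed by mda_compute_frame_size.  */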
struct score_frame_info *
mda_cached_frame (void)
{
  static struct score_frame_info _frame_info;
  return &_frame_info;
}

/* Compute the layout of the current function's frame and return the cached
   frame information.  SIZE is the size (in bytes) of the local variables.  */
struct score_frame_info *
mda_compute_frame_size (HOST_WIDE_INT size)
{
  unsigned int regno;
  struct score_frame_info *f = mda_cached_frame ();

  memset (f, 0, sizeof (struct score_frame_info));
  f->gp_reg_size = 0;
  f->mask = 0;
  f->var_size = SCORE_STACK_ALIGN (size);
  f->args_size = current_function_outgoing_args_size;
  f->cprestore_size = flag_pic ? UNITS_PER_WORD : 0;
  if (f->var_size == 0 && current_function_is_leaf)
    f->args_size = f->cprestore_size = 0;

  if (f->args_size == 0 && current_function_calls_alloca)
    f->args_size = UNITS_PER_WORD;

  f->total_size = f->var_size + f->args_size + f->cprestore_size;
  for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
    {
      if (score_save_reg_p (regno))
        {
          f->gp_reg_size += GET_MODE_SIZE (SImode);
          f->mask |= 1 << (regno - GP_REG_FIRST);
        }
    }

  if (current_function_calls_eh_return)
    {
      unsigned int i;
      for (i = 0;; ++i)
        {
          regno = EH_RETURN_DATA_REGNO (i);
          if (regno == INVALID_REGNUM)
            break;
          f->gp_reg_size += GET_MODE_SIZE (SImode);
          f->mask |= 1 << (regno - GP_REG_FIRST);
        }
    }

  f->total_size += f->gp_reg_size;
  f->num_gp = f->gp_reg_size / UNITS_PER_WORD;

  if (f->mask)
    {
      HOST_WIDE_INT offset;
      offset = (f->args_size + f->cprestore_size + f->var_size
                + f->gp_reg_size - GET_MODE_SIZE (SImode));
      f->gp_sp_offset = offset;
    }
  else
    f->gp_sp_offset = 0;

  return f;
}

/* Generate the prologue instructions for entry into a S+core function.  */
void
mdx_prologue (void)
{
#define EMIT_PL(_rtx)        RTX_FRAME_RELATED_P (_rtx) = 1

  struct score_frame_info *f = mda_compute_frame_size (get_frame_size ());
  HOST_WIDE_INT size;
  int regno;

  size = f->total_size - f->gp_reg_size;

  if (flag_pic)
    emit_insn (gen_cpload ());

  for (regno = (int) GP_REG_LAST; regno >= (int) GP_REG_FIRST; regno--)
    {
      if (BITSET_P (f->mask, regno - GP_REG_FIRST))
        {
          rtx mem = gen_rtx_MEM (SImode,
                                 gen_rtx_PRE_DEC (SImode, stack_pointer_rtx));
          rtx reg = gen_rtx_REG (SImode, regno);
          if (!current_function_calls_eh_return)
            MEM_READONLY_P (mem) = 1;
          EMIT_PL (emit_insn (gen_pushsi (mem, reg)));
        }
    }

  if (size > 0)
    {
      rtx insn;

      if (CONST_OK_FOR_LETTER_P (-size, 'L'))
        EMIT_PL (emit_insn (gen_add3_insn (stack_pointer_rtx,
                                           stack_pointer_rtx,
                                           GEN_INT (-size))));
      else
        {
          EMIT_PL (emit_move_insn (gen_rtx_REG (Pmode, PROLOGUE_TEMP_REGNUM),
                                   GEN_INT (size)));
          EMIT_PL (emit_insn
                   (gen_sub3_insn (stack_pointer_rtx,
                                   stack_pointer_rtx,
                                   gen_rtx_REG (Pmode,
                                                PROLOGUE_TEMP_REGNUM))));
        }
      insn = get_last_insn ();
      REG_NOTES (insn) =
        alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
                         gen_rtx_SET (VOIDmode, stack_pointer_rtx,
                                      plus_constant (stack_pointer_rtx,
                                                     -size)),
                         REG_NOTES (insn));
    }

  if (frame_pointer_needed)
    EMIT_PL (emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx));

  if (flag_pic && f->cprestore_size)
    {
      if (frame_pointer_needed)
        emit_insn (gen_cprestore_use_fp (GEN_INT (size - f->cprestore_size)));
      else
        emit_insn (gen_cprestore_use_sp (GEN_INT (size - f->cprestore_size)));
    }

#undef EMIT_PL
}

/* Generate the epilogue instructions in a S+core function.  */
void
mdx_epilogue (int sibcall_p)
{
  struct score_frame_info *f = mda_compute_frame_size (get_frame_size ());
  HOST_WIDE_INT size;
  int regno;
  rtx base;

  size = f->total_size - f->gp_reg_size;

  if (!frame_pointer_needed)
    base = stack_pointer_rtx;
  else
    base = hard_frame_pointer_rtx;

  if (size)
    {
      if (CONST_OK_FOR_LETTER_P (size, 'L'))
        emit_insn (gen_add3_insn (base, base, GEN_INT (size)));
      else
        {
          emit_move_insn (gen_rtx_REG (Pmode, EPILOGUE_TEMP_REGNUM),
                          GEN_INT (size));
          emit_insn (gen_add3_insn (base, base,
                                    gen_rtx_REG (Pmode,
                                                 EPILOGUE_TEMP_REGNUM)));
        }
    }

  if (base != stack_pointer_rtx)
    emit_move_insn (stack_pointer_rtx, base);

  if (current_function_calls_eh_return)
    emit_insn (gen_add3_insn (stack_pointer_rtx,
                              stack_pointer_rtx,
                              EH_RETURN_STACKADJ_RTX));

  for (regno = (int) GP_REG_FIRST; regno <= (int) GP_REG_LAST; regno++)
    {
      if (BITSET_P (f->mask, regno - GP_REG_FIRST))
        {
          rtx mem = gen_rtx_MEM (SImode,
                                 gen_rtx_POST_INC (SImode, stack_pointer_rtx));
          rtx reg = gen_rtx_REG (SImode, regno);

          if (!current_function_calls_eh_return)
            MEM_READONLY_P (mem) = 1;

          emit_insn (gen_popsi (reg, mem));
        }
    }

  if (!sibcall_p)
    emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode, RA_REGNUM)));
}

/* Return true if X is a valid base register for the given mode.
   Allow only hard registers if STRICT.  */
int
mda_valid_base_register_p (rtx x, int strict)
{
  if (!strict && GET_CODE (x) == SUBREG)
    x = SUBREG_REG (x);

  return (GET_CODE (x) == REG
          && score_regno_mode_ok_for_base_p (REGNO (x), strict));
}

/* Return true if X is a valid address for machine mode MODE.  If it is,
   fill in INFO appropriately.  STRICT is true if we should only accept
   hard base registers.  */
int
mda_classify_address (struct score_address_info *info,
                      enum machine_mode mode, rtx x, int strict)
{
  info->code = GET_CODE (x);

  switch (info->code)
    {
    case REG:
    case SUBREG:
      info->type = ADD_REG;
      info->reg = x;
      info->offset = const0_rtx;
      return mda_valid_base_register_p (info->reg, strict);
    case PLUS:
      info->type = ADD_REG;
      info->reg = XEXP (x, 0);
      info->offset = XEXP (x, 1);
      return (mda_valid_base_register_p (info->reg, strict)
              && GET_CODE (info->offset) == CONST_INT
              && IMM_IN_RANGE (INTVAL (info->offset), 15, 1));
    case PRE_DEC:
    case POST_DEC:
    case PRE_INC:
    case POST_INC:
      if (GET_MODE_SIZE (mode) > GET_MODE_SIZE (SImode))
        return false;
      info->type = ADD_REG;
      info->reg = XEXP (x, 0);
      info->offset = GEN_INT (GET_MODE_SIZE (mode));
      return mda_valid_base_register_p (info->reg, strict);
    case CONST_INT:
      info->type = ADD_CONST_INT;
      return IMM_IN_RANGE (INTVAL (x), 15, 1);
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      info->type = ADD_SYMBOLIC;
      return (mda_symbolic_constant_p (x, &info->symbol_type)
              && (info->symbol_type == SYMBOL_GENERAL
                  || info->symbol_type == SYMBOL_SMALL_DATA));
    default:
      return 0;
    }
}

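/* Emit the compare insn that sets the condition-code register from the
   operands previously saved in cmp_op0 and cmp_op1.  */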
void
mda_gen_cmp (enum machine_mode mode)
{
  emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (mode, CC_REGNUM),
                          gen_rtx_COMPARE (mode, cmp_op0, cmp_op1)));
}

/* Return true if X is a symbolic constant that can be calculated in
   the same way as a bare symbol.  If it is, store the type of the
   symbol in *SYMBOL_TYPE.  */
int
mda_symbolic_constant_p (rtx x, enum score_symbol_type *symbol_type)
{
  HOST_WIDE_INT offset;

  score_split_const (x, &x, &offset);
  if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
    *symbol_type = score_classify_symbol (x);
  else
    return 0;

  if (offset == 0)
    return 1;

  /* If the offset is outside the 15-bit immediate range, it must be
     reloaded.  */
  if (!IMM_IN_RANGE (offset, 15, 1))
    return 0;

  switch (*symbol_type)
    {
    case SYMBOL_GENERAL:
      return 1;
    case SYMBOL_SMALL_DATA:
      return score_offset_within_object_p (x, offset);
    }
  gcc_unreachable ();
}

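/* Expand a conditional move: choose the CC mode for the comparison code in
   OPS[1] and emit the compare of cmp_op0 and cmp_op1 that sets the
   condition-code register.  */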
void
mdx_movsicc (rtx *ops)
{
  enum machine_mode mode;

  mode = score_select_cc_mode (GET_CODE (ops[1]), ops[2], ops[3]);
  emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (mode, CC_REGNUM),
                          gen_rtx_COMPARE (mode, cmp_op0, cmp_op1)));
}

/* Both the call and sibcall patterns need to call this function.  */
void
mdx_call (rtx *ops, bool sib)
{
  rtx addr = XEXP (ops[0], 0);
  if (!call_insn_operand (addr, VOIDmode))
    {
      rtx oaddr = addr;
      addr = gen_reg_rtx (Pmode);
      gen_move_insn (addr, oaddr);
    }

  if (sib)
    emit_call_insn (gen_sibcall_internal (addr, ops[1]));
  else
    emit_call_insn (gen_call_internal (addr, ops[1]));
}

/* Both the call_value and sibcall_value patterns need to call this
   function.  */
void
mdx_call_value (rtx *ops, bool sib)
{
  rtx result = ops[0];
  rtx addr = XEXP (ops[1], 0);
  rtx arg = ops[2];

  if (!call_insn_operand (addr, VOIDmode))
    {
      rtx oaddr = addr;
      addr = gen_reg_rtx (Pmode);
      gen_move_insn (addr, oaddr);
    }

  if (sib)
    emit_call_insn (gen_sibcall_value_internal (result, addr, arg));
  else
    emit_call_insn (gen_call_value_internal (result, addr, arg));
}

/* Machine Split  */
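/* Split a doubleword move into two word moves, ordering the moves so that
   the first destination word written does not overlap a source word that is
   still needed.  */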
void
mds_movdi (rtx *ops)
{
  rtx dst = ops[0];
  rtx src = ops[1];
  rtx dst0 = subw (dst, 0);
  rtx dst1 = subw (dst, 1);
  rtx src0 = subw (src, 0);
  rtx src1 = subw (src, 1);

  if (GET_CODE (dst0) == REG && reg_overlap_mentioned_p (dst0, src))
    {
      emit_move_insn (dst1, src1);
      emit_move_insn (dst0, src0);
    }
  else
    {
      emit_move_insn (dst0, src0);
      emit_move_insn (dst1, src1);
    }
}

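/* Expand a zero_extract test: use the single-bit test pattern when the
   extracted field is one bit wide and the bit position fits in an unsigned
   5-bit immediate, otherwise AND with the corresponding mask and set the
   condition code.  */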
void
mds_zero_extract_andi (rtx *ops)
{
  if (INTVAL (ops[1]) == 1 && const_uimm5 (ops[2], SImode))
    emit_insn (gen_zero_extract_bittst (ops[0], ops[2]));
  else
    {
      unsigned HOST_WIDE_INT mask;
      mask = (0xffffffffU & ((1U << INTVAL (ops[1])) - 1U));
      mask = mask << INTVAL (ops[2]);
      emit_insn (gen_andsi3_cmp (ops[3], ops[0],
                                 gen_int_mode (mask, SImode)));
    }
}

/* Return true if ADDR is a memory reference whose address uses a PRE/POST
   increment or decrement mode.  */
static bool
mda_pindex_mem (rtx addr)
{
  if (GET_CODE (addr) == MEM)
    {
      switch (GET_CODE (XEXP (addr, 0)))
        {
        case PRE_DEC:
        case POST_DEC:
        case PRE_INC:
        case POST_INC:
          return true;
        default:
          break;
        }
    }
  return false;
}

/* Output the asm operands for a load/store insn.  OPS[IDATA] is the data
   register and OPS[IADDR] is the memory operand.  */
static int
pr_addr_post (rtx *ops, int idata, int iaddr, char *ip, enum mda_mem_unit unit)
{
  struct score_address_info ai;

  gcc_assert (GET_CODE (ops[idata]) == REG);
  gcc_assert (mda_classify_address (&ai, SImode, XEXP (ops[iaddr], 0), true));

  if (!mda_pindex_mem (ops[iaddr])
      && ai.type == ADD_REG
      && GET_CODE (ai.offset) == CONST_INT
      && G16_REG_P (REGNO (ops[idata]))
      && G16_REG_P (REGNO (ai.reg)))
    {
      if (INTVAL (ai.offset) == 0)
        {
          ops[iaddr] = ai.reg;
          return snprintf (ip, INS_BUF_SZ,
                           "!        %%%d, [%%%d]", idata, iaddr);
        }
      if (REGNO (ai.reg) == HARD_FRAME_POINTER_REGNUM)
        {
          HOST_WIDE_INT offset = INTVAL (ai.offset);
          if (MDA_ALIGN_UNIT (offset, unit)
              && CONST_OK_FOR_LETTER_P (offset >> unit, 'J'))
            {
              ops[iaddr] = ai.offset;
              return snprintf (ip, INS_BUF_SZ,
                               "p!        %%%d, %%c%d", idata, iaddr);
            }
        }
    }
  return snprintf (ip, INS_BUF_SZ, "        %%%d, %%a%d", idata, iaddr);
}

/* Output asm insn for load.  */
const char *
mdp_linsn (rtx *ops, enum mda_mem_unit unit, bool sign)
{
  const char *pre_ins[] =
    {"lbu", "lhu", "lw", "??", "lb", "lh", "lw", "??"};
  char *ip;

  strcpy (ins, pre_ins[(sign ? 4 : 0) + unit]);
  ip = ins + strlen (ins);

  if ((!sign && unit != MDA_HWORD)
      || (sign && unit != MDA_BYTE))
    pr_addr_post (ops, 0, 1, ip, unit);
  else
    snprintf (ip, INS_BUF_SZ, "        %%0, %%a1");

  return ins;
}

/* Output asm insn for store.  */
const char *
mdp_sinsn (rtx *ops, enum mda_mem_unit unit)
{
  const char *pre_ins[] = {"sb", "sh", "sw"};
  char *ip;

  strcpy (ins, pre_ins[unit]);
  ip = ins + strlen (ins);
  pr_addr_post (ops, 1, 0, ip, unit);
  return ins;
}

/* Output asm insn for load immediate.  */
const char *
mdp_limm (rtx *ops)
{
  HOST_WIDE_INT v;

  gcc_assert (GET_CODE (ops[0]) == REG);
  gcc_assert (GET_CODE (ops[1]) == CONST_INT);

  v = INTVAL (ops[1]);
  if (G16_REG_P (REGNO (ops[0])) && IMM_IN_RANGE (v, 8, 0))
    return "ldiu!   %0, %c1";
  else if (IMM_IN_RANGE (v, 16, 1))
    return "ldi     %0, %c1";
  else if ((v & 0xffff) == 0)
    return "ldis    %0, %U1";
  else
    return "li      %0, %c1";
}

/* Output asm insn for move.  */
const char *
mdp_move (rtx *ops)
{
  gcc_assert (GET_CODE (ops[0]) == REG);
  gcc_assert (GET_CODE (ops[1]) == REG);

  if (G16_REG_P (REGNO (ops[0])))
    {
      if (G16_REG_P (REGNO (ops[1])))
        return "mv!     %0, %1";
      else
        return "mlfh!   %0, %1";
    }
  else if (G16_REG_P (REGNO (ops[1])))
    return "mhfl!   %0, %1";
  else
    return "mv      %0, %1";
}

/* Emit lcb/lce insns.  */
bool
mdx_unaligned_load (rtx *ops)
{
  rtx dst = ops[0];
  rtx src = ops[1];
  rtx len = ops[2];
  rtx off = ops[3];
  rtx addr_reg;

  if (INTVAL (len) != BITS_PER_WORD
      || (INTVAL (off) % BITS_PER_UNIT) != 0)
    return false;

  gcc_assert (GET_MODE_SIZE (GET_MODE (dst)) == GET_MODE_SIZE (SImode));

  addr_reg = copy_addr_to_reg (XEXP (src, 0));
  emit_insn (gen_move_lcb (addr_reg, addr_reg));
  emit_insn (gen_move_lce (addr_reg, addr_reg, dst));

  return true;
}

/* Emit scb/sce insns.  */
bool
mdx_unaligned_store (rtx *ops)
{
  rtx dst = ops[0];
  rtx len = ops[1];
  rtx off = ops[2];
  rtx src = ops[3];
  rtx addr_reg;

  if (INTVAL (len) != BITS_PER_WORD
      || (INTVAL (off) % BITS_PER_UNIT) != 0)
    return false;

  gcc_assert (GET_MODE_SIZE (GET_MODE (src)) == GET_MODE_SIZE (SImode));

  addr_reg = copy_addr_to_reg (XEXP (dst, 0));
  emit_insn (gen_move_scb (addr_reg, addr_reg, src));
  emit_insn (gen_move_sce (addr_reg, addr_reg));

  return true;
}

/* If the length is short, generate the move insns as straight-line code.  */
static void
mdx_block_move_straight (rtx dst, rtx src, HOST_WIDE_INT length)
{
  HOST_WIDE_INT leftover;
  int i, reg_count;
  rtx *regs;

  leftover = length % UNITS_PER_WORD;
  length -= leftover;
  reg_count = length / UNITS_PER_WORD;

  regs = alloca (sizeof (rtx) * reg_count);
  for (i = 0; i < reg_count; i++)
    regs[i] = gen_reg_rtx (SImode);

  /* Load from src to regs.  */
  if (MEM_ALIGN (src) >= BITS_PER_WORD)
    {
      HOST_WIDE_INT offset = 0;
      for (i = 0; i < reg_count; offset += UNITS_PER_WORD, i++)
        emit_move_insn (regs[i], adjust_address (src, SImode, offset));
    }
  else if (reg_count >= 1)
    {
      rtx src_reg = copy_addr_to_reg (XEXP (src, 0));

      emit_insn (gen_move_lcb (src_reg, src_reg));
      for (i = 0; i < (reg_count - 1); i++)
        emit_insn (gen_move_lcw (src_reg, src_reg, regs[i]));
      emit_insn (gen_move_lce (src_reg, src_reg, regs[i]));
    }

  /* Store regs to dest.  */
  if (MEM_ALIGN (dst) >= BITS_PER_WORD)
    {
      HOST_WIDE_INT offset = 0;
      for (i = 0; i < reg_count; offset += UNITS_PER_WORD, i++)
        emit_move_insn (adjust_address (dst, SImode, offset), regs[i]);
    }
  else if (reg_count >= 1)
    {
      rtx dst_reg = copy_addr_to_reg (XEXP (dst, 0));

      emit_insn (gen_move_scb (dst_reg, dst_reg, regs[0]));
      for (i = 1; i < reg_count; i++)
        emit_insn (gen_move_scw (dst_reg, dst_reg, regs[i]));
      emit_insn (gen_move_sce (dst_reg, dst_reg));
    }

  /* Mop up any left-over bytes.  */
  if (leftover > 0)
    {
      src = adjust_address (src, BLKmode, length);
      dst = adjust_address (dst, BLKmode, length);
      move_by_pieces (dst, src, leftover,
                      MIN (MEM_ALIGN (src), MEM_ALIGN (dst)), 0);
    }
}

/* Generate loop head when dst or src is unaligned.  */
static void
mdx_block_move_loop_head (rtx dst_reg, HOST_WIDE_INT dst_align,
                          rtx src_reg, HOST_WIDE_INT src_align,
                          HOST_WIDE_INT length)
{
  bool src_unaligned = (src_align < BITS_PER_WORD);
  bool dst_unaligned = (dst_align < BITS_PER_WORD);

  rtx temp = gen_reg_rtx (SImode);

  gcc_assert (length == UNITS_PER_WORD);

  if (src_unaligned)
    {
      emit_insn (gen_move_lcb (src_reg, src_reg));
      emit_insn (gen_move_lcw (src_reg, src_reg, temp));
    }
  else
    emit_insn (gen_move_lw_a (src_reg,
                              src_reg, gen_int_mode (4, SImode), temp));

  if (dst_unaligned)
    emit_insn (gen_move_scb (dst_reg, dst_reg, temp));
  else
    emit_insn (gen_move_sw_a (dst_reg,
                              dst_reg, gen_int_mode (4, SImode), temp));
}

/* Generate loop body, copy length bytes per iteration.  */
static void
mdx_block_move_loop_body (rtx dst_reg, HOST_WIDE_INT dst_align,
                          rtx src_reg, HOST_WIDE_INT src_align,
                          HOST_WIDE_INT length)
{
  int reg_count = length / UNITS_PER_WORD;
  rtx *regs = alloca (sizeof (rtx) * reg_count);
  int i;
  bool src_unaligned = (src_align < BITS_PER_WORD);
  bool dst_unaligned = (dst_align < BITS_PER_WORD);

  for (i = 0; i < reg_count; i++)
    regs[i] = gen_reg_rtx (SImode);

  if (src_unaligned)
    {
      for (i = 0; i < reg_count; i++)
        emit_insn (gen_move_lcw (src_reg, src_reg, regs[i]));
    }
  else
    {
      for (i = 0; i < reg_count; i++)
        emit_insn (gen_move_lw_a (src_reg,
                                  src_reg, gen_int_mode (4, SImode), regs[i]));
    }

  if (dst_unaligned)
    {
      for (i = 0; i < reg_count; i++)
        emit_insn (gen_move_scw (dst_reg, dst_reg, regs[i]));
    }
  else
    {
      for (i = 0; i < reg_count; i++)
        emit_insn (gen_move_sw_a (dst_reg,
                                  dst_reg, gen_int_mode (4, SImode), regs[i]));
    }
}

/* Generate loop foot, copy the leftover bytes.  */
static void
mdx_block_move_loop_foot (rtx dst_reg, HOST_WIDE_INT dst_align,
                          rtx src_reg, HOST_WIDE_INT src_align,
                          HOST_WIDE_INT length)
{
  bool src_unaligned = (src_align < BITS_PER_WORD);
  bool dst_unaligned = (dst_align < BITS_PER_WORD);

  HOST_WIDE_INT leftover;

  leftover = length % UNITS_PER_WORD;
  length -= leftover;

  if (length > 0)
    mdx_block_move_loop_body (dst_reg, dst_align,
                              src_reg, src_align, length);

  if (dst_unaligned)
    emit_insn (gen_move_sce (dst_reg, dst_reg));

  if (leftover > 0)
    {
      HOST_WIDE_INT src_adj = src_unaligned ? -4 : 0;
      HOST_WIDE_INT dst_adj = dst_unaligned ? -4 : 0;
      rtx temp;

      gcc_assert (leftover < UNITS_PER_WORD);

      if (leftover >= UNITS_PER_WORD / 2
          && src_align >= BITS_PER_WORD / 2
          && dst_align >= BITS_PER_WORD / 2)
        {
          temp = gen_reg_rtx (HImode);
          emit_insn (gen_move_lhu_b (src_reg, src_reg,
                                     gen_int_mode (src_adj, SImode), temp));
          emit_insn (gen_move_sh_b (dst_reg, dst_reg,
                                    gen_int_mode (dst_adj, SImode), temp));
          leftover -= UNITS_PER_WORD / 2;
          src_adj = UNITS_PER_WORD / 2;
          dst_adj = UNITS_PER_WORD / 2;
        }

      while (leftover > 0)
        {
          temp = gen_reg_rtx (QImode);
          emit_insn (gen_move_lbu_b (src_reg, src_reg,
                                     gen_int_mode (src_adj, SImode), temp));
          emit_insn (gen_move_sb_b (dst_reg, dst_reg,
                                    gen_int_mode (dst_adj, SImode), temp));
          leftover--;
          src_adj = 1;
          dst_adj = 1;
        }
    }
}

#define MIN_MOVE_REGS 3
#define MIN_MOVE_BYTES (MIN_MOVE_REGS * UNITS_PER_WORD)
#define MAX_MOVE_REGS 4
#define MAX_MOVE_BYTES (MAX_MOVE_REGS * UNITS_PER_WORD)

/* The length is large; generate a loop if necessary.
   The loop consists of a head, a body and a foot.  */
static void
mdx_block_move_loop (rtx dst, rtx src, HOST_WIDE_INT length)
{
  HOST_WIDE_INT src_align = MEM_ALIGN (src);
  HOST_WIDE_INT dst_align = MEM_ALIGN (dst);
  HOST_WIDE_INT loop_mov_bytes;
  HOST_WIDE_INT iteration = 0;
  HOST_WIDE_INT head_length = 0, leftover;
  rtx label, src_reg, dst_reg, final_dst;

  bool gen_loop_head = (src_align < BITS_PER_WORD
                        || dst_align < BITS_PER_WORD);

  if (gen_loop_head)
    head_length += UNITS_PER_WORD;

  for (loop_mov_bytes = MAX_MOVE_BYTES;
       loop_mov_bytes >= MIN_MOVE_BYTES;
       loop_mov_bytes -= UNITS_PER_WORD)
    {
      iteration = (length - head_length) / loop_mov_bytes;
      if (iteration > 1)
        break;
    }
  if (iteration <= 1)
    {
      mdx_block_move_straight (dst, src, length);
      return;
    }

  leftover = (length - head_length) % loop_mov_bytes;
  length -= leftover;

  src_reg = copy_addr_to_reg (XEXP (src, 0));
  dst_reg = copy_addr_to_reg (XEXP (dst, 0));
  final_dst = expand_simple_binop (Pmode, PLUS, dst_reg, GEN_INT (length),
                                   0, 0, OPTAB_WIDEN);

  if (gen_loop_head)
    mdx_block_move_loop_head (dst_reg, dst_align,
                              src_reg, src_align, head_length);

  label = gen_label_rtx ();
  emit_label (label);

  mdx_block_move_loop_body (dst_reg, dst_align,
                            src_reg, src_align, loop_mov_bytes);

  emit_insn (gen_cmpsi (dst_reg, final_dst));
  emit_jump_insn (gen_bne (label));

  mdx_block_move_loop_foot (dst_reg, dst_align,
                            src_reg, src_align, leftover);
}

/* Generate block move, for misc.md: "movmemsi".  */
bool
mdx_block_move (rtx *ops)
{
  rtx dst = ops[0];
  rtx src = ops[1];
  rtx length = ops[2];

  if (TARGET_LITTLE_ENDIAN
      && (MEM_ALIGN (src) < BITS_PER_WORD || MEM_ALIGN (dst) < BITS_PER_WORD)
      && INTVAL (length) >= UNITS_PER_WORD)
    return false;

  if (GET_CODE (length) == CONST_INT)
    {
      if (INTVAL (length) <= 2 * MAX_MOVE_BYTES)
        {
          mdx_block_move_straight (dst, src, INTVAL (length));
          return true;
        }
      else if (optimize
               && !(flag_unroll_loops || flag_unroll_all_loops))
        {
          mdx_block_move_loop (dst, src, INTVAL (length));
          return true;
        }
    }
  return false;
}

/* Output an add-immediate insn, using the compact addei!/subei! forms when
   the operands allow it.  */
const char *
mdp_select_add_imm (rtx *ops, bool set_cc)
{
  HOST_WIDE_INT v = INTVAL (ops[2]);

  gcc_assert (GET_CODE (ops[2]) == CONST_INT);
  gcc_assert (REGNO (ops[0]) == REGNO (ops[1]));

  if (set_cc && G16_REG_P (REGNO (ops[0])))
    {
      if (v > 0 && IMM_IS_POW_OF_2 ((unsigned HOST_WIDE_INT) v, 0, 15))
        {
          ops[2] = GEN_INT (ffs (v) - 1);
          return "addei!  %0, %c2";
        }

      if (v < 0 && IMM_IS_POW_OF_2 ((unsigned HOST_WIDE_INT) (-v), 0, 15))
        {
          ops[2] = GEN_INT (ffs (-v) - 1);
          return "subei!  %0, %c2";
        }
    }

  if (set_cc)
    return "addi.c  %0, %c2";
  else
    return "addi    %0, %c2";
}

/* Output an arithmetic insn, choosing the compact (16-bit) form when the
   operands allow it.  */
const char *
mdp_select (rtx *ops, const char *inst_pre,
            bool commu, const char *letter, bool set_cc)
{
  gcc_assert (GET_CODE (ops[0]) == REG);
  gcc_assert (GET_CODE (ops[1]) == REG);

  if (set_cc && G16_REG_P (REGNO (ops[0]))
      && (GET_CODE (ops[2]) == REG ? G16_REG_P (REGNO (ops[2])) : 1)
      && REGNO (ops[0]) == REGNO (ops[1]))
    {
      snprintf (ins, INS_BUF_SZ, "%s!  %%0, %%%s2", inst_pre, letter);
      return ins;
    }

  if (commu && set_cc && G16_REG_P (REGNO (ops[0]))
      && G16_REG_P (REGNO (ops[1]))
      && REGNO (ops[0]) == REGNO (ops[2]))
    {
      gcc_assert (GET_CODE (ops[2]) == REG);
      snprintf (ins, INS_BUF_SZ, "%s!  %%0, %%%s1", inst_pre, letter);
      return ins;
    }

  if (set_cc)
    snprintf (ins, INS_BUF_SZ, "%s.c  %%0, %%1, %%%s2", inst_pre, letter);
  else
    snprintf (ins, INS_BUF_SZ, "%s    %%0, %%1, %%%s2", inst_pre, letter);
  return ins;
}