/* Subroutines for insn-output.c for Matsushita MN10300 series
   Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004,
   2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
   Contributed by Jeff Law (law@cygnus.com).

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "recog.h"
#include "reload.h"
#include "expr.h"
#include "optabs.h"
#include "function.h"
#include "obstack.h"
#include "diagnostic-core.h"
#include "tm_p.h"
#include "tm-constrs.h"
#include "target.h"
#include "target-def.h"
#include "df.h"
#include "opts.h"
#include "cfgloop.h"

/* This is used in the am33_2.0-linux-gnu port, in which global symbol
   names are not prefixed by underscores, to tell whether to prefix a
   label with a plus sign or not, so that the assembler can tell
   symbol names from register names.  */
int mn10300_protect_label;

/* Selected processor type for tuning.  */
enum processor_type mn10300_tune_cpu = PROCESSOR_DEFAULT;

/* The size of the callee register save area.  Right now we save everything
   on entry since it costs us nothing in code size.  It does cost us from a
   speed standpoint, so we want to optimize this sooner or later.  */
#define REG_SAVE_BYTES (4 * df_regs_ever_live_p (2)             \
                        + 4 * df_regs_ever_live_p (3)           \
                        + 4 * df_regs_ever_live_p (6)           \
                        + 4 * df_regs_ever_live_p (7)           \
                        + 16 * (df_regs_ever_live_p (14)        \
                                || df_regs_ever_live_p (15)     \
                                || df_regs_ever_live_p (16)     \
                                || df_regs_ever_live_p (17)))
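
/* For instance, if only d2 and a2 (registers 2 and 6) are ever live, the
   macro above evaluates to 8; a single live extended register among
   registers 14-17 adds a full 16-byte block, since those four can only
   be saved and restored as a group (see mn10300_print_reg_list).  */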

#define CC_FLAG_Z       1
#define CC_FLAG_N       2
#define CC_FLAG_C       4
#define CC_FLAG_V       8
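
/* Z, N, C and V name the usual zero, negative, carry and overflow flags.
   Going by their use in the 'b'/'B' operand codes below,
   cc_flags_for_mode () reports which flags a given CC mode provides and
   cc_flags_for_code () which flags a condition code actually needs.  */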

static int cc_flags_for_mode (enum machine_mode);
static int cc_flags_for_code (enum rtx_code);

/* Implement TARGET_OPTION_OVERRIDE.  */

static void
mn10300_option_override (void)
{
  if (TARGET_AM33)
    target_flags &= ~MASK_MULT_BUG;
  else
    {
      /* Disable scheduling for the MN10300 as we do
         not have timing information available for it.  */
      flag_schedule_insns = 0;
      flag_schedule_insns_after_reload = 0;

      /* Force enable splitting of wide types, as otherwise it is trivial
         to run out of registers.  Indeed, this works so well that register
         allocation problems are now more common *without* optimization,
         when this flag is not enabled by default.  */
      flag_split_wide_types = 1;
    }

  if (mn10300_tune_string)
    {
      if (strcasecmp (mn10300_tune_string, "mn10300") == 0)
        mn10300_tune_cpu = PROCESSOR_MN10300;
      else if (strcasecmp (mn10300_tune_string, "am33") == 0)
        mn10300_tune_cpu = PROCESSOR_AM33;
      else if (strcasecmp (mn10300_tune_string, "am33-2") == 0)
        mn10300_tune_cpu = PROCESSOR_AM33_2;
      else if (strcasecmp (mn10300_tune_string, "am34") == 0)
        mn10300_tune_cpu = PROCESSOR_AM34;
      else
        error ("-mtune= expects mn10300, am33, am33-2, or am34");
    }
}

static void
mn10300_file_start (void)
{
  default_file_start ();

  if (TARGET_AM33_2)
    fprintf (asm_out_file, "\t.am33_2\n");
  else if (TARGET_AM33)
    fprintf (asm_out_file, "\t.am33\n");
}

/* Note: This list must match the liw_op attribute in mn10300.md.  */

static const char *liw_op_names[] =
{
  "add", "cmp", "sub", "mov",
  "and", "or", "xor",
  "asr", "lsr", "asl",
  "none", "max"
};

/* Print operand X using operand code CODE to assembly language output file
   FILE.  */

void
mn10300_print_operand (FILE *file, rtx x, int code)
{
  switch (code)
    {
    case 'W':
      {
        unsigned int liw_op = UINTVAL (x);

        gcc_assert (TARGET_ALLOW_LIW);
        gcc_assert (liw_op < LIW_OP_MAX);
        fputs (liw_op_names[liw_op], file);
        break;
      }

    case 'b':
    case 'B':
      {
        enum rtx_code cmp = GET_CODE (x);
        enum machine_mode mode = GET_MODE (XEXP (x, 0));
        const char *str;
        int have_flags;

        if (code == 'B')
          cmp = reverse_condition (cmp);
        have_flags = cc_flags_for_mode (mode);

        switch (cmp)
          {
          case NE:
            str = "ne";
            break;
          case EQ:
            str = "eq";
            break;
          case GE:
            /* bge is smaller than bnc.  */
            str = (have_flags & CC_FLAG_V ? "ge" : "nc");
            break;
          case LT:
            str = (have_flags & CC_FLAG_V ? "lt" : "ns");
            break;
          case GT:
            str = "gt";
            break;
          case LE:
            str = "le";
            break;
          case GEU:
            str = "cc";
            break;
          case GTU:
            str = "hi";
            break;
          case LEU:
            str = "ls";
            break;
          case LTU:
            str = "cs";
            break;
          case ORDERED:
            str = "lge";
            break;
          case UNORDERED:
            str = "uo";
            break;
          case LTGT:
            str = "lg";
            break;
          case UNEQ:
            str = "ue";
            break;
          case UNGE:
            str = "uge";
            break;
          case UNGT:
            str = "ug";
            break;
          case UNLE:
            str = "ule";
            break;
          case UNLT:
            str = "ul";
            break;
          default:
            gcc_unreachable ();
          }

        gcc_checking_assert ((cc_flags_for_code (cmp) & ~have_flags) == 0);
        fputs (str, file);
      }
      break;

    case 'C':
      /* This is used for the operand to a call instruction;
         if it's a REG, enclose it in parens, else output
         the operand normally.  */
      if (REG_P (x))
        {
          fputc ('(', file);
          mn10300_print_operand (file, x, 0);
          fputc (')', file);
        }
      else
        mn10300_print_operand (file, x, 0);
      break;

    case 'D':
      switch (GET_CODE (x))
        {
        case MEM:
          fputc ('(', file);
          output_address (XEXP (x, 0));
          fputc (')', file);
          break;

        case REG:
          fprintf (file, "fd%d", REGNO (x) - 18);
          break;

        default:
          gcc_unreachable ();
        }
      break;

      /* This is the least significant word of a 64-bit value.  */
    case 'L':
      switch (GET_CODE (x))
        {
        case MEM:
          fputc ('(', file);
          output_address (XEXP (x, 0));
          fputc (')', file);
          break;

        case REG:
          fprintf (file, "%s", reg_names[REGNO (x)]);
          break;

        case SUBREG:
          fprintf (file, "%s", reg_names[subreg_regno (x)]);
          break;

        case CONST_DOUBLE:
          {
            long val[2];
            REAL_VALUE_TYPE rv;

            switch (GET_MODE (x))
              {
              case DFmode:
                REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
                REAL_VALUE_TO_TARGET_DOUBLE (rv, val);
                fprintf (file, "0x%lx", val[0]);
                break;
              case SFmode:
                REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
                REAL_VALUE_TO_TARGET_SINGLE (rv, val[0]);
                fprintf (file, "0x%lx", val[0]);
                break;
              case VOIDmode:
              case DImode:
                mn10300_print_operand_address (file,
                                               GEN_INT (CONST_DOUBLE_LOW (x)));
                break;
              default:
                break;
              }
            break;
          }

        case CONST_INT:
          {
            rtx low, high;
            split_double (x, &low, &high);
            fprintf (file, "%ld", (long)INTVAL (low));
            break;
          }

        default:
          gcc_unreachable ();
        }
      break;

      /* Similarly, but for the most significant word.  */
    case 'H':
      switch (GET_CODE (x))
        {
        case MEM:
          fputc ('(', file);
          x = adjust_address (x, SImode, 4);
          output_address (XEXP (x, 0));
          fputc (')', file);
          break;

        case REG:
          fprintf (file, "%s", reg_names[REGNO (x) + 1]);
          break;

        case SUBREG:
          fprintf (file, "%s", reg_names[subreg_regno (x) + 1]);
          break;

        case CONST_DOUBLE:
          {
            long val[2];
            REAL_VALUE_TYPE rv;

            switch (GET_MODE (x))
              {
              case DFmode:
                REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
                REAL_VALUE_TO_TARGET_DOUBLE (rv, val);
                fprintf (file, "0x%lx", val[1]);
                break;
              case SFmode:
                gcc_unreachable ();
              case VOIDmode:
              case DImode:
                mn10300_print_operand_address (file,
                                               GEN_INT (CONST_DOUBLE_HIGH (x)));
                break;
              default:
                break;
              }
            break;
          }

        case CONST_INT:
          {
            rtx low, high;
            split_double (x, &low, &high);
            fprintf (file, "%ld", (long)INTVAL (high));
            break;
          }

        default:
          gcc_unreachable ();
        }
      break;

    case 'A':
      fputc ('(', file);
      if (REG_P (XEXP (x, 0)))
        output_address (gen_rtx_PLUS (SImode, XEXP (x, 0), const0_rtx));
      else
        output_address (XEXP (x, 0));
      fputc (')', file);
      break;

    case 'N':
      gcc_assert (INTVAL (x) >= -128 && INTVAL (x) <= 255);
      fprintf (file, "%d", (int)((~INTVAL (x)) & 0xff));
      break;

    case 'U':
      gcc_assert (INTVAL (x) >= -128 && INTVAL (x) <= 255);
      fprintf (file, "%d", (int)(INTVAL (x) & 0xff));
      break;

      /* For shift counts.  The hardware ignores the upper bits of
         any immediate, but the assembler will flag an out of range
         shift count as an error.  So we mask off the high bits
         of the immediate here.  */
    case 'S':
      if (CONST_INT_P (x))
        {
          fprintf (file, "%d", (int)(INTVAL (x) & 0x1f));
          break;
        }
      /* FALL THROUGH */

    default:
      switch (GET_CODE (x))
        {
        case MEM:
          fputc ('(', file);
          output_address (XEXP (x, 0));
          fputc (')', file);
          break;

        case PLUS:
          output_address (x);
          break;

        case REG:
          fprintf (file, "%s", reg_names[REGNO (x)]);
          break;

        case SUBREG:
          fprintf (file, "%s", reg_names[subreg_regno (x)]);
          break;

          /* This will only be single precision....  */
        case CONST_DOUBLE:
          {
            unsigned long val;
            REAL_VALUE_TYPE rv;

            REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
            REAL_VALUE_TO_TARGET_SINGLE (rv, val);
            fprintf (file, "0x%lx", val);
            break;
          }

        case CONST_INT:
        case SYMBOL_REF:
        case CONST:
        case LABEL_REF:
        case CODE_LABEL:
        case UNSPEC:
          mn10300_print_operand_address (file, x);
          break;
        default:
          gcc_unreachable ();
        }
      break;
    }
}
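
/* For example, given the CONST_INT 0x0f, code 'U' prints "15" (the
   operand's low byte) and 'N' prints "240" (the low byte of its bitwise
   complement), while 'S' reduces an out-of-range shift count such as 37
   to "5".  */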

/* Output assembly language for the address ADDR to FILE.  */

void
mn10300_print_operand_address (FILE *file, rtx addr)
{
  switch (GET_CODE (addr))
    {
    case POST_INC:
      mn10300_print_operand (file, XEXP (addr, 0), 0);
      fputc ('+', file);
      break;

    case POST_MODIFY:
      mn10300_print_operand (file, XEXP (addr, 0), 0);
      fputc ('+', file);
      fputc (',', file);
      mn10300_print_operand (file, XEXP (addr, 1), 0);
      break;

    case REG:
      mn10300_print_operand (file, addr, 0);
      break;
    case PLUS:
      {
        rtx base = XEXP (addr, 0);
        rtx index = XEXP (addr, 1);

        if (REG_P (index) && !REG_OK_FOR_INDEX_P (index))
          {
            rtx x = base;
            base = index;
            index = x;

            gcc_assert (REG_P (index) && REG_OK_FOR_INDEX_P (index));
          }
        gcc_assert (REG_OK_FOR_BASE_P (base));

        mn10300_print_operand (file, index, 0);
        fputc (',', file);
        mn10300_print_operand (file, base, 0);
        break;
      }
    case SYMBOL_REF:
      output_addr_const (file, addr);
      break;
    default:
      output_addr_const (file, addr);
      break;
    }
}

/* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA.

   Used for PIC-specific UNSPECs.  */

static bool
mn10300_asm_output_addr_const_extra (FILE *file, rtx x)
{
  if (GET_CODE (x) == UNSPEC)
    {
      switch (XINT (x, 1))
        {
        case UNSPEC_PIC:
          /* GLOBAL_OFFSET_TABLE or local symbols, no suffix.  */
          output_addr_const (file, XVECEXP (x, 0, 0));
          break;
        case UNSPEC_GOT:
          output_addr_const (file, XVECEXP (x, 0, 0));
          fputs ("@GOT", file);
          break;
        case UNSPEC_GOTOFF:
          output_addr_const (file, XVECEXP (x, 0, 0));
          fputs ("@GOTOFF", file);
          break;
        case UNSPEC_PLT:
          output_addr_const (file, XVECEXP (x, 0, 0));
          fputs ("@PLT", file);
          break;
        case UNSPEC_GOTSYM_OFF:
          assemble_name (file, GOT_SYMBOL_NAME);
          fputs ("-(", file);
          output_addr_const (file, XVECEXP (x, 0, 0));
          fputs ("-.)", file);
          break;
        default:
          return false;
        }
      return true;
    }
  else
    return false;
}

/* Count the number of FP registers that have to be saved.  */
static int
fp_regs_to_save (void)
{
  int i, n = 0;

  if (! TARGET_AM33_2)
    return 0;

  for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
    if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
      ++n;

  return n;
}

/* Print a set of registers in the format required by "movm" and "ret".
   Register K is saved if bit K of MASK is set.  The data and address
   registers can be stored individually, but the extended registers cannot.
   We assume that the mask already takes that into account.  For instance,
   bits 14 to 17 must have the same value.  */

void
mn10300_print_reg_list (FILE *file, int mask)
{
  int need_comma;
  int i;

  need_comma = 0;
  fputc ('[', file);

  for (i = 0; i < FIRST_EXTENDED_REGNUM; i++)
    if ((mask & (1 << i)) != 0)
      {
        if (need_comma)
          fputc (',', file);
        fputs (reg_names [i], file);
        need_comma = 1;
      }

  if ((mask & 0x3c000) != 0)
    {
      gcc_assert ((mask & 0x3c000) == 0x3c000);
      if (need_comma)
        fputc (',', file);
      fputs ("exreg1", file);
      need_comma = 1;
    }

  fputc (']', file);
}
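
/* E.g., assuming the conventional register numbering (bits 2 and 3 for
   d2 and d3, bits 14-17 for the extended registers), a MASK of 0x3c00c
   is printed as "[d2,d3,exreg1]".  */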

/* If the MDR register is never clobbered, we can use the RETF instruction
   which takes the address from the MDR register.  This is 3 cycles faster
   than having to load the address from the stack.  */

bool
mn10300_can_use_retf_insn (void)
{
  /* Don't bother if we're not optimizing.  In this case we won't
     have proper access to df_regs_ever_live_p.  */
  if (!optimize)
    return false;

  /* EH returns alter the saved return address; MDR is not current.  */
  if (crtl->calls_eh_return)
    return false;

  /* Obviously not if MDR is ever clobbered.  */
  if (df_regs_ever_live_p (MDR_REG))
    return false;

  /* ??? Careful not to use this during expand_epilogue etc.  */
  gcc_assert (!in_sequence_p ());
  return leaf_function_p ();
}
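
/* The shorter RETS instruction can be used only when there is nothing at
   all to deallocate: the arg pointer/stack pointer elimination offset
   (frame size plus register save areas) must be zero.  */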
bool
mn10300_can_use_rets_insn (void)
{
  return !mn10300_initial_offset (ARG_POINTER_REGNUM, STACK_POINTER_REGNUM);
}

/* Returns the set of live, callee-saved registers as a bitmask.  The
   callee-saved extended registers cannot be stored individually, so
   all of them will be included in the mask if any one of them is used.  */

int
mn10300_get_live_callee_saved_regs (void)
{
  int mask;
  int i;

  mask = 0;
  for (i = 0; i <= LAST_EXTENDED_REGNUM; i++)
    if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
      mask |= (1 << i);
  if ((mask & 0x3c000) != 0)
    mask |= 0x3c000;

  return mask;
}
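
/* Mark R (an insn or expression) as frame related and return it, so that
   the prologue code below can tag everything it emits for unwind info.  */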
static rtx
F (rtx r)
{
  RTX_FRAME_RELATED_P (r) = 1;
  return r;
}

/* Generate an instruction that pushes several registers onto the stack.
   Register K will be saved if bit K in MASK is set.  The function does
   nothing if MASK is zero.

   To be compatible with the "movm" instruction, the lowest-numbered
   register must be stored in the lowest slot.  If MASK is the set
   { R1,...,RN }, where R1...RN are ordered least first, the generated
   instruction will have the form:

       (parallel
         (set (reg:SI 9) (plus:SI (reg:SI 9) (const_int -N*4)))
         (set (mem:SI (plus:SI (reg:SI 9)
                               (const_int -1*4)))
              (reg:SI RN))
         ...
         (set (mem:SI (plus:SI (reg:SI 9)
                               (const_int -N*4)))
              (reg:SI R1))) */

static void
mn10300_gen_multiple_store (unsigned int mask)
{
  /* The order in which registers are stored, from SP-4 through SP-N*4.  */
  static const unsigned int store_order[8] = {
    /* e2, e3: never saved */
    FIRST_EXTENDED_REGNUM + 4,
    FIRST_EXTENDED_REGNUM + 5,
    FIRST_EXTENDED_REGNUM + 6,
    FIRST_EXTENDED_REGNUM + 7,
    /* e0, e1, mdrq, mcrh, mcrl, mcvf: never saved.  */
    FIRST_DATA_REGNUM + 2,
    FIRST_DATA_REGNUM + 3,
    FIRST_ADDRESS_REGNUM + 2,
    FIRST_ADDRESS_REGNUM + 3,
    /* d0, d1, a0, a1, mdr, lir, lar: never saved.  */
  };

  rtx x, elts[9];
  unsigned int i;
  int count;

  if (mask == 0)
    return;

  for (i = count = 0; i < ARRAY_SIZE (store_order); ++i)
    {
      unsigned regno = store_order[i];

      if (((mask >> regno) & 1) == 0)
        continue;

      ++count;
      x = plus_constant (stack_pointer_rtx, count * -4);
      x = gen_frame_mem (SImode, x);
      x = gen_rtx_SET (VOIDmode, x, gen_rtx_REG (SImode, regno));
      elts[count] = F (x);

      /* Remove the register from the mask so that... */
      mask &= ~(1u << regno);
    }

  /* ... we can make sure that we didn't try to use a register
     not listed in the store order.  */
  gcc_assert (mask == 0);

  /* Create the instruction that updates the stack pointer.  */
  x = plus_constant (stack_pointer_rtx, count * -4);
  x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
  elts[0] = F (x);

  /* We need one PARALLEL element to update the stack pointer and
     an additional element for each register that is stored.  */
  x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (count + 1, elts));
  F (emit_insn (x));
}

void
mn10300_expand_prologue (void)
{
  HOST_WIDE_INT size = mn10300_frame_size ();

  /* If we use any of the callee-saved registers, save them now.  */
  mn10300_gen_multiple_store (mn10300_get_live_callee_saved_regs ());

  if (TARGET_AM33_2 && fp_regs_to_save ())
    {
      int num_regs_to_save = fp_regs_to_save (), i;
      HOST_WIDE_INT xsize;
      enum
      {
        save_sp_merge,
        save_sp_no_merge,
        save_sp_partial_merge,
        save_a0_merge,
        save_a0_no_merge
      } strategy;
      unsigned int strategy_size = (unsigned)-1, this_strategy_size;
      rtx reg;

      /* We have several different strategies to save FP registers.
         We can store them using SP offsets, which is beneficial if
         there are just a few registers to save, or we can use `a0' in
         post-increment mode (`a0' is the only call-clobbered address
         register that is never used to pass information to a
         function).  Furthermore, if we don't need a frame pointer, we
         can merge the two SP adds into a single one, but this isn't
         always beneficial; sometimes we can just split the two adds
         so that we don't exceed a 16-bit constant size.  The code
         below will select which strategy to use, so as to generate the
         smallest code.  Ties are broken in favor of shorter sequences
         (in terms of number of instructions).  */

#define SIZE_ADD_AX(S) ((((S) >= (1 << 15)) || ((S) < -(1 << 15))) ? 6 \
                        : (((S) >= (1 << 7)) || ((S) < -(1 << 7))) ? 4 : 2)
#define SIZE_ADD_SP(S) ((((S) >= (1 << 15)) || ((S) < -(1 << 15))) ? 6 \
                        : (((S) >= (1 << 7)) || ((S) < -(1 << 7))) ? 4 : 3)

/* We add 0 * (S) in two places to promote to the type of S,
   so that all arms of the conditional have the same type.  */
#define SIZE_FMOV_LIMIT(S,N,L,SIZE1,SIZE2,ELSE) \
  (((S) >= (L)) ? 0 * (S) + (SIZE1) * (N) \
   : ((S) + 4 * (N) >= (L)) ? (((L) - (S)) / 4 * (SIZE2) \
                               + ((S) + 4 * (N) - (L)) / 4 * (SIZE1)) \
   : 0 * (S) + (ELSE))
#define SIZE_FMOV_SP_(S,N) \
  (SIZE_FMOV_LIMIT ((S), (N), (1 << 24), 7, 6, \
                   SIZE_FMOV_LIMIT ((S), (N), (1 << 8), 6, 4, \
                                    (S) ? 4 * (N) : 3 + 4 * ((N) - 1))))
#define SIZE_FMOV_SP(S,N) (SIZE_FMOV_SP_ ((unsigned HOST_WIDE_INT)(S), (N)))
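
/* As a worked example of the byte-size estimates above: SIZE_ADD_SP (-4)
   is 3, SIZE_ADD_SP (-200) is 4 and SIZE_ADD_SP (-40000) is 6 bytes,
   presumably the 8-, 16- and 32-bit immediate encodings; SIZE_ADD_AX is
   identical except that the shortest form costs only 2 bytes.  */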

      /* Consider alternative save_sp_merge only if we don't need the
         frame pointer and size is nonzero.  */
      if (! frame_pointer_needed && size)
        {
          /* Insn: add -(size + 4 * num_regs_to_save), sp.  */
          this_strategy_size = SIZE_ADD_SP (-(size + 4 * num_regs_to_save));
          /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
          this_strategy_size += SIZE_FMOV_SP (size, num_regs_to_save);

          if (this_strategy_size < strategy_size)
            {
              strategy = save_sp_merge;
              strategy_size = this_strategy_size;
            }
        }

      /* Consider alternative save_sp_no_merge unconditionally.  */
      /* Insn: add -4 * num_regs_to_save, sp.  */
      this_strategy_size = SIZE_ADD_SP (-4 * num_regs_to_save);
      /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
      this_strategy_size += SIZE_FMOV_SP (0, num_regs_to_save);
      if (size)
        {
          /* Insn: add -size, sp.  */
          this_strategy_size += SIZE_ADD_SP (-size);
        }

      if (this_strategy_size < strategy_size)
        {
          strategy = save_sp_no_merge;
          strategy_size = this_strategy_size;
        }

      /* Consider alternative save_sp_partial_merge only if we don't
         need a frame pointer and size is reasonably large.  */
      if (! frame_pointer_needed && size + 4 * num_regs_to_save > 128)
        {
          /* Insn: add -128, sp.  */
          this_strategy_size = SIZE_ADD_SP (-128);
          /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
          this_strategy_size += SIZE_FMOV_SP (128 - 4 * num_regs_to_save,
                                              num_regs_to_save);
          if (size)
            {
              /* Insn: add 128-size, sp.  */
              this_strategy_size += SIZE_ADD_SP (128 - size);
            }

          if (this_strategy_size < strategy_size)
            {
              strategy = save_sp_partial_merge;
              strategy_size = this_strategy_size;
            }
        }

      /* Consider alternative save_a0_merge only if we don't need a
         frame pointer, size is nonzero and the user hasn't
         changed the calling conventions of a0.  */
      if (! frame_pointer_needed && size
          && call_really_used_regs [FIRST_ADDRESS_REGNUM]
          && ! fixed_regs[FIRST_ADDRESS_REGNUM])
        {
          /* Insn: add -(size + 4 * num_regs_to_save), sp.  */
          this_strategy_size = SIZE_ADD_SP (-(size + 4 * num_regs_to_save));
          /* Insn: mov sp, a0.  */
          this_strategy_size++;
          if (size)
            {
              /* Insn: add size, a0.  */
              this_strategy_size += SIZE_ADD_AX (size);
            }
          /* Insn: fmov fs#, (a0+), for each fs# to be saved.  */
          this_strategy_size += 3 * num_regs_to_save;

          if (this_strategy_size < strategy_size)
            {
              strategy = save_a0_merge;
              strategy_size = this_strategy_size;
            }
        }

      /* Consider alternative save_a0_no_merge if the user hasn't
         changed the calling conventions of a0.  */
      if (call_really_used_regs [FIRST_ADDRESS_REGNUM]
          && ! fixed_regs[FIRST_ADDRESS_REGNUM])
        {
          /* Insn: add -4 * num_regs_to_save, sp.  */
          this_strategy_size = SIZE_ADD_SP (-4 * num_regs_to_save);
          /* Insn: mov sp, a0.  */
          this_strategy_size++;
          /* Insn: fmov fs#, (a0+), for each fs# to be saved.  */
          this_strategy_size += 3 * num_regs_to_save;
          if (size)
            {
              /* Insn: add -size, sp.  */
              this_strategy_size += SIZE_ADD_SP (-size);
            }

          if (this_strategy_size < strategy_size)
            {
              strategy = save_a0_no_merge;
              strategy_size = this_strategy_size;
            }
        }

      /* Emit the initial SP add, common to all strategies.  */
      switch (strategy)
        {
        case save_sp_no_merge:
        case save_a0_no_merge:
          F (emit_insn (gen_addsi3 (stack_pointer_rtx,
                                    stack_pointer_rtx,
                                    GEN_INT (-4 * num_regs_to_save))));
          xsize = 0;
          break;

        case save_sp_partial_merge:
          F (emit_insn (gen_addsi3 (stack_pointer_rtx,
                                    stack_pointer_rtx,
                                    GEN_INT (-128))));
          xsize = 128 - 4 * num_regs_to_save;
          size -= xsize;
          break;

        case save_sp_merge:
        case save_a0_merge:
          F (emit_insn (gen_addsi3 (stack_pointer_rtx,
                                    stack_pointer_rtx,
                                    GEN_INT (-(size + 4 * num_regs_to_save)))));
          /* We'll have to adjust FP register saves according to the
             frame size.  */
          xsize = size;
          /* Since we've already created the stack frame, don't do it
             again at the end of the function.  */
          size = 0;
          break;

        default:
          gcc_unreachable ();
        }

      /* Now prepare register a0, if we have decided to use it.  */
      switch (strategy)
        {
        case save_sp_merge:
        case save_sp_no_merge:
        case save_sp_partial_merge:
          reg = 0;
          break;

        case save_a0_merge:
        case save_a0_no_merge:
          reg = gen_rtx_REG (SImode, FIRST_ADDRESS_REGNUM);
          F (emit_insn (gen_movsi (reg, stack_pointer_rtx)));
          if (xsize)
            F (emit_insn (gen_addsi3 (reg, reg, GEN_INT (xsize))));
          reg = gen_rtx_POST_INC (SImode, reg);
          break;

        default:
          gcc_unreachable ();
        }

      /* Now actually save the FP registers.  */
      for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
        if (df_regs_ever_live_p (i) && ! call_really_used_regs [i])
          {
            rtx addr;

            if (reg)
              addr = reg;
            else
              {
                /* If we aren't using `a0', use an SP offset.  */
                if (xsize)
                  {
                    addr = gen_rtx_PLUS (SImode,
                                         stack_pointer_rtx,
                                         GEN_INT (xsize));
                  }
                else
                  addr = stack_pointer_rtx;

                xsize += 4;
              }

            F (emit_insn (gen_movsf (gen_rtx_MEM (SFmode, addr),
                                     gen_rtx_REG (SFmode, i))));
          }
    }

  /* Now put the frame pointer into the frame pointer register.  */
  if (frame_pointer_needed)
    F (emit_move_insn (frame_pointer_rtx, stack_pointer_rtx));

  /* Allocate stack for this frame.  */
  if (size)
    F (emit_insn (gen_addsi3 (stack_pointer_rtx,
                              stack_pointer_rtx,
                              GEN_INT (-size))));

  if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
    emit_insn (gen_load_pic ());
}

void
mn10300_expand_epilogue (void)
{
  HOST_WIDE_INT size = mn10300_frame_size ();
  int reg_save_bytes = REG_SAVE_BYTES;

  if (TARGET_AM33_2 && fp_regs_to_save ())
    {
      int num_regs_to_save = fp_regs_to_save (), i;
      rtx reg = 0;

      /* We have several options to restore FP registers.  We could
         load them from SP offsets, but, if there are enough FP
         registers to restore, we win if we use a post-increment
         addressing mode.  */

      /* If we have a frame pointer, it's the best option, because we
         already know it has the value we want.  */
      if (frame_pointer_needed)
        reg = gen_rtx_REG (SImode, FRAME_POINTER_REGNUM);
      /* Otherwise, we may use `a1', since it's call-clobbered and
         it's never used for return values.  But only do so if it's
         smaller than using SP offsets.  */
      else
        {
          enum { restore_sp_post_adjust,
                 restore_sp_pre_adjust,
                 restore_sp_partial_adjust,
                 restore_a1 } strategy;
          unsigned int this_strategy_size, strategy_size = (unsigned)-1;

          /* Consider using sp offsets before adjusting sp.  */
          /* Insn: fmov (##,sp),fs#, for each fs# to be restored.  */
          this_strategy_size = SIZE_FMOV_SP (size, num_regs_to_save);
          /* If size is too large, we'll have to adjust SP with an
             add.  */
          if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
            {
              /* Insn: add size + 4 * num_regs_to_save, sp.  */
              this_strategy_size += SIZE_ADD_SP (size + 4 * num_regs_to_save);
            }
          /* If we don't have to restore any non-FP registers,
             we'll be able to save one byte by using rets.  */
          if (! reg_save_bytes)
            this_strategy_size--;

          if (this_strategy_size < strategy_size)
            {
              strategy = restore_sp_post_adjust;
              strategy_size = this_strategy_size;
            }

          /* Consider using sp offsets after adjusting sp.  */
          /* Insn: add size, sp.  */
          this_strategy_size = SIZE_ADD_SP (size);
          /* Insn: fmov (##,sp),fs#, for each fs# to be restored.  */
          this_strategy_size += SIZE_FMOV_SP (0, num_regs_to_save);
          /* We're going to use ret to release the FP registers
             save area, so, no savings.  */

          if (this_strategy_size < strategy_size)
            {
              strategy = restore_sp_pre_adjust;
              strategy_size = this_strategy_size;
            }

          /* Consider using sp offsets after partially adjusting sp.
             When size is close to 32Kb, we may be able to adjust SP
             with an imm16 add instruction while still using fmov
             (d8,sp).  */
          if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
            {
              /* Insn: add size + 4 * num_regs_to_save
                       + reg_save_bytes - 252,sp.  */
              this_strategy_size = SIZE_ADD_SP (size + 4 * num_regs_to_save
                                                + reg_save_bytes - 252);
              /* Insn: fmov (##,sp),fs#, for each fs# to be restored.  */
              this_strategy_size += SIZE_FMOV_SP (252 - reg_save_bytes
                                                  - 4 * num_regs_to_save,
                                                  num_regs_to_save);
              /* We're going to use ret to release the FP registers
                 save area, so, no savings.  */

              if (this_strategy_size < strategy_size)
                {
                  strategy = restore_sp_partial_adjust;
                  strategy_size = this_strategy_size;
                }
            }

          /* Consider using a1 in post-increment mode, as long as the
             user hasn't changed the calling conventions of a1.  */
          if (call_really_used_regs [FIRST_ADDRESS_REGNUM + 1]
              && ! fixed_regs[FIRST_ADDRESS_REGNUM + 1])
            {
              /* Insn: mov sp,a1.  */
              this_strategy_size = 1;
              if (size)
                {
                  /* Insn: add size,a1.  */
                  this_strategy_size += SIZE_ADD_AX (size);
                }
              /* Insn: fmov (a1+),fs#, for each fs# to be restored.  */
              this_strategy_size += 3 * num_regs_to_save;
              /* If size is large enough, we may be able to save a
                 couple of bytes.  */
              if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
                {
                  /* Insn: mov a1,sp.  */
                  this_strategy_size += 2;
                }
              /* If we don't have to restore any non-FP registers,
                 we'll be able to save one byte by using rets.  */
              if (! reg_save_bytes)
                this_strategy_size--;

              if (this_strategy_size < strategy_size)
                {
                  strategy = restore_a1;
                  strategy_size = this_strategy_size;
                }
            }

          switch (strategy)
            {
            case restore_sp_post_adjust:
              break;

            case restore_sp_pre_adjust:
              emit_insn (gen_addsi3 (stack_pointer_rtx,
                                     stack_pointer_rtx,
                                     GEN_INT (size)));
              size = 0;
              break;

            case restore_sp_partial_adjust:
              emit_insn (gen_addsi3 (stack_pointer_rtx,
                                     stack_pointer_rtx,
                                     GEN_INT (size + 4 * num_regs_to_save
                                              + reg_save_bytes - 252)));
              size = 252 - reg_save_bytes - 4 * num_regs_to_save;
              break;

            case restore_a1:
              reg = gen_rtx_REG (SImode, FIRST_ADDRESS_REGNUM + 1);
              emit_insn (gen_movsi (reg, stack_pointer_rtx));
              if (size)
                emit_insn (gen_addsi3 (reg, reg, GEN_INT (size)));
              break;

            default:
              gcc_unreachable ();
            }
        }

      /* Adjust the selected register, if any, for post-increment.  */
      if (reg)
        reg = gen_rtx_POST_INC (SImode, reg);

      for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
        if (df_regs_ever_live_p (i) && ! call_really_used_regs [i])
          {
            rtx addr;

            if (reg)
              addr = reg;
            else if (size)
              {
                /* If we aren't using a post-increment register, use an
                   SP offset.  */
                addr = gen_rtx_PLUS (SImode,
                                     stack_pointer_rtx,
                                     GEN_INT (size));
              }
            else
              addr = stack_pointer_rtx;

            size += 4;

            emit_insn (gen_movsf (gen_rtx_REG (SFmode, i),
                                  gen_rtx_MEM (SFmode, addr)));
          }

      /* If we were using the restore_a1 strategy and the number of
         bytes to be released won't fit in the `ret' byte, copy `a1'
         to `sp', to avoid having to use `add' to adjust it.  */
      if (! frame_pointer_needed && reg && size + reg_save_bytes > 255)
        {
          emit_move_insn (stack_pointer_rtx, XEXP (reg, 0));
          size = 0;
        }
    }

  /* Maybe cut back the stack, except for the register save area.

     If the frame pointer exists, then use the frame pointer to
     cut back the stack.

     If the stack size + register save area is more than 255 bytes,
     then the stack must be cut back here since the size + register
     save size is too big for a ret/retf instruction.

     Else leave it alone, it will be cut back as part of the
     ret/retf instruction, or there wasn't any stack to begin with.

     Under no circumstances should the register save area be
     deallocated here; that would leave a window where an interrupt
     could occur and trash the register save area.  */
  if (frame_pointer_needed)
    {
      emit_move_insn (stack_pointer_rtx, frame_pointer_rtx);
      size = 0;
    }
  else if (size + reg_save_bytes > 255)
    {
      emit_insn (gen_addsi3 (stack_pointer_rtx,
                             stack_pointer_rtx,
                             GEN_INT (size)));
      size = 0;
    }

  /* Adjust the stack and restore callee-saved registers, if any.  */
  if (mn10300_can_use_rets_insn ())
    emit_jump_insn (ret_rtx);
  else
    emit_jump_insn (gen_return_ret (GEN_INT (size + REG_SAVE_BYTES)));
}

/* Recognize the PARALLEL rtx generated by mn10300_gen_multiple_store ().
   This function is for MATCH_PARALLEL and so assumes OP is known to be
   parallel.  If OP is a multiple store, return a mask indicating which
   registers it saves.  Return 0 otherwise.  */

int
mn10300_store_multiple_operation (rtx op,
                                  enum machine_mode mode ATTRIBUTE_UNUSED)
{
  int count;
  int mask;
  int i;
  unsigned int last;
  rtx elt;

  count = XVECLEN (op, 0);
  if (count < 2)
    return 0;

  /* Check that the first instruction has the form (set (sp) (plus A B)).  */
  elt = XVECEXP (op, 0, 0);
  if (GET_CODE (elt) != SET
      || (! REG_P (SET_DEST (elt)))
      || REGNO (SET_DEST (elt)) != STACK_POINTER_REGNUM
      || GET_CODE (SET_SRC (elt)) != PLUS)
    return 0;

  /* Check that A is the stack pointer and B is the expected stack size.
     For OP to match, each subsequent instruction should push a word onto
     the stack.  We therefore expect the first instruction to create
     COUNT-1 stack slots.  */
  elt = SET_SRC (elt);
  if ((! REG_P (XEXP (elt, 0)))
      || REGNO (XEXP (elt, 0)) != STACK_POINTER_REGNUM
      || (! CONST_INT_P (XEXP (elt, 1)))
      || INTVAL (XEXP (elt, 1)) != -(count - 1) * 4)
    return 0;

  mask = 0;
  for (i = 1; i < count; i++)
    {
      /* Check that element i is a (set (mem M) R).  */
      /* ??? Validate the register order a-la mn10300_gen_multiple_store.
         Remember: the ordering is *not* monotonic.  */
      elt = XVECEXP (op, 0, i);
      if (GET_CODE (elt) != SET
          || (! MEM_P (SET_DEST (elt)))
          || (! REG_P (SET_SRC (elt))))
        return 0;

      /* Remember which registers are to be saved.  */
      last = REGNO (SET_SRC (elt));
      mask |= (1 << last);

      /* Check that M has the form (plus (sp) (const_int -I*4)).  */
      elt = XEXP (SET_DEST (elt), 0);
      if (GET_CODE (elt) != PLUS
          || (! REG_P (XEXP (elt, 0)))
          || REGNO (XEXP (elt, 0)) != STACK_POINTER_REGNUM
          || (! CONST_INT_P (XEXP (elt, 1)))
          || INTVAL (XEXP (elt, 1)) != -i * 4)
        return 0;
    }

  /* All or none of the callee-saved extended registers must be in the set.  */
  if ((mask & 0x3c000) != 0
      && (mask & 0x3c000) != 0x3c000)
    return 0;

  return mask;
}

/* Implement TARGET_PREFERRED_RELOAD_CLASS.  */

static reg_class_t
mn10300_preferred_reload_class (rtx x, reg_class_t rclass)
{
  if (x == stack_pointer_rtx && rclass != SP_REGS)
    return (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
  else if (MEM_P (x)
           || (REG_P (x)
               && !HARD_REGISTER_P (x))
           || (GET_CODE (x) == SUBREG
               && REG_P (SUBREG_REG (x))
               && !HARD_REGISTER_P (SUBREG_REG (x))))
    return LIMIT_RELOAD_CLASS (GET_MODE (x), rclass);
  else
    return rclass;
}

/* Implement TARGET_PREFERRED_OUTPUT_RELOAD_CLASS.  */

static reg_class_t
mn10300_preferred_output_reload_class (rtx x, reg_class_t rclass)
{
  if (x == stack_pointer_rtx && rclass != SP_REGS)
    return (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
  return rclass;
}

/* Implement TARGET_SECONDARY_RELOAD.  */

static reg_class_t
mn10300_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
                          enum machine_mode mode, secondary_reload_info *sri)
{
  enum reg_class rclass = (enum reg_class) rclass_i;
  enum reg_class xclass = NO_REGS;
  unsigned int xregno = INVALID_REGNUM;

  if (REG_P (x))
    {
      xregno = REGNO (x);
      if (xregno >= FIRST_PSEUDO_REGISTER)
        xregno = true_regnum (x);
      if (xregno != INVALID_REGNUM)
        xclass = REGNO_REG_CLASS (xregno);
    }

  if (!TARGET_AM33)
    {
      /* Memory load/stores less than a full word wide can't have an
         address or stack pointer destination.  They must use a data
         register as an intermediate register.  */
      if (rclass != DATA_REGS
          && (mode == QImode || mode == HImode)
          && xclass == NO_REGS)
        return DATA_REGS;

      /* We can only move SP to/from an address register.  */
      if (in_p
          && rclass == SP_REGS
          && xclass != ADDRESS_REGS)
        return ADDRESS_REGS;
      if (!in_p
          && xclass == SP_REGS
          && rclass != ADDRESS_REGS
          && rclass != SP_OR_ADDRESS_REGS)
        return ADDRESS_REGS;
    }

  /* We can't directly load sp + const_int into a register;
     we must use an address register as a scratch.  */
  if (in_p
      && rclass != SP_REGS
      && rclass != SP_OR_ADDRESS_REGS
      && rclass != SP_OR_GENERAL_REGS
      && GET_CODE (x) == PLUS
      && (XEXP (x, 0) == stack_pointer_rtx
          || XEXP (x, 1) == stack_pointer_rtx))
    {
      sri->icode = CODE_FOR_reload_plus_sp_const;
      return NO_REGS;
    }

  /* We can only move MDR to/from a data register.  */
  if (rclass == MDR_REGS && xclass != DATA_REGS)
    return DATA_REGS;
  if (xclass == MDR_REGS && rclass != DATA_REGS)
    return DATA_REGS;

  /* We can't load/store an FP register from a constant address.  */
  if (TARGET_AM33_2
      && (rclass == FP_REGS || xclass == FP_REGS)
      && (xclass == NO_REGS || rclass == NO_REGS))
    {
      rtx addr = NULL;

      if (xregno >= FIRST_PSEUDO_REGISTER && xregno != INVALID_REGNUM)
        {
          addr = reg_equiv_mem (xregno);
          if (addr)
            addr = XEXP (addr, 0);
        }
      else if (MEM_P (x))
        addr = XEXP (x, 0);

      if (addr && CONSTANT_ADDRESS_P (addr))
        return GENERAL_REGS;
    }

  /* Otherwise assume no secondary reloads are needed.  */
  return NO_REGS;
}

int
mn10300_frame_size (void)
{
  /* size includes the fixed stack space needed for function calls.  */
  int size = get_frame_size () + crtl->outgoing_args_size;

  /* And space for the return pointer.  */
  size += crtl->outgoing_args_size ? 4 : 0;

  return size;
}

int
mn10300_initial_offset (int from, int to)
{
  int diff = 0;

  gcc_assert (from == ARG_POINTER_REGNUM || from == FRAME_POINTER_REGNUM);
  gcc_assert (to == FRAME_POINTER_REGNUM || to == STACK_POINTER_REGNUM);

  if (to == STACK_POINTER_REGNUM)
    diff = mn10300_frame_size ();

  /* The difference between the argument pointer and the frame pointer
     is the size of the callee register save area.  */
  if (from == ARG_POINTER_REGNUM)
    {
      diff += REG_SAVE_BYTES;
      diff += 4 * fp_regs_to_save ();
    }

  return diff;
}

/* Worker function for TARGET_RETURN_IN_MEMORY.  */

static bool
mn10300_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  /* Return values > 8 bytes in length in memory.  */
  return (int_size_in_bytes (type) > 8
          || int_size_in_bytes (type) == 0
          || TYPE_MODE (type) == BLKmode);
}

/* Flush the argument registers to the stack for a stdarg function;
   return the new argument pointer.  */
static rtx
mn10300_builtin_saveregs (void)
{
  rtx offset, mem;
  tree fntype = TREE_TYPE (current_function_decl);
  int argadj = ((!stdarg_p (fntype))
                ? UNITS_PER_WORD : 0);
  alias_set_type set = get_varargs_alias_set ();

  if (argadj)
    offset = plus_constant (crtl->args.arg_offset_rtx, argadj);
  else
    offset = crtl->args.arg_offset_rtx;

  mem = gen_rtx_MEM (SImode, crtl->args.internal_arg_pointer);
  set_mem_alias_set (mem, set);
  emit_move_insn (mem, gen_rtx_REG (SImode, 0));

  mem = gen_rtx_MEM (SImode,
                     plus_constant (crtl->args.internal_arg_pointer, 4));
  set_mem_alias_set (mem, set);
  emit_move_insn (mem, gen_rtx_REG (SImode, 1));

  return copy_to_reg (expand_binop (Pmode, add_optab,
                                    crtl->args.internal_arg_pointer,
                                    offset, 0, 0, OPTAB_LIB_WIDEN));
}

static void
mn10300_va_start (tree valist, rtx nextarg)
{
  nextarg = expand_builtin_saveregs ();
  std_expand_builtin_va_start (valist, nextarg);
}

/* Return true when a parameter should be passed by reference.  */

static bool
mn10300_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
                           enum machine_mode mode, const_tree type,
                           bool named ATTRIBUTE_UNUSED)
{
  unsigned HOST_WIDE_INT size;

  if (type)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  return (size > 8 || size == 0);
}

/* Return an RTX to represent where an argument of mode MODE and type TYPE
   will be passed to a function.  If the result is NULL_RTX, the argument
   is passed on the stack.  */

static rtx
mn10300_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
                      const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  rtx result = NULL_RTX;
  int size;

  /* We only support using 2 data registers as argument registers.  */
  int nregs = 2;

  /* Figure out the size of the object to be passed.  */
  if (mode == BLKmode)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  cum->nbytes = (cum->nbytes + 3) & ~3;

  /* Don't pass this arg via a register if all the argument registers
     are used up.  */
  if (cum->nbytes > nregs * UNITS_PER_WORD)
    return result;

  /* Don't pass this arg via a register if it would be split between
     registers and memory.  */
  if (type == NULL_TREE
      && cum->nbytes + size > nregs * UNITS_PER_WORD)
    return result;

  switch (cum->nbytes / UNITS_PER_WORD)
    {
    case 0:
      result = gen_rtx_REG (mode, FIRST_ARGUMENT_REGNUM);
      break;
    case 1:
      result = gen_rtx_REG (mode, FIRST_ARGUMENT_REGNUM + 1);
      break;
    default:
      break;
    }

  return result;
}
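
/* So, for a hypothetical call f (1, 2, 3) with int arguments: the first
   two arguments land in the two argument registers (FIRST_ARGUMENT_REGNUM
   and the one after it), and the third, finding both words used up, is
   passed on the stack.  */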

/* Update the data in CUM to advance over an argument
   of mode MODE and data type TYPE.
   (TYPE is null for libcalls where that information may not be available.)  */

static void
mn10300_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
                              const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);

  cum->nbytes += (mode != BLKmode
                  ? (GET_MODE_SIZE (mode) + 3) & ~3
                  : (int_size_in_bytes (type) + 3) & ~3);
}

/* Return the number of bytes of registers to use for an argument passed
   partially in registers and partially in memory.  */

static int
mn10300_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
                           tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  int size;

  /* We only support using 2 data registers as argument registers.  */
  int nregs = 2;

  /* Figure out the size of the object to be passed.  */
  if (mode == BLKmode)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  cum->nbytes = (cum->nbytes + 3) & ~3;

  /* Don't pass this arg via a register if all the argument registers
     are used up.  */
  if (cum->nbytes > nregs * UNITS_PER_WORD)
    return 0;

  if (cum->nbytes + size <= nregs * UNITS_PER_WORD)
    return 0;

  /* Don't pass this arg via a register if it would be split between
     registers and memory.  */
  if (type == NULL_TREE
      && cum->nbytes + size > nregs * UNITS_PER_WORD)
    return 0;

  return nregs * UNITS_PER_WORD - cum->nbytes;
}
1609
 
/* Return the location of the function's value.  This will be either
   $d0 for integer functions, $a0 for pointers, or a PARALLEL of both
   $d0 and $a0 if the -mreturn-pointer-on-d0 flag is set.  Note that
   we only return the PARALLEL for outgoing values; we do not want
   callers relying on this extra copy.  */

static rtx
mn10300_function_value (const_tree valtype,
                        const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
                        bool outgoing)
{
  rtx rv;
  enum machine_mode mode = TYPE_MODE (valtype);

  if (! POINTER_TYPE_P (valtype))
    return gen_rtx_REG (mode, FIRST_DATA_REGNUM);
  else if (! TARGET_PTR_A0D0 || ! outgoing
           || cfun->returns_struct)
    return gen_rtx_REG (mode, FIRST_ADDRESS_REGNUM);

  rv = gen_rtx_PARALLEL (mode, rtvec_alloc (2));
  XVECEXP (rv, 0, 0)
    = gen_rtx_EXPR_LIST (VOIDmode,
                         gen_rtx_REG (mode, FIRST_ADDRESS_REGNUM),
                         GEN_INT (0));

  XVECEXP (rv, 0, 1)
    = gen_rtx_EXPR_LIST (VOIDmode,
                         gen_rtx_REG (mode, FIRST_DATA_REGNUM),
                         GEN_INT (0));
  return rv;
}
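
/* By way of example (a sketch, assuming FIRST_DATA_REGNUM names d0 and
   FIRST_ADDRESS_REGNUM names a0): a function returning `int' gets d0,
   one returning a pointer gets a0, and with -mreturn-pointer-on-d0 an
   outgoing pointer value is described by the PARALLEL above, naming
   both a0 and d0 so the value is available in either register.  */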
 
/* Implements TARGET_LIBCALL_VALUE.  */

static rtx
mn10300_libcall_value (enum machine_mode mode,
                       const_rtx fun ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG (mode, FIRST_DATA_REGNUM);
}

/* Implements FUNCTION_VALUE_REGNO_P.  */

bool
mn10300_function_value_regno_p (const unsigned int regno)
{
  return (regno == FIRST_DATA_REGNUM || regno == FIRST_ADDRESS_REGNUM);
}

/* Output an addition operation.  */

const char *
mn10300_output_add (rtx operands[3], bool need_flags)
{
  rtx dest, src1, src2;
  unsigned int dest_regnum, src1_regnum, src2_regnum;
  enum reg_class src1_class, src2_class, dest_class;

  dest = operands[0];
  src1 = operands[1];
  src2 = operands[2];

  dest_regnum = true_regnum (dest);
  src1_regnum = true_regnum (src1);

  dest_class = REGNO_REG_CLASS (dest_regnum);
  src1_class = REGNO_REG_CLASS (src1_regnum);

  if (CONST_INT_P (src2))
    {
      gcc_assert (dest_regnum == src1_regnum);

      if (src2 == const1_rtx && !need_flags)
        return "inc %0";
      if (INTVAL (src2) == 4 && !need_flags && dest_class != DATA_REGS)
        return "inc4 %0";

      gcc_assert (!need_flags || dest_class != SP_REGS);
      return "add %2,%0";
    }
  else if (CONSTANT_P (src2))
    return "add %2,%0";

  src2_regnum = true_regnum (src2);
  src2_class = REGNO_REG_CLASS (src2_regnum);

  if (dest_regnum == src1_regnum)
    return "add %2,%0";
  if (dest_regnum == src2_regnum)
    return "add %1,%0";

  /* The rest of the cases are reg = reg+reg.  For AM33, we can implement
     this directly, as below, but when optimizing for space we can sometimes
     do better by using a mov+add.  For MN103, we claimed that we could
     implement a three-operand add because the various move and add insns
     change sizes across register classes, and we can often do better than
     reload in choosing which operand to move.  */
  if (TARGET_AM33 && optimize_insn_for_speed_p ())
    return "add %2,%1,%0";

  /* Catch cases where no extended register was used.  */
  if (src1_class != EXTENDED_REGS
      && src2_class != EXTENDED_REGS
      && dest_class != EXTENDED_REGS)
    {
      /* We have to copy one of the sources into the destination, then
         add the other source to the destination.

         Carefully select which source to copy to the destination; a
         naive implementation will waste a byte when the source classes
         are different and the destination is an address register.
         Selecting the lowest cost register copy will optimize this
         sequence.  */
      if (src1_class == dest_class)
        return "mov %1,%0\n\tadd %2,%0";
      else
        return "mov %2,%0\n\tadd %1,%0";
    }

  /* At least one register is an extended register.  */

  /* The three operand add instruction on the am33 is a win iff the
     output register is an extended register, or if both source
     registers are extended registers.  */
  if (dest_class == EXTENDED_REGS || src1_class == src2_class)
    return "add %2,%1,%0";

  /* It is better to copy one of the sources to the destination, then
     perform a 2 address add.  The destination in this case must be
     an address or data register and one of the sources must be an
     extended register and the remaining source must not be an extended
     register.

     The best code for this case is to copy the extended reg to the
     destination, then emit a two address add.  */
  if (src1_class == EXTENDED_REGS)
    return "mov %1,%0\n\tadd %2,%0";
  else
    return "mov %2,%0\n\tadd %1,%0";
}
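
/* Illustrative expansions of the templates chosen above; the assembly
   shown is what the returned strings print after operand substitution
   (register choices are hypothetical):

       d0 = d0 + 1, flags dead        ->  inc d0
       a1 = a1 + 4, flags dead        ->  inc4 a1
       r0 = r1 + r2, AM33 for speed   ->  add r2,r1,r0
       a0 = d1 + d2, no extended regs ->  mov d2,a0; add d1,a0  */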
 
/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */

int
mn10300_symbolic_operand (rtx op,
                          enum machine_mode mode ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (op))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return 1;
    case CONST:
      op = XEXP (op, 0);
      return ((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
               || GET_CODE (XEXP (op, 0)) == LABEL_REF)
              && CONST_INT_P (XEXP (op, 1)));
    default:
      return 0;
    }
}

/* Try machine dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   Normally it is always safe for this macro to do nothing.  It exists to
   recognize opportunities to optimize the output.

   But on a few ports with segmented architectures and indexed addressing
   (mn10300, hppa) it is used to rewrite certain problematical addresses.  */

static rtx
mn10300_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
                            enum machine_mode mode ATTRIBUTE_UNUSED)
{
  if (flag_pic && ! mn10300_legitimate_pic_operand_p (x))
    x = mn10300_legitimize_pic_address (oldx, NULL_RTX);

  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.  */
  if (GET_CODE (x) == PLUS
      && mn10300_symbolic_operand (XEXP (x, 1), VOIDmode))
    {
      /* Ugly.  We modify things here so that the address offset specified
         by the index expression is computed first, then added to x to form
         the entire address.  */

      rtx regx1, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
        y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
        {
          regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
          regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
          regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
          regx1 = force_reg (Pmode,
                             gen_rtx_fmt_ee (GET_CODE (y), Pmode, regx1,
                                             regy2));
          return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
        }
    }
  return x;
}
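
/* For instance (an illustrative sketch), an address of the form

       (plus (reg X) (const (plus (symbol_ref "n") (const_int -100000))))

   is rewritten so that X and -100000 are summed into a fresh register
   first and the symbol is added last, i.e. (X + -100000) + "n", which
   avoids presenting x-100000 as the base of an indexed address.  */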
 
/* Convert a non-PIC address in `orig' to a PIC address using @GOT or
   @GOTOFF in `reg'.  */

rtx
mn10300_legitimize_pic_address (rtx orig, rtx reg)
{
  rtx x;

  if (GET_CODE (orig) == LABEL_REF
      || (GET_CODE (orig) == SYMBOL_REF
          && (CONSTANT_POOL_ADDRESS_P (orig)
              || ! MN10300_GLOBAL_P (orig))))
    {
      if (reg == NULL)
        reg = gen_reg_rtx (Pmode);

      x = gen_rtx_UNSPEC (SImode, gen_rtvec (1, orig), UNSPEC_GOTOFF);
      x = gen_rtx_CONST (SImode, x);
      emit_move_insn (reg, x);

      x = emit_insn (gen_addsi3 (reg, reg, pic_offset_table_rtx));
    }
  else if (GET_CODE (orig) == SYMBOL_REF)
    {
      if (reg == NULL)
        reg = gen_reg_rtx (Pmode);

      x = gen_rtx_UNSPEC (SImode, gen_rtvec (1, orig), UNSPEC_GOT);
      x = gen_rtx_CONST (SImode, x);
      x = gen_rtx_PLUS (SImode, pic_offset_table_rtx, x);
      x = gen_const_mem (SImode, x);

      x = emit_move_insn (reg, x);
    }
  else
    return orig;

  set_unique_reg_note (x, REG_EQUAL, orig);
  return reg;
}

/* Return zero if X references a SYMBOL_REF or LABEL_REF whose symbol
   isn't protected by a PIC unspec; nonzero otherwise.  */

int
mn10300_legitimate_pic_operand_p (rtx x)
{
  const char *fmt;
  int i;

  if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
    return 0;

  if (GET_CODE (x) == UNSPEC
      && (XINT (x, 1) == UNSPEC_PIC
          || XINT (x, 1) == UNSPEC_GOT
          || XINT (x, 1) == UNSPEC_GOTOFF
          || XINT (x, 1) == UNSPEC_PLT
          || XINT (x, 1) == UNSPEC_GOTSYM_OFF))
    return 1;

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
        {
          int j;

          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (! mn10300_legitimate_pic_operand_p (XVECEXP (x, i, j)))
              return 0;
        }
      else if (fmt[i] == 'e'
               && ! mn10300_legitimate_pic_operand_p (XEXP (x, i)))
        return 0;
    }

  return 1;
}

/* Return TRUE if the address X, taken from a (MEM:MODE X) rtx, is
   legitimate, and FALSE otherwise.

   On the mn10300, the value in the address register must be
   in the same memory space/segment as the effective address.

   This is problematical for reload since it does not understand
   that base+index != index+base in a memory reference.

   Note it is still possible to use reg+reg addressing modes,
   it's just much more difficult.  For a discussion of a possible
   workaround and solution, see the comments in pa.c before the
   function record_unscaled_index_insn_codes.  */

static bool
mn10300_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
{
  rtx base, index;

  if (CONSTANT_ADDRESS_P (x))
    return !flag_pic || mn10300_legitimate_pic_operand_p (x);

  if (RTX_OK_FOR_BASE_P (x, strict))
    return true;

  if (TARGET_AM33 && (mode == SImode || mode == SFmode || mode == HImode))
    {
      if (GET_CODE (x) == POST_INC)
        return RTX_OK_FOR_BASE_P (XEXP (x, 0), strict);
      if (GET_CODE (x) == POST_MODIFY)
        return (RTX_OK_FOR_BASE_P (XEXP (x, 0), strict)
                && CONSTANT_ADDRESS_P (XEXP (x, 1)));
    }

  if (GET_CODE (x) != PLUS)
    return false;

  base = XEXP (x, 0);
  index = XEXP (x, 1);

  if (!REG_P (base))
    return false;
  if (REG_P (index))
    {
      /* ??? Without AM33 generalized (Ri,Rn) addressing, reg+reg
         addressing is hard to satisfy.  */
      if (!TARGET_AM33)
        return false;

      return (REGNO_GENERAL_P (REGNO (base), strict)
              && REGNO_GENERAL_P (REGNO (index), strict));
    }

  if (!REGNO_STRICT_OK_FOR_BASE_P (REGNO (base), strict))
    return false;

  if (CONST_INT_P (index))
    return IN_RANGE (INTVAL (index), -1 - 0x7fffffff, 0x7fffffff);

  if (CONSTANT_ADDRESS_P (index))
    return !flag_pic || mn10300_legitimate_pic_operand_p (index);

  return false;
}

bool
mn10300_regno_in_class_p (unsigned regno, int rclass, bool strict)
{
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      if (!strict)
        return true;
      if (!reg_renumber)
        return false;
      regno = reg_renumber[regno];
      if (regno == INVALID_REGNUM)
        return false;
    }
  return TEST_HARD_REG_BIT (reg_class_contents[rclass], regno);
}

rtx
mn10300_legitimize_reload_address (rtx x,
                                   enum machine_mode mode ATTRIBUTE_UNUSED,
                                   int opnum, int type,
                                   int ind_levels ATTRIBUTE_UNUSED)
{
  bool any_change = false;

  /* See above re disabling reg+reg addressing for MN103.  */
  if (!TARGET_AM33)
    return NULL_RTX;

  if (GET_CODE (x) != PLUS)
    return NULL_RTX;

  if (XEXP (x, 0) == stack_pointer_rtx)
    {
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
                   GENERAL_REGS, GET_MODE (x), VOIDmode, 0, 0,
                   opnum, (enum reload_type) type);
      any_change = true;
    }
  if (XEXP (x, 1) == stack_pointer_rtx)
    {
      push_reload (XEXP (x, 1), NULL_RTX, &XEXP (x, 1), NULL,
                   GENERAL_REGS, GET_MODE (x), VOIDmode, 0, 0,
                   opnum, (enum reload_type) type);
      any_change = true;
    }

  return any_change ? x : NULL_RTX;
}

/* Implement TARGET_LEGITIMATE_CONSTANT_P.  Returns TRUE if X is a valid
   constant.  Note that some "constants" aren't valid, such as TLS
   symbols and unconverted GOT-based references, so we eliminate
   those here.  */

static bool
mn10300_legitimate_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  switch (GET_CODE (x))
    {
    case CONST:
      x = XEXP (x, 0);

      if (GET_CODE (x) == PLUS)
        {
          if (! CONST_INT_P (XEXP (x, 1)))
            return false;
          x = XEXP (x, 0);
        }

      /* Only some unspecs are valid as "constants".  */
      if (GET_CODE (x) == UNSPEC)
        {
          switch (XINT (x, 1))
            {
            case UNSPEC_PIC:
            case UNSPEC_GOT:
            case UNSPEC_GOTOFF:
            case UNSPEC_PLT:
              return true;
            default:
              return false;
            }
        }

      /* We must have drilled down to a symbol.  */
      if (! mn10300_symbolic_operand (x, Pmode))
        return false;
      break;

    default:
      break;
    }

  return true;
}

/* Undo pic address legitimization for the benefit of debug info.  */

static rtx
mn10300_delegitimize_address (rtx orig_x)
{
  rtx x = orig_x, ret, addend = NULL;
  bool need_mem;

  if (MEM_P (x))
    x = XEXP (x, 0);
  if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
    return orig_x;

  if (XEXP (x, 0) == pic_offset_table_rtx)
    ;
  /* With the REG+REG addressing of AM33, var-tracking can re-assemble
     some odd-looking "addresses" that were never valid in the first place.
     We need to look harder to avoid warnings being emitted.  */
  else if (GET_CODE (XEXP (x, 0)) == PLUS)
    {
      rtx x0 = XEXP (x, 0);
      rtx x00 = XEXP (x0, 0);
      rtx x01 = XEXP (x0, 1);

      if (x00 == pic_offset_table_rtx)
        addend = x01;
      else if (x01 == pic_offset_table_rtx)
        addend = x00;
      else
        return orig_x;
    }
  else
    return orig_x;
  x = XEXP (x, 1);

  if (GET_CODE (x) != CONST)
    return orig_x;
  x = XEXP (x, 0);
  if (GET_CODE (x) != UNSPEC)
    return orig_x;

  ret = XVECEXP (x, 0, 0);
  if (XINT (x, 1) == UNSPEC_GOTOFF)
    need_mem = false;
  else if (XINT (x, 1) == UNSPEC_GOT)
    need_mem = true;
  else
    return orig_x;

  gcc_assert (GET_CODE (ret) == SYMBOL_REF);
  if (need_mem != MEM_P (orig_x))
    return orig_x;
  if (need_mem && addend)
    return orig_x;
  if (addend)
    ret = gen_rtx_PLUS (Pmode, addend, ret);
  return ret;
}
 
/* For addresses, costs are relative to "MOV (Rm),Rn".  For AM33 this is
   the 3-byte fully general instruction; for MN103 this is the 2-byte form
   with an address register.  */

static int
mn10300_address_cost (rtx x, bool speed)
{
  HOST_WIDE_INT i;
  rtx base, index;

  switch (GET_CODE (x))
    {
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      /* We assume all of these require a 32-bit constant, even though
         some symbol and label references can be relaxed.  */
      return speed ? 1 : 4;

    case REG:
    case SUBREG:
    case POST_INC:
      return 0;

    case POST_MODIFY:
      /* Assume any symbolic offset is a 32-bit constant.  */
      i = (CONST_INT_P (XEXP (x, 1)) ? INTVAL (XEXP (x, 1)) : 0x12345678);
      if (IN_RANGE (i, -128, 127))
        return speed ? 0 : 1;
      if (speed)
        return 1;
      if (IN_RANGE (i, -0x800000, 0x7fffff))
        return 3;
      return 4;

    case PLUS:
      base = XEXP (x, 0);
      index = XEXP (x, 1);
      if (register_operand (index, SImode))
        {
          /* Attempt to minimize the number of registers in the address.
             This is similar to what other ports do.  */
          if (register_operand (base, SImode))
            return 1;

          base = XEXP (x, 1);
          index = XEXP (x, 0);
        }

      /* Assume any symbolic offset is a 32-bit constant.  */
      i = (CONST_INT_P (XEXP (x, 1)) ? INTVAL (XEXP (x, 1)) : 0x12345678);
      if (IN_RANGE (i, -128, 127))
        return speed ? 0 : 1;
      if (IN_RANGE (i, -32768, 32767))
        return speed ? 0 : 2;
      return speed ? 2 : 6;

    default:
      return rtx_cost (x, MEM, 0, speed);
    }
}
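
/* Some example costs produced by the function above (illustrative):

       (reg a0)                           -> 0
       (plus (reg a0) (const_int 4))      -> 0 for speed, 1 for size
       (plus (reg a0) (symbol_ref "x"))   -> 2 for speed, 6 for size
       (symbol_ref "x")                   -> 1 for speed, 4 for size  */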
 
/* Implement the TARGET_REGISTER_MOVE_COST hook.

   Recall that the base value of 2 is required by assumptions elsewhere
   in the body of the compiler, and that cost 2 is special-cased as an
   early exit from reload meaning no work is required.  */

static int
mn10300_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
                            reg_class_t ifrom, reg_class_t ito)
{
  enum reg_class from = (enum reg_class) ifrom;
  enum reg_class to = (enum reg_class) ito;
  enum reg_class scratch, test;

  /* Simplify the following code by unifying the fp register classes.  */
  if (to == FP_ACC_REGS)
    to = FP_REGS;
  if (from == FP_ACC_REGS)
    from = FP_REGS;

  /* Diagnose invalid moves by costing them as two moves.  */

  scratch = NO_REGS;
  test = from;
  if (to == SP_REGS)
    scratch = (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
  else if (to == MDR_REGS)
    scratch = DATA_REGS;
  else if (to == FP_REGS && to != from)
    scratch = GENERAL_REGS;
  else
    {
      test = to;
      if (from == SP_REGS)
        scratch = (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
      else if (from == MDR_REGS)
        scratch = DATA_REGS;
      else if (from == FP_REGS && to != from)
        scratch = GENERAL_REGS;
    }
  if (scratch != NO_REGS && !reg_class_subset_p (test, scratch))
    return (mn10300_register_move_cost (VOIDmode, from, scratch)
            + mn10300_register_move_cost (VOIDmode, scratch, to));

  /* From here on, all we need consider are legal combinations.  */

  if (optimize_size)
    {
      /* The scale here is bytes * 2.  */

      if (from == to && (to == ADDRESS_REGS || to == DATA_REGS))
        return 2;

      if (from == SP_REGS)
        return (to == ADDRESS_REGS ? 2 : 6);

      /* For AM33, all remaining legal moves are two bytes.  */
      if (TARGET_AM33)
        return 4;

      if (to == SP_REGS)
        return (from == ADDRESS_REGS ? 4 : 6);

      if ((from == ADDRESS_REGS || from == DATA_REGS)
          && (to == ADDRESS_REGS || to == DATA_REGS))
        return 4;

      if (to == EXTENDED_REGS)
        return (to == from ? 6 : 4);

      /* What's left are SP_REGS, FP_REGS, or combinations of the above.  */
      return 6;
    }
  else
    {
      /* The scale here is cycles * 2.  */

      if (to == FP_REGS)
        return 8;
      if (from == FP_REGS)
        return 4;

      /* All legal moves between integral registers are single cycle.  */
      return 2;
    }
}
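
/* As an illustration of the two-move diagnosis above: on MN103, when
   optimizing for size, DATA_REGS -> SP_REGS needs an ADDRESS_REGS
   scratch, so the cost is computed recursively as DATA -> ADDRESS plus
   ADDRESS -> SP, i.e. 4 + 4 == 8 on the bytes * 2 scale, or four bytes
   of code.  */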
 
/* Implement the TARGET_MEMORY_MOVE_COST hook.

   Since the form of the address is not available here, this must be
   speed-relative, though we should never be less expensive than the
   size-relative register move costs above.  This is not a problem.  */

static int
mn10300_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
                          reg_class_t iclass, bool in ATTRIBUTE_UNUSED)
{
  enum reg_class rclass = (enum reg_class) iclass;

  if (rclass == FP_REGS)
    return 8;
  return 6;
}

/* Implement the TARGET_RTX_COSTS hook.

   Speed-relative costs are relative to COSTS_N_INSNS, which is intended
   to represent cycles.  Size-relative costs are in bytes.  */

static bool
mn10300_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
                   int *ptotal, bool speed)
{
  /* This value is used for SYMBOL_REF etc where we want to pretend
     we have a full 32-bit constant.  */
  HOST_WIDE_INT i = 0x12345678;
  int total;

  switch (code)
    {
    case CONST_INT:
      i = INTVAL (x);
    do_int_costs:
      if (speed)
        {
          if (outer_code == SET)
            {
              /* 16-bit integer loads have latency 1, 32-bit loads 2.  */
              if (IN_RANGE (i, -32768, 32767))
                total = COSTS_N_INSNS (1);
              else
                total = COSTS_N_INSNS (2);
            }
          else
            {
              /* 16-bit integer operands don't affect latency;
                 24-bit and 32-bit operands add a cycle.  */
              if (IN_RANGE (i, -32768, 32767))
                total = 0;
              else
                total = COSTS_N_INSNS (1);
            }
        }
      else
        {
          if (outer_code == SET)
            {
              if (i == 0)
                total = 1;
              else if (IN_RANGE (i, -128, 127))
                total = 2;
              else if (IN_RANGE (i, -32768, 32767))
                total = 3;
              else
                total = 6;
            }
          else
            {
              /* Reference here is ADD An,Dn, vs ADD imm,Dn.  */
              if (IN_RANGE (i, -128, 127))
                total = 0;
              else if (IN_RANGE (i, -32768, 32767))
                total = 2;
              else if (TARGET_AM33 && IN_RANGE (i, -0x01000000, 0x00ffffff))
                total = 3;
              else
                total = 4;
            }
        }
      goto alldone;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST_DOUBLE:
      /* We assume all of these require a 32-bit constant, even though
         some symbol and label references can be relaxed.  */
      goto do_int_costs;

    case UNSPEC:
      switch (XINT (x, 1))
        {
        case UNSPEC_PIC:
        case UNSPEC_GOT:
        case UNSPEC_GOTOFF:
        case UNSPEC_PLT:
        case UNSPEC_GOTSYM_OFF:
          /* The PIC unspecs also resolve to a 32-bit constant.  */
          goto do_int_costs;

        default:
          /* Assume any non-listed unspec is some sort of arithmetic.  */
          goto do_arith_costs;
        }

    case PLUS:
      /* Notice the size difference of INC and INC4.  */
      if (!speed && outer_code == SET && CONST_INT_P (XEXP (x, 1)))
        {
          i = INTVAL (XEXP (x, 1));
          if (i == 1 || i == 4)
            {
              total = 1 + rtx_cost (XEXP (x, 0), PLUS, 0, speed);
              goto alldone;
            }
        }
      goto do_arith_costs;

    case MINUS:
    case AND:
    case IOR:
    case XOR:
    case NOT:
    case NEG:
    case ZERO_EXTEND:
    case SIGN_EXTEND:
    case COMPARE:
    case BSWAP:
    case CLZ:
    do_arith_costs:
      total = (speed ? COSTS_N_INSNS (1) : 2);
      break;

    case ASHIFT:
      /* Notice the size difference of ASL2 and variants.  */
      if (!speed && CONST_INT_P (XEXP (x, 1)))
        switch (INTVAL (XEXP (x, 1)))
          {
          case 1:
          case 2:
            total = 1;
            goto alldone;
          case 3:
          case 4:
            total = 2;
            goto alldone;
          }
      /* FALLTHRU */

    case ASHIFTRT:
    case LSHIFTRT:
      total = (speed ? COSTS_N_INSNS (1) : 3);
      goto alldone;

    case MULT:
      total = (speed ? COSTS_N_INSNS (3) : 2);
      break;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      total = (speed ? COSTS_N_INSNS (39)
                /* Include space to load+retrieve MDR.  */
                : code == MOD || code == UMOD ? 6 : 4);
      break;

    case MEM:
      total = mn10300_address_cost (XEXP (x, 0), speed);
      if (speed)
        total = COSTS_N_INSNS (2 + total);
      goto alldone;

    default:
      /* Probably not implemented.  Assume external call.  */
      total = (speed ? COSTS_N_INSNS (10) : 7);
      break;
    }

  *ptotal = total;
  return false;

 alldone:
  *ptotal = total;
  return true;
}
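
/* Example size costs from the CONST_INT handling above (illustrative):
   as the source of a SET, (const_int 0) costs 1 byte, (const_int 100)
   costs 2 and (const_int 40000) costs 6; as an operand of an arithmetic
   insn, (const_int 100) adds nothing while (const_int 40000) adds 2.  */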
 
/* If using PIC, mark a SYMBOL_REF for a non-global symbol so that we
   may access it using GOTOFF instead of GOT.  */

static void
mn10300_encode_section_info (tree decl, rtx rtl, int first ATTRIBUTE_UNUSED)
{
  rtx symbol;

  if (! MEM_P (rtl))
    return;
  symbol = XEXP (rtl, 0);
  if (GET_CODE (symbol) != SYMBOL_REF)
    return;

  if (flag_pic)
    SYMBOL_REF_FLAG (symbol) = (*targetm.binds_local_p) (decl);
}

/* Dispatch tables on the mn10300 are extremely expensive in terms of code
   and readonly data size.  So we crank up the case threshold value to
   encourage a series of if/else comparisons to implement many small switch
   statements.  In theory, this value could be increased much more if we
   were solely optimizing for space, but we keep it "reasonable" to avoid
   serious code efficiency lossage.  */

static unsigned int
mn10300_case_values_threshold (void)
{
  return 6;
}

/* Worker function for TARGET_TRAMPOLINE_INIT.  */

static void
mn10300_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
{
  rtx mem, disp, fnaddr = XEXP (DECL_RTL (fndecl), 0);

  /* This is a strict alignment target, which means that we play
     some games to make sure that the locations at which we need
     to store <chain> and <disp> wind up at aligned addresses.

        0x28 0x00                       add 0,d0
                  0xfc 0xdd             mov chain,a1
        <chain>
        0xf8 0xed 0x00                  btst 0,d1
                       0xdc             jmp fnaddr
        <disp>

     Note that the two extra insns are effectively nops; they
     clobber the flags but do not affect the contents of D0 or D1.  */

  disp = expand_binop (SImode, sub_optab, fnaddr,
                       plus_constant (XEXP (m_tramp, 0), 11),
                       NULL_RTX, 1, OPTAB_DIRECT);

  mem = adjust_address (m_tramp, SImode, 0);
  emit_move_insn (mem, gen_int_mode (0xddfc0028, SImode));
  mem = adjust_address (m_tramp, SImode, 4);
  emit_move_insn (mem, chain_value);
  mem = adjust_address (m_tramp, SImode, 8);
  emit_move_insn (mem, gen_int_mode (0xdc00edf8, SImode));
  mem = adjust_address (m_tramp, SImode, 12);
  emit_move_insn (mem, disp);
}
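
/* For example (hypothetical addresses): with the trampoline at 0x1000
   and the nested function at 0x2000, <disp> is 0x2000 - (0x1000 + 11),
   the displacement relative to the jmp opcode, whose 0xdc byte lands at
   offset 11 of the little-endian words stored by the function above.  */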
 
/* Output the assembler code for a C++ thunk function.
   THUNK_DECL is the declaration for the thunk function itself, FUNCTION
   is the decl for the target function.  DELTA is an immediate constant
   offset to be added to the THIS parameter.  If VCALL_OFFSET is nonzero
   the word at the adjusted address *(*THIS' + VCALL_OFFSET) should be
   additionally added to THIS.  Finally jump to the entry point of
   FUNCTION.  */

static void
mn10300_asm_output_mi_thunk (FILE *        file,
                             tree          thunk_fndecl ATTRIBUTE_UNUSED,
                             HOST_WIDE_INT delta,
                             HOST_WIDE_INT vcall_offset,
                             tree          function)
{
  const char * _this;

  /* Get the register holding the THIS parameter.  Handle the case
     where there is a hidden first argument for a returned structure.  */
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    _this = reg_names [FIRST_ARGUMENT_REGNUM + 1];
  else
    _this = reg_names [FIRST_ARGUMENT_REGNUM];

  fprintf (file, "\t%s Thunk Entry Point:\n", ASM_COMMENT_START);

  if (delta)
    fprintf (file, "\tadd %d, %s\n", (int) delta, _this);

  if (vcall_offset)
    {
      const char * scratch = reg_names [FIRST_ADDRESS_REGNUM + 1];

      fprintf (file, "\tmov %s, %s\n", _this, scratch);
      fprintf (file, "\tmov (%s), %s\n", scratch, scratch);
      fprintf (file, "\tadd %d, %s\n", (int) vcall_offset, scratch);
      fprintf (file, "\tmov (%s), %s\n", scratch, scratch);
      fprintf (file, "\tadd %s, %s\n", scratch, _this);
    }

  fputs ("\tjmp ", file);
  assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
  putc ('\n', file);
}
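
/* With delta == 4 and vcall_offset == 0 the routine above emits
   something like the following (assuming ASM_COMMENT_START is "#" and
   `this' arrives in d0):

       # Thunk Entry Point:
       add 4, d0
       jmp function  */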
 
/* Return true if mn10300_asm_output_mi_thunk would be able to output the
   assembler code for the thunk function specified by the arguments
   it is passed, and false otherwise.  */

static bool
mn10300_can_output_mi_thunk (const_tree    thunk_fndecl ATTRIBUTE_UNUSED,
                             HOST_WIDE_INT delta        ATTRIBUTE_UNUSED,
                             HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
                             const_tree    function     ATTRIBUTE_UNUSED)
{
  return true;
}

bool
mn10300_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
{
  if (REGNO_REG_CLASS (regno) == FP_REGS
      || REGNO_REG_CLASS (regno) == FP_ACC_REGS)
    /* Do not store integer values in FP registers.  */
    return GET_MODE_CLASS (mode) == MODE_FLOAT && ((regno & 1) == 0);

  if (((regno) & 1) == 0 || GET_MODE_SIZE (mode) == 4)
    return true;

  if (REGNO_REG_CLASS (regno) == DATA_REGS
      || (TARGET_AM33 && REGNO_REG_CLASS (regno) == ADDRESS_REGS)
      || REGNO_REG_CLASS (regno) == EXTENDED_REGS)
    return GET_MODE_SIZE (mode) <= 4;

  return false;
}

bool
mn10300_modes_tieable (enum machine_mode mode1, enum machine_mode mode2)
{
  if (GET_MODE_CLASS (mode1) == MODE_FLOAT
      && GET_MODE_CLASS (mode2) != MODE_FLOAT)
    return false;

  if (GET_MODE_CLASS (mode2) == MODE_FLOAT
      && GET_MODE_CLASS (mode1) != MODE_FLOAT)
    return false;

  if (TARGET_AM33
      || mode1 == mode2
      || (GET_MODE_SIZE (mode1) <= 4 && GET_MODE_SIZE (mode2) <= 4))
    return true;

  return false;
}

static int
cc_flags_for_mode (enum machine_mode mode)
{
  switch (mode)
    {
    case CCmode:
      return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_C | CC_FLAG_V;
    case CCZNCmode:
      return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_C;
    case CCZNmode:
      return CC_FLAG_Z | CC_FLAG_N;
    case CC_FLOATmode:
      return -1;
    default:
      gcc_unreachable ();
    }
}
 
static int
cc_flags_for_code (enum rtx_code code)
{
  switch (code)
    {
    case EQ:    /* Z */
    case NE:    /* ~Z */
      return CC_FLAG_Z;

    case LT:    /* N */
    case GE:    /* ~N */
      return CC_FLAG_N;

    case GT:    /* ~(Z|(N^V)) */
    case LE:    /* Z|(N^V) */
      return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_V;

    case GEU:   /* ~C */
    case LTU:   /* C */
      return CC_FLAG_C;

    case GTU:   /* ~(C | Z) */
    case LEU:   /* C | Z */
      return CC_FLAG_Z | CC_FLAG_C;

    case ORDERED:
    case UNORDERED:
    case LTGT:
    case UNEQ:
    case UNGE:
    case UNGT:
    case UNLE:
    case UNLT:
      return -1;

    default:
      gcc_unreachable ();
    }
}
 
enum machine_mode
mn10300_select_cc_mode (enum rtx_code code, rtx x, rtx y ATTRIBUTE_UNUSED)
{
  int req;

  if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
    return CC_FLOATmode;

  req = cc_flags_for_code (code);

  if (req & CC_FLAG_V)
    return CCmode;
  if (req & CC_FLAG_C)
    return CCZNCmode;
  return CCZNmode;
}
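
/* Illustrative selections made by the function above: EQ and NE need
   only the Z flag and get CCZNmode; LTU needs C and gets CCZNCmode; GT
   needs V as well and gets the full CCmode; any floating-point
   comparison gets CC_FLOATmode regardless of the code.  */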
 
static inline bool
is_load_insn (rtx insn)
{
  if (GET_CODE (PATTERN (insn)) != SET)
    return false;

  return MEM_P (SET_SRC (PATTERN (insn)));
}

static inline bool
is_store_insn (rtx insn)
{
  if (GET_CODE (PATTERN (insn)) != SET)
    return false;

  return MEM_P (SET_DEST (PATTERN (insn)));
}

/* Update scheduling costs for situations that cannot be
   described using the attributes and DFA machinery.
   DEP is the insn being scheduled.
   INSN is the previous insn.
   COST is the current cycle cost for DEP.  */

static int
mn10300_adjust_sched_cost (rtx insn, rtx link, rtx dep, int cost)
{
  int timings = get_attr_timings (insn);

  if (!TARGET_AM33)
    return 1;

  if (GET_CODE (insn) == PARALLEL)
    insn = XVECEXP (insn, 0, 0);

  if (GET_CODE (dep) == PARALLEL)
    dep = XVECEXP (dep, 0, 0);

  /* For the AM34 a load instruction that follows a
     store instruction incurs an extra cycle of delay.  */
  if (mn10300_tune_cpu == PROCESSOR_AM34
      && is_load_insn (dep)
      && is_store_insn (insn))
    cost += 1;

  /* For the AM34 a non-store, non-branch FPU insn that follows
     another FPU insn incurs a one cycle throughput increase.  */
  else if (mn10300_tune_cpu == PROCESSOR_AM34
           && ! is_store_insn (insn)
           && ! JUMP_P (insn)
           && GET_CODE (PATTERN (dep)) == SET
           && GET_CODE (PATTERN (insn)) == SET
           && GET_MODE_CLASS (GET_MODE (SET_SRC (PATTERN (dep)))) == MODE_FLOAT
           && GET_MODE_CLASS (GET_MODE (SET_SRC (PATTERN (insn)))) == MODE_FLOAT)
    cost += 1;

  /*  Resolve the conflict described in section 1-7-4 of
      Chapter 3 of the MN103E Series Instruction Manual
      where it says:

        "When the preceding instruction is a CPU load or
         store instruction, a following FPU instruction
         cannot be executed until the CPU completes the
         latency period even though there are no register
         or flag dependencies between them."  */

  /* Only the AM33-2 (and later) CPUs have FPU instructions.  */
  if (! TARGET_AM33_2)
    return cost;

  /* If a data dependence already exists then the cost is correct.  */
  if (REG_NOTE_KIND (link) == 0)
    return cost;

  /* Check that the instruction about to be scheduled is an FPU instruction.  */
  if (GET_CODE (PATTERN (dep)) != SET)
    return cost;

  if (GET_MODE_CLASS (GET_MODE (SET_SRC (PATTERN (dep)))) != MODE_FLOAT)
    return cost;

  /* Now check to see if the previous instruction is a load or store.  */
  if (! is_load_insn (insn) && ! is_store_insn (insn))
    return cost;

  /* XXX: Verify: The text of 1-7-4 implies that the restriction
     only applies when an INTEGER load/store precedes an FPU
     instruction, but is this true?  For now we assume that it is.  */
  if (GET_MODE_CLASS (GET_MODE (SET_SRC (PATTERN (insn)))) != MODE_INT)
    return cost;

  /* Extract the latency value from the timings attribute.  */
  return timings < 100 ? (timings % 10) : (timings % 100);
}
 
static void
mn10300_conditional_register_usage (void)
{
  unsigned int i;

  if (!TARGET_AM33)
    {
      for (i = FIRST_EXTENDED_REGNUM;
           i <= LAST_EXTENDED_REGNUM; i++)
        fixed_regs[i] = call_used_regs[i] = 1;
    }
  if (!TARGET_AM33_2)
    {
      for (i = FIRST_FP_REGNUM;
           i <= LAST_FP_REGNUM; i++)
        fixed_regs[i] = call_used_regs[i] = 1;
    }
  if (flag_pic)
    fixed_regs[PIC_OFFSET_TABLE_REGNUM] =
    call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
}

/* Worker function for TARGET_MD_ASM_CLOBBERS.
   We do this in the mn10300 backend to maintain source compatibility
   with the old cc0-based compiler.  */

static tree
mn10300_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
                         tree inputs ATTRIBUTE_UNUSED,
                         tree clobbers)
{
  clobbers = tree_cons (NULL_TREE, build_string (5, "EPSW"),
                        clobbers);
  return clobbers;
}

/* A helper function for splitting cbranch patterns after reload.  */

void
mn10300_split_cbranch (enum machine_mode cmp_mode, rtx cmp_op, rtx label_ref)
{
  rtx flags, x;

  flags = gen_rtx_REG (cmp_mode, CC_REG);
  x = gen_rtx_COMPARE (cmp_mode, XEXP (cmp_op, 0), XEXP (cmp_op, 1));
  x = gen_rtx_SET (VOIDmode, flags, x);
  emit_insn (x);

  x = gen_rtx_fmt_ee (GET_CODE (cmp_op), VOIDmode, flags, const0_rtx);
  x = gen_rtx_IF_THEN_ELSE (VOIDmode, x, label_ref, pc_rtx);
  x = gen_rtx_SET (VOIDmode, pc_rtx, x);
  emit_jump_insn (x);
}

/* A helper function for matching parallels that set the flags.  */

bool
mn10300_match_ccmode (rtx insn, enum machine_mode cc_mode)
{
  rtx op1, flags;
  enum machine_mode flags_mode;

  gcc_checking_assert (XVECLEN (PATTERN (insn), 0) == 2);

  op1 = XVECEXP (PATTERN (insn), 0, 1);
  gcc_checking_assert (GET_CODE (SET_SRC (op1)) == COMPARE);

  flags = SET_DEST (op1);
  flags_mode = GET_MODE (flags);

  if (GET_MODE (SET_SRC (op1)) != flags_mode)
    return false;
  if (GET_MODE_CLASS (flags_mode) != MODE_CC)
    return false;

  /* Ensure that the mode of FLAGS is compatible with CC_MODE.  */
  if (cc_flags_for_mode (flags_mode) & ~cc_flags_for_mode (cc_mode))
    return false;

  return true;
}
 
/* This function is used to help split:

     (set (reg) (and (reg) (int)))

   into:

     (set (reg) (shift (reg) (int)))
     (set (reg) (shift (reg) (int)))

   where the shifts will be shorter than the "and" insn.

   It returns the number of bits that should be shifted.  A positive
   value means that the low bits are to be cleared (and hence the
   shifts should be right followed by left) whereas a negative value
   means that the high bits are to be cleared (left followed by right).
   Zero is returned when it would not be economical to split the AND.  */

int
mn10300_split_and_operand_count (rtx op)
{
  HOST_WIDE_INT val = INTVAL (op);
  int count;

  if (val < 0)
    {
      /* High bit is set, look for bits clear at the bottom.  */
      count = exact_log2 (-val);
      if (count < 0)
        return 0;
      /* This is only a size win if we can use the asl2 insn.  Otherwise we
         would be replacing 1 6-byte insn with 2 3-byte insns.  */
      if (count > (optimize_insn_for_speed_p () ? 2 : 4))
        return 0;
      return count;
    }
  else
    {
      /* High bit is clear, look for bits set at the bottom.  */
      count = exact_log2 (val + 1);
      count = 32 - count;
      /* Again, this is only a size win with asl2.  */
      if (count > (optimize_insn_for_speed_p () ? 2 : 4))
        return 0;
      return -count;
    }
}
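
/* Two worked examples of the above: val == -4 (0xfffffffc) has its low
   two bits clear; exact_log2 (4) == 2, so 2 is returned and the AND can
   become a right shift by 2 followed by a left shift by 2.  val ==
   0x3fffffff has its high two bits clear; exact_log2 (0x40000000) == 30
   and 32 - 30 == 2, so -2 is returned (left shift then right shift).  */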
 
struct liw_data
{
  enum attr_liw slot;
  enum attr_liw_op op;
  rtx dest;
  rtx src;
};

/* Decide if the given insn is a candidate for LIW bundling.  If it is then
   extract the operands and LIW attributes from the insn and use them to fill
   in the liw_data structure.  Return true upon success or false if the insn
   cannot be bundled.  */

static bool
extract_bundle (rtx insn, struct liw_data * pdata)
{
  bool allow_consts = true;
  rtx p;

  gcc_assert (pdata != NULL);

  if (insn == NULL_RTX)
    return false;
  /* Make sure that we are dealing with a simple SET insn.  */
  p = single_set (insn);
  if (p == NULL_RTX)
    return false;

  /* Make sure that it could go into one of the LIW pipelines.  */
  pdata->slot = get_attr_liw (insn);
  if (pdata->slot == LIW_BOTH)
    return false;

  pdata->op = get_attr_liw_op (insn);

  switch (pdata->op)
    {
    case LIW_OP_MOV:
      pdata->dest = SET_DEST (p);
      pdata->src = SET_SRC (p);
      break;
    case LIW_OP_CMP:
      pdata->dest = XEXP (SET_SRC (p), 0);
      pdata->src = XEXP (SET_SRC (p), 1);
      break;
    case LIW_OP_NONE:
      return false;
    case LIW_OP_AND:
    case LIW_OP_OR:
    case LIW_OP_XOR:
      /* The AND, OR and XOR long instruction words only accept register arguments.  */
      allow_consts = false;
      /* Fall through.  */
    default:
      pdata->dest = SET_DEST (p);
      pdata->src = XEXP (SET_SRC (p), 1);
      break;
    }

  if (! REG_P (pdata->dest))
    return false;

  if (REG_P (pdata->src))
    return true;

  return allow_consts && satisfies_constraint_O (pdata->src);
}
 
/* Make sure that it is OK to execute LIW1 and LIW2 in parallel.  GCC generated
   the instructions with the assumption that LIW1 would be executed before LIW2,
   so we must check for overlaps between their sources and destinations.  */

static bool
check_liw_constraints (struct liw_data * pliw1, struct liw_data * pliw2)
{
  /* Check for slot conflicts.  */
  if (pliw2->slot == pliw1->slot && pliw1->slot != LIW_EITHER)
    return false;

  /* If either operation is a compare, then "dest" is really an input; the real
     destination is CC_REG.  So these instructions need different checks.  */

  /* Changing "CMP ; OP" into "CMP | OP" is OK because the comparison will
     check its values prior to any changes made by OP.  */
  if (pliw1->op == LIW_OP_CMP)
    {
      /* Two sequential comparisons mean dead code, which ought to
         have been eliminated given that bundling only happens with
         optimization.  We cannot bundle them in any case.  */
      gcc_assert (pliw1->op != pliw2->op);
      return true;
    }

  /* Changing "OP ; CMP" into "OP | CMP" does not work if the value being compared
     is the destination of OP, as the CMP will look at the old value, not the new
     one.  */
  if (pliw2->op == LIW_OP_CMP)
    {
      if (REGNO (pliw2->dest) == REGNO (pliw1->dest))
        return false;

      if (REG_P (pliw2->src))
        return REGNO (pliw2->src) != REGNO (pliw1->dest);

      return true;
    }

  /* Changing "OP1 ; OP2" into "OP1 | OP2" does not work if they both write to the
     same destination register.  */
  if (REGNO (pliw2->dest) == REGNO (pliw1->dest))
    return false;

  /* Changing "OP1 ; OP2" into "OP1 | OP2" generally does not work if the destination
     of OP1 is the source of OP2.  The exception is when OP1 is a MOVE instruction when
     we can replace the source in OP2 with the source of OP1.  */
  if (REG_P (pliw2->src) && REGNO (pliw2->src) == REGNO (pliw1->dest))
    {
      if (pliw1->op == LIW_OP_MOV && REG_P (pliw1->src))
        {
          pliw2->src = pliw1->src;
          return true;
        }
      return false;
    }

  /* Everything else is OK.  */
  return true;
}
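
/* Two illustrative pairs (register choices hypothetical): "mov d0,d1"
   followed by "cmp d2,d3" may be bundled, since neither CMP operand is
   the MOV's destination.  "add d0,d1" followed by "cmp d1,d2" may not:
   in the bundle the CMP would read the value of d1 from before the ADD
   updated it.  */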
 
/* Combine pairs of insns into LIW bundles.  */

static void
mn10300_bundle_liw (void)
{
  rtx r;

  for (r = get_insns (); r != NULL_RTX; r = next_nonnote_nondebug_insn (r))
    {
      rtx insn1, insn2;
      struct liw_data liw1, liw2;

      insn1 = r;
      if (! extract_bundle (insn1, & liw1))
        continue;

      insn2 = next_nonnote_nondebug_insn (insn1);
      if (! extract_bundle (insn2, & liw2))
        continue;

      /* Check for source/destination overlap.  */
      if (! check_liw_constraints (& liw1, & liw2))
        continue;

      if (liw1.slot == LIW_OP2 || liw2.slot == LIW_OP1)
        {
          struct liw_data temp;

          temp = liw1;
          liw1 = liw2;
          liw2 = temp;
        }

      delete_insn (insn2);

      if (liw1.op == LIW_OP_CMP)
        insn2 = gen_cmp_liw (liw2.dest, liw2.src, liw1.dest, liw1.src,
                             GEN_INT (liw2.op));
      else if (liw2.op == LIW_OP_CMP)
        insn2 = gen_liw_cmp (liw1.dest, liw1.src, liw2.dest, liw2.src,
                             GEN_INT (liw1.op));
      else
        insn2 = gen_liw (liw1.dest, liw2.dest, liw1.src, liw2.src,
                         GEN_INT (liw1.op), GEN_INT (liw2.op));

      insn2 = emit_insn_after (insn2, insn1);
      delete_insn (insn1);
      r = insn2;
    }
}

#define DUMP(reason, insn)                      \
  do                                            \
    {                                           \
      if (dump_file)                            \
        {                                       \
          fprintf (dump_file, reason "\n");     \
          if (insn != NULL_RTX)                 \
            print_rtl_single (dump_file, insn); \
          fprintf (dump_file, "\n");            \
        }                                       \
    }                                           \
  while (0)
 
/* Replace the BRANCH insn with a Lcc insn that goes to LABEL.
   Insert a SETLB insn just before LABEL.  */

static void
mn10300_insert_setlb_lcc (rtx label, rtx branch)
{
  rtx lcc, comparison, cmp_reg;

  if (LABEL_NUSES (label) > 1)
    {
      rtx insn;

      /* This label is used both as an entry point to the loop
         and as a loop-back point for the loop.  We need to separate
         these two functions so that the SETLB happens upon entry,
         but the loop-back does not go to the SETLB instruction.  */
      DUMP ("Inserting SETLB insn after:", label);
      insn = emit_insn_after (gen_setlb (), label);
      label = gen_label_rtx ();
      emit_label_after (label, insn);
      DUMP ("Created new loop-back label:", label);
    }
  else
    {
      DUMP ("Inserting SETLB insn before:", label);
      emit_insn_before (gen_setlb (), label);
    }

  comparison = XEXP (SET_SRC (PATTERN (branch)), 0);
  cmp_reg = XEXP (comparison, 0);
  gcc_assert (REG_P (cmp_reg));

  /* The comparison must already have been split out of the branch.  */
  gcc_assert (REGNO (cmp_reg) == CC_REG);

  if (GET_MODE (cmp_reg) == CC_FLOATmode)
    lcc = gen_FLcc (comparison, label);
  else
    lcc = gen_Lcc (comparison, label);

  lcc = emit_jump_insn_before (lcc, branch);
  mark_jump_label (XVECEXP (PATTERN (lcc), 0, 0), lcc, 0);
  JUMP_LABEL (lcc) = label;
  DUMP ("Replacing branch insn...", branch);
  DUMP ("... with Lcc insn:", lcc);
  delete_insn (branch);
}

static bool
mn10300_block_contains_call (struct basic_block_def * block)
{
  rtx insn;

  FOR_BB_INSNS (block, insn)
    if (CALL_P (insn))
      return true;

  return false;
}

static bool
mn10300_loop_contains_call_insn (loop_p loop)
{
  basic_block * bbs;
  bool result = false;
  unsigned int i;

  bbs = get_loop_body (loop);

  for (i = 0; i < loop->num_nodes; i++)
    if (mn10300_block_contains_call (bbs[i]))
      {
        result = true;
        break;
      }

  free (bbs);
  return result;
}
 
static void
mn10300_scan_for_setlb_lcc (void)
{
  struct loops loops;
  loop_iterator liter;
  loop_p loop;

  DUMP ("Looking for loops that can use the SETLB insn", NULL_RTX);

  df_analyze ();
  compute_bb_for_insn ();

  /* Find the loops.  */
  if (flow_loops_find (& loops) < 1)
    DUMP ("No loops found", NULL_RTX);
  current_loops = & loops;

  /* FIXME: For now we only investigate innermost loops.  In practice,
     however, if an inner loop is not suitable for use with the SETLB/Lcc
     insns, it may be the case that its parent loop is suitable.  Thus we
     should check all loops, but work from the innermost outwards.  */
  FOR_EACH_LOOP (liter, loop, LI_ONLY_INNERMOST)
    {
      const char * reason = NULL;

      /* Check to see if we can modify this loop.  If we cannot
         then set 'reason' to describe why it could not be done.  */
      if (loop->latch == NULL)
        reason = "it contains multiple latches";
      else if (loop->header != loop->latch)
        /* FIXME: We could handle loops that span multiple blocks,
           but this requires a lot more work tracking down the branches
           that need altering, so for now keep things simple.  */
        reason = "the loop spans multiple blocks";
      else if (mn10300_loop_contains_call_insn (loop))
        reason = "it contains CALL insns";
      else
        {
          rtx branch = BB_END (loop->latch);

          gcc_assert (JUMP_P (branch));
          if (single_set (branch) == NULL_RTX || ! any_condjump_p (branch))
            /* We cannot optimize tablejumps and the like.  */
            /* FIXME: We could handle unconditional jumps.  */
            reason = "it is not a simple loop";
          else
            {
              rtx label;

              if (dump_file)
                flow_loop_dump (loop, dump_file, NULL, 0);

              label = BB_HEAD (loop->header);
              gcc_assert (LABEL_P (label));

              mn10300_insert_setlb_lcc (label, branch);
            }
        }

      if (dump_file && reason != NULL)
        fprintf (dump_file, "Loop starting with insn %d is not suitable because %s\n",
                 INSN_UID (BB_HEAD (loop->header)),
                 reason);
    }

#if 0 /* FIXME: We should free the storage we allocated, but
         for some unknown reason this leads to seg-faults.  */
  FOR_EACH_LOOP (liter, loop, 0)
    free_simple_loop_desc (loop);

  flow_loops_free (current_loops);
#endif

  current_loops = NULL;

  df_finish_pass (false);

  DUMP ("SETLB scan complete", NULL_RTX);
}

static void
mn10300_reorg (void)
{
  /* These are optimizations, so only run them if optimizing.  */
  if (TARGET_AM33 && (optimize > 0 || optimize_size))
    {
      if (TARGET_ALLOW_SETLB)
        mn10300_scan_for_setlb_lcc ();

      if (TARGET_ALLOW_LIW)
        mn10300_bundle_liw ();
    }
}
 
/* Initialize the GCC target structure.  */

#undef  TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG mn10300_reorg

#undef  TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"

#undef  TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS mn10300_legitimize_address

#undef  TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST  mn10300_address_cost
#undef  TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST  mn10300_register_move_cost
#undef  TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST  mn10300_memory_move_cost
#undef  TARGET_RTX_COSTS
#define TARGET_RTX_COSTS mn10300_rtx_costs

#undef  TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START mn10300_file_start
#undef  TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA mn10300_asm_output_addr_const_extra

#undef  TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE mn10300_option_override

#undef  TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO mn10300_encode_section_info

#undef  TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
#undef  TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY mn10300_return_in_memory
#undef  TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE mn10300_pass_by_reference
#undef  TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
#undef  TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES mn10300_arg_partial_bytes
#undef  TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG mn10300_function_arg
#undef  TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE mn10300_function_arg_advance

#undef  TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS mn10300_builtin_saveregs
#undef  TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START mn10300_va_start

#undef  TARGET_CASE_VALUES_THRESHOLD
#define TARGET_CASE_VALUES_THRESHOLD mn10300_case_values_threshold

#undef  TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P     mn10300_legitimate_address_p
#undef  TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS     mn10300_delegitimize_address
#undef  TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P    mn10300_legitimate_constant_p

#undef  TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS mn10300_preferred_reload_class
#undef  TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
#define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS \
  mn10300_preferred_output_reload_class
#undef  TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD  mn10300_secondary_reload

#undef  TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT mn10300_trampoline_init

#undef  TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE mn10300_function_value
#undef  TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE mn10300_libcall_value

#undef  TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK      mn10300_asm_output_mi_thunk
#undef  TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK  mn10300_can_output_mi_thunk

#undef  TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST mn10300_adjust_sched_cost
3389
 
3390
#undef  TARGET_CONDITIONAL_REGISTER_USAGE
3391
#define TARGET_CONDITIONAL_REGISTER_USAGE mn10300_conditional_register_usage
3392
 
3393
#undef TARGET_MD_ASM_CLOBBERS
3394
#define TARGET_MD_ASM_CLOBBERS  mn10300_md_asm_clobbers
3395
 
3396
#undef  TARGET_FLAGS_REGNUM
3397
#define TARGET_FLAGS_REGNUM  CC_REG
3398
 
3399
struct gcc_target targetm = TARGET_INITIALIZER;
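
/* Illustrative sketch, not part of the original file: generic parts of
   the compiler reach the routines defined above through the targetm
   vector, which TARGET_INITIALIZER fills in from the macros redefined
   in this file.  A hypothetical dispatch, mirroring what the
   machine-dependent reorg pass does:  */
#if 0
static void
example_dispatch (void)
{
  /* Calls mn10300_reorg via TARGET_MACHINE_DEPENDENT_REORG.  */
  targetm.machine_dependent_reorg ();
}
#endif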
