/* Target Code for R8C/M16C/M32C
2
   Copyright (C) 2005, 2007 Free Software Foundation, Inc.
3
   Contributed by Red Hat.
4
 
5
   This file is part of GCC.
6
 
7
   GCC is free software; you can redistribute it and/or modify it
8
   under the terms of the GNU General Public License as published
9
   by the Free Software Foundation; either version 3, or (at your
10
   option) any later version.
11
 
12
   GCC is distributed in the hope that it will be useful, but WITHOUT
13
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
15
   License for more details.
16
 
17
   You should have received a copy of the GNU General Public License
18
   along with GCC; see the file COPYING3.  If not see
19
   <http://www.gnu.org/licenses/>.  */
20
 
21
#include "config.h"
22
#include "system.h"
23
#include "coretypes.h"
24
#include "tm.h"
25
#include "rtl.h"
26
#include "regs.h"
27
#include "hard-reg-set.h"
28
#include "real.h"
29
#include "insn-config.h"
30
#include "conditions.h"
31
#include "insn-flags.h"
32
#include "output.h"
33
#include "insn-attr.h"
34
#include "flags.h"
35
#include "recog.h"
36
#include "reload.h"
37
#include "toplev.h"
38
#include "obstack.h"
39
#include "tree.h"
40
#include "expr.h"
41
#include "optabs.h"
42
#include "except.h"
43
#include "function.h"
44
#include "ggc.h"
45
#include "target.h"
46
#include "target-def.h"
47
#include "tm_p.h"
48
#include "langhooks.h"
49
#include "tree-gimple.h"
50
 
51
/* Prototypes */
52
 
53
/* Used by m32c_pushm_popm.  */
54
typedef enum
55
{
56
  PP_pushm,
57
  PP_popm,
58
  PP_justcount
59
} Push_Pop_Type;
60
 
61
static tree interrupt_handler (tree *, tree, tree, int, bool *);
62
static int interrupt_p (tree node);
63
static bool m32c_asm_integer (rtx, unsigned int, int);
64
static int m32c_comp_type_attributes (tree, tree);
65
static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
66
static struct machine_function *m32c_init_machine_status (void);
67
static void m32c_insert_attributes (tree, tree *);
68
static bool m32c_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
69
                                    tree, bool);
70
static bool m32c_promote_prototypes (tree);
71
static int m32c_pushm_popm (Push_Pop_Type);
72
static bool m32c_strict_argument_naming (CUMULATIVE_ARGS *);
73
static rtx m32c_struct_value_rtx (tree, int);
74
static rtx m32c_subreg (enum machine_mode, rtx, enum machine_mode, int);
75
static int need_to_save (int);
76
 
77
#define streq(a,b) (strcmp ((a), (b)) == 0)
78
 
79
/* Internal support routines */
80
 
81
/* Debugging statements are tagged with DEBUG0 only so that they can
82
   be easily enabled individually, by replacing the '0' with '1' as
83
   needed.  */
84
#define DEBUG0 0
85
#define DEBUG1 1
86
 
87
#if DEBUG0
88
/* This is needed by some of the commented-out debug statements
89
   below.  */
90
static char const *class_names[LIM_REG_CLASSES] = REG_CLASS_NAMES;
91
#endif
92
static int class_contents[LIM_REG_CLASSES][1] = REG_CLASS_CONTENTS;
93
 
94
/* These are all to support encode_pattern().  */
95
static char pattern[30], *patternp;
96
static GTY(()) rtx patternr[30];
97
#define RTX_IS(x) (streq (pattern, x))
98
 
99
/* Some macros to simplify the logic throughout this file.  */
100
#define IS_MEM_REGNO(regno) ((regno) >= MEM0_REGNO && (regno) <= MEM7_REGNO)
101
#define IS_MEM_REG(rtx) (GET_CODE (rtx) == REG && IS_MEM_REGNO (REGNO (rtx)))
102
 
103
#define IS_CR_REGNO(regno) ((regno) >= SB_REGNO && (regno) <= PC_REGNO)
104
#define IS_CR_REG(rtx) (GET_CODE (rtx) == REG && IS_CR_REGNO (REGNO (rtx)))
105
 
106
/* We do most RTX matching by converting the RTX into a string, and
107
   using string compares.  This vastly simplifies the logic in many of
108
   the functions in this file.
109
 
110
   On exit, pattern[] has the encoded string (use RTX_IS("...") to
111
   compare it) and patternr[] has pointers to the nodes in the RTX
112
   corresponding to each character in the encoded string.  The latter
113
   is mostly used by print_operand().
114
 
115
   Unrecognized patterns have '?' in them; this shows up when the
116
   assembler complains about syntax errors.
117
*/
118
 
119
static void
120
encode_pattern_1 (rtx x)
121
{
122
  int i;
123
 
124
  if (patternp == pattern + sizeof (pattern) - 2)
125
    {
126
      patternp[-1] = '?';
127
      return;
128
    }
129
 
130
  patternr[patternp - pattern] = x;
131
 
132
  switch (GET_CODE (x))
133
    {
134
    case REG:
135
      *patternp++ = 'r';
136
      break;
137
    case SUBREG:
138
      if (GET_MODE_SIZE (GET_MODE (x)) !=
139
          GET_MODE_SIZE (GET_MODE (XEXP (x, 0))))
140
        *patternp++ = 'S';
141
      encode_pattern_1 (XEXP (x, 0));
142
      break;
143
    case MEM:
144
      *patternp++ = 'm';
145
    case CONST:
146
      encode_pattern_1 (XEXP (x, 0));
147
      break;
148
    case PLUS:
149
      *patternp++ = '+';
150
      encode_pattern_1 (XEXP (x, 0));
151
      encode_pattern_1 (XEXP (x, 1));
152
      break;
153
    case PRE_DEC:
154
      *patternp++ = '>';
155
      encode_pattern_1 (XEXP (x, 0));
156
      break;
157
    case POST_INC:
158
      *patternp++ = '<';
159
      encode_pattern_1 (XEXP (x, 0));
160
      break;
161
    case LO_SUM:
162
      *patternp++ = 'L';
163
      encode_pattern_1 (XEXP (x, 0));
164
      encode_pattern_1 (XEXP (x, 1));
165
      break;
166
    case HIGH:
167
      *patternp++ = 'H';
168
      encode_pattern_1 (XEXP (x, 0));
169
      break;
170
    case SYMBOL_REF:
171
      *patternp++ = 's';
172
      break;
173
    case LABEL_REF:
174
      *patternp++ = 'l';
175
      break;
176
    case CODE_LABEL:
177
      *patternp++ = 'c';
178
      break;
179
    case CONST_INT:
180
    case CONST_DOUBLE:
181
      *patternp++ = 'i';
182
      break;
183
    case UNSPEC:
184
      *patternp++ = 'u';
185
      *patternp++ = '0' + XCINT (x, 1, UNSPEC);
186
      for (i = 0; i < XVECLEN (x, 0); i++)
187
        encode_pattern_1 (XVECEXP (x, 0, i));
188
      break;
189
    case USE:
190
      *patternp++ = 'U';
191
      break;
192
    case PARALLEL:
193
      *patternp++ = '|';
194
      for (i = 0; i < XVECLEN (x, 0); i++)
195
        encode_pattern_1 (XVECEXP (x, 0, i));
196
      break;
197
    case EXPR_LIST:
198
      *patternp++ = 'E';
199
      encode_pattern_1 (XEXP (x, 0));
200
      if (XEXP (x, 1))
201
        encode_pattern_1 (XEXP (x, 1));
202
      break;
203
    default:
204
      *patternp++ = '?';
205
#if DEBUG0
206
      fprintf (stderr, "can't encode pattern %s\n",
207
               GET_RTX_NAME (GET_CODE (x)));
208
      debug_rtx (x);
209
      gcc_unreachable ();
210
#endif
211
      break;
212
    }
213
}
214
 
215
static void
216
encode_pattern (rtx x)
217
{
218
  patternp = pattern;
219
  encode_pattern_1 (x);
220
  *patternp = 0;
221
}
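
/* A sketch of typical use, for illustration: given the address
   (mem:HI (plus:HI (reg:HI $fb) (const_int -4))), encode_pattern()
   leaves "m+ri" in pattern[] and points patternr[0..3] at the MEM,
   PLUS, REG and CONST_INT nodes in that order, so a caller can match
   and pick the address apart the way the addressing and constraint
   code later in this file does (the names here are placeholders):  */
#if 0
  encode_pattern (addr);
  if (RTX_IS ("m+ri"))
    {
      rtx base = patternr[2];   /* the base register node */
      rtx disp = patternr[3];   /* the CONST_INT displacement */
    }
#endif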
222
 
223
/* Since register names indicate the mode they're used in, we need a
   way to choose the right name for a register in a given mode.
   Called by print_operand().  */
226
 
227
static const char *
228
reg_name_with_mode (int regno, enum machine_mode mode)
229
{
230
  int mlen = GET_MODE_SIZE (mode);
231
  if (regno == R0_REGNO && mlen == 1)
232
    return "r0l";
233
  if (regno == R0_REGNO && (mlen == 3 || mlen == 4))
234
    return "r2r0";
235
  if (regno == R0_REGNO && mlen == 6)
236
    return "r2r1r0";
237
  if (regno == R0_REGNO && mlen == 8)
238
    return "r3r1r2r0";
239
  if (regno == R1_REGNO && mlen == 1)
240
    return "r1l";
241
  if (regno == R1_REGNO && (mlen == 3 || mlen == 4))
242
    return "r3r1";
243
  if (regno == A0_REGNO && TARGET_A16 && (mlen == 3 || mlen == 4))
244
    return "a1a0";
245
  return reg_names[regno];
246
}
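
/* For example, by the rules above, (R0_REGNO, QImode) yields "r0l",
   (R0_REGNO, SImode) yields "r2r0", and (A0_REGNO, PSImode) on a
   TARGET_A16 (R8C/M16C) part yields "a1a0"; anything not covered by a
   special case falls back to the plain entry in reg_names[].  */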
247
 
248
/* How many bytes a register uses on stack when it's pushed.  We need
249
   to know this because the push opcode needs to explicitly indicate
250
   the size of the register, even though the name of the register
251
   already tells it that.  Used by m32c_output_reg_{push,pop}, which
252
   is only used through calls to ASM_OUTPUT_REG_{PUSH,POP}.  */
253
 
254
static int
255
reg_push_size (int regno)
256
{
257
  switch (regno)
258
    {
259
    case R0_REGNO:
260
    case R1_REGNO:
261
      return 2;
262
    case R2_REGNO:
263
    case R3_REGNO:
264
    case FLG_REGNO:
265
      return 2;
266
    case A0_REGNO:
267
    case A1_REGNO:
268
    case SB_REGNO:
269
    case FB_REGNO:
270
    case SP_REGNO:
271
      if (TARGET_A16)
272
        return 2;
273
      else
274
        return 3;
275
    default:
276
      gcc_unreachable ();
277
    }
278
}
279
 
280
static int *class_sizes = 0;
281
 
282
/* Given two register classes, find the largest intersection between
283
   them.  If there is no intersection, return RETURNED_IF_EMPTY
284
   instead.  */
285
static int
286
reduce_class (int original_class, int limiting_class, int returned_if_empty)
287
{
288
  int cc = class_contents[original_class][0];
289
  int i, best = NO_REGS;
290
  int best_size = 0;
291
 
292
  if (original_class == limiting_class)
293
    return original_class;
294
 
295
  if (!class_sizes)
296
    {
297
      int r;
298
      class_sizes = (int *) xmalloc (LIM_REG_CLASSES * sizeof (int));
299
      for (i = 0; i < LIM_REG_CLASSES; i++)
300
        {
301
          class_sizes[i] = 0;
302
          for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
303
            if (class_contents[i][0] & (1 << r))
304
              class_sizes[i]++;
305
        }
306
    }
307
 
308
  cc &= class_contents[limiting_class][0];
309
  for (i = 0; i < LIM_REG_CLASSES; i++)
310
    {
311
      int ic = class_contents[i][0];
312
 
313
      if ((~cc & ic) == 0)
314
        if (best_size < class_sizes[i])
315
          {
316
            best = i;
317
            best_size = class_sizes[i];
318
          }
319
 
320
    }
321
  if (best == NO_REGS)
322
    return returned_if_empty;
323
  return best;
324
}
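
/* For illustration: reduce_class (HI_REGS, A_REGS, NO_REGS) intersects
   the two bit masks and returns the largest class wholly contained in
   that intersection -- A_REGS itself, assuming (as on this port) that
   both address registers are also members of HI_REGS; if the two
   classes had no registers in common, the third argument, NO_REGS,
   would be returned instead.  */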
325
 
326
/* Returns TRUE if there are any registers that exist in both register
327
   classes.  */
328
static int
329
classes_intersect (int class1, int class2)
330
{
331
  return class_contents[class1][0] & class_contents[class2][0];
332
}
333
 
334
/* Used by m32c_register_move_cost to determine if a move is
335
   impossibly expensive.  */
336
static int
337
class_can_hold_mode (int class, enum machine_mode mode)
338
{
339
  /* Cache the results:  0=untested  1=no  2=yes */
340
  static char results[LIM_REG_CLASSES][MAX_MACHINE_MODE];
341
  if (results[class][mode] == 0)
342
    {
343
      int r, n, i;
344
      results[class][mode] = 1;
345
      for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
346
        if (class_contents[class][0] & (1 << r)
347
            && HARD_REGNO_MODE_OK (r, mode))
348
          {
349
            int ok = 1;
350
            n = HARD_REGNO_NREGS (r, mode);
351
            for (i = 1; i < n; i++)
352
              if (!(class_contents[class][0] & (1 << (r + i))))
353
                ok = 0;
354
            if (ok)
355
              {
356
                results[class][mode] = 2;
357
                break;
358
              }
359
          }
360
    }
361
#if DEBUG0
362
  fprintf (stderr, "class %s can hold %s? %s\n",
363
           class_names[class], mode_name[mode],
364
           (results[class][mode] == 2) ? "yes" : "no");
365
#endif
366
  return results[class][mode] == 2;
367
}
368
 
369
/* Run-time Target Specification.  */
370
 
371
/* Memregs are memory locations that gcc treats like general
372
   registers, as there are a limited number of true registers and the
373
   m32c families can use memory in most places that registers can be
374
   used.
375
 
376
   However, since memory accesses are more expensive than registers,
377
   we allow the user to limit the number of memregs available, in
378
   order to try to persuade gcc to try harder to use real registers.
379
 
380
   Memregs are provided by m32c-lib1.S.
381
*/
382
 
383
int target_memregs = 16;
384
static bool target_memregs_set = FALSE;
385
int ok_to_change_target_memregs = TRUE;
386
 
387
#undef  TARGET_HANDLE_OPTION
388
#define TARGET_HANDLE_OPTION m32c_handle_option
389
static bool
390
m32c_handle_option (size_t code,
391
                    const char *arg ATTRIBUTE_UNUSED,
392
                    int value ATTRIBUTE_UNUSED)
393
{
394
  if (code == OPT_memregs_)
395
    {
396
      target_memregs_set = TRUE;
397
      target_memregs = atoi (arg);
398
    }
399
  return TRUE;
400
}
401
 
402
/* Implements OVERRIDE_OPTIONS.  We limit memregs to 0..16, and
403
   provide a default.  */
404
void
405
m32c_override_options (void)
406
{
407
  if (target_memregs_set)
408
    {
409
      if (target_memregs < 0 || target_memregs > 16)
410
        error ("invalid target memregs value '%d'", target_memregs);
411
    }
412
  else
413
    target_memregs = 16;
414
}
415
 
416
/* Defining data structures for per-function information */
417
 
418
/* The usual; we set up our machine_function data.  */
419
static struct machine_function *
420
m32c_init_machine_status (void)
421
{
422
  struct machine_function *machine;
423
  machine =
424
    (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
425
 
426
  return machine;
427
}
428
 
429
/* Implements INIT_EXPANDERS.  We just set up to call the above
430
   function.  */
431
void
432
m32c_init_expanders (void)
433
{
434
  init_machine_status = m32c_init_machine_status;
435
}
436
 
437
/* Storage Layout */
438
 
439
#undef TARGET_PROMOTE_FUNCTION_RETURN
440
#define TARGET_PROMOTE_FUNCTION_RETURN m32c_promote_function_return
441
bool
442
m32c_promote_function_return (tree fntype ATTRIBUTE_UNUSED)
443
{
444
  return false;
445
}
446
 
447
/* Register Basics */
448
 
449
/* Basic Characteristics of Registers */
450
 
451
/* Whether a mode fits in a register is complex enough to warrant a
452
   table.  */
453
static struct
454
{
455
  char qi_regs;
456
  char hi_regs;
457
  char pi_regs;
458
  char si_regs;
459
  char di_regs;
460
} nregs_table[FIRST_PSEUDO_REGISTER] =
461
{
  { 1, 1, 2, 2, 4 },            /* r0 */
  { 0, 1, 0, 0, 0 },            /* r2 */
  { 1, 1, 2, 2, 0 },            /* r1 */
  { 0, 1, 0, 0, 0 },            /* r3 */
  { 0, 1, 1, 0, 0 },            /* a0 */
  { 0, 1, 1, 0, 0 },            /* a1 */
  { 0, 1, 1, 0, 0 },            /* sb */
  { 0, 1, 1, 0, 0 },            /* fb */
  { 0, 1, 1, 0, 0 },            /* sp */
  { 1, 1, 1, 0, 0 },            /* pc */
  { 0, 0, 0, 0, 0 },            /* fl */
  { 1, 1, 1, 0, 0 },            /* ap */
  { 1, 1, 2, 2, 4 },            /* mem0 */
  { 1, 1, 2, 2, 4 },            /* mem1 */
  { 1, 1, 2, 2, 4 },            /* mem2 */
  { 1, 1, 2, 2, 4 },            /* mem3 */
  { 1, 1, 2, 2, 4 },            /* mem4 */
  { 1, 1, 2, 2, 0 },            /* mem5 */
  { 1, 1, 2, 2, 0 },            /* mem6 */
  { 1, 1, 0, 0, 0 },            /* mem7 */
};
483
 
484
/* Implements CONDITIONAL_REGISTER_USAGE.  We adjust the number of
485
   available memregs, and select which registers need to be preserved
486
   across calls based on the chip family.  */
487
 
488
void
489
m32c_conditional_register_usage (void)
490
{
491
  int i;
492
 
493
  if (0 <= target_memregs && target_memregs <= 16)
494
    {
495
      /* The command line option is bytes, but our "registers" are
496
         16-bit words.  */
497
      for (i = target_memregs/2; i < 8; i++)
498
        {
499
          fixed_regs[MEM0_REGNO + i] = 1;
500
          CLEAR_HARD_REG_BIT (reg_class_contents[MEM_REGS], MEM0_REGNO + i);
501
        }
502
    }
503
 
504
  /* M32CM and M32C preserve more registers across function calls.  */
505
  if (TARGET_A24)
506
    {
507
      call_used_regs[R1_REGNO] = 0;
508
      call_used_regs[R2_REGNO] = 0;
509
      call_used_regs[R3_REGNO] = 0;
510
      call_used_regs[A0_REGNO] = 0;
511
      call_used_regs[A1_REGNO] = 0;
512
    }
513
}
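
/* A worked example: with -memregs=4 on the command line,
   target_memregs is 4, so the loop above starts at i = 4/2 = 2 and
   marks mem2 through mem7 as fixed, leaving only mem0 and mem1 (two
   16-bit words) for the allocator to use.  */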
514
 
515
/* How Values Fit in Registers */
516
 
517
/* Implements HARD_REGNO_NREGS.  This is complicated by the fact that
518
   different registers are different sizes from each other, *and* may
519
   be different sizes in different chip families.  */
520
int
521
m32c_hard_regno_nregs (int regno, enum machine_mode mode)
522
{
523
  if (regno == FLG_REGNO && mode == CCmode)
524
    return 1;
525
  if (regno >= FIRST_PSEUDO_REGISTER)
526
    return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
527
 
528
  if (regno >= MEM0_REGNO && regno <= MEM7_REGNO)
529
    return (GET_MODE_SIZE (mode) + 1) / 2;
530
 
531
  if (GET_MODE_SIZE (mode) <= 1)
532
    return nregs_table[regno].qi_regs;
533
  if (GET_MODE_SIZE (mode) <= 2)
534
    return nregs_table[regno].hi_regs;
535
  if (regno == A0_REGNO && mode == PSImode && TARGET_A16)
536
    return 2;
537
  if ((GET_MODE_SIZE (mode) <= 3 || mode == PSImode) && TARGET_A24)
538
    return nregs_table[regno].pi_regs;
539
  if (GET_MODE_SIZE (mode) <= 4)
540
    return nregs_table[regno].si_regs;
541
  if (GET_MODE_SIZE (mode) <= 8)
542
    return nregs_table[regno].di_regs;
543
  return 0;
544
}
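
/* Worked examples against nregs_table: r0 in SImode needs
   nregs_table[R0_REGNO].si_regs == 2 hard registers (r2r0); a0 in
   HImode needs 1; a0 in SImode needs 0, meaning SImode simply does not
   fit in an address register, which m32c_hard_regno_ok below turns
   into "not OK".  */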
545
 
546
/* Implements HARD_REGNO_MODE_OK.  The above function does the work
547
   already; just test its return value.  */
548
int
549
m32c_hard_regno_ok (int regno, enum machine_mode mode)
550
{
551
  return m32c_hard_regno_nregs (regno, mode) != 0;
552
}
553
 
554
/* Implements MODES_TIEABLE_P.  In general, modes aren't tieable since
555
   registers are all different sizes.  However, since most modes are
556
   bigger than our registers anyway, it's easier to implement this
557
   function that way, leaving QImode as the only unique case.  */
558
int
559
m32c_modes_tieable_p (enum machine_mode m1, enum machine_mode m2)
560
{
561
  if (GET_MODE_SIZE (m1) == GET_MODE_SIZE (m2))
562
    return 1;
563
 
564
#if 0
565
  if (m1 == QImode || m2 == QImode)
566
    return 0;
567
#endif
568
 
569
  return 1;
570
}
571
 
572
/* Register Classes */
573
 
574
/* Implements REGNO_REG_CLASS.  */
575
enum machine_mode
576
m32c_regno_reg_class (int regno)
577
{
578
  switch (regno)
579
    {
580
    case R0_REGNO:
581
      return R0_REGS;
582
    case R1_REGNO:
583
      return R1_REGS;
584
    case R2_REGNO:
585
      return R2_REGS;
586
    case R3_REGNO:
587
      return R3_REGS;
588
    case A0_REGNO:
589
    case A1_REGNO:
590
      return A_REGS;
591
    case SB_REGNO:
592
      return SB_REGS;
593
    case FB_REGNO:
594
      return FB_REGS;
595
    case SP_REGNO:
596
      return SP_REGS;
597
    case FLG_REGNO:
598
      return FLG_REGS;
599
    default:
600
      if (IS_MEM_REGNO (regno))
601
        return MEM_REGS;
602
      return ALL_REGS;
603
    }
604
}
605
 
606
/* Implements REG_CLASS_FROM_CONSTRAINT.  Note that some constraints only match
607
   for certain chip families.  */
608
int
609
m32c_reg_class_from_constraint (char c ATTRIBUTE_UNUSED, const char *s)
610
{
611
  if (memcmp (s, "Rsp", 3) == 0)
612
    return SP_REGS;
613
  if (memcmp (s, "Rfb", 3) == 0)
614
    return FB_REGS;
615
  if (memcmp (s, "Rsb", 3) == 0)
616
    return SB_REGS;
617
  if (memcmp (s, "Rcr", 3) == 0)
618
    return TARGET_A16 ? CR_REGS : NO_REGS;
619
  if (memcmp (s, "Rcl", 3) == 0)
620
    return TARGET_A24 ? CR_REGS : NO_REGS;
621
  if (memcmp (s, "R0w", 3) == 0)
622
    return R0_REGS;
623
  if (memcmp (s, "R1w", 3) == 0)
624
    return R1_REGS;
625
  if (memcmp (s, "R2w", 3) == 0)
626
    return R2_REGS;
627
  if (memcmp (s, "R3w", 3) == 0)
628
    return R3_REGS;
629
  if (memcmp (s, "R02", 3) == 0)
630
    return R02_REGS;
631
  if (memcmp (s, "R03", 3) == 0)
632
    return R03_REGS;
633
  if (memcmp (s, "Rdi", 3) == 0)
634
    return DI_REGS;
635
  if (memcmp (s, "Rhl", 3) == 0)
636
    return HL_REGS;
637
  if (memcmp (s, "R23", 3) == 0)
638
    return R23_REGS;
639
  if (memcmp (s, "Ra0", 3) == 0)
640
    return A0_REGS;
641
  if (memcmp (s, "Ra1", 3) == 0)
642
    return A1_REGS;
643
  if (memcmp (s, "Raa", 3) == 0)
644
    return A_REGS;
645
  if (memcmp (s, "Raw", 3) == 0)
646
    return TARGET_A16 ? A_REGS : NO_REGS;
647
  if (memcmp (s, "Ral", 3) == 0)
648
    return TARGET_A24 ? A_REGS : NO_REGS;
649
  if (memcmp (s, "Rqi", 3) == 0)
650
    return QI_REGS;
651
  if (memcmp (s, "Rad", 3) == 0)
652
    return AD_REGS;
653
  if (memcmp (s, "Rsi", 3) == 0)
654
    return SI_REGS;
655
  if (memcmp (s, "Rhi", 3) == 0)
656
    return HI_REGS;
657
  if (memcmp (s, "Rhc", 3) == 0)
658
    return HC_REGS;
659
  if (memcmp (s, "Rra", 3) == 0)
660
    return RA_REGS;
661
  if (memcmp (s, "Rfl", 3) == 0)
662
    return FLG_REGS;
663
  if (memcmp (s, "Rmm", 3) == 0)
664
    {
665
      if (fixed_regs[MEM0_REGNO])
666
        return NO_REGS;
667
      return MEM_REGS;
668
    }
669
 
670
  /* PSImode registers - i.e. whatever can hold a pointer.  */
671
  if (memcmp (s, "Rpi", 3) == 0)
672
    {
673
      if (TARGET_A16)
674
        return HI_REGS;
675
      else
676
        return RA_REGS; /* r2r0 and r3r1 can hold pointers.  */
677
    }
678
 
679
  /* We handle this one as an EXTRA_CONSTRAINT.  */
680
  if (memcmp (s, "Rpa", 3) == 0)
681
    return NO_REGS;
682
 
683
  if (*s == 'R')
684
    {
685
      fprintf(stderr, "unrecognized R constraint: %.3s\n", s);
686
      gcc_unreachable();
687
    }
688
 
689
  return NO_REGS;
690
}
691
 
692
/* Implements REGNO_OK_FOR_BASE_P.  */
693
int
694
m32c_regno_ok_for_base_p (int regno)
695
{
696
  if (regno == A0_REGNO
697
      || regno == A1_REGNO || regno >= FIRST_PSEUDO_REGISTER)
698
    return 1;
699
  return 0;
700
}
701
 
702
#define DEBUG_RELOAD 0
703
 
704
/* Implements PREFERRED_RELOAD_CLASS.  In general, prefer general
705
   registers of the appropriate size.  */
706
int
707
m32c_preferred_reload_class (rtx x, int rclass)
708
{
709
  int newclass = rclass;
710
 
711
#if DEBUG_RELOAD
712
  fprintf (stderr, "\npreferred_reload_class for %s is ",
713
           class_names[rclass]);
714
#endif
715
  if (rclass == NO_REGS)
716
    rclass = GET_MODE (x) == QImode ? HL_REGS : R03_REGS;
717
 
718
  if (classes_intersect (rclass, CR_REGS))
719
    {
720
      switch (GET_MODE (x))
721
        {
722
        case QImode:
723
          newclass = HL_REGS;
724
          break;
725
        default:
726
          /*      newclass = HI_REGS; */
727
          break;
728
        }
729
    }
730
 
731
  else if (newclass == QI_REGS && GET_MODE_SIZE (GET_MODE (x)) > 2)
732
    newclass = SI_REGS;
733
  else if (GET_MODE_SIZE (GET_MODE (x)) > 4
734
           && ~class_contents[rclass][0] & 0x000f)
735
    newclass = DI_REGS;
736
 
737
  rclass = reduce_class (rclass, newclass, rclass);
738
 
739
  if (GET_MODE (x) == QImode)
740
    rclass = reduce_class (rclass, HL_REGS, rclass);
741
 
742
#if DEBUG_RELOAD
743
  fprintf (stderr, "%s\n", class_names[rclass]);
744
  debug_rtx (x);
745
 
746
  if (GET_CODE (x) == MEM
747
      && GET_CODE (XEXP (x, 0)) == PLUS
748
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
749
    fprintf (stderr, "Glorm!\n");
750
#endif
751
  return rclass;
752
}
753
 
754
/* Implements PREFERRED_OUTPUT_RELOAD_CLASS.  */
755
int
756
m32c_preferred_output_reload_class (rtx x, int rclass)
757
{
758
  return m32c_preferred_reload_class (x, rclass);
759
}
760
 
761
/* Implements LIMIT_RELOAD_CLASS.  We basically want to avoid using
762
   address registers for reloads since they're needed for address
763
   reloads.  */
764
int
765
m32c_limit_reload_class (enum machine_mode mode, int rclass)
766
{
767
#if DEBUG_RELOAD
768
  fprintf (stderr, "limit_reload_class for %s: %s ->",
769
           mode_name[mode], class_names[rclass]);
770
#endif
771
 
772
  if (mode == QImode)
773
    rclass = reduce_class (rclass, HL_REGS, rclass);
774
  else if (mode == HImode)
775
    rclass = reduce_class (rclass, HI_REGS, rclass);
776
  else if (mode == SImode)
777
    rclass = reduce_class (rclass, SI_REGS, rclass);
778
 
779
  if (rclass != A_REGS)
780
    rclass = reduce_class (rclass, DI_REGS, rclass);
781
 
782
#if DEBUG_RELOAD
783
  fprintf (stderr, " %s\n", class_names[rclass]);
784
#endif
785
  return rclass;
786
}
787
 
788
/* Implements SECONDARY_RELOAD_CLASS.  QImode values must be reloaded in
789
   r0 or r1, as those are the only real QImode registers.  CR regs get
790
   reloaded through appropriately sized general or address
791
   registers.  */
792
int
793
m32c_secondary_reload_class (int rclass, enum machine_mode mode, rtx x)
794
{
795
  int cc = class_contents[rclass][0];
796
#if DEBUG0
797
  fprintf (stderr, "\nsecondary reload class %s %s\n",
798
           class_names[rclass], mode_name[mode]);
799
  debug_rtx (x);
800
#endif
801
  if (mode == QImode
802
      && GET_CODE (x) == MEM && (cc & ~class_contents[R23_REGS][0]) == 0)
803
    return QI_REGS;
804
  if (classes_intersect (rclass, CR_REGS)
805
      && GET_CODE (x) == REG
806
      && REGNO (x) >= SB_REGNO && REGNO (x) <= SP_REGNO)
807
    return TARGET_A16 ? HI_REGS : A_REGS;
808
  return NO_REGS;
809
}
810
 
811
/* Implements CLASS_LIKELY_SPILLED_P.  A_REGS is needed for address
812
   reloads.  */
813
int
814
m32c_class_likely_spilled_p (int regclass)
815
{
816
  if (regclass == A_REGS)
817
    return 1;
818
  return reg_class_size[regclass] == 1;
819
}
820
 
821
/* Implements CLASS_MAX_NREGS.  We calculate this according to its
822
   documented meaning, to avoid potential inconsistencies with actual
823
   class definitions.  */
824
int
825
m32c_class_max_nregs (int regclass, enum machine_mode mode)
826
{
827
  int rn, max = 0;
828
 
829
  for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
830
    if (class_contents[regclass][0] & (1 << rn))
831
      {
832
        int n = m32c_hard_regno_nregs (rn, mode);
833
        if (max < n)
834
          max = n;
835
      }
836
  return max;
837
}
838
 
839
/* Implements CANNOT_CHANGE_MODE_CLASS.  Only r0 and r1 can change to
840
   QI (r0l, r1l) because the chip doesn't support QI ops on other
841
   registers (well, it does on a0/a1 but if we let gcc do that, reload
842
   suffers).  Otherwise, we allow changes to larger modes.  */
843
int
844
m32c_cannot_change_mode_class (enum machine_mode from,
845
                               enum machine_mode to, int rclass)
846
{
847
#if DEBUG0
848
  fprintf (stderr, "cannot change from %s to %s in %s\n",
849
           mode_name[from], mode_name[to], class_names[rclass]);
850
#endif
851
 
852
  if (to == QImode)
853
    return (class_contents[rclass][0] & 0x1ffa);
854
 
855
  if (class_contents[rclass][0] & 0x0005 /* r0, r1 */
856
      && GET_MODE_SIZE (from) > 1)
857
    return 0;
858
  if (GET_MODE_SIZE (from) > 2) /* all other regs */
859
    return 0;
860
 
861
  return 1;
862
}
863
 
864
/* Helpers for the rest of the file.  */
865
/* TRUE if the rtx is a REG rtx for the given register.  */
866
#define IS_REG(rtx,regno) (GET_CODE (rtx) == REG \
867
                           && REGNO (rtx) == regno)
868
/* TRUE if the rtx is a pseudo - specifically, one we can use as a
869
   base register in address calculations (hence the "strict"
870
   argument).  */
871
#define IS_PSEUDO(rtx,strict) (!strict && GET_CODE (rtx) == REG \
872
                               && (REGNO (rtx) == AP_REGNO \
873
                                   || REGNO (rtx) >= FIRST_PSEUDO_REGISTER))
874
 
875
/* Implements CONST_OK_FOR_CONSTRAINT_P.  Currently, all constant
876
   constraints start with 'I', with the next two characters indicating
877
   the type and size of the range allowed.  */
878
int
879
m32c_const_ok_for_constraint_p (HOST_WIDE_INT value,
880
                                char c ATTRIBUTE_UNUSED, const char *str)
881
{
882
  /* s=signed u=unsigned n=nonzero m=minus l=log2able,
883
     [sun] bits [SUN] bytes, p=pointer size
884
     I[-0-9][0-9] matches that number */
885
  if (memcmp (str, "Is3", 3) == 0)
886
    {
887
      return (-8 <= value && value <= 7);
888
    }
889
  if (memcmp (str, "IS1", 3) == 0)
890
    {
891
      return (-128 <= value && value <= 127);
892
    }
893
  if (memcmp (str, "IS2", 3) == 0)
894
    {
895
      return (-32768 <= value && value <= 32767);
896
    }
897
  if (memcmp (str, "IU2", 3) == 0)
898
    {
899
      return (0 <= value && value <= 65535);
900
    }
901
  if (memcmp (str, "IU3", 3) == 0)
902
    {
903
      return (0 <= value && value <= 0x00ffffff);
904
    }
905
  if (memcmp (str, "In4", 3) == 0)
906
    {
907
      return (-8 <= value && value && value <= 8);
908
    }
909
  if (memcmp (str, "In5", 3) == 0)
910
    {
911
      return (-16 <= value && value && value <= 16);
912
    }
913
  if (memcmp (str, "In6", 3) == 0)
914
    {
915
      return (-32 <= value && value && value <= 32);
916
    }
917
  if (memcmp (str, "IM2", 3) == 0)
918
    {
919
      return (-65536 <= value && value && value <= -1);
920
    }
921
  if (memcmp (str, "Ilb", 3) == 0)
922
    {
923
      int b = exact_log2 (value);
924
      return (b >= 0 && b <= 7);
925
    }
926
  if (memcmp (str, "Imb", 3) == 0)
927
    {
928
      int b = exact_log2 ((value ^ 0xff) & 0xff);
929
      return (b >= 0 && b <= 7);
930
    }
931
  if (memcmp (str, "Ilw", 3) == 0)
932
    {
933
      int b = exact_log2 (value);
934
      return (b >= 0 && b <= 15);
935
    }
936
  if (memcmp (str, "Imw", 3) == 0)
937
    {
938
      int b = exact_log2 ((value ^ 0xffff) & 0xffff);
939
      return (b >= 0 && b <= 15);
940
    }
941
  if (memcmp (str, "I00", 3) == 0)
942
    {
943
      return (value == 0);
944
    }
945
  return 0;
946
}
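
/* Examples: the constant 5 satisfies "Is3" (a signed 4 bit field,
   -8..7); 40000 satisfies "IU2" but not "IS2"; 64 satisfies "Ilb"
   since exact_log2 (64) == 6, i.e. it is a single bit within a byte;
   and 0 matches "I00" and the plain range constraints but never the
   "In" variants, which require a nonzero value.  */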
947
 
948
/* Implements EXTRA_CONSTRAINT_STR (see next function too).  'S' is
949
   for memory constraints, plus "Rpa" for PARALLEL rtx's we use for
950
   call return values.  */
951
int
952
m32c_extra_constraint_p2 (rtx value, char c ATTRIBUTE_UNUSED, const char *str)
953
{
954
  encode_pattern (value);
955
  if (memcmp (str, "Sd", 2) == 0)
956
    {
957
      /* This is the common "src/dest" address */
958
      rtx r;
959
      if (GET_CODE (value) == MEM && CONSTANT_P (XEXP (value, 0)))
960
        return 1;
961
      if (RTX_IS ("ms") || RTX_IS ("m+si"))
962
        return 1;
963
      if (RTX_IS ("m++rii"))
964
        {
965
          if (REGNO (patternr[3]) == FB_REGNO
966
              && INTVAL (patternr[4]) == 0)
967
            return 1;
968
        }
969
      if (RTX_IS ("mr"))
970
        r = patternr[1];
971
      else if (RTX_IS ("m+ri") || RTX_IS ("m+rs") || RTX_IS ("m+r+si"))
972
        r = patternr[2];
973
      else
974
        return 0;
975
      if (REGNO (r) == SP_REGNO)
976
        return 0;
977
      return m32c_legitimate_address_p (GET_MODE (value), XEXP (value, 0), 1);
978
    }
979
  else if (memcmp (str, "Sa", 2) == 0)
980
    {
981
      rtx r;
982
      if (RTX_IS ("mr"))
983
        r = patternr[1];
984
      else if (RTX_IS ("m+ri"))
985
        r = patternr[2];
986
      else
987
        return 0;
988
      return (IS_REG (r, A0_REGNO) || IS_REG (r, A1_REGNO));
989
    }
990
  else if (memcmp (str, "Si", 2) == 0)
991
    {
992
      return (RTX_IS ("mi") || RTX_IS ("ms") || RTX_IS ("m+si"));
993
    }
994
  else if (memcmp (str, "Ss", 2) == 0)
995
    {
996
      return ((RTX_IS ("mr")
997
               && (IS_REG (patternr[1], SP_REGNO)))
998
              || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SP_REGNO))));
999
    }
1000
  else if (memcmp (str, "Sf", 2) == 0)
1001
    {
1002
      return ((RTX_IS ("mr")
1003
               && (IS_REG (patternr[1], FB_REGNO)))
1004
              || (RTX_IS ("m+ri") && (IS_REG (patternr[2], FB_REGNO))));
1005
    }
1006
  else if (memcmp (str, "Sb", 2) == 0)
1007
    {
1008
      return ((RTX_IS ("mr")
1009
               && (IS_REG (patternr[1], SB_REGNO)))
1010
              || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SB_REGNO))));
1011
    }
1012
  else if (memcmp (str, "Sp", 2) == 0)
1013
    {
1014
      /* Absolute addresses 0..0x1fff used for bit addressing (I/O ports) */
1015
      return (RTX_IS ("mi")
1016
              && !(INTVAL (patternr[1]) & ~0x1fff));
1017
    }
1018
  else if (memcmp (str, "S1", 2) == 0)
1019
    {
1020
      return r1h_operand (value, QImode);
1021
    }
1022
 
1023
  gcc_assert (str[0] != 'S');
1024
 
1025
  if (memcmp (str, "Rpa", 2) == 0)
1026
    return GET_CODE (value) == PARALLEL;
1027
 
1028
  return 0;
1029
}
1030
 
1031
/* This is for when we're debugging the above.  */
1032
int
1033
m32c_extra_constraint_p (rtx value, char c, const char *str)
1034
{
1035
  int rv = m32c_extra_constraint_p2 (value, c, str);
1036
#if DEBUG0
1037
  fprintf (stderr, "\nconstraint %.*s: %d\n", CONSTRAINT_LEN (c, str), str,
1038
           rv);
1039
  debug_rtx (value);
1040
#endif
1041
  return rv;
1042
}
1043
 
1044
/* Implements EXTRA_MEMORY_CONSTRAINT.  Currently, we only use strings
1045
   starting with 'S'.  */
1046
int
1047
m32c_extra_memory_constraint (char c, const char *str ATTRIBUTE_UNUSED)
1048
{
1049
  return c == 'S';
1050
}
1051
 
1052
/* Implements EXTRA_ADDRESS_CONSTRAINT.  We reserve 'A' strings for these,
1053
   but don't currently define any.  */
1054
int
1055
m32c_extra_address_constraint (char c, const char *str ATTRIBUTE_UNUSED)
1056
{
1057
  return c == 'A';
1058
}
1059
 
1060
/* STACK AND CALLING */
1061
 
1062
/* Frame Layout */
1063
 
1064
/* Implements RETURN_ADDR_RTX.  Note that R8C and M16C push 24 bits
1065
   (yes, THREE bytes) onto the stack for the return address, but we
1066
   don't support pointers bigger than 16 bits on those chips.  This
1067
   will likely wreak havoc with exception unwinding.  FIXME.  */
1068
rtx
1069
m32c_return_addr_rtx (int count)
1070
{
1071
  enum machine_mode mode;
1072
  int offset;
1073
  rtx ra_mem;
1074
 
1075
  if (count)
1076
    return NULL_RTX;
1077
  /* we want 2[$fb] */
1078
 
1079
  if (TARGET_A24)
1080
    {
1081
      mode = SImode;
1082
      offset = 4;
1083
    }
1084
  else
1085
    {
1086
      /* FIXME: it's really 3 bytes */
1087
      mode = HImode;
1088
      offset = 2;
1089
    }
1090
 
1091
  ra_mem =
1092
    gen_rtx_MEM (mode, plus_constant (gen_rtx_REG (Pmode, FP_REGNO), offset));
1093
  return copy_to_mode_reg (mode, ra_mem);
1094
}
1095
 
1096
/* Implements INCOMING_RETURN_ADDR_RTX.  See comment above.  */
1097
rtx
1098
m32c_incoming_return_addr_rtx (void)
1099
{
1100
  /* we want [sp] */
1101
  return gen_rtx_MEM (PSImode, gen_rtx_REG (PSImode, SP_REGNO));
1102
}
1103
 
1104
/* Exception Handling Support */
1105
 
1106
/* Implements EH_RETURN_DATA_REGNO.  Choose registers able to hold
1107
   pointers.  */
1108
int
1109
m32c_eh_return_data_regno (int n)
1110
{
1111
  switch (n)
1112
    {
1113
    case 0:
1114
      return A0_REGNO;
1115
    case 1:
1116
      return A1_REGNO;
1117
    default:
1118
      return INVALID_REGNUM;
1119
    }
1120
}
1121
 
1122
/* Implements EH_RETURN_STACKADJ_RTX.  Saved and used later in
1123
   m32c_emit_eh_epilogue.  */
1124
rtx
1125
m32c_eh_return_stackadj_rtx (void)
1126
{
1127
  if (!cfun->machine->eh_stack_adjust)
1128
    {
1129
      rtx sa;
1130
 
1131
      sa = gen_reg_rtx (Pmode);
1132
      cfun->machine->eh_stack_adjust = sa;
1133
    }
1134
  return cfun->machine->eh_stack_adjust;
1135
}
1136
 
1137
/* Registers That Address the Stack Frame */
1138
 
1139
/* Implements DWARF_FRAME_REGNUM and DBX_REGISTER_NUMBER.  Note that
1140
   the original spec called for dwarf numbers to vary with register
1141
   width as well, for example, r0l, r0, and r2r0 would each have
1142
   different dwarf numbers.  GCC doesn't support this, and we don't do
1143
   it, and gdb seems to like it this way anyway.  */
1144
unsigned int
1145
m32c_dwarf_frame_regnum (int n)
1146
{
1147
  switch (n)
1148
    {
1149
    case R0_REGNO:
1150
      return 5;
1151
    case R1_REGNO:
1152
      return 6;
1153
    case R2_REGNO:
1154
      return 7;
1155
    case R3_REGNO:
1156
      return 8;
1157
    case A0_REGNO:
1158
      return 9;
1159
    case A1_REGNO:
1160
      return 10;
1161
    case FB_REGNO:
1162
      return 11;
1163
    case SB_REGNO:
1164
      return 19;
1165
 
1166
    case SP_REGNO:
1167
      return 12;
1168
    case PC_REGNO:
1169
      return 13;
1170
    default:
1171
      return DWARF_FRAME_REGISTERS + 1;
1172
    }
1173
}
1174
 
1175
/* The frame looks like this:
1176
 
1177
   ap -> +------------------------------
1178
         | Return address (3 or 4 bytes)
1179
         | Saved FB (2 or 4 bytes)
1180
   fb -> +------------------------------
1181
         | local vars
1182
         | register saves fb
1183
         |        through r0 as needed
1184
   sp -> +------------------------------
1185
*/
1186
 
1187
/* We use this to wrap all emitted insns in the prologue.  */
1188
static rtx
1189
F (rtx x)
1190
{
1191
  RTX_FRAME_RELATED_P (x) = 1;
1192
  return x;
1193
}
1194
 
1195
/* This maps register numbers to the PUSHM/POPM bitfield, and tells us
1196
   how much the stack pointer moves for each, for each cpu family.  */
1197
static struct
1198
{
1199
  int reg1;
1200
  int bit;
1201
  int a16_bytes;
1202
  int a24_bytes;
1203
} pushm_info[] =
1204
{
1205
  /* These are in reverse push (nearest-to-sp) order.  */
1206
  { R0_REGNO, 0x80, 2, 2 },
1207
  { R1_REGNO, 0x40, 2, 2 },
1208
  { R2_REGNO, 0x20, 2, 2 },
1209
  { R3_REGNO, 0x10, 2, 2 },
1210
  { A0_REGNO, 0x08, 2, 4 },
1211
  { A1_REGNO, 0x04, 2, 4 },
1212
  { SB_REGNO, 0x02, 2, 4 },
1213
  { FB_REGNO, 0x01, 2, 4 }
1214
};
1215
 
1216
#define PUSHM_N (sizeof(pushm_info)/sizeof(pushm_info[0]))
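
/* For illustration: if only r0 and r3 need saving, m32c_pushm_popm
   below accumulates reg_mask = 0x80 | 0x10 = 0x90 and, on TARGET_A16,
   byte_count = 2 + 2 = 4, so a single pushm moves $sp down four bytes
   with r0 ending up nearest the new stack pointer.  */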
1217
 
1218
/* Returns TRUE if we need to save/restore the given register.  We
1219
   save everything for exception handlers, so that any register can be
1220
   unwound.  For interrupt handlers, we save everything if the handler
1221
   calls something else (because we don't know what *that* function
1222
   might do), but try to be a bit smarter if the handler is a leaf
1223
   function.  We always save $a0, though, because we use that in the
1224
   epilogue to copy $fb to $sp.  */
1225
static int
1226
need_to_save (int regno)
1227
{
1228
  if (fixed_regs[regno])
1229
    return 0;
1230
  if (cfun->calls_eh_return)
1231
    return 1;
1232
  if (regno == FP_REGNO)
1233
    return 0;
1234
  if (cfun->machine->is_interrupt
1235
      && (!cfun->machine->is_leaf || regno == A0_REGNO))
1236
    return 1;
1237
  if (regs_ever_live[regno]
1238
      && (!call_used_regs[regno] || cfun->machine->is_interrupt))
1239
    return 1;
1240
  return 0;
1241
}
1242
 
1243
/* This function contains all the intelligence about saving and
1244
   restoring registers.  It always figures out the register save set.
1245
   When called with PP_justcount, it merely returns the size of the
1246
   save set (for eliminating the frame pointer, for example).  When
1247
   called with PP_pushm or PP_popm, it emits the appropriate
1248
   instructions for saving (pushm) or restoring (popm) the
1249
   registers.  */
1250
static int
1251
m32c_pushm_popm (Push_Pop_Type ppt)
1252
{
1253
  int reg_mask = 0;
1254
  int byte_count = 0, bytes;
1255
  int i;
1256
  rtx dwarf_set[PUSHM_N];
1257
  int n_dwarfs = 0;
1258
  int nosave_mask = 0;
1259
 
1260
  if (cfun->return_rtx
1261
      && GET_CODE (cfun->return_rtx) == PARALLEL
1262
      && !(cfun->calls_eh_return || cfun->machine->is_interrupt))
1263
    {
1264
      rtx exp = XVECEXP (cfun->return_rtx, 0, 0);
1265
      rtx rv = XEXP (exp, 0);
1266
      int rv_bytes = GET_MODE_SIZE (GET_MODE (rv));
1267
 
1268
      if (rv_bytes > 2)
1269
        nosave_mask |= 0x20;    /* PSI, SI */
1270
      else
1271
        nosave_mask |= 0xf0;    /* DF */
1272
      if (rv_bytes > 4)
1273
        nosave_mask |= 0x50;    /* DI */
1274
    }
1275
 
1276
  for (i = 0; i < (int) PUSHM_N; i++)
1277
    {
1278
      /* Skip if neither register needs saving.  */
1279
      if (!need_to_save (pushm_info[i].reg1))
1280
        continue;
1281
 
1282
      if (pushm_info[i].bit & nosave_mask)
1283
        continue;
1284
 
1285
      reg_mask |= pushm_info[i].bit;
1286
      bytes = TARGET_A16 ? pushm_info[i].a16_bytes : pushm_info[i].a24_bytes;
1287
 
1288
      if (ppt == PP_pushm)
1289
        {
1290
          enum machine_mode mode = (bytes == 2) ? HImode : SImode;
1291
          rtx addr;
1292
 
1293
          /* Always use stack_pointer_rtx instead of calling
1294
             gen_rtx_REG ourselves.  Code elsewhere in GCC assumes
1295
             that there is a single rtx representing the stack pointer,
1296
             namely stack_pointer_rtx, and uses == to recognize it.  */
1297
          addr = stack_pointer_rtx;
1298
 
1299
          if (byte_count != 0)
1300
            addr = gen_rtx_PLUS (GET_MODE (addr), addr, GEN_INT (byte_count));
1301
 
1302
          dwarf_set[n_dwarfs++] =
1303
            gen_rtx_SET (VOIDmode,
1304
                         gen_rtx_MEM (mode, addr),
1305
                         gen_rtx_REG (mode, pushm_info[i].reg1));
1306
          F (dwarf_set[n_dwarfs - 1]);
1307
 
1308
        }
1309
      byte_count += bytes;
1310
    }
1311
 
1312
  if (cfun->machine->is_interrupt)
1313
    {
1314
      cfun->machine->intr_pushm = reg_mask & 0xfe;
1315
      reg_mask = 0;
1316
      byte_count = 0;
1317
    }
1318
 
1319
  if (cfun->machine->is_interrupt)
1320
    for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1321
      if (need_to_save (i))
1322
        {
1323
          byte_count += 2;
1324
          cfun->machine->intr_pushmem[i - MEM0_REGNO] = 1;
1325
        }
1326
 
1327
  if (ppt == PP_pushm && byte_count)
1328
    {
1329
      rtx note = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (n_dwarfs + 1));
1330
      rtx pushm;
1331
 
1332
      if (reg_mask)
1333
        {
1334
          XVECEXP (note, 0, 0)
1335
            = gen_rtx_SET (VOIDmode,
1336
                           stack_pointer_rtx,
1337
                           gen_rtx_PLUS (GET_MODE (stack_pointer_rtx),
1338
                                         stack_pointer_rtx,
1339
                                         GEN_INT (-byte_count)));
1340
          F (XVECEXP (note, 0, 0));
1341
 
1342
          for (i = 0; i < n_dwarfs; i++)
1343
            XVECEXP (note, 0, i + 1) = dwarf_set[i];
1344
 
1345
          pushm = F (emit_insn (gen_pushm (GEN_INT (reg_mask))));
1346
 
1347
          REG_NOTES (pushm) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, note,
1348
                                                 REG_NOTES (pushm));
1349
        }
1350
 
1351
      if (cfun->machine->is_interrupt)
1352
        for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1353
          if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1354
            {
1355
              if (TARGET_A16)
1356
                pushm = emit_insn (gen_pushhi_16 (gen_rtx_REG (HImode, i)));
1357
              else
1358
                pushm = emit_insn (gen_pushhi_24 (gen_rtx_REG (HImode, i)));
1359
              F (pushm);
1360
            }
1361
    }
1362
  if (ppt == PP_popm && byte_count)
1363
    {
1364
      if (cfun->machine->is_interrupt)
1365
        for (i = MEM7_REGNO; i >= MEM0_REGNO; i--)
1366
          if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1367
            {
1368
              if (TARGET_A16)
1369
                emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, i)));
1370
              else
1371
                emit_insn (gen_pophi_24 (gen_rtx_REG (HImode, i)));
1372
            }
1373
      if (reg_mask)
1374
        emit_insn (gen_popm (GEN_INT (reg_mask)));
1375
    }
1376
 
1377
  return byte_count;
1378
}
1379
 
1380
/* Implements INITIAL_ELIMINATION_OFFSET.  See the comment above that
1381
   diagrams our call frame.  */
1382
int
1383
m32c_initial_elimination_offset (int from, int to)
1384
{
1385
  int ofs = 0;
1386
 
1387
  if (from == AP_REGNO)
1388
    {
1389
      if (TARGET_A16)
1390
        ofs += 5;
1391
      else
1392
        ofs += 8;
1393
    }
1394
 
1395
  if (to == SP_REGNO)
1396
    {
1397
      ofs += m32c_pushm_popm (PP_justcount);
1398
      ofs += get_frame_size ();
1399
    }
1400
 
1401
  /* Account for push rounding.  */
1402
  if (TARGET_A24)
1403
    ofs = (ofs + 1) & ~1;
1404
#if DEBUG0
1405
  fprintf (stderr, "initial_elimination_offset from=%d to=%d, ofs=%d\n", from,
1406
           to, ofs);
1407
#endif
1408
  return ofs;
1409
}
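
/* A worked example: eliminating $ap into $fb on a TARGET_A16 part
   yields an offset of 5 -- the 3 byte return address plus the 2 byte
   saved $fb shown in the frame diagram above; eliminating down to $sp
   additionally adds the pushm save area and the local frame size.  */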
1410
 
1411
/* Passing Function Arguments on the Stack */
1412
 
1413
#undef TARGET_PROMOTE_PROTOTYPES
1414
#define TARGET_PROMOTE_PROTOTYPES m32c_promote_prototypes
1415
static bool
1416
m32c_promote_prototypes (tree fntype ATTRIBUTE_UNUSED)
1417
{
1418
  return 0;
1419
}
1420
 
1421
/* Implements PUSH_ROUNDING.  The R8C and M16C have byte stacks, the
1422
   M32C has word stacks.  */
1423
int
1424
m32c_push_rounding (int n)
1425
{
1426
  if (TARGET_R8C || TARGET_M16C)
1427
    return n;
1428
  return (n + 1) & ~1;
1429
}
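
/* So pushing a single byte costs one byte of stack on the R8C/M16C,
   which have byte-aligned stacks, but (1 + 1) & ~1 == 2 bytes on the
   word-aligned M32C.  */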
1430
 
1431
/* Passing Arguments in Registers */
1432
 
1433
/* Implements FUNCTION_ARG.  Arguments are passed partly in registers,
1434
   partly on stack.  If our function returns a struct, a pointer to a
1435
   buffer for it is at the top of the stack (last thing pushed).  The
1436
   first few real arguments may be in registers as follows:
1437
 
1438
   R8C/M16C:    arg1 in r1 if it's QI or HI (else it's pushed on stack)
1439
                arg2 in r2 if it's HI (else pushed on stack)
1440
                rest on stack
1441
   M32C:        arg1 in r0 if it's QI or HI (else it's pushed on stack)
1442
                rest on stack
1443
 
1444
   Structs are not passed in registers, even if they fit.  Only
1445
   integer and pointer types are passed in registers.
1446
 
1447
   Note that when arg1 doesn't fit in r1, arg2 may still be passed in
1448
   r2 if it fits.  */
1449
rtx
1450
m32c_function_arg (CUMULATIVE_ARGS * ca,
1451
                   enum machine_mode mode, tree type, int named)
1452
{
1453
  /* Can return a reg, parallel, or 0 for stack */
1454
  rtx rv = NULL_RTX;
1455
#if DEBUG0
1456
  fprintf (stderr, "func_arg %d (%s, %d)\n",
1457
           ca->parm_num, mode_name[mode], named);
1458
  debug_tree (type);
1459
#endif
1460
 
1461
  if (mode == VOIDmode)
1462
    return GEN_INT (0);
1463
 
1464
  if (ca->force_mem || !named)
1465
    {
1466
#if DEBUG0
1467
      fprintf (stderr, "func arg: force %d named %d, mem\n", ca->force_mem,
1468
               named);
1469
#endif
1470
      return NULL_RTX;
1471
    }
1472
 
1473
  if (type && INTEGRAL_TYPE_P (type) && POINTER_TYPE_P (type))
1474
    return NULL_RTX;
1475
 
1476
  if (type && AGGREGATE_TYPE_P (type))
1477
    return NULL_RTX;
1478
 
1479
  switch (ca->parm_num)
1480
    {
1481
    case 1:
1482
      if (GET_MODE_SIZE (mode) == 1 || GET_MODE_SIZE (mode) == 2)
1483
        rv = gen_rtx_REG (mode, TARGET_A16 ? R1_REGNO : R0_REGNO);
1484
      break;
1485
 
1486
    case 2:
1487
      if (TARGET_A16 && GET_MODE_SIZE (mode) == 2)
1488
        rv = gen_rtx_REG (mode, R2_REGNO);
1489
      break;
1490
    }
1491
 
1492
#if DEBUG0
1493
  debug_rtx (rv);
1494
#endif
1495
  return rv;
1496
}
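
/* An illustrative call on a TARGET_A16 (R8C/M16C) part, following the
   rules above: for  f (char a, int b, long c)  the QImode first
   argument lands in r1, the HImode (16 bit int) second argument lands
   in r2, and the 32 bit third argument is too big for either slot and
   goes on the stack, as does everything past the second parameter.  */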
1497
 
1498
#undef TARGET_PASS_BY_REFERENCE
1499
#define TARGET_PASS_BY_REFERENCE m32c_pass_by_reference
1500
static bool
1501
m32c_pass_by_reference (CUMULATIVE_ARGS * ca ATTRIBUTE_UNUSED,
1502
                        enum machine_mode mode ATTRIBUTE_UNUSED,
1503
                        tree type ATTRIBUTE_UNUSED,
1504
                        bool named ATTRIBUTE_UNUSED)
1505
{
1506
  return 0;
1507
}
1508
 
1509
/* Implements INIT_CUMULATIVE_ARGS.  */
1510
void
1511
m32c_init_cumulative_args (CUMULATIVE_ARGS * ca,
1512
                           tree fntype,
1513
                           rtx libname ATTRIBUTE_UNUSED,
1514
                           tree fndecl,
1515
                           int n_named_args ATTRIBUTE_UNUSED)
1516
{
1517
  if (fntype && aggregate_value_p (TREE_TYPE (fntype), fndecl))
1518
    ca->force_mem = 1;
1519
  else
1520
    ca->force_mem = 0;
1521
  ca->parm_num = 1;
1522
}
1523
 
1524
/* Implements FUNCTION_ARG_ADVANCE.  force_mem is set for functions
1525
   returning structures, so we always reset that.  Otherwise, we only
1526
   need to know the sequence number of the argument to know what to do
1527
   with it.  */
1528
void
1529
m32c_function_arg_advance (CUMULATIVE_ARGS * ca,
1530
                           enum machine_mode mode ATTRIBUTE_UNUSED,
1531
                           tree type ATTRIBUTE_UNUSED,
1532
                           int named ATTRIBUTE_UNUSED)
1533
{
1534
  if (ca->force_mem)
1535
    ca->force_mem = 0;
1536
  else
1537
    ca->parm_num++;
1538
}
1539
 
1540
/* Implements FUNCTION_ARG_REGNO_P.  */
1541
int
1542
m32c_function_arg_regno_p (int r)
1543
{
1544
  if (TARGET_A24)
1545
    return (r == R0_REGNO);
1546
  return (r == R1_REGNO || r == R2_REGNO);
1547
}
1548
 
1549
/* HImode and PSImode are the two "native" modes as far as GCC is
1550
   concerned, but the chips also support a 32 bit mode which is used
1551
   for some opcodes in R8C/M16C and for reset vectors and such.  */
1552
#undef TARGET_VALID_POINTER_MODE
1553
#define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
1554
static bool
1555
m32c_valid_pointer_mode (enum machine_mode mode)
1556
{
1557
  if (mode == HImode
1558
      || mode == PSImode
1559
      || mode == SImode
1560
      )
1561
    return 1;
1562
  return 0;
1563
}
1564
 
1565
/* How Scalar Function Values Are Returned */
1566
 
1567
/* Implements LIBCALL_VALUE.  Most values are returned in $r0, or some
1568
   combination of registers starting there (r2r0 for longs, r3r1r2r0
1569
   for long long, r3r2r1r0 for doubles), except that that ABI
1570
   currently doesn't work because it ends up using all available
1571
   general registers and gcc often can't compile it.  So, instead, we
1572
   return anything bigger than 16 bits in "mem0" (effectively, a
1573
   memory location).  */
1574
rtx
1575
m32c_libcall_value (enum machine_mode mode)
1576
{
1577
  /* return reg or parallel */
1578
#if 0
1579
  /* FIXME: GCC has difficulty returning large values in registers,
1580
     because that ties up most of the general registers and gives the
1581
     register allocator little to work with.  Until we can resolve
1582
     this, large values are returned in memory.  */
1583
  if (mode == DFmode)
1584
    {
1585
      rtx rv;
1586
 
1587
      rv = gen_rtx_PARALLEL (mode, rtvec_alloc (4));
1588
      XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1589
                                              gen_rtx_REG (HImode,
1590
                                                           R0_REGNO),
1591
                                              GEN_INT (0));
1592
      XVECEXP (rv, 0, 1) = gen_rtx_EXPR_LIST (VOIDmode,
1593
                                              gen_rtx_REG (HImode,
1594
                                                           R1_REGNO),
1595
                                              GEN_INT (2));
1596
      XVECEXP (rv, 0, 2) = gen_rtx_EXPR_LIST (VOIDmode,
1597
                                              gen_rtx_REG (HImode,
1598
                                                           R2_REGNO),
1599
                                              GEN_INT (4));
1600
      XVECEXP (rv, 0, 3) = gen_rtx_EXPR_LIST (VOIDmode,
1601
                                              gen_rtx_REG (HImode,
1602
                                                           R3_REGNO),
1603
                                              GEN_INT (6));
1604
      return rv;
1605
    }
1606
 
1607
  if (TARGET_A24 && GET_MODE_SIZE (mode) > 2)
1608
    {
1609
      rtx rv;
1610
 
1611
      rv = gen_rtx_PARALLEL (mode, rtvec_alloc (1));
1612
      XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1613
                                              gen_rtx_REG (mode,
1614
                                                           R0_REGNO),
1615
                                              GEN_INT (0));
1616
      return rv;
1617
    }
1618
#endif
1619
 
1620
  if (GET_MODE_SIZE (mode) > 2)
1621
    return gen_rtx_REG (mode, MEM0_REGNO);
1622
  return gen_rtx_REG (mode, R0_REGNO);
1623
}
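
/* In practice, then, a QImode or HImode scalar comes back in r0, while
   anything wider than two bytes comes back in mem0, which is really a
   memory location provided by m32c-lib1.S rather than a true
   register.  */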
1624
 
1625
/* Implements FUNCTION_VALUE.  Functions and libcalls have the same
1626
   conventions.  */
1627
rtx
1628
m32c_function_value (tree valtype, tree func ATTRIBUTE_UNUSED)
1629
{
1630
  /* return reg or parallel */
1631
  enum machine_mode mode = TYPE_MODE (valtype);
1632
  return m32c_libcall_value (mode);
1633
}
1634
 
1635
/* How Large Values Are Returned */
1636
 
1637
/* We return structures by pushing the address on the stack, even if
1638
   we use registers for the first few "real" arguments.  */
1639
#undef TARGET_STRUCT_VALUE_RTX
1640
#define TARGET_STRUCT_VALUE_RTX m32c_struct_value_rtx
1641
static rtx
1642
m32c_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
1643
                       int incoming ATTRIBUTE_UNUSED)
1644
{
1645
  return 0;
1646
}
1647
 
1648
/* Function Entry and Exit */
1649
 
1650
/* Implements EPILOGUE_USES.  Interrupts restore all registers.  */
1651
int
1652
m32c_epilogue_uses (int regno ATTRIBUTE_UNUSED)
1653
{
1654
  if (cfun->machine->is_interrupt)
1655
    return 1;
1656
  return 0;
1657
}
1658
 
1659
/* Implementing the Varargs Macros */
1660
 
1661
#undef TARGET_STRICT_ARGUMENT_NAMING
1662
#define TARGET_STRICT_ARGUMENT_NAMING m32c_strict_argument_naming
1663
static bool
1664
m32c_strict_argument_naming (CUMULATIVE_ARGS * ca ATTRIBUTE_UNUSED)
1665
{
1666
  return 1;
1667
}
1668
 
1669
/* Trampolines for Nested Functions */
1670
 
1671
/*
1672
   m16c:
1673
   1 0000 75C43412              mov.w   #0x1234,a0
1674
   2 0004 FC000000              jmp.a   label
1675
 
1676
   m32c:
1677
   1 0000 BC563412              mov.l:s #0x123456,a0
1678
   2 0004 CC000000              jmp.a   label
1679
*/
1680
 
1681
/* Implements TRAMPOLINE_SIZE.  */
1682
int
1683
m32c_trampoline_size (void)
1684
{
1685
  /* Allocate extra space so we can avoid the messy shifts when we
1686
     initialize the trampoline; we just write past the end of the
1687
     opcode.  */
1688
  return TARGET_A16 ? 8 : 10;
1689
}
1690
 
1691
/* Implements TRAMPOLINE_ALIGNMENT.  */
1692
int
1693
m32c_trampoline_alignment (void)
1694
{
1695
  return 2;
1696
}
1697
 
1698
/* Implements INITIALIZE_TRAMPOLINE.  */
1699
void
1700
m32c_initialize_trampoline (rtx tramp, rtx function, rtx chainval)
1701
{
1702
#define A0(m,i) gen_rtx_MEM (m, plus_constant (tramp, i))
1703
  if (TARGET_A16)
1704
    {
1705
      /* Note: we subtract a "word" because the moves want signed
1706
         constants, not unsigned constants.  */
1707
      emit_move_insn (A0 (HImode, 0), GEN_INT (0xc475 - 0x10000));
1708
      emit_move_insn (A0 (HImode, 2), chainval);
1709
      emit_move_insn (A0 (QImode, 4), GEN_INT (0xfc - 0x100));
1710
      /* We use 16 bit addresses here, but store the zero to turn it
1711
         into a 24 bit offset.  */
1712
      emit_move_insn (A0 (HImode, 5), function);
1713
      emit_move_insn (A0 (QImode, 7), GEN_INT (0x00));
1714
    }
1715
  else
1716
    {
1717
      /* Note that the PSI moves actually write 4 bytes.  Make sure we
1718
         write stuff out in the right order, and leave room for the
1719
         extra byte at the end.  */
1720
      emit_move_insn (A0 (QImode, 0), GEN_INT (0xbc - 0x100));
1721
      emit_move_insn (A0 (PSImode, 1), chainval);
1722
      emit_move_insn (A0 (QImode, 4), GEN_INT (0xcc - 0x100));
1723
      emit_move_insn (A0 (PSImode, 5), function);
1724
    }
1725
#undef A0
1726
}
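
/* For illustration, the TARGET_A16 case above lays the trampoline out
   byte by byte as

       offset 0-1:  75 C4   the mov.w #imm16,a0 opcode
       offset 2-3:  the static chain value
       offset 4:    FC      the jmp.a opcode
       offset 5-6:  the target function address
       offset 7:    00      pad byte extending the address to 24 bits

   which matches the assembly listing in the comment before
   m32c_trampoline_size() above.  */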
1727
 
1728
/* Implicit Calls to Library Routines */
1729
 
1730
#undef TARGET_INIT_LIBFUNCS
1731
#define TARGET_INIT_LIBFUNCS m32c_init_libfuncs
1732
static void
1733
m32c_init_libfuncs (void)
1734
{
1735
  if (TARGET_A24)
1736
    {
1737
      /* We do this because the M32C has an HImode operand, but the
1738
         M16C has an 8 bit operand.  Since gcc looks at the match data
1739
         and not the expanded rtl, we have to reset the array so that
1740
         the right modes are found. */
1741
      setcc_gen_code[EQ] = CODE_FOR_seq_24;
1742
      setcc_gen_code[NE] = CODE_FOR_sne_24;
1743
      setcc_gen_code[GT] = CODE_FOR_sgt_24;
1744
      setcc_gen_code[GE] = CODE_FOR_sge_24;
1745
      setcc_gen_code[LT] = CODE_FOR_slt_24;
1746
      setcc_gen_code[LE] = CODE_FOR_sle_24;
1747
      setcc_gen_code[GTU] = CODE_FOR_sgtu_24;
1748
      setcc_gen_code[GEU] = CODE_FOR_sgeu_24;
1749
      setcc_gen_code[LTU] = CODE_FOR_sltu_24;
1750
      setcc_gen_code[LEU] = CODE_FOR_sleu_24;
1751
    }
1752
}
1753
 
1754
/* Addressing Modes */
1755
 
1756
/* Used by GO_IF_LEGITIMATE_ADDRESS.  The r8c/m32c family supports a
1757
   wide range of non-orthogonal addressing modes, including the
1758
   ability to double-indirect on *some* of them.  Not all insns
1759
   support all modes, either, but we rely on predicates and
1760
   constraints to deal with that.  */
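/* As an illustrative sketch: an address such as
   (plus (reg a0) (const_int 100)) encodes as "+ri" below and is then
   checked against the per-base displacement ranges.  */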
1761
int
1762
m32c_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
1763
{
1764
  int mode_adjust;
1765
  if (CONSTANT_P (x))
1766
    return 1;
1767
 
1768
  /* Wide references to memory will be split after reload, so we must
1769
     ensure that all parts of such splits remain legitimate
1770
     addresses.  */
1771
  mode_adjust = GET_MODE_SIZE (mode) - 1;
1772
 
1773
  /* allowing PLUS yields mem:HI(plus:SI(mem:SI(plus:SI in m32c_split_move */
1774
  if (GET_CODE (x) == PRE_DEC
1775
      || GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_MODIFY)
1776
    {
1777
      return (GET_CODE (XEXP (x, 0)) == REG
1778
              && REGNO (XEXP (x, 0)) == SP_REGNO);
1779
    }
1780
 
1781
#if 0
1782
  /* This is the double indirection detection, but it currently
1783
     doesn't work as cleanly as this code implies, so until we've had
1784
     a chance to debug it, leave it disabled.  */
1785
  if (TARGET_A24 && GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) != PLUS)
1786
    {
1787
#if DEBUG_DOUBLE
1788
      fprintf (stderr, "double indirect\n");
1789
#endif
1790
      x = XEXP (x, 0);
1791
    }
1792
#endif
1793
 
1794
  encode_pattern (x);
1795
  if (RTX_IS ("r"))
1796
    {
1797
      /* Most indexable registers can be used without displacements,
1798
         although some of them will be emitted with an explicit zero
1799
         to please the assembler.  */
1800
      switch (REGNO (patternr[0]))
1801
        {
1802
        case A0_REGNO:
1803
        case A1_REGNO:
1804
        case SB_REGNO:
1805
        case FB_REGNO:
1806
        case SP_REGNO:
1807
          return 1;
1808
 
1809
        default:
1810
          if (IS_PSEUDO (patternr[0], strict))
1811
            return 1;
1812
          return 0;
1813
        }
1814
    }
1815
  if (RTX_IS ("+ri"))
1816
    {
1817
      /* This is more interesting, because different base registers
1818
         allow for different displacements - both range and signedness
1819
         - and it differs from chip series to chip series too.  */
1820
      int rn = REGNO (patternr[1]);
1821
      HOST_WIDE_INT offs = INTVAL (patternr[2]);
1822
      switch (rn)
1823
        {
1824
        case A0_REGNO:
1825
        case A1_REGNO:
1826
        case SB_REGNO:
1827
          /* The syntax only allows positive offsets, but when the
1828
             offsets span the entire memory range, we can simulate
1829
             negative offsets by wrapping.  */
1830
          if (TARGET_A16)
1831
            return (offs >= -65536 && offs <= 65535 - mode_adjust);
1832
          if (rn == SB_REGNO)
1833
            return (offs >= 0 && offs <= 65535 - mode_adjust);
1834
          /* A0 or A1 */
1835
          return (offs >= -16777216 && offs <= 16777215);
1836
 
1837
        case FB_REGNO:
1838
          if (TARGET_A16)
1839
            return (offs >= -128 && offs <= 127 - mode_adjust);
1840
          return (offs >= -65536 && offs <= 65535 - mode_adjust);
1841
 
1842
        case SP_REGNO:
1843
          return (offs >= -128 && offs <= 127 - mode_adjust);
1844
 
1845
        default:
1846
          if (IS_PSEUDO (patternr[1], strict))
1847
            return 1;
1848
          return 0;
1849
        }
1850
    }
1851
  if (RTX_IS ("+rs") || RTX_IS ("+r+si"))
1852
    {
1853
      rtx reg = patternr[1];
1854
 
1855
      /* We don't know where the symbol is, so only allow base
1856
         registers which support displacements spanning the whole
1857
         address range.  */
1858
      switch (REGNO (reg))
1859
        {
1860
        case A0_REGNO:
1861
        case A1_REGNO:
1862
          /* $sb needs a secondary reload, but since it's involved in
1863
             memory address reloads too, we don't deal with it very
1864
             well.  */
1865
          /*    case SB_REGNO: */
1866
          return 1;
1867
        default:
1868
          if (IS_PSEUDO (reg, strict))
1869
            return 1;
1870
          return 0;
1871
        }
1872
    }
1873
  return 0;
1874
}
1875
 
1876
/* Implements REG_OK_FOR_BASE_P.  */
1877
int
1878
m32c_reg_ok_for_base_p (rtx x, int strict)
1879
{
1880
  if (GET_CODE (x) != REG)
1881
    return 0;
1882
  switch (REGNO (x))
1883
    {
1884
    case A0_REGNO:
1885
    case A1_REGNO:
1886
    case SB_REGNO:
1887
    case FB_REGNO:
1888
    case SP_REGNO:
1889
      return 1;
1890
    default:
1891
      if (IS_PSEUDO (x, strict))
1892
        return 1;
1893
      return 0;
1894
    }
1895
}
1896
 
1897
/* We have three choices for choosing fb->aN offsets.  If we choose -128,
1898
   we need one MOVA -128[fb],aN opcode and 16 bit aN displacements,
1899
   like this:
1900
       EB 4B FF    mova    -128[$fb],$a0
1901
       D8 0C FF FF mov.w:Q #0,-1[$a0]
1902
 
1903
   Alternately, we subtract the frame size, and hopefully use 8 bit aN
1904
   displacements:
1905
       7B F4       stc $fb,$a0
1906
       77 54 00 01 sub #256,$a0
1907
       D8 08 01    mov.w:Q #0,1[$a0]
1908
 
1909
   If we don't offset (i.e. offset by zero), we end up with:
1910
       7B F4       stc $fb,$a0
1911
       D8 0C 00 FF mov.w:Q #0,-256[$a0]
1912
 
1913
   We have to subtract *something* so that we have a PLUS rtx to mark
1914
   that we've done this reload.  The -128 offset will never result in
1915
   an 8 bit aN offset, and the payoff for the second case is five
1916
   loads *if* those loads are within 256 bytes of the other end of the
1917
   frame, so the third case seems best.  Note that we subtract the
1918
   zero, but detect that in the addhi3 pattern.  */
1919
 
1920
#define BIG_FB_ADJ 0
1921
 
1922
/* Implements LEGITIMIZE_ADDRESS.  The only address we really have to
1923
   worry about is frame base offsets, as $fb has a limited
1924
   displacement range.  We deal with this by attempting to reload $fb
1925
   itself into an address register; that seems to result in the best
1926
   code.  */
1927
int
1928
m32c_legitimize_address (rtx * x ATTRIBUTE_UNUSED,
1929
                         rtx oldx ATTRIBUTE_UNUSED,
1930
                         enum machine_mode mode ATTRIBUTE_UNUSED)
1931
{
1932
#if DEBUG0
1933
  fprintf (stderr, "m32c_legitimize_address for mode %s\n", mode_name[mode]);
1934
  debug_rtx (*x);
1935
  fprintf (stderr, "\n");
1936
#endif
1937
 
1938
  if (GET_CODE (*x) == PLUS
1939
      && GET_CODE (XEXP (*x, 0)) == REG
1940
      && REGNO (XEXP (*x, 0)) == FB_REGNO
1941
      && GET_CODE (XEXP (*x, 1)) == CONST_INT
1942
      && (INTVAL (XEXP (*x, 1)) < -128
1943
          || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
1944
    {
1945
      /* reload FB to A_REGS */
1946
      rtx temp = gen_reg_rtx (Pmode);
1947
      *x = copy_rtx (*x);
1948
      emit_insn (gen_rtx_SET (VOIDmode, temp, XEXP (*x, 0)));
1949
      XEXP (*x, 0) = temp;
1950
      return 1;
1951
    }
1952
 
1953
  return 0;
1954
}
1955
 
1956
/* Implements LEGITIMIZE_RELOAD_ADDRESS.  See comment above.  */
1957
int
1958
m32c_legitimize_reload_address (rtx * x,
1959
                                enum machine_mode mode,
1960
                                int opnum,
1961
                                int type, int ind_levels ATTRIBUTE_UNUSED)
1962
{
1963
#if DEBUG0
1964
  fprintf (stderr, "\nm32c_legitimize_reload_address for mode %s\n",
1965
           mode_name[mode]);
1966
  debug_rtx (*x);
1967
#endif
1968
 
1969
  /* At one point, this function tried to get $fb copied to an address
1970
     register, which in theory would maximize sharing, but gcc was
1971
     *also* still trying to reload the whole address, and we'd run out
1972
     of address registers.  So we let gcc do the naive (but safe)
1973
     reload instead, when the above function doesn't handle it for
1974
     us.
1975
 
1976
     The code below is a second attempt at the above.  */
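  /* Roughly (a sketch): $fb+OFFS is rewritten as ($fb+0)+OFFS, and the
     inner zero-addend PLUS is what push_reload copies into an address
     register; the zero is later detected in the addhi3 pattern, as
     noted in the comment above BIG_FB_ADJ.  */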
1977
 
1978
  if (GET_CODE (*x) == PLUS
1979
      && GET_CODE (XEXP (*x, 0)) == REG
1980
      && REGNO (XEXP (*x, 0)) == FB_REGNO
1981
      && GET_CODE (XEXP (*x, 1)) == CONST_INT
1982
      && (INTVAL (XEXP (*x, 1)) < -128
1983
          || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
1984
    {
1985
      rtx sum;
1986
      int offset = INTVAL (XEXP (*x, 1));
1987
      int adjustment = -BIG_FB_ADJ;
1988
 
1989
      sum = gen_rtx_PLUS (Pmode, XEXP (*x, 0),
1990
                          GEN_INT (adjustment));
1991
      *x = gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - adjustment));
1992
      if (type == RELOAD_OTHER)
1993
        type = RELOAD_FOR_OTHER_ADDRESS;
1994
      push_reload (sum, NULL_RTX, &XEXP (*x, 0), NULL,
1995
                   A_REGS, Pmode, VOIDmode, 0, 0, opnum,
1996
                   type);
1997
      return 1;
1998
    }
1999
 
2000
  if (GET_CODE (*x) == PLUS
2001
      && GET_CODE (XEXP (*x, 0)) == PLUS
2002
      && GET_CODE (XEXP (XEXP (*x, 0), 0)) == REG
2003
      && REGNO (XEXP (XEXP (*x, 0), 0)) == FB_REGNO
2004
      && GET_CODE (XEXP (XEXP (*x, 0), 1)) == CONST_INT
2005
      && GET_CODE (XEXP (*x, 1)) == CONST_INT
2006
      )
2007
    {
2008
      if (type == RELOAD_OTHER)
2009
        type = RELOAD_FOR_OTHER_ADDRESS;
2010
      push_reload (XEXP (*x, 0), NULL_RTX, &XEXP (*x, 0), NULL,
2011
                   A_REGS, Pmode, VOIDmode, 0, 0, opnum,
2012
                   type);
2013
      return 1;
2014
    }
2015
 
2016
  return 0;
2017
}
2018
 
2019
/* Used in GO_IF_MODE_DEPENDENT_ADDRESS.  */
2020
int
2021
m32c_mode_dependent_address (rtx addr)
2022
{
2023
  if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == PRE_DEC)
2024
    return 1;
2025
  return 0;
2026
}
2027
 
2028
/* Implements LEGITIMATE_CONSTANT_P.  We split large constants anyway,
2029
   so we can allow anything.  */
2030
int
2031
m32c_legitimate_constant_p (rtx x ATTRIBUTE_UNUSED)
2032
{
2033
  return 1;
2034
}
2035
 
2036
 
2037
/* Condition Code Status */
2038
 
2039
#undef TARGET_FIXED_CONDITION_CODE_REGS
2040
#define TARGET_FIXED_CONDITION_CODE_REGS m32c_fixed_condition_code_regs
2041
static bool
2042
m32c_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
2043
{
2044
  *p1 = FLG_REGNO;
2045
  *p2 = INVALID_REGNUM;
2046
  return true;
2047
}
2048
 
2049
/* Describing Relative Costs of Operations */
2050
 
2051
/* Implements REGISTER_MOVE_COST.  We make impossible moves
2052
   prohibitively expensive, like trying to put QIs in r2/r3 (there are
2053
   no opcodes to do that).  We also discourage use of mem* registers
2054
   since they're really memory.  */
2055
int
2056
m32c_register_move_cost (enum machine_mode mode, int from, int to)
2057
{
2058
  int cost = COSTS_N_INSNS (3);
2059
  int cc = class_contents[from][0] | class_contents[to][0];
2060
  /* FIXME: pick real values, but not 2 for now.  */
2061
  if (mode == QImode && (cc & class_contents[R23_REGS][0]))
2062
    {
2063
      if (!(cc & ~class_contents[R23_REGS][0]))
2064
        cost = COSTS_N_INSNS (1000);
2065
      else
2066
        cost = COSTS_N_INSNS (80);
2067
    }
2068
 
2069
  if (!class_can_hold_mode (from, mode) || !class_can_hold_mode (to, mode))
2070
    cost = COSTS_N_INSNS (1000);
2071
 
2072
  if (classes_intersect (from, CR_REGS))
2073
    cost += COSTS_N_INSNS (5);
2074
 
2075
  if (classes_intersect (to, CR_REGS))
2076
    cost += COSTS_N_INSNS (5);
2077
 
2078
  if (from == MEM_REGS || to == MEM_REGS)
2079
    cost += COSTS_N_INSNS (50);
2080
  else if (classes_intersect (from, MEM_REGS)
2081
           || classes_intersect (to, MEM_REGS))
2082
    cost += COSTS_N_INSNS (10);
2083
 
2084
#if DEBUG0
2085
  fprintf (stderr, "register_move_cost %s from %s to %s = %d\n",
2086
           mode_name[mode], class_names[from], class_names[to], cost);
2087
#endif
2088
  return cost;
2089
}
2090
 
2091
/*  Implements MEMORY_MOVE_COST.  */
2092
int
2093
m32c_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2094
                       int reg_class ATTRIBUTE_UNUSED,
2095
                       int in ATTRIBUTE_UNUSED)
2096
{
2097
  /* FIXME: pick real values.  */
2098
  return COSTS_N_INSNS (10);
2099
}
2100
 
2101
/* Here we try to describe when we use multiple opcodes for one RTX so
2102
   that gcc knows when to use them.  */
2103
#undef TARGET_RTX_COSTS
2104
#define TARGET_RTX_COSTS m32c_rtx_costs
2105
static bool
2106
m32c_rtx_costs (rtx x, int code, int outer_code, int *total)
2107
{
2108
  switch (code)
2109
    {
2110
    case REG:
2111
      if (REGNO (x) >= MEM0_REGNO && REGNO (x) <= MEM7_REGNO)
2112
        *total += COSTS_N_INSNS (500);
2113
      else
2114
        *total += COSTS_N_INSNS (1);
2115
      return true;
2116
 
2117
    case ASHIFT:
2118
    case LSHIFTRT:
2119
    case ASHIFTRT:
2120
      if (GET_CODE (XEXP (x, 1)) != CONST_INT)
2121
        {
2122
          /* mov.b r1l, r1h */
2123
          *total +=  COSTS_N_INSNS (1);
2124
          return true;
2125
        }
2126
      if (INTVAL (XEXP (x, 1)) > 8
2127
          || INTVAL (XEXP (x, 1)) < -8)
2128
        {
2129
          /* mov.b #N, r1l */
2130
          /* mov.b r1l, r1h */
2131
          *total +=  COSTS_N_INSNS (2);
2132
          return true;
2133
        }
2134
      return true;
2135
 
2136
    case LE:
2137
    case LEU:
2138
    case LT:
2139
    case LTU:
2140
    case GT:
2141
    case GTU:
2142
    case GE:
2143
    case GEU:
2144
    case NE:
2145
    case EQ:
2146
      if (outer_code == SET)
2147
        {
2148
          *total += COSTS_N_INSNS (2);
2149
          return true;
2150
        }
2151
      break;
2152
 
2153
    case ZERO_EXTRACT:
2154
      {
2155
        rtx dest = XEXP (x, 0);
2156
        rtx addr = XEXP (dest, 0);
2157
        switch (GET_CODE (addr))
2158
          {
2159
          case CONST_INT:
2160
            *total += COSTS_N_INSNS (1);
2161
            break;
2162
          case SYMBOL_REF:
2163
            *total += COSTS_N_INSNS (3);
2164
            break;
2165
          default:
2166
            *total += COSTS_N_INSNS (2);
2167
            break;
2168
          }
2169
        return true;
2170
      }
2171
      break;
2172
 
2173
    default:
2174
      /* Reasonable default.  */
2175
      if (TARGET_A16 && GET_MODE(x) == SImode)
2176
        *total += COSTS_N_INSNS (2);
2177
      break;
2178
    }
2179
  return false;
2180
}
2181
 
2182
#undef TARGET_ADDRESS_COST
2183
#define TARGET_ADDRESS_COST m32c_address_cost
2184
static int
2185
m32c_address_cost (rtx addr)
2186
{
2187
  /*  fprintf(stderr, "\naddress_cost\n");
2188
      debug_rtx(addr);*/
2189
  switch (GET_CODE (addr))
2190
    {
2191
    case CONST_INT:
2192
      return COSTS_N_INSNS(1);
2193
    case SYMBOL_REF:
2194
      return COSTS_N_INSNS(3);
2195
    case REG:
2196
      return COSTS_N_INSNS(2);
2197
    default:
2198
      return 0;
2199
    }
2200
}
2201
 
2202
/* Defining the Output Assembler Language */
2203
 
2204
/* The Overall Framework of an Assembler File */
2205
 
2206
#undef TARGET_HAVE_NAMED_SECTIONS
2207
#define TARGET_HAVE_NAMED_SECTIONS true
2208
 
2209
/* Output of Data */
2210
 
2211
/* We may have 24 bit sizes, which is the native address size.
2212
   Currently unused, but provided for completeness.  */
2213
#undef TARGET_ASM_INTEGER
2214
#define TARGET_ASM_INTEGER m32c_asm_integer
2215
static bool
2216
m32c_asm_integer (rtx x, unsigned int size, int aligned_p)
2217
{
2218
  switch (size)
2219
    {
2220
    case 3:
2221
      fprintf (asm_out_file, "\t.3byte\t");
2222
      output_addr_const (asm_out_file, x);
2223
      fputc ('\n', asm_out_file);
2224
      return true;
2225
    case 4:
2226
      if (GET_CODE (x) == SYMBOL_REF)
2227
        {
2228
          fprintf (asm_out_file, "\t.long\t");
2229
          output_addr_const (asm_out_file, x);
2230
          fputc ('\n', asm_out_file);
2231
          return true;
2232
        }
2233
      break;
2234
    }
2235
  return default_assemble_integer (x, size, aligned_p);
2236
}
2237
 
2238
/* Output of Assembler Instructions */
2239
 
2240
/* We use a lookup table because the addressing modes are non-orthogonal.  */
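/* Each entry maps an encoded operand pattern (see encode_pattern) to an
   output template; a digit in the template prints the rtx at that index
   of patternr[].  For example, "m+ri" -- a MEM of (reg + const_int) --
   uses "3[2]", printing the constant as a displacement off the register.
   (Illustrative note only; the table below is authoritative.)  */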
2241
 
2242
static struct
2243
{
2244
  char code;
2245
  char const *pattern;
2246
  char const *format;
2247
}
2248
const conversions[] = {
2249
  { 0, "r", "0" },
2250
 
2251
  { 0, "mr", "z[1]" },
2252
  { 0, "m+ri", "3[2]" },
2253
  { 0, "m+rs", "3[2]" },
2254
  { 0, "m+r+si", "4+5[2]" },
2255
  { 0, "ms", "1" },
2256
  { 0, "mi", "1" },
2257
  { 0, "m+si", "2+3" },
2258
 
2259
  { 0, "mmr", "[z[2]]" },
2260
  { 0, "mm+ri", "[4[3]]" },
2261
  { 0, "mm+rs", "[4[3]]" },
2262
  { 0, "mm+r+si", "[5+6[3]]" },
2263
  { 0, "mms", "[[2]]" },
2264
  { 0, "mmi", "[[2]]" },
2265
  { 0, "mm+si", "[4[3]]" },
2266
 
2267
  { 0, "i", "#0" },
2268
  { 0, "s", "#0" },
2269
  { 0, "+si", "#1+2" },
2270
  { 0, "l", "#0" },
2271
 
2272
  { 'l', "l", "0" },
2273
  { 'd', "i", "0" },
2274
  { 'd', "s", "0" },
2275
  { 'd', "+si", "1+2" },
2276
  { 'D', "i", "0" },
2277
  { 'D', "s", "0" },
2278
  { 'D', "+si", "1+2" },
2279
  { 'x', "i", "#0" },
2280
  { 'X', "i", "#0" },
2281
  { 'm', "i", "#0" },
2282
  { 'b', "i", "#0" },
2283
  { 'B', "i", "0" },
2284
  { 'p', "i", "0" },
2285
 
2286
  { 0, 0, 0 }
2287
};
2288
 
2289
/* This is in order according to the bitfield that pushm/popm use.  */
2290
static char const *pushm_regs[] = {
2291
  "fb", "sb", "a1", "a0", "r3", "r2", "r1", "r0"
2292
};
2293
 
2294
/* Implements PRINT_OPERAND.  */
2295
void
2296
m32c_print_operand (FILE * file, rtx x, int code)
2297
{
2298
  int i, j, b;
2299
  const char *comma;
2300
  HOST_WIDE_INT ival;
2301
  int unsigned_const = 0;
2302
  int force_sign;
2303
 
2304
  /* Multiplies; constants are converted to sign-extended format but
2305
   we need unsigned, so 'u' and 'U' tell us what size unsigned we
2306
   need.  */
2307
  if (code == 'u')
2308
    {
2309
      unsigned_const = 2;
2310
      code = 0;
2311
    }
2312
  if (code == 'U')
2313
    {
2314
      unsigned_const = 1;
2315
      code = 0;
2316
    }
2317
  /* This one is only for debugging; you can put it in a pattern to
2318
     force this error.  */
2319
  if (code == '!')
2320
    {
2321
      fprintf (stderr, "dj: unreviewed pattern:");
2322
      if (current_output_insn)
2323
        debug_rtx (current_output_insn);
2324
      gcc_unreachable ();
2325
    }
2326
  /* PSImode operations are either .w or .l depending on the target.  */
2327
  if (code == '&')
2328
    {
2329
      if (TARGET_A16)
2330
        fprintf (file, "w");
2331
      else
2332
        fprintf (file, "l");
2333
      return;
2334
    }
2335
  /* Inverted conditionals.  */
2336
  if (code == 'C')
2337
    {
2338
      switch (GET_CODE (x))
2339
        {
2340
        case LE:
2341
          fputs ("gt", file);
2342
          break;
2343
        case LEU:
2344
          fputs ("gtu", file);
2345
          break;
2346
        case LT:
2347
          fputs ("ge", file);
2348
          break;
2349
        case LTU:
2350
          fputs ("geu", file);
2351
          break;
2352
        case GT:
2353
          fputs ("le", file);
2354
          break;
2355
        case GTU:
2356
          fputs ("leu", file);
2357
          break;
2358
        case GE:
2359
          fputs ("lt", file);
2360
          break;
2361
        case GEU:
2362
          fputs ("ltu", file);
2363
          break;
2364
        case NE:
2365
          fputs ("eq", file);
2366
          break;
2367
        case EQ:
2368
          fputs ("ne", file);
2369
          break;
2370
        default:
2371
          gcc_unreachable ();
2372
        }
2373
      return;
2374
    }
2375
  /* Regular conditionals.  */
2376
  if (code == 'c')
2377
    {
2378
      switch (GET_CODE (x))
2379
        {
2380
        case LE:
2381
          fputs ("le", file);
2382
          break;
2383
        case LEU:
2384
          fputs ("leu", file);
2385
          break;
2386
        case LT:
2387
          fputs ("lt", file);
2388
          break;
2389
        case LTU:
2390
          fputs ("ltu", file);
2391
          break;
2392
        case GT:
2393
          fputs ("gt", file);
2394
          break;
2395
        case GTU:
2396
          fputs ("gtu", file);
2397
          break;
2398
        case GE:
2399
          fputs ("ge", file);
2400
          break;
2401
        case GEU:
2402
          fputs ("geu", file);
2403
          break;
2404
        case NE:
2405
          fputs ("ne", file);
2406
          break;
2407
        case EQ:
2408
          fputs ("eq", file);
2409
          break;
2410
        default:
2411
          gcc_unreachable ();
2412
        }
2413
      return;
2414
    }
2415
  /* Used in negsi2 to do HImode ops on the two parts of an SImode
2416
     operand.  */
2417
  if (code == 'h' && GET_MODE (x) == SImode)
2418
    {
2419
      x = m32c_subreg (HImode, x, SImode, 0);
2420
      code = 0;
2421
    }
2422
  if (code == 'H' && GET_MODE (x) == SImode)
2423
    {
2424
      x = m32c_subreg (HImode, x, SImode, 2);
2425
      code = 0;
2426
    }
2427
  if (code == 'h' && GET_MODE (x) == HImode)
2428
    {
2429
      x = m32c_subreg (QImode, x, HImode, 0);
2430
      code = 0;
2431
    }
2432
  if (code == 'H' && GET_MODE (x) == HImode)
2433
    {
2434
      /* We can't actually represent this as an rtx.  Do it here.  */
2435
      if (GET_CODE (x) == REG)
2436
        {
2437
          switch (REGNO (x))
2438
            {
2439
            case R0_REGNO:
2440
              fputs ("r0h", file);
2441
              return;
2442
            case R1_REGNO:
2443
              fputs ("r1h", file);
2444
              return;
2445
            default:
2446
              gcc_unreachable();
2447
            }
2448
        }
2449
      /* This should be a MEM.  */
2450
      x = m32c_subreg (QImode, x, HImode, 1);
2451
      code = 0;
2452
    }
2453
  /* This is for BMcond, which always wants word register names.  */
2454
  if (code == 'h' && GET_MODE (x) == QImode)
2455
    {
2456
      if (GET_CODE (x) == REG)
2457
        x = gen_rtx_REG (HImode, REGNO (x));
2458
      code = 0;
2459
    }
2460
  /* 'x' and 'X' need to be ignored for non-immediates.  */
2461
  if ((code == 'x' || code == 'X') && GET_CODE (x) != CONST_INT)
2462
    code = 0;
2463
 
2464
  encode_pattern (x);
2465
  force_sign = 0;
2466
  for (i = 0; conversions[i].pattern; i++)
2467
    if (conversions[i].code == code
2468
        && streq (conversions[i].pattern, pattern))
2469
      {
2470
        for (j = 0; conversions[i].format[j]; j++)
2471
          /* backslash quotes the next character in the output pattern.  */
2472
          if (conversions[i].format[j] == '\\')
2473
            {
2474
              fputc (conversions[i].format[j + 1], file);
2475
              j++;
2476
            }
2477
          /* Digits in the output pattern indicate that the
2478
             corresponding RTX is to be output at that point.  */
2479
          else if (ISDIGIT (conversions[i].format[j]))
2480
            {
2481
              rtx r = patternr[conversions[i].format[j] - '0'];
2482
              switch (GET_CODE (r))
2483
                {
2484
                case REG:
2485
                  fprintf (file, "%s",
2486
                           reg_name_with_mode (REGNO (r), GET_MODE (r)));
2487
                  break;
2488
                case CONST_INT:
2489
                  switch (code)
2490
                    {
2491
                    case 'b':
2492
                    case 'B':
2493
                      {
2494
                        int v = INTVAL (r);
2495
                        int i = (int) exact_log2 (v);
2496
                        if (i == -1)
2497
                          i = (int) exact_log2 ((v ^ 0xffff) & 0xffff);
2498
                        if (i == -1)
2499
                          i = (int) exact_log2 ((v ^ 0xff) & 0xff);
2500
                        /* Bit position.  */
2501
                        fprintf (file, "%d", i);
2502
                      }
2503
                      break;
2504
                    case 'x':
2505
                      /* Unsigned byte.  */
2506
                      fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2507
                               INTVAL (r) & 0xff);
2508
                      break;
2509
                    case 'X':
2510
                      /* Unsigned word.  */
2511
                      fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2512
                               INTVAL (r) & 0xffff);
2513
                      break;
2514
                    case 'p':
2515
                      /* pushm and popm encode a register set into a single byte.  */
2516
                      comma = "";
2517
                      for (b = 7; b >= 0; b--)
2518
                        if (INTVAL (r) & (1 << b))
2519
                          {
2520
                            fprintf (file, "%s%s", comma, pushm_regs[b]);
2521
                            comma = ",";
2522
                          }
2523
                      break;
2524
                    case 'm':
2525
                      /* "Minus".  Output -X  */
2526
                      ival = (-INTVAL (r) & 0xffff);
2527
                      if (ival & 0x8000)
2528
                        ival = ival - 0x10000;
2529
                      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2530
                      break;
2531
                    default:
2532
                      ival = INTVAL (r);
2533
                      if (conversions[i].format[j + 1] == '[' && ival < 0)
2534
                        {
2535
                          /* We can simulate negative displacements by
2536
                             taking advantage of address space
2537
                             wrapping when the offset can span the
2538
                             entire address range.  */
2539
                          rtx base =
2540
                            patternr[conversions[i].format[j + 2] - '0'];
2541
                          if (GET_CODE (base) == REG)
2542
                            switch (REGNO (base))
2543
                              {
2544
                              case A0_REGNO:
2545
                              case A1_REGNO:
2546
                                if (TARGET_A24)
2547
                                  ival = 0x1000000 + ival;
2548
                                else
2549
                                  ival = 0x10000 + ival;
2550
                                break;
2551
                              case SB_REGNO:
2552
                                if (TARGET_A16)
2553
                                  ival = 0x10000 + ival;
2554
                                break;
2555
                              }
2556
                        }
2557
                      else if (code == 'd' && ival < 0 && j == 0)
2558
                        /* The "mova" opcode is used to do addition by
2559
                           computing displacements, but again, we need
2560
                           displacements to be unsigned *if* they're
2561
                           the only component of the displacement
2562
                           (i.e. no "symbol-4" type displacement).  */
2563
                        ival = (TARGET_A24 ? 0x1000000 : 0x10000) + ival;
2564
 
2565
                      if (conversions[i].format[j] == '0')
2566
                        {
2567
                          /* More conversions to unsigned.  */
2568
                          if (unsigned_const == 2)
2569
                            ival &= 0xffff;
2570
                          if (unsigned_const == 1)
2571
                            ival &= 0xff;
2572
                        }
2573
                      if (streq (conversions[i].pattern, "mi")
2574
                          || streq (conversions[i].pattern, "mmi"))
2575
                        {
2576
                          /* Integers used as addresses are unsigned.  */
2577
                          ival &= (TARGET_A24 ? 0xffffff : 0xffff);
2578
                        }
2579
                      if (force_sign && ival >= 0)
2580
                        fputc ('+', file);
2581
                      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2582
                      break;
2583
                    }
2584
                  break;
2585
                case CONST_DOUBLE:
2586
                  /* We don't have const_double constants.  If it
2587
                     happens, make it obvious.  */
2588
                  fprintf (file, "[const_double 0x%lx]",
2589
                           (unsigned long) CONST_DOUBLE_HIGH (r));
2590
                  break;
2591
                case SYMBOL_REF:
2592
                  assemble_name (file, XSTR (r, 0));
2593
                  break;
2594
                case LABEL_REF:
2595
                  output_asm_label (r);
2596
                  break;
2597
                default:
2598
                  fprintf (stderr, "don't know how to print this operand:");
2599
                  debug_rtx (r);
2600
                  gcc_unreachable ();
2601
                }
2602
            }
2603
          else
2604
            {
2605
              if (conversions[i].format[j] == 'z')
2606
                {
2607
                  /* Some addressing modes *must* have a displacement,
2608
                     so insert a zero here if needed.  */
2609
                  int k;
2610
                  for (k = j + 1; conversions[i].format[k]; k++)
2611
                    if (ISDIGIT (conversions[i].format[k]))
2612
                      {
2613
                        rtx reg = patternr[conversions[i].format[k] - '0'];
2614
                        if (GET_CODE (reg) == REG
2615
                            && (REGNO (reg) == SB_REGNO
2616
                                || REGNO (reg) == FB_REGNO
2617
                                || REGNO (reg) == SP_REGNO))
2618
                          fputc ('0', file);
2619
                      }
2620
                  continue;
2621
                }
2622
              /* Signed displacements off symbols need to have signs
2623
                 blended cleanly.  */
2624
              if (conversions[i].format[j] == '+'
2625
                  && (!code || code == 'D' || code == 'd')
2626
                  && ISDIGIT (conversions[i].format[j + 1])
2627
                  && (GET_CODE (patternr[conversions[i].format[j + 1] - '0'])
2628
                      == CONST_INT))
2629
                {
2630
                  force_sign = 1;
2631
                  continue;
2632
                }
2633
              fputc (conversions[i].format[j], file);
2634
            }
2635
        break;
2636
      }
2637
  if (!conversions[i].pattern)
2638
    {
2639
      fprintf (stderr, "unconvertible operand %c `%s'", code ? code : '-',
2640
               pattern);
2641
      debug_rtx (x);
2642
      fprintf (file, "[%c.%s]", code ? code : '-', pattern);
2643
    }
2644
 
2645
  return;
2646
}
2647
 
2648
/* Implements PRINT_OPERAND_PUNCT_VALID_P.  See m32c_print_operand
2649
   above for descriptions of what these do.  */
2650
int
2651
m32c_print_operand_punct_valid_p (int c)
2652
{
2653
  if (c == '&' || c == '!')
2654
    return 1;
2655
  return 0;
2656
}
2657
 
2658
/* Implements PRINT_OPERAND_ADDRESS.  Nothing unusual here.  */
2659
void
2660
m32c_print_operand_address (FILE * stream, rtx address)
2661
{
2662
  gcc_assert (GET_CODE (address) == MEM);
2663
  m32c_print_operand (stream, XEXP (address, 0), 0);
2664
}
2665
 
2666
/* Implements ASM_OUTPUT_REG_PUSH.  Control registers are pushed
2667
   differently than general registers.  */
2668
void
2669
m32c_output_reg_push (FILE * s, int regno)
2670
{
2671
  if (regno == FLG_REGNO)
2672
    fprintf (s, "\tpushc\tflg\n");
2673
  else
2674
    fprintf (s, "\tpush.%c\t%s\n",
2675
             " bwll"[reg_push_size (regno)], reg_names[regno]);
2676
}
2677
 
2678
/* Likewise for ASM_OUTPUT_REG_POP.  */
2679
void
2680
m32c_output_reg_pop (FILE * s, int regno)
2681
{
2682
  if (regno == FLG_REGNO)
2683
    fprintf (s, "\tpopc\tflg\n");
2684
  else
2685
    fprintf (s, "\tpop.%c\t%s\n",
2686
             " bwll"[reg_push_size (regno)], reg_names[regno]);
2687
}
2688
 
2689
/* Defining target-specific uses of `__attribute__' */
2690
 
2691
/* Used to simplify the logic below.  Find the attributes wherever
2692
   they may be.  */
2693
#define M32C_ATTRIBUTES(decl) \
2694
  (TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
2695
                : DECL_ATTRIBUTES (decl) \
2696
                  ? (DECL_ATTRIBUTES (decl)) \
2697
                  : TYPE_ATTRIBUTES (TREE_TYPE (decl))
2698
 
2699
/* Returns TRUE if the given tree has the "interrupt" attribute.  */
2700
static int
2701
interrupt_p (tree node ATTRIBUTE_UNUSED)
2702
{
2703
  tree list = M32C_ATTRIBUTES (node);
2704
  while (list)
2705
    {
2706
      if (is_attribute_p ("interrupt", TREE_PURPOSE (list)))
2707
        return 1;
2708
      list = TREE_CHAIN (list);
2709
    }
2710
  return 0;
2711
}
2712
 
2713
static tree
2714
interrupt_handler (tree * node ATTRIBUTE_UNUSED,
2715
                   tree name ATTRIBUTE_UNUSED,
2716
                   tree args ATTRIBUTE_UNUSED,
2717
                   int flags ATTRIBUTE_UNUSED,
2718
                   bool * no_add_attrs ATTRIBUTE_UNUSED)
2719
{
2720
  return NULL_TREE;
2721
}
2722
 
2723
#undef TARGET_ATTRIBUTE_TABLE
2724
#define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
2725
static const struct attribute_spec m32c_attribute_table[] = {
2726
  {"interrupt", 0, 0, false, false, false, interrupt_handler},
2727
  {0, 0, 0, 0, 0, 0, 0}
2728
};
2729
 
2730
#undef TARGET_COMP_TYPE_ATTRIBUTES
2731
#define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
2732
static int
2733
m32c_comp_type_attributes (tree type1 ATTRIBUTE_UNUSED,
2734
                           tree type2 ATTRIBUTE_UNUSED)
2735
{
2736
  /* 0=incompatible 1=compatible 2=warning */
2737
  return 1;
2738
}
2739
 
2740
#undef TARGET_INSERT_ATTRIBUTES
2741
#define TARGET_INSERT_ATTRIBUTES m32c_insert_attributes
2742
static void
2743
m32c_insert_attributes (tree node ATTRIBUTE_UNUSED,
2744
                        tree * attr_ptr ATTRIBUTE_UNUSED)
2745
{
2746
  /* Nothing to do here.  */
2747
}
2748
 
2749
/* Predicates */
2750
 
2751
/* Returns TRUE if we support a move between the first two operands.
2752
   At the moment, we just want to discourage mem to mem moves until
2753
   after reload, because reload has a hard time with our limited
2754
   number of address registers, and we can get into a situation where
2755
   we need three of them when we only have two.  */
2756
bool
2757
m32c_mov_ok (rtx * operands, enum machine_mode mode ATTRIBUTE_UNUSED)
2758
{
2759
  rtx op0 = operands[0];
2760
  rtx op1 = operands[1];
2761
 
2762
  if (TARGET_A24)
2763
    return true;
2764
 
2765
#define DEBUG_MOV_OK 0
2766
#if DEBUG_MOV_OK
2767
  fprintf (stderr, "m32c_mov_ok %s\n", mode_name[mode]);
2768
  debug_rtx (op0);
2769
  debug_rtx (op1);
2770
#endif
2771
 
2772
  if (GET_CODE (op0) == SUBREG)
2773
    op0 = XEXP (op0, 0);
2774
  if (GET_CODE (op1) == SUBREG)
2775
    op1 = XEXP (op1, 0);
2776
 
2777
  if (GET_CODE (op0) == MEM
2778
      && GET_CODE (op1) == MEM
2779
      && ! reload_completed)
2780
    {
2781
#if DEBUG_MOV_OK
2782
      fprintf (stderr, " - no, mem to mem\n");
2783
#endif
2784
      return false;
2785
    }
2786
 
2787
#if DEBUG_MOV_OK
2788
  fprintf (stderr, " - ok\n");
2789
#endif
2790
  return true;
2791
}
2792
 
2793
/* Returns TRUE if two consecutive HImode mov instructions, generated
2794
   for moving immediate data into a doubleword (two-HImode) memory
2795
   location, can be combined into a single SImode mov instruction.  */
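/* For instance (a sketch): "mov.w #0x1234,_x" followed by
   "mov.w #0x5678,_x+2" can become "mov.l #0x56781234,_x"; the second
   word supplies the upper 16 bits, as in the combination of
   operands[1] and operands[3] below.  */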
2796
bool
2797
m32c_immd_dbl_mov (rtx * operands,
2798
                   enum machine_mode mode ATTRIBUTE_UNUSED)
2799
{
2800
  int flag = 0, okflag = 0, offset1 = 0, offset2 = 0, offsetsign = 0;
2801
  const char *str1;
2802
  const char *str2;
2803
 
2804
  if (GET_CODE (XEXP (operands[0], 0)) == SYMBOL_REF
2805
      && MEM_SCALAR_P (operands[0])
2806
      && !MEM_IN_STRUCT_P (operands[0])
2807
      && GET_CODE (XEXP (operands[2], 0)) == CONST
2808
      && GET_CODE (XEXP (XEXP (operands[2], 0), 0)) == PLUS
2809
      && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 0)) == SYMBOL_REF
2810
      && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 1)) == CONST_INT
2811
      && MEM_SCALAR_P (operands[2])
2812
      && !MEM_IN_STRUCT_P (operands[2]))
2813
    flag = 1;
2814
 
2815
  else if (GET_CODE (XEXP (operands[0], 0)) == CONST
2816
           && GET_CODE (XEXP (XEXP (operands[0], 0), 0)) == PLUS
2817
           && GET_CODE (XEXP (XEXP (XEXP (operands[0], 0), 0), 0)) == SYMBOL_REF
2818
           && MEM_SCALAR_P (operands[0])
2819
           && !MEM_IN_STRUCT_P (operands[0])
2820
           && !(XINT (XEXP (XEXP (XEXP (operands[0], 0), 0), 1), 0) %4)
2821
           && GET_CODE (XEXP (operands[2], 0)) == CONST
2822
           && GET_CODE (XEXP (XEXP (operands[2], 0), 0)) == PLUS
2823
           && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 0)) == SYMBOL_REF
2824
           && MEM_SCALAR_P (operands[2])
2825
           && !MEM_IN_STRUCT_P (operands[2]))
2826
    flag = 2;
2827
 
2828
  else if (GET_CODE (XEXP (operands[0], 0)) == PLUS
2829
           &&  GET_CODE (XEXP (XEXP (operands[0], 0), 0)) == REG
2830
           &&  REGNO (XEXP (XEXP (operands[0], 0), 0)) == FB_REGNO
2831
           &&  GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == CONST_INT
2832
           &&  MEM_SCALAR_P (operands[0])
2833
           &&  !MEM_IN_STRUCT_P (operands[0])
2834
           &&  !(XINT (XEXP (XEXP (operands[0], 0), 1), 0) %4)
2835
           &&  REGNO (XEXP (XEXP (operands[2], 0), 0)) == FB_REGNO
2836
           &&  GET_CODE (XEXP (XEXP (operands[2], 0), 1)) == CONST_INT
2837
           &&  MEM_SCALAR_P (operands[2])
2838
           &&  !MEM_IN_STRUCT_P (operands[2]))
2839
    flag = 3;
2840
 
2841
  else
2842
    return false;
2843
 
2844
  switch (flag)
2845
    {
2846
    case 1:
2847
      str1 = XSTR (XEXP (operands[0], 0), 0);
2848
      str2 = XSTR (XEXP (XEXP (XEXP (operands[2], 0), 0), 0), 0);
2849
      if (strcmp (str1, str2) == 0)
2850
        okflag = 1;
2851
      else
2852
        okflag = 0;
2853
      break;
2854
    case 2:
2855
      str1 = XSTR (XEXP (XEXP (XEXP (operands[0], 0), 0), 0), 0);
2856
      str2 = XSTR (XEXP (XEXP (XEXP (operands[2], 0), 0), 0), 0);
2857
      if (strcmp(str1,str2) == 0)
2858
        okflag = 1;
2859
      else
2860
        okflag = 0;
2861
      break;
2862
    case 3:
2863
      offset1 = XINT (XEXP (XEXP (operands[0], 0), 1), 0);
2864
      offset2 = XINT (XEXP (XEXP (operands[2], 0), 1), 0);
2865
      offsetsign = offset1 >> ((sizeof (offset1) * 8) -1);
2866
      if (((offset2-offset1) == 2) && offsetsign != 0)
2867
        okflag = 1;
2868
      else
2869
        okflag = 0;
2870
      break;
2871
    default:
2872
      okflag = 0;
2873
    }
2874
 
2875
  if (okflag == 1)
2876
    {
2877
      HOST_WIDE_INT val;
2878
      operands[4] = gen_rtx_MEM (SImode, XEXP (operands[0], 0));
2879
 
2880
      val = (XINT (operands[3], 0) << 16) + (XINT (operands[1], 0) & 0xFFFF);
2881
      operands[5] = gen_rtx_CONST_INT (VOIDmode, val);
2882
 
2883
      return true;
2884
    }
2885
 
2886
  return false;
2887
}
2888
 
2889
/* Expanders */
2890
 
2891
/* Subregs are non-orthogonal for us, because our registers are all
2892
   different sizes.  */
2893
static rtx
2894
m32c_subreg (enum machine_mode outer,
2895
             rtx x, enum machine_mode inner, int byte)
2896
{
2897
  int r, nr = -1;
2898
 
2899
  /* Converting MEMs to different types that are the same size, we
2900
     just rewrite them.  */
2901
  if (GET_CODE (x) == SUBREG
2902
      && SUBREG_BYTE (x) == 0
2903
      && GET_CODE (SUBREG_REG (x)) == MEM
2904
      && (GET_MODE_SIZE (GET_MODE (x))
2905
          == GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
2906
    {
2907
      rtx oldx = x;
2908
      x = gen_rtx_MEM (GET_MODE (x), XEXP (SUBREG_REG (x), 0));
2909
      MEM_COPY_ATTRIBUTES (x, SUBREG_REG (oldx));
2910
    }
2911
 
2912
  /* Push/pop get done as smaller push/pops.  */
2913
  if (GET_CODE (x) == MEM
2914
      && (GET_CODE (XEXP (x, 0)) == PRE_DEC
2915
          || GET_CODE (XEXP (x, 0)) == POST_INC))
2916
    return gen_rtx_MEM (outer, XEXP (x, 0));
2917
  if (GET_CODE (x) == SUBREG
2918
      && GET_CODE (XEXP (x, 0)) == MEM
2919
      && (GET_CODE (XEXP (XEXP (x, 0), 0)) == PRE_DEC
2920
          || GET_CODE (XEXP (XEXP (x, 0), 0)) == POST_INC))
2921
    return gen_rtx_MEM (outer, XEXP (XEXP (x, 0), 0));
2922
 
2923
  if (GET_CODE (x) != REG)
2924
    return simplify_gen_subreg (outer, x, inner, byte);
2925
 
2926
  r = REGNO (x);
2927
  if (r >= FIRST_PSEUDO_REGISTER || r == AP_REGNO)
2928
    return simplify_gen_subreg (outer, x, inner, byte);
2929
 
2930
  if (IS_MEM_REGNO (r))
2931
    return simplify_gen_subreg (outer, x, inner, byte);
2932
 
2933
  /* This is where the complexities of our register layout are
2934
     described.  */
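  /* A sketch of the layout implied by the cases below: an SImode value
     nominally in r0 occupies the r0/r2 pair (bytes 2-3 live in r2),
     and a DImode value uses r0/r2/r1/r3, hence byte 4 maps to r1 and
     byte 6 to r3.  */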
2935
  if (byte == 0)
2936
    nr = r;
2937
  else if (outer == HImode)
2938
    {
2939
      if (r == R0_REGNO && byte == 2)
2940
        nr = R2_REGNO;
2941
      else if (r == R0_REGNO && byte == 4)
2942
        nr = R1_REGNO;
2943
      else if (r == R0_REGNO && byte == 6)
2944
        nr = R3_REGNO;
2945
      else if (r == R1_REGNO && byte == 2)
2946
        nr = R3_REGNO;
2947
      else if (r == A0_REGNO && byte == 2)
2948
        nr = A1_REGNO;
2949
    }
2950
  else if (outer == SImode)
2951
    {
2952
      if (r == R0_REGNO && byte == 0)
2953
        nr = R0_REGNO;
2954
      else if (r == R0_REGNO && byte == 4)
2955
        nr = R1_REGNO;
2956
    }
2957
  if (nr == -1)
2958
    {
2959
      fprintf (stderr, "m32c_subreg %s %s %d\n",
2960
               mode_name[outer], mode_name[inner], byte);
2961
      debug_rtx (x);
2962
      gcc_unreachable ();
2963
    }
2964
  return gen_rtx_REG (outer, nr);
2965
}
2966
 
2967
/* Used to emit move instructions.  We split some moves,
2968
   and avoid mem-mem moves.  */
2969
int
2970
m32c_prepare_move (rtx * operands, enum machine_mode mode)
2971
{
2972
  if (TARGET_A16 && mode == PSImode)
2973
    return m32c_split_move (operands, mode, 1);
2974
  if ((GET_CODE (operands[0]) == MEM)
2975
      && (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY))
2976
    {
2977
      rtx pmv = XEXP (operands[0], 0);
2978
      rtx dest_reg = XEXP (pmv, 0);
2979
      rtx dest_mod = XEXP (pmv, 1);
2980
 
2981
      emit_insn (gen_rtx_SET (Pmode, dest_reg, dest_mod));
2982
      operands[0] = gen_rtx_MEM (mode, dest_reg);
2983
    }
2984
  if (!no_new_pseudos && MEM_P (operands[0]) && MEM_P (operands[1]))
2985
    operands[1] = copy_to_mode_reg (mode, operands[1]);
2986
  return 0;
2987
}
2988
 
2989
#define DEBUG_SPLIT 0
2990
 
2991
/* Returns TRUE if the given PSImode move should be split.  We split
2992
   for all r8c/m16c moves, since those chips don't support them, and for
2993
   POP.L as we can only *push* SImode.  */
2994
int
2995
m32c_split_psi_p (rtx * operands)
2996
{
2997
#if DEBUG_SPLIT
2998
  fprintf (stderr, "\nm32c_split_psi_p\n");
2999
  debug_rtx (operands[0]);
3000
  debug_rtx (operands[1]);
3001
#endif
3002
  if (TARGET_A16)
3003
    {
3004
#if DEBUG_SPLIT
3005
      fprintf (stderr, "yes, A16\n");
3006
#endif
3007
      return 1;
3008
    }
3009
  if (GET_CODE (operands[1]) == MEM
3010
      && GET_CODE (XEXP (operands[1], 0)) == POST_INC)
3011
    {
3012
#if DEBUG_SPLIT
3013
      fprintf (stderr, "yes, pop.l\n");
3014
#endif
3015
      return 1;
3016
    }
3017
#if DEBUG_SPLIT
3018
  fprintf (stderr, "no, default\n");
3019
#endif
3020
  return 0;
3021
}
3022
 
3023
/* Split the given move.  SPLIT_ALL is 0 if splitting is optional
3024
   (define_expand), 1 if it is not optional (define_insn_and_split),
3025
   and 3 for define_split (alternate api). */
3026
int
3027
m32c_split_move (rtx * operands, enum machine_mode mode, int split_all)
3028
{
3029
  rtx s[4], d[4];
3030
  int parts, si, di, rev = 0;
3031
  int rv = 0, opi = 2;
3032
  enum machine_mode submode = HImode;
3033
  rtx *ops, local_ops[10];
3034
 
3035
  /* define_split modifies the existing operands, but the other two
3036
     emit new insns.  OPS is where we store the operand pairs, which
3037
     we emit later.  */
3038
  if (split_all == 3)
3039
    ops = operands;
3040
  else
3041
    ops = local_ops;
3042
 
3043
  /* Else HImode.  */
3044
  if (mode == DImode)
3045
    submode = SImode;
3046
 
3047
  /* Before splitting mem-mem moves, force one operand into a
3048
     register.  */
3049
  if (!no_new_pseudos && MEM_P (operands[0]) && MEM_P (operands[1]))
3050
    {
3051
#if DEBUG0
3052
      fprintf (stderr, "force_reg...\n");
3053
      debug_rtx (operands[1]);
3054
#endif
3055
      operands[1] = force_reg (mode, operands[1]);
3056
#if DEBUG0
3057
      debug_rtx (operands[1]);
3058
#endif
3059
    }
3060
 
3061
  parts = 2;
3062
 
3063
#if DEBUG_SPLIT
3064
  fprintf (stderr, "\nsplit_move %d all=%d\n", no_new_pseudos, split_all);
3065
  debug_rtx (operands[0]);
3066
  debug_rtx (operands[1]);
3067
#endif
3068
 
3069
  /* Note that split_all is not used to select the api after this
3070
     point, so it's safe to set it to 3 even with define_insn.  */
3071
  /* None of the chips can move SI operands to sp-relative addresses,
3072
     so we always split those.  */
3073
  if (m32c_extra_constraint_p (operands[0], 'S', "Ss"))
3074
    split_all = 3;
3075
 
3076
  /* We don't need to split these.  */
3077
  if (TARGET_A24
3078
      && split_all != 3
3079
      && (mode == SImode || mode == PSImode)
3080
      && !(GET_CODE (operands[1]) == MEM
3081
           && GET_CODE (XEXP (operands[1], 0)) == POST_INC))
3082
    return 0;
3083
 
3084
  /* First, enumerate the subregs we'll be dealing with.  */
3085
  for (si = 0; si < parts; si++)
3086
    {
3087
      d[si] =
3088
        m32c_subreg (submode, operands[0], mode,
3089
                     si * GET_MODE_SIZE (submode));
3090
      s[si] =
3091
        m32c_subreg (submode, operands[1], mode,
3092
                     si * GET_MODE_SIZE (submode));
3093
    }
3094
 
3095
  /* Split pushes by emitting a sequence of smaller pushes.  */
3096
  if (GET_CODE (d[0]) == MEM && GET_CODE (XEXP (d[0], 0)) == PRE_DEC)
3097
    {
3098
      for (si = parts - 1; si >= 0; si--)
3099
        {
3100
          ops[opi++] = gen_rtx_MEM (submode,
3101
                                    gen_rtx_PRE_DEC (Pmode,
3102
                                                     gen_rtx_REG (Pmode,
3103
                                                                  SP_REGNO)));
3104
          ops[opi++] = s[si];
3105
        }
3106
 
3107
      rv = 1;
3108
    }
3109
  /* Likewise for pops.  */
3110
  else if (GET_CODE (s[0]) == MEM && GET_CODE (XEXP (s[0], 0)) == POST_INC)
3111
    {
3112
      for (di = 0; di < parts; di++)
3113
        {
3114
          ops[opi++] = d[di];
3115
          ops[opi++] = gen_rtx_MEM (submode,
3116
                                    gen_rtx_POST_INC (Pmode,
3117
                                                      gen_rtx_REG (Pmode,
3118
                                                                   SP_REGNO)));
3119
        }
3120
      rv = 1;
3121
    }
3122
  else if (split_all)
3123
    {
3124
      /* if d[di] == s[si] for any di < si, we'll early clobber. */
3125
      for (di = 0; di < parts - 1; di++)
3126
        for (si = di + 1; si < parts; si++)
3127
          if (reg_mentioned_p (d[di], s[si]))
3128
            rev = 1;
3129
 
3130
      if (rev)
3131
        for (si = 0; si < parts; si++)
3132
          {
3133
            ops[opi++] = d[si];
3134
            ops[opi++] = s[si];
3135
          }
3136
      else
3137
        for (si = parts - 1; si >= 0; si--)
3138
          {
3139
            ops[opi++] = d[si];
3140
            ops[opi++] = s[si];
3141
          }
3142
      rv = 1;
3143
    }
3144
  /* Now emit any moves we may have accumulated.  */
3145
  if (rv && split_all != 3)
3146
    {
3147
      int i;
3148
      for (i = 2; i < opi; i += 2)
3149
        emit_move_insn (ops[i], ops[i + 1]);
3150
    }
3151
  return rv;
3152
}
3153
 
3154
/* The m32c has a number of opcodes that act like memcpy, strcmp, and
3155
   the like.  For the R8C they expect one of the addresses to be in
3156
   R1L:An so we need to arrange for that.  Otherwise, it's just a
3157
   matter of picking out the operands we want and emitting the right
3158
   pattern for them.  All these expanders, which correspond to
3159
   patterns in blkmov.md, must return nonzero if they expand the insn,
3160
   or zero if they should FAIL.  */
3161
 
3162
/* This is a memset() opcode.  All operands are implied, so we need to
3163
   arrange for them to be in the right registers.  The opcode wants
3164
   addresses, not [mem] syntax.  $0 is the destination (MEM:BLK), $1
3165
   the count (HI), and $2 the value (QI).  */
3166
int
3167
m32c_expand_setmemhi(rtx *operands)
3168
{
3169
  rtx desta, count, val;
3170
  rtx desto, counto;
3171
 
3172
  desta = XEXP (operands[0], 0);
3173
  count = operands[1];
3174
  val = operands[2];
3175
 
3176
  desto = gen_reg_rtx (Pmode);
3177
  counto = gen_reg_rtx (HImode);
3178
 
3179
  if (GET_CODE (desta) != REG
3180
      || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3181
    desta = copy_to_mode_reg (Pmode, desta);
3182
 
3183
  /* This looks like an arbitrary restriction, but this is by far the
3184
     most common case.  For counts 8..14 this actually results in
3185
     smaller code with no speed penalty because the half-sized
3186
     constant can be loaded with a shorter opcode.  */
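  /* For example (a sketch): memset (p, 0x05, 10) becomes a word-wise
     fill of five 0x0505 words, since the value is duplicated into both
     bytes and the count is halved below.  */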
3187
  if (GET_CODE (count) == CONST_INT
3188
      && GET_CODE (val) == CONST_INT
3189
      && ! (INTVAL (count) & 1)
3190
      && (INTVAL (count) > 1)
3191
      && (INTVAL (val) <= 7 && INTVAL (val) >= -8))
3192
    {
3193
      unsigned v = INTVAL (val) & 0xff;
3194
      v = v | (v << 8);
3195
      count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3196
      val = copy_to_mode_reg (HImode, GEN_INT (v));
3197
      if (TARGET_A16)
3198
        emit_insn (gen_setmemhi_whi_op (desto, counto, val, desta, count));
3199
      else
3200
        emit_insn (gen_setmemhi_wpsi_op (desto, counto, val, desta, count));
3201
      return 1;
3202
    }
3203
 
3204
  /* This is the generalized memset() case.  */
3205
  if (GET_CODE (val) != REG
3206
      || REGNO (val) < FIRST_PSEUDO_REGISTER)
3207
    val = copy_to_mode_reg (QImode, val);
3208
 
3209
  if (GET_CODE (count) != REG
3210
      || REGNO (count) < FIRST_PSEUDO_REGISTER)
3211
    count = copy_to_mode_reg (HImode, count);
3212
 
3213
  if (TARGET_A16)
3214
    emit_insn (gen_setmemhi_bhi_op (desto, counto, val, desta, count));
3215
  else
3216
    emit_insn (gen_setmemhi_bpsi_op (desto, counto, val, desta, count));
3217
 
3218
  return 1;
3219
}
3220
 
3221
/* This is a memcpy() opcode.  All operands are implied, so we need to
3222
   arrange for them to be in the right registers.  The opcode wants
3223
   addresses, not [mem] syntax.  $0 is the destination (MEM:BLK), $1
3224
   is the source (MEM:BLK), and $2 the count (HI).  */
3225
int
3226
m32c_expand_movmemhi(rtx *operands)
3227
{
3228
  rtx desta, srca, count;
3229
  rtx desto, srco, counto;
3230
 
3231
  desta = XEXP (operands[0], 0);
3232
  srca = XEXP (operands[1], 0);
3233
  count = operands[2];
3234
 
3235
  desto = gen_reg_rtx (Pmode);
3236
  srco = gen_reg_rtx (Pmode);
3237
  counto = gen_reg_rtx (HImode);
3238
 
3239
  if (GET_CODE (desta) != REG
3240
      || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3241
    desta = copy_to_mode_reg (Pmode, desta);
3242
 
3243
  if (GET_CODE (srca) != REG
3244
      || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3245
    srca = copy_to_mode_reg (Pmode, srca);
3246
 
3247
  /* Similar to setmem, but we don't need to check the value.  */
3248
  if (GET_CODE (count) == CONST_INT
3249
      && ! (INTVAL (count) & 1)
3250
      && (INTVAL (count) > 1))
3251
    {
3252
      count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3253
      if (TARGET_A16)
3254
        emit_insn (gen_movmemhi_whi_op (desto, srco, counto, desta, srca, count));
3255
      else
3256
        emit_insn (gen_movmemhi_wpsi_op (desto, srco, counto, desta, srca, count));
3257
      return 1;
3258
    }
3259
 
3260
  /* This is the generalized memcpy() case.  */
3261
  if (GET_CODE (count) != REG
3262
      || REGNO (count) < FIRST_PSEUDO_REGISTER)
3263
    count = copy_to_mode_reg (HImode, count);
3264
 
3265
  if (TARGET_A16)
3266
    emit_insn (gen_movmemhi_bhi_op (desto, srco, counto, desta, srca, count));
3267
  else
3268
    emit_insn (gen_movmemhi_bpsi_op (desto, srco, counto, desta, srca, count));
3269
 
3270
  return 1;
3271
}
3272
 
3273
/* This is a stpcpy() opcode.  $0 is the destination (MEM:BLK) after
3274
   the copy, which should point to the NUL at the end of the string,
3275
   $1 is the destination (MEM:BLK), and $2 is the source (MEM:BLK).
3276
   Since our opcode leaves the destination pointing *after* the NUL,
3277
   we must emit an adjustment.  */
3278
int
3279
m32c_expand_movstr(rtx *operands)
3280
{
3281
  rtx desta, srca;
3282
  rtx desto, srco;
3283
 
3284
  desta = XEXP (operands[1], 0);
3285
  srca = XEXP (operands[2], 0);
3286
 
3287
  desto = gen_reg_rtx (Pmode);
3288
  srco = gen_reg_rtx (Pmode);
3289
 
3290
  if (GET_CODE (desta) != REG
3291
      || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3292
    desta = copy_to_mode_reg (Pmode, desta);
3293
 
3294
  if (GET_CODE (srca) != REG
3295
      || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3296
    srca = copy_to_mode_reg (Pmode, srca);
3297
 
3298
  emit_insn (gen_movstr_op (desto, srco, desta, srca));
3299
  /* desto ends up being a1, which allows this type of add through MOVA.  */
3300
  emit_insn (gen_addpsi3 (operands[0], desto, GEN_INT (-1)));
3301
 
3302
  return 1;
3303
}
3304
 
3305
/* This is a strcmp() opcode.  $0 is the destination (HI) which holds
3306
   <=>0 depending on the comparison, $1 is one string (MEM:BLK), and
3307
   $2 is the other (MEM:BLK).  We must do the comparison, and then
3308
   convert the flags to a signed integer result.  */
3309
int
3310
m32c_expand_cmpstr(rtx *operands)
3311
{
3312
  rtx src1a, src2a;
3313
 
3314
  src1a = XEXP (operands[1], 0);
3315
  src2a = XEXP (operands[2], 0);
3316
 
3317
  if (GET_CODE (src1a) != REG
3318
      || REGNO (src1a) < FIRST_PSEUDO_REGISTER)
3319
    src1a = copy_to_mode_reg (Pmode, src1a);
3320
 
3321
  if (GET_CODE (src2a) != REG
3322
      || REGNO (src2a) < FIRST_PSEUDO_REGISTER)
3323
    src2a = copy_to_mode_reg (Pmode, src2a);
3324
 
3325
  emit_insn (gen_cmpstrhi_op (src1a, src2a, src1a, src2a));
3326
  emit_insn (gen_cond_to_int (operands[0]));
3327
 
3328
  return 1;
3329
}
3330
 
3331
 
3332
typedef rtx (*shift_gen_func)(rtx, rtx, rtx);
3333
 
3334
static shift_gen_func
3335
shift_gen_func_for (int mode, int code)
3336
{
3337
#define GFF(m,c,f) if (mode == m && code == c) return f
3338
  GFF(QImode,  ASHIFT,   gen_ashlqi3_i);
3339
  GFF(QImode,  ASHIFTRT, gen_ashrqi3_i);
3340
  GFF(QImode,  LSHIFTRT, gen_lshrqi3_i);
3341
  GFF(HImode,  ASHIFT,   gen_ashlhi3_i);
3342
  GFF(HImode,  ASHIFTRT, gen_ashrhi3_i);
3343
  GFF(HImode,  LSHIFTRT, gen_lshrhi3_i);
3344
  GFF(PSImode, ASHIFT,   gen_ashlpsi3_i);
3345
  GFF(PSImode, ASHIFTRT, gen_ashrpsi3_i);
3346
  GFF(PSImode, LSHIFTRT, gen_lshrpsi3_i);
3347
  GFF(SImode,  ASHIFT,   TARGET_A16 ? gen_ashlsi3_16 : gen_ashlsi3_24);
3348
  GFF(SImode,  ASHIFTRT, TARGET_A16 ? gen_ashrsi3_16 : gen_ashrsi3_24);
3349
  GFF(SImode,  LSHIFTRT, TARGET_A16 ? gen_lshrsi3_16 : gen_lshrsi3_24);
3350
#undef GFF
3351
  gcc_unreachable ();
3352
}
3353
 
3354
/* The m32c only has one shift, but it takes a signed count.  GCC
3355
   doesn't want this, so we fake it by negating any shift count when
3356
   we're pretending to shift the other way.  Also, the shift count is
3357
   limited to -8..8.  It's slightly better to use two shifts for 9..15
3358
   than to load the count into r1h, so we do that too.  */
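/* Illustrative note: with the -8..8 per-insn limit, a constant SImode
   shift by 20 on the A16 is emitted by the loop below as three shifts
   of 8, 8, and 4; only A24 PSImode/SImode shifts get the wider 32
   limit.  */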
3359
int
3360
m32c_prepare_shift (rtx * operands, int scale, int shift_code)
3361
{
3362
  enum machine_mode mode = GET_MODE (operands[0]);
3363
  shift_gen_func func = shift_gen_func_for (mode, shift_code);
3364
  rtx temp;
3365
 
3366
  if (GET_CODE (operands[2]) == CONST_INT)
3367
    {
3368
      int maxc = TARGET_A24 && (mode == PSImode || mode == SImode) ? 32 : 8;
3369
      int count = INTVAL (operands[2]) * scale;
3370
 
3371
      while (count > maxc)
3372
        {
3373
          temp = gen_reg_rtx (mode);
3374
          emit_insn (func (temp, operands[1], GEN_INT (maxc)));
3375
          operands[1] = temp;
3376
          count -= maxc;
3377
        }
3378
      while (count < -maxc)
3379
        {
3380
          temp = gen_reg_rtx (mode);
3381
          emit_insn (func (temp, operands[1], GEN_INT (-maxc)));
3382
          operands[1] = temp;
3383
          count += maxc;
3384
        }
3385
      emit_insn (func (operands[0], operands[1], GEN_INT (count)));
3386
      return 1;
3387
    }
3388
 
3389
  temp = gen_reg_rtx (QImode);
3390
  if (scale < 0)
3391
    /* The pattern has a NEG that corresponds to this. */
3392
    emit_move_insn (temp, gen_rtx_NEG (QImode, operands[2]));
3393
  else if (TARGET_A16 && mode == SImode)
3394
    /* We do this because the code below may modify this, we don't
3395
       want to modify the origin of this value.  */
3396
    emit_move_insn (temp, operands[2]);
3397
  else
3398
    /* We'll only use it for the shift, no point emitting a move.  */
3399
    temp = operands[2];
3400
 
3401
  if (TARGET_A16 && GET_MODE_SIZE (mode) == 4)
3402
    {
3403
      /* The m16c has a limit of -16..16 for SI shifts, even when the
3404
         shift count is in a register.  Since there are so many targets
3405
         of these shifts, it's better to expand the RTL here than to
3406
         call a helper function.
3407
 
3408
         The resulting code looks something like this:
3409
 
3410
                cmp.b   r1h,-16
3411
                jge.b   1f
3412
                shl.l   -16,dest
3413
                add.b   r1h,16
3414
        1f:     cmp.b   r1h,16
3415
                jle.b   1f
3416
                shl.l   16,dest
3417
                sub.b   r1h,16
3418
        1f:     shl.l   r1h,dest
3419
 
3420
         We take advantage of the fact that "negative" shifts are
3421
         undefined to skip one of the comparisons.  */
3422
 
3423
      rtx count;
3424
      rtx label, lref, insn, tempvar;
3425
 
3426
      emit_move_insn (operands[0], operands[1]);
3427
 
3428
      count = temp;
3429
      label = gen_label_rtx ();
3430
      lref = gen_rtx_LABEL_REF (VOIDmode, label);
3431
      LABEL_NUSES (label) ++;
3432
 
3433
      tempvar = gen_reg_rtx (mode);
3434
 
3435
      if (shift_code == ASHIFT)
3436
        {
3437
          /* This is a left shift.  We only need check positive counts.  */
3438
          emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode, 0, 0),
3439
                                          count, GEN_INT (16), label));
3440
          emit_insn (func (tempvar, operands[0], GEN_INT (8)));
3441
          emit_insn (func (operands[0], tempvar, GEN_INT (8)));
3442
          insn = emit_insn (gen_addqi3 (count, count, GEN_INT (-16)));
3443
          emit_label_after (label, insn);
3444
        }
3445
      else
3446
        {
3447
          /* This is a right shift.  We only need check negative counts.  */
3448
          emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode, 0, 0),
3449
                                          count, GEN_INT (-16), label));
3450
          emit_insn (func (tempvar, operands[0], GEN_INT (-8)));
3451
          emit_insn (func (operands[0], tempvar, GEN_INT (-8)));
3452
          insn = emit_insn (gen_addqi3 (count, count, GEN_INT (16)));
3453
          emit_label_after (label, insn);
3454
        }
3455
      operands[1] = operands[0];
3456
      emit_insn (func (operands[0], operands[0], count));
3457
      return 1;
3458
    }
3459
 
3460
  operands[2] = temp;
3461
  return 0;
3462
}
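
/* Worked example for the constant-count path above: an HImode ASHIFT
   by 20 (maxc is 8 there) is split as

       count = 20  ->  shift by 8, count = 12
                   ->  shift by 8, count = 4
                   ->  final shift by 4

   i.e. two intermediate pseudos and three shift insns, which avoids
   loading the count into r1h.  The figures are illustrative; the
   actual insns come from whichever gen_* function shift_gen_func_for
   selected.  */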

/* The m32c has a limited range of operations that work on PSImode
   values; we have to expand to SI, do the math, and truncate back to
   PSI.  Yes, this is expensive, but hopefully gcc will learn to avoid
   those cases.  */
void
m32c_expand_neg_mulpsi3 (rtx * operands)
{
  /* operands: a = b * i */
  rtx temp1; /* b as SI */
  rtx scale; /* i as SI */
  rtx temp2; /* b * i as SI */

  temp1 = gen_reg_rtx (SImode);
  temp2 = gen_reg_rtx (SImode);
  if (GET_CODE (operands[2]) != CONST_INT)
    {
      scale = gen_reg_rtx (SImode);
      emit_insn (gen_zero_extendpsisi2 (scale, operands[2]));
    }
  else
    scale = copy_to_mode_reg (SImode, operands[2]);

  emit_insn (gen_zero_extendpsisi2 (temp1, operands[1]));
  temp2 = expand_simple_binop (SImode, MULT, temp1, scale, temp2, 1, OPTAB_LIB);
  emit_insn (gen_truncsipsi2 (operands[0], temp2));
}
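
/* Roughly, for a PSImode multiply "a = b * i" the expander above emits
   (a sketch, not literal RTL):

       temp1:SI = zero_extend (b:PSI)
       scale:SI = zero_extend (i:PSI)     or a copy of the constant
       temp2:SI = temp1 * scale           possibly a library call
       a:PSI    = truncate (temp2:SI)

   so the 24-bit operation is done in 32 bits and truncated back.  */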

static rtx compare_op0, compare_op1;

void
m32c_pend_compare (rtx *operands)
{
  compare_op0 = operands[0];
  compare_op1 = operands[1];
}

void
m32c_unpend_compare (void)
{
  switch (GET_MODE (compare_op0))
    {
    case QImode:
      emit_insn (gen_cmpqi_op (compare_op0, compare_op1));
      break;
    case HImode:
      emit_insn (gen_cmphi_op (compare_op0, compare_op1));
      break;
    case PSImode:
      emit_insn (gen_cmppsi_op (compare_op0, compare_op1));
      break;
    default:
      gcc_unreachable ();
    }
}

void
m32c_expand_scc (int code, rtx *operands)
{
  enum machine_mode mode = TARGET_A16 ? QImode : HImode;

  emit_insn (gen_rtx_SET (mode,
                          operands[0],
                          gen_rtx_fmt_ee (code,
                                          mode,
                                          compare_op0,
                                          compare_op1)));
}
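
/* Sketch of how the three functions above fit together (inferred from
   this file; the matching expanders live in m32c.md):

       m32c_pend_compare (operands);
           (a compare expander just records its operands, emitting nothing)
       m32c_unpend_compare ();
           (the consumer later re-emits the real cmp[qi/hi/psi]_op)
       m32c_expand_scc (EQ, operands);
           (or builds an rtx that reads compare_op0/compare_op1 directly)

   so the hardware compare ends up next to the branch or scc that
   actually needs the flags.  */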

/* Pattern Output Functions */

/* Returns a (OP (reg:CC FLG_REGNO) (const_int 0)) from some other
   match_operand rtx's OP.  */
rtx
m32c_cmp_flg_0 (rtx cmp)
{
  return gen_rtx_fmt_ee (GET_CODE (cmp),
                         GET_MODE (cmp),
                         gen_rtx_REG (CCmode, FLG_REGNO),
                         GEN_INT (0));
}

int
m32c_expand_movcc (rtx *operands)
{
  rtx rel = operands[1];
  rtx cmp;

  if (GET_CODE (rel) != EQ && GET_CODE (rel) != NE)
    return 1;
  if (GET_CODE (operands[2]) != CONST_INT
      || GET_CODE (operands[3]) != CONST_INT)
    return 1;
  emit_insn (gen_cmpqi(XEXP (rel, 0), XEXP (rel, 1)));
  if (GET_CODE (rel) == NE)
    {
      rtx tmp = operands[2];
      operands[2] = operands[3];
      operands[3] = tmp;
    }

  cmp = gen_rtx_fmt_ee (GET_CODE (rel),
                        GET_MODE (rel),
                        compare_op0,
                        compare_op1);

  emit_move_insn (operands[0],
                  gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
                                        cmp,
                                        operands[2],
                                        operands[3]));
  return 0;
}

/* Used for the "insv" pattern.  Return nonzero to fail, else done.  */
int
m32c_expand_insv (rtx *operands)
{
  rtx op0, src0, p;
  int mask;

  if (INTVAL (operands[1]) != 1)
    return 1;

  /* Our insv opcode (bset, bclr) can only insert a one-bit constant.  */
  if (GET_CODE (operands[3]) != CONST_INT)
    return 1;
  if (INTVAL (operands[3]) != 0
      && INTVAL (operands[3]) != 1
      && INTVAL (operands[3]) != -1)
    return 1;

  mask = 1 << INTVAL (operands[2]);

  op0 = operands[0];
  if (GET_CODE (op0) == SUBREG
      && SUBREG_BYTE (op0) == 0)
    {
      rtx sub = SUBREG_REG (op0);
      if (GET_MODE (sub) == HImode || GET_MODE (sub) == QImode)
        op0 = sub;
    }

  if (no_new_pseudos
      || (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0)))
    src0 = op0;
  else
    {
      src0 = gen_reg_rtx (GET_MODE (op0));
      emit_move_insn (src0, op0);
    }

  if (GET_MODE (op0) == HImode
      && INTVAL (operands[2]) >= 8
      && GET_MODE (op0) == MEM)
    {
      /* We are little endian.  */
      rtx new_mem = gen_rtx_MEM (QImode, plus_constant (XEXP (op0, 0), 1));
      MEM_COPY_ATTRIBUTES (new_mem, op0);
      mask >>= 8;
    }

  /* First, we generate a mask with the correct polarity.  If we are
     storing a zero, we want an AND mask, so invert it.  */
  if (INTVAL (operands[3]) == 0)
    {
      /* Storing a zero, use an AND mask */
      if (GET_MODE (op0) == HImode)
        mask ^= 0xffff;
      else
        mask ^= 0xff;
    }
  /* Now we need to properly sign-extend the mask in case we need to
     fall back to an AND or OR opcode.  */
  if (GET_MODE (op0) == HImode)
    {
      if (mask & 0x8000)
        mask -= 0x10000;
    }
  else
    {
      if (mask & 0x80)
        mask -= 0x100;
    }

  switch (  (INTVAL (operands[3]) ? 4 : 0)
          + ((GET_MODE (op0) == HImode) ? 2 : 0)
          + (TARGET_A24 ? 1 : 0))
    {
    case 0: p = gen_andqi3_16 (op0, src0, GEN_INT (mask)); break;
    case 1: p = gen_andqi3_24 (op0, src0, GEN_INT (mask)); break;
    case 2: p = gen_andhi3_16 (op0, src0, GEN_INT (mask)); break;
    case 3: p = gen_andhi3_24 (op0, src0, GEN_INT (mask)); break;
    case 4: p = gen_iorqi3_16 (op0, src0, GEN_INT (mask)); break;
    case 5: p = gen_iorqi3_24 (op0, src0, GEN_INT (mask)); break;
    case 6: p = gen_iorhi3_16 (op0, src0, GEN_INT (mask)); break;
    case 7: p = gen_iorhi3_24 (op0, src0, GEN_INT (mask)); break;
    }

  emit_insn (p);
  return 0;
}
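
/* Worked example for the mask handling above: inserting the constant 0
   at bit 3 of an HImode destination gives mask = 1 << 3 = 0x0008, which
   is inverted to 0xfff7 (an AND mask) and then sign-extended to -9 so
   it prints as a proper 16-bit immediate; case 2 or 3 of the switch
   then emits the andhi3 variant.  Inserting a 1 instead leaves mask at
   0x0008 and selects the iorhi3 variant (case 6 or 7).  */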

const char *
m32c_scc_pattern(rtx *operands, RTX_CODE code)
{
  static char buf[30];
  if (GET_CODE (operands[0]) == REG
      && REGNO (operands[0]) == R0_REGNO)
    {
      if (code == EQ)
        return "stzx\t#1,#0,r0l";
      if (code == NE)
        return "stzx\t#0,#1,r0l";
    }
  sprintf(buf, "bm%s\t0,%%h0\n\tand.b\t#1,%%0", GET_RTX_NAME (code));
  return buf;
}
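
/* For example, an EQ result whose destination is r0 comes out as the
   single insn "stzx #1,#0,r0l", while any other destination falls back
   to the generic two-insn form, for EQ:

       bmeq  0,%h0
       and.b #1,%0

   where "eq" is just GET_RTX_NAME of the condition code.  */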

/* Returns TRUE if the current function is a leaf, and thus we can
   determine which registers an interrupt function really needs to
   save.  The logic below is mostly about finding the insn sequence
   that's the function, versus any sequence that might be open for the
   current insn.  */
static int
m32c_leaf_function_p (void)
{
  rtx saved_first, saved_last;
  struct sequence_stack *seq;
  int rv;

  saved_first = cfun->emit->x_first_insn;
  saved_last = cfun->emit->x_last_insn;
  for (seq = cfun->emit->sequence_stack; seq && seq->next; seq = seq->next)
    ;
  if (seq)
    {
      cfun->emit->x_first_insn = seq->first;
      cfun->emit->x_last_insn = seq->last;
    }

  rv = leaf_function_p ();

  cfun->emit->x_first_insn = saved_first;
  cfun->emit->x_last_insn = saved_last;
  return rv;
}

/* Returns TRUE if the current function needs to use the ENTER/EXIT
   opcodes.  If the function doesn't need the frame base or stack
   pointer, it can use the simpler RTS opcode.  */
static bool
m32c_function_needs_enter (void)
{
  rtx insn;
  struct sequence_stack *seq;
  rtx sp = gen_rtx_REG (Pmode, SP_REGNO);
  rtx fb = gen_rtx_REG (Pmode, FB_REGNO);

  insn = get_insns ();
  for (seq = cfun->emit->sequence_stack;
       seq;
       insn = seq->first, seq = seq->next);

  while (insn)
    {
      if (reg_mentioned_p (sp, insn))
        return true;
      if (reg_mentioned_p (fb, insn))
        return true;
      insn = NEXT_INSN (insn);
    }
  return false;
}

/* Mark all the subexpressions of the PARALLEL rtx PAR as
   frame-related.  Return PAR.

   dwarf2out.c:dwarf2out_frame_debug_expr ignores sub-expressions of a
   PARALLEL rtx other than the first if they do not have the
   FRAME_RELATED flag set on them.  So this function is handy for
   marking up 'enter' instructions.  */
static rtx
m32c_all_frame_related (rtx par)
{
  int len = XVECLEN (par, 0);
  int i;

  for (i = 0; i < len; i++)
    F (XVECEXP (par, 0, i));

  return par;
}

/* Emits the prologue.  See the frame layout comment earlier in this
   file.  We can reserve up to 256 bytes with the ENTER opcode, beyond
   that we manually update sp.  */
void
m32c_emit_prologue (void)
{
  int frame_size, extra_frame_size = 0, reg_save_size;
  int complex_prologue = 0;

  cfun->machine->is_leaf = m32c_leaf_function_p ();
  if (interrupt_p (cfun->decl))
    {
      cfun->machine->is_interrupt = 1;
      complex_prologue = 1;
    }

  reg_save_size = m32c_pushm_popm (PP_justcount);

  if (interrupt_p (cfun->decl))
    emit_insn (gen_pushm (GEN_INT (cfun->machine->intr_pushm)));

  frame_size =
    m32c_initial_elimination_offset (FB_REGNO, SP_REGNO) - reg_save_size;
  if (frame_size == 0
      && !cfun->machine->is_interrupt
      && !m32c_function_needs_enter ())
    cfun->machine->use_rts = 1;

  if (frame_size > 254)
    {
      extra_frame_size = frame_size - 254;
      frame_size = 254;
    }
  if (cfun->machine->use_rts == 0)
    F (emit_insn (m32c_all_frame_related
                  (TARGET_A16
                   ? gen_prologue_enter_16 (GEN_INT (frame_size))
                   : gen_prologue_enter_24 (GEN_INT (frame_size)))));

  if (extra_frame_size)
    {
      complex_prologue = 1;
      if (TARGET_A16)
        F (emit_insn (gen_addhi3 (gen_rtx_REG (HImode, SP_REGNO),
                                  gen_rtx_REG (HImode, SP_REGNO),
                                  GEN_INT (-extra_frame_size))));
      else
        F (emit_insn (gen_addpsi3 (gen_rtx_REG (PSImode, SP_REGNO),
                                   gen_rtx_REG (PSImode, SP_REGNO),
                                   GEN_INT (-extra_frame_size))));
    }

  complex_prologue += m32c_pushm_popm (PP_pushm);

  /* This just emits a comment into the .s file for debugging.  */
  if (complex_prologue)
    emit_insn (gen_prologue_end ());
}
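
/* Worked example of the size split above: a frame of 300 bytes (after
   the pushm register-save area has been subtracted) is emitted roughly
   as

       enter #254              gen_prologue_enter_16/24
       add   #-46,sp           gen_addhi3 or gen_addpsi3 on SP

   because the expander caps the ENTER operand at 254 and adjusts SP
   separately for the remainder.  A zero-size frame in a function that
   never touches FB or SP instead sets use_rts, and the epilogue returns
   with a plain RTS.  */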

/* Likewise, for the epilogue.  The only exception is that, for
   interrupts, we must manually unwind the frame as the REIT opcode
   doesn't do that.  */
void
m32c_emit_epilogue (void)
{
  /* This just emits a comment into the .s file for debugging.  */
  if (m32c_pushm_popm (PP_justcount) > 0 || cfun->machine->is_interrupt)
    emit_insn (gen_epilogue_start ());

  m32c_pushm_popm (PP_popm);

  if (cfun->machine->is_interrupt)
    {
      enum machine_mode spmode = TARGET_A16 ? HImode : PSImode;

      emit_move_insn (gen_rtx_REG (spmode, A0_REGNO),
                      gen_rtx_REG (spmode, FP_REGNO));
      emit_move_insn (gen_rtx_REG (spmode, SP_REGNO),
                      gen_rtx_REG (spmode, A0_REGNO));
      if (TARGET_A16)
        emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, FP_REGNO)));
      else
        emit_insn (gen_poppsi (gen_rtx_REG (PSImode, FP_REGNO)));
      emit_insn (gen_popm (GEN_INT (cfun->machine->intr_pushm)));
      emit_jump_insn (gen_epilogue_reit (GEN_INT (TARGET_A16 ? 4 : 6)));
    }
  else if (cfun->machine->use_rts)
    emit_jump_insn (gen_epilogue_rts ());
  else
    emit_jump_insn (gen_epilogue_exitd (GEN_INT (TARGET_A16 ? 2 : 4)));
  emit_barrier ();
}

void
m32c_emit_eh_epilogue (rtx ret_addr)
{
  /* R0[R2] has the stack adjustment.  R1[R3] has the address to
     return to.  We have to fudge the stack, pop everything, pop SP
     (fudged), and return (fudged).  This is actually easier to do in
     assembler, so punt to libgcc.  */
  emit_jump_insn (gen_eh_epilogue (ret_addr, cfun->machine->eh_stack_adjust));
  /*  emit_insn (gen_rtx_CLOBBER (HImode, gen_rtx_REG (HImode, R0L_REGNO))); */
  emit_barrier ();
}

/* Indicate which flags must be properly set for a given conditional.  */
static int
flags_needed_for_conditional (rtx cond)
{
  switch (GET_CODE (cond))
    {
    case LE:
    case GT:
      return FLAGS_OSZ;
    case LEU:
    case GTU:
      return FLAGS_ZC;
    case LT:
    case GE:
      return FLAGS_OS;
    case LTU:
    case GEU:
      return FLAGS_C;
    case EQ:
    case NE:
      return FLAGS_Z;
    default:
      return FLAGS_N;
    }
}

#define DEBUG_CMP 0

/* Returns true if a compare insn is redundant because it would only
   set flags that are already set correctly.  */
static bool
m32c_compare_redundant (rtx cmp, rtx *operands)
{
  int flags_needed;
  int pflags;
  rtx prev, pp, next;
  rtx op0, op1, op2;
#if DEBUG_CMP
  int prev_icode, i;
#endif

  op0 = operands[0];
  op1 = operands[1];
  op2 = operands[2];

#if DEBUG_CMP
  fprintf(stderr, "\n\033[32mm32c_compare_redundant\033[0m\n");
  debug_rtx(cmp);
  for (i=0; i<2; i++)
    {
      fprintf(stderr, "operands[%d] = ", i);
      debug_rtx(operands[i]);
    }
#endif

  next = next_nonnote_insn (cmp);
  if (!next || !INSN_P (next))
    {
#if DEBUG_CMP
      fprintf(stderr, "compare not followed by insn\n");
      debug_rtx(next);
#endif
      return false;
    }
  if (GET_CODE (PATTERN (next)) == SET
      && GET_CODE (XEXP ( PATTERN (next), 1)) == IF_THEN_ELSE)
    {
      next = XEXP (XEXP (PATTERN (next), 1), 0);
    }
  else if (GET_CODE (PATTERN (next)) == SET)
    {
      /* If this is a conditional, flags_needed will be something
         other than FLAGS_N, which we test below.  */
      next = XEXP (PATTERN (next), 1);
    }
  else
    {
#if DEBUG_CMP
      fprintf(stderr, "compare not followed by conditional\n");
      debug_rtx(next);
#endif
      return false;
    }
#if DEBUG_CMP
  fprintf(stderr, "conditional is: ");
  debug_rtx(next);
#endif

  flags_needed = flags_needed_for_conditional (next);
  if (flags_needed == FLAGS_N)
    {
#if DEBUG_CMP
      fprintf(stderr, "compare not followed by conditional\n");
      debug_rtx(next);
#endif
      return false;
    }

  /* Compare doesn't set overflow and carry the same way that
     arithmetic instructions do, so we can't replace those.  */
  if (flags_needed & FLAGS_OC)
    return false;

  prev = cmp;
  do {
    prev = prev_nonnote_insn (prev);
    if (!prev)
      {
#if DEBUG_CMP
        fprintf(stderr, "No previous insn.\n");
#endif
        return false;
      }
    if (!INSN_P (prev))
      {
#if DEBUG_CMP
        fprintf(stderr, "Previous insn is a non-insn.\n");
#endif
        return false;
      }
    pp = PATTERN (prev);
    if (GET_CODE (pp) != SET)
      {
#if DEBUG_CMP
        fprintf(stderr, "Previous insn is not a SET.\n");
#endif
        return false;
      }
    pflags = get_attr_flags (prev);

    /* Looking up attributes of previous insns corrupted the recog
       tables.  */
    INSN_UID (cmp) = -1;
    recog (PATTERN (cmp), cmp, 0);

    if (pflags == FLAGS_N
        && reg_mentioned_p (op0, pp))
      {
#if DEBUG_CMP
        fprintf(stderr, "intermediate non-flags insn uses op:\n");
        debug_rtx(prev);
#endif
        return false;
      }
  } while (pflags == FLAGS_N);
#if DEBUG_CMP
  fprintf(stderr, "previous flag-setting insn:\n");
  debug_rtx(prev);
  debug_rtx(pp);
#endif

  if (GET_CODE (pp) == SET
      && GET_CODE (XEXP (pp, 0)) == REG
      && REGNO (XEXP (pp, 0)) == FLG_REGNO
      && GET_CODE (XEXP (pp, 1)) == COMPARE)
    {
      /* Adjacent cbranches must have the same operands to be
         redundant.  */
      rtx pop0 = XEXP (XEXP (pp, 1), 0);
      rtx pop1 = XEXP (XEXP (pp, 1), 1);
#if DEBUG_CMP
      fprintf(stderr, "adjacent cbranches\n");
      debug_rtx(pop0);
      debug_rtx(pop1);
#endif
      if (rtx_equal_p (op0, pop0)
          && rtx_equal_p (op1, pop1))
        return true;
#if DEBUG_CMP
      fprintf(stderr, "prev cmp not same\n");
#endif
      return false;
    }

  /* Else the previous insn must be a SET, with either the source or
     dest equal to operands[0], and operands[1] must be zero.  */

  if (!rtx_equal_p (op1, const0_rtx))
    {
#if DEBUG_CMP
      fprintf(stderr, "operands[1] not const0_rtx\n");
#endif
      return false;
    }
  if (GET_CODE (pp) != SET)
    {
#if DEBUG_CMP
      fprintf (stderr, "pp not set\n");
#endif
      return false;
    }
  if (!rtx_equal_p (op0, SET_SRC (pp))
      && !rtx_equal_p (op0, SET_DEST (pp)))
    {
#if DEBUG_CMP
      fprintf(stderr, "operands[0] not found in set\n");
#endif
      return false;
    }

#if DEBUG_CMP
  fprintf(stderr, "cmp flags %x prev flags %x\n", flags_needed, pflags);
#endif
  if ((pflags & flags_needed) == flags_needed)
    return true;

  return false;
}
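
/* Illustrative case of what the function above looks for (mnemonics
   are approximate, and whether a given insn really sets Z comes from
   its "flags" attribute in m32c.md, not from this sketch):

       mov.w  _foo,r0         a SET whose flags attribute covers Z
       cmp.w  #0,r0           candidate compare
       jeq    .Llabel         conditional needing only FLAGS_Z

   Here operands[1] is zero, the previous SET mentions operands[0], and
   its flags already provide the Z bit the branch needs, so
   m32c_output_compare can comment the cmp out.  */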

/* Return the pattern for a compare.  This will be commented out if
   the compare is redundant, else a normal pattern is returned.  Thus,
   the assembler output says where the compare would have been.  */
char *
m32c_output_compare (rtx insn, rtx *operands)
{
  static char template[] = ";cmp.b\t%1,%0";
  /*                             ^ 5  */

  template[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands[0]))];
  if (m32c_compare_redundant (insn, operands))
    {
#if DEBUG_CMP
      fprintf(stderr, "cbranch: cmp not needed\n");
#endif
      return template;
    }

#if DEBUG_CMP
  fprintf(stderr, "cbranch: cmp needed: `%s'\n", template);
#endif
  return template + 1;
}
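
/* So for an HImode comparison the caller gets either "cmp.w\t%1,%0"
   (template + 1, the live compare) or ";cmp.w\t%1,%0" when the compare
   was judged redundant -- the leading ';' turns it into an assembler
   comment while still marking where the compare would have been.  */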

/* The Global `targetm' Variable. */

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-m32c.h"
