OpenCores
URL https://opencores.org/ocsvn/openrisc/openrisc/trunk

Subversion Repositories openrisc

[/] [openrisc/] [trunk/] [gnu-dev/] [or1k-gcc/] [gcc/] [config/] [m32c/] [m32c.c] - Blame information for rev 741

Go to most recent revision | Details | Compare with Previous | View Log

Line No. Rev Author Line
1 709 jeremybenn
/* Target Code for R8C/M16C/M32C
2
   Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011
3
   Free Software Foundation, Inc.
4
   Contributed by Red Hat.
5
 
6
   This file is part of GCC.
7
 
8
   GCC is free software; you can redistribute it and/or modify it
9
   under the terms of the GNU General Public License as published
10
   by the Free Software Foundation; either version 3, or (at your
11
   option) any later version.
12
 
13
   GCC is distributed in the hope that it will be useful, but WITHOUT
14
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
16
   License for more details.
17
 
18
   You should have received a copy of the GNU General Public License
19
   along with GCC; see the file COPYING3.  If not see
20
   <http://www.gnu.org/licenses/>.  */
21
 
22
#include "config.h"
23
#include "system.h"
24
#include "coretypes.h"
25
#include "tm.h"
26
#include "rtl.h"
27
#include "regs.h"
28
#include "hard-reg-set.h"
29
#include "insn-config.h"
30
#include "conditions.h"
31
#include "insn-flags.h"
32
#include "output.h"
33
#include "insn-attr.h"
34
#include "flags.h"
35
#include "recog.h"
36
#include "reload.h"
37
#include "diagnostic-core.h"
38
#include "obstack.h"
39
#include "tree.h"
40
#include "expr.h"
41
#include "optabs.h"
42
#include "except.h"
43
#include "function.h"
44
#include "ggc.h"
45
#include "target.h"
46
#include "target-def.h"
47
#include "tm_p.h"
48
#include "langhooks.h"
49
#include "gimple.h"
50
#include "df.h"
51
 
52
/* Prototypes */
53
 
54
/* Used by m32c_pushm_popm.  Selects whether that routine emits a
   pushm insn, a popm insn, or merely counts the bytes that such an
   insn would move.  */
typedef enum
{
  PP_pushm,
  PP_popm,
  PP_justcount
} Push_Pop_Type;
61
 
62
static bool m32c_function_needs_enter (void);
63
static tree interrupt_handler (tree *, tree, tree, int, bool *);
64
static tree function_vector_handler (tree *, tree, tree, int, bool *);
65
static int interrupt_p (tree node);
66
static int bank_switch_p (tree node);
67
static int fast_interrupt_p (tree node);
68
static int interrupt_p (tree node);
69
static bool m32c_asm_integer (rtx, unsigned int, int);
70
static int m32c_comp_type_attributes (const_tree, const_tree);
71
static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
72
static struct machine_function *m32c_init_machine_status (void);
73
static void m32c_insert_attributes (tree, tree *);
74
static bool m32c_legitimate_address_p (enum machine_mode, rtx, bool);
75
static bool m32c_addr_space_legitimate_address_p (enum machine_mode, rtx, bool, addr_space_t);
76
static rtx m32c_function_arg (cumulative_args_t, enum machine_mode,
77
                              const_tree, bool);
78
static bool m32c_pass_by_reference (cumulative_args_t, enum machine_mode,
79
                                    const_tree, bool);
80
static void m32c_function_arg_advance (cumulative_args_t, enum machine_mode,
81
                                       const_tree, bool);
82
static unsigned int m32c_function_arg_boundary (enum machine_mode, const_tree);
83
static int m32c_pushm_popm (Push_Pop_Type);
84
static bool m32c_strict_argument_naming (cumulative_args_t);
85
static rtx m32c_struct_value_rtx (tree, int);
86
static rtx m32c_subreg (enum machine_mode, rtx, enum machine_mode, int);
87
static int need_to_save (int);
88
static rtx m32c_function_value (const_tree, const_tree, bool);
89
static rtx m32c_libcall_value (enum machine_mode, const_rtx);
90
 
91
/* Returns true if an address is specified, else false.  */
92
static bool m32c_get_pragma_address (const char *varname, unsigned *addr);
93
 
94
#define SYMBOL_FLAG_FUNCVEC_FUNCTION    (SYMBOL_FLAG_MACH_DEP << 0)

/* Convenience wrapper: true when strings A and B compare equal.  */
#define streq(a,b) (strcmp ((a), (b)) == 0)

/* Internal support routines */

/* Debugging statements are tagged with DEBUG0 only so that they can
   be easily enabled individually, by replacing the '0' with '1' as
   needed.  */
#define DEBUG0 0
#define DEBUG1 1

#if DEBUG0
/* This is needed by some of the commented-out debug statements
   below.  */
static char const *class_names[LIM_REG_CLASSES] = REG_CLASS_NAMES;
#endif
/* One-word hard-register mask per register class, used for the raw
   bit tests later in this file.  */
static int class_contents[LIM_REG_CLASSES][1] = REG_CLASS_CONTENTS;

/* These are all to support encode_pattern().  pattern[] receives the
   encoded string, patternp is the write cursor, and patternr[i] is
   the rtx node that produced pattern[i].  */
static char pattern[30], *patternp;
static GTY(()) rtx patternr[30];
#define RTX_IS(x) (streq (pattern, x))

/* Some macros to simplify the logic throughout this file.  */
#define IS_MEM_REGNO(regno) ((regno) >= MEM0_REGNO && (regno) <= MEM7_REGNO)
#define IS_MEM_REG(rtx) (GET_CODE (rtx) == REG && IS_MEM_REGNO (REGNO (rtx)))

#define IS_CR_REGNO(regno) ((regno) >= SB_REGNO && (regno) <= PC_REGNO)
#define IS_CR_REG(rtx) (GET_CODE (rtx) == REG && IS_CR_REGNO (REGNO (rtx)))
124
 
125
static int
126
far_addr_space_p (rtx x)
127
{
128
  if (GET_CODE (x) != MEM)
129
    return 0;
130
#if DEBUG0
131
  fprintf(stderr, "\033[35mfar_addr_space: "); debug_rtx(x);
132
  fprintf(stderr, " = %d\033[0m\n", MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR);
133
#endif
134
  return MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR;
135
}
136
 
137
/* We do most RTX matching by converting the RTX into a string, and
138
   using string compares.  This vastly simplifies the logic in many of
139
   the functions in this file.
140
 
141
   On exit, pattern[] has the encoded string (use RTX_IS("...") to
142
   compare it) and patternr[] has pointers to the nodes in the RTX
143
   corresponding to each character in the encoded string.  The latter
144
   is mostly used by print_operand().
145
 
146
   Unrecognized patterns have '?' in them; this shows up when the
147
   assembler complains about syntax errors.
148
*/
149
 
150
/* Recursive worker for encode_pattern().  Appends the encoding of X
   to pattern[], recording in patternr[] which rtx node produced each
   character.  */
static void
encode_pattern_1 (rtx x)
{
  int i;

  /* Truncation guard: when the buffer is nearly full, poison the
     last character with '?' so the pattern cannot match anything.  */
  if (patternp == pattern + sizeof (pattern) - 2)
    {
      patternp[-1] = '?';
      return;
    }

  patternr[patternp - pattern] = x;

  switch (GET_CODE (x))
    {
    case REG:
      *patternp++ = 'r';
      break;
    case SUBREG:
      /* Only mode-size-changing subregs get an explicit 'S'.  */
      if (GET_MODE_SIZE (GET_MODE (x)) !=
          GET_MODE_SIZE (GET_MODE (XEXP (x, 0))))
        *patternp++ = 'S';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case MEM:
      *patternp++ = 'm';
      /* FALLTHRU - a MEM is 'm' followed by the encoding of its
         address, which is exactly what the CONST case emits.  */
    case CONST:
      encode_pattern_1 (XEXP (x, 0));
      break;
    case SIGN_EXTEND:
      *patternp++ = '^';
      *patternp++ = 'S';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case ZERO_EXTEND:
      *patternp++ = '^';
      *patternp++ = 'Z';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case PLUS:
      *patternp++ = '+';
      encode_pattern_1 (XEXP (x, 0));
      encode_pattern_1 (XEXP (x, 1));
      break;
    case PRE_DEC:
      *patternp++ = '>';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case POST_INC:
      *patternp++ = '<';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case LO_SUM:
      *patternp++ = 'L';
      encode_pattern_1 (XEXP (x, 0));
      encode_pattern_1 (XEXP (x, 1));
      break;
    case HIGH:
      *patternp++ = 'H';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case SYMBOL_REF:
      *patternp++ = 's';
      break;
    case LABEL_REF:
      *patternp++ = 'l';
      break;
    case CODE_LABEL:
      *patternp++ = 'c';
      break;
    case CONST_INT:
    case CONST_DOUBLE:
      *patternp++ = 'i';
      break;
    case UNSPEC:
      /* 'u' plus the unspec number as a digit; assumes the number is
         in 0..9.  */
      *patternp++ = 'u';
      *patternp++ = '0' + XCINT (x, 1, UNSPEC);
      for (i = 0; i < XVECLEN (x, 0); i++)
        encode_pattern_1 (XVECEXP (x, 0, i));
      break;
    case USE:
      *patternp++ = 'U';
      break;
    case PARALLEL:
      *patternp++ = '|';
      for (i = 0; i < XVECLEN (x, 0); i++)
        encode_pattern_1 (XVECEXP (x, 0, i));
      break;
    case EXPR_LIST:
      *patternp++ = 'E';
      encode_pattern_1 (XEXP (x, 0));
      if (XEXP (x, 1))
        encode_pattern_1 (XEXP (x, 1));
      break;
    default:
      /* Unknown codes encode as '?', which matches no pattern.  */
      *patternp++ = '?';
#if DEBUG0
      fprintf (stderr, "can't encode pattern %s\n",
               GET_RTX_NAME (GET_CODE (x)));
      debug_rtx (x);
      gcc_unreachable ();
#endif
      break;
    }
}
255
 
256
static void
257
encode_pattern (rtx x)
258
{
259
  patternp = pattern;
260
  encode_pattern_1 (x);
261
  *patternp = 0;
262
}
263
 
264
/* Since register names indicate the mode they're used in, we need a
265
   way to determine which name to refer to the register with.  Called
266
   by print_operand().  */
267
 
268
static const char *
269
reg_name_with_mode (int regno, enum machine_mode mode)
270
{
271
  int mlen = GET_MODE_SIZE (mode);
272
  if (regno == R0_REGNO && mlen == 1)
273
    return "r0l";
274
  if (regno == R0_REGNO && (mlen == 3 || mlen == 4))
275
    return "r2r0";
276
  if (regno == R0_REGNO && mlen == 6)
277
    return "r2r1r0";
278
  if (regno == R0_REGNO && mlen == 8)
279
    return "r3r1r2r0";
280
  if (regno == R1_REGNO && mlen == 1)
281
    return "r1l";
282
  if (regno == R1_REGNO && (mlen == 3 || mlen == 4))
283
    return "r3r1";
284
  if (regno == A0_REGNO && TARGET_A16 && (mlen == 3 || mlen == 4))
285
    return "a1a0";
286
  return reg_names[regno];
287
}
288
 
289
/* How many bytes a register uses on stack when it's pushed.  We need
290
   to know this because the push opcode needs to explicitly indicate
291
   the size of the register, even though the name of the register
292
   already tells it that.  Used by m32c_output_reg_{push,pop}, which
293
   is only used through calls to ASM_OUTPUT_REG_{PUSH,POP}.  */
294
 
295
static int
296
reg_push_size (int regno)
297
{
298
  switch (regno)
299
    {
300
    case R0_REGNO:
301
    case R1_REGNO:
302
      return 2;
303
    case R2_REGNO:
304
    case R3_REGNO:
305
    case FLG_REGNO:
306
      return 2;
307
    case A0_REGNO:
308
    case A1_REGNO:
309
    case SB_REGNO:
310
    case FB_REGNO:
311
    case SP_REGNO:
312
      if (TARGET_A16)
313
        return 2;
314
      else
315
        return 3;
316
    default:
317
      gcc_unreachable ();
318
    }
319
}
320
 
321
/* Given two register classes, find the largest intersection between
   them.  If there is no intersection, return RETURNED_IF_EMPTY
   instead.  "Largest" means the register class with the most hard
   registers that is wholly contained in the intersection.  */
static reg_class_t
reduce_class (reg_class_t original_class, reg_class_t limiting_class,
              reg_class_t returned_if_empty)
{
  HARD_REG_SET cc;
  int i;
  reg_class_t best = NO_REGS;
  unsigned int best_size = 0;

  if (original_class == limiting_class)
    return original_class;

  /* cc = set of hard registers common to both classes.  */
  cc = reg_class_contents[original_class];
  AND_HARD_REG_SET (cc, reg_class_contents[limiting_class]);

  /* Pick the biggest class that is a subset of the intersection.  */
  for (i = 0; i < LIM_REG_CLASSES; i++)
    {
      if (hard_reg_set_subset_p (reg_class_contents[i], cc))
        if (best_size < reg_class_size[i])
          {
            best = (reg_class_t) i;
            best_size = reg_class_size[i];
          }

    }
  if (best == NO_REGS)
    return returned_if_empty;
  return best;
}
353
 
354
/* Used by m32c_register_move_cost to determine if a move is
   impossibly expensive.  Returns true if any register in RCLASS can
   hold a value of MODE.  */
static bool
class_can_hold_mode (reg_class_t rclass, enum machine_mode mode)
{
  /* Cache the results:  0=untested  1=no  2=yes */
  static char results[LIM_REG_CLASSES][MAX_MACHINE_MODE];

  if (results[(int) rclass][mode] == 0)
    {
      int r;
      /* Assume "no" until a suitable register is found.  */
      results[rclass][mode] = 1;
      for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
        if (in_hard_reg_set_p (reg_class_contents[(int) rclass], mode, r)
            && HARD_REGNO_MODE_OK (r, mode))
          {
            results[rclass][mode] = 2;
            break;
          }
    }

#if DEBUG0
  fprintf (stderr, "class %s can hold %s? %s\n",
           class_names[(int) rclass], mode_name[mode],
           (results[rclass][mode] == 2) ? "yes" : "no");
#endif
  return results[(int) rclass][mode] == 2;
}
382
 
383
/* Run-time Target Specification.  */
384
 
385
/* Memregs are memory locations that gcc treats like general
   registers, as there are a limited number of true registers and the
   m32c families can use memory in most places that registers can be
   used.

   However, since memory accesses are more expensive than registers,
   we allow the user to limit the number of memregs available, in
   order to try to persuade gcc to try harder to use real registers.

   Memregs are provided by lib1funcs.S.
*/

/* NOTE(review): presumably toggled elsewhere to gate memreg-related
   changes - confirm against the rest of this file.  */
int ok_to_change_target_memregs = TRUE;
398
 
399
/* Implements TARGET_OPTION_OVERRIDE.  */
400
 
401
#undef TARGET_OPTION_OVERRIDE
402
#define TARGET_OPTION_OVERRIDE m32c_option_override
403
 
404
static void
405
m32c_option_override (void)
406
{
407
  /* We limit memregs to 0..16, and provide a default.  */
408
  if (global_options_set.x_target_memregs)
409
    {
410
      if (target_memregs < 0 || target_memregs > 16)
411
        error ("invalid target memregs value '%d'", target_memregs);
412
    }
413
  else
414
    target_memregs = 16;
415
 
416
  if (TARGET_A24)
417
    flag_ivopts = 0;
418
 
419
  /* This target defaults to strict volatile bitfields.  */
420
  if (flag_strict_volatile_bitfields < 0 && abi_version_at_least(2))
421
    flag_strict_volatile_bitfields = 1;
422
 
423
  /* r8c/m16c have no 16-bit indirect call, so thunks are involved.
424
     This is always worse than an absolute call.  */
425
  if (TARGET_A16)
426
    flag_no_function_cse = 1;
427
 
428
  /* This wants to put insns between compares and their jumps.  */
429
  /* FIXME: The right solution is to properly trace the flags register
430
     values, but that is too much work for stage 4.  */
431
  flag_combine_stack_adjustments = 0;
432
}
433
 
434
#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
435
#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m32c_override_options_after_change
436
 
437
static void
438
m32c_override_options_after_change (void)
439
{
440
  if (TARGET_A16)
441
    flag_no_function_cse = 1;
442
}
443
 
444
/* Defining data structures for per-function information */
445
 
446
/* The usual; we set up our machine_function data.  Returns a
   zero-initialized, garbage-collected machine_function.  */
static struct machine_function *
m32c_init_machine_status (void)
{
  return ggc_alloc_cleared_machine_function ();
}
452
 
453
/* Implements INIT_EXPANDERS.  We just set up to call the above
   function.  */
void
m32c_init_expanders (void)
{
  init_machine_status = m32c_init_machine_status;
}
460
 
461
/* Storage Layout */
462
 
463
/* Register Basics */
464
 
465
/* Basic Characteristics of Registers */
466
 
467
/* Whether a mode fits in a register is complex enough to warrant a
   table.  Each column gives the number of hard registers needed to
   hold a value of (roughly) that mode size in the given register; 0
   means the register cannot hold such a value at all.  The row order
   follows the hard register numbering (note r0, r2, r1, r3).  */
static struct
{
  char qi_regs;
  char hi_regs;
  char pi_regs;
  char si_regs;
  char di_regs;
} nregs_table[FIRST_PSEUDO_REGISTER] =
{
  { 1, 1, 2, 2, 4 },            /* r0 */
  { 0, 1, 0, 0, 0 },                /* r2 */
  { 1, 1, 2, 2, 0 },             /* r1 */
  { 0, 1, 0, 0, 0 },                /* r3 */
  { 0, 1, 1, 0, 0 },               /* a0 */
  { 0, 1, 1, 0, 0 },               /* a1 */
  { 0, 1, 1, 0, 0 },               /* sb */
  { 0, 1, 1, 0, 0 },               /* fb */
  { 0, 1, 1, 0, 0 },               /* sp */
  { 1, 1, 1, 0, 0 },              /* pc */
  { 0, 0, 0, 0, 0 },         /* fl */
  { 1, 1, 1, 0, 0 },              /* ap */
  { 1, 1, 2, 2, 4 },            /* mem0 */
  { 1, 1, 2, 2, 4 },            /* mem1 */
  { 1, 1, 2, 2, 4 },            /* mem2 */
  { 1, 1, 2, 2, 4 },            /* mem3 */
  { 1, 1, 2, 2, 4 },            /* mem4 */
  { 1, 1, 2, 2, 0 },             /* mem5 */
  { 1, 1, 2, 2, 0 },             /* mem6 */
  { 1, 1, 0, 0, 0 },               /* mem7 */
};
499
 
500
/* Implements TARGET_CONDITIONAL_REGISTER_USAGE.  We adjust the number
501
   of available memregs, and select which registers need to be preserved
502
   across calls based on the chip family.  */
503
 
504
#undef TARGET_CONDITIONAL_REGISTER_USAGE
505
#define TARGET_CONDITIONAL_REGISTER_USAGE m32c_conditional_register_usage
506
void
507
m32c_conditional_register_usage (void)
508
{
509
  int i;
510
 
511
  if (0 <= target_memregs && target_memregs <= 16)
512
    {
513
      /* The command line option is bytes, but our "registers" are
514
         16-bit words.  */
515
      for (i = (target_memregs+1)/2; i < 8; i++)
516
        {
517
          fixed_regs[MEM0_REGNO + i] = 1;
518
          CLEAR_HARD_REG_BIT (reg_class_contents[MEM_REGS], MEM0_REGNO + i);
519
        }
520
    }
521
 
522
  /* M32CM and M32C preserve more registers across function calls.  */
523
  if (TARGET_A24)
524
    {
525
      call_used_regs[R1_REGNO] = 0;
526
      call_used_regs[R2_REGNO] = 0;
527
      call_used_regs[R3_REGNO] = 0;
528
      call_used_regs[A0_REGNO] = 0;
529
      call_used_regs[A1_REGNO] = 0;
530
    }
531
}
532
 
533
/* How Values Fit in Registers */
534
 
535
/* Implements HARD_REGNO_NREGS.  This is complicated by the fact that
   different registers are different sizes from each other, *and* may
   be different sizes in different chip families.  Returns 0 when the
   register cannot hold MODE at all; the order of the checks below is
   significant.  */
static int
m32c_hard_regno_nregs_1 (int regno, enum machine_mode mode)
{
  if (regno == FLG_REGNO && mode == CCmode)
    return 1;
  /* Pseudos: plain word count.  */
  if (regno >= FIRST_PSEUDO_REGISTER)
    return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);

  /* Memregs are 16-bit words.  */
  if (regno >= MEM0_REGNO && regno <= MEM7_REGNO)
    return (GET_MODE_SIZE (mode) + 1) / 2;

  /* Hard registers: consult the table by mode size.  */
  if (GET_MODE_SIZE (mode) <= 1)
    return nregs_table[regno].qi_regs;
  if (GET_MODE_SIZE (mode) <= 2)
    return nregs_table[regno].hi_regs;
  if (regno == A0_REGNO && mode == SImode && TARGET_A16)
    return 2;
  if ((GET_MODE_SIZE (mode) <= 3 || mode == PSImode) && TARGET_A24)
    return nregs_table[regno].pi_regs;
  if (GET_MODE_SIZE (mode) <= 4)
    return nregs_table[regno].si_regs;
  if (GET_MODE_SIZE (mode) <= 8)
    return nregs_table[regno].di_regs;
  return 0;
}
563
 
564
int
565
m32c_hard_regno_nregs (int regno, enum machine_mode mode)
566
{
567
  int rv = m32c_hard_regno_nregs_1 (regno, mode);
568
  return rv ? rv : 1;
569
}
570
 
571
/* Implements HARD_REGNO_MODE_OK.  The above function does the work
572
   already; just test its return value.  */
573
int
574
m32c_hard_regno_ok (int regno, enum machine_mode mode)
575
{
576
  return m32c_hard_regno_nregs_1 (regno, mode) != 0;
577
}
578
 
579
/* Implements MODES_TIEABLE_P.  In general, modes aren't tieable since
580
   registers are all different sizes.  However, since most modes are
581
   bigger than our registers anyway, it's easier to implement this
582
   function that way, leaving QImode as the only unique case.  */
583
int
584
m32c_modes_tieable_p (enum machine_mode m1, enum machine_mode m2)
585
{
586
  if (GET_MODE_SIZE (m1) == GET_MODE_SIZE (m2))
587
    return 1;
588
 
589
#if 0
590
  if (m1 == QImode || m2 == QImode)
591
    return 0;
592
#endif
593
 
594
  return 1;
595
}
596
 
597
/* Register Classes */
598
 
599
/* Implements REGNO_REG_CLASS.  Most hard registers are in a singleton
   class of their own; memregs share MEM_REGS, and anything else falls
   back to ALL_REGS.  */
enum reg_class
m32c_regno_reg_class (int regno)
{
  switch (regno)
    {
    case R0_REGNO:
      return R0_REGS;
    case R1_REGNO:
      return R1_REGS;
    case R2_REGNO:
      return R2_REGS;
    case R3_REGNO:
      return R3_REGS;
    case A0_REGNO:
      return A0_REGS;
    case A1_REGNO:
      return A1_REGS;
    case SB_REGNO:
      return SB_REGS;
    case FB_REGNO:
      return FB_REGS;
    case SP_REGNO:
      return SP_REGS;
    case FLG_REGNO:
      return FLG_REGS;
    default:
      if (IS_MEM_REGNO (regno))
        return MEM_REGS;
      return ALL_REGS;
    }
}
631
 
632
/* Implements REG_CLASS_FROM_CONSTRAINT.  Note that some constraints only match
   for certain chip families.  Register constraints are three
   characters starting with 'R'; an unrecognized 'R' constraint is a
   fatal internal error.  */
int
m32c_reg_class_from_constraint (char c ATTRIBUTE_UNUSED, const char *s)
{
  /* Control/special registers.  */
  if (memcmp (s, "Rsp", 3) == 0)
    return SP_REGS;
  if (memcmp (s, "Rfb", 3) == 0)
    return FB_REGS;
  if (memcmp (s, "Rsb", 3) == 0)
    return SB_REGS;
  /* Rcr/Rcl: control registers, but only on the matching family.  */
  if (memcmp (s, "Rcr", 3) == 0)
    return TARGET_A16 ? CR_REGS : NO_REGS;
  if (memcmp (s, "Rcl", 3) == 0)
    return TARGET_A24 ? CR_REGS : NO_REGS;
  /* Individual data registers and pairs.  */
  if (memcmp (s, "R0w", 3) == 0)
    return R0_REGS;
  if (memcmp (s, "R1w", 3) == 0)
    return R1_REGS;
  if (memcmp (s, "R2w", 3) == 0)
    return R2_REGS;
  if (memcmp (s, "R3w", 3) == 0)
    return R3_REGS;
  if (memcmp (s, "R02", 3) == 0)
    return R02_REGS;
  if (memcmp (s, "R13", 3) == 0)
    return R13_REGS;
  if (memcmp (s, "R03", 3) == 0)
    return R03_REGS;
  if (memcmp (s, "Rdi", 3) == 0)
    return DI_REGS;
  if (memcmp (s, "Rhl", 3) == 0)
    return HL_REGS;
  if (memcmp (s, "R23", 3) == 0)
    return R23_REGS;
  /* Address registers.  */
  if (memcmp (s, "Ra0", 3) == 0)
    return A0_REGS;
  if (memcmp (s, "Ra1", 3) == 0)
    return A1_REGS;
  if (memcmp (s, "Raa", 3) == 0)
    return A_REGS;
  if (memcmp (s, "Raw", 3) == 0)
    return TARGET_A16 ? A_REGS : NO_REGS;
  if (memcmp (s, "Ral", 3) == 0)
    return TARGET_A24 ? A_REGS : NO_REGS;
  /* Composite classes.  */
  if (memcmp (s, "Rqi", 3) == 0)
    return QI_REGS;
  if (memcmp (s, "Rad", 3) == 0)
    return AD_REGS;
  if (memcmp (s, "Rsi", 3) == 0)
    return SI_REGS;
  if (memcmp (s, "Rhi", 3) == 0)
    return HI_REGS;
  if (memcmp (s, "Rhc", 3) == 0)
    return HC_REGS;
  if (memcmp (s, "Rra", 3) == 0)
    return RA_REGS;
  if (memcmp (s, "Rfl", 3) == 0)
    return FLG_REGS;
  /* Memregs, but only when any are enabled.  */
  if (memcmp (s, "Rmm", 3) == 0)
    {
      if (fixed_regs[MEM0_REGNO])
        return NO_REGS;
      return MEM_REGS;
    }

  /* PSImode registers - i.e. whatever can hold a pointer.  */
  if (memcmp (s, "Rpi", 3) == 0)
    {
      if (TARGET_A16)
        return HI_REGS;
      else
        return RA_REGS; /* r2r0 and r3r1 can hold pointers.  */
    }

  /* We handle this one as an EXTRA_CONSTRAINT.  */
  if (memcmp (s, "Rpa", 3) == 0)
    return NO_REGS;

  if (*s == 'R')
    {
      fprintf(stderr, "unrecognized R constraint: %.3s\n", s);
      gcc_unreachable();
    }

  return NO_REGS;
}
719
 
720
/* Implements REGNO_OK_FOR_BASE_P.  */
721
int
722
m32c_regno_ok_for_base_p (int regno)
723
{
724
  if (regno == A0_REGNO
725
      || regno == A1_REGNO || regno >= FIRST_PSEUDO_REGISTER)
726
    return 1;
727
  return 0;
728
}
729
 
730
#define DEBUG_RELOAD 0
731
 
732
/* Implements TARGET_PREFERRED_RELOAD_CLASS.  In general, prefer general
   registers of the appropriate size.  */

#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS m32c_preferred_reload_class

static reg_class_t
m32c_preferred_reload_class (rtx x, reg_class_t rclass)
{
  reg_class_t newclass = rclass;

#if DEBUG_RELOAD
  fprintf (stderr, "\npreferred_reload_class for %s is ",
           class_names[rclass]);
#endif
  /* With no class at all, start from a general class sized for X.  */
  if (rclass == NO_REGS)
    rclass = GET_MODE (x) == QImode ? HL_REGS : R03_REGS;

  /* Control registers: only the QImode-capable subset is narrowed.  */
  if (reg_classes_intersect_p (rclass, CR_REGS))
    {
      switch (GET_MODE (x))
        {
        case QImode:
          newclass = HL_REGS;
          break;
        default:
          /*      newclass = HI_REGS; */
          break;
        }
    }

  /* Widen the target class when X is too big for the candidate.  */
  else if (newclass == QI_REGS && GET_MODE_SIZE (GET_MODE (x)) > 2)
    newclass = SI_REGS;
  else if (GET_MODE_SIZE (GET_MODE (x)) > 4
           && ! reg_class_subset_p (R03_REGS, rclass))
    newclass = DI_REGS;

  /* Intersect, falling back to the original class if empty.  */
  rclass = reduce_class (rclass, newclass, rclass);

  if (GET_MODE (x) == QImode)
    rclass = reduce_class (rclass, HL_REGS, rclass);

#if DEBUG_RELOAD
  fprintf (stderr, "%s\n", class_names[rclass]);
  debug_rtx (x);

  if (GET_CODE (x) == MEM
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
    fprintf (stderr, "Glorm!\n");
#endif
  return rclass;
}
785
 
786
/* Implements TARGET_PREFERRED_OUTPUT_RELOAD_CLASS.  Output reloads
   use the same class preferences as input reloads.  */

#undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
#define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS m32c_preferred_output_reload_class

static reg_class_t
m32c_preferred_output_reload_class (rtx x, reg_class_t rclass)
{
  return m32c_preferred_reload_class (x, rclass);
}
796
 
797
/* Implements LIMIT_RELOAD_CLASS.  We basically want to avoid using
798
   address registers for reloads since they're needed for address
799
   reloads.  */
800
int
801
m32c_limit_reload_class (enum machine_mode mode, int rclass)
802
{
803
#if DEBUG_RELOAD
804
  fprintf (stderr, "limit_reload_class for %s: %s ->",
805
           mode_name[mode], class_names[rclass]);
806
#endif
807
 
808
  if (mode == QImode)
809
    rclass = reduce_class (rclass, HL_REGS, rclass);
810
  else if (mode == HImode)
811
    rclass = reduce_class (rclass, HI_REGS, rclass);
812
  else if (mode == SImode)
813
    rclass = reduce_class (rclass, SI_REGS, rclass);
814
 
815
  if (rclass != A_REGS)
816
    rclass = reduce_class (rclass, DI_REGS, rclass);
817
 
818
#if DEBUG_RELOAD
819
  fprintf (stderr, " %s\n", class_names[rclass]);
820
#endif
821
  return rclass;
822
}
823
 
824
/* Implements SECONDARY_RELOAD_CLASS.  QImode have to be reloaded in
   r0 or r1, as those are the only real QImode registers.  CR regs get
   reloaded through appropriately sized general or address
   registers.  */
int
m32c_secondary_reload_class (int rclass, enum machine_mode mode, rtx x)
{
  /* Hard-register mask for RCLASS (one word).  */
  int cc = class_contents[rclass][0];
#if DEBUG0
  fprintf (stderr, "\nsecondary reload class %s %s\n",
           class_names[rclass], mode_name[mode]);
  debug_rtx (x);
#endif
  /* QImode memory into a class outside r2/r3 goes through QI_REGS.  */
  if (mode == QImode
      && GET_CODE (x) == MEM && (cc & ~class_contents[R23_REGS][0]) == 0)
    return QI_REGS;
  /* Control registers reload via general or address registers.  */
  if (reg_classes_intersect_p (rclass, CR_REGS)
      && GET_CODE (x) == REG
      && REGNO (x) >= SB_REGNO && REGNO (x) <= SP_REGNO)
    return (TARGET_A16 || mode == HImode) ? HI_REGS : A_REGS;
  return NO_REGS;
}
846
 
847
/* Implements TARGET_CLASS_LIKELY_SPILLED_P.  A_REGS is needed for address
848
   reloads.  */
849
 
850
#undef TARGET_CLASS_LIKELY_SPILLED_P
851
#define TARGET_CLASS_LIKELY_SPILLED_P m32c_class_likely_spilled_p
852
 
853
static bool
854
m32c_class_likely_spilled_p (reg_class_t regclass)
855
{
856
  if (regclass == A_REGS)
857
    return true;
858
 
859
  return (reg_class_size[(int) regclass] == 1);
860
}
861
 
862
/* Implements TARGET_CLASS_MAX_NREGS.  We calculate this according to its
863
   documented meaning, to avoid potential inconsistencies with actual
864
   class definitions.  */
865
 
866
#undef TARGET_CLASS_MAX_NREGS
867
#define TARGET_CLASS_MAX_NREGS m32c_class_max_nregs
868
 
869
static unsigned char
870
m32c_class_max_nregs (reg_class_t regclass, enum machine_mode mode)
871
{
872
  int rn;
873
  unsigned char max = 0;
874
 
875
  for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
876
    if (TEST_HARD_REG_BIT (reg_class_contents[(int) regclass], rn))
877
      {
878
        unsigned char n = m32c_hard_regno_nregs (rn, mode);
879
        if (max < n)
880
          max = n;
881
      }
882
  return max;
883
}
884
 
885
/* Implements CANNOT_CHANGE_MODE_CLASS.  Only r0 and r1 can change to
   QI (r0l, r1l) because the chip doesn't support QI ops on other
   registers (well, it does on a0/a1 but if we let gcc do that, reload
   suffers).  Otherwise, we allow changes to larger modes.  */
int
m32c_cannot_change_mode_class (enum machine_mode from,
                               enum machine_mode to, int rclass)
{
  int rn;
#if DEBUG0
  fprintf (stderr, "cannot change from %s to %s in %s\n",
           mode_name[from], mode_name[to], class_names[rclass]);
#endif

  /* If the larger mode isn't allowed in any of these registers, we
     can't allow the change.  */
  for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
    if (class_contents[rclass][0] & (1 << rn))
      if (! m32c_hard_regno_ok (rn, to))
        return 1;

  /* 0x1ffa: every hard register bit through regno 12 except r0 (bit
     0) and r1 (bit 2) - see the 0x0005 mask below; presumably, those
     are the only registers allowed to narrow to QImode.  */
  if (to == QImode)
    return (class_contents[rclass][0] & 0x1ffa);

  /* 0x0005 = r0 (bit 0) and r1 (bit 2).  */
  if (class_contents[rclass][0] & 0x0005 /* r0, r1 */
      && GET_MODE_SIZE (from) > 1)
    return 0;
  if (GET_MODE_SIZE (from) > 2) /* all other regs */
    return 0;

  return 1;
}
917
 
918
/* Helpers for the rest of the file.  */
/* TRUE if the rtx is a REG rtx for the given register.  */
#define IS_REG(rtx,regno) (GET_CODE (rtx) == REG \
                           && REGNO (rtx) == regno)
/* TRUE if the rtx is a pseudo - specifically, one we can use as a
   base register in address calculations (hence the "strict"
   argument).  Always false when strict checking is requested.  */
#define IS_PSEUDO(rtx,strict) (!strict && GET_CODE (rtx) == REG \
                               && (REGNO (rtx) == AP_REGNO \
                                   || REGNO (rtx) >= FIRST_PSEUDO_REGISTER))
928
 
929
/* Implements CONST_OK_FOR_CONSTRAINT_P.  Currently, all constant
   constraints start with 'I', with the next two characters indicating
   the type and size of the range allowed.  */
int
m32c_const_ok_for_constraint_p (HOST_WIDE_INT value,
                                char c ATTRIBUTE_UNUSED, const char *str)
{
  /* s=signed u=unsigned n=nonzero m=minus l=log2able,
     [sun] bits [SUN] bytes, p=pointer size
     I[-0-9][0-9] matches that number */
  /* Signed ranges by bit/byte count.  */
  if (memcmp (str, "Is3", 3) == 0)
    {
      return (-8 <= value && value <= 7);
    }
  if (memcmp (str, "IS1", 3) == 0)
    {
      return (-128 <= value && value <= 127);
    }
  if (memcmp (str, "IS2", 3) == 0)
    {
      return (-32768 <= value && value <= 32767);
    }
  /* Unsigned ranges.  */
  if (memcmp (str, "IU2", 3) == 0)
    {
      return (0 <= value && value <= 65535);
    }
  if (memcmp (str, "IU3", 3) == 0)
    {
      return (0 <= value && value <= 0x00ffffff);
    }
  /* Nonzero ranges: the middle "value &&" excludes zero.  */
  if (memcmp (str, "In4", 3) == 0)
    {
      return (-8 <= value && value && value <= 8);
    }
  if (memcmp (str, "In5", 3) == 0)
    {
      return (-16 <= value && value && value <= 16);
    }
  if (memcmp (str, "In6", 3) == 0)
    {
      return (-32 <= value && value && value <= 32);
    }
  /* Negative-only range.  */
  if (memcmp (str, "IM2", 3) == 0)
    {
      return (-65536 <= value && value && value <= -1);
    }
  /* Single-bit masks (and their complements) for bit insns.  */
  if (memcmp (str, "Ilb", 3) == 0)
    {
      int b = exact_log2 (value);
      return (b >= 0 && b <= 7);
    }
  if (memcmp (str, "Imb", 3) == 0)
    {
      int b = exact_log2 ((value ^ 0xff) & 0xff);
      return (b >= 0 && b <= 7);
    }
  if (memcmp (str, "ImB", 3) == 0)
    {
      int b = exact_log2 ((value ^ 0xffff) & 0xffff);
      return (b >= 0 && b <= 7);
    }
  if (memcmp (str, "Ilw", 3) == 0)
    {
      int b = exact_log2 (value);
      return (b >= 0 && b <= 15);
    }
  if (memcmp (str, "Imw", 3) == 0)
    {
      int b = exact_log2 ((value ^ 0xffff) & 0xffff);
      return (b >= 0 && b <= 15);
    }
  /* Exactly zero.  */
  if (memcmp (str, "I00", 3) == 0)
    {
      return (value == 0);
    }
  return 0;
}
1006
 
1007
/* TRUE if X is $a0 or a (not-yet-allocated) pseudo register.  */
#define A0_OR_PSEUDO(x) (IS_REG(x, A0_REGNO) || REGNO (x) >= FIRST_PSEUDO_REGISTER)

/* Implements EXTRA_CONSTRAINT_STR (see next function too).  'S' is
   for memory constraints, plus "Rpa" for PARALLEL rtx's we use for
   call return values.  Matching works against the flattened encoding
   produced by encode_pattern (): RTX_IS compares the encoded shape
   string and patternr[] holds the corresponding sub-rtxes.  */
int
m32c_extra_constraint_p2 (rtx value, char c ATTRIBUTE_UNUSED, const char *str)
{
  /* Fill in patternr[] and the shape string consulted by RTX_IS.  */
  encode_pattern (value);

  /* Far (24-bit) address space operands only ever satisfy "SF".  */
  if (far_addr_space_p (value))
    {
      if (memcmp (str, "SF", 2) == 0)
	{
	  return (   (RTX_IS ("mr")
		      && A0_OR_PSEUDO (patternr[1])
		      && GET_MODE (patternr[1]) == SImode)
		     || (RTX_IS ("m+^Sri")
			 && A0_OR_PSEUDO (patternr[4])
			 && GET_MODE (patternr[4]) == HImode)
		     || (RTX_IS ("m+^Srs")
			 && A0_OR_PSEUDO (patternr[4])
			 && GET_MODE (patternr[4]) == HImode)
		     || (RTX_IS ("m+^S+ris")
			 && A0_OR_PSEUDO (patternr[5])
			 && GET_MODE (patternr[5]) == HImode)
		     || RTX_IS ("ms")
		     );
	}
      return 0;
    }

  if (memcmp (str, "Sd", 2) == 0)
    {
      /* This is the common "src/dest" address */
      rtx r;
      if (GET_CODE (value) == MEM && CONSTANT_P (XEXP (value, 0)))
	return 1;
      if (RTX_IS ("ms") || RTX_IS ("m+si"))
	return 1;
      if (RTX_IS ("m++rii"))
	{
	  /* Double-plus form is only valid as 0[$fb].  */
	  if (REGNO (patternr[3]) == FB_REGNO
	      && INTVAL (patternr[4]) == 0)
	    return 1;
	}
      if (RTX_IS ("mr"))
	r = patternr[1];
      else if (RTX_IS ("m+ri") || RTX_IS ("m+rs") || RTX_IS ("m+r+si"))
	r = patternr[2];
      else
	return 0;
      /* $sp-based addressing is excluded from "Sd".  */
      if (REGNO (r) == SP_REGNO)
	return 0;
      return m32c_legitimate_address_p (GET_MODE (value), XEXP (value, 0), 1);
    }
  else if (memcmp (str, "Sa", 2) == 0)
    {
      /* Address-register ($a0/$a1) based memory.  */
      rtx r;
      if (RTX_IS ("mr"))
	r = patternr[1];
      else if (RTX_IS ("m+ri"))
	r = patternr[2];
      else
	return 0;
      return (IS_REG (r, A0_REGNO) || IS_REG (r, A1_REGNO));
    }
  else if (memcmp (str, "Si", 2) == 0)
    {
      /* Constant (immediate/symbolic) addresses.  */
      return (RTX_IS ("mi") || RTX_IS ("ms") || RTX_IS ("m+si"));
    }
  else if (memcmp (str, "Ss", 2) == 0)
    {
      /* $sp-relative memory.  */
      return ((RTX_IS ("mr")
	       && (IS_REG (patternr[1], SP_REGNO)))
	      || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SP_REGNO))));
    }
  else if (memcmp (str, "Sf", 2) == 0)
    {
      /* $fb-relative memory.  */
      return ((RTX_IS ("mr")
	       && (IS_REG (patternr[1], FB_REGNO)))
	      || (RTX_IS ("m+ri") && (IS_REG (patternr[2], FB_REGNO))));
    }
  else if (memcmp (str, "Sb", 2) == 0)
    {
      /* $sb-relative memory.  */
      return ((RTX_IS ("mr")
	       && (IS_REG (patternr[1], SB_REGNO)))
	      || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SB_REGNO))));
    }
  else if (memcmp (str, "Sp", 2) == 0)
    {
      /* Absolute addresses 0..0x1fff used for bit addressing (I/O ports) */
      return (RTX_IS ("mi")
	      && !(INTVAL (patternr[1]) & ~0x1fff));
    }
  else if (memcmp (str, "S1", 2) == 0)
    {
      return r1h_operand (value, QImode);
    }
  else if (memcmp (str, "SF", 2) == 0)
    {
      /* "SF" for near-space operands: never matches (far case above).  */
      return 0;
    }

  /* Every 'S' constraint must have been handled above.  */
  gcc_assert (str[0] != 'S');

  /* NOTE(review): only the first two bytes ("Rp") are compared here,
     unlike the three-byte prefixes above — harmless as long as no
     other "Rp*" constraint exists, but confirm before adding one.  */
  if (memcmp (str, "Rpa", 2) == 0)
    return GET_CODE (value) == PARALLEL;

  return 0;
}
1118
 
1119
/* This is for when we're debugging the above.  */
1120
int
1121
m32c_extra_constraint_p (rtx value, char c, const char *str)
1122
{
1123
  int rv = m32c_extra_constraint_p2 (value, c, str);
1124
#if DEBUG0
1125
  fprintf (stderr, "\nconstraint %.*s: %d\n", CONSTRAINT_LEN (c, str), str,
1126
           rv);
1127
  debug_rtx (value);
1128
#endif
1129
  return rv;
1130
}
1131
 
1132
/* Implements EXTRA_MEMORY_CONSTRAINT.  Currently, we only use strings
1133
   starting with 'S'.  */
1134
int
1135
m32c_extra_memory_constraint (char c, const char *str ATTRIBUTE_UNUSED)
1136
{
1137
  return c == 'S';
1138
}
1139
 
1140
/* Implements EXTRA_ADDRESS_CONSTRAINT.  We reserve 'A' strings for these,
1141
   but don't currently define any.  */
1142
int
1143
m32c_extra_address_constraint (char c, const char *str ATTRIBUTE_UNUSED)
1144
{
1145
  return c == 'A';
1146
}
1147
 
1148
/* STACK AND CALLING */
1149
 
1150
/* Frame Layout */
1151
 
1152
/* Implements RETURN_ADDR_RTX.  Note that R8C and M16C push 24 bits
1153
   (yes, THREE bytes) onto the stack for the return address, but we
1154
   don't support pointers bigger than 16 bits on those chips.  This
1155
   will likely wreak havoc with exception unwinding.  FIXME.  */
1156
rtx
1157
m32c_return_addr_rtx (int count)
1158
{
1159
  enum machine_mode mode;
1160
  int offset;
1161
  rtx ra_mem;
1162
 
1163
  if (count)
1164
    return NULL_RTX;
1165
  /* we want 2[$fb] */
1166
 
1167
  if (TARGET_A24)
1168
    {
1169
      /* It's four bytes */
1170
      mode = PSImode;
1171
      offset = 4;
1172
    }
1173
  else
1174
    {
1175
      /* FIXME: it's really 3 bytes */
1176
      mode = HImode;
1177
      offset = 2;
1178
    }
1179
 
1180
  ra_mem =
1181
    gen_rtx_MEM (mode, plus_constant (gen_rtx_REG (Pmode, FP_REGNO), offset));
1182
  return copy_to_mode_reg (mode, ra_mem);
1183
}
1184
 
1185
/* Implements INCOMING_RETURN_ADDR_RTX.  See comment above.  */
1186
rtx
1187
m32c_incoming_return_addr_rtx (void)
1188
{
1189
  /* we want [sp] */
1190
  return gen_rtx_MEM (PSImode, gen_rtx_REG (PSImode, SP_REGNO));
1191
}
1192
 
1193
/* Exception Handling Support */
1194
 
1195
/* Implements EH_RETURN_DATA_REGNO.  Choose registers able to hold
1196
   pointers.  */
1197
int
1198
m32c_eh_return_data_regno (int n)
1199
{
1200
  switch (n)
1201
    {
1202
    case 0:
1203
      return A0_REGNO;
1204
    case 1:
1205
      if (TARGET_A16)
1206
        return R3_REGNO;
1207
      else
1208
        return R1_REGNO;
1209
    default:
1210
      return INVALID_REGNUM;
1211
    }
1212
}
1213
 
1214
/* Implements EH_RETURN_STACKADJ_RTX.  Saved and used later in
1215
   m32c_emit_eh_epilogue.  */
1216
rtx
1217
m32c_eh_return_stackadj_rtx (void)
1218
{
1219
  if (!cfun->machine->eh_stack_adjust)
1220
    {
1221
      rtx sa;
1222
 
1223
      sa = gen_rtx_REG (Pmode, R0_REGNO);
1224
      cfun->machine->eh_stack_adjust = sa;
1225
    }
1226
  return cfun->machine->eh_stack_adjust;
1227
}
1228
 
1229
/* Registers That Address the Stack Frame */
1230
 
1231
/* Implements DWARF_FRAME_REGNUM and DBX_REGISTER_NUMBER.  Note that
1232
   the original spec called for dwarf numbers to vary with register
1233
   width as well, for example, r0l, r0, and r2r0 would each have
1234
   different dwarf numbers.  GCC doesn't support this, and we don't do
1235
   it, and gdb seems to like it this way anyway.  */
1236
unsigned int
1237
m32c_dwarf_frame_regnum (int n)
1238
{
1239
  switch (n)
1240
    {
1241
    case R0_REGNO:
1242
      return 5;
1243
    case R1_REGNO:
1244
      return 6;
1245
    case R2_REGNO:
1246
      return 7;
1247
    case R3_REGNO:
1248
      return 8;
1249
    case A0_REGNO:
1250
      return 9;
1251
    case A1_REGNO:
1252
      return 10;
1253
    case FB_REGNO:
1254
      return 11;
1255
    case SB_REGNO:
1256
      return 19;
1257
 
1258
    case SP_REGNO:
1259
      return 12;
1260
    case PC_REGNO:
1261
      return 13;
1262
    default:
1263
      return DWARF_FRAME_REGISTERS + 1;
1264
    }
1265
}
1266
 
1267
/* The frame looks like this:
1268
 
1269
   ap -> +------------------------------
1270
         | Return address (3 or 4 bytes)
1271
         | Saved FB (2 or 4 bytes)
1272
   fb -> +------------------------------
1273
         | local vars
1274
         | register saves fb
1275
         |        through r0 as needed
1276
   sp -> +------------------------------
1277
*/
1278
 
1279
/* We use this to wrap all emitted insns in the prologue.  */
static rtx
F (rtx x)
{
  /* Mark X frame-related so the DWARF CFI machinery records it.  */
  RTX_FRAME_RELATED_P (x) = 1;
  return x;
}
1286
 
1287
/* This maps register numbers to the PUSHM/POPM bitfield, and tells us
   how much the stack pointer moves for each, for each cpu family.  */
static struct
{
  int reg1;		/* GCC register number.  */
  int bit;		/* Bit for this register in the PUSHM/POPM mask.  */
  int a16_bytes;	/* Stack bytes consumed when TARGET_A16.  */
  int a24_bytes;	/* Stack bytes consumed when TARGET_A24.  */
} pushm_info[] =
{
  /* These are in reverse push (nearest-to-sp) order.  */
  { R0_REGNO, 0x80, 2, 2 },
  { R1_REGNO, 0x40, 2, 2 },
  { R2_REGNO, 0x20, 2, 2 },
  { R3_REGNO, 0x10, 2, 2 },
  { A0_REGNO, 0x08, 2, 4 },
  { A1_REGNO, 0x04, 2, 4 },
  { SB_REGNO, 0x02, 2, 4 },
  { FB_REGNO, 0x01, 2, 4 }
};

/* Number of entries in the pushm_info table.  */
#define PUSHM_N (sizeof(pushm_info)/sizeof(pushm_info[0]))
1309
 
1310
/* Returns TRUE if we need to save/restore the given register.  We
   save everything for exception handlers, so that any register can be
   unwound.  For interrupt handlers, we save everything if the handler
   calls something else (because we don't know what *that* function
   might do), but try to be a bit smarter if the handler is a leaf
   function.  We always save $a0, though, because we use that in the
   epilogue to copy $fb to $sp.

   Note the tests are order-sensitive: fixed registers are excluded
   even for eh_return, and $fb is excluded even for interrupts.  */
static int
need_to_save (int regno)
{
  /* Fixed registers are never saved.  */
  if (fixed_regs[regno])
    return 0;
  /* eh_return must be able to restore any non-fixed register.  */
  if (crtl->calls_eh_return)
    return 1;
  /* $fb is never part of this save set.  */
  if (regno == FP_REGNO)
    return 0;
  /* Interrupt handlers: save everything unless we are a leaf, in
     which case only $a0 (and only when an ENTER frame is needed).  */
  if (cfun->machine->is_interrupt
      && (!cfun->machine->is_leaf
	  || (regno == A0_REGNO
	      && m32c_function_needs_enter ())
	  ))
    return 1;
  /* Ordinary functions: save live registers that the callee owns
     (call-used ones too, when in an interrupt handler).  */
  if (df_regs_ever_live_p (regno)
      && (!call_used_regs[regno] || cfun->machine->is_interrupt))
    return 1;
  return 0;
}
1337
 
1338
/* This function contains all the intelligence about saving and
   restoring registers.  It always figures out the register save set.
   When called with PP_justcount, it merely returns the size of the
   save set (for eliminating the frame pointer, for example).  When
   called with PP_pushm or PP_popm, it emits the appropriate
   instructions for saving (pushm) or restoring (popm) the
   registers.  Returns the total byte count of the save area.  */
static int
m32c_pushm_popm (Push_Pop_Type ppt)
{
  int reg_mask = 0;		/* PUSHM/POPM operand being built.  */
  int byte_count = 0, bytes;	/* Running stack-size total.  */
  int i;
  rtx dwarf_set[PUSHM_N];	/* One CFI SET per pushed register.  */
  int n_dwarfs = 0;
  int nosave_mask = 0;		/* Registers excluded (hold return value).  */

  /* When the return value is in a PARALLEL of registers, those
     registers must not be clobbered by the epilogue's popm, so mask
     them out of the save set (not needed for eh_return/interrupts,
     which always save everything).  */
  if (crtl->return_rtx
      && GET_CODE (crtl->return_rtx) == PARALLEL
      && !(crtl->calls_eh_return || cfun->machine->is_interrupt))
    {
      rtx exp = XVECEXP (crtl->return_rtx, 0, 0);
      rtx rv = XEXP (exp, 0);
      int rv_bytes = GET_MODE_SIZE (GET_MODE (rv));

      if (rv_bytes > 2)
	nosave_mask |= 0x20;	/* PSI, SI */
      else
	nosave_mask |= 0xf0;	/* DF */
      if (rv_bytes > 4)
	nosave_mask |= 0x50;	/* DI */
    }

  /* Walk the pushm table, accumulating the mask, the byte count, and
     (for PP_pushm) the DWARF CFI expressions.  */
  for (i = 0; i < (int) PUSHM_N; i++)
    {
      /* Skip if neither register needs saving.  */
      if (!need_to_save (pushm_info[i].reg1))
	continue;

      if (pushm_info[i].bit & nosave_mask)
	continue;

      reg_mask |= pushm_info[i].bit;
      bytes = TARGET_A16 ? pushm_info[i].a16_bytes : pushm_info[i].a24_bytes;

      if (ppt == PP_pushm)
	{
	  enum machine_mode mode = (bytes == 2) ? HImode : SImode;
	  rtx addr;

	  /* Always use stack_pointer_rtx instead of calling
	     rtx_gen_REG ourselves.  Code elsewhere in GCC assumes
	     that there is a single rtx representing the stack pointer,
	     namely stack_pointer_rtx, and uses == to recognize it.  */
	  addr = stack_pointer_rtx;

	  if (byte_count != 0)
	    addr = gen_rtx_PLUS (GET_MODE (addr), addr, GEN_INT (byte_count));

	  dwarf_set[n_dwarfs++] =
	    gen_rtx_SET (VOIDmode,
			 gen_rtx_MEM (mode, addr),
			 gen_rtx_REG (mode, pushm_info[i].reg1));
	  F (dwarf_set[n_dwarfs - 1]);

	}
      byte_count += bytes;
    }

  /* Interrupt handlers use a dedicated mask/count ($fb excluded via
     the 0xfe mask) saved for the interrupt prologue/epilogue; the
     general pushm set computed above is discarded.  */
  if (cfun->machine->is_interrupt)
    {
      cfun->machine->intr_pushm = reg_mask & 0xfe;
      reg_mask = 0;
      byte_count = 0;
    }

  /* Interrupt handlers additionally save the memregs, two bytes
     apiece, via individual pushes below.  */
  if (cfun->machine->is_interrupt)
    for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
      if (need_to_save (i))
	{
	  byte_count += 2;
	  cfun->machine->intr_pushmem[i - MEM0_REGNO] = 1;
	}

  if (ppt == PP_pushm && byte_count)
    {
      /* Attach a REG_FRAME_RELATED_EXPR note describing both the $sp
	 adjustment and each register store, so the unwinder sees the
	 whole pushm as one compound effect.  */
      rtx note = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (n_dwarfs + 1));
      rtx pushm;

      if (reg_mask)
	{
	  XVECEXP (note, 0, 0)
	    = gen_rtx_SET (VOIDmode,
			   stack_pointer_rtx,
			   gen_rtx_PLUS (GET_MODE (stack_pointer_rtx),
					 stack_pointer_rtx,
					 GEN_INT (-byte_count)));
	  F (XVECEXP (note, 0, 0));

	  for (i = 0; i < n_dwarfs; i++)
	    XVECEXP (note, 0, i + 1) = dwarf_set[i];

	  pushm = F (emit_insn (gen_pushm (GEN_INT (reg_mask))));

	  add_reg_note (pushm, REG_FRAME_RELATED_EXPR, note);
	}

      /* Memregs are pushed one at a time, ascending.  */
      if (cfun->machine->is_interrupt)
	for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
	  if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
	    {
	      if (TARGET_A16)
		pushm = emit_insn (gen_pushhi_16 (gen_rtx_REG (HImode, i)));
	      else
		pushm = emit_insn (gen_pushhi_24 (gen_rtx_REG (HImode, i)));
	      F (pushm);
	    }
    }
  if (ppt == PP_popm && byte_count)
    {
      /* Restore in exact reverse order: memregs descending, then popm.  */
      if (cfun->machine->is_interrupt)
	for (i = MEM7_REGNO; i >= MEM0_REGNO; i--)
	  if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
	    {
	      if (TARGET_A16)
		emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, i)));
	      else
		emit_insn (gen_pophi_24 (gen_rtx_REG (HImode, i)));
	    }
      if (reg_mask)
	emit_insn (gen_popm (GEN_INT (reg_mask)));
    }

  return byte_count;
}
1473
 
1474
/* Implements INITIAL_ELIMINATION_OFFSET.  See the comment above that
1475
   diagrams our call frame.  */
1476
int
1477
m32c_initial_elimination_offset (int from, int to)
1478
{
1479
  int ofs = 0;
1480
 
1481
  if (from == AP_REGNO)
1482
    {
1483
      if (TARGET_A16)
1484
        ofs += 5;
1485
      else
1486
        ofs += 8;
1487
    }
1488
 
1489
  if (to == SP_REGNO)
1490
    {
1491
      ofs += m32c_pushm_popm (PP_justcount);
1492
      ofs += get_frame_size ();
1493
    }
1494
 
1495
  /* Account for push rounding.  */
1496
  if (TARGET_A24)
1497
    ofs = (ofs + 1) & ~1;
1498
#if DEBUG0
1499
  fprintf (stderr, "initial_elimination_offset from=%d to=%d, ofs=%d\n", from,
1500
           to, ofs);
1501
#endif
1502
  return ofs;
1503
}
1504
 
1505
/* Passing Function Arguments on the Stack */
1506
 
1507
/* Implements PUSH_ROUNDING.  The R8C and M16C have byte stacks, the
1508
   M32C has word stacks.  */
1509
unsigned int
1510
m32c_push_rounding (int n)
1511
{
1512
  if (TARGET_R8C || TARGET_M16C)
1513
    return n;
1514
  return (n + 1) & ~1;
1515
}
1516
 
1517
/* Passing Arguments in Registers */

/* Implements TARGET_FUNCTION_ARG.  Arguments are passed partly in
   registers, partly on stack.  If our function returns a struct, a
   pointer to a buffer for it is at the top of the stack (last thing
   pushed).  The first few real arguments may be in registers as
   follows:

   R8C/M16C:    arg1 in r1 if it's QI or HI (else it's pushed on stack)
                arg2 in r2 if it's HI (else pushed on stack)
                rest on stack
   M32C:        arg1 in r0 if it's QI or HI (else it's pushed on stack)
                rest on stack

   Structs are not passed in registers, even if they fit.  Only
   integer and pointer types are passed in registers.

   Note that when arg1 doesn't fit in r1, arg2 may still be passed in
   r2 if it fits.  */
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG m32c_function_arg
static rtx
m32c_function_arg (cumulative_args_t ca_v,
		   enum machine_mode mode, const_tree type, bool named)
{
  CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);

  /* Can return a reg, parallel, or 0 for stack */
  rtx rv = NULL_RTX;
#if DEBUG0
  fprintf (stderr, "func_arg %d (%s, %d)\n",
	   ca->parm_num, mode_name[mode], named);
  debug_tree (type);
#endif

  if (mode == VOIDmode)
    return GEN_INT (0);

  /* Hidden struct-return pointer or unnamed (varargs) args always go
     on the stack.  */
  if (ca->force_mem || !named)
    {
#if DEBUG0
      fprintf (stderr, "func arg: force %d named %d, mem\n", ca->force_mem,
	       named);
#endif
      return NULL_RTX;
    }

  /* NOTE(review): a type cannot be both integral AND pointer, so this
     condition can never be true; the comment above suggests the
     intent was to reject types that are NEITHER.  Changing it would
     alter the ABI, so confirm before fixing.  */
  if (type && INTEGRAL_TYPE_P (type) && POINTER_TYPE_P (type))
    return NULL_RTX;

  /* Aggregates always go on the stack, even when they fit.  */
  if (type && AGGREGATE_TYPE_P (type))
    return NULL_RTX;

  /* Only the first one or two arguments can be in registers, per the
     table in the comment above.  */
  switch (ca->parm_num)
    {
    case 1:
      if (GET_MODE_SIZE (mode) == 1 || GET_MODE_SIZE (mode) == 2)
	rv = gen_rtx_REG (mode, TARGET_A16 ? R1_REGNO : R0_REGNO);
      break;

    case 2:
      if (TARGET_A16 && GET_MODE_SIZE (mode) == 2)
	rv = gen_rtx_REG (mode, R2_REGNO);
      break;
    }

#if DEBUG0
  debug_rtx (rv);
#endif
  return rv;
}
1588
 
1589
/* Implements TARGET_PASS_BY_REFERENCE.  Nothing is ever passed by
   reference on this target.  */
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE m32c_pass_by_reference
static bool
m32c_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
			enum machine_mode mode ATTRIBUTE_UNUSED,
			const_tree type ATTRIBUTE_UNUSED,
			bool named ATTRIBUTE_UNUSED)
{
  return 0;
}
1599
 
1600
/* Implements INIT_CUMULATIVE_ARGS.  */
1601
void
1602
m32c_init_cumulative_args (CUMULATIVE_ARGS * ca,
1603
                           tree fntype,
1604
                           rtx libname ATTRIBUTE_UNUSED,
1605
                           tree fndecl,
1606
                           int n_named_args ATTRIBUTE_UNUSED)
1607
{
1608
  if (fntype && aggregate_value_p (TREE_TYPE (fntype), fndecl))
1609
    ca->force_mem = 1;
1610
  else
1611
    ca->force_mem = 0;
1612
  ca->parm_num = 1;
1613
}
1614
 
1615
/* Implements TARGET_FUNCTION_ARG_ADVANCE.  force_mem is set for
1616
   functions returning structures, so we always reset that.  Otherwise,
1617
   we only need to know the sequence number of the argument to know what
1618
   to do with it.  */
1619
#undef TARGET_FUNCTION_ARG_ADVANCE
1620
#define TARGET_FUNCTION_ARG_ADVANCE m32c_function_arg_advance
1621
static void
1622
m32c_function_arg_advance (cumulative_args_t ca_v,
1623
                           enum machine_mode mode ATTRIBUTE_UNUSED,
1624
                           const_tree type ATTRIBUTE_UNUSED,
1625
                           bool named ATTRIBUTE_UNUSED)
1626
{
1627
  CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
1628
 
1629
  if (ca->force_mem)
1630
    ca->force_mem = 0;
1631
  else
1632
    ca->parm_num++;
1633
}
1634
 
1635
/* Implements TARGET_FUNCTION_ARG_BOUNDARY.  */
1636
#undef TARGET_FUNCTION_ARG_BOUNDARY
1637
#define TARGET_FUNCTION_ARG_BOUNDARY m32c_function_arg_boundary
1638
static unsigned int
1639
m32c_function_arg_boundary (enum machine_mode mode ATTRIBUTE_UNUSED,
1640
                            const_tree type ATTRIBUTE_UNUSED)
1641
{
1642
  return (TARGET_A16 ? 8 : 16);
1643
}
1644
 
1645
/* Implements FUNCTION_ARG_REGNO_P.  */
1646
int
1647
m32c_function_arg_regno_p (int r)
1648
{
1649
  if (TARGET_A24)
1650
    return (r == R0_REGNO);
1651
  return (r == R1_REGNO || r == R2_REGNO);
1652
}
1653
 
1654
/* HImode and PSImode are the two "native" modes as far as GCC is
1655
   concerned, but the chips also support a 32-bit mode which is used
1656
   for some opcodes in R8C/M16C and for reset vectors and such.  */
1657
#undef TARGET_VALID_POINTER_MODE
1658
#define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
1659
static bool
1660
m32c_valid_pointer_mode (enum machine_mode mode)
1661
{
1662
  if (mode == HImode
1663
      || mode == PSImode
1664
      || mode == SImode
1665
      )
1666
    return 1;
1667
  return 0;
1668
}
1669
 
1670
/* How Scalar Function Values Are Returned */

/* Implements TARGET_LIBCALL_VALUE.  Most values are returned in $r0, or some
   combination of registers starting there (r2r0 for longs, r3r1r2r0
   for long long, r3r2r1r0 for doubles), except that that ABI
   currently doesn't work because it ends up using all available
   general registers and gcc often can't compile it.  So, instead, we
   return anything bigger than 16 bits in "mem0" (effectively, a
   memory location).  */

#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE m32c_libcall_value

static rtx
m32c_libcall_value (enum machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
{
  /* return reg or parallel */
#if 0
  /* FIXME: GCC has difficulty returning large values in registers,
     because that ties up most of the general registers and gives the
     register allocator little to work with.  Until we can resolve
     this, large values are returned in memory.  */
  if (mode == DFmode)
    {
      rtx rv;

      rv = gen_rtx_PARALLEL (mode, rtvec_alloc (4));
      XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R0_REGNO),
					      GEN_INT (0));
      XVECEXP (rv, 0, 1) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R1_REGNO),
					      GEN_INT (2));
      XVECEXP (rv, 0, 2) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R2_REGNO),
					      GEN_INT (4));
      XVECEXP (rv, 0, 3) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R3_REGNO),
					      GEN_INT (6));
      return rv;
    }

  if (TARGET_A24 && GET_MODE_SIZE (mode) > 2)
    {
      rtx rv;

      rv = gen_rtx_PARALLEL (mode, rtvec_alloc (1));
      XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (mode,
							   R0_REGNO),
					      GEN_INT (0));
      return rv;
    }
#endif

  /* Active ABI: anything wider than 16 bits goes to mem0, the rest
     to $r0.  */
  if (GET_MODE_SIZE (mode) > 2)
    return gen_rtx_REG (mode, MEM0_REGNO);
  return gen_rtx_REG (mode, R0_REGNO);
}
1733
 
1734
/* Implements TARGET_FUNCTION_VALUE.  Functions and libcalls have the same
1735
   conventions.  */
1736
 
1737
#undef TARGET_FUNCTION_VALUE
1738
#define TARGET_FUNCTION_VALUE m32c_function_value
1739
 
1740
static rtx
1741
m32c_function_value (const_tree valtype,
1742
                     const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
1743
                     bool outgoing ATTRIBUTE_UNUSED)
1744
{
1745
  /* return reg or parallel */
1746
  const enum machine_mode mode = TYPE_MODE (valtype);
1747
  return m32c_libcall_value (mode, NULL_RTX);
1748
}
1749
 
1750
/* Implements TARGET_FUNCTION_VALUE_REGNO_P.  */
1751
 
1752
#undef TARGET_FUNCTION_VALUE_REGNO_P
1753
#define TARGET_FUNCTION_VALUE_REGNO_P m32c_function_value_regno_p
1754
 
1755
static bool
1756
m32c_function_value_regno_p (const unsigned int regno)
1757
{
1758
  return (regno == R0_REGNO || regno == MEM0_REGNO);
1759
}
1760
 
1761
/* How Large Values Are Returned */

/* We return structures by pushing the address on the stack, even if
   we use registers for the first few "real" arguments.  */
#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX m32c_struct_value_rtx
static rtx
m32c_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
		       int incoming ATTRIBUTE_UNUSED)
{
  /* Returning 0 tells GCC to pass the return-buffer address as a
     hidden first stack argument rather than in a register.  */
  return 0;
}
1773
 
1774
/* Function Entry and Exit */
1775
 
1776
/* Implements EPILOGUE_USES.  Interrupts restore all registers.  */
1777
int
1778
m32c_epilogue_uses (int regno ATTRIBUTE_UNUSED)
1779
{
1780
  if (cfun->machine->is_interrupt)
1781
    return 1;
1782
  return 0;
1783
}
1784
 
1785
/* Implementing the Varargs Macros */

/* Implements TARGET_STRICT_ARGUMENT_NAMING: only truly named
   arguments are treated as named (unnamed ones go to the stack in
   m32c_function_arg).  */
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING m32c_strict_argument_naming
static bool
m32c_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
{
  return 1;
}
1794
 
1795
/* Trampolines for Nested Functions */

/*
   m16c:
   1 0000 75C43412              mov.w   #0x1234,a0
   2 0004 FC000000              jmp.a   label

   m32c:
   1 0000 BC563412              mov.l:s #0x123456,a0
   2 0004 CC000000              jmp.a   label
*/

/* Implements TRAMPOLINE_SIZE.  The listings above are 7 (m16c) and 8
   (m32c) bytes of code; see the over-allocation note below.  */
int
m32c_trampoline_size (void)
{
  /* Allocate extra space so we can avoid the messy shifts when we
     initialize the trampoline; we just write past the end of the
     opcode.  */
  return TARGET_A16 ? 8 : 10;
}
1816
 
1817
/* Implements TRAMPOLINE_ALIGNMENT.  Trampolines are aligned to two
   bytes (value is in bits per this macro's convention elsewhere —
   NOTE(review): confirm units against the m32c.h definition).  */
int
m32c_trampoline_alignment (void)
{
  return 2;
}
1823
 
1824
/* Implements TARGET_TRAMPOLINE_INIT.  Writes the instruction bytes
   shown in the assembly listing above, substituting the static chain
   and target function address.  */

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT m32c_trampoline_init
static void
m32c_trampoline_init (rtx m_tramp, tree fndecl, rtx chainval)
{
  rtx function = XEXP (DECL_RTL (fndecl), 0);

/* Shorthand for byte I of the trampoline, accessed in mode M.  */
#define A0(m,i) adjust_address (m_tramp, m, i)
  if (TARGET_A16)
    {
      /* Note: we subtract a "word" because the moves want signed
	 constants, not unsigned constants.  */
      emit_move_insn (A0 (HImode, 0), GEN_INT (0xc475 - 0x10000)); /* 75 C4: mov.w #,a0 */
      emit_move_insn (A0 (HImode, 2), chainval);
      emit_move_insn (A0 (QImode, 4), GEN_INT (0xfc - 0x100));	   /* FC: jmp.a */
      /* We use 16-bit addresses here, but store the zero to turn it
	 into a 24-bit offset.  */
      emit_move_insn (A0 (HImode, 5), function);
      emit_move_insn (A0 (QImode, 7), GEN_INT (0x00));
    }
  else
    {
      /* Note that the PSI moves actually write 4 bytes.  Make sure we
	 write stuff out in the right order, and leave room for the
	 extra byte at the end.  */
      emit_move_insn (A0 (QImode, 0), GEN_INT (0xbc - 0x100));	   /* BC: mov.l:s #,a0 */
      emit_move_insn (A0 (PSImode, 1), chainval);
      emit_move_insn (A0 (QImode, 4), GEN_INT (0xcc - 0x100));	   /* CC: jmp.a */
      emit_move_insn (A0 (PSImode, 5), function);
    }
#undef A0
}
1858
 
1859
/* Implicit Calls to Library Routines */

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS m32c_init_libfuncs
static void
m32c_init_libfuncs (void)
{
  /* We do this because the M32C has an HImode operand, but the
     M16C has an 8-bit operand.  Since gcc looks at the match data
     and not the expanded rtl, we have to reset the optab so that
     the right modes are found. */
  if (TARGET_A24)
    {
      /* Point cstore at the 24-bit insn patterns for each mode.  */
      set_optab_handler (cstore_optab, QImode, CODE_FOR_cstoreqi4_24);
      set_optab_handler (cstore_optab, HImode, CODE_FOR_cstorehi4_24);
      set_optab_handler (cstore_optab, PSImode, CODE_FOR_cstorepsi4_24);
    }
}
1877
 
1878
/* Addressing Modes */
1879
 
1880
/* The r8c/m32c family supports a wide range of non-orthogonal
1881
   addressing modes, including the ability to double-indirect on *some*
1882
   of them.  Not all insns support all modes, either, but we rely on
1883
   predicates and constraints to deal with that.  */
1884
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P m32c_legitimate_address_p
/* Implements TARGET_LEGITIMATE_ADDRESS_P.  Return nonzero iff X is a
   valid address for a MODE-sized access.  STRICT nonzero means pseudo
   registers must already be allocated to hard registers.  */
bool
m32c_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
{
  int mode_adjust;
  if (CONSTANT_P (x))
    return 1;

  /* Addresses must be carried in the chip family's pointer mode:
     HImode/SImode on 16-bit-address (A16) parts, PSImode on 24-bit
     (A24) parts.  */
  if (TARGET_A16 && GET_MODE (x) != HImode && GET_MODE (x) != SImode)
    return 0;
  if (TARGET_A24 && GET_MODE (x) != PSImode)
    return 0;

  /* Wide references to memory will be split after reload, so we must
     ensure that all parts of such splits remain legitimate
     addresses.  */
  mode_adjust = GET_MODE_SIZE (mode) - 1;

  /* allowing PLUS yields mem:HI(plus:SI(mem:SI(plus:SI in m32c_split_move */
  if (GET_CODE (x) == PRE_DEC
      || GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_MODIFY)
    {
      /* Auto-modify addressing is only accepted on the stack pointer.  */
      return (GET_CODE (XEXP (x, 0)) == REG
              && REGNO (XEXP (x, 0)) == SP_REGNO);
    }

#if 0
  /* This is the double indirection detection, but it currently
     doesn't work as cleanly as this code implies, so until we've had
     a chance to debug it, leave it disabled.  */
  if (TARGET_A24 && GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) != PLUS)
    {
#if DEBUG_DOUBLE
      fprintf (stderr, "double indirect\n");
#endif
      x = XEXP (x, 0);
    }
#endif

  encode_pattern (x);
  if (RTX_IS ("r"))
    {
      /* Most indexable registers can be used without displacements,
         although some of them will be emitted with an explicit zero
         to please the assembler.  */
      switch (REGNO (patternr[0]))
        {
        case A1_REGNO:
        case SB_REGNO:
        case FB_REGNO:
        case SP_REGNO:
          if (TARGET_A16 && GET_MODE (x) == SImode)
            return 0;
          /* FALLTHRU - these registers, like A0, are otherwise valid.  */
        case A0_REGNO:
          return 1;

        default:
          if (IS_PSEUDO (patternr[0], strict))
            return 1;
          return 0;
        }
    }

  /* SImode addresses are never valid on A16 beyond the register cases
     handled above.  */
  if (TARGET_A16 && GET_MODE (x) == SImode)
    return 0;

  if (RTX_IS ("+ri"))
    {
      /* This is more interesting, because different base registers
         allow for different displacements - both range and signedness
         - and it differs from chip series to chip series too.  */
      int rn = REGNO (patternr[1]);
      HOST_WIDE_INT offs = INTVAL (patternr[2]);
      switch (rn)
        {
        case A0_REGNO:
        case A1_REGNO:
        case SB_REGNO:
          /* The syntax only allows positive offsets, but when the
             offsets span the entire memory range, we can simulate
             negative offsets by wrapping.  */
          if (TARGET_A16)
            return (offs >= -65536 && offs <= 65535 - mode_adjust);
          if (rn == SB_REGNO)
            return (offs >= 0 && offs <= 65535 - mode_adjust);
          /* A0 or A1 */
          return (offs >= -16777216 && offs <= 16777215);

        case FB_REGNO:
          if (TARGET_A16)
            return (offs >= -128 && offs <= 127 - mode_adjust);
          return (offs >= -65536 && offs <= 65535 - mode_adjust);

        case SP_REGNO:
          return (offs >= -128 && offs <= 127 - mode_adjust);

        default:
          if (IS_PSEUDO (patternr[1], strict))
            return 1;
          return 0;
        }
    }
  if (RTX_IS ("+rs") || RTX_IS ("+r+si"))
    {
      rtx reg = patternr[1];

      /* We don't know where the symbol is, so only allow base
         registers which support displacements spanning the whole
         address range.  */
      switch (REGNO (reg))
        {
        case A0_REGNO:
        case A1_REGNO:
          /* $sb needs a secondary reload, but since it's involved in
             memory address reloads too, we don't deal with it very
             well.  */
          /*    case SB_REGNO: */
          return 1;
        default:
          if (IS_PSEUDO (reg, strict))
            return 1;
          return 0;
        }
    }
  return 0;
}
2011
 
2012
/* Implements REG_OK_FOR_BASE_P.  */
2013
int
2014
m32c_reg_ok_for_base_p (rtx x, int strict)
2015
{
2016
  if (GET_CODE (x) != REG)
2017
    return 0;
2018
  switch (REGNO (x))
2019
    {
2020
    case A0_REGNO:
2021
    case A1_REGNO:
2022
    case SB_REGNO:
2023
    case FB_REGNO:
2024
    case SP_REGNO:
2025
      return 1;
2026
    default:
2027
      if (IS_PSEUDO (x, strict))
2028
        return 1;
2029
      return 0;
2030
    }
2031
}
2032
 
2033
/* We have three choices for choosing fb->aN offsets.  If we choose -128,
2034
   we need one MOVA -128[fb],aN opcode and 16-bit aN displacements,
2035
   like this:
2036
       EB 4B FF    mova    -128[$fb],$a0
2037
       D8 0C FF FF mov.w:Q #0,-1[$a0]
2038
 
2039
   Alternately, we subtract the frame size, and hopefully use 8-bit aN
2040
   displacements:
2041
       7B F4       stc $fb,$a0
2042
       77 54 00 01 sub #256,$a0
2043
       D8 08 01    mov.w:Q #0,1[$a0]
2044
 
2045
   If we don't offset (i.e. offset by zero), we end up with:
2046
       7B F4       stc $fb,$a0
2047
       D8 0C 00 FF mov.w:Q #0,-256[$a0]
2048
 
2049
   We have to subtract *something* so that we have a PLUS rtx to mark
2050
   that we've done this reload.  The -128 offset will never result in
2051
   an 8-bit aN offset, and the payoff for the second case is five
2052
   loads *if* those loads are within 256 bytes of the other end of the
2053
   frame, so the third case seems best.  Note that we subtract the
2054
   zero, but detect that in the addhi3 pattern.  */
2055
 
2056
#define BIG_FB_ADJ 0
2057
 
2058
/* Implements LEGITIMIZE_ADDRESS.  The only address we really have to
   worry about is frame base offsets, as $fb has a limited
   displacement range.  We deal with this by attempting to reload $fb
   itself into an address register; that seems to result in the best
   code.  */
#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS m32c_legitimize_address
static rtx
m32c_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
                         enum machine_mode mode)
{
#if DEBUG0
  fprintf (stderr, "m32c_legitimize_address for mode %s\n", mode_name[mode]);
  debug_rtx (x);
  fprintf (stderr, "\n");
#endif

  /* Only rewrite $fb+const addresses whose constant is outside the
     signed 8-bit displacement range $fb supports for this MODE.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && REGNO (XEXP (x, 0)) == FB_REGNO
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && (INTVAL (XEXP (x, 1)) < -128
          || INTVAL (XEXP (x, 1)) > (128 - GET_MODE_SIZE (mode))))
    {
      /* reload FB to A_REGS */
      rtx temp = gen_reg_rtx (Pmode);
      /* Copy before mutating so a shared rtx is not modified in place.  */
      x = copy_rtx (x);
      emit_insn (gen_rtx_SET (VOIDmode, temp, XEXP (x, 0)));
      XEXP (x, 0) = temp;
    }

  return x;
}
2091
 
2092
/* Implements LEGITIMIZE_RELOAD_ADDRESS.  See comment above.  Return
   nonzero if *X was rewritten and a reload pushed; zero to let the
   generic reload machinery handle it.  */
int
m32c_legitimize_reload_address (rtx * x,
                                enum machine_mode mode,
                                int opnum,
                                int type, int ind_levels ATTRIBUTE_UNUSED)
{
#if DEBUG0
  fprintf (stderr, "\nm32c_legitimize_reload_address for mode %s\n",
           mode_name[mode]);
  debug_rtx (*x);
#endif

  /* At one point, this function tried to get $fb copied to an address
     register, which in theory would maximize sharing, but gcc was
     *also* still trying to reload the whole address, and we'd run out
     of address registers.  So we let gcc do the naive (but safe)
     reload instead, when the above function doesn't handle it for
     us.

     The code below is a second attempt at the above.  */

  /* Case 1: ($fb + const) with an out-of-range constant.  Split it
     into (($fb + adjustment) + (const - adjustment)) and reload the
     inner sum into an address register.  */
  if (GET_CODE (*x) == PLUS
      && GET_CODE (XEXP (*x, 0)) == REG
      && REGNO (XEXP (*x, 0)) == FB_REGNO
      && GET_CODE (XEXP (*x, 1)) == CONST_INT
      && (INTVAL (XEXP (*x, 1)) < -128
          || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
    {
      rtx sum;
      int offset = INTVAL (XEXP (*x, 1));
      int adjustment = -BIG_FB_ADJ;

      sum = gen_rtx_PLUS (Pmode, XEXP (*x, 0),
                          GEN_INT (adjustment));
      *x = gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - adjustment));
      if (type == RELOAD_OTHER)
        type = RELOAD_FOR_OTHER_ADDRESS;
      push_reload (sum, NULL_RTX, &XEXP (*x, 0), NULL,
                   A_REGS, Pmode, VOIDmode, 0, 0, opnum,
                   (enum reload_type) type);
      return 1;
    }

  /* Case 2: (($fb + const) + const), i.e. an address already split as
     above.  Reload the inner sum into an address register, keeping
     the outer displacement.  */
  if (GET_CODE (*x) == PLUS
      && GET_CODE (XEXP (*x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (*x, 0), 0)) == REG
      && REGNO (XEXP (XEXP (*x, 0), 0)) == FB_REGNO
      && GET_CODE (XEXP (XEXP (*x, 0), 1)) == CONST_INT
      && GET_CODE (XEXP (*x, 1)) == CONST_INT
      )
    {
      if (type == RELOAD_OTHER)
        type = RELOAD_FOR_OTHER_ADDRESS;
      push_reload (XEXP (*x, 0), NULL_RTX, &XEXP (*x, 0), NULL,
                   A_REGS, Pmode, VOIDmode, 0, 0, opnum,
                   (enum reload_type) type);
      return 1;
    }

  return 0;
}
2154
 
2155
/* Return the appropriate mode for a named address pointer.  */
2156
#undef TARGET_ADDR_SPACE_POINTER_MODE
2157
#define TARGET_ADDR_SPACE_POINTER_MODE m32c_addr_space_pointer_mode
2158
static enum machine_mode
2159
m32c_addr_space_pointer_mode (addr_space_t addrspace)
2160
{
2161
  switch (addrspace)
2162
    {
2163
    case ADDR_SPACE_GENERIC:
2164
      return TARGET_A24 ? PSImode : HImode;
2165
    case ADDR_SPACE_FAR:
2166
      return SImode;
2167
    default:
2168
      gcc_unreachable ();
2169
    }
2170
}
2171
 
2172
/* Return the appropriate mode for a named address address.  */
2173
#undef TARGET_ADDR_SPACE_ADDRESS_MODE
2174
#define TARGET_ADDR_SPACE_ADDRESS_MODE m32c_addr_space_address_mode
2175
static enum machine_mode
2176
m32c_addr_space_address_mode (addr_space_t addrspace)
2177
{
2178
  switch (addrspace)
2179
    {
2180
    case ADDR_SPACE_GENERIC:
2181
      return TARGET_A24 ? PSImode : HImode;
2182
    case ADDR_SPACE_FAR:
2183
      return SImode;
2184
    default:
2185
      gcc_unreachable ();
2186
    }
2187
}
2188
 
2189
/* Like m32c_legitimate_address_p, except with named addresses.  For
   the far address space (A16 only) a restricted set of A0-based
   patterns is accepted; generic addresses are delegated to
   m32c_legitimate_address_p.  */
#undef TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
#define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P \
  m32c_addr_space_legitimate_address_p
static bool
m32c_addr_space_legitimate_address_p (enum machine_mode mode, rtx x,
                                      bool strict, addr_space_t as)
{
  if (as == ADDR_SPACE_FAR)
    {
      /* Far addresses are only meaningful on A16 parts; A24 already
         addresses the whole space generically.  */
      if (TARGET_A24)
        return 0;
      encode_pattern (x);
      if (RTX_IS ("r"))
        {
          /* A bare register: must be SImode and must be A0 (or a
             pseudo when not strict).  */
          if (GET_MODE (x) != SImode)
            return 0;
          switch (REGNO (patternr[0]))
            {
            case A0_REGNO:
              return 1;

            default:
              if (IS_PSEUDO (patternr[0], strict))
                return 1;
              return 0;
            }
        }
      if (RTX_IS ("+^Sri"))
        {
          /* A0 plus a constant offset within the 20-bit far range.  */
          int rn = REGNO (patternr[3]);
          HOST_WIDE_INT offs = INTVAL (patternr[4]);
          if (GET_MODE (patternr[3]) != HImode)
            return 0;
          switch (rn)
            {
            case A0_REGNO:
              return (offs >= 0 && offs <= 0xfffff);

            default:
              if (IS_PSEUDO (patternr[3], strict))
                return 1;
              return 0;
            }
        }
      if (RTX_IS ("+^Srs"))
        {
          /* A0 plus a symbol.  */
          int rn = REGNO (patternr[3]);
          if (GET_MODE (patternr[3]) != HImode)
            return 0;
          switch (rn)
            {
            case A0_REGNO:
              return 1;

            default:
              if (IS_PSEUDO (patternr[3], strict))
                return 1;
              return 0;
            }
        }
      if (RTX_IS ("+^S+ris"))
        {
          /* A0 plus a constant plus a symbol.  */
          int rn = REGNO (patternr[4]);
          if (GET_MODE (patternr[4]) != HImode)
            return 0;
          switch (rn)
            {
            case A0_REGNO:
              return 1;

            default:
              if (IS_PSEUDO (patternr[4], strict))
                return 1;
              return 0;
            }
        }
      if (RTX_IS ("s"))
        {
          /* A bare symbol is always addressable.  */
          return 1;
        }
      return 0;
    }

  else if (as != ADDR_SPACE_GENERIC)
    gcc_unreachable ();

  /* Generic address space: defer to the ordinary legitimacy check.  */
  return m32c_legitimate_address_p (mode, x, strict);
}
2278
 
2279
/* Like m32c_legitimate_address, except with named address support.  */
2280
#undef TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS
2281
#define TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS m32c_addr_space_legitimize_address
2282
static rtx
2283
m32c_addr_space_legitimize_address (rtx x, rtx oldx, enum machine_mode mode,
2284
                                    addr_space_t as)
2285
{
2286
  if (as != ADDR_SPACE_GENERIC)
2287
    {
2288
#if DEBUG0
2289
      fprintf (stderr, "\033[36mm32c_addr_space_legitimize_address for mode %s\033[0m\n", mode_name[mode]);
2290
      debug_rtx (x);
2291
      fprintf (stderr, "\n");
2292
#endif
2293
 
2294
      if (GET_CODE (x) != REG)
2295
        {
2296
          x = force_reg (SImode, x);
2297
        }
2298
      return x;
2299
    }
2300
 
2301
  return m32c_legitimize_address (x, oldx, mode);
2302
}
2303
 
2304
/* Determine if one named address space is a subset of another.  */
2305
#undef TARGET_ADDR_SPACE_SUBSET_P
2306
#define TARGET_ADDR_SPACE_SUBSET_P m32c_addr_space_subset_p
2307
static bool
2308
m32c_addr_space_subset_p (addr_space_t subset, addr_space_t superset)
2309
{
2310
  gcc_assert (subset == ADDR_SPACE_GENERIC || subset == ADDR_SPACE_FAR);
2311
  gcc_assert (superset == ADDR_SPACE_GENERIC || superset == ADDR_SPACE_FAR);
2312
 
2313
  if (subset == superset)
2314
    return true;
2315
 
2316
  else
2317
    return (subset == ADDR_SPACE_GENERIC && superset == ADDR_SPACE_FAR);
2318
}
2319
 
2320
#undef TARGET_ADDR_SPACE_CONVERT
#define TARGET_ADDR_SPACE_CONVERT m32c_addr_space_convert
/* Convert from one address space to another.  OP is the pointer
   value; FROM_TYPE and TO_TYPE are the pointer types involved.
   Returns a new pseudo holding the converted pointer.  */
static rtx
m32c_addr_space_convert (rtx op, tree from_type, tree to_type)
{
  addr_space_t from_as = TYPE_ADDR_SPACE (TREE_TYPE (from_type));
  addr_space_t to_as = TYPE_ADDR_SPACE (TREE_TYPE (to_type));
  rtx result;

  gcc_assert (from_as == ADDR_SPACE_GENERIC || from_as == ADDR_SPACE_FAR);
  gcc_assert (to_as == ADDR_SPACE_GENERIC || to_as == ADDR_SPACE_FAR);

  if (to_as == ADDR_SPACE_GENERIC && from_as == ADDR_SPACE_FAR)
    {
      /* This is unpredictable, as we're truncating off usable address
         bits.  Keep only the low 16 bits of the far pointer.  */

      result = gen_reg_rtx (HImode);
      emit_move_insn (result, simplify_subreg (HImode, op, SImode, 0));
      return result;
    }
  else if (to_as == ADDR_SPACE_FAR && from_as == ADDR_SPACE_GENERIC)
    {
      /* This always works.  Widen the near pointer with a zero
         extension.  */
      result = gen_reg_rtx (SImode);
      emit_insn (gen_zero_extendhisi2 (result, op));
      return result;
    }
  else
    gcc_unreachable ();
}
2352
 
2353
/* Condition Code Status */
2354
 
2355
#undef TARGET_FIXED_CONDITION_CODE_REGS
2356
#define TARGET_FIXED_CONDITION_CODE_REGS m32c_fixed_condition_code_regs
2357
static bool
2358
m32c_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
2359
{
2360
  *p1 = FLG_REGNO;
2361
  *p2 = INVALID_REGNUM;
2362
  return true;
2363
}
2364
 
2365
/* Describing Relative Costs of Operations */
2366
 
2367
/* Implements TARGET_REGISTER_MOVE_COST.  We make impossible moves
   prohibitively expensive, like trying to put QIs in r2/r3 (there are
   no opcodes to do that).  We also discourage use of mem* registers
   since they're really memory.  */

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST m32c_register_move_cost

static int
m32c_register_move_cost (enum machine_mode mode, reg_class_t from,
                         reg_class_t to)
{
  /* Base cost for any register-to-register move.  */
  int cost = COSTS_N_INSNS (3);
  HARD_REG_SET cc;

/* FIXME: pick real values, but not 2 for now.  */
  /* CC is the union of source and destination class contents.  */
  COPY_HARD_REG_SET (cc, reg_class_contents[(int) from]);
  IOR_HARD_REG_SET (cc, reg_class_contents[(int) to]);

  /* QImode moves involving r2/r3 are expensive; if r2/r3 are the only
     registers involved, the move is effectively impossible.  */
  if (mode == QImode
      && hard_reg_set_intersect_p (cc, reg_class_contents[R23_REGS]))
    {
      if (hard_reg_set_subset_p (cc, reg_class_contents[R23_REGS]))
        cost = COSTS_N_INSNS (1000);
      else
        cost = COSTS_N_INSNS (80);
    }

  /* Moves into classes that cannot hold MODE at all are prohibitive.  */
  if (!class_can_hold_mode (from, mode) || !class_can_hold_mode (to, mode))
    cost = COSTS_N_INSNS (1000);

  /* Control registers are slow on either end of the move.  */
  if (reg_classes_intersect_p (from, CR_REGS))
    cost += COSTS_N_INSNS (5);

  if (reg_classes_intersect_p (to, CR_REGS))
    cost += COSTS_N_INSNS (5);

  /* Discourage mem* pseudo-registers, which are really memory.  */
  if (from == MEM_REGS || to == MEM_REGS)
    cost += COSTS_N_INSNS (50);
  else if (reg_classes_intersect_p (from, MEM_REGS)
           || reg_classes_intersect_p (to, MEM_REGS))
    cost += COSTS_N_INSNS (10);

#if DEBUG0
  fprintf (stderr, "register_move_cost %s from %s to %s = %d\n",
           mode_name[mode], class_names[(int) from], class_names[(int) to],
           cost);
#endif
  return cost;
}
2417
 
2418
/*  Implements TARGET_MEMORY_MOVE_COST.  */
2419
 
2420
#undef TARGET_MEMORY_MOVE_COST
2421
#define TARGET_MEMORY_MOVE_COST m32c_memory_move_cost
2422
 
2423
static int
2424
m32c_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2425
                       reg_class_t rclass ATTRIBUTE_UNUSED,
2426
                       bool in ATTRIBUTE_UNUSED)
2427
{
2428
  /* FIXME: pick real values.  */
2429
  return COSTS_N_INSNS (10);
2430
}
2431
 
2432
/* Here we try to describe when we use multiple opcodes for one RTX so
   that gcc knows when to use them.  Returns true when *TOTAL fully
   accounts for X (no recursion into subexpressions needed).  */
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS m32c_rtx_costs
static bool
m32c_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
                int *total, bool speed ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case REG:
      /* mem0..mem7 pseudo-registers are really memory - strongly
         discourage them.  */
      if (REGNO (x) >= MEM0_REGNO && REGNO (x) <= MEM7_REGNO)
        *total += COSTS_N_INSNS (500);
      else
        *total += COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case LSHIFTRT:
    case ASHIFTRT:
      if (GET_CODE (XEXP (x, 1)) != CONST_INT)
        {
          /* Variable shift count must first be moved into place:  */
          /* mov.b r1l, r1h */
          *total +=  COSTS_N_INSNS (1);
          return true;
        }
      if (INTVAL (XEXP (x, 1)) > 8
          || INTVAL (XEXP (x, 1)) < -8)
        {
          /* Large constant counts need an extra load:  */
          /* mov.b #N, r1l */
          /* mov.b r1l, r1h */
          *total +=  COSTS_N_INSNS (2);
          return true;
        }
      /* Small constant shifts cost nothing extra.  */
      return true;

    case LE:
    case LEU:
    case LT:
    case LTU:
    case GT:
    case GTU:
    case GE:
    case GEU:
    case NE:
    case EQ:
      /* A comparison stored directly into a register (cstore) takes
         two insns; inside other contexts, fall through to default.  */
      if (outer_code == SET)
        {
          *total += COSTS_N_INSNS (2);
          return true;
        }
      break;

    case ZERO_EXTRACT:
      {
        /* NOTE(review): assumes the extract operand is a MEM so that
           XEXP (dest, 0) is its address - confirm the insn patterns
           guarantee this.  */
        rtx dest = XEXP (x, 0);
        rtx addr = XEXP (dest, 0);
        switch (GET_CODE (addr))
          {
          case CONST_INT:
            *total += COSTS_N_INSNS (1);
            break;
          case SYMBOL_REF:
            *total += COSTS_N_INSNS (3);
            break;
          default:
            *total += COSTS_N_INSNS (2);
            break;
          }
        return true;
      }
      break;

    default:
      /* Reasonable default.  */
      if (TARGET_A16 && GET_MODE(x) == SImode)
        *total += COSTS_N_INSNS (2);
      break;
    }
  return false;
}
2513
 
2514
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST m32c_address_cost
/* Implements TARGET_ADDRESS_COST.  Cheaper addresses are those with
   smaller (non-negative) displacements; note that negative offsets
   fall through to the most expensive tier.  */
static int
m32c_address_cost (rtx addr, bool speed ATTRIBUTE_UNUSED)
{
  int i;
  /*  fprintf(stderr, "\naddress_cost\n");
      debug_rtx(addr);*/
  switch (GET_CODE (addr))
    {
    case CONST_INT:
      /* Absolute addresses: cost scales with the encoding width.  */
      i = INTVAL (addr);
      if (i == 0)
        return COSTS_N_INSNS(1);
      if (0 < i && i <= 255)
        return COSTS_N_INSNS(2);
      if (0 < i && i <= 65535)
        return COSTS_N_INSNS(3);
      return COSTS_N_INSNS(4);
    case SYMBOL_REF:
      return COSTS_N_INSNS(4);
    case REG:
      return COSTS_N_INSNS(1);
    case PLUS:
      /* Base + displacement: same width tiers as absolute addresses.  */
      if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
        {
          i = INTVAL (XEXP (addr, 1));
          if (i == 0)
            return COSTS_N_INSNS(1);
          if (0 < i && i <= 255)
            return COSTS_N_INSNS(2);
          if (0 < i && i <= 65535)
            return COSTS_N_INSNS(3);
        }
      return COSTS_N_INSNS(4);
    default:
      return 0;
    }
}
2553
 
2554
/* Defining the Output Assembler Language */
2555
 
2556
/* Output of Data */
2557
 
2558
/* We may have 24 bit sizes, which is the native address size.
2559
   Currently unused, but provided for completeness.  */
2560
#undef TARGET_ASM_INTEGER
2561
#define TARGET_ASM_INTEGER m32c_asm_integer
2562
static bool
2563
m32c_asm_integer (rtx x, unsigned int size, int aligned_p)
2564
{
2565
  switch (size)
2566
    {
2567
    case 3:
2568
      fprintf (asm_out_file, "\t.3byte\t");
2569
      output_addr_const (asm_out_file, x);
2570
      fputc ('\n', asm_out_file);
2571
      return true;
2572
    case 4:
2573
      if (GET_CODE (x) == SYMBOL_REF)
2574
        {
2575
          fprintf (asm_out_file, "\t.long\t");
2576
          output_addr_const (asm_out_file, x);
2577
          fputc ('\n', asm_out_file);
2578
          return true;
2579
        }
2580
      break;
2581
    }
2582
  return default_assemble_integer (x, size, aligned_p);
2583
}
2584
 
2585
/* Output of Assembler Instructions */
2586
 
2587
/* We use a lookup table because the addressing modes are non-orthogonal.  */

/* Each entry maps an (operand-code, encoded-pattern) pair to an
   output template.  CODE is the print_operand modifier letter (0 for
   none).  PATTERN is matched against the string built by
   encode_pattern; based on its uses elsewhere in this file, 'r' is a
   register, 'i' a CONST_INT, 's' a symbol, 'm' a memory wrapper and
   '+' a PLUS - TODO confirm against encode_pattern itself, and the
   '^S'/'^Z' prefixes, which are not decodable from this chunk.
   Digits in FORMAT index patternr[] elements to print; other
   characters are emitted literally (backslash escapes the next
   character).  */
static struct
{
  char code;
  char const *pattern;
  char const *format;
}
const conversions[] = {
  { 0, "r", "0" },

  { 0, "mr", "z[1]" },
  { 0, "m+ri", "3[2]" },
  { 0, "m+rs", "3[2]" },
  { 0, "m+^Zrs", "5[4]" },
  { 0, "m+^Zri", "5[4]" },
  { 0, "m+^Z+ris", "7+6[5]" },
  { 0, "m+^Srs", "5[4]" },
  { 0, "m+^Sri", "5[4]" },
  { 0, "m+^S+ris", "7+6[5]" },
  { 0, "m+r+si", "4+5[2]" },
  { 0, "ms", "1" },
  { 0, "mi", "1" },
  { 0, "m+si", "2+3" },

  { 0, "mmr", "[z[2]]" },
  { 0, "mm+ri", "[4[3]]" },
  { 0, "mm+rs", "[4[3]]" },
  { 0, "mm+r+si", "[5+6[3]]" },
  { 0, "mms", "[[2]]" },
  { 0, "mmi", "[[2]]" },
  { 0, "mm+si", "[4[3]]" },

  { 0, "i", "#0" },
  { 0, "s", "#0" },
  { 0, "+si", "#1+2" },
  { 0, "l", "#0" },

  { 'l', "l", "0" },
  { 'd', "i", "0" },
  { 'd', "s", "0" },
  { 'd', "+si", "1+2" },
  { 'D', "i", "0" },
  { 'D', "s", "0" },
  { 'D', "+si", "1+2" },
  { 'x', "i", "#0" },
  { 'X', "i", "#0" },
  { 'm', "i", "#0" },
  { 'b', "i", "#0" },
  { 'B', "i", "0" },
  { 'p', "i", "0" },

  /* Sentinel terminating the table.  */
  { 0, 0, 0 }
};
2641
 
2642
/* This is in order according to the bitfield that pushm/popm use.
   Bit 0 of the mask selects "fb" and bit 7 selects "r0" (see the 'p'
   case in m32c_print_operand, which walks bits 7..0).  */
static char const *pushm_regs[] = {
  "fb", "sb", "a1", "a0", "r3", "r2", "r1", "r0"
};
2646
 
2647
/* Implements TARGET_PRINT_OPERAND.  */
2648
 
2649
#undef TARGET_PRINT_OPERAND
2650
#define TARGET_PRINT_OPERAND m32c_print_operand
2651
 
2652
static void
2653
m32c_print_operand (FILE * file, rtx x, int code)
2654
{
2655
  int i, j, b;
2656
  const char *comma;
2657
  HOST_WIDE_INT ival;
2658
  int unsigned_const = 0;
2659
  int force_sign;
2660
 
2661
  /* Multiplies; constants are converted to sign-extended format but
2662
   we need unsigned, so 'u' and 'U' tell us what size unsigned we
2663
   need.  */
2664
  if (code == 'u')
2665
    {
2666
      unsigned_const = 2;
2667
      code = 0;
2668
    }
2669
  if (code == 'U')
2670
    {
2671
      unsigned_const = 1;
2672
      code = 0;
2673
    }
2674
  /* This one is only for debugging; you can put it in a pattern to
2675
     force this error.  */
2676
  if (code == '!')
2677
    {
2678
      fprintf (stderr, "dj: unreviewed pattern:");
2679
      if (current_output_insn)
2680
        debug_rtx (current_output_insn);
2681
      gcc_unreachable ();
2682
    }
2683
  /* PSImode operations are either .w or .l depending on the target.  */
2684
  if (code == '&')
2685
    {
2686
      if (TARGET_A16)
2687
        fprintf (file, "w");
2688
      else
2689
        fprintf (file, "l");
2690
      return;
2691
    }
2692
  /* Inverted conditionals.  */
2693
  if (code == 'C')
2694
    {
2695
      switch (GET_CODE (x))
2696
        {
2697
        case LE:
2698
          fputs ("gt", file);
2699
          break;
2700
        case LEU:
2701
          fputs ("gtu", file);
2702
          break;
2703
        case LT:
2704
          fputs ("ge", file);
2705
          break;
2706
        case LTU:
2707
          fputs ("geu", file);
2708
          break;
2709
        case GT:
2710
          fputs ("le", file);
2711
          break;
2712
        case GTU:
2713
          fputs ("leu", file);
2714
          break;
2715
        case GE:
2716
          fputs ("lt", file);
2717
          break;
2718
        case GEU:
2719
          fputs ("ltu", file);
2720
          break;
2721
        case NE:
2722
          fputs ("eq", file);
2723
          break;
2724
        case EQ:
2725
          fputs ("ne", file);
2726
          break;
2727
        default:
2728
          gcc_unreachable ();
2729
        }
2730
      return;
2731
    }
2732
  /* Regular conditionals.  */
2733
  if (code == 'c')
2734
    {
2735
      switch (GET_CODE (x))
2736
        {
2737
        case LE:
2738
          fputs ("le", file);
2739
          break;
2740
        case LEU:
2741
          fputs ("leu", file);
2742
          break;
2743
        case LT:
2744
          fputs ("lt", file);
2745
          break;
2746
        case LTU:
2747
          fputs ("ltu", file);
2748
          break;
2749
        case GT:
2750
          fputs ("gt", file);
2751
          break;
2752
        case GTU:
2753
          fputs ("gtu", file);
2754
          break;
2755
        case GE:
2756
          fputs ("ge", file);
2757
          break;
2758
        case GEU:
2759
          fputs ("geu", file);
2760
          break;
2761
        case NE:
2762
          fputs ("ne", file);
2763
          break;
2764
        case EQ:
2765
          fputs ("eq", file);
2766
          break;
2767
        default:
2768
          gcc_unreachable ();
2769
        }
2770
      return;
2771
    }
2772
  /* Used in negsi2 to do HImode ops on the two parts of an SImode
2773
     operand.  */
2774
  if (code == 'h' && GET_MODE (x) == SImode)
2775
    {
2776
      x = m32c_subreg (HImode, x, SImode, 0);
2777
      code = 0;
2778
    }
2779
  if (code == 'H' && GET_MODE (x) == SImode)
2780
    {
2781
      x = m32c_subreg (HImode, x, SImode, 2);
2782
      code = 0;
2783
    }
2784
  if (code == 'h' && GET_MODE (x) == HImode)
2785
    {
2786
      x = m32c_subreg (QImode, x, HImode, 0);
2787
      code = 0;
2788
    }
2789
  if (code == 'H' && GET_MODE (x) == HImode)
2790
    {
2791
      /* We can't actually represent this as an rtx.  Do it here.  */
2792
      if (GET_CODE (x) == REG)
2793
        {
2794
          switch (REGNO (x))
2795
            {
2796
            case R0_REGNO:
2797
              fputs ("r0h", file);
2798
              return;
2799
            case R1_REGNO:
2800
              fputs ("r1h", file);
2801
              return;
2802
            default:
2803
              gcc_unreachable();
2804
            }
2805
        }
2806
      /* This should be a MEM.  */
2807
      x = m32c_subreg (QImode, x, HImode, 1);
2808
      code = 0;
2809
    }
2810
  /* This is for BMcond, which always wants word register names.  */
2811
  if (code == 'h' && GET_MODE (x) == QImode)
2812
    {
2813
      if (GET_CODE (x) == REG)
2814
        x = gen_rtx_REG (HImode, REGNO (x));
2815
      code = 0;
2816
    }
2817
  /* 'x' and 'X' need to be ignored for non-immediates.  */
2818
  if ((code == 'x' || code == 'X') && GET_CODE (x) != CONST_INT)
2819
    code = 0;
2820
 
2821
  encode_pattern (x);
2822
  force_sign = 0;
2823
  for (i = 0; conversions[i].pattern; i++)
2824
    if (conversions[i].code == code
2825
        && streq (conversions[i].pattern, pattern))
2826
      {
2827
        for (j = 0; conversions[i].format[j]; j++)
2828
          /* backslash quotes the next character in the output pattern.  */
2829
          if (conversions[i].format[j] == '\\')
2830
            {
2831
              fputc (conversions[i].format[j + 1], file);
2832
              j++;
2833
            }
2834
          /* Digits in the output pattern indicate that the
2835
             corresponding RTX is to be output at that point.  */
2836
          else if (ISDIGIT (conversions[i].format[j]))
2837
            {
2838
              rtx r = patternr[conversions[i].format[j] - '0'];
2839
              switch (GET_CODE (r))
2840
                {
2841
                case REG:
2842
                  fprintf (file, "%s",
2843
                           reg_name_with_mode (REGNO (r), GET_MODE (r)));
2844
                  break;
2845
                case CONST_INT:
2846
                  switch (code)
2847
                    {
2848
                    case 'b':
2849
                    case 'B':
2850
                      {
2851
                        int v = INTVAL (r);
2852
                        int i = (int) exact_log2 (v);
2853
                        if (i == -1)
2854
                          i = (int) exact_log2 ((v ^ 0xffff) & 0xffff);
2855
                        if (i == -1)
2856
                          i = (int) exact_log2 ((v ^ 0xff) & 0xff);
2857
                        /* Bit position.  */
2858
                        fprintf (file, "%d", i);
2859
                      }
2860
                      break;
2861
                    case 'x':
2862
                      /* Unsigned byte.  */
2863
                      fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2864
                               INTVAL (r) & 0xff);
2865
                      break;
2866
                    case 'X':
2867
                      /* Unsigned word.  */
2868
                      fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2869
                               INTVAL (r) & 0xffff);
2870
                      break;
2871
                    case 'p':
2872
                      /* pushm and popm encode a register set into a single byte.  */
2873
                      comma = "";
2874
                      for (b = 7; b >= 0; b--)
2875
                        if (INTVAL (r) & (1 << b))
2876
                          {
2877
                            fprintf (file, "%s%s", comma, pushm_regs[b]);
2878
                            comma = ",";
2879
                          }
2880
                      break;
2881
                    case 'm':
2882
                      /* "Minus".  Output -X  */
2883
                      ival = (-INTVAL (r) & 0xffff);
2884
                      if (ival & 0x8000)
2885
                        ival = ival - 0x10000;
2886
                      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2887
                      break;
2888
                    default:
2889
                      ival = INTVAL (r);
2890
                      if (conversions[i].format[j + 1] == '[' && ival < 0)
2891
                        {
2892
                          /* We can simulate negative displacements by
2893
                             taking advantage of address space
2894
                             wrapping when the offset can span the
2895
                             entire address range.  */
2896
                          rtx base =
2897
                            patternr[conversions[i].format[j + 2] - '0'];
2898
                          if (GET_CODE (base) == REG)
2899
                            switch (REGNO (base))
2900
                              {
2901
                              case A0_REGNO:
2902
                              case A1_REGNO:
2903
                                if (TARGET_A24)
2904
                                  ival = 0x1000000 + ival;
2905
                                else
2906
                                  ival = 0x10000 + ival;
2907
                                break;
2908
                              case SB_REGNO:
2909
                                if (TARGET_A16)
2910
                                  ival = 0x10000 + ival;
2911
                                break;
2912
                              }
2913
                        }
2914
                      else if (code == 'd' && ival < 0 && j == 0)
2915
                        /* The "mova" opcode is used to do addition by
2916
                           computing displacements, but again, we need
2917
                           displacements to be unsigned *if* they're
2918
                           the only component of the displacement
2919
                           (i.e. no "symbol-4" type displacement).  */
2920
                        ival = (TARGET_A24 ? 0x1000000 : 0x10000) + ival;
2921
 
2922
                      if (conversions[i].format[j] == '0')
2923
                        {
2924
                          /* More conversions to unsigned.  */
2925
                          if (unsigned_const == 2)
2926
                            ival &= 0xffff;
2927
                          if (unsigned_const == 1)
2928
                            ival &= 0xff;
2929
                        }
2930
                      if (streq (conversions[i].pattern, "mi")
2931
                          || streq (conversions[i].pattern, "mmi"))
2932
                        {
2933
                          /* Integers used as addresses are unsigned.  */
2934
                          ival &= (TARGET_A24 ? 0xffffff : 0xffff);
2935
                        }
2936
                      if (force_sign && ival >= 0)
2937
                        fputc ('+', file);
2938
                      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2939
                      break;
2940
                    }
2941
                  break;
2942
                case CONST_DOUBLE:
2943
                  /* We don't have const_double constants.  If it
2944
                     happens, make it obvious.  */
2945
                  fprintf (file, "[const_double 0x%lx]",
2946
                           (unsigned long) CONST_DOUBLE_HIGH (r));
2947
                  break;
2948
                case SYMBOL_REF:
2949
                  assemble_name (file, XSTR (r, 0));
2950
                  break;
2951
                case LABEL_REF:
2952
                  output_asm_label (r);
2953
                  break;
2954
                default:
2955
                  fprintf (stderr, "don't know how to print this operand:");
2956
                  debug_rtx (r);
2957
                  gcc_unreachable ();
2958
                }
2959
            }
2960
          else
2961
            {
2962
              if (conversions[i].format[j] == 'z')
2963
                {
2964
                  /* Some addressing modes *must* have a displacement,
2965
                     so insert a zero here if needed.  */
2966
                  int k;
2967
                  for (k = j + 1; conversions[i].format[k]; k++)
2968
                    if (ISDIGIT (conversions[i].format[k]))
2969
                      {
2970
                        rtx reg = patternr[conversions[i].format[k] - '0'];
2971
                        if (GET_CODE (reg) == REG
2972
                            && (REGNO (reg) == SB_REGNO
2973
                                || REGNO (reg) == FB_REGNO
2974
                                || REGNO (reg) == SP_REGNO))
2975
                          fputc ('0', file);
2976
                      }
2977
                  continue;
2978
                }
2979
              /* Signed displacements off symbols need to have signs
2980
                 blended cleanly.  */
2981
              if (conversions[i].format[j] == '+'
2982
                  && (!code || code == 'D' || code == 'd')
2983
                  && ISDIGIT (conversions[i].format[j + 1])
2984
                  && (GET_CODE (patternr[conversions[i].format[j + 1] - '0'])
2985
                      == CONST_INT))
2986
                {
2987
                  force_sign = 1;
2988
                  continue;
2989
                }
2990
              fputc (conversions[i].format[j], file);
2991
            }
2992
        break;
2993
      }
2994
  if (!conversions[i].pattern)
2995
    {
2996
      fprintf (stderr, "unconvertible operand %c `%s'", code ? code : '-',
2997
               pattern);
2998
      debug_rtx (x);
2999
      fprintf (file, "[%c.%s]", code ? code : '-', pattern);
3000
    }
3001
 
3002
  return;
3003
}
3004
 
3005
/* Implements TARGET_PRINT_OPERAND_PUNCT_VALID_P.

   See m32c_print_operand above for descriptions of what these do.  */

#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P m32c_print_operand_punct_valid_p

/* Only '&' and '!' are valid punctuation operand codes.  */

static bool
m32c_print_operand_punct_valid_p (unsigned char c)
{
  return c == '&' || c == '!';
}
3020
 
3021
/* Implements TARGET_PRINT_OPERAND_ADDRESS.  Nothing unusual here.  */

#undef TARGET_PRINT_OPERAND_ADDRESS
#define TARGET_PRINT_OPERAND_ADDRESS m32c_print_operand_address

/* Print ADDRESS to STREAM.  A MEM is unwrapped to its address
   expression first; anything else must be a bare REG (inline asm can
   hand us one directly, cf. gcc.dg/asm-4.c).  The actual formatting
   is delegated to m32c_print_operand with no modifier code.  */

static void
m32c_print_operand_address (FILE * stream, rtx address)
{
  if (GET_CODE (address) == MEM)
    address = XEXP (address, 0);
  else
    /* cf: gcc.dg/asm-4.c.  */
    gcc_assert (GET_CODE (address) == REG);

  m32c_print_operand (stream, address, 0);
}
3037
 
3038
/* Implements ASM_OUTPUT_REG_PUSH.  Control registers are pushed
   differently than general registers.  */

/* Emit assembly to S that pushes register REGNO.  The flags register
   needs the "pushc" (push control register) opcode; all other
   registers use a sized push whose size letter is picked from
   " bwll" by reg_push_size (so 1 byte -> 'b', 2 -> 'w', 4 -> 'l').  */
void
m32c_output_reg_push (FILE * s, int regno)
{
  if (regno == FLG_REGNO)
    fprintf (s, "\tpushc\tflg\n");
  else
    fprintf (s, "\tpush.%c\t%s\n",
             " bwll"[reg_push_size (regno)], reg_names[regno]);
}
3049
 
3050
/* Likewise for ASM_OUTPUT_REG_POP.  */

/* Emit assembly to S that pops register REGNO, mirroring
   m32c_output_reg_push above: "popc" for the flags register, a sized
   pop for everything else.  */
void
m32c_output_reg_pop (FILE * s, int regno)
{
  if (regno == FLG_REGNO)
    fprintf (s, "\tpopc\tflg\n");
  else
    fprintf (s, "\tpop.%c\t%s\n",
             " bwll"[reg_push_size (regno)], reg_names[regno]);
}
3060
 
3061
/* Defining target-specific uses of `__attribute__' */
3062
 
3063
/* Used to simplify the logic below.  Find the attributes wherever
   they may be.  For a type, use TYPE_ATTRIBUTES directly; for a
   decl, prefer DECL_ATTRIBUTES and fall back to the attributes of
   the decl's type.  The expansion is fully parenthesized so the
   macro is safe to embed in larger expressions.  */
#define M32C_ATTRIBUTES(decl) \
  ((TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
                 : DECL_ATTRIBUTES (decl) \
                   ? (DECL_ATTRIBUTES (decl)) \
                   : TYPE_ATTRIBUTES (TREE_TYPE (decl)))
3070
 
3071
/* Returns TRUE if the given tree has the "interrupt" attribute.  */
3072
static int
3073
interrupt_p (tree node ATTRIBUTE_UNUSED)
3074
{
3075
  tree list = M32C_ATTRIBUTES (node);
3076
  while (list)
3077
    {
3078
      if (is_attribute_p ("interrupt", TREE_PURPOSE (list)))
3079
        return 1;
3080
      list = TREE_CHAIN (list);
3081
    }
3082
  return fast_interrupt_p (node);
3083
}
3084
 
3085
/* Returns TRUE if the given tree has the "bank_switch" attribute.  */
3086
static int
3087
bank_switch_p (tree node ATTRIBUTE_UNUSED)
3088
{
3089
  tree list = M32C_ATTRIBUTES (node);
3090
  while (list)
3091
    {
3092
      if (is_attribute_p ("bank_switch", TREE_PURPOSE (list)))
3093
        return 1;
3094
      list = TREE_CHAIN (list);
3095
    }
3096
  return 0;
3097
}
3098
 
3099
/* Returns TRUE if the given tree has the "fast_interrupt" attribute.  */
3100
static int
3101
fast_interrupt_p (tree node ATTRIBUTE_UNUSED)
3102
{
3103
  tree list = M32C_ATTRIBUTES (node);
3104
  while (list)
3105
    {
3106
      if (is_attribute_p ("fast_interrupt", TREE_PURPOSE (list)))
3107
        return 1;
3108
      list = TREE_CHAIN (list);
3109
    }
3110
  return 0;
3111
}
3112
 
3113
/* Attribute handler for "interrupt", "bank_switch" and
   "fast_interrupt" (see m32c_attribute_table).  These attributes take
   no arguments and need no validation, so the handler accepts them
   unconditionally and returns NULL_TREE (no replacement attribute).  */
static tree
interrupt_handler (tree * node ATTRIBUTE_UNUSED,
                   tree name ATTRIBUTE_UNUSED,
                   tree args ATTRIBUTE_UNUSED,
                   int flags ATTRIBUTE_UNUSED,
                   bool * no_add_attrs ATTRIBUTE_UNUSED)
{
  return NULL_TREE;
}
3122
 
3123
/* Returns TRUE if given tree has the "function_vector" attribute. */
3124
int
3125
m32c_special_page_vector_p (tree func)
3126
{
3127
  tree list;
3128
 
3129
  if (TREE_CODE (func) != FUNCTION_DECL)
3130
    return 0;
3131
 
3132
  list = M32C_ATTRIBUTES (func);
3133
  while (list)
3134
    {
3135
      if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
3136
        return 1;
3137
      list = TREE_CHAIN (list);
3138
    }
3139
  return 0;
3140
}
3141
 
3142
static tree
3143
function_vector_handler (tree * node ATTRIBUTE_UNUSED,
3144
                         tree name ATTRIBUTE_UNUSED,
3145
                         tree args ATTRIBUTE_UNUSED,
3146
                         int flags ATTRIBUTE_UNUSED,
3147
                         bool * no_add_attrs ATTRIBUTE_UNUSED)
3148
{
3149
  if (TARGET_R8C)
3150
    {
3151
      /* The attribute is not supported for R8C target.  */
3152
      warning (OPT_Wattributes,
3153
                "%qE attribute is not supported for R8C target",
3154
                name);
3155
      *no_add_attrs = true;
3156
    }
3157
  else if (TREE_CODE (*node) != FUNCTION_DECL)
3158
    {
3159
      /* The attribute must be applied to functions only.  */
3160
      warning (OPT_Wattributes,
3161
                "%qE attribute applies only to functions",
3162
                name);
3163
      *no_add_attrs = true;
3164
    }
3165
  else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
3166
    {
3167
      /* The argument must be a constant integer.  */
3168
      warning (OPT_Wattributes,
3169
                "%qE attribute argument not an integer constant",
3170
                name);
3171
      *no_add_attrs = true;
3172
    }
3173
  else if (TREE_INT_CST_LOW (TREE_VALUE (args)) < 18
3174
           || TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
3175
    {
3176
      /* The argument value must be between 18 to 255.  */
3177
      warning (OPT_Wattributes,
3178
                "%qE attribute argument should be between 18 to 255",
3179
                name);
3180
      *no_add_attrs = true;
3181
    }
3182
  return NULL_TREE;
3183
}
3184
 
3185
/* If the function is assigned the attribute 'function_vector', it
3186
   returns the function vector number, otherwise returns zero.  */
3187
int
3188
current_function_special_page_vector (rtx x)
3189
{
3190
  int num;
3191
 
3192
  if ((GET_CODE(x) == SYMBOL_REF)
3193
      && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
3194
    {
3195
      tree list;
3196
      tree t = SYMBOL_REF_DECL (x);
3197
 
3198
      if (TREE_CODE (t) != FUNCTION_DECL)
3199
        return 0;
3200
 
3201
      list = M32C_ATTRIBUTES (t);
3202
      while (list)
3203
        {
3204
          if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
3205
            {
3206
              num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
3207
              return num;
3208
            }
3209
 
3210
          list = TREE_CHAIN (list);
3211
        }
3212
 
3213
      return 0;
3214
    }
3215
  else
3216
    return 0;
3217
}
3218
 
3219
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
/* Machine-specific attributes.  Each attribute_spec entry is:
   { name, min args, max args, decl_required, type_required,
     function_type_required, handler, affects_type_identity }.
   "function_vector" takes exactly one argument and must be on a
   decl; the other three take none and are accepted anywhere.  */
static const struct attribute_spec m32c_attribute_table[] = {
  {"interrupt", 0, 0, false, false, false, interrupt_handler, false},
  {"bank_switch", 0, 0, false, false, false, interrupt_handler, false},
  {"fast_interrupt", 0, 0, false, false, false, interrupt_handler, false},
  {"function_vector", 1, 1, true,  false, false, function_vector_handler,
   false},
  {0, 0, 0, 0, 0, 0, 0, false}
};
3229
 
3230
#undef TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
/* Implements TARGET_COMP_TYPE_ATTRIBUTES.  All attribute
   combinations are treated as compatible on this target.  */
static int
m32c_comp_type_attributes (const_tree type1 ATTRIBUTE_UNUSED,
                           const_tree type2 ATTRIBUTE_UNUSED)
{
  /* 0=incompatible 1=compatible 2=warning */
  return 1;
}
3239
 
3240
#undef TARGET_INSERT_ATTRIBUTES
3241
#define TARGET_INSERT_ATTRIBUTES m32c_insert_attributes
3242
static void
3243
m32c_insert_attributes (tree node ATTRIBUTE_UNUSED,
3244
                        tree * attr_ptr ATTRIBUTE_UNUSED)
3245
{
3246
  unsigned addr;
3247
  /* See if we need to make #pragma address variables volatile.  */
3248
 
3249
  if (TREE_CODE (node) == VAR_DECL)
3250
    {
3251
      const char *name = IDENTIFIER_POINTER (DECL_NAME (node));
3252
      if (m32c_get_pragma_address  (name, &addr))
3253
        {
3254
          TREE_THIS_VOLATILE (node) = true;
3255
        }
3256
    }
3257
}
3258
 
3259
 
3260
/* One "#pragma address" record: maps a variable name to its fixed
   address.  GTY-marked so entries are rooted for the garbage
   collector.  */
struct GTY(()) pragma_entry {
  const char *varname;   /* GC-allocated copy of the variable name.  */
  unsigned address;      /* The address assigned by the pragma.  */
};
typedef struct pragma_entry pragma_entry;

/* Hash table of pragma info.  Lazily created by
   m32c_note_pragma_address; NULL until the first pragma is seen.  */
static GTY((param_is (pragma_entry))) htab_t pragma_htab;
3268
 
3269
static int
3270
pragma_entry_eq (const void *p1, const void *p2)
3271
{
3272
  const pragma_entry *old = (const pragma_entry *) p1;
3273
  const char *new_name = (const char *) p2;
3274
 
3275
  return strcmp (old->varname, new_name) == 0;
3276
}
3277
 
3278
static hashval_t
3279
pragma_entry_hash (const void *p)
3280
{
3281
  const pragma_entry *old = (const pragma_entry *) p;
3282
  return htab_hash_string (old->varname);
3283
}
3284
 
3285
/* Record that "#pragma address" assigned ADDRESS to VARNAME.  The
   hash table is created on first use; an existing entry for the same
   name simply has its address overwritten.  The name is duplicated
   into GC memory so the entry does not dangle.  */
void
m32c_note_pragma_address (const char *varname, unsigned address)
{
  pragma_entry **slot;

  if (!pragma_htab)
    pragma_htab = htab_create_ggc (31, pragma_entry_hash,
                                    pragma_entry_eq, NULL);

  slot = (pragma_entry **)
    htab_find_slot_with_hash (pragma_htab, varname,
                              htab_hash_string (varname), INSERT);

  if (!*slot)
    {
      *slot = ggc_alloc_pragma_entry ();
      (*slot)->varname = ggc_strdup (varname);
    }
  (*slot)->address = address;
}
3305
 
3306
static bool
3307
m32c_get_pragma_address (const char *varname, unsigned *address)
3308
{
3309
  pragma_entry **slot;
3310
 
3311
  if (!pragma_htab)
3312
    return false;
3313
 
3314
  slot = (pragma_entry **)
3315
    htab_find_slot_with_hash (pragma_htab, varname,
3316
                              htab_hash_string (varname), NO_INSERT);
3317
  if (slot && *slot)
3318
    {
3319
      *address = (*slot)->address;
3320
      return true;
3321
    }
3322
  return false;
3323
}
3324
 
3325
void
3326
m32c_output_aligned_common (FILE *stream, tree decl ATTRIBUTE_UNUSED,
3327
                            const char *name,
3328
                            int size, int align, int global)
3329
{
3330
  unsigned address;
3331
 
3332
  if (m32c_get_pragma_address (name, &address))
3333
    {
3334
      /* We never output these as global.  */
3335
      assemble_name (stream, name);
3336
      fprintf (stream, " = 0x%04x\n", address);
3337
      return;
3338
    }
3339
  if (!global)
3340
    {
3341
      fprintf (stream, "\t.local\t");
3342
      assemble_name (stream, name);
3343
      fprintf (stream, "\n");
3344
    }
3345
  fprintf (stream, "\t.comm\t");
3346
  assemble_name (stream, name);
3347
  fprintf (stream, ",%u,%u\n", size, align / BITS_PER_UNIT);
3348
}
3349
 
3350
/* Predicates */
3351
 
3352
/* This is a list of legal subregs of hard regs.  */
/* Each entry states that a subreg of OUTER_MODE_SIZE bytes taken
   from register REGNO (holding a value of INNER_MODE_SIZE bytes) is
   allowed at the byte offsets whose bits are set in BYTE_MASK.
   LEGAL_WHEN selects when the entry applies, as decoded in
   m32c_illegal_subreg_p: 1 = always, 16 = only for TARGET_A16,
   24 = only for TARGET_A24.  */
static const struct {
  unsigned char outer_mode_size;
  unsigned char inner_mode_size;
  unsigned char byte_mask;
  unsigned char legal_when;
  unsigned int regno;
} legal_subregs[] = {
  {1, 2, 0x03, 1, R0_REGNO}, /* r0h r0l */
  {1, 2, 0x03, 1, R1_REGNO}, /* r1h r1l */
  {1, 2, 0x01, 1, A0_REGNO},
  {1, 2, 0x01, 1, A1_REGNO},

  {1, 4, 0x01, 1, A0_REGNO},
  {1, 4, 0x01, 1, A1_REGNO},

  {2, 4, 0x05, 1, R0_REGNO}, /* r2 r0 */
  {2, 4, 0x05, 1, R1_REGNO}, /* r3 r1 */
  {2, 4, 0x05, 16, A0_REGNO}, /* a1 a0 */
  {2, 4, 0x01, 24, A0_REGNO}, /* a1 a0 */
  {2, 4, 0x01, 24, A1_REGNO}, /* a1 a0 */

  {4, 8, 0x55, 1, R0_REGNO}, /* r3 r1 r2 r0 */
};
3376
 
3377
/* Returns TRUE if OP is a subreg of a hard reg which we don't
   support.  We also bail on MEMs with illegal addresses.  */
bool
m32c_illegal_subreg_p (rtx op)
{
  int offset;
  unsigned int i;
  int src_mode, dest_mode;

  /* Reject MEMs whose address is not legitimate for this target.  */
  if (GET_CODE (op) == MEM
      && ! m32c_legitimate_address_p (Pmode, XEXP (op, 0), false))
    {
      return true;
    }

  if (GET_CODE (op) != SUBREG)
    return false;

  /* Peel the SUBREG apart: outer mode, byte offset, inner rtx.  */
  dest_mode = GET_MODE (op);
  offset = SUBREG_BYTE (op);
  op = SUBREG_REG (op);
  src_mode = GET_MODE (op);

  /* Same-size "subregs" are mere mode punning — always fine.  */
  if (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (src_mode))
    return false;
  if (GET_CODE (op) != REG)
    return false;
  /* Only hard registers below MEM0 are restricted.  */
  if (REGNO (op) >= MEM0_REGNO)
    return false;

  /* Convert the byte offset to the bit-mask form used in
     legal_subregs[].byte_mask.  */
  offset = (1 << offset);

  /* Search the whitelist; any matching entry whose condition holds
     makes the subreg legal.  */
  for (i = 0; i < ARRAY_SIZE (legal_subregs); i ++)
    if (legal_subregs[i].outer_mode_size == GET_MODE_SIZE (dest_mode)
        && legal_subregs[i].regno == REGNO (op)
        && legal_subregs[i].inner_mode_size == GET_MODE_SIZE (src_mode)
        && legal_subregs[i].byte_mask & offset)
      {
        switch (legal_subregs[i].legal_when)
          {
          case 1:
            /* Unconditionally legal.  */
            return false;
          case 16:
            if (TARGET_A16)
              return false;
            break;
          case 24:
            if (TARGET_A24)
              return false;
            break;
          }
      }
  /* No whitelist entry matched: the subreg is unsupported.  */
  return true;
}
3431
 
3432
/* Returns TRUE if we support a move between the first two operands.
3433
   At the moment, we just want to discourage mem to mem moves until
3434
   after reload, because reload has a hard time with our limited
3435
   number of address registers, and we can get into a situation where
3436
   we need three of them when we only have two.  */
3437
bool
3438
m32c_mov_ok (rtx * operands, enum machine_mode mode ATTRIBUTE_UNUSED)
3439
{
3440
  rtx op0 = operands[0];
3441
  rtx op1 = operands[1];
3442
 
3443
  if (TARGET_A24)
3444
    return true;
3445
 
3446
#define DEBUG_MOV_OK 0
3447
#if DEBUG_MOV_OK
3448
  fprintf (stderr, "m32c_mov_ok %s\n", mode_name[mode]);
3449
  debug_rtx (op0);
3450
  debug_rtx (op1);
3451
#endif
3452
 
3453
  if (GET_CODE (op0) == SUBREG)
3454
    op0 = XEXP (op0, 0);
3455
  if (GET_CODE (op1) == SUBREG)
3456
    op1 = XEXP (op1, 0);
3457
 
3458
  if (GET_CODE (op0) == MEM
3459
      && GET_CODE (op1) == MEM
3460
      && ! reload_completed)
3461
    {
3462
#if DEBUG_MOV_OK
3463
      fprintf (stderr, " - no, mem to mem\n");
3464
#endif
3465
      return false;
3466
    }
3467
 
3468
#if DEBUG_MOV_OK
3469
  fprintf (stderr, " - ok\n");
3470
#endif
3471
  return true;
3472
}
3473
 
3474
/* Returns TRUE if two consecutive HImode mov instructions, generated
   for moving an immediate double data to a double data type variable
   location, can be combined into single SImode mov instruction.  */
bool
m32c_immd_dbl_mov (rtx * operands ATTRIBUTE_UNUSED,
                   enum machine_mode mode ATTRIBUTE_UNUSED)
{
  /* ??? This relied on the now-defunct MEM_SCALAR and MEM_IN_STRUCT_P
     flags.  */
  /* The optimization is currently disabled: always decline so the
     two HImode moves are kept as-is.  */
  return false;
}
3485
 
3486
/* Expanders */
3487
 
3488
/* Subregs are non-orthogonal for us, because our registers are all
3489
   different sizes.  */
3490
static rtx
3491
m32c_subreg (enum machine_mode outer,
3492
             rtx x, enum machine_mode inner, int byte)
3493
{
3494
  int r, nr = -1;
3495
 
3496
  /* Converting MEMs to different types that are the same size, we
3497
     just rewrite them.  */
3498
  if (GET_CODE (x) == SUBREG
3499
      && SUBREG_BYTE (x) == 0
3500
      && GET_CODE (SUBREG_REG (x)) == MEM
3501
      && (GET_MODE_SIZE (GET_MODE (x))
3502
          == GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
3503
    {
3504
      rtx oldx = x;
3505
      x = gen_rtx_MEM (GET_MODE (x), XEXP (SUBREG_REG (x), 0));
3506
      MEM_COPY_ATTRIBUTES (x, SUBREG_REG (oldx));
3507
    }
3508
 
3509
  /* Push/pop get done as smaller push/pops.  */
3510
  if (GET_CODE (x) == MEM
3511
      && (GET_CODE (XEXP (x, 0)) == PRE_DEC
3512
          || GET_CODE (XEXP (x, 0)) == POST_INC))
3513
    return gen_rtx_MEM (outer, XEXP (x, 0));
3514
  if (GET_CODE (x) == SUBREG
3515
      && GET_CODE (XEXP (x, 0)) == MEM
3516
      && (GET_CODE (XEXP (XEXP (x, 0), 0)) == PRE_DEC
3517
          || GET_CODE (XEXP (XEXP (x, 0), 0)) == POST_INC))
3518
    return gen_rtx_MEM (outer, XEXP (XEXP (x, 0), 0));
3519
 
3520
  if (GET_CODE (x) != REG)
3521
    {
3522
      rtx r = simplify_gen_subreg (outer, x, inner, byte);
3523
      if (GET_CODE (r) == SUBREG
3524
          && GET_CODE (x) == MEM
3525
          && MEM_VOLATILE_P (x))
3526
        {
3527
          /* Volatile MEMs don't get simplified, but we need them to
3528
             be.  We are little endian, so the subreg byte is the
3529
             offset.  */
3530
          r = adjust_address_nv (x, outer, byte);
3531
        }
3532
      return r;
3533
    }
3534
 
3535
  r = REGNO (x);
3536
  if (r >= FIRST_PSEUDO_REGISTER || r == AP_REGNO)
3537
    return simplify_gen_subreg (outer, x, inner, byte);
3538
 
3539
  if (IS_MEM_REGNO (r))
3540
    return simplify_gen_subreg (outer, x, inner, byte);
3541
 
3542
  /* This is where the complexities of our register layout are
3543
     described.  */
3544
  if (byte == 0)
3545
    nr = r;
3546
  else if (outer == HImode)
3547
    {
3548
      if (r == R0_REGNO && byte == 2)
3549
        nr = R2_REGNO;
3550
      else if (r == R0_REGNO && byte == 4)
3551
        nr = R1_REGNO;
3552
      else if (r == R0_REGNO && byte == 6)
3553
        nr = R3_REGNO;
3554
      else if (r == R1_REGNO && byte == 2)
3555
        nr = R3_REGNO;
3556
      else if (r == A0_REGNO && byte == 2)
3557
        nr = A1_REGNO;
3558
    }
3559
  else if (outer == SImode)
3560
    {
3561
      if (r == R0_REGNO && byte == 0)
3562
        nr = R0_REGNO;
3563
      else if (r == R0_REGNO && byte == 4)
3564
        nr = R1_REGNO;
3565
    }
3566
  if (nr == -1)
3567
    {
3568
      fprintf (stderr, "m32c_subreg %s %s %d\n",
3569
               mode_name[outer], mode_name[inner], byte);
3570
      debug_rtx (x);
3571
      gcc_unreachable ();
3572
    }
3573
  return gen_rtx_REG (outer, nr);
3574
}
3575
 
3576
/* Used to emit move instructions.  We split some moves,
   and avoid mem-mem moves.  Returns nonzero (via m32c_split_move)
   when the move was fully emitted here; zero when the caller should
   emit the (possibly rewritten) OPERANDS itself.  */
int
m32c_prepare_move (rtx * operands, enum machine_mode mode)
{
  /* Constants cannot be stored directly into far address space;
     load them into a register first.  */
  if (far_addr_space_p (operands[0])
      && CONSTANT_P (operands[1]))
    {
      operands[1] = force_reg (GET_MODE (operands[0]), operands[1]);
    }
  /* A16 chips cannot move PSImode in one instruction; split now.  */
  if (TARGET_A16 && mode == PSImode)
    return m32c_split_move (operands, mode, 1);
  /* Lower a PRE_MODIFY destination into an explicit address update
     followed by a plain register-indirect store.  */
  if ((GET_CODE (operands[0]) == MEM)
      && (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY))
    {
      rtx pmv = XEXP (operands[0], 0);
      rtx dest_reg = XEXP (pmv, 0);
      rtx dest_mod = XEXP (pmv, 1);

      emit_insn (gen_rtx_SET (Pmode, dest_reg, dest_mod));
      operands[0] = gen_rtx_MEM (mode, dest_reg);
    }
  /* Avoid mem-to-mem moves by staging the source in a pseudo.  */
  if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
    operands[1] = copy_to_mode_reg (mode, operands[1]);
  return 0;
}
3602
 
3603
#define DEBUG_SPLIT 0

/* Returns TRUE if the given PSImode move should be split.  We split
   for all r8c/m16c moves, since it doesn't support them, and for
   POP.L as we can only *push* SImode.  */
int
m32c_split_psi_p (rtx * operands)
{
#if DEBUG_SPLIT
  fprintf (stderr, "\nm32c_split_psi_p\n");
  debug_rtx (operands[0]);
  debug_rtx (operands[1]);
#endif
  /* The A16 (r8c/m16c) chips never handle PSImode moves directly.  */
  if (TARGET_A16)
    {
#if DEBUG_SPLIT
      fprintf (stderr, "yes, A16\n");
#endif
      return 1;
    }
  /* A POST_INC source is a pop, and there is no POP.L opcode.  */
  if (GET_CODE (operands[1]) == MEM
      && GET_CODE (XEXP (operands[1], 0)) == POST_INC)
    {
#if DEBUG_SPLIT
      fprintf (stderr, "yes, pop.l\n");
#endif
      return 1;
    }
#if DEBUG_SPLIT
  fprintf (stderr, "no, default\n");
#endif
  return 0;
}
3636
 
3637
/* Split the given move.  SPLIT_ALL is 0 if splitting is optional
   (define_expand), 1 if it is not optional (define_insn_and_split),
   and 3 for define_split (alternate api).  Returns nonzero when the
   move was split (and, unless SPLIT_ALL is 3, the partial moves were
   emitted here).  */
int
m32c_split_move (rtx * operands, enum machine_mode mode, int split_all)
{
  rtx s[4], d[4];
  int parts, si, di, rev = 0;
  int rv = 0, opi = 2;
  enum machine_mode submode = HImode;
  rtx *ops, local_ops[10];

  /* define_split modifies the existing operands, but the other two
     emit new insns.  OPS is where we store the operand pairs, which
     we emit later.  */
  if (split_all == 3)
    ops = operands;
  else
    ops = local_ops;

  /* Else HImode.  */
  if (mode == DImode)
    submode = SImode;

  /* Before splitting mem-mem moves, force one operand into a
     register.  */
  if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
    {
#if DEBUG0
      fprintf (stderr, "force_reg...\n");
      debug_rtx (operands[1]);
#endif
      operands[1] = force_reg (mode, operands[1]);
#if DEBUG0
      debug_rtx (operands[1]);
#endif
    }

  /* The move is always split into exactly two SUBMODE pieces.  */
  parts = 2;

#if DEBUG_SPLIT
  fprintf (stderr, "\nsplit_move %d all=%d\n", !can_create_pseudo_p (),
           split_all);
  debug_rtx (operands[0]);
  debug_rtx (operands[1]);
#endif

  /* Note that split_all is not used to select the api after this
     point, so it's safe to set it to 3 even with define_insn.  */
  /* None of the chips can move SI operands to sp-relative addresses,
     so we always split those.  */
  if (m32c_extra_constraint_p (operands[0], 'S', "Ss"))
    split_all = 3;

  /* A16 chips cannot do far-address moves in one piece.  */
  if (TARGET_A16
      && (far_addr_space_p (operands[0])
          || far_addr_space_p (operands[1])))
    split_all |= 1;

  /* We don't need to split these.  */
  if (TARGET_A24
      && split_all != 3
      && (mode == SImode || mode == PSImode)
      && !(GET_CODE (operands[1]) == MEM
           && GET_CODE (XEXP (operands[1], 0)) == POST_INC))
    return 0;

  /* First, enumerate the subregs we'll be dealing with.  */
  for (si = 0; si < parts; si++)
    {
      d[si] =
        m32c_subreg (submode, operands[0], mode,
                     si * GET_MODE_SIZE (submode));
      s[si] =
        m32c_subreg (submode, operands[1], mode,
                     si * GET_MODE_SIZE (submode));
    }

  /* Split pushes by emitting a sequence of smaller pushes.  */
  if (GET_CODE (d[0]) == MEM && GET_CODE (XEXP (d[0], 0)) == PRE_DEC)
    {
      /* Highest part first, so the pieces end up in memory order.  */
      for (si = parts - 1; si >= 0; si--)
        {
          ops[opi++] = gen_rtx_MEM (submode,
                                    gen_rtx_PRE_DEC (Pmode,
                                                     gen_rtx_REG (Pmode,
                                                                  SP_REGNO)));
          ops[opi++] = s[si];
        }

      rv = 1;
    }
  /* Likewise for pops.  */
  else if (GET_CODE (s[0]) == MEM && GET_CODE (XEXP (s[0], 0)) == POST_INC)
    {
      for (di = 0; di < parts; di++)
        {
          ops[opi++] = d[di];
          ops[opi++] = gen_rtx_MEM (submode,
                                    gen_rtx_POST_INC (Pmode,
                                                      gen_rtx_REG (Pmode,
                                                                   SP_REGNO)));
        }
      rv = 1;
    }
  else if (split_all)
    {
      /* if d[di] == s[si] for any di < si, we'll early clobber. */
      for (di = 0; di < parts - 1; di++)
        for (si = di + 1; si < parts; si++)
          if (reg_mentioned_p (d[di], s[si]))
            rev = 1;

      /* Emit the pieces in whichever order avoids clobbering a
         source before it has been read.  */
      if (rev)
        for (si = 0; si < parts; si++)
          {
            ops[opi++] = d[si];
            ops[opi++] = s[si];
          }
      else
        for (si = parts - 1; si >= 0; si--)
          {
            ops[opi++] = d[si];
            ops[opi++] = s[si];
          }
      rv = 1;
    }
  /* Now emit any moves we may have accumulated.  */
  if (rv && split_all != 3)
    {
      int i;
      for (i = 2; i < opi; i += 2)
        emit_move_insn (ops[i], ops[i + 1]);
    }
  return rv;
}
3773
 
3774
/* The m32c has a number of opcodes that act like memcpy, strcmp, and
3775
   the like.  For the R8C they expect one of the addresses to be in
3776
   R1L:An so we need to arrange for that.  Otherwise, it's just a
3777
   matter of picking out the operands we want and emitting the right
3778
   pattern for them.  All these expanders, which correspond to
3779
   patterns in blkmov.md, must return nonzero if they expand the insn,
3780
   or zero if they should FAIL.  */
3781
 
3782
/* This is a memset() opcode.  All operands are implied, so we need to
   arrange for them to be in the right registers.  The opcode wants
   addresses, not [mem] syntax.  $0 is the destination (MEM:BLK), $1
   the count (HI), and $2 the value (QI).  Always returns 1 (the
   expansion never FAILs).  */
int
m32c_expand_setmemhi(rtx *operands)
{
  rtx desta, count, val;
  rtx desto, counto;

  desta = XEXP (operands[0], 0);
  count = operands[1];
  val = operands[2];

  /* Scratch outputs the patterns clobber with the final address and
     count.  */
  desto = gen_reg_rtx (Pmode);
  counto = gen_reg_rtx (HImode);

  /* The destination address must live in a pseudo.  */
  if (GET_CODE (desta) != REG
      || REGNO (desta) < FIRST_PSEUDO_REGISTER)
    desta = copy_to_mode_reg (Pmode, desta);

  /* This looks like an arbitrary restriction, but this is by far the
     most common case.  For counts 8..14 this actually results in
     smaller code with no speed penalty because the half-sized
     constant can be loaded with a shorter opcode.  */
  if (GET_CODE (count) == CONST_INT
      && GET_CODE (val) == CONST_INT
      && ! (INTVAL (count) & 1)
      && (INTVAL (count) > 1)
      && (INTVAL (val) <= 7 && INTVAL (val) >= -8))
    {
      /* Widen the byte value to a word (both halves equal) and halve
         the count, so we can use the word-sized store pattern.  */
      unsigned v = INTVAL (val) & 0xff;
      v = v | (v << 8);
      count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
      val = copy_to_mode_reg (HImode, GEN_INT (v));
      if (TARGET_A16)
        emit_insn (gen_setmemhi_whi_op (desto, counto, val, desta, count));
      else
        emit_insn (gen_setmemhi_wpsi_op (desto, counto, val, desta, count));
      return 1;
    }

  /* This is the generalized memset() case.  */
  if (GET_CODE (val) != REG
      || REGNO (val) < FIRST_PSEUDO_REGISTER)
    val = copy_to_mode_reg (QImode, val);

  if (GET_CODE (count) != REG
      || REGNO (count) < FIRST_PSEUDO_REGISTER)
    count = copy_to_mode_reg (HImode, count);

  if (TARGET_A16)
    emit_insn (gen_setmemhi_bhi_op (desto, counto, val, desta, count));
  else
    emit_insn (gen_setmemhi_bpsi_op (desto, counto, val, desta, count));

  return 1;
}
3840
 
3841
/* This is a memcpy() opcode.  All operands are implied, so we need to
   arrange for them to be in the right registers.  The opcode wants
   addresses, not [mem] syntax.  $0 is the destination (MEM:BLK), $1
   is the source (MEM:BLK), and $2 the count (HI).  Returns nonzero if
   the insn was expanded, zero to make the caller FAIL.  */
int
m32c_expand_movmemhi(rtx *operands)
{
  rtx desta, srca, count;
  rtx desto, srco, counto;	/* dummy outputs clobbered by the opcode.  */

  desta = XEXP (operands[0], 0);	/* destination address */
  srca = XEXP (operands[1], 0);		/* source address */
  count = operands[2];

  desto = gen_reg_rtx (Pmode);
  srco = gen_reg_rtx (Pmode);
  counto = gen_reg_rtx (HImode);

  /* Both addresses must live in pseudos for the pattern's constraints.  */
  if (GET_CODE (desta) != REG
      || REGNO (desta) < FIRST_PSEUDO_REGISTER)
    desta = copy_to_mode_reg (Pmode, desta);

  if (GET_CODE (srca) != REG
      || REGNO (srca) < FIRST_PSEUDO_REGISTER)
    srca = copy_to_mode_reg (Pmode, srca);

  /* Similar to setmem, but we don't need to check the value: an even
     count > 1 lets us use the word-wide copy with half the count.  */
  if (GET_CODE (count) == CONST_INT
      && ! (INTVAL (count) & 1)
      && (INTVAL (count) > 1))
    {
      count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
      if (TARGET_A16)
	emit_insn (gen_movmemhi_whi_op (desto, srco, counto, desta, srca, count));
      else
	emit_insn (gen_movmemhi_wpsi_op (desto, srco, counto, desta, srca, count));
      return 1;
    }

  /* This is the generalized memcpy() case: byte-wide copy with a
     runtime count.  */
  if (GET_CODE (count) != REG
      || REGNO (count) < FIRST_PSEUDO_REGISTER)
    count = copy_to_mode_reg (HImode, count);

  if (TARGET_A16)
    emit_insn (gen_movmemhi_bhi_op (desto, srco, counto, desta, srca, count));
  else
    emit_insn (gen_movmemhi_bpsi_op (desto, srco, counto, desta, srca, count));

  return 1;
}
3892
 
3893
/* This is a stpcpy() opcode.  $0 is the destination (MEM:BLK) after
   the copy, which should point to the NUL at the end of the string,
   $1 is the destination (MEM:BLK), and $2 is the source (MEM:BLK).
   Since our opcode leaves the destination pointing *after* the NUL,
   we must emit an adjustment.  Always returns nonzero (expanded).  */
int
m32c_expand_movstr(rtx *operands)
{
  rtx desta, srca;
  rtx desto, srco;	/* outputs of the copy opcode.  */

  desta = XEXP (operands[1], 0);	/* destination address */
  srca = XEXP (operands[2], 0);		/* source address */

  desto = gen_reg_rtx (Pmode);
  srco = gen_reg_rtx (Pmode);

  /* Addresses must live in pseudos for the pattern's constraints.  */
  if (GET_CODE (desta) != REG
      || REGNO (desta) < FIRST_PSEUDO_REGISTER)
    desta = copy_to_mode_reg (Pmode, desta);

  if (GET_CODE (srca) != REG
      || REGNO (srca) < FIRST_PSEUDO_REGISTER)
    srca = copy_to_mode_reg (Pmode, srca);

  emit_insn (gen_movstr_op (desto, srco, desta, srca));
  /* desto ends up being a1, which allows this type of add through MOVA.
     The -1 backs up over the NUL to satisfy stpcpy semantics.  */
  emit_insn (gen_addpsi3 (operands[0], desto, GEN_INT (-1)));

  return 1;
}
3924
 
3925
/* This is a strcmp() opcode.  $0 is the destination (HI) which holds
   <=>0 depending on the comparison, $1 is one string (MEM:BLK), and
   $2 is the other (MEM:BLK).  We must do the comparison, and then
   convert the flags to a signed integer result.  Always returns
   nonzero (expanded).  */
int
m32c_expand_cmpstr(rtx *operands)
{
  rtx src1a, src2a;

  src1a = XEXP (operands[1], 0);	/* address of first string */
  src2a = XEXP (operands[2], 0);	/* address of second string */

  /* Both addresses must live in pseudos for the pattern's constraints.  */
  if (GET_CODE (src1a) != REG
      || REGNO (src1a) < FIRST_PSEUDO_REGISTER)
    src1a = copy_to_mode_reg (Pmode, src1a);

  if (GET_CODE (src2a) != REG
      || REGNO (src2a) < FIRST_PSEUDO_REGISTER)
    src2a = copy_to_mode_reg (Pmode, src2a);

  /* The compare opcode clobbers its address inputs, hence the
     duplicated operands (outputs first, then inputs).  */
  emit_insn (gen_cmpstrhi_op (src1a, src2a, src1a, src2a));
  /* Materialize the condition flags into the HI result register.  */
  emit_insn (gen_cond_to_int (operands[0]));

  return 1;
}
3950
 
3951
 
3952
typedef rtx (*shift_gen_func)(rtx, rtx, rtx);
3953
 
3954
/* Return the insn-generator function for a shift of the given MODE
   (QI/HI/PSI/SI) and rtx CODE (ASHIFT/ASHIFTRT/LSHIFTRT).  For SImode
   the generator differs between 16-bit (A16) and 24-bit address
   variants.  Aborts on an unsupported combination.  */
static shift_gen_func
shift_gen_func_for (int mode, int code)
{
#define GFF(m,c,f) if (mode == m && code == c) return f
  GFF(QImode,  ASHIFT,   gen_ashlqi3_i);
  GFF(QImode,  ASHIFTRT, gen_ashrqi3_i);
  GFF(QImode,  LSHIFTRT, gen_lshrqi3_i);
  GFF(HImode,  ASHIFT,   gen_ashlhi3_i);
  GFF(HImode,  ASHIFTRT, gen_ashrhi3_i);
  GFF(HImode,  LSHIFTRT, gen_lshrhi3_i);
  GFF(PSImode, ASHIFT,   gen_ashlpsi3_i);
  GFF(PSImode, ASHIFTRT, gen_ashrpsi3_i);
  GFF(PSImode, LSHIFTRT, gen_lshrpsi3_i);
  GFF(SImode,  ASHIFT,   TARGET_A16 ? gen_ashlsi3_16 : gen_ashlsi3_24);
  GFF(SImode,  ASHIFTRT, TARGET_A16 ? gen_ashrsi3_16 : gen_ashrsi3_24);
  GFF(SImode,  LSHIFTRT, TARGET_A16 ? gen_lshrsi3_16 : gen_lshrsi3_24);
#undef GFF
  gcc_unreachable ();
}
3973
 
3974
/* The m32c only has one shift, but it takes a signed count.  GCC
   doesn't want this, so we fake it by negating any shift count when
   we're pretending to shift the other way.  Also, the shift count is
   limited to -8..8.  It's slightly better to use two shifts for 9..15
   than to load the count into r1h, so we do that too.

   SCALE is +1 or -1: -1 means the pattern negates the count (a right
   shift expressed as a left shift, or vice versa).  SHIFT_CODE is the
   rtx code of the underlying shift.  Returns 1 if the insn sequence
   was fully emitted here, 0 if the caller should continue expanding
   its own pattern with the (possibly updated) operands.  */
int
m32c_prepare_shift (rtx * operands, int scale, int shift_code)
{
  enum machine_mode mode = GET_MODE (operands[0]);
  shift_gen_func func = shift_gen_func_for (mode, shift_code);
  rtx temp;

  if (GET_CODE (operands[2]) == CONST_INT)
    {
      /* Constant count: chop it into chunks no larger than the
	 hardware maximum (8, or 32 for wide modes on A24).  */
      int maxc = TARGET_A24 && (mode == PSImode || mode == SImode) ? 32 : 8;
      int count = INTVAL (operands[2]) * scale;

      while (count > maxc)
	{
	  temp = gen_reg_rtx (mode);
	  emit_insn (func (temp, operands[1], GEN_INT (maxc)));
	  operands[1] = temp;
	  count -= maxc;
	}
      while (count < -maxc)
	{
	  temp = gen_reg_rtx (mode);
	  emit_insn (func (temp, operands[1], GEN_INT (-maxc)));
	  operands[1] = temp;
	  count += maxc;
	}
      /* Final (possibly only) shift with the remaining count.  */
      emit_insn (func (operands[0], operands[1], GEN_INT (count)));
      return 1;
    }

  /* Variable count: put it in a QI temp, negated if the pattern
     expects the opposite direction.  */
  temp = gen_reg_rtx (QImode);
  if (scale < 0)
    /* The pattern has a NEG that corresponds to this. */
    emit_move_insn (temp, gen_rtx_NEG (QImode, operands[2]));
  else if (TARGET_A16 && mode == SImode)
    /* We do this because the code below may modify this, we don't
       want to modify the origin of this value.  */
    emit_move_insn (temp, operands[2]);
  else
    /* We'll only use it for the shift, no point emitting a move.  */
    temp = operands[2];

  if (TARGET_A16 && GET_MODE_SIZE (mode) == 4)
    {
      /* The m16c has a limit of -16..16 for SI shifts, even when the
	 shift count is in a register.  Since there are so many targets
	 of these shifts, it's better to expand the RTL here than to
	 call a helper function.

	 The resulting code looks something like this:

		cmp.b	r1h,-16
		jge.b	1f
		shl.l	-16,dest
		add.b	r1h,16
	1f:	cmp.b	r1h,16
		jle.b	1f
		shl.l	16,dest
		sub.b	r1h,16
	1f:	shl.l	r1h,dest

	 We take advantage of the fact that "negative" shifts are
	 undefined to skip one of the comparisons.  */

      rtx count;
      rtx label, insn, tempvar;

      emit_move_insn (operands[0], operands[1]);

      count = temp;
      label = gen_label_rtx ();
      LABEL_NUSES (label) ++;

      tempvar = gen_reg_rtx (mode);

      if (shift_code == ASHIFT)
	{
	  /* This is a left shift.  We only need check positive counts.  */
	  emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode, 0, 0),
					  count, GEN_INT (16), label));
	  /* Two 8-bit shifts stand in for one 16-bit pre-shift.  */
	  emit_insn (func (tempvar, operands[0], GEN_INT (8)));
	  emit_insn (func (operands[0], tempvar, GEN_INT (8)));
	  insn = emit_insn (gen_addqi3 (count, count, GEN_INT (-16)));
	  emit_label_after (label, insn);
	}
      else
	{
	  /* This is a right shift.  We only need check negative counts.  */
	  emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode, 0, 0),
					  count, GEN_INT (-16), label));
	  emit_insn (func (tempvar, operands[0], GEN_INT (-8)));
	  emit_insn (func (operands[0], tempvar, GEN_INT (-8)));
	  insn = emit_insn (gen_addqi3 (count, count, GEN_INT (16)));
	  emit_label_after (label, insn);
	}
      operands[1] = operands[0];
      /* Residual shift by the (now in-range) count.  */
      emit_insn (func (operands[0], operands[0], count));
      return 1;
    }

  /* Hand the adjusted count back for the caller's own pattern.  */
  operands[2] = temp;
  return 0;
}
4082
 
4083
/* The m32c has a limited range of operations that work on PSImode
   values; we have to expand to SI, do the math, and truncate back to
   PSI.  Yes, this is expensive, but hopefully gcc will learn to avoid
   those cases.  Expands operands[0] = operands[1] * operands[2] where
   the operands are PSImode.  */
void
m32c_expand_neg_mulpsi3 (rtx * operands)
{
  /* operands: a = b * i */
  rtx temp1; /* b as SI */
  rtx scale /* i as SI */;
  rtx temp2; /* a*b as SI */

  temp1 = gen_reg_rtx (SImode);
  temp2 = gen_reg_rtx (SImode);
  if (GET_CODE (operands[2]) != CONST_INT)
    {
      /* Runtime multiplier: widen PSI -> SI explicitly.  */
      scale = gen_reg_rtx (SImode);
      emit_insn (gen_zero_extendpsisi2 (scale, operands[2]));
    }
  else
    /* Constant multiplier: just load it as an SImode value.  */
    scale = copy_to_mode_reg (SImode, operands[2]);

  emit_insn (gen_zero_extendpsisi2 (temp1, operands[1]));
  /* Do the multiply in SImode (library call allowed), then narrow.  */
  temp2 = expand_simple_binop (SImode, MULT, temp1, scale, temp2, 1, OPTAB_LIB);
  emit_insn (gen_truncsipsi2 (operands[0], temp2));
}
4109
 
4110
/* Pattern Output Functions */
4111
 
4112
/* Expand a conditional move.  NOTE the inverted return convention
   relative to the expanders above: returns nonzero to make the
   caller FAIL, zero when the insn was emitted.  Only EQ/NE
   comparisons with two constant arms are supported; NE is
   canonicalized to EQ by swapping the arms.  */
int
m32c_expand_movcc (rtx *operands)
{
  rtx rel = operands[1];	/* the comparison rtx */

  if (GET_CODE (rel) != EQ && GET_CODE (rel) != NE)
    return 1;
  if (GET_CODE (operands[2]) != CONST_INT
      || GET_CODE (operands[3]) != CONST_INT)
    return 1;
  if (GET_CODE (rel) == NE)
    {
      /* Swap the arms and rewrite NE as EQ.  */
      rtx tmp = operands[2];
      operands[2] = operands[3];
      operands[3] = tmp;
      rel = gen_rtx_EQ (GET_MODE (rel), XEXP (rel, 0), XEXP (rel, 1));
    }

  emit_move_insn (operands[0],
		  gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
					rel,
					operands[2],
					operands[3]));
  return 0;
}
4137
 
4138
/* Used for the "insv" pattern.  Return nonzero to fail, else done.
   Expands a single-bit insert (bset/bclr style) by emitting an
   AND (clearing a bit) or IOR (setting a bit) with a one-bit mask:
   operands[0] is the destination, operands[1] the width (must be 1),
   operands[2] the bit position, operands[3] the value (0, 1 or -1).  */
int
m32c_expand_insv (rtx *operands)
{
  rtx op0, src0, p;
  int mask;

  if (INTVAL (operands[1]) != 1)
    return 1;

  /* Our insv opcode (bset, bclr) can only insert a one-bit constant.  */
  if (GET_CODE (operands[3]) != CONST_INT)
    return 1;
  if (INTVAL (operands[3]) != 0
      && INTVAL (operands[3]) != 1
      && INTVAL (operands[3]) != -1)
    return 1;

  mask = 1 << INTVAL (operands[2]);

  op0 = operands[0];
  /* A low-part SUBREG of an HI/QI reg can be operated on directly.  */
  if (GET_CODE (op0) == SUBREG
      && SUBREG_BYTE (op0) == 0)
    {
      rtx sub = SUBREG_REG (op0);
      if (GET_MODE (sub) == HImode || GET_MODE (sub) == QImode)
	op0 = sub;
    }

  /* Read through a temp unless we must touch the original (reload in
     progress, or a volatile MEM which must not be read twice).  */
  if (!can_create_pseudo_p ()
      || (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0)))
    src0 = op0;
  else
    {
      src0 = gen_reg_rtx (GET_MODE (op0));
      emit_move_insn (src0, op0);
    }

  if (GET_MODE (op0) == HImode
      && INTVAL (operands[2]) >= 8
      && GET_CODE (op0) == MEM)
    {
      /* We are little endian.  */
      rtx new_mem = gen_rtx_MEM (QImode, plus_constant (XEXP (op0, 0), 1));
      MEM_COPY_ATTRIBUTES (new_mem, op0);
      /* NOTE(review): new_mem is computed but never used -- op0 is not
	 updated to it, yet mask is narrowed to a QI mask below.  This
	 looks like a latent bug (presumably `op0 = new_mem;` was
	 intended); note that a fix would also have to reconcile src0,
	 which was copied above in HImode.  TODO confirm against
	 upstream GCC history before changing.  */
      mask >>= 8;
    }

  /* First, we generate a mask with the correct polarity.  If we are
     storing a zero, we want an AND mask, so invert it.  */
  if (INTVAL (operands[3]) == 0)
    {
      /* Storing a zero, use an AND mask */
      if (GET_MODE (op0) == HImode)
	mask ^= 0xffff;
      else
	mask ^= 0xff;
    }
  /* Now we need to properly sign-extend the mask in case we need to
     fall back to an AND or OR opcode.  */
  if (GET_MODE (op0) == HImode)
    {
      if (mask & 0x8000)
	mask -= 0x10000;
    }
  else
    {
      if (mask & 0x80)
	mask -= 0x100;
    }

  /* Dispatch on value polarity (AND vs IOR), operand width (QI vs HI)
     and address size (16 vs 24 bit) -- eight generator variants.  */
  switch (  (INTVAL (operands[3]) ? 4 : 0)
	  + ((GET_MODE (op0) == HImode) ? 2 : 0)
	  + (TARGET_A24 ? 1 : 0))
    {
    case 0: p = gen_andqi3_16 (op0, src0, GEN_INT (mask)); break;
    case 1: p = gen_andqi3_24 (op0, src0, GEN_INT (mask)); break;
    case 2: p = gen_andhi3_16 (op0, src0, GEN_INT (mask)); break;
    case 3: p = gen_andhi3_24 (op0, src0, GEN_INT (mask)); break;
    case 4: p = gen_iorqi3_16 (op0, src0, GEN_INT (mask)); break;
    case 5: p = gen_iorqi3_24 (op0, src0, GEN_INT (mask)); break;
    case 6: p = gen_iorhi3_16 (op0, src0, GEN_INT (mask)); break;
    case 7: p = gen_iorhi3_24 (op0, src0, GEN_INT (mask)); break;
    default: p = NULL_RTX; break; /* Not reached, but silences a warning.  */
    }

  emit_insn (p);
  return 0;
}
4227
 
4228
const char *
4229
m32c_scc_pattern(rtx *operands, RTX_CODE code)
4230
{
4231
  static char buf[30];
4232
  if (GET_CODE (operands[0]) == REG
4233
      && REGNO (operands[0]) == R0_REGNO)
4234
    {
4235
      if (code == EQ)
4236
        return "stzx\t#1,#0,r0l";
4237
      if (code == NE)
4238
        return "stzx\t#0,#1,r0l";
4239
    }
4240
  sprintf(buf, "bm%s\t0,%%h0\n\tand.b\t#1,%%0", GET_RTX_NAME (code));
4241
  return buf;
4242
}
4243
 
4244
/* Encode symbol attributes of a SYMBOL_REF into its
4245
   SYMBOL_REF_FLAGS. */
4246
static void
4247
m32c_encode_section_info (tree decl, rtx rtl, int first)
4248
{
4249
  int extra_flags = 0;
4250
 
4251
  default_encode_section_info (decl, rtl, first);
4252
  if (TREE_CODE (decl) == FUNCTION_DECL
4253
      && m32c_special_page_vector_p (decl))
4254
 
4255
    extra_flags = SYMBOL_FLAG_FUNCVEC_FUNCTION;
4256
 
4257
  if (extra_flags)
4258
    SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= extra_flags;
4259
}
4260
 
4261
/* Returns TRUE if the current function is a leaf, and thus we can
   determine which registers an interrupt function really needs to
   save.  The logic below is mostly about finding the insn sequence
   that's the function, versus any sequence that might be open for the
   current insn.  */
static int
m32c_leaf_function_p (void)
{
  rtx saved_first, saved_last;
  struct sequence_stack *seq;
  int rv;

  /* Temporarily swap in the outermost (function-level) insn sequence
     so leaf_function_p examines the right insn chain.  */
  saved_first = crtl->emit.x_first_insn;
  saved_last = crtl->emit.x_last_insn;
  /* Walk to the bottom of the sequence stack; that entry holds the
     function-level first/last insns.  */
  for (seq = crtl->emit.sequence_stack; seq && seq->next; seq = seq->next)
    ;
  if (seq)
    {
      crtl->emit.x_first_insn = seq->first;
      crtl->emit.x_last_insn = seq->last;
    }

  rv = leaf_function_p ();

  /* Restore whatever sequence was active before.  */
  crtl->emit.x_first_insn = saved_first;
  crtl->emit.x_last_insn = saved_last;
  return rv;
}
4289
 
4290
/* Returns TRUE if the current function needs to use the ENTER/EXIT
   opcodes.  If the function doesn't need the frame base or stack
   pointer, it can use the simpler RTS opcode.  */
static bool
m32c_function_needs_enter (void)
{
  rtx insn;
  struct sequence_stack *seq;
  rtx sp = gen_rtx_REG (Pmode, SP_REGNO);
  rtx fb = gen_rtx_REG (Pmode, FB_REGNO);

  /* Start scanning from the outermost sequence: walk the sequence
     stack to its bottom, taking that level's first insn.  */
  insn = get_insns ();
  for (seq = crtl->emit.sequence_stack;
       seq;
       insn = seq->first, seq = seq->next);

  /* Any mention of SP or FB means we need a real frame.  */
  while (insn)
    {
      if (reg_mentioned_p (sp, insn))
	return true;
      if (reg_mentioned_p (fb, insn))
	return true;
      insn = NEXT_INSN (insn);
    }
  return false;
}
4316
 
4317
/* Mark all the subexpressions of the PARALLEL rtx PAR as
4318
   frame-related.  Return PAR.
4319
 
4320
   dwarf2out.c:dwarf2out_frame_debug_expr ignores sub-expressions of a
4321
   PARALLEL rtx other than the first if they do not have the
4322
   FRAME_RELATED flag set on them.  So this function is handy for
4323
   marking up 'enter' instructions.  */
4324
static rtx
4325
m32c_all_frame_related (rtx par)
4326
{
4327
  int len = XVECLEN (par, 0);
4328
  int i;
4329
 
4330
  for (i = 0; i < len; i++)
4331
    F (XVECEXP (par, 0, i));
4332
 
4333
  return par;
4334
}
4335
 
4336
/* Emits the prologue.  See the frame layout comment earlier in this
   file.  We can reserve up to 256 bytes with the ENTER opcode, beyond
   that we manually update sp.  */
void
m32c_emit_prologue (void)
{
  int frame_size, extra_frame_size = 0, reg_save_size;
  int complex_prologue = 0;	/* nonzero: emit a marker for debugging.  */

  cfun->machine->is_leaf = m32c_leaf_function_p ();
  if (interrupt_p (cfun->decl))
    {
      cfun->machine->is_interrupt = 1;
      complex_prologue = 1;
    }
  else if (bank_switch_p (cfun->decl))
    warning (OPT_Wattributes,
	     "%<bank_switch%> has no effect on non-interrupt functions");

  /* Bytes of registers PUSHM will save (count only, nothing emitted).  */
  reg_save_size = m32c_pushm_popm (PP_justcount);

  if (interrupt_p (cfun->decl))
    {
      /* Bank-switched interrupts use FSET B instead of pushing regs.  */
      if (bank_switch_p (cfun->decl))
	emit_insn (gen_fset_b ());
      else if (cfun->machine->intr_pushm)
	emit_insn (gen_pushm (GEN_INT (cfun->machine->intr_pushm)));
    }

  frame_size =
    m32c_initial_elimination_offset (FB_REGNO, SP_REGNO) - reg_save_size;
  /* Frameless functions that never touch SP/FB can return with RTS.  */
  if (frame_size == 0
      && !m32c_function_needs_enter ())
    cfun->machine->use_rts = 1;

  /* ENTER only encodes up to 254 bytes; adjust SP manually beyond that.  */
  if (frame_size > 254)
    {
      extra_frame_size = frame_size - 254;
      frame_size = 254;
    }
  if (cfun->machine->use_rts == 0)
    F (emit_insn (m32c_all_frame_related
		  (TARGET_A16
		   ? gen_prologue_enter_16 (GEN_INT (frame_size + 2))
		   : gen_prologue_enter_24 (GEN_INT (frame_size + 4)))));

  if (extra_frame_size)
    {
      complex_prologue = 1;
      if (TARGET_A16)
	F (emit_insn (gen_addhi3 (gen_rtx_REG (HImode, SP_REGNO),
				  gen_rtx_REG (HImode, SP_REGNO),
				  GEN_INT (-extra_frame_size))));
      else
	F (emit_insn (gen_addpsi3 (gen_rtx_REG (PSImode, SP_REGNO),
				   gen_rtx_REG (PSImode, SP_REGNO),
				   GEN_INT (-extra_frame_size))));
    }

  /* Now actually emit the PUSHM; nonzero count marks the prologue
     as complex.  */
  complex_prologue += m32c_pushm_popm (PP_pushm);

  /* This just emits a comment into the .s file for debugging.  */
  if (complex_prologue)
    emit_insn (gen_prologue_end ());
}
4401
 
4402
/* Likewise, for the epilogue.  The only exception is that, for
   interrupts, we must manually unwind the frame as the REIT opcode
   doesn't do that.  */
void
m32c_emit_epilogue (void)
{
  int popm_count = m32c_pushm_popm (PP_justcount);

  /* This just emits a comment into the .s file for debugging.  */
  if (popm_count > 0 || cfun->machine->is_interrupt)
    emit_insn (gen_epilogue_start ());

  if (popm_count > 0)
    m32c_pushm_popm (PP_popm);

  if (cfun->machine->is_interrupt)
    {
      enum machine_mode spmode = TARGET_A16 ? HImode : PSImode;

      /* REIT clears B flag and restores $fp for us, but we still
	 have to fix up the stack.  USE_RTS just means we didn't
	 emit ENTER.  */
      if (!cfun->machine->use_rts)
	{
	  /* SP = FP (via A0 since SP can't be loaded directly), then
	     pop the saved FP.  */
	  emit_move_insn (gen_rtx_REG (spmode, A0_REGNO),
			  gen_rtx_REG (spmode, FP_REGNO));
	  emit_move_insn (gen_rtx_REG (spmode, SP_REGNO),
			  gen_rtx_REG (spmode, A0_REGNO));
	  /* We can't just add this to the POPM because it would be in
	     the wrong order, and wouldn't fix the stack if we're bank
	     switching.  */
	  if (TARGET_A16)
	    emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, FP_REGNO)));
	  else
	    emit_insn (gen_poppsi (gen_rtx_REG (PSImode, FP_REGNO)));
	}
      if (!bank_switch_p (cfun->decl) && cfun->machine->intr_pushm)
	emit_insn (gen_popm (GEN_INT (cfun->machine->intr_pushm)));

      /* The FREIT (Fast REturn from InTerrupt) instruction should be
	 generated only for M32C/M32CM targets (generate the REIT
	 instruction otherwise).  */
      if (fast_interrupt_p (cfun->decl))
	{
	  /* Check if fast_attribute is set for M32C or M32CM.  */
	  if (TARGET_A24)
	    {
	      emit_jump_insn (gen_epilogue_freit ());
	    }
	  /* If fast_interrupt attribute is set for an R8C or M16C
	     target ignore this attribute and generated REIT
	     instruction.  */
	  else
	    {
	      warning (OPT_Wattributes,
		       "%<fast_interrupt%> attribute directive ignored");
	      emit_jump_insn (gen_epilogue_reit_16 ());
	    }
	}
      else if (TARGET_A16)
	emit_jump_insn (gen_epilogue_reit_16 ());
      else
	emit_jump_insn (gen_epilogue_reit_24 ());
    }
  else if (cfun->machine->use_rts)
    emit_jump_insn (gen_epilogue_rts ());
  else if (TARGET_A16)
    emit_jump_insn (gen_epilogue_exitd_16 ());
  else
    emit_jump_insn (gen_epilogue_exitd_24 ());
}
4473
 
4474
/* Emit the exception-handling epilogue.  RET_ADDR is the register
   holding the address to return to.  */
void
m32c_emit_eh_epilogue (rtx ret_addr)
{
  /* R0[R2] has the stack adjustment.  R1[R3] has the address to
     return to.  We have to fudge the stack, pop everything, pop SP
     (fudged), and return (fudged).  This is actually easier to do in
     assembler, so punt to libgcc.  */
  emit_jump_insn (gen_eh_epilogue (ret_addr, cfun->machine->eh_stack_adjust));
  /*  emit_clobber (gen_rtx_REG (HImode, R0L_REGNO)); */
}
4484
 
4485
/* Indicate which flags must be properly set for a given conditional.  */
4486
static int
4487
flags_needed_for_conditional (rtx cond)
4488
{
4489
  switch (GET_CODE (cond))
4490
    {
4491
    case LE:
4492
    case GT:
4493
      return FLAGS_OSZ;
4494
    case LEU:
4495
    case GTU:
4496
      return FLAGS_ZC;
4497
    case LT:
4498
    case GE:
4499
      return FLAGS_OS;
4500
    case LTU:
4501
    case GEU:
4502
      return FLAGS_C;
4503
    case EQ:
4504
    case NE:
4505
      return FLAGS_Z;
4506
    default:
4507
      return FLAGS_N;
4508
    }
4509
}
4510
 
4511
#define DEBUG_CMP 0
4512
 
4513
/* Returns true if a compare insn is redundant because it would only
4514
   set flags that are already set correctly.  */
4515
static bool
4516
m32c_compare_redundant (rtx cmp, rtx *operands)
4517
{
4518
  int flags_needed;
4519
  int pflags;
4520
  rtx prev, pp, next;
4521
  rtx op0, op1;
4522
#if DEBUG_CMP
4523
  int prev_icode, i;
4524
#endif
4525
 
4526
  op0 = operands[0];
4527
  op1 = operands[1];
4528
 
4529
#if DEBUG_CMP
4530
  fprintf(stderr, "\n\033[32mm32c_compare_redundant\033[0m\n");
4531
  debug_rtx(cmp);
4532
  for (i=0; i<2; i++)
4533
    {
4534
      fprintf(stderr, "operands[%d] = ", i);
4535
      debug_rtx(operands[i]);
4536
    }
4537
#endif
4538
 
4539
  next = next_nonnote_insn (cmp);
4540
  if (!next || !INSN_P (next))
4541
    {
4542
#if DEBUG_CMP
4543
      fprintf(stderr, "compare not followed by insn\n");
4544
      debug_rtx(next);
4545
#endif
4546
      return false;
4547
    }
4548
  if (GET_CODE (PATTERN (next)) == SET
4549
      && GET_CODE (XEXP ( PATTERN (next), 1)) == IF_THEN_ELSE)
4550
    {
4551
      next = XEXP (XEXP (PATTERN (next), 1), 0);
4552
    }
4553
  else if (GET_CODE (PATTERN (next)) == SET)
4554
    {
4555
      /* If this is a conditional, flags_needed will be something
4556
         other than FLAGS_N, which we test below.  */
4557
      next = XEXP (PATTERN (next), 1);
4558
    }
4559
  else
4560
    {
4561
#if DEBUG_CMP
4562
      fprintf(stderr, "compare not followed by conditional\n");
4563
      debug_rtx(next);
4564
#endif
4565
      return false;
4566
    }
4567
#if DEBUG_CMP
4568
  fprintf(stderr, "conditional is: ");
4569
  debug_rtx(next);
4570
#endif
4571
 
4572
  flags_needed = flags_needed_for_conditional (next);
4573
  if (flags_needed == FLAGS_N)
4574
    {
4575
#if DEBUG_CMP
4576
      fprintf(stderr, "compare not followed by conditional\n");
4577
      debug_rtx(next);
4578
#endif
4579
      return false;
4580
    }
4581
 
4582
  /* Compare doesn't set overflow and carry the same way that
4583
     arithmetic instructions do, so we can't replace those.  */
4584
  if (flags_needed & FLAGS_OC)
4585
    return false;
4586
 
4587
  prev = cmp;
4588
  do {
4589
    prev = prev_nonnote_insn (prev);
4590
    if (!prev)
4591
      {
4592
#if DEBUG_CMP
4593
        fprintf(stderr, "No previous insn.\n");
4594
#endif
4595
        return false;
4596
      }
4597
    if (!INSN_P (prev))
4598
      {
4599
#if DEBUG_CMP
4600
        fprintf(stderr, "Previous insn is a non-insn.\n");
4601
#endif
4602
        return false;
4603
      }
4604
    pp = PATTERN (prev);
4605
    if (GET_CODE (pp) != SET)
4606
      {
4607
#if DEBUG_CMP
4608
        fprintf(stderr, "Previous insn is not a SET.\n");
4609
#endif
4610
        return false;
4611
      }
4612
    pflags = get_attr_flags (prev);
4613
 
4614
    /* Looking up attributes of previous insns corrupted the recog
4615
       tables.  */
4616
    INSN_UID (cmp) = -1;
4617
    recog (PATTERN (cmp), cmp, 0);
4618
 
4619
    if (pflags == FLAGS_N
4620
        && reg_mentioned_p (op0, pp))
4621
      {
4622
#if DEBUG_CMP
4623
        fprintf(stderr, "intermediate non-flags insn uses op:\n");
4624
        debug_rtx(prev);
4625
#endif
4626
        return false;
4627
      }
4628
 
4629
    /* Check for comparisons against memory - between volatiles and
4630
       aliases, we just can't risk this one.  */
4631
    if (GET_CODE (operands[0]) == MEM
4632
        || GET_CODE (operands[0]) == MEM)
4633
      {
4634
#if DEBUG_CMP
4635
        fprintf(stderr, "comparisons with memory:\n");
4636
        debug_rtx(prev);
4637
#endif
4638
        return false;
4639
      }
4640
 
4641
    /* Check for PREV changing a register that's used to compute a
4642
       value in CMP, even if it doesn't otherwise change flags.  */
4643
    if (GET_CODE (operands[0]) == REG
4644
        && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[0]))
4645
      {
4646
#if DEBUG_CMP
4647
        fprintf(stderr, "sub-value affected, op0:\n");
4648
        debug_rtx(prev);
4649
#endif
4650
        return false;
4651
      }
4652
    if (GET_CODE (operands[1]) == REG
4653
        && rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[1]))
4654
      {
4655
#if DEBUG_CMP
4656
        fprintf(stderr, "sub-value affected, op1:\n");
4657
        debug_rtx(prev);
4658
#endif
4659
        return false;
4660
      }
4661
 
4662
  } while (pflags == FLAGS_N);
4663
#if DEBUG_CMP
4664
  fprintf(stderr, "previous flag-setting insn:\n");
4665
  debug_rtx(prev);
4666
  debug_rtx(pp);
4667
#endif
4668
 
4669
  if (GET_CODE (pp) == SET
4670
      && GET_CODE (XEXP (pp, 0)) == REG
4671
      && REGNO (XEXP (pp, 0)) == FLG_REGNO
4672
      && GET_CODE (XEXP (pp, 1)) == COMPARE)
4673
    {
4674
      /* Adjacent cbranches must have the same operands to be
4675
         redundant.  */
4676
      rtx pop0 = XEXP (XEXP (pp, 1), 0);
4677
      rtx pop1 = XEXP (XEXP (pp, 1), 1);
4678
#if DEBUG_CMP
4679
      fprintf(stderr, "adjacent cbranches\n");
4680
      debug_rtx(pop0);
4681
      debug_rtx(pop1);
4682
#endif
4683
      if (rtx_equal_p (op0, pop0)
4684
          && rtx_equal_p (op1, pop1))
4685
        return true;
4686
#if DEBUG_CMP
4687
      fprintf(stderr, "prev cmp not same\n");
4688
#endif
4689
      return false;
4690
    }
4691
 
4692
  /* Else the previous insn must be a SET, with either the source or
4693
     dest equal to operands[0], and operands[1] must be zero.  */
4694
 
4695
  if (!rtx_equal_p (op1, const0_rtx))
4696
    {
4697
#if DEBUG_CMP
4698
      fprintf(stderr, "operands[1] not const0_rtx\n");
4699
#endif
4700
      return false;
4701
    }
4702
  if (GET_CODE (pp) != SET)
4703
    {
4704
#if DEBUG_CMP
4705
      fprintf (stderr, "pp not set\n");
4706
#endif
4707
      return false;
4708
    }
4709
  if (!rtx_equal_p (op0, SET_SRC (pp))
4710
      && !rtx_equal_p (op0, SET_DEST (pp)))
4711
    {
4712
#if DEBUG_CMP
4713
      fprintf(stderr, "operands[0] not found in set\n");
4714
#endif
4715
      return false;
4716
    }
4717
 
4718
#if DEBUG_CMP
4719
  fprintf(stderr, "cmp flags %x prev flags %x\n", flags_needed, pflags);
4720
#endif
4721
  if ((pflags & flags_needed) == flags_needed)
4722
    return true;
4723
 
4724
  return false;
4725
}
4726
 
4727
/* Return the pattern for a compare.  This will be commented out if
   the compare is redundant, else a normal pattern is returned.  Thus,
   the assembler output says where the compare would have been.
   NOTE: the template is a static buffer whose size-suffix character
   (index 5) is patched in place on every call; the returned pointer
   either includes the leading ';' (redundant: emitted as a comment)
   or skips it (templ + 1: a live cmp instruction).  */
char *
m32c_output_compare (rtx insn, rtx *operands)
{
  static char templ[] = ";cmp.b\t%1,%0";
  /*                             ^ 5  */

  /* Patch the size suffix: 1 -> 'b', 2 -> 'w', 4 -> 'l'.  */
  templ[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands[0]))];
  if (m32c_compare_redundant (insn, operands))
    {
#if DEBUG_CMP
      fprintf(stderr, "cbranch: cmp not needed\n");
#endif
      return templ;
    }

#if DEBUG_CMP
  fprintf(stderr, "cbranch: cmp needed: `%s'\n", templ + 1);
#endif
  return templ + 1;
}
4750
 
4751
#undef TARGET_ENCODE_SECTION_INFO
4752
#define TARGET_ENCODE_SECTION_INFO m32c_encode_section_info
4753
 
4754
/* If the frame pointer isn't used, we detect it manually.  But the
4755
   stack pointer doesn't have as flexible addressing as the frame
4756
   pointer, so we always assume we have it.  */
4757
 
4758
#undef TARGET_FRAME_POINTER_REQUIRED
4759
#define TARGET_FRAME_POINTER_REQUIRED hook_bool_void_true
4760
 
4761
/* The Global `targetm' Variable. */
4762
 
4763
struct gcc_target targetm = TARGET_INITIALIZER;
4764
 
4765
#include "gt-m32c.h"

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.