/* Output routines for Motorola MCore processor
   Copyright (C) 1993, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008,
   2009 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "assert.h"
#include "mcore.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "obstack.h"
#include "expr.h"
#include "reload.h"
#include "recog.h"
#include "function.h"
#include "ggc.h"
#include "toplev.h"
#include "target.h"
#include "target-def.h"
#include "df.h"

/* Maximum size we are allowed to grow the stack in a single operation.
   If we want more, we must do it in increments of at most this size.
   If this value is 0, we don't check at all.  */
int mcore_stack_increment = STACK_UNITS_MAXSTEP;

/* For dumping information about frame sizes.  */
char * mcore_current_function_name = 0;
long   mcore_current_compilation_timestamp = 0;

/* Global variables for machine-dependent things.  */

/* Provides the class number of the smallest class containing
   reg number.  */
const enum reg_class regno_reg_class[FIRST_PSEUDO_REGISTER] =
{
  GENERAL_REGS, ONLYR1_REGS,  LRW_REGS,     LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS,     LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS,     LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS,     GENERAL_REGS,
  GENERAL_REGS, C_REGS,       NO_REGS,      NO_REGS,
};

/* Provide reg_class from a letter such as appears in the machine
   description.  */
const enum reg_class reg_class_from_letter[] =
{
  /* a */ LRW_REGS, /* b */ ONLYR1_REGS, /* c */ C_REGS,  /* d */ NO_REGS,
  /* e */ NO_REGS, /* f */ NO_REGS, /* g */ NO_REGS, /* h */ NO_REGS,
  /* i */ NO_REGS, /* j */ NO_REGS, /* k */ NO_REGS, /* l */ NO_REGS,
  /* m */ NO_REGS, /* n */ NO_REGS, /* o */ NO_REGS, /* p */ NO_REGS,
  /* q */ NO_REGS, /* r */ GENERAL_REGS, /* s */ NO_REGS, /* t */ NO_REGS,
  /* u */ NO_REGS, /* v */ NO_REGS, /* w */ NO_REGS, /* x */ ALL_REGS,
  /* y */ NO_REGS, /* z */ NO_REGS
};

struct mcore_frame
{
  int arg_size;                 /* Stdarg spills (bytes).  */
  int reg_size;                 /* Non-volatile reg saves (bytes).  */
  int reg_mask;                 /* Non-volatile reg saves.  */
  int local_size;               /* Locals.  */
  int outbound_size;            /* Arg overflow on calls out.  */
  int pad_outbound;
  int pad_local;
  int pad_reg;
  /* Describe the steps we'll use to grow it.  */
#define MAX_STACK_GROWS 4       /* Gives us some spare space.  */
  int growth[MAX_STACK_GROWS];
  int arg_offset;
  int reg_offset;
  int reg_growth;
  int local_growth;
};

typedef enum
{
  COND_NO,
  COND_MOV_INSN,
  COND_CLR_INSN,
  COND_INC_INSN,
  COND_DEC_INSN,
  COND_BRANCH_INSN
}
cond_type;

static void       output_stack_adjust           (int, int);
static int        calc_live_regs                (int *);
static int        try_constant_tricks           (HOST_WIDE_INT, HOST_WIDE_INT *, HOST_WIDE_INT *);
static const char *     output_inline_const     (enum machine_mode, rtx *);
static void       layout_mcore_frame            (struct mcore_frame *);
static void       mcore_setup_incoming_varargs  (CUMULATIVE_ARGS *, enum machine_mode, tree, int *, int);
static cond_type  is_cond_candidate             (rtx);
static rtx        emit_new_cond_insn            (rtx, int);
static rtx        conditionalize_block          (rtx);
static void       conditionalize_optimization   (void);
static void       mcore_reorg                   (void);
static rtx        handle_structs_in_regs        (enum machine_mode, const_tree, int);
static void       mcore_mark_dllexport          (tree);
static void       mcore_mark_dllimport          (tree);
static int        mcore_dllexport_p             (tree);
static int        mcore_dllimport_p             (tree);
static tree       mcore_handle_naked_attribute  (tree *, tree, tree, int, bool *);
#ifdef OBJECT_FORMAT_ELF
static void       mcore_asm_named_section       (const char *,
                                                 unsigned int, tree);
#endif
static void       mcore_unique_section          (tree, int);
static void mcore_encode_section_info           (tree, rtx, int);
static const char *mcore_strip_name_encoding    (const char *);
static int        mcore_const_costs             (rtx, RTX_CODE);
static int        mcore_and_cost                (rtx);
static int        mcore_ior_cost                (rtx);
static bool       mcore_rtx_costs               (rtx, int, int, int *, bool);
static void       mcore_external_libcall        (rtx);
static bool       mcore_return_in_memory        (const_tree, const_tree);
static int        mcore_arg_partial_bytes       (CUMULATIVE_ARGS *,
                                                 enum machine_mode,
                                                 tree, bool);
static void       mcore_asm_trampoline_template (FILE *);
static void       mcore_trampoline_init         (rtx, tree, rtx);

/* MCore specific attributes.  */

static const struct attribute_spec mcore_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  { "dllexport", 0, 0, true,  false, false, NULL },
  { "dllimport", 0, 0, true,  false, false, NULL },
  { "naked",     0, 0, true,  false, false, mcore_handle_naked_attribute },
  { NULL,        0, 0, false, false, false, NULL }
};

/* Initialize the GCC target structure.  */
#undef  TARGET_ASM_EXTERNAL_LIBCALL
#define TARGET_ASM_EXTERNAL_LIBCALL     mcore_external_libcall

#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef  TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES    merge_dllimport_decl_attributes
#endif

#ifdef OBJECT_FORMAT_ELF
#undef  TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef  TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#endif

#undef  TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE          mcore_attribute_table
#undef  TARGET_ASM_UNIQUE_SECTION
#define TARGET_ASM_UNIQUE_SECTION       mcore_unique_section
#undef  TARGET_ASM_FUNCTION_RODATA_SECTION
#define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
#undef  TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS     TARGET_DEFAULT
#undef  TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO      mcore_encode_section_info
#undef  TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING      mcore_strip_name_encoding
#undef  TARGET_RTX_COSTS
#define TARGET_RTX_COSTS                mcore_rtx_costs
#undef  TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST             hook_int_rtx_bool_0
#undef  TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG  mcore_reorg

#undef  TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE    default_promote_function_mode_always_promote
#undef  TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES       hook_bool_const_tree_true

#undef  TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY         mcore_return_in_memory
#undef  TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK       must_pass_in_stack_var_size
#undef  TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE  hook_pass_by_reference_must_pass_in_stack
#undef  TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES        mcore_arg_partial_bytes

#undef  TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS   mcore_setup_incoming_varargs

#undef  TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE  mcore_asm_trampoline_template
#undef  TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT          mcore_trampoline_init

struct gcc_target targetm = TARGET_INITIALIZER;

/* Adjust the stack by SIZE bytes: grow it if DIRECTION is negative,
   shrink it if DIRECTION is positive.  */
static void
output_stack_adjust (int direction, int size)
{
  /* If extending stack a lot, we do it incrementally.  */
  if (direction < 0 && size > mcore_stack_increment && mcore_stack_increment > 0)
    {
      rtx tmp = gen_rtx_REG (SImode, 1);
      rtx memref;

      emit_insn (gen_movsi (tmp, GEN_INT (mcore_stack_increment)));
      do
        {
          emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
          memref = gen_rtx_MEM (SImode, stack_pointer_rtx);
          MEM_VOLATILE_P (memref) = 1;
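          /* The volatile store of SP into the newly allocated word
             doubles as the stack probe for this increment; only the
             final residual adjustment below goes unprobed.  */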
          emit_insn (gen_movsi (memref, stack_pointer_rtx));
          size -= mcore_stack_increment;
        }
      while (size > mcore_stack_increment);

      /* SIZE is now the residual for the last adjustment,
         which doesn't require a probe.  */
    }

  if (size)
    {
      rtx insn;
      rtx val = GEN_INT (size);

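      /* Adjustments larger than 32 bytes do not fit in an addi/subi
         immediate (cf. ADDI_REACH below), so stage the value through
         a scratch register first.  */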
      if (size > 32)
        {
          rtx nval = gen_rtx_REG (SImode, 1);
          emit_insn (gen_movsi (nval, val));
          val = nval;
        }

      if (direction > 0)
        insn = gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, val);
      else
        insn = gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, val);

      emit_insn (insn);
    }
}

/* Work out the registers which need to be saved,
   both as a mask and a count.  */

static int
calc_live_regs (int * count)
{
  int reg;
  int live_regs_mask = 0;

  * count = 0;

  for (reg = 0; reg < FIRST_PSEUDO_REGISTER; reg++)
    {
      if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
        {
          (*count)++;
          live_regs_mask |= (1 << reg);
        }
    }

  return live_regs_mask;
}

/* Print the operand address in x to the stream.  */

void
mcore_print_operand_address (FILE * stream, rtx x)
{
  switch (GET_CODE (x))
    {
    case REG:
      fprintf (stream, "(%s)", reg_names[REGNO (x)]);
      break;

    case PLUS:
      {
        rtx base = XEXP (x, 0);
        rtx index = XEXP (x, 1);

        if (GET_CODE (base) != REG)
          {
            /* Ensure that BASE is a register (one of them must be).  */
            rtx temp = base;
            base = index;
            index = temp;
          }

        switch (GET_CODE (index))
          {
          case CONST_INT:
            fprintf (stream, "(%s," HOST_WIDE_INT_PRINT_DEC ")",
                     reg_names[REGNO(base)], INTVAL (index));
            break;

          default:
            gcc_unreachable ();
          }
      }

      break;

    default:
      output_addr_const (stream, x);
      break;
    }
}

/* Print operand x (an rtx) in assembler syntax to file stream
   according to modifier code.

   'R'  print the next register or memory location along, i.e. the lsw in
        a double word value
   'O'  print a constant without the #
   'M'  print a constant as its negative
   'P'  print log2 of a power of two
   'Q'  print log2 of an inverse of a power of two
   'U'  print register for ldm/stm instruction
   'X'  print byte number for xtrbN instruction.  */

void
mcore_print_operand (FILE * stream, rtx x, int code)
{
  switch (code)
    {
    case 'N':
      if (INTVAL(x) == -1)
        fprintf (asm_out_file, "32");
      else
        fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) + 1));
      break;
    case 'P':
      fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) & 0xffffffff));
      break;
    case 'Q':
      fprintf (asm_out_file, "%d", exact_log2 (~INTVAL (x)));
      break;
    case 'O':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
      break;
    case 'M':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, - INTVAL (x));
      break;
    case 'R':
      /* Next location along in memory or register.  */
      switch (GET_CODE (x))
        {
        case REG:
          fputs (reg_names[REGNO (x) + 1], (stream));
          break;
        case MEM:
          mcore_print_operand_address
            (stream, XEXP (adjust_address (x, SImode, 4), 0));
          break;
        default:
          gcc_unreachable ();
        }
      break;
    case 'U':
      fprintf (asm_out_file, "%s-%s", reg_names[REGNO (x)],
               reg_names[REGNO (x) + 3]);
      break;
    case 'x':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
      break;
    case 'X':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, 3 - INTVAL (x) / 8);
      break;

    default:
      switch (GET_CODE (x))
        {
        case REG:
          fputs (reg_names[REGNO (x)], (stream));
          break;
        case MEM:
          output_address (XEXP (x, 0));
          break;
        default:
          output_addr_const (stream, x);
          break;
        }
      break;
    }
}

/* What does a constant cost ?  */

static int
mcore_const_costs (rtx exp, enum rtx_code code)
{
  HOST_WIDE_INT val = INTVAL (exp);

  /* Easy constants.  */
  if (   CONST_OK_FOR_I (val)
      || CONST_OK_FOR_M (val)
      || CONST_OK_FOR_N (val)
      || (code == PLUS && CONST_OK_FOR_L (val)))
    return 1;
  else if (code == AND
           && (   CONST_OK_FOR_M (~val)
               || CONST_OK_FOR_N (~val)))
    return 2;
  else if (code == PLUS
           && (   CONST_OK_FOR_I (-val)
               || CONST_OK_FOR_M (-val)
               || CONST_OK_FOR_N (-val)))
    return 2;

  return 5;
}

/* What does an and instruction cost - we do this because immediates
   may have been relaxed.  We want to ensure that CSE will eliminate
   the extra loads of relaxed immediates.  Otherwise we'll get bad
   code (multiple reloads of the same constant).  */

static int
mcore_and_cost (rtx x)
{
  HOST_WIDE_INT val;

  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return 2;

  val = INTVAL (XEXP (x, 1));

  /* Do it directly.  */
  if (CONST_OK_FOR_K (val) || CONST_OK_FOR_M (~val))
    return 2;
  /* Takes one instruction to load.  */
  else if (const_ok_for_mcore (val))
    return 3;
  /* Takes two instructions to load.  */
  else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
    return 4;

  /* Takes a lrw to load.  */
  return 5;
}

/* What does an or cost - see mcore_and_cost ().  */

static int
mcore_ior_cost (rtx x)
{
  HOST_WIDE_INT val;

  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return 2;

  val = INTVAL (XEXP (x, 1));

  /* Do it directly with bseti.  */
  if (CONST_OK_FOR_M (val))
    return 2;
  /* Takes one instruction to load.  */
  else if (const_ok_for_mcore (val))
    return 3;
  /* Takes two instructions to load.  */
  else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
    return 4;

  /* Takes a lrw to load.  */
  return 5;
}

static bool
mcore_rtx_costs (rtx x, int code, int outer_code, int * total,
                 bool speed ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case CONST_INT:
      *total = mcore_const_costs (x, (enum rtx_code) outer_code);
      return true;
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 5;
      return true;
    case CONST_DOUBLE:
      *total = 10;
      return true;

    case AND:
      *total = COSTS_N_INSNS (mcore_and_cost (x));
      return true;

    case IOR:
      *total = COSTS_N_INSNS (mcore_ior_cost (x));
      return true;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
    case FLOAT:
    case FIX:
      *total = COSTS_N_INSNS (100);
      return true;

    default:
      return false;
    }
}

/* Prepare the operands for a comparison.  Return whether the branch/setcc
   should reverse the operands.  */

bool
mcore_gen_compare (enum rtx_code code, rtx op0, rtx op1)
{
  rtx cc_reg = gen_rtx_REG (CCmode, CC_REG);
  bool invert;

  if (GET_CODE (op1) == CONST_INT)
    {
      HOST_WIDE_INT val = INTVAL (op1);

      switch (code)
        {
        case GTU:
          /* Unsigned > 0 is the same as != 0; everything else is converted
             below to LEU (reversed cmphs).  */
          if (val == 0)
            code = NE;
          break;

        /* Check whether (LE A imm) can become (LT A imm + 1),
           or (GT A imm) can become (GE A imm + 1).  */
        case GT:
        case LE:
          if (CONST_OK_FOR_J (val + 1))
            {
              op1 = GEN_INT (val + 1);
              code = code == LE ? LT : GE;
            }
          break;

        default:
          break;
        }
    }

  if (CONSTANT_P (op1) && GET_CODE (op1) != CONST_INT)
    op1 = force_reg (SImode, op1);

  /* cmpnei: 0-31 (K immediate)
     cmplti: 1-32 (J immediate, 0 using btsti x,31).  */
  invert = false;
  switch (code)
    {
    case EQ:    /* Use inverted condition, cmpne.  */
      code = NE;
      invert = true;
      /* Drop through.  */

    case NE:    /* Use normal condition, cmpne.  */
      if (GET_CODE (op1) == CONST_INT && ! CONST_OK_FOR_K (INTVAL (op1)))
        op1 = force_reg (SImode, op1);
      break;

    case LE:    /* Use inverted condition, reversed cmplt.  */
      code = GT;
      invert = true;
      /* Drop through.  */

    case GT:    /* Use normal condition, reversed cmplt.  */
      if (GET_CODE (op1) == CONST_INT)
        op1 = force_reg (SImode, op1);
      break;

    case GE:    /* Use inverted condition, cmplt.  */
      code = LT;
      invert = true;
      /* Drop through.  */

    case LT:    /* Use normal condition, cmplt.  */
      if (GET_CODE (op1) == CONST_INT &&
          /* covered by btsti x,31.  */
          INTVAL (op1) != 0 &&
          ! CONST_OK_FOR_J (INTVAL (op1)))
        op1 = force_reg (SImode, op1);
      break;

    case GTU:   /* Use inverted condition, cmple.  */
      /* We coped with unsigned > 0 above.  */
      gcc_assert (GET_CODE (op1) != CONST_INT || INTVAL (op1) != 0);
      code = LEU;
      invert = true;
      /* Drop through.  */

    case LEU:   /* Use normal condition, reversed cmphs.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
        op1 = force_reg (SImode, op1);
      break;

    case LTU:   /* Use inverted condition, cmphs.  */
      code = GEU;
      invert = true;
      /* Drop through.  */

    case GEU:   /* Use normal condition, cmphs.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
        op1 = force_reg (SImode, op1);
      break;

    default:
      break;
    }

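  /* Emit the comparison into the condition code register; the caller
     uses the returned INVERT flag to reverse the sense of the test.  */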
  emit_insn (gen_rtx_SET (VOIDmode,
                          cc_reg,
                          gen_rtx_fmt_ee (code, CCmode, op0, op1)));
  return invert;
}

int
mcore_symbolic_address_p (rtx x)
{
  switch (GET_CODE (x))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return 1;
    case CONST:
      x = XEXP (x, 0);
      return (   (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
               || GET_CODE (XEXP (x, 0)) == LABEL_REF)
              && GET_CODE (XEXP (x, 1)) == CONST_INT);
    default:
      return 0;
    }
}

/* Functions to output assembly code for a function call.  */

char *
mcore_output_call (rtx operands[], int index)
{
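  /* N.B. the returned string lives in this static buffer and is only
     valid until the next call.  */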
  static char buffer[20];
  rtx addr = operands [index];

  if (REG_P (addr))
    {
      if (TARGET_CG_DATA)
        {
          gcc_assert (mcore_current_function_name);

          ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
                              "unknown", 1);
        }

      sprintf (buffer, "jsr\t%%%d", index);
    }
  else
    {
      if (TARGET_CG_DATA)
        {
          gcc_assert (mcore_current_function_name);
          gcc_assert (GET_CODE (addr) == SYMBOL_REF);

          ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
                              XSTR (addr, 0), 0);
        }

      sprintf (buffer, "jbsr\t%%%d", index);
    }

  return buffer;
}

/* Can we load a constant with a single instruction ?  */

int
const_ok_for_mcore (HOST_WIDE_INT value)
{
  if (value >= 0 && value <= 127)
    return 1;

  /* Try exact power of two.  */
  if (CONST_OK_FOR_M (value))
    return 1;

  /* Try exact power of two - 1.  */
  if (CONST_OK_FOR_N (value) && value != -1)
    return 1;

  return 0;
}

/* Can we load a constant inline with up to 2 instructions ?  */

int
mcore_const_ok_for_inline (HOST_WIDE_INT value)
{
  HOST_WIDE_INT x, y;

  return try_constant_tricks (value, & x, & y) > 0;
}

/* Are we loading the constant using a not ?  */

int
mcore_const_trick_uses_not (HOST_WIDE_INT value)
{
  HOST_WIDE_INT x, y;

  return try_constant_tricks (value, & x, & y) == 2;
}

/* Try tricks to load a constant inline and return the trick number if
   success (0 is non-inlinable).

   0: not inlinable
   1: single instruction (do the usual thing)
   2: single insn followed by a 'not'
   3: single insn followed by a subi
   4: single insn followed by an addi
   5: single insn followed by rsubi
   6: single insn followed by bseti
   7: single insn followed by bclri
   8: single insn followed by rotli
   9: single insn followed by lsli
   10: single insn followed by ixh
   11: single insn followed by ixw.  */

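/* For example, 0x80000001 cannot be loaded in a single instruction,
   but trick 6 finds it as "movi rx,1" followed by "bseti rx,31".  */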
static int
try_constant_tricks (HOST_WIDE_INT value, HOST_WIDE_INT * x, HOST_WIDE_INT * y)
{
  HOST_WIDE_INT i;
  unsigned HOST_WIDE_INT bit, shf, rot;

  if (const_ok_for_mcore (value))
    return 1;   /* Do the usual thing.  */

  if (! TARGET_HARDLIT)
    return 0;

  if (const_ok_for_mcore (~value))
    {
      *x = ~value;
      return 2;
    }

  for (i = 1; i <= 32; i++)
    {
      if (const_ok_for_mcore (value - i))
        {
          *x = value - i;
          *y = i;

          return 3;
        }

      if (const_ok_for_mcore (value + i))
        {
          *x = value + i;
          *y = i;

          return 4;
        }
    }

  bit = 0x80000000ULL;

  for (i = 0; i <= 31; i++)
    {
      if (const_ok_for_mcore (i - value))
        {
          *x = i - value;
          *y = i;

          return 5;
        }

      if (const_ok_for_mcore (value & ~bit))
        {
          *y = bit;
          *x = value & ~bit;
          return 6;
        }

      if (const_ok_for_mcore (value | bit))
        {
          *y = ~bit;
          *x = value | bit;

          return 7;
        }

      bit >>= 1;
    }

  shf = value;
  rot = value;

  for (i = 1; i < 31; i++)
    {
      int c;

      /* MCore has rotate left.  */
      c = rot << 31;
      rot >>= 1;
      rot &= 0x7FFFFFFF;
      rot |= c;   /* Simulate rotate.  */

      if (const_ok_for_mcore (rot))
        {
          *y = i;
          *x = rot;

          return 8;
        }

      if (shf & 1)
        shf = 0; /* Can't use logical shift, low order bit is one.  */

      shf >>= 1;

      if (shf != 0 && const_ok_for_mcore (shf))
        {
          *y = i;
          *x = shf;

          return 9;
        }
    }

  if ((value % 3) == 0 && const_ok_for_mcore (value / 3))
    {
      *x = value / 3;

      return 10;
    }

  if ((value % 5) == 0 && const_ok_for_mcore (value / 5))
    {
      *x = value / 5;

      return 11;
    }

  return 0;
}

/* Check whether reg is dead at first.  This is done by searching ahead
   for either the next use (i.e., reg is live), a death note, or a set of
   reg.  Don't just use dead_or_set_p() since reload does not always mark
   deaths (especially if PRESERVE_DEATH_NOTES_REGNO_P is not defined). We
   can ignore subregs by extracting the actual register.  BRC  */

int
mcore_is_dead (rtx first, rtx reg)
{
  rtx insn;

  /* For mcore, subregs can't live independently of their parent regs.  */
  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  /* Dies immediately.  */
  if (dead_or_set_p (first, reg))
    return 1;

  /* Look for conclusive evidence of live/death, otherwise we have
     to assume that it is live.  */
  for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == JUMP_INSN)
        return 0;        /* We lose track, assume it is alive.  */

      else if (GET_CODE(insn) == CALL_INSN)
        {
          /* Calls might use it for target or register parms.  */
          if (reg_referenced_p (reg, PATTERN (insn))
              || find_reg_fusage (insn, USE, reg))
            return 0;
          else if (dead_or_set_p (insn, reg))
            return 1;
        }
      else if (GET_CODE (insn) == INSN)
        {
          if (reg_referenced_p (reg, PATTERN (insn)))
            return 0;
          else if (dead_or_set_p (insn, reg))
            return 1;
        }
    }

  /* No conclusive evidence either way, we cannot take the chance
     that control flow hid the use from us -- "I'm not dead yet".  */
  return 0;
}

/* Count the number of ones in mask.  */

int
mcore_num_ones (HOST_WIDE_INT mask)
{
  /* A trick to count set bits recently posted on comp.compilers.  */
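  /* Each step sums adjacent bit fields in parallel: first 1-bit fields,
     then 2-bit, 4-bit, 8-bit and finally 16-bit fields, leaving the
     population count in the low byte.  */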
  mask =  (mask >> 1  & 0x55555555) + (mask & 0x55555555);
  mask = ((mask >> 2) & 0x33333333) + (mask & 0x33333333);
  mask = ((mask >> 4) + mask) & 0x0f0f0f0f;
  mask = ((mask >> 8) + mask);

  return (mask + (mask >> 16)) & 0xff;
}

/* Count the number of zeros in mask.  */

int
mcore_num_zeros (HOST_WIDE_INT mask)
{
  return 32 - mcore_num_ones (mask);
}

/* Determine byte being masked.  */

int
mcore_byte_offset (unsigned int mask)
{
  if (mask == 0x00ffffffL)
    return 0;
  else if (mask == 0xff00ffffL)
    return 1;
  else if (mask == 0xffff00ffL)
    return 2;
  else if (mask == 0xffffff00L)
    return 3;

  return -1;
}

/* Determine halfword being masked.  */

int
mcore_halfword_offset (unsigned int mask)
{
  if (mask == 0x0000ffffL)
    return 0;
  else if (mask == 0xffff0000L)
    return 1;

  return -1;
}

/* Output a series of bseti's corresponding to mask.  */

const char *
mcore_output_bseti (rtx dst, int mask)
{
  rtx out_operands[2];
  int bit;

  out_operands[0] = dst;

  for (bit = 0; bit < 32; bit++)
    {
      if ((mask & 0x1) == 0x1)
        {
          out_operands[1] = GEN_INT (bit);

          output_asm_insn ("bseti\t%0,%1", out_operands);
        }
      mask >>= 1;
    }

  return "";
}

/* Output a series of bclri's corresponding to mask.  */

const char *
mcore_output_bclri (rtx dst, int mask)
{
  rtx out_operands[2];
  int bit;

  out_operands[0] = dst;

  for (bit = 0; bit < 32; bit++)
    {
      if ((mask & 0x1) == 0x0)
        {
          out_operands[1] = GEN_INT (bit);

          output_asm_insn ("bclri\t%0,%1", out_operands);
        }

      mask >>= 1;
    }

  return "";
}

/* Output a conditional move of two constants that are +/- 1 within each
   other.  See the "movtK" patterns in mcore.md.   I'm not sure this is
   really worth the effort.  */

const char *
mcore_output_cmov (rtx operands[], int cmp_t, const char * test)
{
  HOST_WIDE_INT load_value;
  HOST_WIDE_INT adjust_value;
  rtx out_operands[4];

  out_operands[0] = operands[0];

  /* Check to see which constant is loadable.  */
  if (const_ok_for_mcore (INTVAL (operands[1])))
    {
      out_operands[1] = operands[1];
      out_operands[2] = operands[2];
    }
  else if (const_ok_for_mcore (INTVAL (operands[2])))
    {
      out_operands[1] = operands[2];
      out_operands[2] = operands[1];

      /* Complement test since constants are swapped.  */
      cmp_t = (cmp_t == 0);
    }
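  /* One of the two constants is expected to be loadable when the movtK
     patterns match; otherwise out_operands[] would be left unset above.  */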
  load_value   = INTVAL (out_operands[1]);
  adjust_value = INTVAL (out_operands[2]);

  /* First output the test if folded into the pattern.  */

  if (test)
    output_asm_insn (test, operands);

  /* Load the constant - for now, only support constants that can be
     generated with a single instruction.  maybe add general inlinable
     constants later (this will increase the # of patterns since the
     instruction sequence has a different length attribute).  */
  if (load_value >= 0 && load_value <= 127)
    output_asm_insn ("movi\t%0,%1", out_operands);
  else if (CONST_OK_FOR_M (load_value))
    output_asm_insn ("bgeni\t%0,%P1", out_operands);
  else if (CONST_OK_FOR_N (load_value))
    output_asm_insn ("bmaski\t%0,%N1", out_operands);

  /* Output the constant adjustment.  */
  if (load_value > adjust_value)
    {
      if (cmp_t)
        output_asm_insn ("decf\t%0", out_operands);
      else
        output_asm_insn ("dect\t%0", out_operands);
    }
  else
    {
      if (cmp_t)
        output_asm_insn ("incf\t%0", out_operands);
      else
        output_asm_insn ("inct\t%0", out_operands);
    }

  return "";
}

/* Outputs the peephole for moving a constant that gets not'ed followed
   by an and (i.e. combine the not and the and into andn). BRC  */

const char *
mcore_output_andn (rtx insn ATTRIBUTE_UNUSED, rtx operands[])
{
  HOST_WIDE_INT x, y;
  rtx out_operands[3];
  const char * load_op;
  char buf[256];
  int trick_no;

  trick_no = try_constant_tricks (INTVAL (operands[1]), &x, &y);
  gcc_assert (trick_no == 2);

  out_operands[0] = operands[0];
  out_operands[1] = GEN_INT (x);
  out_operands[2] = operands[2];

  if (x >= 0 && x <= 127)
    load_op = "movi\t%0,%1";

  /* Try exact power of two.  */
  else if (CONST_OK_FOR_M (x))
    load_op = "bgeni\t%0,%P1";

  /* Try exact power of two - 1.  */
  else if (CONST_OK_FOR_N (x))
    load_op = "bmaski\t%0,%N1";

  else
    {
      load_op = "BADMOVI-andn\t%0, %1";
      gcc_unreachable ();
    }

  sprintf (buf, "%s\n\tandn\t%%2,%%0", load_op);
  output_asm_insn (buf, out_operands);

  return "";
}

/* Output an inline constant.  */

static const char *
output_inline_const (enum machine_mode mode, rtx operands[])
{
  HOST_WIDE_INT x = 0, y = 0;
  int trick_no;
  rtx out_operands[3];
  char buf[256];
  char load_op[256];
  const char *dst_fmt;
  HOST_WIDE_INT value;

  value = INTVAL (operands[1]);

  trick_no = try_constant_tricks (value, &x, &y);
  /* lrw's are handled separately: Large inlinable constants never get
     turned into lrw's.  Our caller uses try_constant_tricks to back
     off to an lrw rather than calling this routine.  */
  gcc_assert (trick_no != 0);

  if (trick_no == 1)
    x = value;

  /* operands: 0 = dst, 1 = load immed., 2 = immed. adjustment.  */
  out_operands[0] = operands[0];
  out_operands[1] = GEN_INT (x);

  if (trick_no > 2)
    out_operands[2] = GEN_INT (y);

  /* Select dst format based on mode.  */
  if (mode == DImode && (! TARGET_LITTLE_END))
    dst_fmt = "%R0";
  else
    dst_fmt = "%0";

  if (x >= 0 && x <= 127)
    sprintf (load_op, "movi\t%s,%%1", dst_fmt);

  /* Try exact power of two.  */
  else if (CONST_OK_FOR_M (x))
    sprintf (load_op, "bgeni\t%s,%%P1", dst_fmt);

  /* Try exact power of two - 1.  */
  else if (CONST_OK_FOR_N (x))
    sprintf (load_op, "bmaski\t%s,%%N1", dst_fmt);

  else
    {
      sprintf (load_op, "BADMOVI-inline_const %s, %%1", dst_fmt);
      gcc_unreachable ();
    }

  switch (trick_no)
    {
    case 1:
      strcpy (buf, load_op);
      break;
    case 2:   /* not */
      sprintf (buf, "%s\n\tnot\t%s\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 3:   /* add */
      sprintf (buf, "%s\n\taddi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 4:   /* sub */
      sprintf (buf, "%s\n\tsubi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 5:   /* rsub */
      /* Never happens unless -mrsubi, see try_constant_tricks().  */
      sprintf (buf, "%s\n\trsubi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 6:   /* bseti */
      sprintf (buf, "%s\n\tbseti\t%s,%%P2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 7:   /* bclr */
      sprintf (buf, "%s\n\tbclri\t%s,%%Q2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 8:   /* rotl */
      sprintf (buf, "%s\n\trotli\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 9:   /* lsl */
      sprintf (buf, "%s\n\tlsli\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 10:  /* ixh */
      sprintf (buf, "%s\n\tixh\t%s,%s\t// %ld 0x%lx", load_op, dst_fmt, dst_fmt, value, value);
      break;
    case 11:  /* ixw */
      sprintf (buf, "%s\n\tixw\t%s,%s\t// %ld 0x%lx", load_op, dst_fmt, dst_fmt, value, value);
      break;
    default:
      return "";
    }

  output_asm_insn (buf, out_operands);

  return "";
}

/* Output a move of a value that is one word or smaller.  */

const char *
mcore_output_move (rtx insn ATTRIBUTE_UNUSED, rtx operands[],
                   enum machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (GET_CODE (dst) == REG)
    {
      if (GET_CODE (src) == REG)
        {
          if (REGNO (src) == CC_REG)            /* r-c */
            return "mvc\t%0";
          else
            return "mov\t%0,%1";                /* r-r*/
        }
      else if (GET_CODE (src) == MEM)
        {
          if (GET_CODE (XEXP (src, 0)) == LABEL_REF)
            return "lrw\t%0,[%1]";              /* a-R */
          else
            switch (GET_MODE (src))             /* r-m */
              {
              case SImode:
                return "ldw\t%0,%1";
              case HImode:
                return "ld.h\t%0,%1";
              case QImode:
                return "ld.b\t%0,%1";
              default:
                gcc_unreachable ();
              }
        }
      else if (GET_CODE (src) == CONST_INT)
        {
          HOST_WIDE_INT x, y;

          if (CONST_OK_FOR_I (INTVAL (src)))       /* r-I */
            return "movi\t%0,%1";
          else if (CONST_OK_FOR_M (INTVAL (src)))  /* r-M */
            return "bgeni\t%0,%P1\t// %1 %x1";
          else if (CONST_OK_FOR_N (INTVAL (src)))  /* r-N */
            return "bmaski\t%0,%N1\t// %1 %x1";
          else if (try_constant_tricks (INTVAL (src), &x, &y))     /* R-P */
            return output_inline_const (SImode, operands);  /* 1-2 insns */
          else
            return "lrw\t%0,%x1\t// %1";        /* Get it from literal pool.  */
        }
      else
        return "lrw\t%0, %1";                /* Into the literal pool.  */
    }
  else if (GET_CODE (dst) == MEM)               /* m-r */
    switch (GET_MODE (dst))
      {
      case SImode:
        return "stw\t%1,%0";
      case HImode:
        return "st.h\t%1,%0";
      case QImode:
        return "st.b\t%1,%0";
      default:
        gcc_unreachable ();
      }

  gcc_unreachable ();
}

/* Return a sequence of instructions to perform DI or DF move.
   Since the MCORE cannot move a DI or DF in one instruction, we have
   to take care when we see overlapping source and dest registers.  */

const char *
mcore_output_movedouble (rtx operands[], enum machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (GET_CODE (dst) == REG)
    {
      if (GET_CODE (src) == REG)
        {
          int dstreg = REGNO (dst);
          int srcreg = REGNO (src);

          /* Ensure the second source is not overwritten.  */
          if (srcreg + 1 == dstreg)
            return "mov %R0,%R1\n\tmov  %0,%1";
          else
            return "mov %0,%1\n\tmov    %R0,%R1";
        }
      else if (GET_CODE (src) == MEM)
        {
          rtx memexp = XEXP (src, 0);
          int dstreg = REGNO (dst);
          int basereg = -1;

          if (GET_CODE (memexp) == LABEL_REF)
            return "lrw\t%0,[%1]\n\tlrw\t%R0,[%R1]";
          else if (GET_CODE (memexp) == REG)
            basereg = REGNO (memexp);
          else if (GET_CODE (memexp) == PLUS)
            {
              if (GET_CODE (XEXP (memexp, 0)) == REG)
                basereg = REGNO (XEXP (memexp, 0));
              else if (GET_CODE (XEXP (memexp, 1)) == REG)
                basereg = REGNO (XEXP (memexp, 1));
              else
                gcc_unreachable ();
            }
          else
            gcc_unreachable ();

          /* ??? length attribute is wrong here.  */
          if (dstreg == basereg)
            {
              /* Just load them in reverse order.  */
              return "ldw\t%R0,%R1\n\tldw\t%0,%1";

              /* XXX: alternative: move basereg to basereg+1
                 and then fall through.  */
            }
          else
            return "ldw\t%0,%1\n\tldw\t%R0,%R1";
        }
      else if (GET_CODE (src) == CONST_INT)
        {
          if (TARGET_LITTLE_END)
            {
              if (CONST_OK_FOR_I (INTVAL (src)))
                output_asm_insn ("movi  %0,%1", operands);
              else if (CONST_OK_FOR_M (INTVAL (src)))
                output_asm_insn ("bgeni %0,%P1", operands);
              else if (CONST_OK_FOR_N (INTVAL (src)))
                output_asm_insn ("bmaski        %0,%N1", operands);
              else
                gcc_unreachable ();

              if (INTVAL (src) < 0)
                return "bmaski  %R0,32";
              else
                return "movi    %R0,0";
            }
          else
            {
              if (CONST_OK_FOR_I (INTVAL (src)))
                output_asm_insn ("movi  %R0,%1", operands);
              else if (CONST_OK_FOR_M (INTVAL (src)))
                output_asm_insn ("bgeni %R0,%P1", operands);
              else if (CONST_OK_FOR_N (INTVAL (src)))
                output_asm_insn ("bmaski        %R0,%N1", operands);
              else
                gcc_unreachable ();

              if (INTVAL (src) < 0)
                return "bmaski  %0,32";
              else
                return "movi    %0,0";
            }
        }
      else
        gcc_unreachable ();
    }
  else if (GET_CODE (dst) == MEM && GET_CODE (src) == REG)
    return "stw\t%1,%0\n\tstw\t%R1,%R0";
  else
    gcc_unreachable ();
}

/* Predicates used by the templates.  */

int
mcore_arith_S_operand (rtx op)
{
  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (~INTVAL (op)))
    return 1;

  return 0;
}

/* Expand insert bit field.  BRC  */

int
mcore_expand_insv (rtx operands[])
{
  int width = INTVAL (operands[1]);
  int posn = INTVAL (operands[2]);
  int mask;
  rtx mreg, sreg, ereg;

  /* To get width 1 insv, the test in store_bit_field() (expmed.c, line 191)
     for width==1 must be removed.  Look around line 368.  This is something
     we really want the md part to do.  */
  if (width == 1 && GET_CODE (operands[3]) == CONST_INT)
    {
      /* Do directly with bseti or bclri.  */
      /* RBE: 2/97 consider only low bit of constant.  */
      if ((INTVAL (operands[3]) & 1) == 0)
        {
          mask = ~(1 << posn);
          emit_insn (gen_rtx_SET (SImode, operands[0],
                              gen_rtx_AND (SImode, operands[0], GEN_INT (mask))));
        }
      else
        {
          mask = 1 << posn;
          emit_insn (gen_rtx_SET (SImode, operands[0],
                            gen_rtx_IOR (SImode, operands[0], GEN_INT (mask))));
        }

      return 1;
    }

  /* Look at some bit-field placements that we aren't interested
     in handling ourselves, unless specifically directed to do so.  */
  if (! TARGET_W_FIELD)
    return 0;            /* Generally, give up about now.  */

  if (width == 8 && posn % 8 == 0)
    /* Byte sized and aligned; let caller break it up.  */
    return 0;

  if (width == 16 && posn % 16 == 0)
    /* Short sized and aligned; let caller break it up.  */
    return 0;

  /* The general case - we can do this a little bit better than what the
     machine independent part tries.  This will get rid of all the subregs
     that mess up constant folding in combine when working with relaxed
     immediates.  */

  /* If setting the entire field, do it directly.  */
  if (GET_CODE (operands[3]) == CONST_INT
      && INTVAL (operands[3]) == ((1 << width) - 1))
    {
      mreg = force_reg (SImode, GEN_INT (INTVAL (operands[3]) << posn));
      emit_insn (gen_rtx_SET (SImode, operands[0],
                         gen_rtx_IOR (SImode, operands[0], mreg)));
      return 1;
    }

  /* Generate the clear mask.  */
  mreg = force_reg (SImode, GEN_INT (~(((1 << width) - 1) << posn)));

  /* Clear the field, to overlay it later with the source.  */
  emit_insn (gen_rtx_SET (SImode, operands[0],
                      gen_rtx_AND (SImode, operands[0], mreg)));

  /* If the source is constant 0, we've nothing to add back.  */
  if (GET_CODE (operands[3]) == CONST_INT && INTVAL (operands[3]) == 0)
    return 1;

  /* XXX: Should we worry about more games with constant values?
     We've covered the high profile: set/clear single-bit and many-bit
     fields. How often do we see "arbitrary bit pattern" constants?  */
  sreg = copy_to_mode_reg (SImode, operands[3]);

  /* Extract src as same width as dst (needed for signed values).  We
     always have to do this since we widen everything to SImode.
     We don't have to mask if we're shifting this up against the
     MSB of the register (e.g., the shift will push out any hi-order
     bits).  */
  if (width + posn != (int) GET_MODE_BITSIZE (SImode))
    {
      ereg = force_reg (SImode, GEN_INT ((1 << width) - 1));
      emit_insn (gen_rtx_SET (SImode, sreg,
                          gen_rtx_AND (SImode, sreg, ereg)));
    }

  /* Insert source value in dest.  */
  if (posn != 0)
    emit_insn (gen_rtx_SET (SImode, sreg,
                        gen_rtx_ASHIFT (SImode, sreg, GEN_INT (posn))));

  emit_insn (gen_rtx_SET (SImode, operands[0],
                      gen_rtx_IOR (SImode, operands[0], sreg)));

  return 1;
}

/* ??? Block move stuff stolen from m88k.  This code has not been
   verified for correctness.  */

/* Emit code to perform a block move.  Choose the best method.

   OPERANDS[0] is the destination.
   OPERANDS[1] is the source.
   OPERANDS[2] is the size.
   OPERANDS[3] is the alignment safe to use.  */

/* Emit code to perform a block move with an offset sequence of ldw/st
   instructions (..., ldw 0, stw 1, ldw 1, stw 0, ...).  SIZE and ALIGN are
   known constants.  DEST and SRC are registers.  OFFSET is the known
   starting point for the output pattern.  */

static const enum machine_mode mode_from_align[] =
{
  VOIDmode, QImode, HImode, VOIDmode, SImode,
};

static void
block_move_sequence (rtx dst_mem, rtx src_mem, int size, int align)
{
  rtx temp[2];
  enum machine_mode mode[2];
  int amount[2];
  bool active[2];
  int phase = 0;
  int next;
  int offset_ld = 0;
  int offset_st = 0;
  rtx x;

  x = XEXP (dst_mem, 0);
  if (!REG_P (x))
    {
      x = force_reg (Pmode, x);
      dst_mem = replace_equiv_address (dst_mem, x);
    }

  x = XEXP (src_mem, 0);
  if (!REG_P (x))
    {
      x = force_reg (Pmode, x);
      src_mem = replace_equiv_address (src_mem, x);
    }

  active[0] = active[1] = false;

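  /* A two-phase software pipeline: each iteration loads the next chunk
     into one temporary while storing the chunk loaded by the previous
     iteration from the other, alternating between the two.  */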
  do
    {
      next = phase;
      phase ^= 1;

      if (size > 0)
        {
          int next_amount;

          next_amount = (size >= 4 ? 4 : (size >= 2 ? 2 : 1));
          next_amount = MIN (next_amount, align);

          amount[next] = next_amount;
          mode[next] = mode_from_align[next_amount];
          temp[next] = gen_reg_rtx (mode[next]);

          x = adjust_address (src_mem, mode[next], offset_ld);
          emit_insn (gen_rtx_SET (VOIDmode, temp[next], x));

          offset_ld += next_amount;
          size -= next_amount;
          active[next] = true;
        }

      if (active[phase])
        {
          active[phase] = false;

          x = adjust_address (dst_mem, mode[phase], offset_st);
          emit_insn (gen_rtx_SET (VOIDmode, x, temp[phase]));

          offset_st += amount[phase];
        }
    }
  while (active[next]);
}

bool
mcore_expand_block_move (rtx *operands)
{
  HOST_WIDE_INT align, bytes, max;

  if (GET_CODE (operands[2]) != CONST_INT)
    return false;

  bytes = INTVAL (operands[2]);
  align = INTVAL (operands[3]);

  if (bytes <= 0)
    return false;
  if (align > 4)
    align = 4;

  switch (align)
    {
    case 4:
      if (bytes & 1)
        max = 4*4;
      else if (bytes & 3)
        max = 8*4;
      else
        max = 16*4;
      break;
    case 2:
      max = 4*2;
      break;
    case 1:
      max = 4*1;
      break;
    default:
      gcc_unreachable ();
    }

  if (bytes <= max)
    {
      block_move_sequence (operands[0], operands[1], bytes, align);
      return true;
    }

  return false;
}


/* Code to generate prologue and epilogue sequences.  */
static int number_of_regs_before_varargs;

/* Set by TARGET_SETUP_INCOMING_VARARGS to indicate to prolog that this is
   for a varargs function.  */
static int current_function_anonymous_args;

#define STACK_BYTES (STACK_BOUNDARY/BITS_PER_UNIT)
#define STORE_REACH (64)        /* Maximum displace of word store + 4.  */
#define ADDI_REACH (32)         /* Maximum addi operand.  */

static void
layout_mcore_frame (struct mcore_frame * infp)
{
  int n;
  unsigned int i;
  int nbytes;
  int regarg;
  int localregarg;
  int localreg;
  int outbounds;
  unsigned int growths;
  int step;

  /* Might have to spill bytes to re-assemble a big argument that
     was passed partially in registers and partially on the stack.  */
  nbytes = crtl->args.pretend_args_size;

  /* Determine how much space for spilled anonymous args (e.g., stdarg).  */
  if (current_function_anonymous_args)
    nbytes += (NPARM_REGS - number_of_regs_before_varargs) * UNITS_PER_WORD;

  infp->arg_size = nbytes;

  /* How much space to save non-volatile registers we stomp.  */
  infp->reg_mask = calc_live_regs (& n);
  infp->reg_size = n * 4;

  /* And the rest of it... locals and space for overflowed outbounds.  */
  infp->local_size = get_frame_size ();
  infp->outbound_size = crtl->outgoing_args_size;

  /* Make sure we have a whole number of words for the locals.  */
  if (infp->local_size % STACK_BYTES)
    infp->local_size = (infp->local_size + STACK_BYTES - 1) & ~ (STACK_BYTES -1);

  /* Only thing we know we have to pad is the outbound space, since
     we've aligned our locals assuming that base of locals is aligned.  */
  infp->pad_local = 0;
  infp->pad_reg = 0;
  infp->pad_outbound = 0;
  if (infp->outbound_size % STACK_BYTES)
    infp->pad_outbound = STACK_BYTES - (infp->outbound_size % STACK_BYTES);

  /* Now we see how we want to stage the prologue so that it does
     the most appropriate stack growth and register saves to either:
     (1) run fast,
     (2) reduce instruction space, or
     (3) reduce stack space.  */
  for (i = 0; i < ARRAY_SIZE (infp->growth); i++)
    infp->growth[i] = 0;

  regarg      = infp->reg_size + infp->arg_size;
  localregarg = infp->local_size + regarg;
  localreg    = infp->local_size + infp->reg_size;
  outbounds   = infp->outbound_size + infp->pad_outbound;
  growths     = 0;

  /* XXX: Consider one where we consider localregarg + outbound too! */

  /* Frame of <= 32 bytes where an stm would cover <= 2 registers:
     use stw's with offsets and buy the frame in one shot.  */
  if (localregarg <= ADDI_REACH
      && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
    {
      /* Make sure we'll be aligned.  */
      if (localregarg % STACK_BYTES)
        infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);

      step = localregarg + infp->pad_reg;
      infp->reg_offset = infp->local_size;

      if (outbounds + step <= ADDI_REACH && !frame_pointer_needed)
        {
          step += outbounds;
          infp->reg_offset += outbounds;
          outbounds = 0;
        }

      infp->arg_offset = step - 4;
      infp->growth[growths++] = step;
      infp->reg_growth = growths;
      infp->local_growth = growths;

      /* If we haven't already folded it in.  */
      if (outbounds)
        infp->growth[growths++] = outbounds;

      goto finish;
    }

  /* Frame can't be done with a single subi, but can be done with 2
     insns.  If the 'stm' is getting <= 2 registers, we use stw's and
     shift some of the stack purchase into the first subi, so both are
     single instructions.  */
  if (localregarg <= STORE_REACH
      && (infp->local_size > ADDI_REACH)
      && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
    {
      int all;

      /* Make sure we'll be aligned; use either pad_reg or pad_local.  */
      if (localregarg % STACK_BYTES)
        infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);

      all = localregarg + infp->pad_reg + infp->pad_local;
      step = ADDI_REACH;        /* As much up front as we can.  */
      if (step > all)
        step = all;

      /* XXX: Consider whether step will still be aligned; we believe so.  */
      infp->arg_offset = step - 4;
      infp->growth[growths++] = step;
      infp->reg_growth = growths;
      infp->reg_offset = step - infp->pad_reg - infp->reg_size;
      all -= step;

      /* Can we fold in any space required for outbounds?  */
      if (outbounds + all <= ADDI_REACH && !frame_pointer_needed)
        {
          all += outbounds;
          outbounds = 0;
        }

      /* Get the rest of the locals in place.  */
      step = all;
      infp->growth[growths++] = step;
      infp->local_growth = growths;
      all -= step;

      assert (all == 0);

      /* Finish off if we need to do so.  */
      if (outbounds)
        infp->growth[growths++] = outbounds;

      goto finish;
    }

  /* Registers + args is nicely aligned, so we'll buy that in one shot.
     Then we buy the rest of the frame in 1 or 2 steps depending on
     whether we need a frame pointer.  */
  if ((regarg % STACK_BYTES) == 0)
    {
      infp->growth[growths++] = regarg;
      infp->reg_growth = growths;
      infp->arg_offset = regarg - 4;
      infp->reg_offset = 0;

      if (infp->local_size % STACK_BYTES)
        infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);

      step = infp->local_size + infp->pad_local;

      if (!frame_pointer_needed)
        {
          step += outbounds;
          outbounds = 0;
        }

      infp->growth[growths++] = step;
      infp->local_growth = growths;

      /* If there's any left to be done.  */
      if (outbounds)
        infp->growth[growths++] = outbounds;

      goto finish;
    }

  /* XXX: optimizations that we'll want to play with....
     -- regarg is not aligned, but it's a small number of registers;
        use some of localsize so that regarg is aligned and then
        save the registers.  */

  /* Simple encoding; plods down the stack buying the pieces as it goes.
     -- does not optimize space consumption.
     -- does not attempt to optimize instruction counts.
     -- but it is safe for all alignments.  */
  if (regarg % STACK_BYTES != 0)
    infp->pad_reg = STACK_BYTES - (regarg % STACK_BYTES);

  infp->growth[growths++] = infp->arg_size + infp->reg_size + infp->pad_reg;
  infp->reg_growth = growths;
  infp->arg_offset = infp->growth[0] - 4;
  infp->reg_offset = 0;

  if (frame_pointer_needed)
    {
      if (infp->local_size % STACK_BYTES != 0)
        infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);

      infp->growth[growths++] = infp->local_size + infp->pad_local;
      infp->local_growth = growths;

      infp->growth[growths++] = outbounds;
    }
  else
    {
      if ((infp->local_size + outbounds) % STACK_BYTES != 0)
        infp->pad_local = STACK_BYTES - ((infp->local_size + outbounds) % STACK_BYTES);

      infp->growth[growths++] = infp->local_size + infp->pad_local + outbounds;
      infp->local_growth = growths;
    }

  /* Anything else that we've forgotten?, plus a few consistency checks.  */
1857
 finish:
1858
  assert (infp->reg_offset >= 0);
1859
  assert (growths <= MAX_STACK_GROWS);
1860
 
1861
  for (i = 0; i < growths; i++)
1862
    gcc_assert (!(infp->growth[i] % STACK_BYTES));
1863
}
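
/* Illustrative trace (editorial note, not in the original source), assuming
   STACK_BYTES == 8 and ADDI_REACH == 32: with reg_size == 8, arg_size == 0,
   local_size == 4 and no outbounds, localregarg == 12 takes the first
   strategy above: pad_reg becomes 4, the whole 16-byte frame is bought in a
   single step, and reg_offset lands on local_size == 4.  */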

/* Define the offset between two registers, one to be eliminated, and
   the other its replacement, at the start of a routine.  */

int
mcore_initial_elimination_offset (int from, int to)
{
  int above_frame;
  int below_frame;
  struct mcore_frame fi;

  layout_mcore_frame (& fi);

  /* fp to ap */
  above_frame = fi.local_size + fi.pad_local + fi.reg_size + fi.pad_reg;
  /* sp to fp */
  below_frame = fi.outbound_size + fi.pad_outbound;

  if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
    return above_frame;

  if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    return above_frame + below_frame;

  if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    return below_frame;

  gcc_unreachable ();
}
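
/* Worked example (editorial note, not in the original source): for a frame
   with local_size == 16, pad_local == 0, reg_size == 8, pad_reg == 0,
   outbound_size == 8 and pad_outbound == 0, ap-to-fp is 24, fp-to-sp is 8,
   and eliminating ap directly to sp yields 24 + 8 == 32.  */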

/* Keep track of some information about varargs for the prolog.  */

static void
mcore_setup_incoming_varargs (CUMULATIVE_ARGS *args_so_far,
                              enum machine_mode mode, tree type,
                              int * ptr_pretend_size ATTRIBUTE_UNUSED,
                              int second_time ATTRIBUTE_UNUSED)
{
  current_function_anonymous_args = 1;

  /* We need to know how many argument registers are used before
     the varargs start, so that we can push the remaining argument
     registers during the prologue.  */
  number_of_regs_before_varargs = *args_so_far + mcore_num_arg_regs (mode, type);

  /* There is a bug somewhere in the arg handling code.
     Until I can find it, this workaround always pushes the
     last named argument onto the stack.  */
  number_of_regs_before_varargs = *args_so_far;

  /* The last named argument may be split between argument registers
     and the stack.  Allow for this here.  */
  if (number_of_regs_before_varargs > NPARM_REGS)
    number_of_regs_before_varargs = NPARM_REGS;
}

void
mcore_expand_prolog (void)
{
  struct mcore_frame fi;
  int space_allocated = 0;
  int growth = 0;

  /* Find out what we're doing.  */
  layout_mcore_frame (&fi);

  space_allocated = fi.arg_size + fi.reg_size + fi.local_size +
    fi.outbound_size + fi.pad_outbound + fi.pad_local + fi.pad_reg;

  if (TARGET_CG_DATA)
    {
      /* Emit a symbol for this routine's frame size.  */
      rtx x;

      x = DECL_RTL (current_function_decl);

      gcc_assert (GET_CODE (x) == MEM);

      x = XEXP (x, 0);

      gcc_assert (GET_CODE (x) == SYMBOL_REF);

      if (mcore_current_function_name)
        free (mcore_current_function_name);

      mcore_current_function_name = xstrdup (XSTR (x, 0));

      ASM_OUTPUT_CG_NODE (asm_out_file, mcore_current_function_name, space_allocated);

      if (cfun->calls_alloca)
        ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name, "alloca", 1);

      /* 970425: RBE:
         We're looking at how the 8byte alignment affects stack layout
         and where we had to pad things.  This emits information we can
         extract which tells us about frame sizes and the like.  */
      fprintf (asm_out_file,
               "\t.equ\t__$frame$info$_%s_$_%d_%d_x%x_%d_%d_%d,0\n",
               mcore_current_function_name,
               fi.arg_size, fi.reg_size, fi.reg_mask,
               fi.local_size, fi.outbound_size,
               frame_pointer_needed);
    }

  if (mcore_naked_function_p ())
    return;

  /* Handle stdarg+regsaves in one shot: can't be more than 64 bytes.  */
  output_stack_adjust (-1, fi.growth[growth++]);        /* Grows it.  */

  /* If we have a parameter passed partially in regs and partially in memory,
     the registers will have been stored to memory already in function.c.  So
     we only need to do something here for varargs functions.  */
  if (fi.arg_size != 0 && crtl->args.pretend_args_size == 0)
    {
      int offset;
      int rn = FIRST_PARM_REG + NPARM_REGS - 1;
      int remaining = fi.arg_size;

      for (offset = fi.arg_offset; remaining >= 4; offset -= 4, rn--, remaining -= 4)
        {
          emit_insn (gen_movsi
                     (gen_rtx_MEM (SImode,
                               plus_constant (stack_pointer_rtx, offset)),
                      gen_rtx_REG (SImode, rn)));
        }
    }

  /* Do we need another stack adjustment before we do the register saves?  */
  if (growth < fi.reg_growth)
    output_stack_adjust (-1, fi.growth[growth++]);              /* Grows it.  */

  if (fi.reg_size != 0)
    {
      int i;
      int offs = fi.reg_offset;

      for (i = 15; i >= 0; i--)
        {
          if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
            {
              int first_reg = 15;

              while (fi.reg_mask & (1 << first_reg))
                first_reg--;
              first_reg++;

              emit_insn (gen_store_multiple (gen_rtx_MEM (SImode, stack_pointer_rtx),
                                             gen_rtx_REG (SImode, first_reg),
                                             GEN_INT (16 - first_reg)));

              i -= (15 - first_reg);
              offs += (16 - first_reg) * 4;
            }
          else if (fi.reg_mask & (1 << i))
            {
              emit_insn (gen_movsi
                         (gen_rtx_MEM (SImode,
                                   plus_constant (stack_pointer_rtx, offs)),
                          gen_rtx_REG (SImode, i)));
              offs += 4;
            }
        }
    }
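
  /* Worked example (editorial note, not in the original source): with
     reg_mask == 0xf000 (r12-r15 live) and reg_offset == 0, the loop above
     finds first_reg == 12 and emits one store_multiple of 4 registers
     instead of four individual stw's.  */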

  /* Figure the locals + outbounds.  */
  if (frame_pointer_needed)
    {
      /* If we haven't already purchased to 'fp'.  */
      if (growth < fi.local_growth)
        output_stack_adjust (-1, fi.growth[growth++]);          /* Grows it.  */

      emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));

      /* ... and then go any remaining distance for outbounds, etc.  */
      if (fi.growth[growth])
        output_stack_adjust (-1, fi.growth[growth++]);
    }
  else
    {
      if (growth < fi.local_growth)
        output_stack_adjust (-1, fi.growth[growth++]);          /* Grows it.  */
      if (fi.growth[growth])
        output_stack_adjust (-1, fi.growth[growth++]);
    }
}

void
mcore_expand_epilog (void)
{
  struct mcore_frame fi;
  int i;
  int offs;
  int growth = MAX_STACK_GROWS - 1;

  /* Find out what we're doing.  */
  layout_mcore_frame (&fi);

  if (mcore_naked_function_p ())
    return;

  /* If we had a frame pointer, restore the sp from that.  */
  if (frame_pointer_needed)
    {
      emit_insn (gen_movsi (stack_pointer_rtx, frame_pointer_rtx));
      growth = fi.local_growth - 1;
    }
  else
    {
      /* XXX: while loop should accumulate and do a single sell.  */
      while (growth >= fi.local_growth)
        {
          if (fi.growth[growth] != 0)
            output_stack_adjust (1, fi.growth[growth]);
          growth--;
        }
    }

  /* Make sure we've shrunk stack back to the point where the registers
     were laid down.  This is typically 0/1 iterations.  Then pull the
     register save information back off the stack.  */
  while (growth >= fi.reg_growth)
    output_stack_adjust (1, fi.growth[growth--]);

  offs = fi.reg_offset;

  for (i = 15; i >= 0; i--)
    {
      if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
        {
          int first_reg;

          /* Find the starting register.  */
          first_reg = 15;

          while (fi.reg_mask & (1 << first_reg))
            first_reg--;

          first_reg++;

          emit_insn (gen_load_multiple (gen_rtx_REG (SImode, first_reg),
                                        gen_rtx_MEM (SImode, stack_pointer_rtx),
                                        GEN_INT (16 - first_reg)));

          i -= (15 - first_reg);
          offs += (16 - first_reg) * 4;
        }
      else if (fi.reg_mask & (1 << i))
        {
          emit_insn (gen_movsi
                     (gen_rtx_REG (SImode, i),
                      gen_rtx_MEM (SImode,
                               plus_constant (stack_pointer_rtx, offs))));
          offs += 4;
        }
    }

  /* Give back anything else.  */
  /* XXX: Should accumulate total and then give it back.  */
  while (growth >= 0)
    output_stack_adjust (1, fi.growth[growth--]);
}

/* This code is borrowed from the SH port.  */

/* The MCORE cannot load a large constant into a register; constants have to
   come from a pc relative load.  The reference of a pc relative load
   instruction must be less than 1k in front of the instruction.  This
   means that we often have to dump a constant inside a function, and
   generate code to branch around it.

   It is important to minimize this, since the branches will slow things
   down and make things bigger.

   Worst case code looks like:

   lrw   L1,r0
   br    L2
   align
   L1:   .long value
   L2:
   ..

   lrw   L3,r0
   br    L4
   align
   L3:   .long value
   L4:
   ..

   We fix this by performing a scan before scheduling, which notices which
   instructions need to have their operands fetched from the constant table
   and builds the table.

   The algorithm is:

   Scan, and find an instruction which needs a pcrel move.  Look forward,
   and find the last barrier which is within MAX_COUNT bytes of the
   requirement.  If there isn't one, make one.  Process all the instructions
   between the find and the barrier.

   In the above example, we can tell that L3 is within 1k of L1, so
   the first move can be shrunk from the 2 insn+constant sequence into
   just 1 insn, and the constant moved to L3 to make:

   lrw          L1,r0
   ..
   lrw          L3,r0
   bra          L4
   align
   L3:.long value
   L4:.long value

   Then the second move becomes the target for the shortening process.  */

typedef struct
{
  rtx value;                    /* Value in table.  */
  rtx label;                    /* Label of value.  */
} pool_node;

/* The maximum number of constants that can fit into one pool, since
   the pc relative range is 0...1020 bytes and constants are at least 4
   bytes long.  We subtract 4 from the range to allow for the case where
   we need to add a branch/align before the constant pool.  */

#define MAX_COUNT 1016
#define MAX_POOL_SIZE (MAX_COUNT/4)
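/* Editorial note, not in the original source: with MAX_COUNT == 1016 this
   works out to 1016 / 4 == 254 pool entries per table.  */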
2193
static pool_node pool_vector[MAX_POOL_SIZE];
2194
static int pool_size;
2195
 
2196
/* Dump out any constants accumulated in the final pass.  These
2197
   will only be labels.  */
2198
 
2199
const char *
2200
mcore_output_jump_label_table (void)
2201
{
2202
  int i;
2203
 
2204
  if (pool_size)
2205
    {
2206
      fprintf (asm_out_file, "\t.align 2\n");
2207
 
2208
      for (i = 0; i < pool_size; i++)
2209
        {
2210
          pool_node * p = pool_vector + i;
2211
 
2212
          (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (p->label));
2213
 
2214
          output_asm_insn (".long       %0", &p->value);
2215
        }
2216
 
2217
      pool_size = 0;
2218
    }
2219
 
2220
  return "";
2221
}
2222
 
2223
/* Check whether insn is a candidate for a conditional.  */
2224
 
2225
static cond_type
2226
is_cond_candidate (rtx insn)
2227
{
2228
  /* The only things we conditionalize are those that can be directly
2229
     changed into a conditional.  Only bother with SImode items.  If
2230
     we wanted to be a little more aggressive, we could also do other
2231
     modes such as DImode with reg-reg move or load 0.  */
2232
  if (GET_CODE (insn) == INSN)
2233
    {
2234
      rtx pat = PATTERN (insn);
2235
      rtx src, dst;
2236
 
2237
      if (GET_CODE (pat) != SET)
2238
        return COND_NO;
2239
 
2240
      dst = XEXP (pat, 0);
2241
 
2242
      if ((GET_CODE (dst) != REG &&
2243
           GET_CODE (dst) != SUBREG) ||
2244
          GET_MODE (dst) != SImode)
2245
        return COND_NO;
2246
 
2247
      src = XEXP (pat, 1);
2248
 
2249
      if ((GET_CODE (src) == REG ||
2250
           (GET_CODE (src) == SUBREG &&
2251
            GET_CODE (SUBREG_REG (src)) == REG)) &&
2252
          GET_MODE (src) == SImode)
2253
        return COND_MOV_INSN;
2254
      else if (GET_CODE (src) == CONST_INT &&
2255
               INTVAL (src) == 0)
2256
        return COND_CLR_INSN;
2257
      else if (GET_CODE (src) == PLUS &&
2258
               (GET_CODE (XEXP (src, 0)) == REG ||
2259
                (GET_CODE (XEXP (src, 0)) == SUBREG &&
2260
                 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
2261
               GET_MODE (XEXP (src, 0)) == SImode &&
2262
               GET_CODE (XEXP (src, 1)) == CONST_INT &&
2263
               INTVAL (XEXP (src, 1)) == 1)
2264
        return COND_INC_INSN;
2265
      else if (((GET_CODE (src) == MINUS &&
2266
                 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2267
                 INTVAL( XEXP (src, 1)) == 1) ||
2268
                (GET_CODE (src) == PLUS &&
2269
                 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2270
                 INTVAL (XEXP (src, 1)) == -1)) &&
2271
               (GET_CODE (XEXP (src, 0)) == REG ||
2272
                (GET_CODE (XEXP (src, 0)) == SUBREG &&
2273
                 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
2274
               GET_MODE (XEXP (src, 0)) == SImode)
2275
        return COND_DEC_INSN;
2276
 
2277
      /* Some insns that we don't bother with:
2278
         (set (rx:DI) (ry:DI))
2279
         (set (rx:DI) (const_int 0))
2280
      */
2281
 
2282
    }
2283
  else if (GET_CODE (insn) == JUMP_INSN &&
2284
           GET_CODE (PATTERN (insn)) == SET &&
2285
           GET_CODE (XEXP (PATTERN (insn), 1)) == LABEL_REF)
2286
    return COND_BRANCH_INSN;
2287
 
2288
  return COND_NO;
2289
}
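
/* Illustrative examples (editorial note, not in the original source):

     (set (reg:SI 2) (plus:SI (reg:SI 2) (const_int 1)))  -> COND_INC_INSN
     (set (reg:SI 3) (const_int 0))                       -> COND_CLR_INSN
     (set (reg:SI 2) (reg:SI 4))                          -> COND_MOV_INSN

   A DImode move, or an add of anything other than +/-1, falls through
   to COND_NO.  */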
2290
 
2291
/* Emit a conditional version of insn and replace the old insn with the
2292
   new one.  Return the new insn if emitted.  */
2293
 
2294
static rtx
2295
emit_new_cond_insn (rtx insn, int cond)
2296
{
2297
  rtx c_insn = 0;
2298
  rtx pat, dst, src;
2299
  cond_type num;
2300
 
2301
  if ((num = is_cond_candidate (insn)) == COND_NO)
2302
    return NULL;
2303
 
2304
  pat = PATTERN (insn);
2305
 
2306
  if (GET_CODE (insn) == INSN)
2307
    {
2308
      dst = SET_DEST (pat);
2309
      src = SET_SRC (pat);
2310
    }
2311
  else
2312
    {
2313
      dst = JUMP_LABEL (insn);
2314
      src = NULL_RTX;
2315
    }
2316
 
2317
  switch (num)
2318
    {
2319
    case COND_MOV_INSN:
2320
    case COND_CLR_INSN:
2321
      if (cond)
2322
        c_insn = gen_movt0 (dst, src, dst);
2323
      else
2324
        c_insn = gen_movt0 (dst, dst, src);
2325
      break;
2326
 
2327
    case COND_INC_INSN:
2328
      if (cond)
2329
        c_insn = gen_incscc (dst, dst);
2330
      else
2331
        c_insn = gen_incscc_false (dst, dst);
2332
      break;
2333
 
2334
    case COND_DEC_INSN:
2335
      if (cond)
2336
        c_insn = gen_decscc (dst, dst);
2337
      else
2338
        c_insn = gen_decscc_false (dst, dst);
2339
      break;
2340
 
2341
    case COND_BRANCH_INSN:
2342
      if (cond)
2343
        c_insn = gen_branch_true (dst);
2344
      else
2345
        c_insn = gen_branch_false (dst);
2346
      break;
2347
 
2348
    default:
2349
      return NULL;
2350
    }
2351
 
2352
  /* Only copy the notes if they exist.  */
2353
  if (rtx_length [GET_CODE (c_insn)] >= 7 && rtx_length [GET_CODE (insn)] >= 7)
2354
    {
2355
      /* We really don't need to bother with the notes and links at this
2356
         point, but go ahead and save the notes.  This will help is_dead()
2357
         when applying peepholes (links don't matter since they are not
2358
         used any more beyond this point for the mcore).  */
2359
      REG_NOTES (c_insn) = REG_NOTES (insn);
2360
    }
2361
 
2362
  if (num == COND_BRANCH_INSN)
2363
    {
2364
      /* For jumps, we need to be a little bit careful and emit the new jump
2365
         before the old one and to update the use count for the target label.
2366
         This way, the barrier following the old (uncond) jump will get
2367
         deleted, but the label won't.  */
2368
      c_insn = emit_jump_insn_before (c_insn, insn);
2369
 
2370
      ++ LABEL_NUSES (dst);
2371
 
2372
      JUMP_LABEL (c_insn) = dst;
2373
    }
2374
  else
2375
    c_insn = emit_insn_after (c_insn, insn);
2376
 
2377
  delete_insn (insn);
2378
 
2379
  return c_insn;
2380
}
2381
 
2382
/* Attempt to change a basic block into a series of conditional insns.  This
2383
   works by taking the branch at the end of the 1st block and scanning for the
2384
   end of the 2nd block.  If all instructions in the 2nd block have cond.
2385
   versions and the label at the start of block 3 is the same as the target
2386
   from the branch at block 1, then conditionalize all insn in block 2 using
2387
   the inverse condition of the branch at block 1.  (Note I'm bending the
2388
   definition of basic block here.)
2389
 
2390
   e.g., change:
2391
 
2392
                bt      L2             <-- end of block 1 (delete)
2393
                mov     r7,r8
2394
                addu    r7,1
2395
                br      L3             <-- end of block 2
2396
 
2397
        L2:     ...                    <-- start of block 3 (NUSES==1)
2398
        L3:     ...
2399
 
2400
   to:
2401
 
2402
                movf    r7,r8
2403
                incf    r7
2404
                bf      L3
2405
 
2406
        L3:     ...
2407
 
2408
   we can delete the L2 label if NUSES==1 and re-apply the optimization
2409
   starting at the last instruction of block 2.  This may allow an entire
2410
   if-then-else statement to be conditionalized.  BRC  */
2411
static rtx
2412
conditionalize_block (rtx first)
2413
{
2414
  rtx insn;
2415
  rtx br_pat;
2416
  rtx end_blk_1_br = 0;
2417
  rtx end_blk_2_insn = 0;
2418
  rtx start_blk_3_lab = 0;
2419
  int cond;
2420
  int br_lab_num;
2421
  int blk_size = 0;
2422
 
2423
 
2424
  /* Check that the first insn is a candidate conditional jump.  This is
2425
     the one that we'll eliminate.  If not, advance to the next insn to
2426
     try.  */
2427
  if (GET_CODE (first) != JUMP_INSN ||
2428
      GET_CODE (PATTERN (first)) != SET ||
2429
      GET_CODE (XEXP (PATTERN (first), 1)) != IF_THEN_ELSE)
2430
    return NEXT_INSN (first);
2431
 
2432
  /* Extract some information we need.  */
2433
  end_blk_1_br = first;
2434
  br_pat = PATTERN (end_blk_1_br);
2435
 
2436
  /* Complement the condition since we use the reverse cond. for the insns.  */
2437
  cond = (GET_CODE (XEXP (XEXP (br_pat, 1), 0)) == EQ);
2438
 
2439
  /* Determine what kind of branch we have.  */
2440
  if (GET_CODE (XEXP (XEXP (br_pat, 1), 1)) == LABEL_REF)
2441
    {
2442
      /* A normal branch, so extract label out of first arm.  */
2443
      br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 1), 0));
2444
    }
2445
  else
2446
    {
2447
      /* An inverse branch, so extract the label out of the 2nd arm
2448
         and complement the condition.  */
2449
      cond = (cond == 0);
2450
      br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 2), 0));
2451
    }
2452
 
2453
  /* Scan forward for the start of block 2: it must start with a
2454
     label and that label must be the same as the branch target
2455
     label from block 1.  We don't care about whether block 2 actually
2456
     ends with a branch or a label (an uncond. branch is
2457
     conditionalizable).  */
2458
  for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
2459
    {
2460
      enum rtx_code code;
2461
 
2462
      code = GET_CODE (insn);
2463
 
2464
      /* Look for the label at the start of block 3.  */
2465
      if (code == CODE_LABEL && CODE_LABEL_NUMBER (insn) == br_lab_num)
2466
        break;
2467
 
2468
      /* Skip barriers, notes, and conditionalizable insns.  If the
2469
         insn is not conditionalizable or makes this optimization fail,
2470
         just return the next insn so we can start over from that point.  */
2471
      if (code != BARRIER && code != NOTE && !is_cond_candidate (insn))
2472
        return NEXT_INSN (insn);
2473
 
2474
      /* Remember the last real insn before the label (i.e. end of block 2).  */
2475
      if (code == JUMP_INSN || code == INSN)
2476
        {
2477
          blk_size ++;
2478
          end_blk_2_insn = insn;
2479
        }
2480
    }
2481
 
2482
  if (!insn)
2483
    return insn;
2484
 
2485
  /* It is possible for this optimization to slow performance if the blocks
2486
     are long.  This really depends upon whether the branch is likely taken
2487
     or not.  If the branch is taken, we slow performance in many cases.  But,
2488
     if the branch is not taken, we always help performance (for a single
2489
     block, but for a double block (i.e. when the optimization is re-applied)
2490
     this is not true since the 'right thing' depends on the overall length of
2491
     the collapsed block).  As a compromise, don't apply this optimization on
2492
     blocks larger than size 2 (unlikely for the mcore) when speed is important.
2493
     the best threshold depends on the latencies of the instructions (i.e.,
2494
     the branch penalty).  */
2495
  if (optimize > 1 && blk_size > 2)
2496
    return insn;
2497
 
2498
  /* At this point, we've found the start of block 3 and we know that
2499
     it is the destination of the branch from block 1.   Also, all
2500
     instructions in the block 2 are conditionalizable.  So, apply the
2501
     conditionalization and delete the branch.  */
2502
  start_blk_3_lab = insn;
2503
 
2504
  for (insn = NEXT_INSN (end_blk_1_br); insn != start_blk_3_lab;
2505
       insn = NEXT_INSN (insn))
2506
    {
2507
      rtx newinsn;
2508
 
2509
      if (INSN_DELETED_P (insn))
2510
        continue;
2511
 
2512
      /* Try to form a conditional variant of the instruction and emit it.  */
2513
      if ((newinsn = emit_new_cond_insn (insn, cond)))
2514
        {
2515
          if (end_blk_2_insn == insn)
2516
            end_blk_2_insn = newinsn;
2517
 
2518
          insn = newinsn;
2519
        }
2520
    }
2521
 
2522
  /* Note whether we will delete the label starting blk 3 when the jump
2523
     gets deleted.  If so, we want to re-apply this optimization at the
2524
     last real instruction right before the label.  */
2525
  if (LABEL_NUSES (start_blk_3_lab) == 1)
2526
    {
2527
      start_blk_3_lab = 0;
2528
    }
2529
 
2530
  /* ??? we probably should redistribute the death notes for this insn, esp.
2531
     the death of cc, but it doesn't really matter this late in the game.
2532
     The peepholes all use is_dead() which will find the correct death
2533
     regardless of whether there is a note.  */
2534
  delete_insn (end_blk_1_br);
2535
 
2536
  if (! start_blk_3_lab)
2537
    return end_blk_2_insn;
2538
 
2539
  /* Return the insn right after the label at the start of block 3.  */
2540
  return NEXT_INSN (start_blk_3_lab);
2541
}
2542
 
2543
/* Apply the conditionalization of blocks optimization.  This is the
2544
   outer loop that traverses through the insns scanning for a branch
2545
   that signifies an opportunity to apply the optimization.  Note that
2546
   this optimization is applied late.  If we could apply it earlier,
2547
   say before cse 2, it may expose more optimization opportunities.
2548
   but, the pay back probably isn't really worth the effort (we'd have
2549
   to update all reg/flow/notes/links/etc to make it work - and stick it
2550
   in before cse 2).  */
2551
 
2552
static void
2553
conditionalize_optimization (void)
2554
{
2555
  rtx insn;
2556
 
2557
  for (insn = get_insns (); insn; insn = conditionalize_block (insn))
2558
    continue;
2559
}
2560
 
2561
static int saved_warn_return_type = -1;
2562
static int saved_warn_return_type_count = 0;
2563
 
2564
/* This is to handle loads from the constant pool.  */
2565
 
2566
static void
2567
mcore_reorg (void)
2568
{
2569
  /* Reset this variable.  */
2570
  current_function_anonymous_args = 0;
2571
 
2572
  /* Restore the warn_return_type if it has been altered.  */
2573
  if (saved_warn_return_type != -1)
2574
    {
2575
      /* Only restore the value if we have reached another function.
2576
         The test of warn_return_type occurs in final_function () in
2577
         c-decl.c a long time after the code for the function is generated,
2578
         so we need a counter to tell us when we have finished parsing that
2579
         function and can restore the flag.  */
2580
      if (--saved_warn_return_type_count == 0)
2581
        {
2582
          warn_return_type = saved_warn_return_type;
2583
          saved_warn_return_type = -1;
2584
        }
2585
    }
2586
 
2587
  if (optimize == 0)
2588
    return;
2589
 
2590
  /* Conditionalize blocks where we can.  */
2591
  conditionalize_optimization ();
2592
 
2593
  /* Literal pool generation is now pushed off until the assembler.  */
2594
}
2595
 
2596
 
2597
/* Return true if X is something that can be moved directly into r15.  */
2598
 
2599
bool
2600
mcore_r15_operand_p (rtx x)
2601
{
2602
  switch (GET_CODE (x))
2603
    {
2604
    case CONST_INT:
2605
      return mcore_const_ok_for_inline (INTVAL (x));
2606
 
2607
    case REG:
2608
    case SUBREG:
2609
    case MEM:
2610
      return 1;
2611
 
2612
    default:
2613
      return 0;
2614
    }
2615
}
2616
 
2617
/* Implement SECONDARY_RELOAD_CLASS.  If RCLASS contains r15, and we can't
2618
   directly move X into it, use r1-r14 as a temporary.  */
2619
 
2620
enum reg_class
2621
mcore_secondary_reload_class (enum reg_class rclass,
2622
                              enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
2623
{
2624
  if (TEST_HARD_REG_BIT (reg_class_contents[rclass], 15)
2625
      && !mcore_r15_operand_p (x))
2626
    return LRW_REGS;
2627
  return NO_REGS;
2628
}
2629
 
2630
/* Return the reg_class to use when reloading the rtx X into the class
2631
   RCLASS.  If X is too complex to move directly into r15, prefer to
2632
   use LRW_REGS instead.  */
2633
 
2634
enum reg_class
2635
mcore_reload_class (rtx x, enum reg_class rclass)
2636
{
2637
  if (reg_class_subset_p (LRW_REGS, rclass) && !mcore_r15_operand_p (x))
2638
    return LRW_REGS;
2639
 
2640
  return rclass;
2641
}
2642
 
2643
/* Tell me if a pair of reg/subreg rtx's actually refer to the same
2644
   register.  Note that the current version doesn't worry about whether
2645
   they are the same mode or note (e.g., a QImode in r2 matches an HImode
2646
   in r2 matches an SImode in r2. Might think in the future about whether
2647
   we want to be able to say something about modes.  */
2648
 
2649
int
2650
mcore_is_same_reg (rtx x, rtx y)
2651
{
2652
  /* Strip any and all of the subreg wrappers.  */
2653
  while (GET_CODE (x) == SUBREG)
2654
    x = SUBREG_REG (x);
2655
 
2656
  while (GET_CODE (y) == SUBREG)
2657
    y = SUBREG_REG (y);
2658
 
2659
  if (GET_CODE(x) == REG && GET_CODE(y) == REG && REGNO(x) == REGNO(y))
2660
    return 1;
2661
 
2662
  return 0;
2663
}
2664
 
2665
void
2666
mcore_override_options (void)
2667
{
2668
  /* Only the m340 supports little endian code.  */
2669
  if (TARGET_LITTLE_END && ! TARGET_M340)
2670
    target_flags |= MASK_M340;
2671
}
2672
 
2673
/* Compute the number of word sized registers needed to
2674
   hold a function argument of mode MODE and type TYPE.  */
2675
 
2676
int
2677
mcore_num_arg_regs (enum machine_mode mode, const_tree type)
2678
{
2679
  int size;
2680
 
2681
  if (targetm.calls.must_pass_in_stack (mode, type))
2682
    return 0;
2683
 
2684
  if (type && mode == BLKmode)
2685
    size = int_size_in_bytes (type);
2686
  else
2687
    size = GET_MODE_SIZE (mode);
2688
 
2689
  return ROUND_ADVANCE (size);
2690
}
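
/* Worked example (editorial note, not in the original source): assuming
   4-byte words, a 10-byte BLKmode structure needs ROUND_ADVANCE (10) == 3
   argument words, while an SImode scalar needs exactly 1.  */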
2691
 
2692
static rtx
2693
handle_structs_in_regs (enum machine_mode mode, const_tree type, int reg)
2694
{
2695
  int size;
2696
 
2697
  /* The MCore ABI defines that a structure whose size is not a whole multiple
2698
     of bytes is passed packed into registers (or spilled onto the stack if
2699
     not enough registers are available) with the last few bytes of the
2700
     structure being packed, left-justified, into the last register/stack slot.
2701
     GCC handles this correctly if the last word is in a stack slot, but we
2702
     have to generate a special, PARALLEL RTX if the last word is in an
2703
     argument register.  */
2704
  if (type
2705
      && TYPE_MODE (type) == BLKmode
2706
      && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
2707
      && (size = int_size_in_bytes (type)) > UNITS_PER_WORD
2708
      && (size % UNITS_PER_WORD != 0)
2709
      && (reg + mcore_num_arg_regs (mode, type) <= (FIRST_PARM_REG + NPARM_REGS)))
2710
    {
2711
      rtx    arg_regs [NPARM_REGS];
2712
      int    nregs;
2713
      rtx    result;
2714
      rtvec  rtvec;
2715
 
2716
      for (nregs = 0; size > 0; size -= UNITS_PER_WORD)
2717
        {
2718
          arg_regs [nregs] =
2719
            gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, reg ++),
2720
                               GEN_INT (nregs * UNITS_PER_WORD));
2721
          nregs ++;
2722
        }
2723
 
2724
      /* We assume here that NPARM_REGS == 6.  The assert checks this.  */
2725
      assert (ARRAY_SIZE (arg_regs) == 6);
2726
      rtvec = gen_rtvec (nregs, arg_regs[0], arg_regs[1], arg_regs[2],
2727
                          arg_regs[3], arg_regs[4], arg_regs[5]);
2728
 
2729
      result = gen_rtx_PARALLEL (mode, rtvec);
2730
      return result;
2731
    }
2732
 
2733
  return gen_rtx_REG (mode, reg);
2734
}
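
/* Illustrative example (editorial note, not in the original source): a
   6-byte BLKmode struct starting in the first argument register (hardware
   register 2 on the MCore) comes back as roughly

     (parallel [(expr_list (reg:SI 2) (const_int 0))
                (expr_list (reg:SI 3) (const_int 4))])

   so the trailing 2 bytes travel packed, left-justified, in r3.  */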
2735
 
2736
rtx
2737
mcore_function_value (const_tree valtype, const_tree func)
2738
{
2739
  enum machine_mode mode;
2740
  int unsigned_p;
2741
 
2742
  mode = TYPE_MODE (valtype);
2743
 
2744
  /* Since we promote return types, we must promote the mode here too.  */
2745
  mode = promote_function_mode (valtype, mode, &unsigned_p, func, 1);
2746
 
2747
  return handle_structs_in_regs (mode, valtype, FIRST_RET_REG);
2748
}
2749
 
2750
/* Define where to put the arguments to a function.
2751
   Value is zero to push the argument on the stack,
2752
   or a hard register in which to store the argument.
2753
 
2754
   MODE is the argument's machine mode.
2755
   TYPE is the data type of the argument (as a tree).
2756
    This is null for libcalls where that information may
2757
    not be available.
2758
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
2759
    the preceding args and about the function being called.
2760
   NAMED is nonzero if this argument is a named parameter
2761
    (otherwise it is an extra parameter matching an ellipsis).
2762
 
2763
   On MCore the first args are normally in registers
2764
   and the rest are pushed.  Any arg that starts within the first
2765
   NPARM_REGS words is at least partially passed in a register unless
2766
   its data type forbids.  */
2767
 
2768
rtx
2769
mcore_function_arg (CUMULATIVE_ARGS cum, enum machine_mode mode,
2770
                    tree type, int named)
2771
{
2772
  int arg_reg;
2773
 
2774
  if (! named || mode == VOIDmode)
2775
    return 0;
2776
 
2777
  if (targetm.calls.must_pass_in_stack (mode, type))
2778
    return 0;
2779
 
2780
  arg_reg = ROUND_REG (cum, mode);
2781
 
2782
  if (arg_reg < NPARM_REGS)
2783
    return handle_structs_in_regs (mode, type, FIRST_PARM_REG + arg_reg);
2784
 
2785
  return 0;
2786
}
2787
 
2788
/* Returns the number of bytes of argument registers required to hold *part*
2789
   of a parameter of machine mode MODE and type TYPE (which may be NULL if
2790
   the type is not known).  If the argument fits entirely in the argument
2791
   registers, or entirely on the stack, then 0 is returned.  CUM is the
2792
   number of argument registers already used by earlier parameters to
2793
   the function.  */
2794
 
2795
static int
2796
mcore_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
2797
                         tree type, bool named)
2798
{
2799
  int reg = ROUND_REG (*cum, mode);
2800
 
2801
  if (named == 0)
2802
    return 0;
2803
 
2804
  if (targetm.calls.must_pass_in_stack (mode, type))
2805
    return 0;
2806
 
2807
  /* REG is not the *hardware* register number of the register that holds
2808
     the argument, it is the *argument* register number.  So for example,
2809
     the first argument to a function goes in argument register 0, which
2810
     translates (for the MCore) into hardware register 2.  The second
2811
     argument goes into argument register 1, which translates into hardware
2812
     register 3, and so on.  NPARM_REGS is the number of argument registers
2813
     supported by the target, not the maximum hardware register number of
2814
     the target.  */
2815
  if (reg >= NPARM_REGS)
2816
    return 0;
2817
 
2818
  /* If the argument fits entirely in registers, return 0.  */
2819
  if (reg + mcore_num_arg_regs (mode, type) <= NPARM_REGS)
2820
    return 0;
2821
 
2822
  /* The argument overflows the number of available argument registers.
2823
     Compute how many argument registers have not yet been assigned to
2824
     hold an argument.  */
2825
  reg = NPARM_REGS - reg;
2826
 
2827
  /* Return partially in registers and partially on the stack.  */
2828
  return reg * UNITS_PER_WORD;
2829
}
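
/* Worked example (editorial note, not in the original source): with
   NPARM_REGS == 6, a 3-word argument starting in argument register 4
   overflows the register file (4 + 3 > 6), so this returns
   (6 - 4) * UNITS_PER_WORD == 8 bytes passed in registers, with the
   remaining word going to the stack.  */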
2830
 
2831
/* Return nonzero if SYMBOL is marked as being dllexport'd.  */
2832
 
2833
int
2834
mcore_dllexport_name_p (const char * symbol)
2835
{
2836
  return symbol[0] == '@' && symbol[1] == 'e' && symbol[2] == '.';
2837
}
2838
 
2839
/* Return nonzero if SYMBOL is marked as being dllimport'd.  */
2840
 
2841
int
2842
mcore_dllimport_name_p (const char * symbol)
2843
{
2844
  return symbol[0] == '@' && symbol[1] == 'i' && symbol[2] == '.';
2845
}
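
/* Illustrative examples (editorial note, not in the original source): a
   dllexport'd "foo" is renamed "@e.foo" and a dllimport'd "foo" becomes
   "@i.__imp_foo" (see the marking routines below); mcore_strip_name_encoding
   later drops the three-character "@e." / "@i." prefix.  */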
2846
 
2847
/* Mark a DECL as being dllexport'd.  */
2848
 
2849
static void
2850
mcore_mark_dllexport (tree decl)
2851
{
2852
  const char * oldname;
2853
  char * newname;
2854
  rtx    rtlname;
2855
  tree   idp;
2856
 
2857
  rtlname = XEXP (DECL_RTL (decl), 0);
2858
 
2859
  if (GET_CODE (rtlname) == MEM)
2860
    rtlname = XEXP (rtlname, 0);
2861
  gcc_assert (GET_CODE (rtlname) == SYMBOL_REF);
2862
  oldname = XSTR (rtlname, 0);
2863
 
2864
  if (mcore_dllexport_name_p (oldname))
2865
    return;  /* Already done.  */
2866
 
2867
  newname = XALLOCAVEC (char, strlen (oldname) + 4);
2868
  sprintf (newname, "@e.%s", oldname);
2869
 
2870
  /* We pass newname through get_identifier to ensure it has a unique
2871
     address.  RTL processing can sometimes peek inside the symbol ref
2872
     and compare the string's addresses to see if two symbols are
2873
     identical.  */
2874
  /* ??? At least I think that's why we do this.  */
2875
  idp = get_identifier (newname);
2876
 
2877
  XEXP (DECL_RTL (decl), 0) =
2878
    gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));
2879
}
2880
 
2881
/* Mark a DECL as being dllimport'd.  */
2882
 
2883
static void
2884
mcore_mark_dllimport (tree decl)
2885
{
2886
  const char * oldname;
2887
  char * newname;
2888
  tree   idp;
2889
  rtx    rtlname;
2890
  rtx    newrtl;
2891
 
2892
  rtlname = XEXP (DECL_RTL (decl), 0);
2893
 
2894
  if (GET_CODE (rtlname) == MEM)
2895
    rtlname = XEXP (rtlname, 0);
2896
  gcc_assert (GET_CODE (rtlname) == SYMBOL_REF);
2897
  oldname = XSTR (rtlname, 0);
2898
 
2899
  gcc_assert (!mcore_dllexport_name_p (oldname));
2900
  if (mcore_dllimport_name_p (oldname))
2901
    return; /* Already done.  */
2902
 
2903
  /* ??? One can well ask why we're making these checks here,
2904
     and that would be a good question.  */
2905
 
2906
  /* Imported variables can't be initialized.  */
2907
  if (TREE_CODE (decl) == VAR_DECL
2908
      && !DECL_VIRTUAL_P (decl)
2909
      && DECL_INITIAL (decl))
2910
    {
2911
      error ("initialized variable %q+D is marked dllimport", decl);
2912
      return;
2913
    }
2914
 
2915
  /* `extern' needn't be specified with dllimport.
2916
     Specify `extern' now and hope for the best.  Sigh.  */
2917
  if (TREE_CODE (decl) == VAR_DECL
2918
      /* ??? Is this test for vtables needed?  */
2919
      && !DECL_VIRTUAL_P (decl))
2920
    {
2921
      DECL_EXTERNAL (decl) = 1;
2922
      TREE_PUBLIC (decl) = 1;
2923
    }
2924
 
2925
  newname = XALLOCAVEC (char, strlen (oldname) + 11);
2926
  sprintf (newname, "@i.__imp_%s", oldname);
2927
 
2928
  /* We pass newname through get_identifier to ensure it has a unique
2929
     address.  RTL processing can sometimes peek inside the symbol ref
2930
     and compare the string's addresses to see if two symbols are
2931
     identical.  */
2932
  /* ??? At least I think that's why we do this.  */
2933
  idp = get_identifier (newname);
2934
 
2935
  newrtl = gen_rtx_MEM (Pmode,
2936
                    gen_rtx_SYMBOL_REF (Pmode,
2937
                             IDENTIFIER_POINTER (idp)));
2938
  XEXP (DECL_RTL (decl), 0) = newrtl;
2939
}
2940
 
2941
static int
2942
mcore_dllexport_p (tree decl)
2943
{
2944
  if (   TREE_CODE (decl) != VAR_DECL
2945
      && TREE_CODE (decl) != FUNCTION_DECL)
2946
    return 0;
2947
 
2948
  return lookup_attribute ("dllexport", DECL_ATTRIBUTES (decl)) != 0;
2949
}
2950
 
2951
static int
2952
mcore_dllimport_p (tree decl)
2953
{
2954
  if (   TREE_CODE (decl) != VAR_DECL
2955
      && TREE_CODE (decl) != FUNCTION_DECL)
2956
    return 0;
2957
 
2958
  return lookup_attribute ("dllimport", DECL_ATTRIBUTES (decl)) != 0;
2959
}
2960
 
2961
/* We must mark dll symbols specially.  Definitions of dllexport'd objects
2962
   install some info in the .drective (PE) or .exports (ELF) sections.  */
2963
 
2964
static void
2965
mcore_encode_section_info (tree decl, rtx rtl ATTRIBUTE_UNUSED, int first ATTRIBUTE_UNUSED)
2966
{
2967
  /* Mark the decl so we can tell from the rtl whether the object is
2968
     dllexport'd or dllimport'd.  */
2969
  if (mcore_dllexport_p (decl))
2970
    mcore_mark_dllexport (decl);
2971
  else if (mcore_dllimport_p (decl))
2972
    mcore_mark_dllimport (decl);
2973
 
2974
  /* It might be that DECL has already been marked as dllimport, but
2975
     a subsequent definition nullified that.  The attribute is gone
2976
     but DECL_RTL still has @i.__imp_foo.  We need to remove that.  */
2977
  else if ((TREE_CODE (decl) == FUNCTION_DECL
2978
            || TREE_CODE (decl) == VAR_DECL)
2979
           && DECL_RTL (decl) != NULL_RTX
2980
           && GET_CODE (DECL_RTL (decl)) == MEM
2981
           && GET_CODE (XEXP (DECL_RTL (decl), 0)) == MEM
2982
           && GET_CODE (XEXP (XEXP (DECL_RTL (decl), 0), 0)) == SYMBOL_REF
2983
           && mcore_dllimport_name_p (XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0)))
2984
    {
2985
      const char * oldname = XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0);
2986
      tree idp = get_identifier (oldname + 9);
2987
      rtx newrtl = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));
2988
 
2989
      XEXP (DECL_RTL (decl), 0) = newrtl;
2990
 
2991
      /* We previously set TREE_PUBLIC and DECL_EXTERNAL.
2992
         ??? We leave these alone for now.  */
2993
    }
2994
}
2995
 
2996
/* Undo the effects of the above.  */
2997
 
2998
static const char *
2999
mcore_strip_name_encoding (const char * str)
3000
{
3001
  return str + (str[0] == '@' ? 3 : 0);
3002
}
3003
 
3004
/* MCore specific attribute support.
3005
   dllexport - for exporting a function/variable that will live in a dll
3006
   dllimport - for importing a function/variable from a dll
3007
   naked     - do not create a function prologue/epilogue.  */
3008
 
3009
/* Handle a "naked" attribute; arguments as in
3010
   struct attribute_spec.handler.  */
3011
 
3012
static tree
3013
mcore_handle_naked_attribute (tree * node, tree name, tree args ATTRIBUTE_UNUSED,
3014
                              int flags ATTRIBUTE_UNUSED, bool * no_add_attrs)
3015
{
3016
  if (TREE_CODE (*node) == FUNCTION_DECL)
3017
    {
3018
      /* PR14310 - don't complain about lack of return statement
3019
         in naked functions.  The solution here is a gross hack
3020
         but this is the only way to solve the problem without
3021
         adding a new feature to GCC.  I did try submitting a patch
3022
         that would add such a new feature, but it was (rightfully)
3023
         rejected on the grounds that it was creeping featurism,
3024
         so hence this code.  */
3025
      if (warn_return_type)
3026
        {
3027
          saved_warn_return_type = warn_return_type;
3028
          warn_return_type = 0;
3029
          saved_warn_return_type_count = 2;
3030
        }
3031
      else if (saved_warn_return_type_count)
3032
        saved_warn_return_type_count = 2;
3033
    }
3034
  else
3035
    {
3036
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
3037
               name);
3038
      *no_add_attrs = true;
3039
    }
3040
 
3041
  return NULL_TREE;
3042
}
3043
 
3044
/* ??? It looks like this is PE specific?  Oh well, this is what the
3045
   old code did as well.  */
3046
 
3047
static void
3048
mcore_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
3049
{
3050
  int len;
3051
  const char * name;
3052
  char * string;
3053
  const char * prefix;
3054
 
3055
  name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
3056
 
3057
  /* Strip off any encoding in name.  */
3058
  name = (* targetm.strip_name_encoding) (name);
3059
 
3060
  /* The object is put in, for example, section .text$foo.
3061
     The linker will then ultimately place them in .text
3062
     (everything from the $ on is stripped).  */
3063
  if (TREE_CODE (decl) == FUNCTION_DECL)
3064
    prefix = ".text$";
3065
  /* For compatibility with EPOC, we ignore the fact that the
3066
     section might have relocs against it.  */
3067
  else if (decl_readonly_section (decl, 0))
3068
    prefix = ".rdata$";
3069
  else
3070
    prefix = ".data$";
3071
 
3072
  len = strlen (name) + strlen (prefix);
3073
  string = XALLOCAVEC (char, len + 1);
3074
 
3075
  sprintf (string, "%s%s", prefix, name);
3076
 
3077
  DECL_SECTION_NAME (decl) = build_string (len, string);
3078
}
3079
 
3080
int
3081
mcore_naked_function_p (void)
3082
{
3083
  return lookup_attribute ("naked", DECL_ATTRIBUTES (current_function_decl)) != NULL_TREE;
3084
}
3085
 
3086
#ifdef OBJECT_FORMAT_ELF
3087
static void
3088
mcore_asm_named_section (const char *name,
3089
                         unsigned int flags ATTRIBUTE_UNUSED,
3090
                         tree decl ATTRIBUTE_UNUSED)
3091
{
3092
  fprintf (asm_out_file, "\t.section %s\n", name);
3093
}
3094
#endif /* OBJECT_FORMAT_ELF */
3095
 
3096
/* Worker function for TARGET_ASM_EXTERNAL_LIBCALL.  */
3097
 
3098
static void
3099
mcore_external_libcall (rtx fun)
3100
{
3101
  fprintf (asm_out_file, "\t.import\t");
3102
  assemble_name (asm_out_file, XSTR (fun, 0));
3103
  fprintf (asm_out_file, "\n");
3104
}
3105
 
3106
/* Worker function for TARGET_RETURN_IN_MEMORY.  */
3107
 
3108
static bool
3109
mcore_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
3110
{
3111
  const HOST_WIDE_INT size = int_size_in_bytes (type);
3112
  return (size == -1 || size > 2 * UNITS_PER_WORD);
3113
}
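
/* Editorial note, not in the original source: assuming 4-byte words, any
   aggregate larger than 8 bytes, or one whose size is not known at compile
   time (int_size_in_bytes returns -1), is returned in memory rather than
   in registers.  */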
3114
 
3115
/* Worker function for TARGET_ASM_TRAMPOLINE_TEMPLATE.
3116
   Output assembler code for a block containing the constant parts
3117
   of a trampoline, leaving space for the variable parts.
3118
 
3119
   On the MCore, the trampoline looks like:
3120
        lrw     r1,  function
3121
        lrw     r13, area
3122
        jmp     r13
3123
        or      r0, r0
3124
    .literals                                                */
3125
 
3126
static void
3127
mcore_asm_trampoline_template (FILE *f)
3128
{
3129
  fprintf (f, "\t.short 0x7102\n");
3130
  fprintf (f, "\t.short 0x7d02\n");
3131
  fprintf (f, "\t.short 0x00cd\n");
3132
  fprintf (f, "\t.short 0x1e00\n");
3133
  fprintf (f, "\t.long  0\n");
3134
  fprintf (f, "\t.long  0\n");
3135
}
3136
 
3137
/* Worker function for TARGET_TRAMPOLINE_INIT.  */
3138
 
3139
static void
3140
mcore_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
3141
{
3142
  rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
3143
  rtx mem;
3144
 
3145
  emit_block_move (m_tramp, assemble_trampoline_template (),
3146
                   GEN_INT (2*UNITS_PER_WORD), BLOCK_OP_NORMAL);
3147
 
3148
  mem = adjust_address (m_tramp, SImode, 8);
3149
  emit_move_insn (mem, chain_value);
3150
  mem = adjust_address (m_tramp, SImode, 12);
3151
  emit_move_insn (mem, fnaddr);
3152
}
