/* Output routines for Motorola MCore processor
   Copyright (C) 1993, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008,
   2009, 2010, 2011 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "mcore.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "obstack.h"
#include "expr.h"
#include "reload.h"
#include "recog.h"
#include "function.h"
#include "ggc.h"
#include "diagnostic-core.h"
#include "target.h"
#include "target-def.h"
#include "df.h"

/* For dumping information about frame sizes.  */
char * mcore_current_function_name = 0;
long   mcore_current_compilation_timestamp = 0;

/* Global variables for machine-dependent things.  */

/* Provides the class number of the smallest class containing
   reg number.  */
const enum reg_class regno_reg_class[FIRST_PSEUDO_REGISTER] =
{
  GENERAL_REGS, ONLYR1_REGS,  LRW_REGS,     LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS,     LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS,     LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS,     GENERAL_REGS,
  GENERAL_REGS, C_REGS,       NO_REGS,      NO_REGS,
};

struct mcore_frame
{
  int arg_size;                 /* Stdarg spills (bytes).  */
  int reg_size;                 /* Non-volatile reg saves (bytes).  */
  int reg_mask;                 /* Non-volatile reg saves.  */
  int local_size;               /* Locals.  */
  int outbound_size;            /* Arg overflow on calls out.  */
  int pad_outbound;
  int pad_local;
  int pad_reg;
  /* Describe the steps we'll use to grow it.  */
#define MAX_STACK_GROWS 4       /* Gives us some spare space.  */
  int growth[MAX_STACK_GROWS];
  int arg_offset;
  int reg_offset;
  int reg_growth;
  int local_growth;
};

typedef enum
{
  COND_NO,
  COND_MOV_INSN,
  COND_CLR_INSN,
  COND_INC_INSN,
  COND_DEC_INSN,
  COND_BRANCH_INSN
}
cond_type;

static void       output_stack_adjust           (int, int);
static int        calc_live_regs                (int *);
static int        try_constant_tricks           (HOST_WIDE_INT, HOST_WIDE_INT *, HOST_WIDE_INT *);
static const char *     output_inline_const     (enum machine_mode, rtx *);
static void       layout_mcore_frame            (struct mcore_frame *);
static void       mcore_setup_incoming_varargs  (cumulative_args_t, enum machine_mode, tree, int *, int);
static cond_type  is_cond_candidate             (rtx);
static rtx        emit_new_cond_insn            (rtx, int);
static rtx        conditionalize_block          (rtx);
static void       conditionalize_optimization   (void);
static void       mcore_reorg                   (void);
static rtx        handle_structs_in_regs        (enum machine_mode, const_tree, int);
static void       mcore_mark_dllexport          (tree);
static void       mcore_mark_dllimport          (tree);
static int        mcore_dllexport_p             (tree);
static int        mcore_dllimport_p             (tree);
static tree       mcore_handle_naked_attribute  (tree *, tree, tree, int, bool *);
#ifdef OBJECT_FORMAT_ELF
static void       mcore_asm_named_section       (const char *,
                                                 unsigned int, tree);
#endif
static void       mcore_print_operand           (FILE *, rtx, int);
static void       mcore_print_operand_address   (FILE *, rtx);
static bool       mcore_print_operand_punct_valid_p (unsigned char code);
static void       mcore_unique_section          (tree, int);
static void mcore_encode_section_info           (tree, rtx, int);
static const char *mcore_strip_name_encoding    (const char *);
static int        mcore_const_costs             (rtx, RTX_CODE);
static int        mcore_and_cost                (rtx);
static int        mcore_ior_cost                (rtx);
static bool       mcore_rtx_costs               (rtx, int, int, int,
                                                 int *, bool);
static void       mcore_external_libcall        (rtx);
static bool       mcore_return_in_memory        (const_tree, const_tree);
static int        mcore_arg_partial_bytes       (cumulative_args_t,
                                                 enum machine_mode,
                                                 tree, bool);
static rtx        mcore_function_arg            (cumulative_args_t,
                                                 enum machine_mode,
                                                 const_tree, bool);
static void       mcore_function_arg_advance    (cumulative_args_t,
                                                 enum machine_mode,
                                                 const_tree, bool);
static unsigned int mcore_function_arg_boundary (enum machine_mode,
                                                 const_tree);
static void       mcore_asm_trampoline_template (FILE *);
static void       mcore_trampoline_init         (rtx, tree, rtx);
static void       mcore_option_override         (void);
static bool       mcore_legitimate_constant_p   (enum machine_mode, rtx);

/* MCore specific attributes.  */

static const struct attribute_spec mcore_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "dllexport", 0, 0, true,  false, false, NULL, false },
  { "dllimport", 0, 0, true,  false, false, NULL, false },
  { "naked",     0, 0, true,  false, false, mcore_handle_naked_attribute,
    false },
  { NULL,        0, 0, false, false, false, NULL, false }
};

/* Initialize the GCC target structure.  */
#undef  TARGET_ASM_EXTERNAL_LIBCALL
#define TARGET_ASM_EXTERNAL_LIBCALL     mcore_external_libcall

#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef  TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES    merge_dllimport_decl_attributes
#endif

#ifdef OBJECT_FORMAT_ELF
#undef  TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef  TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#endif

#undef  TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND            mcore_print_operand
#undef  TARGET_PRINT_OPERAND_ADDRESS
#define TARGET_PRINT_OPERAND_ADDRESS    mcore_print_operand_address
#undef  TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P mcore_print_operand_punct_valid_p

#undef  TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE          mcore_attribute_table
#undef  TARGET_ASM_UNIQUE_SECTION
#define TARGET_ASM_UNIQUE_SECTION       mcore_unique_section
#undef  TARGET_ASM_FUNCTION_RODATA_SECTION
#define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
#undef  TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO      mcore_encode_section_info
#undef  TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING      mcore_strip_name_encoding
#undef  TARGET_RTX_COSTS
#define TARGET_RTX_COSTS                mcore_rtx_costs
#undef  TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST             hook_int_rtx_bool_0
#undef  TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG  mcore_reorg

#undef  TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE    default_promote_function_mode_always_promote
#undef  TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES       hook_bool_const_tree_true

#undef  TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY         mcore_return_in_memory
#undef  TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK       must_pass_in_stack_var_size
#undef  TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE  hook_pass_by_reference_must_pass_in_stack
#undef  TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES        mcore_arg_partial_bytes
#undef  TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG             mcore_function_arg
#undef  TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE     mcore_function_arg_advance
#undef  TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY    mcore_function_arg_boundary

#undef  TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS   mcore_setup_incoming_varargs

#undef  TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE  mcore_asm_trampoline_template
#undef  TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT          mcore_trampoline_init

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE mcore_option_override

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P mcore_legitimate_constant_p

struct gcc_target targetm = TARGET_INITIALIZER;

/* Adjust the stack pointer by SIZE bytes: extend the frame if DIRECTION
   is negative, release it if DIRECTION is positive.  */
static void
output_stack_adjust (int direction, int size)
{
  /* If extending stack a lot, we do it incrementally.  */
  if (direction < 0 && size > mcore_stack_increment && mcore_stack_increment > 0)
    {
      rtx tmp = gen_rtx_REG (SImode, 1);
      rtx memref;

      emit_insn (gen_movsi (tmp, GEN_INT (mcore_stack_increment)));
      do
        {
          emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
          memref = gen_rtx_MEM (SImode, stack_pointer_rtx);
          MEM_VOLATILE_P (memref) = 1;
          emit_insn (gen_movsi (memref, stack_pointer_rtx));
          size -= mcore_stack_increment;
        }
      while (size > mcore_stack_increment);

      /* SIZE is now the residual for the last adjustment,
         which doesn't require a probe.  */
    }

  if (size)
    {
      rtx insn;
      rtx val = GEN_INT (size);

      if (size > 32)
        {
          rtx nval = gen_rtx_REG (SImode, 1);
          emit_insn (gen_movsi (nval, val));
          val = nval;
        }

      if (direction > 0)
        insn = gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, val);
      else
        insn = gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, val);

      emit_insn (insn);
    }
}
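
/* Illustrative note (not part of the original source): with
   -mstack-increment=N, a frame larger than N bytes is allocated in N-byte
   steps, and each step stores the stack pointer back to the new
   top-of-stack so every page of the new frame is touched (a stack probe).
   Only the final residual adjustment is made without a probe.  */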

/* Work out the registers which need to be saved,
   both as a mask and a count.  */

static int
calc_live_regs (int * count)
{
  int reg;
  int live_regs_mask = 0;

  * count = 0;

  for (reg = 0; reg < FIRST_PSEUDO_REGISTER; reg++)
    {
      if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
        {
          (*count)++;
          live_regs_mask |= (1 << reg);
        }
    }

  return live_regs_mask;
}

/* Print the operand address in x to the stream.  */

static void
mcore_print_operand_address (FILE * stream, rtx x)
{
  switch (GET_CODE (x))
    {
    case REG:
      fprintf (stream, "(%s)", reg_names[REGNO (x)]);
      break;

    case PLUS:
      {
        rtx base = XEXP (x, 0);
        rtx index = XEXP (x, 1);

        if (GET_CODE (base) != REG)
          {
            /* Ensure that BASE is a register (one of them must be).  */
            rtx temp = base;
            base = index;
            index = temp;
          }

        switch (GET_CODE (index))
          {
          case CONST_INT:
            fprintf (stream, "(%s," HOST_WIDE_INT_PRINT_DEC ")",
                     reg_names[REGNO(base)], INTVAL (index));
            break;

          default:
            gcc_unreachable ();
          }
      }

      break;

    default:
      output_addr_const (stream, x);
      break;
    }
}

static bool
mcore_print_operand_punct_valid_p (unsigned char code)
{
  return (code == '.' || code == '#' || code == '*' || code == '^'
          || code == '!');
}

/* Print operand x (an rtx) in assembler syntax to file stream
   according to modifier code.

   'R'  print the next register or memory location along, i.e. the lsw in
        a double word value
   'O'  print a constant without the #
   'M'  print a constant as its negative
   'P'  print log2 of a power of two
   'Q'  print log2 of an inverse of a power of two
   'U'  print register for ldm/stm instruction
   'X'  print byte number for xtrbN instruction.  */

static void
mcore_print_operand (FILE * stream, rtx x, int code)
{
  switch (code)
    {
    case 'N':
      if (INTVAL(x) == -1)
        fprintf (asm_out_file, "32");
      else
        fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) + 1));
      break;
    case 'P':
      fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) & 0xffffffff));
      break;
    case 'Q':
      fprintf (asm_out_file, "%d", exact_log2 (~INTVAL (x)));
      break;
    case 'O':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
      break;
    case 'M':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, - INTVAL (x));
      break;
    case 'R':
      /* Next location along in memory or register.  */
      switch (GET_CODE (x))
        {
        case REG:
          fputs (reg_names[REGNO (x) + 1], (stream));
          break;
        case MEM:
          mcore_print_operand_address
            (stream, XEXP (adjust_address (x, SImode, 4), 0));
          break;
        default:
          gcc_unreachable ();
        }
      break;
    case 'U':
      fprintf (asm_out_file, "%s-%s", reg_names[REGNO (x)],
               reg_names[REGNO (x) + 3]);
      break;
    case 'x':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
      break;
    case 'X':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, 3 - INTVAL (x) / 8);
      break;

    default:
      switch (GET_CODE (x))
        {
        case REG:
          fputs (reg_names[REGNO (x)], (stream));
          break;
        case MEM:
          output_address (XEXP (x, 0));
          break;
        default:
          output_addr_const (stream, x);
          break;
        }
      break;
    }
}
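
/* Illustrative examples (not part of the original source), with X a
   CONST_INT and the usual rN register naming assumed: %P on 8 prints 3
   (its log2); %Q on ~8 prints 3; %M on 5 prints -5; %N on 7 prints 3
   (log2 of 7 + 1); %X on 16 prints 1 (the xtrbN byte number); %U on r4
   prints "r4-r7" for an ldm/stm register range.  */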

/* What does a constant cost ?  */

static int
mcore_const_costs (rtx exp, enum rtx_code code)
{
  HOST_WIDE_INT val = INTVAL (exp);

  /* Easy constants.  */
  if (   CONST_OK_FOR_I (val)
      || CONST_OK_FOR_M (val)
      || CONST_OK_FOR_N (val)
      || (code == PLUS && CONST_OK_FOR_L (val)))
    return 1;
  else if (code == AND
           && (   CONST_OK_FOR_M (~val)
               || CONST_OK_FOR_N (~val)))
    return 2;
  else if (code == PLUS
           && (   CONST_OK_FOR_I (-val)
               || CONST_OK_FOR_M (-val)
               || CONST_OK_FOR_N (-val)))
    return 2;

  return 5;
}

/* What does an AND instruction cost?  We do this because immediates may
   have been relaxed.  We want to ensure that CSE will CSE relaxed
   immediates out.  Otherwise we'll get bad code (multiple reloads of the
   same constant).  */

static int
mcore_and_cost (rtx x)
{
  HOST_WIDE_INT val;

  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return 2;

  val = INTVAL (XEXP (x, 1));

  /* Do it directly.  */
  if (CONST_OK_FOR_K (val) || CONST_OK_FOR_M (~val))
    return 2;
  /* Takes one instruction to load.  */
  else if (const_ok_for_mcore (val))
    return 3;
  /* Takes two instructions to load.  */
  else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
    return 4;

  /* Takes a lrw to load.  */
  return 5;
}

/* What does an or cost - see and_cost().  */

static int
mcore_ior_cost (rtx x)
{
  HOST_WIDE_INT val;

  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return 2;

  val = INTVAL (XEXP (x, 1));

  /* Do it directly with bclri.  */
  if (CONST_OK_FOR_M (val))
    return 2;
  /* Takes one instruction to load.  */
  else if (const_ok_for_mcore (val))
    return 3;
  /* Takes two instructions to load.  */
  else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
    return 4;

  /* Takes a lrw to load.  */
  return 5;
}

static bool
mcore_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
                 int * total, bool speed ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case CONST_INT:
      *total = mcore_const_costs (x, (enum rtx_code) outer_code);
      return true;
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 5;
      return true;
    case CONST_DOUBLE:
      *total = 10;
      return true;

    case AND:
      *total = COSTS_N_INSNS (mcore_and_cost (x));
      return true;

    case IOR:
      *total = COSTS_N_INSNS (mcore_ior_cost (x));
      return true;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
    case FLOAT:
    case FIX:
      *total = COSTS_N_INSNS (100);
      return true;

    default:
      return false;
    }
}

/* Prepare the operands for a comparison.  Return whether the branch/setcc
   should reverse the operands.  */

bool
mcore_gen_compare (enum rtx_code code, rtx op0, rtx op1)
{
  rtx cc_reg = gen_rtx_REG (CCmode, CC_REG);
  bool invert;

  if (GET_CODE (op1) == CONST_INT)
    {
      HOST_WIDE_INT val = INTVAL (op1);

      switch (code)
        {
        case GTU:
          /* Unsigned > 0 is the same as != 0; everything else is converted
             below to LEU (reversed cmphs).  */
          if (val == 0)
            code = NE;
          break;

        /* Check whether (LE A imm) can become (LT A imm + 1),
           or (GT A imm) can become (GE A imm + 1).  */
        case GT:
        case LE:
          if (CONST_OK_FOR_J (val + 1))
            {
              op1 = GEN_INT (val + 1);
              code = code == LE ? LT : GE;
            }
          break;

        default:
          break;
        }
    }

  if (CONSTANT_P (op1) && GET_CODE (op1) != CONST_INT)
    op1 = force_reg (SImode, op1);

  /* cmpnei: 0-31 (K immediate)
     cmplti: 1-32 (J immediate, 0 using btsti x,31).  */
  invert = false;
  switch (code)
    {
    case EQ:    /* Use inverted condition, cmpne.  */
      code = NE;
      invert = true;
      /* Drop through.  */

    case NE:    /* Use normal condition, cmpne.  */
      if (GET_CODE (op1) == CONST_INT && ! CONST_OK_FOR_K (INTVAL (op1)))
        op1 = force_reg (SImode, op1);
      break;

    case LE:    /* Use inverted condition, reversed cmplt.  */
      code = GT;
      invert = true;
      /* Drop through.  */

    case GT:    /* Use normal condition, reversed cmplt.  */
      if (GET_CODE (op1) == CONST_INT)
        op1 = force_reg (SImode, op1);
      break;

    case GE:    /* Use inverted condition, cmplt.  */
      code = LT;
      invert = true;
      /* Drop through.  */

    case LT:    /* Use normal condition, cmplt.  */
      if (GET_CODE (op1) == CONST_INT &&
          /* covered by btsti x,31.  */
          INTVAL (op1) != 0 &&
          ! CONST_OK_FOR_J (INTVAL (op1)))
        op1 = force_reg (SImode, op1);
      break;

    case GTU:   /* Use inverted condition, cmple.  */
      /* We coped with unsigned > 0 above.  */
      gcc_assert (GET_CODE (op1) != CONST_INT || INTVAL (op1) != 0);
      code = LEU;
      invert = true;
      /* Drop through.  */

    case LEU:   /* Use normal condition, reversed cmphs.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
        op1 = force_reg (SImode, op1);
      break;

    case LTU:   /* Use inverted condition, cmphs.  */
      code = GEU;
      invert = true;
      /* Drop through.  */

    case GEU:   /* Use normal condition, cmphs.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
        op1 = force_reg (SImode, op1);
      break;

    default:
      break;
    }

  emit_insn (gen_rtx_SET (VOIDmode,
                          cc_reg,
                          gen_rtx_fmt_ee (code, CCmode, op0, op1)));
  return invert;
}
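
/* Illustrative example (not part of the original source): for (LE r2, 10)
   the constant can be bumped to 11, which is a valid J immediate per the
   table above, so the test becomes (LT r2, 11) and the emitted compare is
   a cmplti with INVERT false.  For (EQ r2, r3) the compare emitted is a
   cmpne and INVERT is true, so the consumer tests the inverted condition
   bit.  */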

int
mcore_symbolic_address_p (rtx x)
{
  switch (GET_CODE (x))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return 1;
    case CONST:
      x = XEXP (x, 0);
      return (   (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
               || GET_CODE (XEXP (x, 0)) == LABEL_REF)
              && GET_CODE (XEXP (x, 1)) == CONST_INT);
    default:
      return 0;
    }
}

/* Functions to output assembly code for a function call.  */

char *
mcore_output_call (rtx operands[], int index)
{
  static char buffer[20];
  rtx addr = operands [index];

  if (REG_P (addr))
    {
      if (TARGET_CG_DATA)
        {
          gcc_assert (mcore_current_function_name);

          ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
                              "unknown", 1);
        }

      sprintf (buffer, "jsr\t%%%d", index);
    }
  else
    {
      if (TARGET_CG_DATA)
        {
          gcc_assert (mcore_current_function_name);
          gcc_assert (GET_CODE (addr) == SYMBOL_REF);

          ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
                              XSTR (addr, 0), 0);
        }

      sprintf (buffer, "jbsr\t%%%d", index);
    }

  return buffer;
}

/* Can we load a constant with a single instruction ?  */

int
const_ok_for_mcore (HOST_WIDE_INT value)
{
  if (value >= 0 && value <= 127)
    return 1;

  /* Try exact power of two.  */
  if (CONST_OK_FOR_M (value))
    return 1;

  /* Try exact power of two - 1.  */
  if (CONST_OK_FOR_N (value) && value != -1)
    return 1;

  return 0;
}
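
/* Illustrative examples (not part of the original source), following the
   comments above: 100 fits movi (0..127); 0x1000 is an exact power of two
   (bgeni); 0xff is a power of two minus one (bmaski); 0x1234 is none of
   these, so const_ok_for_mcore returns 0 and either an inline trick or a
   literal-pool lrw is needed.  */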

/* Can we load a constant inline with up to 2 instructions ?  */

int
mcore_const_ok_for_inline (HOST_WIDE_INT value)
{
  HOST_WIDE_INT x, y;

  return try_constant_tricks (value, & x, & y) > 0;
}

/* Are we loading the constant using a not ?  */

int
mcore_const_trick_uses_not (HOST_WIDE_INT value)
{
  HOST_WIDE_INT x, y;

  return try_constant_tricks (value, & x, & y) == 2;
}

/* Try tricks to load a constant inline and return the trick number if
   success (0 is non-inlinable).

   0: not inlinable
   1: single instruction (do the usual thing)
   2: single insn followed by a 'not'
   3: single insn followed by a subi
   4: single insn followed by an addi
   5: single insn followed by rsubi
   6: single insn followed by bseti
   7: single insn followed by bclri
   8: single insn followed by rotli
   9: single insn followed by lsli
   10: single insn followed by ixh
   11: single insn followed by ixw.  */

static int
try_constant_tricks (HOST_WIDE_INT value, HOST_WIDE_INT * x, HOST_WIDE_INT * y)
{
  HOST_WIDE_INT i;
  unsigned HOST_WIDE_INT bit, shf, rot;

  if (const_ok_for_mcore (value))
    return 1;   /* Do the usual thing.  */

  if (! TARGET_HARDLIT)
    return 0;

  if (const_ok_for_mcore (~value))
    {
      *x = ~value;
      return 2;
    }

  for (i = 1; i <= 32; i++)
    {
      if (const_ok_for_mcore (value - i))
        {
          *x = value - i;
          *y = i;

          return 3;
        }

      if (const_ok_for_mcore (value + i))
        {
          *x = value + i;
          *y = i;

          return 4;
        }
    }

  bit = 0x80000000ULL;

  for (i = 0; i <= 31; i++)
    {
      if (const_ok_for_mcore (i - value))
        {
          *x = i - value;
          *y = i;

          return 5;
        }

      if (const_ok_for_mcore (value & ~bit))
        {
          *y = bit;
          *x = value & ~bit;
          return 6;
        }

      if (const_ok_for_mcore (value | bit))
        {
          *y = ~bit;
          *x = value | bit;

          return 7;
        }

      bit >>= 1;
    }

  shf = value;
  rot = value;

  for (i = 1; i < 31; i++)
    {
      int c;

      /* MCore has rotate left.  */
      c = rot << 31;
      rot >>= 1;
      rot &= 0x7FFFFFFF;
      rot |= c;   /* Simulate rotate.  */

      if (const_ok_for_mcore (rot))
        {
          *y = i;
          *x = rot;

          return 8;
        }

      if (shf & 1)
        shf = 0; /* Can't use logical shift, low order bit is one.  */

      shf >>= 1;

      if (shf != 0 && const_ok_for_mcore (shf))
        {
          *y = i;
          *x = shf;

          return 9;
        }
    }

  if ((value % 3) == 0 && const_ok_for_mcore (value / 3))
    {
      *x = value / 3;

      return 10;
    }

  if ((value % 5) == 0 && const_ok_for_mcore (value / 5))
    {
      *x = value / 5;

      return 11;
    }

  return 0;
}
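
/* Illustrative example (not part of the original source): VALUE == -129 is
   not loadable directly, but ~(-129) == 128 is an exact power of two, so
   with -mhardlit the function returns trick 2 with *X == 128, and the
   expansion is a "bgeni rd,7" followed by a "not rd".  */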

/* Check whether reg is dead at first.  This is done by searching ahead
   for either the next use (i.e., reg is live), a death note, or a set of
   reg.  Don't just use dead_or_set_p() since reload does not always mark
   deaths (especially if PRESERVE_DEATH_NOTES_REGNO_P is not defined). We
   can ignore subregs by extracting the actual register.  BRC  */

int
mcore_is_dead (rtx first, rtx reg)
{
  rtx insn;

  /* For mcore, subregs can't live independently of their parent regs.  */
  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  /* Dies immediately.  */
  if (dead_or_set_p (first, reg))
    return 1;

  /* Look for conclusive evidence of live/death, otherwise we have
     to assume that it is live.  */
  for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == JUMP_INSN)
        return 0;        /* We lose track, assume it is alive.  */

      else if (GET_CODE(insn) == CALL_INSN)
        {
          /* Calls might use it for the target or register parms.  */
          if (reg_referenced_p (reg, PATTERN (insn))
              || find_reg_fusage (insn, USE, reg))
            return 0;
          else if (dead_or_set_p (insn, reg))
            return 1;
        }
      else if (GET_CODE (insn) == INSN)
        {
          if (reg_referenced_p (reg, PATTERN (insn)))
            return 0;
          else if (dead_or_set_p (insn, reg))
            return 1;
        }
    }

  /* No conclusive evidence either way, we cannot take the chance
     that control flow hid the use from us -- "I'm not dead yet".  */
  return 0;
}

/* Count the number of ones in mask.  */

int
mcore_num_ones (HOST_WIDE_INT mask)
{
  /* A trick to count set bits recently posted on comp.compilers.  */
  mask =  (mask >> 1  & 0x55555555) + (mask & 0x55555555);
  mask = ((mask >> 2) & 0x33333333) + (mask & 0x33333333);
  mask = ((mask >> 4) + mask) & 0x0f0f0f0f;
  mask = ((mask >> 8) + mask);

  return (mask + (mask >> 16)) & 0xff;
}

/* Count the number of zeros in mask.  */

int
mcore_num_zeros (HOST_WIDE_INT mask)
{
  return 32 - mcore_num_ones (mask);
}

/* Determine byte being masked.  */

int
mcore_byte_offset (unsigned int mask)
{
  if (mask == 0x00ffffffL)
    return 0;
  else if (mask == 0xff00ffffL)
    return 1;
  else if (mask == 0xffff00ffL)
    return 2;
  else if (mask == 0xffffff00L)
    return 3;

  return -1;
}

/* Determine halfword being masked.  */

int
mcore_halfword_offset (unsigned int mask)
{
  if (mask == 0x0000ffffL)
    return 0;
  else if (mask == 0xffff0000L)
    return 1;

  return -1;
}

/* Output a series of bseti's corresponding to mask.  */

const char *
mcore_output_bseti (rtx dst, int mask)
{
  rtx out_operands[2];
  int bit;

  out_operands[0] = dst;

  for (bit = 0; bit < 32; bit++)
    {
      if ((mask & 0x1) == 0x1)
        {
          out_operands[1] = GEN_INT (bit);

          output_asm_insn ("bseti\t%0,%1", out_operands);
        }
      mask >>= 1;
    }

  return "";
}
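
/* Illustrative example (not part of the original source): for MASK == 0x9
   the loop above emits "bseti rd,0" and "bseti rd,3", one instruction per
   set bit; mcore_output_bclri below does the converse, emitting one bclri
   per clear bit of its mask.  */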

/* Output a series of bclri's corresponding to mask.  */

const char *
mcore_output_bclri (rtx dst, int mask)
{
  rtx out_operands[2];
  int bit;

  out_operands[0] = dst;

  for (bit = 0; bit < 32; bit++)
    {
      if ((mask & 0x1) == 0x0)
        {
          out_operands[1] = GEN_INT (bit);

          output_asm_insn ("bclri\t%0,%1", out_operands);
        }

      mask >>= 1;
    }

  return "";
}

/* Output a conditional move of two constants that are +/- 1 within each
   other.  See the "movtK" patterns in mcore.md.   I'm not sure this is
   really worth the effort.  */

const char *
mcore_output_cmov (rtx operands[], int cmp_t, const char * test)
{
  HOST_WIDE_INT load_value;
  HOST_WIDE_INT adjust_value;
  rtx out_operands[4];

  out_operands[0] = operands[0];

  /* Check to see which constant is loadable.  */
  if (const_ok_for_mcore (INTVAL (operands[1])))
    {
      out_operands[1] = operands[1];
      out_operands[2] = operands[2];
    }
  else if (const_ok_for_mcore (INTVAL (operands[2])))
    {
      out_operands[1] = operands[2];
      out_operands[2] = operands[1];

      /* Complement test since constants are swapped.  */
      cmp_t = (cmp_t == 0);
    }
  load_value   = INTVAL (out_operands[1]);
  adjust_value = INTVAL (out_operands[2]);

  /* First output the test if folded into the pattern.  */

  if (test)
    output_asm_insn (test, operands);

  /* Load the constant - for now, only support constants that can be
     generated with a single instruction.  maybe add general inlinable
     constants later (this will increase the # of patterns since the
     instruction sequence has a different length attribute).  */
  if (load_value >= 0 && load_value <= 127)
    output_asm_insn ("movi\t%0,%1", out_operands);
  else if (CONST_OK_FOR_M (load_value))
    output_asm_insn ("bgeni\t%0,%P1", out_operands);
  else if (CONST_OK_FOR_N (load_value))
    output_asm_insn ("bmaski\t%0,%N1", out_operands);

  /* Output the constant adjustment.  */
  if (load_value > adjust_value)
    {
      if (cmp_t)
        output_asm_insn ("decf\t%0", out_operands);
      else
        output_asm_insn ("dect\t%0", out_operands);
    }
  else
    {
      if (cmp_t)
        output_asm_insn ("incf\t%0", out_operands);
      else
        output_asm_insn ("inct\t%0", out_operands);
    }

  return "";
}

/* Outputs the peephole for moving a constant that gets not'ed followed
   by an and (i.e. combine the not and the and into andn). BRC  */

const char *
mcore_output_andn (rtx insn ATTRIBUTE_UNUSED, rtx operands[])
{
  HOST_WIDE_INT x, y;
  rtx out_operands[3];
  const char * load_op;
  char buf[256];
  int trick_no;

  trick_no = try_constant_tricks (INTVAL (operands[1]), &x, &y);
  gcc_assert (trick_no == 2);

  out_operands[0] = operands[0];
  out_operands[1] = GEN_INT (x);
  out_operands[2] = operands[2];

  if (x >= 0 && x <= 127)
    load_op = "movi\t%0,%1";

  /* Try exact power of two.  */
  else if (CONST_OK_FOR_M (x))
    load_op = "bgeni\t%0,%P1";

  /* Try exact power of two - 1.  */
  else if (CONST_OK_FOR_N (x))
    load_op = "bmaski\t%0,%N1";

  else
    {
      load_op = "BADMOVI-andn\t%0, %1";
      gcc_unreachable ();
    }

  sprintf (buf, "%s\n\tandn\t%%2,%%0", load_op);
  output_asm_insn (buf, out_operands);

  return "";
}

/* Output an inline constant.  */

static const char *
output_inline_const (enum machine_mode mode, rtx operands[])
{
  HOST_WIDE_INT x = 0, y = 0;
  int trick_no;
  rtx out_operands[3];
  char buf[256];
  char load_op[256];
  const char *dst_fmt;
  HOST_WIDE_INT value;

  value = INTVAL (operands[1]);

  trick_no = try_constant_tricks (value, &x, &y);
  /* lrw's are handled separately: Large inlinable constants never get
     turned into lrw's.  Our caller uses try_constant_tricks to back
     off to an lrw rather than calling this routine.  */
  gcc_assert (trick_no != 0);

  if (trick_no == 1)
    x = value;

  /* operands: 0 = dst, 1 = load immed., 2 = immed. adjustment.  */
  out_operands[0] = operands[0];
  out_operands[1] = GEN_INT (x);

  if (trick_no > 2)
    out_operands[2] = GEN_INT (y);

  /* Select dst format based on mode.  */
  if (mode == DImode && (! TARGET_LITTLE_END))
    dst_fmt = "%R0";
  else
    dst_fmt = "%0";

  if (x >= 0 && x <= 127)
    sprintf (load_op, "movi\t%s,%%1", dst_fmt);

  /* Try exact power of two.  */
  else if (CONST_OK_FOR_M (x))
    sprintf (load_op, "bgeni\t%s,%%P1", dst_fmt);

  /* Try exact power of two - 1.  */
  else if (CONST_OK_FOR_N (x))
    sprintf (load_op, "bmaski\t%s,%%N1", dst_fmt);

  else
    {
      sprintf (load_op, "BADMOVI-inline_const %s, %%1", dst_fmt);
      gcc_unreachable ();
    }

  switch (trick_no)
    {
    case 1:
      strcpy (buf, load_op);
      break;
    case 2:   /* not */
      sprintf (buf, "%s\n\tnot\t%s\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 3:   /* add */
      sprintf (buf, "%s\n\taddi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 4:   /* sub */
      sprintf (buf, "%s\n\tsubi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 5:   /* rsub */
      /* Never happens unless -mrsubi, see try_constant_tricks().  */
      sprintf (buf, "%s\n\trsubi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 6:   /* bseti */
      sprintf (buf, "%s\n\tbseti\t%s,%%P2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 7:   /* bclr */
      sprintf (buf, "%s\n\tbclri\t%s,%%Q2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 8:   /* rotl */
      sprintf (buf, "%s\n\trotli\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 9:   /* lsl */
      sprintf (buf, "%s\n\tlsli\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 10:  /* ixh */
      sprintf (buf, "%s\n\tixh\t%s,%s\t// %ld 0x%lx", load_op, dst_fmt, dst_fmt, value, value);
      break;
    case 11:  /* ixw */
      sprintf (buf, "%s\n\tixw\t%s,%s\t// %ld 0x%lx", load_op, dst_fmt, dst_fmt, value, value);
      break;
    default:
      return "";
    }

  output_asm_insn (buf, out_operands);

  return "";
}

/* Output a move of a word or less value.  */

const char *
mcore_output_move (rtx insn ATTRIBUTE_UNUSED, rtx operands[],
                   enum machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (GET_CODE (dst) == REG)
    {
      if (GET_CODE (src) == REG)
        {
          if (REGNO (src) == CC_REG)            /* r-c */
            return "mvc\t%0";
          else
            return "mov\t%0,%1";                /* r-r*/
        }
      else if (GET_CODE (src) == MEM)
        {
          if (GET_CODE (XEXP (src, 0)) == LABEL_REF)
            return "lrw\t%0,[%1]";              /* a-R */
          else
            switch (GET_MODE (src))             /* r-m */
              {
              case SImode:
                return "ldw\t%0,%1";
              case HImode:
                return "ld.h\t%0,%1";
              case QImode:
                return "ld.b\t%0,%1";
              default:
                gcc_unreachable ();
              }
        }
      else if (GET_CODE (src) == CONST_INT)
        {
          HOST_WIDE_INT x, y;

          if (CONST_OK_FOR_I (INTVAL (src)))       /* r-I */
            return "movi\t%0,%1";
          else if (CONST_OK_FOR_M (INTVAL (src)))  /* r-M */
            return "bgeni\t%0,%P1\t// %1 %x1";
          else if (CONST_OK_FOR_N (INTVAL (src)))  /* r-N */
            return "bmaski\t%0,%N1\t// %1 %x1";
          else if (try_constant_tricks (INTVAL (src), &x, &y))     /* R-P */
            return output_inline_const (SImode, operands);  /* 1-2 insns */
          else
            return "lrw\t%0,%x1\t// %1";        /* Get it from literal pool.  */
        }
      else
        return "lrw\t%0, %1";                /* Into the literal pool.  */
    }
  else if (GET_CODE (dst) == MEM)               /* m-r */
    switch (GET_MODE (dst))
      {
      case SImode:
        return "stw\t%1,%0";
      case HImode:
        return "st.h\t%1,%0";
      case QImode:
        return "st.b\t%1,%0";
      default:
        gcc_unreachable ();
      }

  gcc_unreachable ();
}

/* Return a sequence of instructions to perform DI or DF move.
   Since the MCORE cannot move a DI or DF in one instruction, we have
   to take care when we see overlapping source and dest registers.  */

const char *
mcore_output_movedouble (rtx operands[], enum machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (GET_CODE (dst) == REG)
    {
      if (GET_CODE (src) == REG)
        {
          int dstreg = REGNO (dst);
          int srcreg = REGNO (src);

          /* Ensure the second source is not overwritten.  */
          if (srcreg + 1 == dstreg)
            return "mov %R0,%R1\n\tmov  %0,%1";
          else
            return "mov %0,%1\n\tmov    %R0,%R1";
        }
      else if (GET_CODE (src) == MEM)
        {
          rtx memexp = XEXP (src, 0);
          int dstreg = REGNO (dst);
          int basereg = -1;

          if (GET_CODE (memexp) == LABEL_REF)
            return "lrw\t%0,[%1]\n\tlrw\t%R0,[%R1]";
          else if (GET_CODE (memexp) == REG)
            basereg = REGNO (memexp);
          else if (GET_CODE (memexp) == PLUS)
            {
              if (GET_CODE (XEXP (memexp, 0)) == REG)
                basereg = REGNO (XEXP (memexp, 0));
              else if (GET_CODE (XEXP (memexp, 1)) == REG)
                basereg = REGNO (XEXP (memexp, 1));
              else
                gcc_unreachable ();
            }
          else
            gcc_unreachable ();

          /* ??? length attribute is wrong here.  */
          if (dstreg == basereg)
            {
              /* Just load them in reverse order.  */
              return "ldw\t%R0,%R1\n\tldw\t%0,%1";

              /* XXX: alternative: move basereg to basereg+1
                 and then fall through.  */
            }
          else
            return "ldw\t%0,%1\n\tldw\t%R0,%R1";
        }
      else if (GET_CODE (src) == CONST_INT)
        {
          if (TARGET_LITTLE_END)
            {
              if (CONST_OK_FOR_I (INTVAL (src)))
                output_asm_insn ("movi  %0,%1", operands);
              else if (CONST_OK_FOR_M (INTVAL (src)))
                output_asm_insn ("bgeni %0,%P1", operands);
              else if (CONST_OK_FOR_N (INTVAL (src)))
                output_asm_insn ("bmaski        %0,%N1", operands);
              else
                gcc_unreachable ();

              if (INTVAL (src) < 0)
                return "bmaski  %R0,32";
              else
                return "movi    %R0,0";
            }
          else
            {
              if (CONST_OK_FOR_I (INTVAL (src)))
                output_asm_insn ("movi  %R0,%1", operands);
              else if (CONST_OK_FOR_M (INTVAL (src)))
                output_asm_insn ("bgeni %R0,%P1", operands);
              else if (CONST_OK_FOR_N (INTVAL (src)))
                output_asm_insn ("bmaski        %R0,%N1", operands);
              else
                gcc_unreachable ();

              if (INTVAL (src) < 0)
                return "bmaski  %0,32";
              else
                return "movi    %0,0";
            }
        }
      else
        gcc_unreachable ();
    }
  else if (GET_CODE (dst) == MEM && GET_CODE (src) == REG)
    return "stw\t%1,%0\n\tstw\t%R1,%R0";
  else
    gcc_unreachable ();
}
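
/* Illustrative example (not part of the original source): copying the
   register pair r2/r3 into r3/r4 satisfies srcreg + 1 == dstreg, so the
   second register of the pair is moved first ("mov r4,r3" then
   "mov r3,r2"); moving the first one first would clobber r3, the second
   half of the source.  */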

/* Predicates used by the templates.  */

int
mcore_arith_S_operand (rtx op)
{
  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (~INTVAL (op)))
    return 1;

  return 0;
}

/* Expand insert bit field.  BRC  */

int
mcore_expand_insv (rtx operands[])
{
  int width = INTVAL (operands[1]);
  int posn = INTVAL (operands[2]);
  int mask;
  rtx mreg, sreg, ereg;

  /* To get width 1 insv, the test in store_bit_field() (expmed.c, line 191)
     for width==1 must be removed.  Look around line 368.  This is something
     we really want the md part to do.  */
  if (width == 1 && GET_CODE (operands[3]) == CONST_INT)
    {
      /* Do directly with bseti or bclri.  */
      /* RBE: 2/97 consider only low bit of constant.  */
      if ((INTVAL (operands[3]) & 1) == 0)
        {
          mask = ~(1 << posn);
          emit_insn (gen_rtx_SET (SImode, operands[0],
                              gen_rtx_AND (SImode, operands[0], GEN_INT (mask))));
        }
      else
        {
          mask = 1 << posn;
          emit_insn (gen_rtx_SET (SImode, operands[0],
                            gen_rtx_IOR (SImode, operands[0], GEN_INT (mask))));
        }

      return 1;
    }

  /* Look at some bit-field placements that we aren't interested
     in handling ourselves, unless specifically directed to do so.  */
  if (! TARGET_W_FIELD)
    return 0;            /* Generally, give up about now.  */

  if (width == 8 && posn % 8 == 0)
    /* Byte sized and aligned; let caller break it up.  */
    return 0;

  if (width == 16 && posn % 16 == 0)
    /* Short sized and aligned; let caller break it up.  */
    return 0;

  /* The general case - we can do this a little bit better than what the
     machine independent part tries.  This will get rid of all the subregs
     that mess up constant folding in combine when working with relaxed
     immediates.  */

  /* If setting the entire field, do it directly.  */
  if (GET_CODE (operands[3]) == CONST_INT
      && INTVAL (operands[3]) == ((1 << width) - 1))
    {
      mreg = force_reg (SImode, GEN_INT (INTVAL (operands[3]) << posn));
      emit_insn (gen_rtx_SET (SImode, operands[0],
                         gen_rtx_IOR (SImode, operands[0], mreg)));
      return 1;
    }

  /* Generate the clear mask.  */
  mreg = force_reg (SImode, GEN_INT (~(((1 << width) - 1) << posn)));

  /* Clear the field, to overlay it later with the source.  */
  emit_insn (gen_rtx_SET (SImode, operands[0],
                      gen_rtx_AND (SImode, operands[0], mreg)));

  /* If the source is constant 0, we've nothing to add back.  */
  if (GET_CODE (operands[3]) == CONST_INT && INTVAL (operands[3]) == 0)
    return 1;

  /* XXX: Should we worry about more games with constant values?
     We've covered the high profile: set/clear single-bit and many-bit
     fields. How often do we see "arbitrary bit pattern" constants?  */
  sreg = copy_to_mode_reg (SImode, operands[3]);

  /* Extract src as same width as dst (needed for signed values).  We
     always have to do this since we widen everything to SImode.
     We don't have to mask if we're shifting this up against the
     MSB of the register (e.g., the shift will push out any hi-order
     bits).  */
  if (width + posn != (int) GET_MODE_SIZE (SImode))
    {
      ereg = force_reg (SImode, GEN_INT ((1 << width) - 1));
      emit_insn (gen_rtx_SET (SImode, sreg,
                          gen_rtx_AND (SImode, sreg, ereg)));
    }

  /* Insert source value in dest.  */
  if (posn != 0)
    emit_insn (gen_rtx_SET (SImode, sreg,
                        gen_rtx_ASHIFT (SImode, sreg, GEN_INT (posn))));

  emit_insn (gen_rtx_SET (SImode, operands[0],
                      gen_rtx_IOR (SImode, operands[0], sreg)));

  return 1;
}
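
/* Illustrative example (not part of the original source): storing the
   constant 1 into a 1-bit field at bit position 5 takes the width == 1
   fast path above and emits a single IOR with the mask 0x20, which the
   backend can match as "bseti rd,5"; storing 0 instead emits the AND with
   ~0x20, matching "bclri rd,5".  */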

/* ??? Block move stuff stolen from m88k.  This code has not been
   verified for correctness.  */

/* Emit code to perform a block move.  Choose the best method.

   OPERANDS[0] is the destination.
   OPERANDS[1] is the source.
   OPERANDS[2] is the size.
   OPERANDS[3] is the alignment safe to use.  */

/* Emit code to perform a block move with an offset sequence of ldw/st
   instructions (..., ldw 0, stw 1, ldw 1, stw 0, ...).  SIZE and ALIGN are
   known constants.  DEST and SRC are registers.  OFFSET is the known
   starting point for the output pattern.  */

static const enum machine_mode mode_from_align[] =
{
  VOIDmode, QImode, HImode, VOIDmode, SImode,
};

static void
block_move_sequence (rtx dst_mem, rtx src_mem, int size, int align)
{
  rtx temp[2];
  enum machine_mode mode[2];
  int amount[2];
  bool active[2];
  int phase = 0;
  int next;
  int offset_ld = 0;
  int offset_st = 0;
  rtx x;

  x = XEXP (dst_mem, 0);
  if (!REG_P (x))
    {
      x = force_reg (Pmode, x);
      dst_mem = replace_equiv_address (dst_mem, x);
    }

  x = XEXP (src_mem, 0);
  if (!REG_P (x))
    {
      x = force_reg (Pmode, x);
      src_mem = replace_equiv_address (src_mem, x);
    }

  active[0] = active[1] = false;

  do
    {
      next = phase;
      phase ^= 1;

      if (size > 0)
        {
          int next_amount;

          next_amount = (size >= 4 ? 4 : (size >= 2 ? 2 : 1));
          next_amount = MIN (next_amount, align);

          amount[next] = next_amount;
          mode[next] = mode_from_align[next_amount];
          temp[next] = gen_reg_rtx (mode[next]);

          x = adjust_address (src_mem, mode[next], offset_ld);
          emit_insn (gen_rtx_SET (VOIDmode, temp[next], x));

          offset_ld += next_amount;
          size -= next_amount;
          active[next] = true;
        }

      if (active[phase])
        {
          active[phase] = false;

          x = adjust_address (dst_mem, mode[phase], offset_st);
          emit_insn (gen_rtx_SET (VOIDmode, x, temp[phase]));

          offset_st += amount[phase];
        }
    }
  while (active[next]);
}
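
/* Illustrative example (not part of the original source): for SIZE == 6
   and ALIGN == 2 the two-phase loop above emits each load one step ahead
   of the matching store, roughly: ld.h 0; ld.h 2; st.h 0; ld.h 4; st.h 2;
   st.h 4 (byte offsets), so a load has a chance to complete before its
   store issues.  */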

bool
mcore_expand_block_move (rtx *operands)
{
  HOST_WIDE_INT align, bytes, max;

  if (GET_CODE (operands[2]) != CONST_INT)
    return false;

  bytes = INTVAL (operands[2]);
  align = INTVAL (operands[3]);

  if (bytes <= 0)
    return false;
  if (align > 4)
    align = 4;

  switch (align)
    {
    case 4:
      if (bytes & 1)
        max = 4*4;
      else if (bytes & 3)
        max = 8*4;
      else
        max = 16*4;
      break;
    case 2:
      max = 4*2;
      break;
    case 1:
      max = 4*1;
      break;
    default:
      gcc_unreachable ();
    }

  if (bytes <= max)
    {
      block_move_sequence (operands[0], operands[1], bytes, align);
      return true;
    }

  return false;
}
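
/* Illustrative example (not part of the original source): a 12-byte copy
   of word-aligned data is a multiple of 4, so MAX is 64 and the move is
   expanded inline by block_move_sequence; the same 12 bytes with only
   byte alignment has MAX == 4, so the expander returns false and the move
   falls back to a library call.  */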



/* Code to generate prologue and epilogue sequences.  */
static int number_of_regs_before_varargs;

/* Set by TARGET_SETUP_INCOMING_VARARGS to indicate to prolog that this is
   for a varargs function.  */
static int current_function_anonymous_args;

#define STACK_BYTES (STACK_BOUNDARY/BITS_PER_UNIT)
#define STORE_REACH (64)        /* Maximum displace of word store + 4.  */
#define ADDI_REACH (32)         /* Maximum addi operand.  */

static void
layout_mcore_frame (struct mcore_frame * infp)
{
  int n;
  unsigned int i;
  int nbytes;
  int regarg;
  int localregarg;
  int outbounds;
  unsigned int growths;
  int step;

  /* Might have to spill bytes to re-assemble a big argument that
     was passed partially in registers and partially on the stack.  */
  nbytes = crtl->args.pretend_args_size;

  /* Determine how much space for spilled anonymous args (e.g., stdarg).  */
  if (current_function_anonymous_args)
    nbytes += (NPARM_REGS - number_of_regs_before_varargs) * UNITS_PER_WORD;

  infp->arg_size = nbytes;

  /* How much space to save non-volatile registers we stomp.  */
  infp->reg_mask = calc_live_regs (& n);
  infp->reg_size = n * 4;

  /* And the rest of it... locals and space for overflowed outbounds.  */
  infp->local_size = get_frame_size ();
  infp->outbound_size = crtl->outgoing_args_size;

  /* Make sure we have a whole number of words for the locals.  */
  if (infp->local_size % STACK_BYTES)
    infp->local_size = (infp->local_size + STACK_BYTES - 1) & ~ (STACK_BYTES -1);

  /* Only thing we know we have to pad is the outbound space, since
     we've aligned our locals assuming that base of locals is aligned.  */
  infp->pad_local = 0;
  infp->pad_reg = 0;
  infp->pad_outbound = 0;
  if (infp->outbound_size % STACK_BYTES)
    infp->pad_outbound = STACK_BYTES - (infp->outbound_size % STACK_BYTES);

  /* Now we see how we want to stage the prologue so that it does
     the most appropriate stack growth and register saves to either:
     (1) run fast,
     (2) reduce instruction space, or
     (3) reduce stack space.  */
  for (i = 0; i < ARRAY_SIZE (infp->growth); i++)
    infp->growth[i] = 0;

  regarg      = infp->reg_size + infp->arg_size;
  localregarg = infp->local_size + regarg;
  outbounds   = infp->outbound_size + infp->pad_outbound;
  growths     = 0;

  /* XXX: Consider one where we consider localregarg + outbound too! */

  /* Frame of <= 32 bytes and using stm would get <= 2 registers.
     use stw's with offsets and buy the frame in one shot.  */
  if (localregarg <= ADDI_REACH
      && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
    {
      /* Make sure we'll be aligned.  */
      if (localregarg % STACK_BYTES)
        infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);

      step = localregarg + infp->pad_reg;
      infp->reg_offset = infp->local_size;

      if (outbounds + step <= ADDI_REACH && !frame_pointer_needed)
        {
          step += outbounds;
          infp->reg_offset += outbounds;
          outbounds = 0;
        }

      infp->arg_offset = step - 4;
      infp->growth[growths++] = step;
      infp->reg_growth = growths;
      infp->local_growth = growths;

      /* If we haven't already folded it in.  */
      if (outbounds)
        infp->growth[growths++] = outbounds;

      goto finish;
    }

  /* Frame can't be done with a single subi, but can be done with 2
     insns.  If the 'stm' is getting <= 2 registers, we use stw's and
     shift some of the stack purchase into the first subi, so both are
     single instructions.  */
  if (localregarg <= STORE_REACH
      && (infp->local_size > ADDI_REACH)
      && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
    {
      int all;

      /* Make sure we'll be aligned; use either pad_reg or pad_local.  */
      if (localregarg % STACK_BYTES)
        infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);

      all = localregarg + infp->pad_reg + infp->pad_local;
      step = ADDI_REACH;        /* As much up front as we can.  */
      if (step > all)
        step = all;

      /* XXX: Consider whether step will still be aligned; we believe so.  */
      infp->arg_offset = step - 4;
      infp->growth[growths++] = step;
      infp->reg_growth = growths;
      infp->reg_offset = step - infp->pad_reg - infp->reg_size;
      all -= step;

      /* Can we fold in any space required for outbounds?  */
      if (outbounds + all <= ADDI_REACH && !frame_pointer_needed)
        {
          all += outbounds;
          outbounds = 0;
        }

      /* Get the rest of the locals in place.  */
      step = all;
      infp->growth[growths++] = step;
      infp->local_growth = growths;
      all -= step;

      gcc_assert (all == 0);

      /* Finish off if we need to do so.  */
      if (outbounds)
        infp->growth[growths++] = outbounds;

      goto finish;
    }

  /* Registers + args is nicely aligned, so we'll buy that in one shot.
     Then we buy the rest of the frame in 1 or 2 steps depending on
     whether we need a frame pointer.  */
  if ((regarg % STACK_BYTES) == 0)
    {
      infp->growth[growths++] = regarg;
      infp->reg_growth = growths;
      infp->arg_offset = regarg - 4;
      infp->reg_offset = 0;

      if (infp->local_size % STACK_BYTES)
        infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);

      step = infp->local_size + infp->pad_local;

      if (!frame_pointer_needed)
        {
          step += outbounds;
          outbounds = 0;
        }

      infp->growth[growths++] = step;
      infp->local_growth = growths;

      /* If there's any left to be done.  */
      if (outbounds)
        infp->growth[growths++] = outbounds;

      goto finish;
    }

  /* XXX: optimizations that we'll want to play with....
     -- regarg is not aligned, but it's a small number of registers;
        use some of localsize so that regarg is aligned and then
        save the registers.  */

  /* Simple encoding; plods down the stack buying the pieces as it goes.
     -- does not optimize space consumption.
     -- does not attempt to optimize instruction counts.
     -- but it is safe for all alignments.  */
  if (regarg % STACK_BYTES != 0)
    infp->pad_reg = STACK_BYTES - (regarg % STACK_BYTES);

  infp->growth[growths++] = infp->arg_size + infp->reg_size + infp->pad_reg;
1851
  infp->reg_growth = growths;
1852
  infp->arg_offset = infp->growth[0] - 4;
1853
  infp->reg_offset = 0;
1854
 
1855
  if (frame_pointer_needed)
1856
    {
1857
      if (infp->local_size % STACK_BYTES != 0)
1858
        infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);
1859
 
1860
      infp->growth[growths++] = infp->local_size + infp->pad_local;
1861
      infp->local_growth = growths;
1862
 
1863
      infp->growth[growths++] = outbounds;
1864
    }
1865
  else
1866
    {
1867
      if ((infp->local_size + outbounds) % STACK_BYTES != 0)
1868
        infp->pad_local = STACK_BYTES - ((infp->local_size + outbounds) % STACK_BYTES);
1869
 
1870
      infp->growth[growths++] = infp->local_size + infp->pad_local + outbounds;
1871
      infp->local_growth = growths;
1872
    }
1873
 
1874
  /* Anything else that we've forgotten?  Plus a few consistency checks.  */
1875
 finish:
1876
  gcc_assert (infp->reg_offset >= 0);
1877
  gcc_assert (growths <= MAX_STACK_GROWS);
1878
 
1879
  for (i = 0; i < growths; i++)
1880
    gcc_assert (!(infp->growth[i] % STACK_BYTES));
1881
}
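/* Editor's illustration (not part of the original sources): a worked
   example of the first layout strategy above, assuming STACK_BYTES == 8
   and UNITS_PER_WORD == 4.  Suppose a function needs

     arg_size      = 0    (no stdarg spills)
     reg_size      = 8    (two callee-saved registers)
     local_size    = 8
     outbound_size = 0

   Then localregarg == 16 <= ADDI_REACH, so the whole frame is bought in
   one step:

     growth[0]  = 16     -> the prologue emits a single 16 byte adjust
     reg_offset = 8      -> the register saves go to (sp + 8) and (sp + 12)
     arg_offset = 12
     reg_growth = local_growth = 1

   and the locals occupy (sp + 0) .. (sp + 7).  The epilogue reverses it
   with a single 16 byte adjust after reloading the two registers.  */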
1882
 
1883
/* Define the offset between two registers, one to be eliminated, and
1884
   the other its replacement, at the start of a routine.  */
1885
 
1886
int
1887
mcore_initial_elimination_offset (int from, int to)
1888
{
1889
  int above_frame;
1890
  int below_frame;
1891
  struct mcore_frame fi;
1892
 
1893
  layout_mcore_frame (& fi);
1894
 
1895
  /* fp to ap */
1896
  above_frame = fi.local_size + fi.pad_local + fi.reg_size + fi.pad_reg;
1897
  /* sp to fp */
1898
  below_frame = fi.outbound_size + fi.pad_outbound;
1899
 
1900
  if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
1901
    return above_frame;
1902
 
1903
  if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
1904
    return above_frame + below_frame;
1905
 
1906
  if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
1907
    return below_frame;
1908
 
1909
  gcc_unreachable ();
1910
}
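/* Editor's note (illustration only): continuing the example after
   layout_mcore_frame above (local_size = 8, reg_size = 8, no pads, no
   outbound area), the eliminations work out to

     arg pointer   -> frame pointer : above_frame = 8 + 0 + 8 + 0 = 16
     arg pointer   -> stack pointer : 16 + 0 = 16
     frame pointer -> stack pointer : below_frame = 0.  */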
1911
 
1912
/* Keep track of some information about varargs for the prolog.  */
1913
 
1914
static void
1915
mcore_setup_incoming_varargs (cumulative_args_t args_so_far_v,
1916
                              enum machine_mode mode, tree type,
1917
                              int * ptr_pretend_size ATTRIBUTE_UNUSED,
1918
                              int second_time ATTRIBUTE_UNUSED)
1919
{
1920
  CUMULATIVE_ARGS *args_so_far = get_cumulative_args (args_so_far_v);
1921
 
1922
  current_function_anonymous_args = 1;
1923
 
1924
  /* We need to know how many argument registers are used before
1925
     the varargs start, so that we can push the remaining argument
1926
     registers during the prologue.  */
1927
  number_of_regs_before_varargs = *args_so_far + mcore_num_arg_regs (mode, type);
1928
 
1929
  /* There is a bug somewhere in the arg handling code.
1930
     Until I can find it this workaround always pushes the
1931
     last named argument onto the stack.  */
1932
  number_of_regs_before_varargs = *args_so_far;
1933
 
1934
  /* The last named argument may be split between argument registers
1935
     and the stack.  Allow for this here.  */
1936
  if (number_of_regs_before_varargs > NPARM_REGS)
1937
    number_of_regs_before_varargs = NPARM_REGS;
1938
}
1939
 
1940
void
1941
mcore_expand_prolog (void)
1942
{
1943
  struct mcore_frame fi;
1944
  int space_allocated = 0;
1945
  int growth = 0;
1946
 
1947
  /* Find out what we're doing.  */
1948
  layout_mcore_frame (&fi);
1949
 
1950
  space_allocated = fi.arg_size + fi.reg_size + fi.local_size +
1951
    fi.outbound_size + fi.pad_outbound + fi.pad_local + fi.pad_reg;
1952
 
1953
  if (TARGET_CG_DATA)
1954
    {
1955
      /* Emit a symbol for this routine's frame size.  */
1956
      rtx x;
1957
 
1958
      x = DECL_RTL (current_function_decl);
1959
 
1960
      gcc_assert (GET_CODE (x) == MEM);
1961
 
1962
      x = XEXP (x, 0);
1963
 
1964
      gcc_assert (GET_CODE (x) == SYMBOL_REF);
1965
 
1966
      free (mcore_current_function_name);
1967
 
1968
      mcore_current_function_name = xstrdup (XSTR (x, 0));
1969
 
1970
      ASM_OUTPUT_CG_NODE (asm_out_file, mcore_current_function_name, space_allocated);
1971
 
1972
      if (cfun->calls_alloca)
1973
        ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name, "alloca", 1);
1974
 
1975
      /* 970425: RBE:
1976
         We're looking at how the 8byte alignment affects stack layout
1977
         and where we had to pad things. This emits information we can
1978
         extract which tells us about frame sizes and the like.  */
1979
      fprintf (asm_out_file,
1980
               "\t.equ\t__$frame$info$_%s_$_%d_%d_x%x_%d_%d_%d,0\n",
1981
               mcore_current_function_name,
1982
               fi.arg_size, fi.reg_size, fi.reg_mask,
1983
               fi.local_size, fi.outbound_size,
1984
               frame_pointer_needed);
1985
    }
1986
 
1987
  if (mcore_naked_function_p ())
1988
    return;
1989
 
1990
  /* Handle stdarg+regsaves in one shot: can't be more than 64 bytes.  */
1991
  output_stack_adjust (-1, fi.growth[growth++]);        /* Grows it.  */
1992
 
1993
  /* If we have a parameter passed partially in regs and partially in memory,
1994
     the registers will have been stored to memory already in function.c.  So
1995
     we only need to do something here for varargs functions.  */
1996
  if (fi.arg_size != 0 && crtl->args.pretend_args_size == 0)
1997
    {
1998
      int offset;
1999
      int rn = FIRST_PARM_REG + NPARM_REGS - 1;
2000
      int remaining = fi.arg_size;
2001
 
2002
      for (offset = fi.arg_offset; remaining >= 4; offset -= 4, rn--, remaining -= 4)
2003
        {
2004
          emit_insn (gen_movsi
2005
                     (gen_rtx_MEM (SImode,
2006
                               plus_constant (stack_pointer_rtx, offset)),
2007
                      gen_rtx_REG (SImode, rn)));
2008
        }
2009
    }
2010
 
2011
  /* Do we need another stack adjustment before we do the register saves?  */
2012
  if (growth < fi.reg_growth)
2013
    output_stack_adjust (-1, fi.growth[growth++]);              /* Grows it.  */
2014
 
2015
  if (fi.reg_size != 0)
2016
    {
2017
      int i;
2018
      int offs = fi.reg_offset;
2019
 
2020
      for (i = 15; i >= 0; i--)
2021
        {
2022
          if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
2023
            {
2024
              int first_reg = 15;
2025
 
2026
              while (fi.reg_mask & (1 << first_reg))
2027
                first_reg--;
2028
              first_reg++;
2029
 
2030
              emit_insn (gen_store_multiple (gen_rtx_MEM (SImode, stack_pointer_rtx),
2031
                                             gen_rtx_REG (SImode, first_reg),
2032
                                             GEN_INT (16 - first_reg)));
2033
 
2034
              i -= (15 - first_reg);
2035
              offs += (16 - first_reg) * 4;
2036
            }
2037
          else if (fi.reg_mask & (1 << i))
2038
            {
2039
              emit_insn (gen_movsi
2040
                         (gen_rtx_MEM (SImode,
2041
                                   plus_constant (stack_pointer_rtx, offs)),
2042
                          gen_rtx_REG (SImode, i)));
2043
              offs += 4;
2044
            }
2045
        }
2046
    }
2047
 
2048
  /* Figure the locals + outbounds.  */
2049
  if (frame_pointer_needed)
2050
    {
2051
      /* If we haven't already purchased to 'fp'.  */
2052
      if (growth < fi.local_growth)
2053
        output_stack_adjust (-1, fi.growth[growth++]);          /* Grows it.  */
2054
 
2055
      emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));
2056
 
2057
      /* ... and then go any remaining distance for outbounds, etc.  */
2058
      if (fi.growth[growth])
2059
        output_stack_adjust (-1, fi.growth[growth++]);
2060
    }
2061
  else
2062
    {
2063
      if (growth < fi.local_growth)
2064
        output_stack_adjust (-1, fi.growth[growth++]);          /* Grows it.  */
2065
      if (fi.growth[growth])
2066
        output_stack_adjust (-1, fi.growth[growth++]);
2067
    }
2068
}
2069
 
2070
void
2071
mcore_expand_epilog (void)
2072
{
2073
  struct mcore_frame fi;
2074
  int i;
2075
  int offs;
2076
  int growth = MAX_STACK_GROWS - 1;
2077
 
2078
 
2079
  /* Find out what we're doing.  */
2080
  layout_mcore_frame (&fi);
2081
 
2082
  if (mcore_naked_function_p ())
2083
    return;
2084
 
2085
  /* If we had a frame pointer, restore the sp from that.  */
2086
  if (frame_pointer_needed)
2087
    {
2088
      emit_insn (gen_movsi (stack_pointer_rtx, frame_pointer_rtx));
2089
      growth = fi.local_growth - 1;
2090
    }
2091
  else
2092
    {
2093
      /* XXX: while loop should accumulate and do a single sell.  */
2094
      while (growth >= fi.local_growth)
2095
        {
2096
          if (fi.growth[growth] != 0)
2097
            output_stack_adjust (1, fi.growth[growth]);
2098
          growth--;
2099
        }
2100
    }
2101
 
2102
  /* Make sure we've shrunk the stack back to the point where the registers
2103
     were laid down. This is typically 0/1 iterations.  Then pull the
2104
     register save information back off the stack.  */
2105
  while (growth >= fi.reg_growth)
2106
    output_stack_adjust ( 1, fi.growth[growth--]);
2107
 
2108
  offs = fi.reg_offset;
2109
 
2110
  for (i = 15; i >= 0; i--)
2111
    {
2112
      if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
2113
        {
2114
          int first_reg;
2115
 
2116
          /* Find the starting register.  */
2117
          first_reg = 15;
2118
 
2119
          while (fi.reg_mask & (1 << first_reg))
2120
            first_reg--;
2121
 
2122
          first_reg++;
2123
 
2124
          emit_insn (gen_load_multiple (gen_rtx_REG (SImode, first_reg),
2125
                                        gen_rtx_MEM (SImode, stack_pointer_rtx),
2126
                                        GEN_INT (16 - first_reg)));
2127
 
2128
          i -= (15 - first_reg);
2129
          offs += (16 - first_reg) * 4;
2130
        }
2131
      else if (fi.reg_mask & (1 << i))
2132
        {
2133
          emit_insn (gen_movsi
2134
                     (gen_rtx_REG (SImode, i),
2135
                      gen_rtx_MEM (SImode,
2136
                               plus_constant (stack_pointer_rtx, offs))));
2137
          offs += 4;
2138
        }
2139
    }
2140
 
2141
  /* Give back anything else.  */
2142
  /* XXX: Should accumulate total and then give it back.  */
2143
  while (growth >= 0)
2144
    output_stack_adjust ( 1, fi.growth[growth--]);
2145
}
2146
 
2147
/* This code is borrowed from the SH port.  */
2148
 
2149
/* The MCORE cannot load a large constant into a register; constants have to
2150
   come from a pc relative load.  The reference of a pc relative load
2151
   instruction must be less than 1k in front of the instruction.  This
2152
   means that we often have to dump a constant inside a function, and
2153
   generate code to branch around it.
2154
 
2155
   It is important to minimize this, since the branches will slow things
2156
   down and make things bigger.
2157
 
2158
   Worst case code looks like:
2159
 
2160
   lrw   L1,r0
2161
   br    L2
2162
   align
2163
   L1:   .long value
2164
   L2:
2165
   ..
2166
 
2167
   lrw   L3,r0
2168
   br    L4
2169
   align
2170
   L3:   .long value
2171
   L4:
2172
   ..
2173
 
2174
   We fix this by performing a scan before scheduling, which notices which
2175
   instructions need to have their operands fetched from the constant table
2176
   and builds the table.
2177
 
2178
   The algorithm is:
2179
 
2180
   Scan to find an instruction which needs a pcrel move.  Look forward, find the
2181
   last barrier which is within MAX_COUNT bytes of the requirement.
2182
   If there isn't one, make one.  Process all the instructions between
2183
   the find and the barrier.
2184
 
2185
   In the above example, we can tell that L3 is within 1k of L1, so
2186
   the first move can be shrunk from the 2 insn+constant sequence into
2187
   just 1 insn, and the constant moved to L3 to make:
2188
 
2189
   lrw          L1,r0
2190
   ..
2191
   lrw          L3,r0
2192
   bra          L4
2193
   align
2194
   L3:.long value
2195
   L4:.long value
2196
 
2197
   Then the second move becomes the target for the shortening process.  */
2198
 
2199
typedef struct
2200
{
2201
  rtx value;                    /* Value in table.  */
2202
  rtx label;                    /* Label of value.  */
2203
} pool_node;
2204
 
2205
/* The maximum number of constants that can fit into one pool, since
2206
   the pc relative range is 0...1020 bytes and constants are at least 4
2207
   bytes long.  We subtract 4 from the range to allow for the case where
2208
   we need to add a branch/align before the constant pool.  */
2209
 
2210
#define MAX_COUNT 1016
2211
#define MAX_POOL_SIZE (MAX_COUNT/4)
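/* Editor's note: with the numbers above, MAX_COUNT == 1016 == 1020 - 4
   (the pc-relative reach less room for a branch/align before the pool),
   so MAX_POOL_SIZE works out to 254 four-byte entries.  */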
2212
static pool_node pool_vector[MAX_POOL_SIZE];
2213
static int pool_size;
2214
 
2215
/* Dump out any constants accumulated in the final pass.  These
2216
   will only be labels.  */
2217
 
2218
const char *
2219
mcore_output_jump_label_table (void)
2220
{
2221
  int i;
2222
 
2223
  if (pool_size)
2224
    {
2225
      fprintf (asm_out_file, "\t.align 2\n");
2226
 
2227
      for (i = 0; i < pool_size; i++)
2228
        {
2229
          pool_node * p = pool_vector + i;
2230
 
2231
          (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (p->label));
2232
 
2233
          output_asm_insn (".long       %0", &p->value);
2234
        }
2235
 
2236
      pool_size = 0;
2237
    }
2238
 
2239
  return "";
2240
}
2241
 
2242
/* Check whether insn is a candidate for a conditional.  */
2243
 
2244
static cond_type
2245
is_cond_candidate (rtx insn)
2246
{
2247
  /* The only things we conditionalize are those that can be directly
2248
     changed into a conditional.  Only bother with SImode items.  If
2249
     we wanted to be a little more aggressive, we could also do other
2250
     modes such as DImode with reg-reg move or load 0.  */
2251
  if (GET_CODE (insn) == INSN)
2252
    {
2253
      rtx pat = PATTERN (insn);
2254
      rtx src, dst;
2255
 
2256
      if (GET_CODE (pat) != SET)
2257
        return COND_NO;
2258
 
2259
      dst = XEXP (pat, 0);
2260
 
2261
      if ((GET_CODE (dst) != REG &&
2262
           GET_CODE (dst) != SUBREG) ||
2263
          GET_MODE (dst) != SImode)
2264
        return COND_NO;
2265
 
2266
      src = XEXP (pat, 1);
2267
 
2268
      if ((GET_CODE (src) == REG ||
2269
           (GET_CODE (src) == SUBREG &&
2270
            GET_CODE (SUBREG_REG (src)) == REG)) &&
2271
          GET_MODE (src) == SImode)
2272
        return COND_MOV_INSN;
2273
      else if (GET_CODE (src) == CONST_INT &&
2274
               INTVAL (src) == 0)
2275
        return COND_CLR_INSN;
2276
      else if (GET_CODE (src) == PLUS &&
2277
               (GET_CODE (XEXP (src, 0)) == REG ||
2278
                (GET_CODE (XEXP (src, 0)) == SUBREG &&
2279
                 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
2280
               GET_MODE (XEXP (src, 0)) == SImode &&
2281
               GET_CODE (XEXP (src, 1)) == CONST_INT &&
2282
               INTVAL (XEXP (src, 1)) == 1)
2283
        return COND_INC_INSN;
2284
      else if (((GET_CODE (src) == MINUS &&
2285
                 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2286
                 INTVAL (XEXP (src, 1)) == 1) ||
2287
                (GET_CODE (src) == PLUS &&
2288
                 GET_CODE (XEXP (src, 1)) == CONST_INT &&
2289
                 INTVAL (XEXP (src, 1)) == -1)) &&
2290
               (GET_CODE (XEXP (src, 0)) == REG ||
2291
                (GET_CODE (XEXP (src, 0)) == SUBREG &&
2292
                 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
2293
               GET_MODE (XEXP (src, 0)) == SImode)
2294
        return COND_DEC_INSN;
2295
 
2296
      /* Some insns that we don't bother with:
2297
         (set (rx:DI) (ry:DI))
2298
         (set (rx:DI) (const_int 0))
2299
      */
2300
 
2301
    }
2302
  else if (GET_CODE (insn) == JUMP_INSN &&
2303
           GET_CODE (PATTERN (insn)) == SET &&
2304
           GET_CODE (XEXP (PATTERN (insn), 1)) == LABEL_REF)
2305
    return COND_BRANCH_INSN;
2306
 
2307
  return COND_NO;
2308
}
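/* Editor's note (illustration only): emit_new_cond_insn below maps these
   candidates onto the conditional patterns

     COND_MOV_INSN / COND_CLR_INSN -> movt0 (movt / movf)
     COND_INC_INSN                 -> incscc / incscc_false
     COND_DEC_INSN                 -> decscc / decscc_false
     COND_BRANCH_INSN              -> branch_true / branch_false

   depending on the sense of the condition being applied.  */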
2309
 
2310
/* Emit a conditional version of insn and replace the old insn with the
2311
   new one.  Return the new insn if emitted.  */
2312
 
2313
static rtx
2314
emit_new_cond_insn (rtx insn, int cond)
2315
{
2316
  rtx c_insn = 0;
2317
  rtx pat, dst, src;
2318
  cond_type num;
2319
 
2320
  if ((num = is_cond_candidate (insn)) == COND_NO)
2321
    return NULL;
2322
 
2323
  pat = PATTERN (insn);
2324
 
2325
  if (GET_CODE (insn) == INSN)
2326
    {
2327
      dst = SET_DEST (pat);
2328
      src = SET_SRC (pat);
2329
    }
2330
  else
2331
    {
2332
      dst = JUMP_LABEL (insn);
2333
      src = NULL_RTX;
2334
    }
2335
 
2336
  switch (num)
2337
    {
2338
    case COND_MOV_INSN:
2339
    case COND_CLR_INSN:
2340
      if (cond)
2341
        c_insn = gen_movt0 (dst, src, dst);
2342
      else
2343
        c_insn = gen_movt0 (dst, dst, src);
2344
      break;
2345
 
2346
    case COND_INC_INSN:
2347
      if (cond)
2348
        c_insn = gen_incscc (dst, dst);
2349
      else
2350
        c_insn = gen_incscc_false (dst, dst);
2351
      break;
2352
 
2353
    case COND_DEC_INSN:
2354
      if (cond)
2355
        c_insn = gen_decscc (dst, dst);
2356
      else
2357
        c_insn = gen_decscc_false (dst, dst);
2358
      break;
2359
 
2360
    case COND_BRANCH_INSN:
2361
      if (cond)
2362
        c_insn = gen_branch_true (dst);
2363
      else
2364
        c_insn = gen_branch_false (dst);
2365
      break;
2366
 
2367
    default:
2368
      return NULL;
2369
    }
2370
 
2371
  /* Only copy the notes if they exist.  */
2372
  if (rtx_length [GET_CODE (c_insn)] >= 7 && rtx_length [GET_CODE (insn)] >= 7)
2373
    {
2374
      /* We really don't need to bother with the notes and links at this
2375
         point, but go ahead and save the notes.  This will help is_dead()
2376
         when applying peepholes (links don't matter since they are not
2377
         used any more beyond this point for the mcore).  */
2378
      REG_NOTES (c_insn) = REG_NOTES (insn);
2379
    }
2380
 
2381
  if (num == COND_BRANCH_INSN)
2382
    {
2383
      /* For jumps, we need to be a little bit careful and emit the new jump
2384
         before the old one and to update the use count for the target label.
2385
         This way, the barrier following the old (uncond) jump will get
2386
         deleted, but the label won't.  */
2387
      c_insn = emit_jump_insn_before (c_insn, insn);
2388
 
2389
      ++ LABEL_NUSES (dst);
2390
 
2391
      JUMP_LABEL (c_insn) = dst;
2392
    }
2393
  else
2394
    c_insn = emit_insn_after (c_insn, insn);
2395
 
2396
  delete_insn (insn);
2397
 
2398
  return c_insn;
2399
}
2400
 
2401
/* Attempt to change a basic block into a series of conditional insns.  This
2402
   works by taking the branch at the end of the 1st block and scanning for the
2403
   end of the 2nd block.  If all instructions in the 2nd block have cond.
2404
   versions and the label at the start of block 3 is the same as the target
2405
   from the branch at block 1, then conditionalize all insns in block 2 using
2406
   the inverse condition of the branch at block 1.  (Note I'm bending the
2407
   definition of basic block here.)
2408
 
2409
   e.g., change:
2410
 
2411
                bt      L2             <-- end of block 1 (delete)
2412
                mov     r7,r8
2413
                addu    r7,1
2414
                br      L3             <-- end of block 2
2415
 
2416
        L2:     ...                    <-- start of block 3 (NUSES==1)
2417
        L3:     ...
2418
 
2419
   to:
2420
 
2421
                movf    r7,r8
2422
                incf    r7
2423
                bf      L3
2424
 
2425
        L3:     ...
2426
 
2427
   we can delete the L2 label if NUSES==1 and re-apply the optimization
2428
   starting at the last instruction of block 2.  This may allow an entire
2429
   if-then-else statement to be conditionalized.  BRC  */
2430
static rtx
2431
conditionalize_block (rtx first)
2432
{
2433
  rtx insn;
2434
  rtx br_pat;
2435
  rtx end_blk_1_br = 0;
2436
  rtx end_blk_2_insn = 0;
2437
  rtx start_blk_3_lab = 0;
2438
  int cond;
2439
  int br_lab_num;
2440
  int blk_size = 0;
2441
 
2442
 
2443
  /* Check that the first insn is a candidate conditional jump.  This is
2444
     the one that we'll eliminate.  If not, advance to the next insn to
2445
     try.  */
2446
  if (GET_CODE (first) != JUMP_INSN ||
2447
      GET_CODE (PATTERN (first)) != SET ||
2448
      GET_CODE (XEXP (PATTERN (first), 1)) != IF_THEN_ELSE)
2449
    return NEXT_INSN (first);
2450
 
2451
  /* Extract some information we need.  */
2452
  end_blk_1_br = first;
2453
  br_pat = PATTERN (end_blk_1_br);
2454
 
2455
  /* Complement the condition since we use the reverse cond. for the insns.  */
2456
  cond = (GET_CODE (XEXP (XEXP (br_pat, 1), 0)) == EQ);
2457
 
2458
  /* Determine what kind of branch we have.  */
2459
  if (GET_CODE (XEXP (XEXP (br_pat, 1), 1)) == LABEL_REF)
2460
    {
2461
      /* A normal branch, so extract label out of first arm.  */
2462
      br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 1), 0));
2463
    }
2464
  else
2465
    {
2466
      /* An inverse branch, so extract the label out of the 2nd arm
2467
         and complement the condition.  */
2468
      cond = (cond == 0);
2469
      br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 2), 0));
2470
    }
2471
 
2472
  /* Scan forward for the start of block 2: it must start with a
2473
     label and that label must be the same as the branch target
2474
     label from block 1.  We don't care about whether block 2 actually
2475
     ends with a branch or a label (an uncond. branch is
2476
     conditionalizable).  */
2477
  for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
2478
    {
2479
      enum rtx_code code;
2480
 
2481
      code = GET_CODE (insn);
2482
 
2483
      /* Look for the label at the start of block 3.  */
2484
      if (code == CODE_LABEL && CODE_LABEL_NUMBER (insn) == br_lab_num)
2485
        break;
2486
 
2487
      /* Skip barriers, notes, and conditionalizable insns.  If the
2488
         insn is not conditionalizable or makes this optimization fail,
2489
         just return the next insn so we can start over from that point.  */
2490
      if (code != BARRIER && code != NOTE && !is_cond_candidate (insn))
2491
        return NEXT_INSN (insn);
2492
 
2493
      /* Remember the last real insn before the label (i.e. end of block 2).  */
2494
      if (code == JUMP_INSN || code == INSN)
2495
        {
2496
          blk_size ++;
2497
          end_blk_2_insn = insn;
2498
        }
2499
    }
2500
 
2501
  if (!insn)
2502
    return insn;
2503
 
2504
  /* It is possible for this optimization to slow performance if the blocks
2505
     are long.  This really depends upon whether the branch is likely taken
2506
     or not.  If the branch is taken, we slow performance in many cases.  But,
2507
     if the branch is not taken, we always help performance (for a single
2508
     block, but for a double block (i.e. when the optimization is re-applied)
2509
     this is not true since the 'right thing' depends on the overall length of
2510
     the collapsed block).  As a compromise, don't apply this optimization on
2511
     blocks larger than size 2 (unlikely for the mcore) when speed is important.
2512
     The best threshold depends on the latencies of the instructions (i.e.,
2513
     the branch penalty).  */
2514
  if (optimize > 1 && blk_size > 2)
2515
    return insn;
2516
 
2517
  /* At this point, we've found the start of block 3 and we know that
2518
     it is the destination of the branch from block 1.   Also, all
2519
     instructions in the block 2 are conditionalizable.  So, apply the
2520
     conditionalization and delete the branch.  */
2521
  start_blk_3_lab = insn;
2522
 
2523
  for (insn = NEXT_INSN (end_blk_1_br); insn != start_blk_3_lab;
2524
       insn = NEXT_INSN (insn))
2525
    {
2526
      rtx newinsn;
2527
 
2528
      if (INSN_DELETED_P (insn))
2529
        continue;
2530
 
2531
      /* Try to form a conditional variant of the instruction and emit it.  */
2532
      if ((newinsn = emit_new_cond_insn (insn, cond)))
2533
        {
2534
          if (end_blk_2_insn == insn)
2535
            end_blk_2_insn = newinsn;
2536
 
2537
          insn = newinsn;
2538
        }
2539
    }
2540
 
2541
  /* Note whether we will delete the label starting blk 3 when the jump
2542
     gets deleted.  If so, we want to re-apply this optimization at the
2543
     last real instruction right before the label.  */
2544
  if (LABEL_NUSES (start_blk_3_lab) == 1)
2545
    {
2546
      start_blk_3_lab = 0;
2547
    }
2548
 
2549
  /* ??? we probably should redistribute the death notes for this insn, esp.
2550
     the death of cc, but it doesn't really matter this late in the game.
2551
     The peepholes all use is_dead() which will find the correct death
2552
     regardless of whether there is a note.  */
2553
  delete_insn (end_blk_1_br);
2554
 
2555
  if (! start_blk_3_lab)
2556
    return end_blk_2_insn;
2557
 
2558
  /* Return the insn right after the label at the start of block 3.  */
2559
  return NEXT_INSN (start_blk_3_lab);
2560
}
2561
 
2562
/* Apply the conditionalization of blocks optimization.  This is the
2563
   outer loop that traverses through the insns scanning for a branch
2564
   that signifies an opportunity to apply the optimization.  Note that
2565
   this optimization is applied late.  If we could apply it earlier,
2566
   say before cse 2, it may expose more optimization opportunities.
2567
   But the payback probably isn't really worth the effort (we'd have
2568
   to update all reg/flow/notes/links/etc to make it work - and stick it
2569
   in before cse 2).  */
2570
 
2571
static void
2572
conditionalize_optimization (void)
2573
{
2574
  rtx insn;
2575
 
2576
  for (insn = get_insns (); insn; insn = conditionalize_block (insn))
2577
    continue;
2578
}
2579
 
2580
static int saved_warn_return_type = -1;
2581
static int saved_warn_return_type_count = 0;
2582
 
2583
/* This is to handle loads from the constant pool.  */
2584
 
2585
static void
2586
mcore_reorg (void)
2587
{
2588
  /* Reset this variable.  */
2589
  current_function_anonymous_args = 0;
2590
 
2591
  /* Restore the warn_return_type if it has been altered.  */
2592
  if (saved_warn_return_type != -1)
2593
    {
2594
      /* Only restore the value if we have reached another function.
2595
         The test of warn_return_type occurs in finish_function () in
2596
         c-decl.c a long time after the code for the function is generated,
2597
         so we need a counter to tell us when we have finished parsing that
2598
         function and can restore the flag.  */
2599
      if (--saved_warn_return_type_count == 0)
2600
        {
2601
          warn_return_type = saved_warn_return_type;
2602
          saved_warn_return_type = -1;
2603
        }
2604
    }
2605
 
2606
  if (optimize == 0)
2607
    return;
2608
 
2609
  /* Conditionalize blocks where we can.  */
2610
  conditionalize_optimization ();
2611
 
2612
  /* Literal pool generation is now pushed off until the assembler.  */
2613
}
2614
 
2615
 
2616
/* Return true if X is something that can be moved directly into r15.  */
2617
 
2618
bool
2619
mcore_r15_operand_p (rtx x)
2620
{
2621
  switch (GET_CODE (x))
2622
    {
2623
    case CONST_INT:
2624
      return mcore_const_ok_for_inline (INTVAL (x));
2625
 
2626
    case REG:
2627
    case SUBREG:
2628
    case MEM:
2629
      return 1;
2630
 
2631
    default:
2632
      return 0;
2633
    }
2634
}
2635
 
2636
/* Implement SECONDARY_RELOAD_CLASS.  If RCLASS contains r15, and we can't
2637
   directly move X into it, use r1-r14 as a temporary.  */
2638
 
2639
enum reg_class
2640
mcore_secondary_reload_class (enum reg_class rclass,
2641
                              enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
2642
{
2643
  if (TEST_HARD_REG_BIT (reg_class_contents[rclass], 15)
2644
      && !mcore_r15_operand_p (x))
2645
    return LRW_REGS;
2646
  return NO_REGS;
2647
}
2648
 
2649
/* Return the reg_class to use when reloading the rtx X into the class
2650
   RCLASS.  If X is too complex to move directly into r15, prefer to
2651
   use LRW_REGS instead.  */
2652
 
2653
enum reg_class
2654
mcore_reload_class (rtx x, enum reg_class rclass)
2655
{
2656
  if (reg_class_subset_p (LRW_REGS, rclass) && !mcore_r15_operand_p (x))
2657
    return LRW_REGS;
2658
 
2659
  return rclass;
2660
}
2661
 
2662
/* Tell me if a pair of reg/subreg rtx's actually refer to the same
2663
   register.  Note that the current version doesn't worry about whether
2664
   they are the same mode or note (e.g., a QImode in r2 matches an HImode
2665
   in r2 matches an SImode in r2. Might think in the future about whether
2666
   we want to be able to say something about modes.  */
2667
 
2668
int
2669
mcore_is_same_reg (rtx x, rtx y)
2670
{
2671
  /* Strip any and all of the subreg wrappers.  */
2672
  while (GET_CODE (x) == SUBREG)
2673
    x = SUBREG_REG (x);
2674
 
2675
  while (GET_CODE (y) == SUBREG)
2676
    y = SUBREG_REG (y);
2677
 
2678
  if (GET_CODE (x) == REG && GET_CODE (y) == REG && REGNO (x) == REGNO (y))
2679
    return 1;
2680
 
2681
  return 0;
2682
}
2683
 
2684
static void
2685
mcore_option_override (void)
2686
{
2687
  /* Only the m340 supports little endian code.  */
2688
  if (TARGET_LITTLE_END && ! TARGET_M340)
2689
    target_flags |= MASK_M340;
2690
}
2691
 
2692
 
2693
/* Compute the number of word sized registers needed to
2694
   hold a function argument of mode MODE and type TYPE.  */
2695
 
2696
int
2697
mcore_num_arg_regs (enum machine_mode mode, const_tree type)
2698
{
2699
  int size;
2700
 
2701
  if (targetm.calls.must_pass_in_stack (mode, type))
2702
    return 0;
2703
 
2704
  if (type && mode == BLKmode)
2705
    size = int_size_in_bytes (type);
2706
  else
2707
    size = GET_MODE_SIZE (mode);
2708
 
2709
  return ROUND_ADVANCE (size);
2710
}
2711
 
2712
static rtx
2713
handle_structs_in_regs (enum machine_mode mode, const_tree type, int reg)
2714
{
2715
  int size;
2716
 
2717
  /* The MCore ABI defines that a structure whose size is not a whole multiple
2718
     of bytes is passed packed into registers (or spilled onto the stack if
2719
     not enough registers are available) with the last few bytes of the
2720
     structure being packed, left-justified, into the last register/stack slot.
2721
     GCC handles this correctly if the last word is in a stack slot, but we
2722
     have to generate a special PARALLEL RTX if the last word is in an
2723
     argument register.  */
2724
  if (type
2725
      && TYPE_MODE (type) == BLKmode
2726
      && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
2727
      && (size = int_size_in_bytes (type)) > UNITS_PER_WORD
2728
      && (size % UNITS_PER_WORD != 0)
2729
      && (reg + mcore_num_arg_regs (mode, type) <= (FIRST_PARM_REG + NPARM_REGS)))
2730
    {
2731
      rtx    arg_regs [NPARM_REGS];
2732
      int    nregs;
2733
      rtx    result;
2734
      rtvec  rtvec;
2735
 
2736
      for (nregs = 0; size > 0; size -= UNITS_PER_WORD)
2737
        {
2738
          arg_regs [nregs] =
2739
            gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, reg ++),
2740
                               GEN_INT (nregs * UNITS_PER_WORD));
2741
          nregs ++;
2742
        }
2743
 
2744
      /* We assume here that NPARM_REGS == 6.  The assert checks this.  */
2745
      gcc_assert (ARRAY_SIZE (arg_regs) == 6);
2746
      rtvec = gen_rtvec (nregs, arg_regs[0], arg_regs[1], arg_regs[2],
2747
                          arg_regs[3], arg_regs[4], arg_regs[5]);
2748
 
2749
      result = gen_rtx_PARALLEL (mode, rtvec);
2750
      return result;
2751
    }
2752
 
2753
  return gen_rtx_REG (mode, reg);
2754
}
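/* Editor's illustration (not from the original sources): for a 6-byte
   BLKmode struct passed starting in argument register 2 (the first
   parameter register, per the comment in mcore_arg_partial_bytes below),
   the PARALLEL built above is roughly

     (parallel [(expr_list (reg:SI 2) (const_int 0))
                (expr_list (reg:SI 3) (const_int 4))])

   i.e. bytes 0-3 go in r2 and the remaining two bytes are packed,
   left-justified, into r3, matching the ABI rule described above.  */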
2755
 
2756
rtx
2757
mcore_function_value (const_tree valtype, const_tree func)
2758
{
2759
  enum machine_mode mode;
2760
  int unsigned_p;
2761
 
2762
  mode = TYPE_MODE (valtype);
2763
 
2764
  /* Since we promote return types, we must promote the mode here too.  */
2765
  mode = promote_function_mode (valtype, mode, &unsigned_p, func, 1);
2766
 
2767
  return handle_structs_in_regs (mode, valtype, FIRST_RET_REG);
2768
}
2769
 
2770
/* Define where to put the arguments to a function.
2771
   Value is zero to push the argument on the stack,
2772
   or a hard register in which to store the argument.
2773
 
2774
   MODE is the argument's machine mode.
2775
   TYPE is the data type of the argument (as a tree).
2776
    This is null for libcalls where that information may
2777
    not be available.
2778
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
2779
    the preceding args and about the function being called.
2780
   NAMED is nonzero if this argument is a named parameter
2781
    (otherwise it is an extra parameter matching an ellipsis).
2782
 
2783
   On MCore the first args are normally in registers
2784
   and the rest are pushed.  Any arg that starts within the first
2785
   NPARM_REGS words is at least partially passed in a register unless
2786
   its data type forbids.  */
2787
 
2788
static rtx
2789
mcore_function_arg (cumulative_args_t cum, enum machine_mode mode,
2790
                    const_tree type, bool named)
2791
{
2792
  int arg_reg;
2793
 
2794
  if (! named || mode == VOIDmode)
2795
    return 0;
2796
 
2797
  if (targetm.calls.must_pass_in_stack (mode, type))
2798
    return 0;
2799
 
2800
  arg_reg = ROUND_REG (*get_cumulative_args (cum), mode);
2801
 
2802
  if (arg_reg < NPARM_REGS)
2803
    return handle_structs_in_regs (mode, type, FIRST_PARM_REG + arg_reg);
2804
 
2805
  return 0;
2806
}
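/* Editor's illustration (not from the original sources): if two argument
   words are already in use, the next named SImode parameter lands in
   hardware register 4 (FIRST_PARM_REG + 2, given that the first argument
   register is r2 as described in mcore_arg_partial_bytes below).  Unnamed
   parameters, arguments that must live on the stack, and anything beyond
   the NPARM_REGS argument words get 0, i.e. they are pushed.  */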
2807
 
2808
static void
2809
mcore_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
2810
                            const_tree type, bool named ATTRIBUTE_UNUSED)
2811
{
2812
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
2813
 
2814
  *cum = (ROUND_REG (*cum, mode)
2815
          + (int)named * mcore_num_arg_regs (mode, type));
2816
}
2817
 
2818
static unsigned int
2819
mcore_function_arg_boundary (enum machine_mode mode,
2820
                             const_tree type ATTRIBUTE_UNUSED)
2821
{
2822
  /* Doubles must be aligned to an 8 byte boundary.  */
2823
  return (mode != BLKmode && GET_MODE_SIZE (mode) == 8
2824
          ? BIGGEST_ALIGNMENT
2825
          : PARM_BOUNDARY);
2826
}
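/* Editor's note (illustration only): an argument of DFmode (8 bytes)
   therefore gets BIGGEST_ALIGNMENT -- presumably a 64-bit boundary on
   this target -- while SImode and smaller arguments keep the default
   PARM_BOUNDARY alignment.  */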
2827
 
2828
/* Returns the number of bytes of argument registers required to hold *part*
2829
   of a parameter of machine mode MODE and type TYPE (which may be NULL if
2830
   the type is not known).  If the argument fits entirely in the argument
2831
   registers, or entirely on the stack, then 0 is returned.  CUM is the
2832
   number of argument registers already used by earlier parameters to
2833
   the function.  */
2834
 
2835
static int
2836
mcore_arg_partial_bytes (cumulative_args_t cum, enum machine_mode mode,
2837
                         tree type, bool named)
2838
{
2839
  int reg = ROUND_REG (*get_cumulative_args (cum), mode);
2840
 
2841
  if (named == 0)
2842
    return 0;
2843
 
2844
  if (targetm.calls.must_pass_in_stack (mode, type))
2845
    return 0;
2846
 
2847
  /* REG is not the *hardware* register number of the register that holds
2848
     the argument, it is the *argument* register number.  So for example,
2849
     the first argument to a function goes in argument register 0, which
2850
     translates (for the MCore) into hardware register 2.  The second
2851
     argument goes into argument register 1, which translates into hardware
2852
     register 3, and so on.  NPARM_REGS is the number of argument registers
2853
     supported by the target, not the maximum hardware register number of
2854
     the target.  */
2855
  if (reg >= NPARM_REGS)
2856
    return 0;
2857
 
2858
  /* If the argument fits entirely in registers, return 0.  */
2859
  if (reg + mcore_num_arg_regs (mode, type) <= NPARM_REGS)
2860
    return 0;
2861
 
2862
  /* The argument overflows the number of available argument registers.
2863
     Compute how many argument registers have not yet been assigned to
2864
     hold an argument.  */
2865
  reg = NPARM_REGS - reg;
2866
 
2867
  /* Return partially in registers and partially on the stack.  */
2868
  return reg * UNITS_PER_WORD;
2869
}
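/* Editor's illustration (not from the original sources), assuming
   NPARM_REGS == 6 and UNITS_PER_WORD == 4 as elsewhere in this file:
   if four argument registers are already occupied (reg == 4) and the
   next parameter needs three words, then reg + 3 > NPARM_REGS, so the
   parameter is split; the two remaining registers hold its first eight
   bytes and the function returns 2 * UNITS_PER_WORD == 8.  */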
2870
 
2871
/* Return nonzero if SYMBOL is marked as being dllexport'd.  */
2872
 
2873
int
2874
mcore_dllexport_name_p (const char * symbol)
2875
{
2876
  return symbol[0] == '@' && symbol[1] == 'e' && symbol[2] == '.';
2877
}
2878
 
2879
/* Return nonzero if SYMBOL is marked as being dllimport'd.  */
2880
 
2881
int
2882
mcore_dllimport_name_p (const char * symbol)
2883
{
2884
  return symbol[0] == '@' && symbol[1] == 'i' && symbol[2] == '.';
2885
}
2886
 
2887
/* Mark a DECL as being dllexport'd.  */
2888
 
2889
static void
2890
mcore_mark_dllexport (tree decl)
2891
{
2892
  const char * oldname;
2893
  char * newname;
2894
  rtx    rtlname;
2895
  tree   idp;
2896
 
2897
  rtlname = XEXP (DECL_RTL (decl), 0);
2898
 
2899
  if (GET_CODE (rtlname) == MEM)
2900
    rtlname = XEXP (rtlname, 0);
2901
  gcc_assert (GET_CODE (rtlname) == SYMBOL_REF);
2902
  oldname = XSTR (rtlname, 0);
2903
 
2904
  if (mcore_dllexport_name_p (oldname))
2905
    return;  /* Already done.  */
2906
 
2907
  newname = XALLOCAVEC (char, strlen (oldname) + 4);
2908
  sprintf (newname, "@e.%s", oldname);
2909
 
2910
  /* We pass newname through get_identifier to ensure it has a unique
2911
     address.  RTL processing can sometimes peek inside the symbol ref
2912
     and compare the string's addresses to see if two symbols are
2913
     identical.  */
2914
  /* ??? At least I think that's why we do this.  */
2915
  idp = get_identifier (newname);
2916
 
2917
  XEXP (DECL_RTL (decl), 0) =
2918
    gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));
2919
}
2920
 
2921
/* Mark a DECL as being dllimport'd.  */
2922
 
2923
static void
2924
mcore_mark_dllimport (tree decl)
2925
{
2926
  const char * oldname;
2927
  char * newname;
2928
  tree   idp;
2929
  rtx    rtlname;
2930
  rtx    newrtl;
2931
 
2932
  rtlname = XEXP (DECL_RTL (decl), 0);
2933
 
2934
  if (GET_CODE (rtlname) == MEM)
2935
    rtlname = XEXP (rtlname, 0);
2936
  gcc_assert (GET_CODE (rtlname) == SYMBOL_REF);
2937
  oldname = XSTR (rtlname, 0);
2938
 
2939
  gcc_assert (!mcore_dllexport_name_p (oldname));
2940
  if (mcore_dllimport_name_p (oldname))
2941
    return; /* Already done.  */
2942
 
2943
  /* ??? One can well ask why we're making these checks here,
2944
     and that would be a good question.  */
2945
 
2946
  /* Imported variables can't be initialized.  */
2947
  if (TREE_CODE (decl) == VAR_DECL
2948
      && !DECL_VIRTUAL_P (decl)
2949
      && DECL_INITIAL (decl))
2950
    {
2951
      error ("initialized variable %q+D is marked dllimport", decl);
2952
      return;
2953
    }
2954
 
2955
  /* `extern' needn't be specified with dllimport.
2956
     Specify `extern' now and hope for the best.  Sigh.  */
2957
  if (TREE_CODE (decl) == VAR_DECL
2958
      /* ??? Is this test for vtables needed?  */
2959
      && !DECL_VIRTUAL_P (decl))
2960
    {
2961
      DECL_EXTERNAL (decl) = 1;
2962
      TREE_PUBLIC (decl) = 1;
2963
    }
2964
 
2965
  newname = XALLOCAVEC (char, strlen (oldname) + 11);
2966
  sprintf (newname, "@i.__imp_%s", oldname);
2967
 
2968
  /* We pass newname through get_identifier to ensure it has a unique
2969
     address.  RTL processing can sometimes peek inside the symbol ref
2970
     and compare the string's addresses to see if two symbols are
2971
     identical.  */
2972
  /* ??? At least I think that's why we do this.  */
2973
  idp = get_identifier (newname);
2974
 
2975
  newrtl = gen_rtx_MEM (Pmode,
2976
                    gen_rtx_SYMBOL_REF (Pmode,
2977
                             IDENTIFIER_POINTER (idp)));
2978
  XEXP (DECL_RTL (decl), 0) = newrtl;
2979
}
2980
 
2981
static int
2982
mcore_dllexport_p (tree decl)
2983
{
2984
  if (   TREE_CODE (decl) != VAR_DECL
2985
      && TREE_CODE (decl) != FUNCTION_DECL)
2986
    return 0;
2987
 
2988
  return lookup_attribute ("dllexport", DECL_ATTRIBUTES (decl)) != 0;
2989
}
2990
 
2991
static int
2992
mcore_dllimport_p (tree decl)
2993
{
2994
  if (   TREE_CODE (decl) != VAR_DECL
2995
      && TREE_CODE (decl) != FUNCTION_DECL)
2996
    return 0;
2997
 
2998
  return lookup_attribute ("dllimport", DECL_ATTRIBUTES (decl)) != 0;
2999
}
3000
 
3001
/* We must mark dll symbols specially.  Definitions of dllexport'd objects
3002
   install some info in the .drectve (PE) or .exports (ELF) sections.  */
3003
 
3004
static void
3005
mcore_encode_section_info (tree decl, rtx rtl ATTRIBUTE_UNUSED, int first ATTRIBUTE_UNUSED)
3006
{
3007
  /* Mark the decl so we can tell from the rtl whether the object is
3008
     dllexport'd or dllimport'd.  */
3009
  if (mcore_dllexport_p (decl))
3010
    mcore_mark_dllexport (decl);
3011
  else if (mcore_dllimport_p (decl))
3012
    mcore_mark_dllimport (decl);
3013
 
3014
  /* It might be that DECL has already been marked as dllimport, but
3015
     a subsequent definition nullified that.  The attribute is gone
3016
     but DECL_RTL still has @i.__imp_foo.  We need to remove that.  */
3017
  else if ((TREE_CODE (decl) == FUNCTION_DECL
3018
            || TREE_CODE (decl) == VAR_DECL)
3019
           && DECL_RTL (decl) != NULL_RTX
3020
           && GET_CODE (DECL_RTL (decl)) == MEM
3021
           && GET_CODE (XEXP (DECL_RTL (decl), 0)) == MEM
3022
           && GET_CODE (XEXP (XEXP (DECL_RTL (decl), 0), 0)) == SYMBOL_REF
3023
           && mcore_dllimport_name_p (XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0)))
3024
    {
3025
      const char * oldname = XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0);
3026
      tree idp = get_identifier (oldname + 9);
3027
      rtx newrtl = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));
3028
 
3029
      XEXP (DECL_RTL (decl), 0) = newrtl;
3030
 
3031
      /* We previously set TREE_PUBLIC and DECL_EXTERNAL.
3032
         ??? We leave these alone for now.  */
3033
    }
3034
}
3035
 
3036
/* Undo the effects of the above.  */
3037
 
3038
static const char *
3039
mcore_strip_name_encoding (const char * str)
3040
{
3041
  return str + (str[0] == '@' ? 3 : 0);
3042
}
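/* Editor's note (illustration only): the round trip for the encodings
   above is

     mcore_mark_dllexport:      "foo" -> "@e.foo"
     mcore_mark_dllimport:      "bar" -> "@i.__imp_bar"  (wrapped in a MEM)
     mcore_strip_name_encoding: "@e.foo" -> "foo",
                                "@i.__imp_bar" -> "__imp_bar",
                                unencoded names are returned unchanged.  */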
3043
 
3044
/* MCore specific attribute support.
3045
   dllexport - for exporting a function/variable that will live in a dll
3046
   dllimport - for importing a function/variable from a dll
3047
   naked     - do not create a function prologue/epilogue.  */
3048
 
3049
/* Handle a "naked" attribute; arguments as in
3050
   struct attribute_spec.handler.  */
3051
 
3052
static tree
3053
mcore_handle_naked_attribute (tree * node, tree name, tree args ATTRIBUTE_UNUSED,
3054
                              int flags ATTRIBUTE_UNUSED, bool * no_add_attrs)
3055
{
3056
  if (TREE_CODE (*node) == FUNCTION_DECL)
3057
    {
3058
      /* PR14310 - don't complain about lack of return statement
3059
         in naked functions.  The solution here is a gross hack
3060
         but this is the only way to solve the problem without
3061
         adding a new feature to GCC.  I did try submitting a patch
3062
         that would add such a new feature, but it was (rightfully)
3063
         rejected on the grounds that it was creeping featurism,
3064
         so hence this code.  */
3065
      if (warn_return_type)
3066
        {
3067
          saved_warn_return_type = warn_return_type;
3068
          warn_return_type = 0;
3069
          saved_warn_return_type_count = 2;
3070
        }
3071
      else if (saved_warn_return_type_count)
3072
        saved_warn_return_type_count = 2;
3073
    }
3074
  else
3075
    {
3076
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
3077
               name);
3078
      *no_add_attrs = true;
3079
    }
3080
 
3081
  return NULL_TREE;
3082
}
3083
 
3084
/* ??? It looks like this is PE specific?  Oh well, this is what the
3085
   old code did as well.  */
3086
 
3087
static void
3088
mcore_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
3089
{
3090
  int len;
3091
  const char * name;
3092
  char * string;
3093
  const char * prefix;
3094
 
3095
  name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
3096
 
3097
  /* Strip off any encoding in name.  */
3098
  name = (* targetm.strip_name_encoding) (name);
3099
 
3100
  /* The object is put in, for example, section .text$foo.
3101
     The linker will then ultimately place them in .text
3102
     (everything from the $ on is stripped).  */
3103
  if (TREE_CODE (decl) == FUNCTION_DECL)
3104
    prefix = ".text$";
3105
  /* For compatibility with EPOC, we ignore the fact that the
3106
     section might have relocs against it.  */
3107
  else if (decl_readonly_section (decl, 0))
3108
    prefix = ".rdata$";
3109
  else
3110
    prefix = ".data$";
3111
 
3112
  len = strlen (name) + strlen (prefix);
3113
  string = XALLOCAVEC (char, len + 1);
3114
 
3115
  sprintf (string, "%s%s", prefix, name);
3116
 
3117
  DECL_SECTION_NAME (decl) = build_string (len, string);
3118
}
3119
 
3120
int
3121
mcore_naked_function_p (void)
3122
{
3123
  return lookup_attribute ("naked", DECL_ATTRIBUTES (current_function_decl)) != NULL_TREE;
3124
}
3125
 
3126
#ifdef OBJECT_FORMAT_ELF
3127
static void
3128
mcore_asm_named_section (const char *name,
3129
                         unsigned int flags ATTRIBUTE_UNUSED,
3130
                         tree decl ATTRIBUTE_UNUSED)
3131
{
3132
  fprintf (asm_out_file, "\t.section %s\n", name);
3133
}
3134
#endif /* OBJECT_FORMAT_ELF */
3135
 
3136
/* Worker function for TARGET_ASM_EXTERNAL_LIBCALL.  */
3137
 
3138
static void
3139
mcore_external_libcall (rtx fun)
3140
{
3141
  fprintf (asm_out_file, "\t.import\t");
3142
  assemble_name (asm_out_file, XSTR (fun, 0));
3143
  fprintf (asm_out_file, "\n");
3144
}
3145
 
3146
/* Worker function for TARGET_RETURN_IN_MEMORY.  */
3147
 
3148
static bool
3149
mcore_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
3150
{
3151
  const HOST_WIDE_INT size = int_size_in_bytes (type);
3152
  return (size == -1 || size > 2 * UNITS_PER_WORD);
3153
}
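/* Editor's note (illustration only): with UNITS_PER_WORD == 4 the cutoff
   above is eight bytes, so e.g. an 8-byte struct is returned in
   registers while a 12-byte struct -- or any type whose size cannot be
   determined (size == -1) -- is returned in memory.  */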
3154
 
3155
/* Worker function for TARGET_ASM_TRAMPOLINE_TEMPLATE.
3156
   Output assembler code for a block containing the constant parts
3157
   of a trampoline, leaving space for the variable parts.
3158
 
3159
   On the MCore, the trampoline looks like:
3160
        lrw     r1,  function
3161
        lrw     r13, area
3162
        jmp     r13
3163
        or      r0, r0
3164
    .literals                                                */
3165
 
3166
static void
3167
mcore_asm_trampoline_template (FILE *f)
3168
{
3169
  fprintf (f, "\t.short 0x7102\n");
3170
  fprintf (f, "\t.short 0x7d02\n");
3171
  fprintf (f, "\t.short 0x00cd\n");
3172
  fprintf (f, "\t.short 0x1e00\n");
3173
  fprintf (f, "\t.long  0\n");
3174
  fprintf (f, "\t.long  0\n");
3175
}
3176
 
3177
/* Worker function for TARGET_TRAMPOLINE_INIT.  */
3178
 
3179
static void
3180
mcore_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
3181
{
3182
  rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
3183
  rtx mem;
3184
 
3185
  emit_block_move (m_tramp, assemble_trampoline_template (),
3186
                   GEN_INT (2*UNITS_PER_WORD), BLOCK_OP_NORMAL);
3187
 
3188
  mem = adjust_address (m_tramp, SImode, 8);
3189
  emit_move_insn (mem, chain_value);
3190
  mem = adjust_address (m_tramp, SImode, 12);
3191
  emit_move_insn (mem, fnaddr);
3192
}
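/* Editor's note (illustration only): the 8 bytes copied from the
   template are the four .short instruction words above; the stores at
   offsets 8 and 12 fill in the two ".long 0" literal slots, so after
   initialization the trampoline's pc-relative loads presumably pick up
   the static chain value and the target function's address from
   there.  */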
3193
 
3194
/* Implement TARGET_LEGITIMATE_CONSTANT_P
3195
 
3196
   On the MCore, allow anything but a double.  */
3197
 
3198
static bool
3199
mcore_legitimate_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
3200
{
3201
  return GET_CODE (x) != CONST_DOUBLE;
3202
}
