OpenCores
URL https://opencores.org/ocsvn/openrisc/openrisc/trunk

Subversion Repositories openrisc

[/] [openrisc/] [trunk/] [gnu-dev/] [or1k-gcc/] [gcc/] [config/] [mips/] [mips.c] - Blame information for rev 801

Go to most recent revision | Details | Compare with Previous | View Log

Line No. Rev Author Line
1 709 jeremybenn
/* Subroutines used for MIPS code generation.
2
   Copyright (C) 1989, 1990, 1991, 1993, 1994, 1995, 1996, 1997, 1998,
3
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
4
   2011, 2012
5
   Free Software Foundation, Inc.
6
   Contributed by A. Lichnewsky, lich@inria.inria.fr.
7
   Changes by Michael Meissner, meissner@osf.org.
8
   64-bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
9
   Brendan Eich, brendan@microunity.com.
10
 
11
This file is part of GCC.
12
 
13
GCC is free software; you can redistribute it and/or modify
14
it under the terms of the GNU General Public License as published by
15
the Free Software Foundation; either version 3, or (at your option)
16
any later version.
17
 
18
GCC is distributed in the hope that it will be useful,
19
but WITHOUT ANY WARRANTY; without even the implied warranty of
20
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21
GNU General Public License for more details.
22
 
23
You should have received a copy of the GNU General Public License
24
along with GCC; see the file COPYING3.  If not see
25
<http://www.gnu.org/licenses/>.  */
26
 
27
#include "config.h"
28
#include "system.h"
29
#include "coretypes.h"
30
#include "tm.h"
31
#include "rtl.h"
32
#include "regs.h"
33
#include "hard-reg-set.h"
34
#include "insn-config.h"
35
#include "conditions.h"
36
#include "insn-attr.h"
37
#include "recog.h"
38
#include "output.h"
39
#include "tree.h"
40
#include "function.h"
41
#include "expr.h"
42
#include "optabs.h"
43
#include "libfuncs.h"
44
#include "flags.h"
45
#include "reload.h"
46
#include "tm_p.h"
47
#include "ggc.h"
48
#include "gstab.h"
49
#include "hashtab.h"
50
#include "debug.h"
51
#include "target.h"
52
#include "target-def.h"
53
#include "integrate.h"
54
#include "langhooks.h"
55
#include "cfglayout.h"
56
#include "sched-int.h"
57
#include "gimple.h"
58
#include "bitmap.h"
59
#include "diagnostic.h"
60
#include "target-globals.h"
61
#include "opts.h"
62
 
63
/* True if X is an UNSPEC wrapper around a SYMBOL_REF or LABEL_REF.  */
#define UNSPEC_ADDRESS_P(X)                                     \
  (GET_CODE (X) == UNSPEC                                       \
   && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST                       \
   && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)

/* Extract the symbol or label from UNSPEC wrapper X.  */
#define UNSPEC_ADDRESS(X) \
  XVECEXP (X, 0, 0)

/* Extract the symbol type from UNSPEC wrapper X.  */
#define UNSPEC_ADDRESS_TYPE(X) \
  ((enum mips_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
/* The maximum distance between the top of the stack frame and the
   value $sp has when we save and restore registers.

   The value for normal-mode code must be a SMALL_OPERAND and must
   preserve the maximum stack alignment.  We therefore use a value
   of 0x7ff0 in this case.

   MIPS16e SAVE and RESTORE instructions can adjust the stack pointer by
   up to 0x7f8 bytes and can usually save or restore all the registers
   that we need to save or restore.  (Note that we can only use these
   instructions for o32, for which the stack alignment is 8 bytes.)

   We use a maximum gap of 0x100 or 0x400 for MIPS16 code when SAVE and
   RESTORE are not available.  We can then use unextended instructions
   to save and restore registers, and to allocate and deallocate the top
   part of the frame.  */
#define MIPS_MAX_FIRST_STACK_STEP                                       \
  (!TARGET_MIPS16 ? 0x7ff0                                              \
   : GENERATE_MIPS16E_SAVE_RESTORE ? 0x7f8                              \
   : TARGET_64BIT ? 0x100 : 0x400)
/* True if INSN is a mips.md pattern or asm statement.  */
#define USEFUL_INSN_P(INSN)                                             \
  (NONDEBUG_INSN_P (INSN)                                               \
   && GET_CODE (PATTERN (INSN)) != USE                                  \
   && GET_CODE (PATTERN (INSN)) != CLOBBER                              \
   && GET_CODE (PATTERN (INSN)) != ADDR_VEC                             \
   && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)

/* If INSN is a delayed branch sequence, return the first instruction
   in the sequence, otherwise return INSN itself.  */
#define SEQ_BEGIN(INSN)                                                 \
  (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE               \
   ? XVECEXP (PATTERN (INSN), 0, 0)                                     \
   : (INSN))

/* Likewise for the last instruction in a delayed branch sequence.  */
#define SEQ_END(INSN)                                                   \
  (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE               \
   ? XVECEXP (PATTERN (INSN), 0, XVECLEN (PATTERN (INSN), 0) - 1)       \
   : (INSN))

/* Execute the following loop body with SUBINSN set to each instruction
   between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive.  */
#define FOR_EACH_SUBINSN(SUBINSN, INSN)                                 \
  for ((SUBINSN) = SEQ_BEGIN (INSN);                                    \
       (SUBINSN) != NEXT_INSN (SEQ_END (INSN));                         \
       (SUBINSN) = NEXT_INSN (SUBINSN))
/* True if bit BIT is set in VALUE.  BIT must be less than the bit width
   of int; larger shifts would be undefined behavior.  */
#define BITSET_P(VALUE, BIT) (((VALUE) & (1 << (BIT))) != 0)
/* Return the opcode for a ptr_mode load of the form:

       l[wd]    DEST, OFFSET(BASE).  */
#define MIPS_LOAD_PTR(DEST, OFFSET, BASE)       \
  (((ptr_mode == DImode ? 0x37 : 0x23) << 26)   \
   | ((BASE) << 21)                             \
   | ((DEST) << 16)                             \
   | (OFFSET))

/* Return the opcode to move register SRC into register DEST.  */
#define MIPS_MOVE(DEST, SRC)            \
  ((TARGET_64BIT ? 0x2d : 0x21)         \
   | ((DEST) << 11)                     \
   | ((SRC) << 21))

/* Return the opcode for:

       lui      DEST, VALUE.  */
#define MIPS_LUI(DEST, VALUE) \
  ((0xf << 26) | ((DEST) << 16) | (VALUE))

/* Return the opcode to jump to register DEST.  */
#define MIPS_JR(DEST) \
  (((DEST) << 21) | 0x8)

/* Return the opcode for:

       bal     . + (1 + OFFSET) * 4.  */
#define MIPS_BAL(OFFSET) \
  ((0x1 << 26) | (0x11 << 16) | (OFFSET))

/* Return the usual opcode for a nop.  */
#define MIPS_NOP 0
/* Classifies an address.

   ADDRESS_REG
       A natural register + offset address.  The register satisfies
       mips_valid_base_register_p and the offset is a const_arith_operand.

   ADDRESS_LO_SUM
       A LO_SUM rtx.  The first operand is a valid base register and
       the second operand is a symbolic address.

   ADDRESS_CONST_INT
       A signed 16-bit constant address.

   ADDRESS_SYMBOLIC:
       A constant symbolic address.  */
enum mips_address_type {
  ADDRESS_REG,
  ADDRESS_LO_SUM,
  ADDRESS_CONST_INT,
  ADDRESS_SYMBOLIC
};
/* Macros to create an enumeration identifier for a function prototype.  */
#define MIPS_FTYPE_NAME1(A, B) MIPS_##A##_FTYPE_##B
#define MIPS_FTYPE_NAME2(A, B, C) MIPS_##A##_FTYPE_##B##_##C
#define MIPS_FTYPE_NAME3(A, B, C, D) MIPS_##A##_FTYPE_##B##_##C##_##D
#define MIPS_FTYPE_NAME4(A, B, C, D, E) MIPS_##A##_FTYPE_##B##_##C##_##D##_##E

/* Classifies the prototype of a built-in function.  One enumerator is
   generated per DEF_MIPS_FTYPE entry in mips-ftypes.def.  */
enum mips_function_type {
#define DEF_MIPS_FTYPE(NARGS, LIST) MIPS_FTYPE_NAME##NARGS LIST,
#include "config/mips/mips-ftypes.def"
#undef DEF_MIPS_FTYPE
  MIPS_MAX_FTYPE_MAX
};
/* Specifies how a built-in function should be converted into rtl.  */
enum mips_builtin_type {
  /* The function corresponds directly to an .md pattern.  The return
     value is mapped to operand 0 and the arguments are mapped to
     operands 1 and above.  */
  MIPS_BUILTIN_DIRECT,

  /* The function corresponds directly to an .md pattern.  There is no return
     value and the arguments are mapped to operands 0 and above.  */
  MIPS_BUILTIN_DIRECT_NO_TARGET,

  /* The function corresponds to a comparison instruction followed by
     a mips_cond_move_tf_ps pattern.  The first two arguments are the
     values to compare and the second two arguments are the vector
     operands for the movt.ps or movf.ps instruction (in assembly order).  */
  MIPS_BUILTIN_MOVF,
  MIPS_BUILTIN_MOVT,

  /* The function corresponds to a V2SF comparison instruction.  Operand 0
     of this instruction is the result of the comparison, which has mode
     CCV2 or CCV4.  The function arguments are mapped to operands 1 and
     above.  The function's return value is an SImode boolean that is
     true under the following conditions:

     MIPS_BUILTIN_CMP_ANY: one of the registers is true
     MIPS_BUILTIN_CMP_ALL: all of the registers are true
     MIPS_BUILTIN_CMP_LOWER: the first register is true
     MIPS_BUILTIN_CMP_UPPER: the second register is true.  */
  MIPS_BUILTIN_CMP_ANY,
  MIPS_BUILTIN_CMP_ALL,
  MIPS_BUILTIN_CMP_UPPER,
  MIPS_BUILTIN_CMP_LOWER,

  /* As above, but the instruction only sets a single $fcc register.  */
  MIPS_BUILTIN_CMP_SINGLE,

  /* For generating bposge32 branch instructions in MIPS32 DSP ASE.  */
  MIPS_BUILTIN_BPOSGE32
};
/* Invoke MACRO (COND) for each C.cond.fmt condition.  */
#define MIPS_FP_CONDITIONS(MACRO) \
  MACRO (f),    \
  MACRO (un),   \
  MACRO (eq),   \
  MACRO (ueq),  \
  MACRO (olt),  \
  MACRO (ult),  \
  MACRO (ole),  \
  MACRO (ule),  \
  MACRO (sf),   \
  MACRO (ngle), \
  MACRO (seq),  \
  MACRO (ngl),  \
  MACRO (lt),   \
  MACRO (nge),  \
  MACRO (le),   \
  MACRO (ngt)

/* Enumerates the codes above as MIPS_FP_COND_<X>.  */
#define DECLARE_MIPS_COND(X) MIPS_FP_COND_ ## X
enum mips_fp_condition {
  MIPS_FP_CONDITIONS (DECLARE_MIPS_COND)
};

/* Index X provides the string representation of MIPS_FP_COND_<X>.  */
#define STRINGIFY(X) #X
static const char *const mips_fp_conditions[] = {
  MIPS_FP_CONDITIONS (STRINGIFY)
};
/* Information about a function's frame layout.  */
271
struct GTY(())  mips_frame_info {
272
  /* The size of the frame in bytes.  */
273
  HOST_WIDE_INT total_size;
274
 
275
  /* The number of bytes allocated to variables.  */
276
  HOST_WIDE_INT var_size;
277
 
278
  /* The number of bytes allocated to outgoing function arguments.  */
279
  HOST_WIDE_INT args_size;
280
 
281
  /* The number of bytes allocated to the .cprestore slot, or 0 if there
282
     is no such slot.  */
283
  HOST_WIDE_INT cprestore_size;
284
 
285
  /* Bit X is set if the function saves or restores GPR X.  */
286
  unsigned int mask;
287
 
288
  /* Likewise FPR X.  */
289
  unsigned int fmask;
290
 
291
  /* Likewise doubleword accumulator X ($acX).  */
292
  unsigned int acc_mask;
293
 
294
  /* The number of GPRs, FPRs, doubleword accumulators and COP0
295
     registers saved.  */
296
  unsigned int num_gp;
297
  unsigned int num_fp;
298
  unsigned int num_acc;
299
  unsigned int num_cop0_regs;
300
 
301
  /* The offset of the topmost GPR, FPR, accumulator and COP0-register
302
     save slots from the top of the frame, or zero if no such slots are
303
     needed.  */
304
  HOST_WIDE_INT gp_save_offset;
305
  HOST_WIDE_INT fp_save_offset;
306
  HOST_WIDE_INT acc_save_offset;
307
  HOST_WIDE_INT cop0_save_offset;
308
 
309
  /* Likewise, but giving offsets from the bottom of the frame.  */
310
  HOST_WIDE_INT gp_sp_offset;
311
  HOST_WIDE_INT fp_sp_offset;
312
  HOST_WIDE_INT acc_sp_offset;
313
  HOST_WIDE_INT cop0_sp_offset;
314
 
315
  /* Similar, but the value passed to _mcount.  */
316
  HOST_WIDE_INT ra_fp_offset;
317
 
318
  /* The offset of arg_pointer_rtx from the bottom of the frame.  */
319
  HOST_WIDE_INT arg_pointer_offset;
320
 
321
  /* The offset of hard_frame_pointer_rtx from the bottom of the frame.  */
322
  HOST_WIDE_INT hard_frame_pointer_offset;
323
};
324
 
325
/* Per-function machine-dependent state, attached to each function's
   cfun->machine.  */
struct GTY(())  machine_function {
  /* The register returned by mips16_gp_pseudo_reg; see there for details.  */
  rtx mips16_gp_pseudo_rtx;

  /* The number of extra stack bytes taken up by register varargs.
     This area is allocated by the callee at the very top of the frame.  */
  int varargs_size;

  /* The current frame information, calculated by mips_compute_frame_info.  */
  struct mips_frame_info frame;

  /* The register to use as the function's global pointer, or INVALID_REGNUM
     if the function doesn't need one.  */
  unsigned int global_pointer;

  /* How many instructions it takes to load a label into $AT, or 0 if
     this property hasn't yet been calculated.  */
  unsigned int load_label_num_insns;

  /* True if mips_adjust_insn_length should ignore an instruction's
     hazard attribute.  */
  bool ignore_hazard_length_p;

  /* True if the whole function is suitable for .set noreorder and
     .set nomacro.  */
  bool all_noreorder_p;

  /* True if the function has "inflexible" and "flexible" references
     to the global pointer.  See mips_cfun_has_inflexible_gp_ref_p
     and mips_cfun_has_flexible_gp_ref_p for details.  */
  bool has_inflexible_gp_insn_p;
  bool has_flexible_gp_insn_p;

  /* True if the function's prologue must load the global pointer
     value into pic_offset_table_rtx and store the same value in
     the function's cprestore slot (if any).  Even if this value
     is currently false, we may decide to set it to true later;
     see mips_must_initialize_gp_p () for details.  */
  bool must_initialize_gp_p;

  /* True if the current function must restore $gp after any potential
     clobber.  This value is only meaningful during the first post-epilogue
     split_insns pass; see mips_must_initialize_gp_p () for details.  */
  bool must_restore_gp_when_clobbered_p;

  /* True if this is an interrupt handler.  */
  bool interrupt_handler_p;

  /* True if this is an interrupt handler that uses shadow registers.  */
  bool use_shadow_register_set_p;

  /* True if this is an interrupt handler that should keep interrupts
     masked.  */
  bool keep_interrupts_masked_p;

  /* True if this is an interrupt handler that should use DERET
     instead of ERET.  */
  bool use_debug_exception_return_p;
};
/* Information about a single argument.  */
struct mips_arg_info {
  /* True if the argument is passed in a floating-point register, or
     would have been if we hadn't run out of registers.  */
  bool fpr_p;

  /* The number of words passed in registers, rounded up.  */
  unsigned int reg_words;

  /* For EABI, the offset of the first register from GP_ARG_FIRST or
     FP_ARG_FIRST.  For other ABIs, the offset of the first register from
     the start of the ABI's argument structure (see the CUMULATIVE_ARGS
     comment for details).

     The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
     on the stack.  */
  unsigned int reg_offset;

  /* The number of words that must be passed on the stack, rounded up.  */
  unsigned int stack_words;

  /* The offset from the start of the stack overflow area of the argument's
     first stack word.  Only meaningful when STACK_WORDS is nonzero.  */
  unsigned int stack_offset;
};
/* Information about an address described by mips_address_type.
412
 
413
   ADDRESS_CONST_INT
414
       No fields are used.
415
 
416
   ADDRESS_REG
417
       REG is the base register and OFFSET is the constant offset.
418
 
419
   ADDRESS_LO_SUM
420
       REG and OFFSET are the operands to the LO_SUM and SYMBOL_TYPE
421
       is the type of symbol it references.
422
 
423
   ADDRESS_SYMBOLIC
424
       SYMBOL_TYPE is the type of symbol that the address references.  */
425
struct mips_address_info {
426
  enum mips_address_type type;
427
  rtx reg;
428
  rtx offset;
429
  enum mips_symbol_type symbol_type;
430
};
431
 
432
/* One stage in a constant building sequence.  These sequences have
433
   the form:
434
 
435
        A = VALUE[0]
436
        A = A CODE[1] VALUE[1]
437
        A = A CODE[2] VALUE[2]
438
        ...
439
 
440
   where A is an accumulator, each CODE[i] is a binary rtl operation
441
   and each VALUE[i] is a constant integer.  CODE[0] is undefined.  */
442
struct mips_integer_op {
443
  enum rtx_code code;
444
  unsigned HOST_WIDE_INT value;
445
};
446
 
447
/* The largest number of operations needed to load an integer constant.
448
   The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI.
449
   When the lowest bit is clear, we can try, but reject a sequence with
450
   an extra SLL at the end.  */
451
#define MIPS_MAX_INTEGER_OPS 7
452
 
453
/* Information about a MIPS16e SAVE or RESTORE instruction.  */
454
struct mips16e_save_restore_info {
455
  /* The number of argument registers saved by a SAVE instruction.
456
 
457
  unsigned int nargs;
458
 
459
  /* Bit X is set if the instruction saves or restores GPR X.  */
460
  unsigned int mask;
461
 
462
  /* The total number of bytes to allocate.  */
463
  HOST_WIDE_INT size;
464
};
465
 
466
/* Costs of various operations on the different architectures.  */

struct mips_rtx_cost_data
{
  unsigned short fp_add;
  unsigned short fp_mult_sf;
  unsigned short fp_mult_df;
  unsigned short fp_div_sf;
  unsigned short fp_div_df;
  unsigned short int_mult_si;
  unsigned short int_mult_di;
  unsigned short int_div_si;
  unsigned short int_div_di;
  unsigned short branch_cost;
  unsigned short memory_latency;
};
/* Global variables for machine-dependent things.  */

/* The -G setting, or the configuration's default small-data limit if
   no -G option is given.  */
static unsigned int mips_small_data_threshold;

/* The number of file directives written by mips_output_filename.  */
int num_source_filenames;

/* The name that appeared in the last .file directive written by
   mips_output_filename, or "" if mips_output_filename hasn't
   written anything yet.  */
const char *current_function_file = "";

/* A label counter used by PUT_SDB_BLOCK_START and PUT_SDB_BLOCK_END.  */
int sdb_label_count;
/* Arrays that map GCC register numbers to debugger register numbers.  */
501
int mips_dbx_regno[FIRST_PSEUDO_REGISTER];
502
int mips_dwarf_regno[FIRST_PSEUDO_REGISTER];
503
 
504
/* Information about the current function's epilogue, used only while
505
   expanding it.  */
506
static struct {
507
  /* A list of queued REG_CFA_RESTORE notes.  */
508
  rtx cfa_restores;
509
 
510
  /* The CFA is currently defined as CFA_REG + CFA_OFFSET.  */
511
  rtx cfa_reg;
512
  HOST_WIDE_INT cfa_offset;
513
 
514
  /* The offset of the CFA from the stack pointer while restoring
515
     registers.  */
516
  HOST_WIDE_INT cfa_restore_sp_offset;
517
} mips_epilogue;
518
 
519
/* The nesting depth of the PRINT_OPERAND '%(', '%<' and '%[' constructs.  */
520
struct mips_asm_switch mips_noreorder = { "reorder", 0 };
521
struct mips_asm_switch mips_nomacro = { "macro", 0 };
522
struct mips_asm_switch mips_noat = { "at", 0 };
523
 
524
/* True if we're writing out a branch-likely instruction rather than a
525
   normal branch.  */
526
static bool mips_branch_likely;
527
 
528
/* The current instruction-set architecture.  */
529
enum processor mips_arch;
530
const struct mips_cpu_info *mips_arch_info;
531
 
532
/* The processor that we should tune the code for.  */
533
enum processor mips_tune;
534
const struct mips_cpu_info *mips_tune_info;
535
 
536
/* The ISA level associated with mips_arch.  */
537
int mips_isa;
538
 
539
/* The architecture selected by -mipsN, or null if -mipsN wasn't used.  */
540
static const struct mips_cpu_info *mips_isa_option_info;
541
 
542
/* Which cost information to use.  */
543
static const struct mips_rtx_cost_data *mips_cost;
544
 
545
/* The ambient target flags, excluding MASK_MIPS16.  */
static int mips_base_target_flags;

/* True if MIPS16 is the default mode.  */
bool mips_base_mips16;

/* The ambient values of other global variables.  */
static int mips_base_schedule_insns; /* flag_schedule_insns */
static int mips_base_reorder_blocks_and_partition; /* flag_reorder... */
static int mips_base_move_loop_invariants; /* flag_move_loop_invariants */
static int mips_base_align_loops; /* align_loops */
static int mips_base_align_jumps; /* align_jumps */
static int mips_base_align_functions; /* align_functions */
/* Index [M][R] is true if register R is allowed to hold a value of mode M.  */
560
bool mips_hard_regno_mode_ok[(int) MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];
561
 
562
/* Index C is true if character C is a valid PRINT_OPERAND punctation
563
   character.  */
564
static bool mips_print_operand_punct[256];
565
 
566
static GTY (()) int mips_output_filename_first_time = 1;
567
 
568
/* mips_split_p[X] is true if symbols of type X can be split by
569
   mips_split_symbol.  */
570
bool mips_split_p[NUM_SYMBOL_TYPES];
571
 
572
/* mips_split_hi_p[X] is true if the high parts of symbols of type X
573
   can be split by mips_split_symbol.  */
574
bool mips_split_hi_p[NUM_SYMBOL_TYPES];
575
 
576
/* mips_use_pcrel_pool_p[X] is true if symbols of type X should be
577
   forced into a PC-relative constant pool.  */
578
bool mips_use_pcrel_pool_p[NUM_SYMBOL_TYPES];
579
 
580
/* mips_lo_relocs[X] is the relocation to use when a symbol of type X
581
   appears in a LO_SUM.  It can be null if such LO_SUMs aren't valid or
582
   if they are matched by a special .md file pattern.  */
583
const char *mips_lo_relocs[NUM_SYMBOL_TYPES];
584
 
585
/* Likewise for HIGHs.  */
586
const char *mips_hi_relocs[NUM_SYMBOL_TYPES];
587
 
588
/* Target state for MIPS16.  */
589
struct target_globals *mips16_globals;
590
 
591
/* Cached value of can_issue_more. This is cached in mips_variable_issue hook
592
   and returned from mips_sched_reorder2.  */
593
static int cached_can_issue_more;
594
 
595
/* True if the output uses __mips16_rdhwr.  */
596
static bool mips_need_mips16_rdhwr_p;
597
 
598
/* Index R is the smallest register class that contains register R.  */
599
const enum reg_class mips_regno_to_class[FIRST_PSEUDO_REGISTER] = {
600
  LEA_REGS,     LEA_REGS,       M16_REGS,       V1_REG,
601
  M16_REGS,     M16_REGS,       M16_REGS,       M16_REGS,
602
  LEA_REGS,     LEA_REGS,       LEA_REGS,       LEA_REGS,
603
  LEA_REGS,     LEA_REGS,       LEA_REGS,       LEA_REGS,
604
  M16_REGS,     M16_REGS,       LEA_REGS,       LEA_REGS,
605
  LEA_REGS,     LEA_REGS,       LEA_REGS,       LEA_REGS,
606
  T_REG,        PIC_FN_ADDR_REG, LEA_REGS,      LEA_REGS,
607
  LEA_REGS,     LEA_REGS,       LEA_REGS,       LEA_REGS,
608
  FP_REGS,      FP_REGS,        FP_REGS,        FP_REGS,
609
  FP_REGS,      FP_REGS,        FP_REGS,        FP_REGS,
610
  FP_REGS,      FP_REGS,        FP_REGS,        FP_REGS,
611
  FP_REGS,      FP_REGS,        FP_REGS,        FP_REGS,
612
  FP_REGS,      FP_REGS,        FP_REGS,        FP_REGS,
613
  FP_REGS,      FP_REGS,        FP_REGS,        FP_REGS,
614
  FP_REGS,      FP_REGS,        FP_REGS,        FP_REGS,
615
  FP_REGS,      FP_REGS,        FP_REGS,        FP_REGS,
616
  MD0_REG,      MD1_REG,        NO_REGS,        ST_REGS,
617
  ST_REGS,      ST_REGS,        ST_REGS,        ST_REGS,
618
  ST_REGS,      ST_REGS,        ST_REGS,        NO_REGS,
619
  NO_REGS,      FRAME_REGS,     FRAME_REGS,     NO_REGS,
620
  COP0_REGS,    COP0_REGS,      COP0_REGS,      COP0_REGS,
621
  COP0_REGS,    COP0_REGS,      COP0_REGS,      COP0_REGS,
622
  COP0_REGS,    COP0_REGS,      COP0_REGS,      COP0_REGS,
623
  COP0_REGS,    COP0_REGS,      COP0_REGS,      COP0_REGS,
624
  COP0_REGS,    COP0_REGS,      COP0_REGS,      COP0_REGS,
625
  COP0_REGS,    COP0_REGS,      COP0_REGS,      COP0_REGS,
626
  COP0_REGS,    COP0_REGS,      COP0_REGS,      COP0_REGS,
627
  COP0_REGS,    COP0_REGS,      COP0_REGS,      COP0_REGS,
628
  COP2_REGS,    COP2_REGS,      COP2_REGS,      COP2_REGS,
629
  COP2_REGS,    COP2_REGS,      COP2_REGS,      COP2_REGS,
630
  COP2_REGS,    COP2_REGS,      COP2_REGS,      COP2_REGS,
631
  COP2_REGS,    COP2_REGS,      COP2_REGS,      COP2_REGS,
632
  COP2_REGS,    COP2_REGS,      COP2_REGS,      COP2_REGS,
633
  COP2_REGS,    COP2_REGS,      COP2_REGS,      COP2_REGS,
634
  COP2_REGS,    COP2_REGS,      COP2_REGS,      COP2_REGS,
635
  COP2_REGS,    COP2_REGS,      COP2_REGS,      COP2_REGS,
636
  COP3_REGS,    COP3_REGS,      COP3_REGS,      COP3_REGS,
637
  COP3_REGS,    COP3_REGS,      COP3_REGS,      COP3_REGS,
638
  COP3_REGS,    COP3_REGS,      COP3_REGS,      COP3_REGS,
639
  COP3_REGS,    COP3_REGS,      COP3_REGS,      COP3_REGS,
640
  COP3_REGS,    COP3_REGS,      COP3_REGS,      COP3_REGS,
641
  COP3_REGS,    COP3_REGS,      COP3_REGS,      COP3_REGS,
642
  COP3_REGS,    COP3_REGS,      COP3_REGS,      COP3_REGS,
643
  COP3_REGS,    COP3_REGS,      COP3_REGS,      COP3_REGS,
644
  DSP_ACC_REGS, DSP_ACC_REGS,   DSP_ACC_REGS,   DSP_ACC_REGS,
645
  DSP_ACC_REGS, DSP_ACC_REGS,   ALL_REGS,       ALL_REGS,
646
  ALL_REGS,     ALL_REGS,       ALL_REGS,       ALL_REGS
647
};
648
 
649
/* The value of TARGET_ATTRIBUTE_TABLE.  */
650
static const struct attribute_spec mips_attribute_table[] = {
651
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
652
       om_diagnostic } */
653
  { "long_call",   0, 0, false, true,  true,  NULL, false },
654
  { "far",         0, 0, false, true,  true,  NULL, false },
655
  { "near",        0, 0, false, true,  true,  NULL, false },
656
  /* We would really like to treat "mips16" and "nomips16" as type
657
     attributes, but GCC doesn't provide the hooks we need to support
658
     the right conversion rules.  As declaration attributes, they affect
659
     code generation but don't carry other semantics.  */
660
  { "mips16",      0, 0, true,  false, false, NULL, false },
661
  { "nomips16",    0, 0, true,  false, false, NULL, false },
662
  /* Allow functions to be specified as interrupt handlers */
663
  { "interrupt",   0, 0, false, true,  true, NULL, false },
664
  { "use_shadow_register_set",  0, 0, false, true,  true, NULL, false },
665
  { "keep_interrupts_masked",   0, 0, false, true,  true, NULL, false },
666
  { "use_debug_exception_return", 0, 0, false, true,  true, NULL, false },
667
  { NULL,          0, 0, false, false, false, NULL, false }
668
};
669
 
670
/* A table describing all the processors GCC knows about; see
671
   mips-cpus.def for details.  */
672
static const struct mips_cpu_info mips_cpu_info_table[] = {
673
#define MIPS_CPU(NAME, CPU, ISA, FLAGS) \
674
  { NAME, CPU, ISA, FLAGS },
675
#include "mips-cpus.def"
676
#undef MIPS_CPU
677
};
678
 
679
/* Default costs.  If these are used for a processor we should look
   up the actual costs.  */
#define DEFAULT_COSTS COSTS_N_INSNS (6),  /* fp_add */       \
                      COSTS_N_INSNS (7),  /* fp_mult_sf */   \
                      COSTS_N_INSNS (8),  /* fp_mult_df */   \
                      COSTS_N_INSNS (23), /* fp_div_sf */    \
                      COSTS_N_INSNS (36), /* fp_div_df */    \
                      COSTS_N_INSNS (10), /* int_mult_si */  \
                      COSTS_N_INSNS (10), /* int_mult_di */  \
                      COSTS_N_INSNS (69), /* int_div_si */   \
                      COSTS_N_INSNS (69), /* int_div_di */   \
                                       2, /* branch_cost */  \
                                       4  /* memory_latency */

/* Floating-point costs for processors without an FPU.  Just assume that
   all floating-point libcalls are very expensive.  */
#define SOFT_FP_COSTS COSTS_N_INSNS (256), /* fp_add */       \
                      COSTS_N_INSNS (256), /* fp_mult_sf */   \
                      COSTS_N_INSNS (256), /* fp_mult_df */   \
                      COSTS_N_INSNS (256), /* fp_div_sf */    \
                      COSTS_N_INSNS (256)  /* fp_div_df */
/* Costs to use when optimizing for size.  */
702
static const struct mips_rtx_cost_data mips_rtx_cost_optimize_size = {
703
  COSTS_N_INSNS (1),            /* fp_add */
704
  COSTS_N_INSNS (1),            /* fp_mult_sf */
705
  COSTS_N_INSNS (1),            /* fp_mult_df */
706
  COSTS_N_INSNS (1),            /* fp_div_sf */
707
  COSTS_N_INSNS (1),            /* fp_div_df */
708
  COSTS_N_INSNS (1),            /* int_mult_si */
709
  COSTS_N_INSNS (1),            /* int_mult_di */
710
  COSTS_N_INSNS (1),            /* int_div_si */
711
  COSTS_N_INSNS (1),            /* int_div_di */
712
                   2,           /* branch_cost */
713
                   4            /* memory_latency */
714
};
715
 
716
/* Costs to use when optimizing for speed, indexed by processor.  */
717
static const struct mips_rtx_cost_data
718
  mips_rtx_cost_data[NUM_PROCESSOR_VALUES] = {
719
  { /* R3000 */
720
    COSTS_N_INSNS (2),            /* fp_add */
721
    COSTS_N_INSNS (4),            /* fp_mult_sf */
722
    COSTS_N_INSNS (5),            /* fp_mult_df */
723
    COSTS_N_INSNS (12),           /* fp_div_sf */
724
    COSTS_N_INSNS (19),           /* fp_div_df */
725
    COSTS_N_INSNS (12),           /* int_mult_si */
726
    COSTS_N_INSNS (12),           /* int_mult_di */
727
    COSTS_N_INSNS (35),           /* int_div_si */
728
    COSTS_N_INSNS (35),           /* int_div_di */
729
                     1,           /* branch_cost */
730
                     4            /* memory_latency */
731
  },
732
  { /* 4KC */
733
    SOFT_FP_COSTS,
734
    COSTS_N_INSNS (6),            /* int_mult_si */
735
    COSTS_N_INSNS (6),            /* int_mult_di */
736
    COSTS_N_INSNS (36),           /* int_div_si */
737
    COSTS_N_INSNS (36),           /* int_div_di */
738
                     1,           /* branch_cost */
739
                     4            /* memory_latency */
740
  },
741
  { /* 4KP */
742
    SOFT_FP_COSTS,
743
    COSTS_N_INSNS (36),           /* int_mult_si */
744
    COSTS_N_INSNS (36),           /* int_mult_di */
745
    COSTS_N_INSNS (37),           /* int_div_si */
746
    COSTS_N_INSNS (37),           /* int_div_di */
747
                     1,           /* branch_cost */
748
                     4            /* memory_latency */
749
  },
750
  { /* 5KC */
751
    SOFT_FP_COSTS,
752
    COSTS_N_INSNS (4),            /* int_mult_si */
753
    COSTS_N_INSNS (11),           /* int_mult_di */
754
    COSTS_N_INSNS (36),           /* int_div_si */
755
    COSTS_N_INSNS (68),           /* int_div_di */
756
                     1,           /* branch_cost */
757
                     4            /* memory_latency */
758
  },
759
  { /* 5KF */
760
    COSTS_N_INSNS (4),            /* fp_add */
761
    COSTS_N_INSNS (4),            /* fp_mult_sf */
762
    COSTS_N_INSNS (5),            /* fp_mult_df */
763
    COSTS_N_INSNS (17),           /* fp_div_sf */
764
    COSTS_N_INSNS (32),           /* fp_div_df */
765
    COSTS_N_INSNS (4),            /* int_mult_si */
766
    COSTS_N_INSNS (11),           /* int_mult_di */
767
    COSTS_N_INSNS (36),           /* int_div_si */
768
    COSTS_N_INSNS (68),           /* int_div_di */
769
                     1,           /* branch_cost */
770
                     4            /* memory_latency */
771
  },
772
  { /* 20KC */
773
    COSTS_N_INSNS (4),            /* fp_add */
774
    COSTS_N_INSNS (4),            /* fp_mult_sf */
775
    COSTS_N_INSNS (5),            /* fp_mult_df */
776
    COSTS_N_INSNS (17),           /* fp_div_sf */
777
    COSTS_N_INSNS (32),           /* fp_div_df */
778
    COSTS_N_INSNS (4),            /* int_mult_si */
779
    COSTS_N_INSNS (7),            /* int_mult_di */
780
    COSTS_N_INSNS (42),           /* int_div_si */
781
    COSTS_N_INSNS (72),           /* int_div_di */
782
                     1,           /* branch_cost */
783
                     4            /* memory_latency */
784
  },
785
  { /* 24KC */
786
    SOFT_FP_COSTS,
787
    COSTS_N_INSNS (5),            /* int_mult_si */
788
    COSTS_N_INSNS (5),            /* int_mult_di */
789
    COSTS_N_INSNS (41),           /* int_div_si */
790
    COSTS_N_INSNS (41),           /* int_div_di */
791
                     1,           /* branch_cost */
792
                     4            /* memory_latency */
793
  },
794
  { /* 24KF2_1 */
795
    COSTS_N_INSNS (8),            /* fp_add */
796
    COSTS_N_INSNS (8),            /* fp_mult_sf */
797
    COSTS_N_INSNS (10),           /* fp_mult_df */
798
    COSTS_N_INSNS (34),           /* fp_div_sf */
799
    COSTS_N_INSNS (64),           /* fp_div_df */
800
    COSTS_N_INSNS (5),            /* int_mult_si */
801
    COSTS_N_INSNS (5),            /* int_mult_di */
802
    COSTS_N_INSNS (41),           /* int_div_si */
803
    COSTS_N_INSNS (41),           /* int_div_di */
804
                     1,           /* branch_cost */
805
                     4            /* memory_latency */
806
  },
807
  { /* 24KF1_1 */
808
    COSTS_N_INSNS (4),            /* fp_add */
809
    COSTS_N_INSNS (4),            /* fp_mult_sf */
810
    COSTS_N_INSNS (5),            /* fp_mult_df */
811
    COSTS_N_INSNS (17),           /* fp_div_sf */
812
    COSTS_N_INSNS (32),           /* fp_div_df */
813
    COSTS_N_INSNS (5),            /* int_mult_si */
814
    COSTS_N_INSNS (5),            /* int_mult_di */
815
    COSTS_N_INSNS (41),           /* int_div_si */
816
    COSTS_N_INSNS (41),           /* int_div_di */
817
                     1,           /* branch_cost */
818
                     4            /* memory_latency */
819
  },
820
  { /* 74KC */
821
    SOFT_FP_COSTS,
822
    COSTS_N_INSNS (5),            /* int_mult_si */
823
    COSTS_N_INSNS (5),            /* int_mult_di */
824
    COSTS_N_INSNS (41),           /* int_div_si */
825
    COSTS_N_INSNS (41),           /* int_div_di */
826
                     1,           /* branch_cost */
827
                     4            /* memory_latency */
828
  },
829
  { /* 74KF2_1 */
830
    COSTS_N_INSNS (8),            /* fp_add */
831
    COSTS_N_INSNS (8),            /* fp_mult_sf */
832
    COSTS_N_INSNS (10),           /* fp_mult_df */
833
    COSTS_N_INSNS (34),           /* fp_div_sf */
834
    COSTS_N_INSNS (64),           /* fp_div_df */
835
    COSTS_N_INSNS (5),            /* int_mult_si */
836
    COSTS_N_INSNS (5),            /* int_mult_di */
837
    COSTS_N_INSNS (41),           /* int_div_si */
838
    COSTS_N_INSNS (41),           /* int_div_di */
839
                     1,           /* branch_cost */
840
                     4            /* memory_latency */
841
  },
842
  { /* 74KF1_1 */
843
    COSTS_N_INSNS (4),            /* fp_add */
844
    COSTS_N_INSNS (4),            /* fp_mult_sf */
845
    COSTS_N_INSNS (5),            /* fp_mult_df */
846
    COSTS_N_INSNS (17),           /* fp_div_sf */
847
    COSTS_N_INSNS (32),           /* fp_div_df */
848
    COSTS_N_INSNS (5),            /* int_mult_si */
849
    COSTS_N_INSNS (5),            /* int_mult_di */
850
    COSTS_N_INSNS (41),           /* int_div_si */
851
    COSTS_N_INSNS (41),           /* int_div_di */
852
                     1,           /* branch_cost */
853
                     4            /* memory_latency */
854
  },
855
  { /* 74KF3_2 */
856
    COSTS_N_INSNS (6),            /* fp_add */
857
    COSTS_N_INSNS (6),            /* fp_mult_sf */
858
    COSTS_N_INSNS (7),            /* fp_mult_df */
859
    COSTS_N_INSNS (25),           /* fp_div_sf */
860
    COSTS_N_INSNS (48),           /* fp_div_df */
861
    COSTS_N_INSNS (5),            /* int_mult_si */
862
    COSTS_N_INSNS (5),            /* int_mult_di */
863
    COSTS_N_INSNS (41),           /* int_div_si */
864
    COSTS_N_INSNS (41),           /* int_div_di */
865
                     1,           /* branch_cost */
866
                     4            /* memory_latency */
867
  },
868
  { /* Loongson-2E */
869
    DEFAULT_COSTS
870
  },
871
  { /* Loongson-2F */
872
    DEFAULT_COSTS
873
  },
874
  { /* Loongson-3A */
875
    DEFAULT_COSTS
876
  },
877
  { /* M4k */
878
    DEFAULT_COSTS
879
  },
880
    /* Octeon */
881
  {
882
    SOFT_FP_COSTS,
883
    COSTS_N_INSNS (5),            /* int_mult_si */
884
    COSTS_N_INSNS (5),            /* int_mult_di */
885
    COSTS_N_INSNS (72),           /* int_div_si */
886
    COSTS_N_INSNS (72),           /* int_div_di */
887
                     1,           /* branch_cost */
888
                     4            /* memory_latency */
889
  },
890
    /* Octeon II */
891
  {
892
    SOFT_FP_COSTS,
893
    COSTS_N_INSNS (6),            /* int_mult_si */
894
    COSTS_N_INSNS (6),            /* int_mult_di */
895
    COSTS_N_INSNS (18),           /* int_div_si */
896
    COSTS_N_INSNS (35),           /* int_div_di */
897
                     4,           /* branch_cost */
898
                     4            /* memory_latency */
899
  },
900
  { /* R3900 */
901
    COSTS_N_INSNS (2),            /* fp_add */
902
    COSTS_N_INSNS (4),            /* fp_mult_sf */
903
    COSTS_N_INSNS (5),            /* fp_mult_df */
904
    COSTS_N_INSNS (12),           /* fp_div_sf */
905
    COSTS_N_INSNS (19),           /* fp_div_df */
906
    COSTS_N_INSNS (2),            /* int_mult_si */
907
    COSTS_N_INSNS (2),            /* int_mult_di */
908
    COSTS_N_INSNS (35),           /* int_div_si */
909
    COSTS_N_INSNS (35),           /* int_div_di */
910
                     1,           /* branch_cost */
911
                     4            /* memory_latency */
912
  },
913
  { /* R6000 */
914
    COSTS_N_INSNS (3),            /* fp_add */
915
    COSTS_N_INSNS (5),            /* fp_mult_sf */
916
    COSTS_N_INSNS (6),            /* fp_mult_df */
917
    COSTS_N_INSNS (15),           /* fp_div_sf */
918
    COSTS_N_INSNS (16),           /* fp_div_df */
919
    COSTS_N_INSNS (17),           /* int_mult_si */
920
    COSTS_N_INSNS (17),           /* int_mult_di */
921
    COSTS_N_INSNS (38),           /* int_div_si */
922
    COSTS_N_INSNS (38),           /* int_div_di */
923
                     2,           /* branch_cost */
924
                     6            /* memory_latency */
925
  },
926
  { /* R4000 */
927
     COSTS_N_INSNS (6),           /* fp_add */
928
     COSTS_N_INSNS (7),           /* fp_mult_sf */
929
     COSTS_N_INSNS (8),           /* fp_mult_df */
930
     COSTS_N_INSNS (23),          /* fp_div_sf */
931
     COSTS_N_INSNS (36),          /* fp_div_df */
932
     COSTS_N_INSNS (10),          /* int_mult_si */
933
     COSTS_N_INSNS (10),          /* int_mult_di */
934
     COSTS_N_INSNS (69),          /* int_div_si */
935
     COSTS_N_INSNS (69),          /* int_div_di */
936
                      2,          /* branch_cost */
937
                      6           /* memory_latency */
938
  },
939
  { /* R4100 */
940
    DEFAULT_COSTS
941
  },
942
  { /* R4111 */
943
    DEFAULT_COSTS
944
  },
945
  { /* R4120 */
946
    DEFAULT_COSTS
947
  },
948
  { /* R4130 */
949
    /* The only costs that appear to be updated here are
950
       integer multiplication.  */
951
    SOFT_FP_COSTS,
952
    COSTS_N_INSNS (4),            /* int_mult_si */
953
    COSTS_N_INSNS (6),            /* int_mult_di */
954
    COSTS_N_INSNS (69),           /* int_div_si */
955
    COSTS_N_INSNS (69),           /* int_div_di */
956
                     1,           /* branch_cost */
957
                     4            /* memory_latency */
958
  },
959
  { /* R4300 */
960
    DEFAULT_COSTS
961
  },
962
  { /* R4600 */
963
    DEFAULT_COSTS
964
  },
965
  { /* R4650 */
966
    DEFAULT_COSTS
967
  },
968
  { /* R5000 */
969
    COSTS_N_INSNS (6),            /* fp_add */
970
    COSTS_N_INSNS (4),            /* fp_mult_sf */
971
    COSTS_N_INSNS (5),            /* fp_mult_df */
972
    COSTS_N_INSNS (23),           /* fp_div_sf */
973
    COSTS_N_INSNS (36),           /* fp_div_df */
974
    COSTS_N_INSNS (5),            /* int_mult_si */
975
    COSTS_N_INSNS (5),            /* int_mult_di */
976
    COSTS_N_INSNS (36),           /* int_div_si */
977
    COSTS_N_INSNS (36),           /* int_div_di */
978
                     1,           /* branch_cost */
979
                     4            /* memory_latency */
980
  },
981
  { /* R5400 */
982
    COSTS_N_INSNS (6),            /* fp_add */
983
    COSTS_N_INSNS (5),            /* fp_mult_sf */
984
    COSTS_N_INSNS (6),            /* fp_mult_df */
985
    COSTS_N_INSNS (30),           /* fp_div_sf */
986
    COSTS_N_INSNS (59),           /* fp_div_df */
987
    COSTS_N_INSNS (3),            /* int_mult_si */
988
    COSTS_N_INSNS (4),            /* int_mult_di */
989
    COSTS_N_INSNS (42),           /* int_div_si */
990
    COSTS_N_INSNS (74),           /* int_div_di */
991
                     1,           /* branch_cost */
992
                     4            /* memory_latency */
993
  },
994
  { /* R5500 */
995
    COSTS_N_INSNS (6),            /* fp_add */
996
    COSTS_N_INSNS (5),            /* fp_mult_sf */
997
    COSTS_N_INSNS (6),            /* fp_mult_df */
998
    COSTS_N_INSNS (30),           /* fp_div_sf */
999
    COSTS_N_INSNS (59),           /* fp_div_df */
1000
    COSTS_N_INSNS (5),            /* int_mult_si */
1001
    COSTS_N_INSNS (9),            /* int_mult_di */
1002
    COSTS_N_INSNS (42),           /* int_div_si */
1003
    COSTS_N_INSNS (74),           /* int_div_di */
1004
                     1,           /* branch_cost */
1005
                     4            /* memory_latency */
1006
  },
1007
  { /* R7000 */
1008
    /* The only costs that are changed here are
1009
       integer multiplication.  */
1010
    COSTS_N_INSNS (6),            /* fp_add */
1011
    COSTS_N_INSNS (7),            /* fp_mult_sf */
1012
    COSTS_N_INSNS (8),            /* fp_mult_df */
1013
    COSTS_N_INSNS (23),           /* fp_div_sf */
1014
    COSTS_N_INSNS (36),           /* fp_div_df */
1015
    COSTS_N_INSNS (5),            /* int_mult_si */
1016
    COSTS_N_INSNS (9),            /* int_mult_di */
1017
    COSTS_N_INSNS (69),           /* int_div_si */
1018
    COSTS_N_INSNS (69),           /* int_div_di */
1019
                     1,           /* branch_cost */
1020
                     4            /* memory_latency */
1021
  },
1022
  { /* R8000 */
1023
    DEFAULT_COSTS
1024
  },
1025
  { /* R9000 */
1026
    /* The only costs that are changed here are
1027
       integer multiplication.  */
1028
    COSTS_N_INSNS (6),            /* fp_add */
1029
    COSTS_N_INSNS (7),            /* fp_mult_sf */
1030
    COSTS_N_INSNS (8),            /* fp_mult_df */
1031
    COSTS_N_INSNS (23),           /* fp_div_sf */
1032
    COSTS_N_INSNS (36),           /* fp_div_df */
1033
    COSTS_N_INSNS (3),            /* int_mult_si */
1034
    COSTS_N_INSNS (8),            /* int_mult_di */
1035
    COSTS_N_INSNS (69),           /* int_div_si */
1036
    COSTS_N_INSNS (69),           /* int_div_di */
1037
                     1,           /* branch_cost */
1038
                     4            /* memory_latency */
1039
  },
1040
  { /* R1x000 */
1041
    COSTS_N_INSNS (2),            /* fp_add */
1042
    COSTS_N_INSNS (2),            /* fp_mult_sf */
1043
    COSTS_N_INSNS (2),            /* fp_mult_df */
1044
    COSTS_N_INSNS (12),           /* fp_div_sf */
1045
    COSTS_N_INSNS (19),           /* fp_div_df */
1046
    COSTS_N_INSNS (5),            /* int_mult_si */
1047
    COSTS_N_INSNS (9),            /* int_mult_di */
1048
    COSTS_N_INSNS (34),           /* int_div_si */
1049
    COSTS_N_INSNS (66),           /* int_div_di */
1050
                     1,           /* branch_cost */
1051
                     4            /* memory_latency */
1052
  },
1053
  { /* SB1 */
1054
    /* These costs are the same as the SB-1A below.  */
1055
    COSTS_N_INSNS (4),            /* fp_add */
1056
    COSTS_N_INSNS (4),            /* fp_mult_sf */
1057
    COSTS_N_INSNS (4),            /* fp_mult_df */
1058
    COSTS_N_INSNS (24),           /* fp_div_sf */
1059
    COSTS_N_INSNS (32),           /* fp_div_df */
1060
    COSTS_N_INSNS (3),            /* int_mult_si */
1061
    COSTS_N_INSNS (4),            /* int_mult_di */
1062
    COSTS_N_INSNS (36),           /* int_div_si */
1063
    COSTS_N_INSNS (68),           /* int_div_di */
1064
                     1,           /* branch_cost */
1065
                     4            /* memory_latency */
1066
  },
1067
  { /* SB1-A */
1068
    /* These costs are the same as the SB-1 above.  */
1069
    COSTS_N_INSNS (4),            /* fp_add */
1070
    COSTS_N_INSNS (4),            /* fp_mult_sf */
1071
    COSTS_N_INSNS (4),            /* fp_mult_df */
1072
    COSTS_N_INSNS (24),           /* fp_div_sf */
1073
    COSTS_N_INSNS (32),           /* fp_div_df */
1074
    COSTS_N_INSNS (3),            /* int_mult_si */
1075
    COSTS_N_INSNS (4),            /* int_mult_di */
1076
    COSTS_N_INSNS (36),           /* int_div_si */
1077
    COSTS_N_INSNS (68),           /* int_div_di */
1078
                     1,           /* branch_cost */
1079
                     4            /* memory_latency */
1080
  },
1081
  { /* SR71000 */
1082
    DEFAULT_COSTS
1083
  },
1084
  { /* XLR */
1085
    SOFT_FP_COSTS,
1086
    COSTS_N_INSNS (8),            /* int_mult_si */
1087
    COSTS_N_INSNS (8),            /* int_mult_di */
1088
    COSTS_N_INSNS (72),           /* int_div_si */
1089
    COSTS_N_INSNS (72),           /* int_div_di */
1090
                     1,           /* branch_cost */
1091
                     4            /* memory_latency */
1092
  }
1093
};
1094
 
1095
static rtx mips_find_pic_call_symbol (rtx, rtx, bool);
1096
static int mips_register_move_cost (enum machine_mode, reg_class_t,
1097
                                    reg_class_t);
1098
static unsigned int mips_function_arg_boundary (enum machine_mode, const_tree);
1099
 
1100
/* This hash table keeps track of implicit "mips16" and "nomips16" attributes
1101
   for -mflip_mips16.  It maps decl names onto a boolean mode setting.  */
1102
struct GTY (())  mflip_mips16_entry {
1103
  const char *name;
1104
  bool mips16_p;
1105
};
1106
static GTY ((param_is (struct mflip_mips16_entry))) htab_t mflip_mips16_htab;
1107
 
1108
/* Hash table callbacks for mflip_mips16_htab.  */
1109
 
1110
static hashval_t
1111
mflip_mips16_htab_hash (const void *entry)
1112
{
1113
  return htab_hash_string (((const struct mflip_mips16_entry *) entry)->name);
1114
}
1115
 
1116
static int
1117
mflip_mips16_htab_eq (const void *entry, const void *name)
1118
{
1119
  return strcmp (((const struct mflip_mips16_entry *) entry)->name,
1120
                 (const char *) name) == 0;
1121
}
1122
 
1123
/* True if -mflip-mips16 should next add an attribute for the default MIPS16
1124
   mode, false if it should next add an attribute for the opposite mode.  */
1125
static GTY(()) bool mips16_flipper;
1126
 
1127
/* DECL is a function that needs a default "mips16" or "nomips16" attribute
1128
   for -mflip-mips16.  Return true if it should use "mips16" and false if
1129
   it should use "nomips16".  */
1130
 
1131
static bool
1132
mflip_mips16_use_mips16_p (tree decl)
1133
{
1134
  struct mflip_mips16_entry *entry;
1135
  const char *name;
1136
  hashval_t hash;
1137
  void **slot;
1138
 
1139
  /* Use the opposite of the command-line setting for anonymous decls.  */
1140
  if (!DECL_NAME (decl))
1141
    return !mips_base_mips16;
1142
 
1143
  if (!mflip_mips16_htab)
1144
    mflip_mips16_htab = htab_create_ggc (37, mflip_mips16_htab_hash,
1145
                                         mflip_mips16_htab_eq, NULL);
1146
 
1147
  name = IDENTIFIER_POINTER (DECL_NAME (decl));
1148
  hash = htab_hash_string (name);
1149
  slot = htab_find_slot_with_hash (mflip_mips16_htab, name, hash, INSERT);
1150
  entry = (struct mflip_mips16_entry *) *slot;
1151
  if (!entry)
1152
    {
1153
      mips16_flipper = !mips16_flipper;
1154
      entry = ggc_alloc_mflip_mips16_entry ();
1155
      entry->name = name;
1156
      entry->mips16_p = mips16_flipper ? !mips_base_mips16 : mips_base_mips16;
1157
      *slot = entry;
1158
    }
1159
  return entry->mips16_p;
1160
}
1161
 
1162
/* Predicates to test for presence of "near" and "far"/"long_call"
1163
   attributes on the given TYPE.  */
1164
 
1165
static bool
1166
mips_near_type_p (const_tree type)
1167
{
1168
  return lookup_attribute ("near", TYPE_ATTRIBUTES (type)) != NULL;
1169
}
1170
 
1171
static bool
1172
mips_far_type_p (const_tree type)
1173
{
1174
  return (lookup_attribute ("long_call", TYPE_ATTRIBUTES (type)) != NULL
1175
          || lookup_attribute ("far", TYPE_ATTRIBUTES (type)) != NULL);
1176
}
1177
 
1178
/* Similar predicates for "mips16"/"nomips16" function attributes.  */
1179
 
1180
static bool
1181
mips_mips16_decl_p (const_tree decl)
1182
{
1183
  return lookup_attribute ("mips16", DECL_ATTRIBUTES (decl)) != NULL;
1184
}
1185
 
1186
static bool
1187
mips_nomips16_decl_p (const_tree decl)
1188
{
1189
  return lookup_attribute ("nomips16", DECL_ATTRIBUTES (decl)) != NULL;
1190
}
1191
 
1192
/* Check if the interrupt attribute is set for a function.  */
1193
 
1194
static bool
1195
mips_interrupt_type_p (tree type)
1196
{
1197
  return lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type)) != NULL;
1198
}
1199
 
1200
/* Check if the attribute to use shadow register set is set for a function.  */
1201
 
1202
static bool
1203
mips_use_shadow_register_set_p (tree type)
1204
{
1205
  return lookup_attribute ("use_shadow_register_set",
1206
                           TYPE_ATTRIBUTES (type)) != NULL;
1207
}
1208
 
1209
/* Check if the attribute to keep interrupts masked is set for a function.  */
1210
 
1211
static bool
1212
mips_keep_interrupts_masked_p (tree type)
1213
{
1214
  return lookup_attribute ("keep_interrupts_masked",
1215
                           TYPE_ATTRIBUTES (type)) != NULL;
1216
}
1217
 
1218
/* Check if the attribute to use debug exception return is set for
1219
   a function.  */
1220
 
1221
static bool
1222
mips_use_debug_exception_return_p (tree type)
1223
{
1224
  return lookup_attribute ("use_debug_exception_return",
1225
                           TYPE_ATTRIBUTES (type)) != NULL;
1226
}
1227
 
1228
/* Return true if function DECL is a MIPS16 function.  Return the ambient
1229
   setting if DECL is null.  */
1230
 
1231
static bool
1232
mips_use_mips16_mode_p (tree decl)
1233
{
1234
  if (decl)
1235
    {
1236
      /* Nested functions must use the same frame pointer as their
1237
         parent and must therefore use the same ISA mode.  */
1238
      tree parent = decl_function_context (decl);
1239
      if (parent)
1240
        decl = parent;
1241
      if (mips_mips16_decl_p (decl))
1242
        return true;
1243
      if (mips_nomips16_decl_p (decl))
1244
        return false;
1245
    }
1246
  return mips_base_mips16;
1247
}
1248
 
1249
/* Implement TARGET_COMP_TYPE_ATTRIBUTES.  */
1250
 
1251
static int
1252
mips_comp_type_attributes (const_tree type1, const_tree type2)
1253
{
1254
  /* Disallow mixed near/far attributes.  */
1255
  if (mips_far_type_p (type1) && mips_near_type_p (type2))
1256
    return 0;
1257
  if (mips_near_type_p (type1) && mips_far_type_p (type2))
1258
    return 0;
1259
  return 1;
1260
}
1261
 
1262
/* Implement TARGET_INSERT_ATTRIBUTES.  */
1263
 
1264
static void
1265
mips_insert_attributes (tree decl, tree *attributes)
1266
{
1267
  const char *name;
1268
  bool mips16_p, nomips16_p;
1269
 
1270
  /* Check for "mips16" and "nomips16" attributes.  */
1271
  mips16_p = lookup_attribute ("mips16", *attributes) != NULL;
1272
  nomips16_p = lookup_attribute ("nomips16", *attributes) != NULL;
1273
  if (TREE_CODE (decl) != FUNCTION_DECL)
1274
    {
1275
      if (mips16_p)
1276
        error ("%qs attribute only applies to functions", "mips16");
1277
      if (nomips16_p)
1278
        error ("%qs attribute only applies to functions", "nomips16");
1279
    }
1280
  else
1281
    {
1282
      mips16_p |= mips_mips16_decl_p (decl);
1283
      nomips16_p |= mips_nomips16_decl_p (decl);
1284
      if (mips16_p || nomips16_p)
1285
        {
1286
          /* DECL cannot be simultaneously "mips16" and "nomips16".  */
1287
          if (mips16_p && nomips16_p)
1288
            error ("%qE cannot have both %<mips16%> and "
1289
                   "%<nomips16%> attributes",
1290
                   DECL_NAME (decl));
1291
        }
1292
      else if (TARGET_FLIP_MIPS16 && !DECL_ARTIFICIAL (decl))
1293
        {
1294
          /* Implement -mflip-mips16.  If DECL has neither a "nomips16" nor a
1295
             "mips16" attribute, arbitrarily pick one.  We must pick the same
1296
             setting for duplicate declarations of a function.  */
1297
          name = mflip_mips16_use_mips16_p (decl) ? "mips16" : "nomips16";
1298
          *attributes = tree_cons (get_identifier (name), NULL, *attributes);
1299
        }
1300
    }
1301
}
1302
 
1303
/* Implement TARGET_MERGE_DECL_ATTRIBUTES.  */
1304
 
1305
static tree
1306
mips_merge_decl_attributes (tree olddecl, tree newdecl)
1307
{
1308
  /* The decls' "mips16" and "nomips16" attributes must match exactly.  */
1309
  if (mips_mips16_decl_p (olddecl) != mips_mips16_decl_p (newdecl))
1310
    error ("%qE redeclared with conflicting %qs attributes",
1311
           DECL_NAME (newdecl), "mips16");
1312
  if (mips_nomips16_decl_p (olddecl) != mips_nomips16_decl_p (newdecl))
1313
    error ("%qE redeclared with conflicting %qs attributes",
1314
           DECL_NAME (newdecl), "nomips16");
1315
 
1316
  return merge_attributes (DECL_ATTRIBUTES (olddecl),
1317
                           DECL_ATTRIBUTES (newdecl));
1318
}
1319
 
1320
/* If X is a PLUS of a CONST_INT, return the two terms in *BASE_PTR
1321
   and *OFFSET_PTR.  Return X in *BASE_PTR and 0 in *OFFSET_PTR otherwise.  */
1322
 
1323
static void
1324
mips_split_plus (rtx x, rtx *base_ptr, HOST_WIDE_INT *offset_ptr)
1325
{
1326
  if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
1327
    {
1328
      *base_ptr = XEXP (x, 0);
1329
      *offset_ptr = INTVAL (XEXP (x, 1));
1330
    }
1331
  else
1332
    {
1333
      *base_ptr = x;
1334
      *offset_ptr = 0;
1335
    }
1336
}
1337
 
1338
/* Forward declaration: mips_build_shift and mips_build_lower recurse
   into mips_build_integer, which is defined below them.  */
static unsigned int mips_build_integer (struct mips_integer_op *,
                                        unsigned HOST_WIDE_INT);
 
1341
/* A subroutine of mips_build_integer, with the same interface.
1342
   Assume that the final action in the sequence should be a left shift.  */
1343
 
1344
static unsigned int
1345
mips_build_shift (struct mips_integer_op *codes, HOST_WIDE_INT value)
1346
{
1347
  unsigned int i, shift;
1348
 
1349
  /* Shift VALUE right until its lowest bit is set.  Shift arithmetically
1350
     since signed numbers are easier to load than unsigned ones.  */
1351
  shift = 0;
1352
  while ((value & 1) == 0)
1353
    value /= 2, shift++;
1354
 
1355
  i = mips_build_integer (codes, value);
1356
  codes[i].code = ASHIFT;
1357
  codes[i].value = shift;
1358
  return i + 1;
1359
}
1360
 
1361
/* As for mips_build_shift, but assume that the final action will be
1362
   an IOR or PLUS operation.  */
1363
 
1364
static unsigned int
1365
mips_build_lower (struct mips_integer_op *codes, unsigned HOST_WIDE_INT value)
1366
{
1367
  unsigned HOST_WIDE_INT high;
1368
  unsigned int i;
1369
 
1370
  high = value & ~(unsigned HOST_WIDE_INT) 0xffff;
1371
  if (!LUI_OPERAND (high) && (value & 0x18000) == 0x18000)
1372
    {
1373
      /* The constant is too complex to load with a simple LUI/ORI pair,
1374
         so we want to give the recursive call as many trailing zeros as
1375
         possible.  In this case, we know bit 16 is set and that the
1376
         low 16 bits form a negative number.  If we subtract that number
1377
         from VALUE, we will clear at least the lowest 17 bits, maybe more.  */
1378
      i = mips_build_integer (codes, CONST_HIGH_PART (value));
1379
      codes[i].code = PLUS;
1380
      codes[i].value = CONST_LOW_PART (value);
1381
    }
1382
  else
1383
    {
1384
      /* Either this is a simple LUI/ORI pair, or clearing the lowest 16
1385
         bits gives a value with at least 17 trailing zeros.  */
1386
      i = mips_build_integer (codes, high);
1387
      codes[i].code = IOR;
1388
      codes[i].value = value & 0xffff;
1389
    }
1390
  return i + 1;
1391
}
1392
 
1393
/* Fill CODES with a sequence of rtl operations to load VALUE.
1394
   Return the number of operations needed.  */
1395
 
1396
static unsigned int
1397
mips_build_integer (struct mips_integer_op *codes,
1398
                    unsigned HOST_WIDE_INT value)
1399
{
1400
  if (SMALL_OPERAND (value)
1401
      || SMALL_OPERAND_UNSIGNED (value)
1402
      || LUI_OPERAND (value))
1403
    {
1404
      /* The value can be loaded with a single instruction.  */
1405
      codes[0].code = UNKNOWN;
1406
      codes[0].value = value;
1407
      return 1;
1408
    }
1409
  else if ((value & 1) != 0 || LUI_OPERAND (CONST_HIGH_PART (value)))
1410
    {
1411
      /* Either the constant is a simple LUI/ORI combination or its
1412
         lowest bit is set.  We don't want to shift in this case.  */
1413
      return mips_build_lower (codes, value);
1414
    }
1415
  else if ((value & 0xffff) == 0)
1416
    {
1417
      /* The constant will need at least three actions.  The lowest
1418
         16 bits are clear, so the final action will be a shift.  */
1419
      return mips_build_shift (codes, value);
1420
    }
1421
  else
1422
    {
1423
      /* The final action could be a shift, add or inclusive OR.
1424
         Rather than use a complex condition to select the best
1425
         approach, try both mips_build_shift and mips_build_lower
1426
         and pick the one that gives the shortest sequence.
1427
         Note that this case is only used once per constant.  */
1428
      struct mips_integer_op alt_codes[MIPS_MAX_INTEGER_OPS];
1429
      unsigned int cost, alt_cost;
1430
 
1431
      cost = mips_build_shift (codes, value);
1432
      alt_cost = mips_build_lower (alt_codes, value);
1433
      if (alt_cost < cost)
1434
        {
1435
          memcpy (codes, alt_codes, alt_cost * sizeof (codes[0]));
1436
          cost = alt_cost;
1437
        }
1438
      return cost;
1439
    }
1440
}
1441
 
1442
/* Implement TARGET_LEGITIMATE_CONSTANT_P.  */
1443
 
1444
static bool
1445
mips_legitimate_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
1446
{
1447
  return mips_const_insns (x) > 0;
1448
}
1449
 
1450
/* Return a SYMBOL_REF for a MIPS16 function called NAME.  */
1451
 
1452
static rtx
1453
mips16_stub_function (const char *name)
1454
{
1455
  rtx x;
1456
 
1457
  x = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
1458
  SYMBOL_REF_FLAGS (x) |= (SYMBOL_FLAG_EXTERNAL | SYMBOL_FLAG_FUNCTION);
1459
  return x;
1460
}
1461
 
1462
/* Return true if symbols of type TYPE require a GOT access.  */
1463
 
1464
static bool
1465
mips_got_symbol_type_p (enum mips_symbol_type type)
1466
{
1467
  switch (type)
1468
    {
1469
    case SYMBOL_GOT_PAGE_OFST:
1470
    case SYMBOL_GOT_DISP:
1471
      return true;
1472
 
1473
    default:
1474
      return false;
1475
    }
1476
}
1477
 
1478
/* Return true if X is a thread-local symbol.  */
1479
 
1480
static bool
1481
mips_tls_symbol_p (rtx x)
1482
{
1483
  return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
1484
}
1485
 
1486
/* Return true if SYMBOL_REF X is associated with a global symbol
1487
   (in the STB_GLOBAL sense).  */
1488
 
1489
static bool
1490
mips_global_symbol_p (const_rtx x)
1491
{
1492
  const_tree decl = SYMBOL_REF_DECL (x);
1493
 
1494
  if (!decl)
1495
    return !SYMBOL_REF_LOCAL_P (x) || SYMBOL_REF_EXTERNAL_P (x);
1496
 
1497
  /* Weakref symbols are not TREE_PUBLIC, but their targets are global
1498
     or weak symbols.  Relocations in the object file will be against
1499
     the target symbol, so it's that symbol's binding that matters here.  */
1500
  return DECL_P (decl) && (TREE_PUBLIC (decl) || DECL_WEAK (decl));
1501
}
1502
 
1503
/* Return true if function X is a libgcc MIPS16 stub function.  */
1504
 
1505
static bool
1506
mips16_stub_function_p (const_rtx x)
1507
{
1508
  return (GET_CODE (x) == SYMBOL_REF
1509
          && strncmp (XSTR (x, 0), "__mips16_", 9) == 0);
1510
}
1511
 
1512
/* Return true if function X is a locally-defined and locally-binding
1513
   MIPS16 function.  */
1514
 
1515
static bool
1516
mips16_local_function_p (const_rtx x)
1517
{
1518
  return (GET_CODE (x) == SYMBOL_REF
1519
          && SYMBOL_REF_LOCAL_P (x)
1520
          && !SYMBOL_REF_EXTERNAL_P (x)
1521
          && mips_use_mips16_mode_p (SYMBOL_REF_DECL (x)));
1522
}
1523
 
1524
/* Return true if SYMBOL_REF X binds locally.  */
1525
 
1526
static bool
1527
mips_symbol_binds_local_p (const_rtx x)
1528
{
1529
  return (SYMBOL_REF_DECL (x)
1530
          ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
1531
          : SYMBOL_REF_LOCAL_P (x));
1532
}
1533
 
1534
/* Return true if rtx constants of mode MODE should be put into a small
1535
   data section.  */
1536
 
1537
static bool
1538
mips_rtx_constant_in_small_data_p (enum machine_mode mode)
1539
{
1540
  return (!TARGET_EMBEDDED_DATA
1541
          && TARGET_LOCAL_SDATA
1542
          && GET_MODE_SIZE (mode) <= mips_small_data_threshold);
1543
}
1544
 
1545
/* Return true if X should not be moved directly into register $25.
1546
   We need this because many versions of GAS will treat "la $25,foo" as
1547
   part of a call sequence and so allow a global "foo" to be lazily bound.  */
1548
 
1549
bool
1550
mips_dangerous_for_la25_p (rtx x)
1551
{
1552
  return (!TARGET_EXPLICIT_RELOCS
1553
          && TARGET_USE_GOT
1554
          && GET_CODE (x) == SYMBOL_REF
1555
          && mips_global_symbol_p (x));
1556
}
1557
 
1558
/* Return true if calls to X might need $25 to be valid on entry.  */
1559
 
1560
bool
1561
mips_use_pic_fn_addr_reg_p (const_rtx x)
1562
{
1563
  if (!TARGET_USE_PIC_FN_ADDR_REG)
1564
    return false;
1565
 
1566
  /* MIPS16 stub functions are guaranteed not to use $25.  */
1567
  if (mips16_stub_function_p (x))
1568
    return false;
1569
 
1570
  if (GET_CODE (x) == SYMBOL_REF)
1571
    {
1572
      /* If PLTs and copy relocations are available, the static linker
1573
         will make sure that $25 is valid on entry to the target function.  */
1574
      if (TARGET_ABICALLS_PIC0)
1575
        return false;
1576
 
1577
      /* Locally-defined functions use absolute accesses to set up
1578
         the global pointer.  */
1579
      if (TARGET_ABSOLUTE_ABICALLS
1580
          && mips_symbol_binds_local_p (x)
1581
          && !SYMBOL_REF_EXTERNAL_P (x))
1582
        return false;
1583
    }
1584
 
1585
  return true;
1586
}
1587
 
1588
/* Return the method that should be used to access SYMBOL_REF or
1589
   LABEL_REF X in context CONTEXT.  */
1590
 
1591
static enum mips_symbol_type
1592
mips_classify_symbol (const_rtx x, enum mips_symbol_context context)
1593
{
1594
  if (TARGET_RTP_PIC)
1595
    return SYMBOL_GOT_DISP;
1596
 
1597
  if (GET_CODE (x) == LABEL_REF)
1598
    {
1599
      /* Only return SYMBOL_PC_RELATIVE if we are generating MIPS16
1600
         code and if we know that the label is in the current function's
1601
         text section.  LABEL_REFs are used for jump tables as well as
1602
         text labels, so we must check whether jump tables live in the
1603
         text section.  */
1604
      if (TARGET_MIPS16_SHORT_JUMP_TABLES
1605
          && !LABEL_REF_NONLOCAL_P (x))
1606
        return SYMBOL_PC_RELATIVE;
1607
 
1608
      if (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS)
1609
        return SYMBOL_GOT_PAGE_OFST;
1610
 
1611
      return SYMBOL_ABSOLUTE;
1612
    }
1613
 
1614
  gcc_assert (GET_CODE (x) == SYMBOL_REF);
1615
 
1616
  if (SYMBOL_REF_TLS_MODEL (x))
1617
    return SYMBOL_TLS;
1618
 
1619
  if (CONSTANT_POOL_ADDRESS_P (x))
1620
    {
1621
      if (TARGET_MIPS16_TEXT_LOADS)
1622
        return SYMBOL_PC_RELATIVE;
1623
 
1624
      if (TARGET_MIPS16_PCREL_LOADS && context == SYMBOL_CONTEXT_MEM)
1625
        return SYMBOL_PC_RELATIVE;
1626
 
1627
      if (mips_rtx_constant_in_small_data_p (get_pool_mode (x)))
1628
        return SYMBOL_GP_RELATIVE;
1629
    }
1630
 
1631
  /* Do not use small-data accesses for weak symbols; they may end up
1632
     being zero.  */
1633
  if (TARGET_GPOPT && SYMBOL_REF_SMALL_P (x) && !SYMBOL_REF_WEAK (x))
1634
    return SYMBOL_GP_RELATIVE;
1635
 
1636
  /* Don't use GOT accesses for locally-binding symbols when -mno-shared
1637
     is in effect.  */
1638
  if (TARGET_ABICALLS_PIC2
1639
      && !(TARGET_ABSOLUTE_ABICALLS && mips_symbol_binds_local_p (x)))
1640
    {
1641
      /* There are three cases to consider:
1642
 
1643
            - o32 PIC (either with or without explicit relocs)
1644
            - n32/n64 PIC without explicit relocs
1645
            - n32/n64 PIC with explicit relocs
1646
 
1647
         In the first case, both local and global accesses will use an
1648
         R_MIPS_GOT16 relocation.  We must correctly predict which of
1649
         the two semantics (local or global) the assembler and linker
1650
         will apply.  The choice depends on the symbol's binding rather
1651
         than its visibility.
1652
 
1653
         In the second case, the assembler will not use R_MIPS_GOT16
1654
         relocations, but it chooses between local and global accesses
1655
         in the same way as for o32 PIC.
1656
 
1657
         In the third case we have more freedom since both forms of
1658
         access will work for any kind of symbol.  However, there seems
1659
         little point in doing things differently.  */
1660
      if (mips_global_symbol_p (x))
1661
        return SYMBOL_GOT_DISP;
1662
 
1663
      return SYMBOL_GOT_PAGE_OFST;
1664
    }
1665
 
1666
  return SYMBOL_ABSOLUTE;
1667
}
1668
 
1669
/* Classify the base of symbolic expression X, given that X appears in
1670
   context CONTEXT.  */
1671
 
1672
static enum mips_symbol_type
1673
mips_classify_symbolic_expression (rtx x, enum mips_symbol_context context)
1674
{
1675
  rtx offset;
1676
 
1677
  split_const (x, &x, &offset);
1678
  if (UNSPEC_ADDRESS_P (x))
1679
    return UNSPEC_ADDRESS_TYPE (x);
1680
 
1681
  return mips_classify_symbol (x, context);
1682
}
1683
 
1684
/* Return true if OFFSET is within the range [0, ALIGN), where ALIGN
1685
   is the alignment in bytes of SYMBOL_REF X.  */
1686
 
1687
static bool
1688
mips_offset_within_alignment_p (rtx x, HOST_WIDE_INT offset)
1689
{
1690
  HOST_WIDE_INT align;
1691
 
1692
  align = SYMBOL_REF_DECL (x) ? DECL_ALIGN_UNIT (SYMBOL_REF_DECL (x)) : 1;
1693
  return IN_RANGE (offset, 0, align - 1);
1694
}
1695
 
1696
/* Return true if X is a symbolic constant that can be used in context
1697
   CONTEXT.  If it is, store the type of the symbol in *SYMBOL_TYPE.  */
1698
 
1699
bool
1700
mips_symbolic_constant_p (rtx x, enum mips_symbol_context context,
1701
                          enum mips_symbol_type *symbol_type)
1702
{
1703
  rtx offset;
1704
 
1705
  split_const (x, &x, &offset);
1706
  if (UNSPEC_ADDRESS_P (x))
1707
    {
1708
      *symbol_type = UNSPEC_ADDRESS_TYPE (x);
1709
      x = UNSPEC_ADDRESS (x);
1710
    }
1711
  else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
1712
    {
1713
      *symbol_type = mips_classify_symbol (x, context);
1714
      if (*symbol_type == SYMBOL_TLS)
1715
        return false;
1716
    }
1717
  else
1718
    return false;
1719
 
1720
  if (offset == const0_rtx)
1721
    return true;
1722
 
1723
  /* Check whether a nonzero offset is valid for the underlying
1724
     relocations.  */
1725
  switch (*symbol_type)
1726
    {
1727
    case SYMBOL_ABSOLUTE:
1728
    case SYMBOL_64_HIGH:
1729
    case SYMBOL_64_MID:
1730
    case SYMBOL_64_LOW:
1731
      /* If the target has 64-bit pointers and the object file only
1732
         supports 32-bit symbols, the values of those symbols will be
1733
         sign-extended.  In this case we can't allow an arbitrary offset
1734
         in case the 32-bit value X + OFFSET has a different sign from X.  */
1735
      if (Pmode == DImode && !ABI_HAS_64BIT_SYMBOLS)
1736
        return offset_within_block_p (x, INTVAL (offset));
1737
 
1738
      /* In other cases the relocations can handle any offset.  */
1739
      return true;
1740
 
1741
    case SYMBOL_PC_RELATIVE:
1742
      /* Allow constant pool references to be converted to LABEL+CONSTANT.
1743
         In this case, we no longer have access to the underlying constant,
1744
         but the original symbol-based access was known to be valid.  */
1745
      if (GET_CODE (x) == LABEL_REF)
1746
        return true;
1747
 
1748
      /* Fall through.  */
1749
 
1750
    case SYMBOL_GP_RELATIVE:
1751
      /* Make sure that the offset refers to something within the
1752
         same object block.  This should guarantee that the final
1753
         PC- or GP-relative offset is within the 16-bit limit.  */
1754
      return offset_within_block_p (x, INTVAL (offset));
1755
 
1756
    case SYMBOL_GOT_PAGE_OFST:
1757
    case SYMBOL_GOTOFF_PAGE:
1758
      /* If the symbol is global, the GOT entry will contain the symbol's
1759
         address, and we will apply a 16-bit offset after loading it.
1760
         If the symbol is local, the linker should provide enough local
1761
         GOT entries for a 16-bit offset, but larger offsets may lead
1762
         to GOT overflow.  */
1763
      return SMALL_INT (offset);
1764
 
1765
    case SYMBOL_TPREL:
1766
    case SYMBOL_DTPREL:
1767
      /* There is no carry between the HI and LO REL relocations, so the
1768
         offset is only valid if we know it won't lead to such a carry.  */
1769
      return mips_offset_within_alignment_p (x, INTVAL (offset));
1770
 
1771
    case SYMBOL_GOT_DISP:
1772
    case SYMBOL_GOTOFF_DISP:
1773
    case SYMBOL_GOTOFF_CALL:
1774
    case SYMBOL_GOTOFF_LOADGP:
1775
    case SYMBOL_TLSGD:
1776
    case SYMBOL_TLSLDM:
1777
    case SYMBOL_GOTTPREL:
1778
    case SYMBOL_TLS:
1779
    case SYMBOL_HALF:
1780
      return false;
1781
    }
1782
  gcc_unreachable ();
1783
}
1784
 
1785
/* Like mips_symbol_insns, but treat extended MIPS16 instructions as a
1786
   single instruction.  We rely on the fact that, in the worst case,
1787
   all instructions involved in a MIPS16 address calculation are usually
1788
   extended ones.  */
1789
 
1790
static int
1791
mips_symbol_insns_1 (enum mips_symbol_type type, enum machine_mode mode)
1792
{
1793
  if (mips_use_pcrel_pool_p[(int) type])
1794
    {
1795
      if (mode == MAX_MACHINE_MODE)
1796
        /* LEAs will be converted into constant-pool references by
1797
           mips_reorg.  */
1798
        type = SYMBOL_PC_RELATIVE;
1799
      else
1800
        /* The constant must be loaded and then dereferenced.  */
1801
        return 0;
1802
    }
1803
 
1804
  switch (type)
1805
    {
1806
    case SYMBOL_ABSOLUTE:
1807
      /* When using 64-bit symbols, we need 5 preparatory instructions,
1808
         such as:
1809
 
1810
             lui     $at,%highest(symbol)
1811
             daddiu  $at,$at,%higher(symbol)
1812
             dsll    $at,$at,16
1813
             daddiu  $at,$at,%hi(symbol)
1814
             dsll    $at,$at,16
1815
 
1816
         The final address is then $at + %lo(symbol).  With 32-bit
1817
         symbols we just need a preparatory LUI for normal mode and
1818
         a preparatory LI and SLL for MIPS16.  */
1819
      return ABI_HAS_64BIT_SYMBOLS ? 6 : TARGET_MIPS16 ? 3 : 2;
1820
 
1821
    case SYMBOL_GP_RELATIVE:
1822
      /* Treat GP-relative accesses as taking a single instruction on
1823
         MIPS16 too; the copy of $gp can often be shared.  */
1824
      return 1;
1825
 
1826
    case SYMBOL_PC_RELATIVE:
1827
      /* PC-relative constants can be only be used with ADDIUPC,
1828
         DADDIUPC, LWPC and LDPC.  */
1829
      if (mode == MAX_MACHINE_MODE
1830
          || GET_MODE_SIZE (mode) == 4
1831
          || GET_MODE_SIZE (mode) == 8)
1832
        return 1;
1833
 
1834
      /* The constant must be loaded using ADDIUPC or DADDIUPC first.  */
1835
      return 0;
1836
 
1837
    case SYMBOL_GOT_DISP:
1838
      /* The constant will have to be loaded from the GOT before it
1839
         is used in an address.  */
1840
      if (mode != MAX_MACHINE_MODE)
1841
        return 0;
1842
 
1843
      /* Fall through.  */
1844
 
1845
    case SYMBOL_GOT_PAGE_OFST:
1846
      /* Unless -funit-at-a-time is in effect, we can't be sure whether the
1847
         local/global classification is accurate.  The worst cases are:
1848
 
1849
         (1) For local symbols when generating o32 or o64 code.  The assembler
1850
             will use:
1851
 
1852
                 lw           $at,%got(symbol)
1853
                 nop
1854
 
1855
             ...and the final address will be $at + %lo(symbol).
1856
 
1857
         (2) For global symbols when -mxgot.  The assembler will use:
1858
 
1859
                 lui     $at,%got_hi(symbol)
1860
                 (d)addu $at,$at,$gp
1861
 
1862
             ...and the final address will be $at + %got_lo(symbol).  */
1863
      return 3;
1864
 
1865
    case SYMBOL_GOTOFF_PAGE:
1866
    case SYMBOL_GOTOFF_DISP:
1867
    case SYMBOL_GOTOFF_CALL:
1868
    case SYMBOL_GOTOFF_LOADGP:
1869
    case SYMBOL_64_HIGH:
1870
    case SYMBOL_64_MID:
1871
    case SYMBOL_64_LOW:
1872
    case SYMBOL_TLSGD:
1873
    case SYMBOL_TLSLDM:
1874
    case SYMBOL_DTPREL:
1875
    case SYMBOL_GOTTPREL:
1876
    case SYMBOL_TPREL:
1877
    case SYMBOL_HALF:
1878
      /* A 16-bit constant formed by a single relocation, or a 32-bit
1879
         constant formed from a high 16-bit relocation and a low 16-bit
1880
         relocation.  Use mips_split_p to determine which.  32-bit
1881
         constants need an "lui; addiu" sequence for normal mode and
1882
         an "li; sll; addiu" sequence for MIPS16 mode.  */
1883
      return !mips_split_p[type] ? 1 : TARGET_MIPS16 ? 3 : 2;
1884
 
1885
    case SYMBOL_TLS:
1886
      /* We don't treat a bare TLS symbol as a constant.  */
1887
      return 0;
1888
    }
1889
  gcc_unreachable ();
1890
}
1891
 
1892
/* If MODE is MAX_MACHINE_MODE, return the number of instructions needed
1893
   to load symbols of type TYPE into a register.  Return 0 if the given
1894
   type of symbol cannot be used as an immediate operand.
1895
 
1896
   Otherwise, return the number of instructions needed to load or store
1897
   values of mode MODE to or from addresses of type TYPE.  Return 0 if
1898
   the given type of symbol is not valid in addresses.
1899
 
1900
   In both cases, treat extended MIPS16 instructions as two instructions.  */
1901
 
1902
static int
1903
mips_symbol_insns (enum mips_symbol_type type, enum machine_mode mode)
1904
{
1905
  return mips_symbol_insns_1 (type, mode) * (TARGET_MIPS16 ? 2 : 1);
1906
}
1907
 
1908
/* A for_each_rtx callback.  Stop the search if *X references a
1909
   thread-local symbol.  */
1910
 
1911
static int
1912
mips_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
1913
{
1914
  return mips_tls_symbol_p (*x);
1915
}
1916
 
1917
/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */
1918
 
1919
static bool
1920
mips_cannot_force_const_mem (enum machine_mode mode, rtx x)
1921
{
1922
  enum mips_symbol_type type;
1923
  rtx base, offset;
1924
 
1925
  /* There is no assembler syntax for expressing an address-sized
1926
     high part.  */
1927
  if (GET_CODE (x) == HIGH)
1928
    return true;
1929
 
1930
  /* As an optimization, reject constants that mips_legitimize_move
1931
     can expand inline.
1932
 
1933
     Suppose we have a multi-instruction sequence that loads constant C
1934
     into register R.  If R does not get allocated a hard register, and
1935
     R is used in an operand that allows both registers and memory
1936
     references, reload will consider forcing C into memory and using
1937
     one of the instruction's memory alternatives.  Returning false
1938
     here will force it to use an input reload instead.  */
1939
  if (CONST_INT_P (x) && mips_legitimate_constant_p (mode, x))
1940
    return true;
1941
 
1942
  split_const (x, &base, &offset);
1943
  if (mips_symbolic_constant_p (base, SYMBOL_CONTEXT_LEA, &type))
1944
    {
1945
      /* See whether we explicitly want these symbols in the pool.  */
1946
      if (mips_use_pcrel_pool_p[(int) type])
1947
        return false;
1948
 
1949
      /* The same optimization as for CONST_INT.  */
1950
      if (SMALL_INT (offset) && mips_symbol_insns (type, MAX_MACHINE_MODE) > 0)
1951
        return true;
1952
 
1953
      /* If MIPS16 constant pools live in the text section, they should
1954
         not refer to anything that might need run-time relocation.  */
1955
      if (TARGET_MIPS16_PCREL_LOADS && mips_got_symbol_type_p (type))
1956
        return true;
1957
    }
1958
 
1959
  /* TLS symbols must be computed by mips_legitimize_move.  */
1960
  if (for_each_rtx (&x, &mips_tls_symbol_ref_1, NULL))
1961
    return true;
1962
 
1963
  return false;
1964
}
1965
 
1966
/* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P.  We can't use blocks for
1967
   constants when we're using a per-function constant pool.  */
1968
 
1969
static bool
1970
mips_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
1971
                                const_rtx x ATTRIBUTE_UNUSED)
1972
{
1973
  return !TARGET_MIPS16_PCREL_LOADS;
1974
}
1975
 
1976
/* Return true if register REGNO is a valid base register for mode MODE.
1977
   STRICT_P is true if REG_OK_STRICT is in effect.  */
1978
 
1979
int
1980
mips_regno_mode_ok_for_base_p (int regno, enum machine_mode mode,
1981
                               bool strict_p)
1982
{
1983
  if (!HARD_REGISTER_NUM_P (regno))
1984
    {
1985
      if (!strict_p)
1986
        return true;
1987
      regno = reg_renumber[regno];
1988
    }
1989
 
1990
  /* These fake registers will be eliminated to either the stack or
1991
     hard frame pointer, both of which are usually valid base registers.
1992
     Reload deals with the cases where the eliminated form isn't valid.  */
1993
  if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
1994
    return true;
1995
 
1996
  /* In MIPS16 mode, the stack pointer can only address word and doubleword
1997
     values, nothing smaller.  There are two problems here:
1998
 
1999
       (a) Instantiating virtual registers can introduce new uses of the
2000
           stack pointer.  If these virtual registers are valid addresses,
2001
           the stack pointer should be too.
2002
 
2003
       (b) Most uses of the stack pointer are not made explicit until
2004
           FRAME_POINTER_REGNUM and ARG_POINTER_REGNUM have been eliminated.
2005
           We don't know until that stage whether we'll be eliminating to the
2006
           stack pointer (which needs the restriction) or the hard frame
2007
           pointer (which doesn't).
2008
 
2009
     All in all, it seems more consistent to only enforce this restriction
2010
     during and after reload.  */
2011
  if (TARGET_MIPS16 && regno == STACK_POINTER_REGNUM)
2012
    return !strict_p || GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
2013
 
2014
  return TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
2015
}
2016
 
2017
/* Return true if X is a valid base register for mode MODE.
2018
   STRICT_P is true if REG_OK_STRICT is in effect.  */
2019
 
2020
static bool
2021
mips_valid_base_register_p (rtx x, enum machine_mode mode, bool strict_p)
2022
{
2023
  if (!strict_p && GET_CODE (x) == SUBREG)
2024
    x = SUBREG_REG (x);
2025
 
2026
  return (REG_P (x)
2027
          && mips_regno_mode_ok_for_base_p (REGNO (x), mode, strict_p));
2028
}
2029
 
2030
/* Return true if, for every base register BASE_REG, (plus BASE_REG X)
2031
   can address a value of mode MODE.  */
2032
 
2033
static bool
2034
mips_valid_offset_p (rtx x, enum machine_mode mode)
2035
{
2036
  /* Check that X is a signed 16-bit number.  */
2037
  if (!const_arith_operand (x, Pmode))
2038
    return false;
2039
 
2040
  /* We may need to split multiword moves, so make sure that every word
2041
     is accessible.  */
2042
  if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
2043
      && !SMALL_OPERAND (INTVAL (x) + GET_MODE_SIZE (mode) - UNITS_PER_WORD))
2044
    return false;
2045
 
2046
  return true;
2047
}
2048
 
2049
/* Return true if a LO_SUM can address a value of mode MODE when the
2050
   LO_SUM symbol has type SYMBOL_TYPE.  */
2051
 
2052
static bool
2053
mips_valid_lo_sum_p (enum mips_symbol_type symbol_type, enum machine_mode mode)
2054
{
2055
  /* Check that symbols of type SYMBOL_TYPE can be used to access values
2056
     of mode MODE.  */
2057
  if (mips_symbol_insns (symbol_type, mode) == 0)
2058
    return false;
2059
 
2060
  /* Check that there is a known low-part relocation.  */
2061
  if (mips_lo_relocs[symbol_type] == NULL)
2062
    return false;
2063
 
2064
  /* We may need to split multiword moves, so make sure that each word
2065
     can be accessed without inducing a carry.  This is mainly needed
2066
     for o64, which has historically only guaranteed 64-bit alignment
2067
     for 128-bit types.  */
2068
  if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
2069
      && GET_MODE_BITSIZE (mode) > GET_MODE_ALIGNMENT (mode))
2070
    return false;
2071
 
2072
  return true;
2073
}
2074
 
2075
/* Return true if X is a valid address for machine mode MODE.  If it is,
2076
   fill in INFO appropriately.  STRICT_P is true if REG_OK_STRICT is in
2077
   effect.  */
2078
 
2079
static bool
2080
mips_classify_address (struct mips_address_info *info, rtx x,
2081
                       enum machine_mode mode, bool strict_p)
2082
{
2083
  switch (GET_CODE (x))
2084
    {
2085
    case REG:
2086
    case SUBREG:
2087
      info->type = ADDRESS_REG;
2088
      info->reg = x;
2089
      info->offset = const0_rtx;
2090
      return mips_valid_base_register_p (info->reg, mode, strict_p);
2091
 
2092
    case PLUS:
2093
      info->type = ADDRESS_REG;
2094
      info->reg = XEXP (x, 0);
2095
      info->offset = XEXP (x, 1);
2096
      return (mips_valid_base_register_p (info->reg, mode, strict_p)
2097
              && mips_valid_offset_p (info->offset, mode));
2098
 
2099
    case LO_SUM:
2100
      info->type = ADDRESS_LO_SUM;
2101
      info->reg = XEXP (x, 0);
2102
      info->offset = XEXP (x, 1);
2103
      /* We have to trust the creator of the LO_SUM to do something vaguely
2104
         sane.  Target-independent code that creates a LO_SUM should also
2105
         create and verify the matching HIGH.  Target-independent code that
2106
         adds an offset to a LO_SUM must prove that the offset will not
2107
         induce a carry.  Failure to do either of these things would be
2108
         a bug, and we are not required to check for it here.  The MIPS
2109
         backend itself should only create LO_SUMs for valid symbolic
2110
         constants, with the high part being either a HIGH or a copy
2111
         of _gp. */
2112
      info->symbol_type
2113
        = mips_classify_symbolic_expression (info->offset, SYMBOL_CONTEXT_MEM);
2114
      return (mips_valid_base_register_p (info->reg, mode, strict_p)
2115
              && mips_valid_lo_sum_p (info->symbol_type, mode));
2116
 
2117
    case CONST_INT:
2118
      /* Small-integer addresses don't occur very often, but they
2119
         are legitimate if $0 is a valid base register.  */
2120
      info->type = ADDRESS_CONST_INT;
2121
      return !TARGET_MIPS16 && SMALL_INT (x);
2122
 
2123
    case CONST:
2124
    case LABEL_REF:
2125
    case SYMBOL_REF:
2126
      info->type = ADDRESS_SYMBOLIC;
2127
      return (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_MEM,
2128
                                        &info->symbol_type)
2129
              && mips_symbol_insns (info->symbol_type, mode) > 0
2130
              && !mips_split_p[info->symbol_type]);
2131
 
2132
    default:
2133
      return false;
2134
    }
2135
}
2136
 
2137
/* Implement TARGET_LEGITIMATE_ADDRESS_P.  */
2138
 
2139
static bool
2140
mips_legitimate_address_p (enum machine_mode mode, rtx x, bool strict_p)
2141
{
2142
  struct mips_address_info addr;
2143
 
2144
  return mips_classify_address (&addr, x, mode, strict_p);
2145
}
2146
 
2147
/* Return true if X is a legitimate $sp-based address for mode MDOE.  */
2148
 
2149
bool
2150
mips_stack_address_p (rtx x, enum machine_mode mode)
2151
{
2152
  struct mips_address_info addr;
2153
 
2154
  return (mips_classify_address (&addr, x, mode, false)
2155
          && addr.type == ADDRESS_REG
2156
          && addr.reg == stack_pointer_rtx);
2157
}
2158
 
2159
/* Return true if ADDR matches the pattern for the LWXS load scaled indexed
2160
   address instruction.  Note that such addresses are not considered
2161
   legitimate in the TARGET_LEGITIMATE_ADDRESS_P sense, because their use
2162
   is so restricted.  */
2163
 
2164
static bool
2165
mips_lwxs_address_p (rtx addr)
2166
{
2167
  if (ISA_HAS_LWXS
2168
      && GET_CODE (addr) == PLUS
2169
      && REG_P (XEXP (addr, 1)))
2170
    {
2171
      rtx offset = XEXP (addr, 0);
2172
      if (GET_CODE (offset) == MULT
2173
          && REG_P (XEXP (offset, 0))
2174
          && CONST_INT_P (XEXP (offset, 1))
2175
          && INTVAL (XEXP (offset, 1)) == 4)
2176
        return true;
2177
    }
2178
  return false;
2179
}
2180
 
2181
/* Return true if ADDR matches the pattern for the L{B,H,W,D}{,U}X load
2182
   indexed address instruction.  Note that such addresses are
2183
   not considered legitimate in the TARGET_LEGITIMATE_ADDRESS_P
2184
   sense, because their use is so restricted.  */
2185
 
2186
static bool
2187
mips_lx_address_p (rtx addr, enum machine_mode mode)
2188
{
2189
  if (GET_CODE (addr) != PLUS
2190
      || !REG_P (XEXP (addr, 0))
2191
      || !REG_P (XEXP (addr, 1)))
2192
    return false;
2193
  if (ISA_HAS_LBX && mode == QImode)
2194
    return true;
2195
  if (ISA_HAS_LHX && mode == HImode)
2196
    return true;
2197
  if (ISA_HAS_LWX && mode == SImode)
2198
    return true;
2199
  if (ISA_HAS_LDX && mode == DImode)
2200
    return true;
2201
  return false;
2202
}
2203
 
2204
/* Return true if a value at OFFSET bytes from base register BASE can be
2205
   accessed using an unextended MIPS16 instruction.  MODE is the mode of
2206
   the value.
2207
 
2208
   Usually the offset in an unextended instruction is a 5-bit field.
2209
   The offset is unsigned and shifted left once for LH and SH, twice
2210
   for LW and SW, and so on.  An exception is LWSP and SWSP, which have
2211
   an 8-bit immediate field that's shifted left twice.  */
2212
 
2213
static bool
2214
mips16_unextended_reference_p (enum machine_mode mode, rtx base,
2215
                               unsigned HOST_WIDE_INT offset)
2216
{
2217
  if (mode != BLKmode && offset % GET_MODE_SIZE (mode) == 0)
2218
    {
2219
      if (GET_MODE_SIZE (mode) == 4 && base == stack_pointer_rtx)
2220
        return offset < 256U * GET_MODE_SIZE (mode);
2221
      return offset < 32U * GET_MODE_SIZE (mode);
2222
    }
2223
  return false;
2224
}
2225
 
2226
/* Return the number of instructions needed to load or store a value
2227
   of mode MODE at address X.  Return 0 if X isn't valid for MODE.
2228
   Assume that multiword moves may need to be split into word moves
2229
   if MIGHT_SPLIT_P, otherwise assume that a single load or store is
2230
   enough.
2231
 
2232
   For MIPS16 code, count extended instructions as two instructions.  */
2233
 
2234
int
2235
mips_address_insns (rtx x, enum machine_mode mode, bool might_split_p)
2236
{
2237
  struct mips_address_info addr;
2238
  int factor;
2239
 
2240
  /* BLKmode is used for single unaligned loads and stores and should
2241
     not count as a multiword mode.  (GET_MODE_SIZE (BLKmode) is pretty
2242
     meaningless, so we have to single it out as a special case one way
2243
     or the other.)  */
2244
  if (mode != BLKmode && might_split_p)
2245
    factor = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2246
  else
2247
    factor = 1;
2248
 
2249
  if (mips_classify_address (&addr, x, mode, false))
2250
    switch (addr.type)
2251
      {
2252
      case ADDRESS_REG:
2253
        if (TARGET_MIPS16
2254
            && !mips16_unextended_reference_p (mode, addr.reg,
2255
                                               UINTVAL (addr.offset)))
2256
          return factor * 2;
2257
        return factor;
2258
 
2259
      case ADDRESS_LO_SUM:
2260
        return TARGET_MIPS16 ? factor * 2 : factor;
2261
 
2262
      case ADDRESS_CONST_INT:
2263
        return factor;
2264
 
2265
      case ADDRESS_SYMBOLIC:
2266
        return factor * mips_symbol_insns (addr.symbol_type, mode);
2267
      }
2268
  return 0;
2269
}
2270
 
2271
/* Return the number of instructions needed to load constant X.
2272
   Return 0 if X isn't a valid constant.  */
2273
 
2274
int
2275
mips_const_insns (rtx x)
2276
{
2277
  struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2278
  enum mips_symbol_type symbol_type;
2279
  rtx offset;
2280
 
2281
  switch (GET_CODE (x))
2282
    {
2283
    case HIGH:
2284
      if (!mips_symbolic_constant_p (XEXP (x, 0), SYMBOL_CONTEXT_LEA,
2285
                                     &symbol_type)
2286
          || !mips_split_p[symbol_type])
2287
        return 0;
2288
 
2289
      /* This is simply an LUI for normal mode.  It is an extended
2290
         LI followed by an extended SLL for MIPS16.  */
2291
      return TARGET_MIPS16 ? 4 : 1;
2292
 
2293
    case CONST_INT:
2294
      if (TARGET_MIPS16)
2295
        /* Unsigned 8-bit constants can be loaded using an unextended
2296
           LI instruction.  Unsigned 16-bit constants can be loaded
2297
           using an extended LI.  Negative constants must be loaded
2298
           using LI and then negated.  */
2299
        return (IN_RANGE (INTVAL (x), 0, 255) ? 1
2300
                : SMALL_OPERAND_UNSIGNED (INTVAL (x)) ? 2
2301
                : IN_RANGE (-INTVAL (x), 0, 255) ? 2
2302
                : SMALL_OPERAND_UNSIGNED (-INTVAL (x)) ? 3
2303
                : 0);
2304
 
2305
      return mips_build_integer (codes, INTVAL (x));
2306
 
2307
    case CONST_DOUBLE:
2308
    case CONST_VECTOR:
2309
      /* Allow zeros for normal mode, where we can use $0.  */
2310
      return !TARGET_MIPS16 && x == CONST0_RTX (GET_MODE (x)) ? 1 : 0;
2311
 
2312
    case CONST:
2313
      if (CONST_GP_P (x))
2314
        return 1;
2315
 
2316
      /* See if we can refer to X directly.  */
2317
      if (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_LEA, &symbol_type))
2318
        return mips_symbol_insns (symbol_type, MAX_MACHINE_MODE);
2319
 
2320
      /* Otherwise try splitting the constant into a base and offset.
2321
         If the offset is a 16-bit value, we can load the base address
2322
         into a register and then use (D)ADDIU to add in the offset.
2323
         If the offset is larger, we can load the base and offset
2324
         into separate registers and add them together with (D)ADDU.
2325
         However, the latter is only possible before reload; during
2326
         and after reload, we must have the option of forcing the
2327
         constant into the pool instead.  */
2328
      split_const (x, &x, &offset);
2329
      if (offset != 0)
2330
        {
2331
          int n = mips_const_insns (x);
2332
          if (n != 0)
2333
            {
2334
              if (SMALL_INT (offset))
2335
                return n + 1;
2336
              else if (!targetm.cannot_force_const_mem (GET_MODE (x), x))
2337
                return n + 1 + mips_build_integer (codes, INTVAL (offset));
2338
            }
2339
        }
2340
      return 0;
2341
 
2342
    case SYMBOL_REF:
2343
    case LABEL_REF:
2344
      return mips_symbol_insns (mips_classify_symbol (x, SYMBOL_CONTEXT_LEA),
2345
                                MAX_MACHINE_MODE);
2346
 
2347
    default:
2348
      return 0;
2349
    }
2350
}
2351
 
2352
/* X is a doubleword constant that can be handled by splitting it into
2353
   two words and loading each word separately.  Return the number of
2354
   instructions required to do this.  */
2355
 
2356
int
2357
mips_split_const_insns (rtx x)
2358
{
2359
  unsigned int low, high;
2360
 
2361
  low = mips_const_insns (mips_subword (x, false));
2362
  high = mips_const_insns (mips_subword (x, true));
2363
  gcc_assert (low > 0 && high > 0);
2364
  return low + high;
2365
}
2366
 
2367
/* Return the number of instructions needed to implement INSN,
2368
   given that it loads from or stores to MEM.  Count extended
2369
   MIPS16 instructions as two instructions.  */
2370
 
2371
int
2372
mips_load_store_insns (rtx mem, rtx insn)
2373
{
2374
  enum machine_mode mode;
2375
  bool might_split_p;
2376
  rtx set;
2377
 
2378
  gcc_assert (MEM_P (mem));
2379
  mode = GET_MODE (mem);
2380
 
2381
  /* Try to prove that INSN does not need to be split.  */
2382
  might_split_p = true;
2383
  if (GET_MODE_BITSIZE (mode) == 64)
2384
    {
2385
      set = single_set (insn);
2386
      if (set && !mips_split_64bit_move_p (SET_DEST (set), SET_SRC (set)))
2387
        might_split_p = false;
2388
    }
2389
 
2390
  return mips_address_insns (XEXP (mem, 0), mode, might_split_p);
2391
}
2392
 
2393
/* Return the number of instructions needed for an integer division.  */
2394
 
2395
int
2396
mips_idiv_insns (void)
2397
{
2398
  int count;
2399
 
2400
  count = 1;
2401
  if (TARGET_CHECK_ZERO_DIV)
2402
    {
2403
      if (GENERATE_DIVIDE_TRAPS)
2404
        count++;
2405
      else
2406
        count += 2;
2407
    }
2408
 
2409
  if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
2410
    count++;
2411
  return count;
2412
}
2413
 
2414
/* Emit a move from SRC to DEST.  Assume that the move expanders can
2415
   handle all moves if !can_create_pseudo_p ().  The distinction is
2416
   important because, unlike emit_move_insn, the move expanders know
2417
   how to force Pmode objects into the constant pool even when the
2418
   constant pool address is not itself legitimate.  */
2419
 
2420
rtx
2421
mips_emit_move (rtx dest, rtx src)
2422
{
2423
  return (can_create_pseudo_p ()
2424
          ? emit_move_insn (dest, src)
2425
          : emit_move_insn_1 (dest, src));
2426
}
2427
 
2428
/* Emit an instruction of the form (set TARGET (CODE OP0)).  */
2429
 
2430
static void
2431
mips_emit_unary (enum rtx_code code, rtx target, rtx op0)
2432
{
2433
  emit_insn (gen_rtx_SET (VOIDmode, target,
2434
                          gen_rtx_fmt_e (code, GET_MODE (op0), op0)));
2435
}
2436
 
2437
/* Compute (CODE OP0) and store the result in a new register of mode MODE.
2438
   Return that new register.  */
2439
 
2440
static rtx
2441
mips_force_unary (enum machine_mode mode, enum rtx_code code, rtx op0)
2442
{
2443
  rtx reg;
2444
 
2445
  reg = gen_reg_rtx (mode);
2446
  mips_emit_unary (code, reg, op0);
2447
  return reg;
2448
}
2449
 
2450
/* Emit an instruction of the form (set TARGET (CODE OP0 OP1)).  */
2451
 
2452
void
2453
mips_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
2454
{
2455
  emit_insn (gen_rtx_SET (VOIDmode, target,
2456
                          gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
2457
}
2458
 
2459
/* Compute (CODE OP0 OP1) and store the result in a new register
2460
   of mode MODE.  Return that new register.  */
2461
 
2462
static rtx
2463
mips_force_binary (enum machine_mode mode, enum rtx_code code, rtx op0, rtx op1)
2464
{
2465
  rtx reg;
2466
 
2467
  reg = gen_reg_rtx (mode);
2468
  mips_emit_binary (code, reg, op0, op1);
2469
  return reg;
2470
}
2471
 
2472
/* Copy VALUE to a register and return that register.  If new pseudos
2473
   are allowed, copy it into a new register, otherwise use DEST.  */
2474
 
2475
static rtx
2476
mips_force_temporary (rtx dest, rtx value)
2477
{
2478
  if (can_create_pseudo_p ())
2479
    return force_reg (Pmode, value);
2480
  else
2481
    {
2482
      mips_emit_move (dest, value);
2483
      return dest;
2484
    }
2485
}
2486
 
2487
/* Emit a call sequence with call pattern PATTERN and return the call
   instruction itself (which is not necessarily the last instruction
   emitted).  ORIG_ADDR is the original, unlegitimized address,
   ADDR is the legitimized form, and LAZY_P is true if the call
   address is lazily-bound.  */

static rtx
mips_emit_call_insn (rtx pattern, rtx orig_addr, rtx addr, bool lazy_p)
{
  rtx insn, reg;

  insn = emit_call_insn (pattern);

  if (TARGET_MIPS16 && mips_use_pic_fn_addr_reg_p (orig_addr))
    {
      /* MIPS16 JALRs only take MIPS16 registers.  If the target
         function requires $25 to be valid on entry, we must copy it
         there separately.  The move instruction can be put in the
         call's delay slot.  */
      reg = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
      emit_insn_before (gen_move_insn (reg, addr), insn);
      use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
    }

  if (lazy_p)
    /* Lazy-binding stubs require $gp to be valid on entry.  */
    use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);

  if (TARGET_USE_GOT)
    {
      /* See the comment above load_call<mode> for details.  The call
         both uses and invalidates the cached GOT version.  */
      use_reg (&CALL_INSN_FUNCTION_USAGE (insn),
               gen_rtx_REG (Pmode, GOT_VERSION_REGNUM));
      emit_insn (gen_update_got_version ());
    }
  return insn;
}
2524
 
2525
/* Wrap symbol or label BASE in an UNSPEC address of type SYMBOL_TYPE,
   then add CONST_INT OFFSET to the result.  */

static rtx
mips_unspec_address_offset (rtx base, rtx offset,
                            enum mips_symbol_type symbol_type)
{
  /* The UNSPEC number encodes the symbol type; see UNSPEC_ADDRESS_P
     and friends for how it is decoded.  */
  base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
                         UNSPEC_ADDRESS_FIRST + symbol_type);
  /* Only materialize the PLUS when there is a nonzero offset.  */
  if (offset != const0_rtx)
    base = gen_rtx_PLUS (Pmode, base, offset);
  return gen_rtx_CONST (Pmode, base);
}
2538
 
2539
/* Return an UNSPEC address with underlying address ADDRESS and symbol
   type SYMBOL_TYPE.  */

rtx
mips_unspec_address (rtx address, enum mips_symbol_type symbol_type)
{
  rtx base, offset;

  /* Separate a possible (const (plus SYM OFST)) into SYM and OFST
     so that only the symbol part is wrapped in the UNSPEC.  */
  split_const (address, &base, &offset);
  return mips_unspec_address_offset (base, offset, symbol_type);
}
2550
 
2551
/* If OP is an UNSPEC address, return the address to which it refers,
   otherwise return OP itself.  */

static rtx
mips_strip_unspec_address (rtx op)
{
  rtx base, offset;

  split_const (op, &base, &offset);
  /* Unwrap the UNSPEC and re-apply the constant offset to recover
     the plain symbolic address.  */
  if (UNSPEC_ADDRESS_P (base))
    op = plus_constant (UNSPEC_ADDRESS (base), INTVAL (offset));
  return op;
}
2564
 
2565
/* If mips_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
   high part to BASE and return the result.  Just return BASE otherwise.
   TEMP is as for mips_force_temporary.

   The returned expression can be used as the first operand to a LO_SUM.  */

static rtx
mips_unspec_offset_high (rtx temp, rtx base, rtx addr,
                         enum mips_symbol_type symbol_type)
{
  if (mips_split_p[symbol_type])
    {
      /* Load %hi(ADDR) into a register and add it to BASE; the caller
         completes the address with a matching LO_SUM.  */
      addr = gen_rtx_HIGH (Pmode, mips_unspec_address (addr, symbol_type));
      addr = mips_force_temporary (temp, addr);
      base = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, addr, base));
    }
  return base;
}
2583
 
2584
/* Return an instruction that copies $gp into register REG.  We want
   GCC to treat the register's value as constant, so that its value
   can be rematerialized on demand.  */

static rtx
gen_load_const_gp (rtx reg)
{
  /* Dispatch to the SImode or DImode pattern according to Pmode.  */
  return PMODE_INSN (gen_load_const_gp, (reg));
}
2593
 
2594
/* Return a pseudo register that contains the value of $gp throughout
   the current function.  Such registers are needed by MIPS16 functions,
   for which $gp itself is not a valid base register or addition operand.
   The pseudo is created lazily and cached in cfun->machine.  */

static rtx
mips16_gp_pseudo_reg (void)
{
  if (cfun->machine->mips16_gp_pseudo_rtx == NULL_RTX)
    {
      rtx insn, scan;

      cfun->machine->mips16_gp_pseudo_rtx = gen_reg_rtx (Pmode);

      push_topmost_sequence ();

      /* Skip past any non-insn notes at the start of the function so
         that the initializing load is emitted before the first real
         instruction.  */
      scan = get_insns ();
      while (NEXT_INSN (scan) && !INSN_P (NEXT_INSN (scan)))
        scan = NEXT_INSN (scan);

      insn = gen_load_const_gp (cfun->machine->mips16_gp_pseudo_rtx);
      emit_insn_after (insn, scan);

      pop_topmost_sequence ();
    }

  return cfun->machine->mips16_gp_pseudo_rtx;
}
2621
 
2622
/* Return a base register that holds pic_offset_table_rtx.
   TEMP, if nonnull, is a scratch Pmode base register.  */

rtx
mips_pic_base_register (rtx temp)
{
  /* Outside MIPS16, $gp itself is usable directly.  */
  if (!TARGET_MIPS16)
    return pic_offset_table_rtx;

  /* During initial RTL expansion we can use the cached $gp pseudo.  */
  if (currently_expanding_to_rtl)
    return mips16_gp_pseudo_reg ();

  if (can_create_pseudo_p ())
    temp = gen_reg_rtx (Pmode);

  if (TARGET_USE_GOT)
    /* The first post-reload split exposes all references to $gp
       (both uses and definitions).  All references must remain
       explicit after that point.

       It is safe to introduce uses of $gp at any time, so for
       simplicity, we do that before the split too.  */
    mips_emit_move (temp, pic_offset_table_rtx);
  else
    emit_insn (gen_load_const_gp (temp));
  return temp;
}
2649
 
2650
/* Return the RHS of a load_call<mode> insn.  REG is the GOT base and
   SYMBOL the call symbol; the GOT-version register is included so the
   load is re-done whenever the GOT may have changed.  */

static rtx
mips_unspec_call (rtx reg, rtx symbol)
{
  rtvec vec;

  vec = gen_rtvec (3, reg, symbol, gen_rtx_REG (SImode, GOT_VERSION_REGNUM));
  return gen_rtx_UNSPEC (Pmode, vec, UNSPEC_LOAD_CALL);
}
2660
 
2661
/* If SRC is the RHS of a load_call<mode> insn, return the underlying symbol
   reference.  Return NULL_RTX otherwise.  */

static rtx
mips_strip_unspec_call (rtx src)
{
  /* Operand 1 of an UNSPEC_LOAD_CALL is the symbol; see
     mips_unspec_call for the vector layout.  */
  if (GET_CODE (src) == UNSPEC && XINT (src, 1) == UNSPEC_LOAD_CALL)
    return mips_strip_unspec_address (XVECEXP (src, 0, 1));
  return NULL_RTX;
}
2671
 
2672
/* Create and return a GOT reference of type TYPE for address ADDR.
   TEMP, if nonnull, is a scratch Pmode base register.  */

rtx
mips_got_load (rtx temp, rtx addr, enum mips_symbol_type type)
{
  rtx base, high, lo_sum_symbol;

  base = mips_pic_base_register (temp);

  /* If we used the temporary register to load $gp, we can't use
     it for the high part as well.  */
  if (temp != NULL && reg_overlap_mentioned_p (base, temp))
    temp = NULL;

  high = mips_unspec_offset_high (temp, base, addr, type);
  lo_sum_symbol = mips_unspec_address (addr, type);

  /* Call symbols need the UNSPEC_LOAD_CALL form so that lazy binding
     works; everything else uses the generic unspec_got pattern.  */
  if (type == SYMBOL_GOTOFF_CALL)
    return mips_unspec_call (high, lo_sum_symbol);
  else
    return PMODE_INSN (gen_unspec_got, (high, lo_sum_symbol));
}
2695
 
2696
/* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise
   it appears in a MEM of that mode.  Return true if ADDR is a legitimate
   constant in that context and can be split into high and low parts.
   If so, and if LOW_OUT is nonnull, emit the high part and store the
   low part in *LOW_OUT.  Leave *LOW_OUT unchanged otherwise.

   TEMP is as for mips_force_temporary and is used to load the high
   part into a register.

   When MODE is MAX_MACHINE_MODE, the low part is guaranteed to be
   a legitimate SET_SRC for an .md pattern, otherwise the low part
   is guaranteed to be a legitimate address for mode MODE.  */

bool
mips_split_symbol (rtx temp, rtx addr, enum machine_mode mode, rtx *low_out)
{
  enum mips_symbol_context context;
  enum mips_symbol_type symbol_type;
  rtx high;

  context = (mode == MAX_MACHINE_MODE
             ? SYMBOL_CONTEXT_LEA
             : SYMBOL_CONTEXT_MEM);
  if (GET_CODE (addr) == HIGH && context == SYMBOL_CONTEXT_LEA)
    {
      /* ADDR is an explicit HIGH: handle symbols whose high part
         needs special treatment (mips_split_hi_p).  */
      addr = XEXP (addr, 0);
      if (mips_symbolic_constant_p (addr, context, &symbol_type)
          && mips_symbol_insns (symbol_type, mode) > 0
          && mips_split_hi_p[symbol_type])
        {
          if (low_out)
            switch (symbol_type)
              {
              case SYMBOL_GOT_PAGE_OFST:
                /* The high part of a page/ofst pair is loaded from the GOT.  */
                *low_out = mips_got_load (temp, addr, SYMBOL_GOTOFF_PAGE);
                break;

              default:
                gcc_unreachable ();
              }
          return true;
        }
    }
  else
    {
      if (mips_symbolic_constant_p (addr, context, &symbol_type)
          && mips_symbol_insns (symbol_type, mode) > 0
          && mips_split_p[symbol_type])
        {
          if (low_out)
            switch (symbol_type)
              {
              case SYMBOL_GOT_DISP:
                /* SYMBOL_GOT_DISP symbols are loaded from the GOT.  */
                *low_out = mips_got_load (temp, addr, SYMBOL_GOTOFF_DISP);
                break;

              case SYMBOL_GP_RELATIVE:
                /* Small-data symbols are addressed relative to $gp.  */
                high = mips_pic_base_register (temp);
                *low_out = gen_rtx_LO_SUM (Pmode, high, addr);
                break;

              default:
                /* The generic case: load %hi(ADDR) and pair it with a
                   LO_SUM.  */
                high = gen_rtx_HIGH (Pmode, copy_rtx (addr));
                high = mips_force_temporary (temp, high);
                *low_out = gen_rtx_LO_SUM (Pmode, high, addr);
                break;
              }
          return true;
        }
    }
  return false;
}
2770
 
2771
/* Return a legitimate address for REG + OFFSET.  TEMP is as for
   mips_force_temporary; it is only needed when OFFSET is not a
   SMALL_OPERAND.  */

static rtx
mips_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
{
  if (!SMALL_OPERAND (offset))
    {
      rtx high;

      if (TARGET_MIPS16)
        {
          /* Load the full offset into a register so that we can use
             an unextended instruction for the address itself.  */
          high = GEN_INT (offset);
          offset = 0;
        }
      else
        {
          /* Leave OFFSET as a 16-bit offset and put the excess in HIGH.
             The addition inside the macro CONST_HIGH_PART may cause an
             overflow, so we need to force a sign-extension check.  */
          high = gen_int_mode (CONST_HIGH_PART (offset), Pmode);
          offset = CONST_LOW_PART (offset);
        }
      high = mips_force_temporary (temp, high);
      reg = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
    }
  /* The remaining OFFSET (possibly zero) fits in the address itself.  */
  return plus_constant (reg, offset);
}
2802
 
2803
/* The __tls_get_addr symbol.  */
2804
static GTY(()) rtx mips_tls_symbol;
2805
 
2806
/* Return an instruction sequence that calls __tls_get_addr.  SYM is
   the TLS symbol we are referencing and TYPE is the symbol type to use
   (either global dynamic or local dynamic).  V0 is an RTX for the
   return value location.  */

static rtx
mips_call_tls_get_addr (rtx sym, enum mips_symbol_type type, rtx v0)
{
  rtx insn, loc, a0;

  a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST);

  /* Lazily create the libfunc symbol on first use.  */
  if (!mips_tls_symbol)
    mips_tls_symbol = init_one_libfunc ("__tls_get_addr");

  loc = mips_unspec_address (sym, type);

  start_sequence ();

  /* Pass the GOT entry for SYM as the single argument in $a0.  */
  emit_insn (gen_rtx_SET (Pmode, a0,
                          gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, loc)));
  insn = mips_expand_call (MIPS_CALL_NORMAL, v0, mips_tls_symbol,
                           const0_rtx, NULL_RTX, false);
  /* __tls_get_addr reads no memory the caller cares about, so the
     call can be treated as const for optimization purposes.  */
  RTL_CONST_CALL_P (insn) = 1;
  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
  insn = get_insns ();

  end_sequence ();

  return insn;
}
2837
 
2838
/* Return a pseudo register that contains the current thread pointer.  */

static rtx
mips_get_tp (void)
{
  rtx tp, fn;

  tp = gen_reg_rtx (Pmode);
  if (TARGET_MIPS16)
    {
      /* RDHWR is not a MIPS16 instruction, so call a hidden stub
         function that executes it in 32-bit mode.  */
      mips_need_mips16_rdhwr_p = true;
      fn = mips16_stub_function ("__mips16_rdhwr");
      SYMBOL_REF_FLAGS (fn) |= SYMBOL_FLAG_LOCAL;
      if (!call_insn_operand (fn, VOIDmode))
        fn = force_reg (Pmode, fn);
      emit_insn (PMODE_INSN (gen_tls_get_tp_mips16, (tp, fn)));
    }
  else
    emit_insn (PMODE_INSN (gen_tls_get_tp, (tp)));
  return tp;
}
2859
 
2860
/* Generate the code to access LOC, a thread-local SYMBOL_REF, and return
   its address.  The return value will be both a valid address and a valid
   SET_SRC (either a REG or a LO_SUM).  */

static rtx
mips_legitimize_tls_address (rtx loc)
{
  rtx dest, insn, v0, tp, tmp1, tmp2, eqv, offset;
  enum tls_model model;

  model = SYMBOL_REF_TLS_MODEL (loc);
  /* Only TARGET_ABICALLS code can have more than one module; other
     code must be static and should not use a GOT.  All TLS models
     reduce to local exec in this situation.  */
  if (!TARGET_ABICALLS)
    model = TLS_MODEL_LOCAL_EXEC;

  switch (model)
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      /* Call __tls_get_addr with a TLSGD relocation; the call returns
         the address directly.  */
      v0 = gen_rtx_REG (Pmode, GP_RETURN);
      insn = mips_call_tls_get_addr (loc, SYMBOL_TLSGD, v0);
      dest = gen_reg_rtx (Pmode);
      emit_libcall_block (insn, dest, v0, loc);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      v0 = gen_rtx_REG (Pmode, GP_RETURN);
      insn = mips_call_tls_get_addr (loc, SYMBOL_TLSLDM, v0);
      tmp1 = gen_reg_rtx (Pmode);

      /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
         share the LDM result with other LD model accesses.  */
      eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
                            UNSPEC_TLS_LDM);
      emit_libcall_block (insn, tmp1, v0, eqv);

      /* Add the module-relative (DTPREL) offset of LOC to the module
         base returned by __tls_get_addr.  */
      offset = mips_unspec_address (loc, SYMBOL_DTPREL);
      if (mips_split_p[SYMBOL_DTPREL])
        {
          tmp2 = mips_unspec_offset_high (NULL, tmp1, loc, SYMBOL_DTPREL);
          dest = gen_rtx_LO_SUM (Pmode, tmp2, offset);
        }
      else
        dest = expand_binop (Pmode, add_optab, tmp1, offset,
                             0, 0, OPTAB_DIRECT);
      break;

    case TLS_MODEL_INITIAL_EXEC:
      /* Load the TP-relative offset from the GOT and add it to the
         thread pointer.  */
      tp = mips_get_tp ();
      tmp1 = gen_reg_rtx (Pmode);
      tmp2 = mips_unspec_address (loc, SYMBOL_GOTTPREL);
      if (Pmode == DImode)
        emit_insn (gen_load_gotdi (tmp1, pic_offset_table_rtx, tmp2));
      else
        emit_insn (gen_load_gotsi (tmp1, pic_offset_table_rtx, tmp2));
      dest = gen_reg_rtx (Pmode);
      emit_insn (gen_add3_insn (dest, tmp1, tp));
      break;

    case TLS_MODEL_LOCAL_EXEC:
      /* The TPREL offset is a link-time constant; add it directly to
         the thread pointer.  */
      tmp1 = mips_get_tp ();
      offset = mips_unspec_address (loc, SYMBOL_TPREL);
      if (mips_split_p[SYMBOL_TPREL])
        {
          tmp2 = mips_unspec_offset_high (NULL, tmp1, loc, SYMBOL_TPREL);
          dest = gen_rtx_LO_SUM (Pmode, tmp2, offset);
        }
      else
        dest = expand_binop (Pmode, add_optab, tmp1, offset,
                             0, 0, OPTAB_DIRECT);
      break;

    default:
      gcc_unreachable ();
    }
  return dest;
}
2938
 
2939
/* If X is not a valid address for mode MODE, force it into a register.  */
2940
 
2941
static rtx
2942
mips_force_address (rtx x, enum machine_mode mode)
2943
{
2944
  if (!mips_legitimate_address_p (mode, x, false))
2945
    x = force_reg (Pmode, x);
2946
  return x;
2947
}
2948
 
2949
/* This function is used to implement LEGITIMIZE_ADDRESS.  If X can
   be legitimized in a way that the generic machinery might not expect,
   return a new address, otherwise return NULL.  MODE is the mode of
   the memory being accessed.  */

static rtx
mips_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
                         enum machine_mode mode)
{
  rtx base, addr;
  HOST_WIDE_INT offset;

  /* TLS symbols need their dedicated access sequences.  */
  if (mips_tls_symbol_p (x))
    return mips_legitimize_tls_address (x);

  /* See if the address can split into a high part and a LO_SUM.  */
  if (mips_split_symbol (NULL, x, mode, &addr))
    return mips_force_address (addr, mode);

  /* Handle BASE + OFFSET using mips_add_offset.  */
  mips_split_plus (x, &base, &offset);
  if (offset != 0)
    {
      if (!mips_valid_base_register_p (base, mode, false))
        base = copy_to_mode_reg (Pmode, base);
      addr = mips_add_offset (NULL, base, offset);
      return mips_force_address (addr, mode);
    }

  /* Nothing special applies; let the generic code handle X.  */
  return x;
}
2980
 
2981
/* Load VALUE into DEST.  TEMP is as for mips_force_temporary.  */

void
mips_move_integer (rtx temp, rtx dest, unsigned HOST_WIDE_INT value)
{
  struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
  enum machine_mode mode;
  unsigned int i, num_ops;
  rtx x;

  mode = GET_MODE (dest);
  /* Get the synthesis plan: codes[0] is the initial constant, each
     later entry is a binary operation applied to the running value.  */
  num_ops = mips_build_integer (codes, value);

  /* Apply each binary operation to X.  Invariant: X is a legitimate
     source operand for a SET pattern.  */
  x = GEN_INT (codes[0].value);
  for (i = 1; i < num_ops; i++)
    {
      if (!can_create_pseudo_p ())
        {
          /* After reload, accumulate in the caller-supplied TEMP.  */
          emit_insn (gen_rtx_SET (VOIDmode, temp, x));
          x = temp;
        }
      else
        x = force_reg (mode, x);
      x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
    }

  emit_insn (gen_rtx_SET (VOIDmode, dest, x));
}
3011
 
3012
/* Subroutine of mips_legitimize_move.  Move constant SRC into register
   DEST given that SRC satisfies immediate_operand but doesn't satisfy
   move_operand.  */

static void
mips_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
{
  rtx base, offset;

  /* Split moves of big integers into smaller pieces.  */
  if (splittable_const_int_operand (src, mode))
    {
      /* DEST doubles as the temporary here; see mips_move_integer.  */
      mips_move_integer (dest, dest, INTVAL (src));
      return;
    }

  /* Split moves of symbolic constants into high/low pairs.  */
  if (mips_split_symbol (dest, src, MAX_MACHINE_MODE, &src))
    {
      emit_insn (gen_rtx_SET (VOIDmode, dest, src));
      return;
    }

  /* Generate the appropriate access sequences for TLS symbols.  */
  if (mips_tls_symbol_p (src))
    {
      mips_emit_move (dest, mips_legitimize_tls_address (src));
      return;
    }

  /* If we have (const (plus symbol offset)), and that expression cannot
     be forced into memory, load the symbol first and add in the offset.
     In non-MIPS16 mode, prefer to do this even if the constant _can_ be
     forced into memory, as it usually produces better code.  */
  split_const (src, &base, &offset);
  if (offset != const0_rtx
      && (targetm.cannot_force_const_mem (mode, src)
          || (!TARGET_MIPS16 && can_create_pseudo_p ())))
    {
      base = mips_force_temporary (dest, base);
      mips_emit_move (dest, mips_add_offset (NULL, base, INTVAL (offset)));
      return;
    }

  /* Last resort: load the constant from the constant pool.  */
  src = force_const_mem (mode, src);

  /* When using explicit relocs, constant pool references are sometimes
     not legitimate addresses.  */
  mips_split_symbol (dest, XEXP (src, 0), mode, &XEXP (src, 0));
  mips_emit_move (dest, src);
}
3063
 
3064
/* If (set DEST SRC) is not a valid move instruction, emit an equivalent
   sequence that is valid.  Return true if such a sequence was emitted,
   false if the caller should emit the move itself.  */

bool
mips_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
{
  /* Both operands are memory (or SRC is a non-zero constant going to
     memory); go through a register.  */
  if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
    {
      mips_emit_move (dest, force_reg (mode, src));
      return true;
    }

  /* We need to deal with constants that would be legitimate
     immediate_operands but aren't legitimate move_operands.  */
  if (CONSTANT_P (src) && !move_operand (src, mode))
    {
      mips_legitimize_const_move (mode, dest, src);
      /* Record the original constant so later passes still know what
         value DEST holds.  */
      set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
      return true;
    }
  return false;
}
3086
 
3087
/* Return true if value X in context CONTEXT is a small-data address
   that can be rewritten as a LO_SUM.  */

static bool
mips_rewrite_small_data_p (rtx x, enum mips_symbol_context context)
{
  enum mips_symbol_type symbol_type;

  /* %gp_rel relocations must be available, and the symbol must not
     already be handled by the generic high/low splitting.  */
  return (mips_lo_relocs[SYMBOL_GP_RELATIVE]
          && !mips_split_p[SYMBOL_GP_RELATIVE]
          && mips_symbolic_constant_p (x, context, &symbol_type)
          && symbol_type == SYMBOL_GP_RELATIVE);
}
3100
 
3101
/* A for_each_rtx callback for mips_small_data_pattern_p.  DATA is the
   containing MEM, or null if none.  Return 1 to report a match, -1 to
   skip the current subexpression, 0 to keep searching.  */

static int
mips_small_data_pattern_1 (rtx *loc, void *data)
{
  enum mips_symbol_context context;

  /* Ignore things like "g" constraints in asms.  We make no particular
     guarantee about which symbolic constants are acceptable as asm operands
     versus which must be forced into a GPR.  */
  if (GET_CODE (*loc) == LO_SUM || GET_CODE (*loc) == ASM_OPERANDS)
    return -1;

  if (MEM_P (*loc))
    {
      /* Recurse into the address with the MEM recorded as context,
         then skip the MEM itself.  */
      if (for_each_rtx (&XEXP (*loc, 0), mips_small_data_pattern_1, *loc))
        return 1;
      return -1;
    }

  context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
  return mips_rewrite_small_data_p (*loc, context);
}
3125
 
3126
/* Return true if OP refers to small data symbols directly, not through
   a LO_SUM.  */

bool
mips_small_data_pattern_p (rtx op)
{
  /* Nonzero iff mips_small_data_pattern_1 reported a match anywhere
     in OP.  */
  return for_each_rtx (&op, mips_small_data_pattern_1, NULL);
}
3134
 
3135
/* A for_each_rtx callback, used by mips_rewrite_small_data.
   DATA is the containing MEM, or null if none.  Rewrites matching
   small-data references in place.  */

static int
mips_rewrite_small_data_1 (rtx *loc, void *data)
{
  enum mips_symbol_context context;

  if (MEM_P (*loc))
    {
      /* Recurse into the address with the MEM as context and skip
         the MEM itself.  */
      for_each_rtx (&XEXP (*loc, 0), mips_rewrite_small_data_1, *loc);
      return -1;
    }

  context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
  if (mips_rewrite_small_data_p (*loc, context))
    *loc = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, *loc);

  /* Don't walk into a LO_SUM we just created (or one that was already
     there); its symbol must not be rewritten again.  */
  if (GET_CODE (*loc) == LO_SUM)
    return -1;

  return 0;
}
3158
 
3159
/* Rewrite instruction pattern PATTERN so that it refers to small data
   using explicit relocations.  Returns a rewritten copy; PATTERN
   itself is not modified.  */

rtx
mips_rewrite_small_data (rtx pattern)
{
  pattern = copy_insn (pattern);
  for_each_rtx (&pattern, mips_rewrite_small_data_1, NULL);
  return pattern;
}
3169
 
3170
/* We need a lot of little routines to check the range of MIPS16 immediate
3171
   operands.  */
3172
 
3173
static int
3174
m16_check_op (rtx op, int low, int high, int mask)
3175
{
3176
  return (CONST_INT_P (op)
3177
          && IN_RANGE (INTVAL (op), low, high)
3178
          && (INTVAL (op) & mask) == 0);
3179
}
3180
 
3181
/* Match values 1..8 (stored as such).  */
int
m16_uimm3_b (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return m16_check_op (op, 0x1, 0x8, 0);
}

/* Match signed 4-bit values.  */
int
m16_simm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return m16_check_op (op, -0x8, 0x7, 0);
}

/* Match negated signed 4-bit values.  */
int
m16_nsimm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return m16_check_op (op, -0x7, 0x8, 0);
}

/* Match signed 5-bit values.  */
int
m16_simm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return m16_check_op (op, -0x10, 0xf, 0);
}

/* Match negated signed 5-bit values.  */
int
m16_nsimm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return m16_check_op (op, -0xf, 0x10, 0);
}

/* Match signed 5-bit values scaled by 4 (word-aligned).  */
int
m16_uimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return m16_check_op (op, -0x10 << 2, 0xf << 2, 3);
}

/* Match negated signed 5-bit values scaled by 4.  */
int
m16_nuimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return m16_check_op (op, -0xf << 2, 0x10 << 2, 3);
}

/* Match signed 8-bit values.  */
int
m16_simm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return m16_check_op (op, -0x80, 0x7f, 0);
}

/* Match negated signed 8-bit values.  */
int
m16_nsimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return m16_check_op (op, -0x7f, 0x80, 0);
}

/* Match unsigned 8-bit values.  */
int
m16_uimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return m16_check_op (op, 0x0, 0xff, 0);
}

/* Match negated unsigned 8-bit values.  */
int
m16_nuimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return m16_check_op (op, -0xff, 0x0, 0);
}

/* Match unsigned 8-bit values shifted down by one (-1..0xfe).  */
int
m16_uimm8_m1_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return m16_check_op (op, -0x1, 0xfe, 0);
}

/* Match unsigned 8-bit values scaled by 4 (word-aligned).  */
int
m16_uimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return m16_check_op (op, 0x0, 0xff << 2, 3);
}

/* Match negated unsigned 8-bit values scaled by 4.  */
int
m16_nuimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return m16_check_op (op, -0xff << 2, 0x0, 3);
}

/* Match signed 8-bit values scaled by 8 (doubleword-aligned).  */
int
m16_simm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return m16_check_op (op, -0x80 << 3, 0x7f << 3, 7);
}

/* Match negated signed 8-bit values scaled by 8.  */
int
m16_nsimm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return m16_check_op (op, -0x7f << 3, 0x80 << 3, 7);
}
3276
 
3277
/* The cost of loading values from the constant pool.  It should be
   larger than the cost of any constant we want to synthesize inline.  */
#define CONSTANT_POOL_COST COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 8)

/* Return the cost of X when used as an operand to the MIPS16 instruction
   that implements CODE.  Return -1 if there is no such instruction, or if
   X is not a valid immediate operand for it.  */

static int
mips16_constant_cost (int code, HOST_WIDE_INT x)
{
  switch (code)
    {
    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      /* Shifts by between 1 and 8 bits (inclusive) are unextended,
         other shifts are extended.  The shift patterns truncate the shift
         count to the right size, so there are no out-of-range values.  */
      if (IN_RANGE (x, 1, 8))
        return 0;
      return COSTS_N_INSNS (1);

    case PLUS:
      if (IN_RANGE (x, -128, 127))
        return 0;
      if (SMALL_OPERAND (x))
        return COSTS_N_INSNS (1);
      return -1;

    case LEU:
      /* Like LE, but reject the always-true case.  */
      if (x == -1)
        return -1;
      /* Fall through.  */
    case LE:
      /* We add 1 to the immediate and use SLT.  */
      x += 1;
      /* Fall through.  */
    case XOR:
      /* We can use CMPI for an xor with an unsigned 16-bit X.  */
      /* Fall through.  */
    case LT:
    case LTU:
      if (IN_RANGE (x, 0, 255))
        return 0;
      if (SMALL_OPERAND_UNSIGNED (x))
        return COSTS_N_INSNS (1);
      return -1;

    case EQ:
    case NE:
      /* Equality comparisons with 0 are cheap.  */
      if (x == 0)
        return 0;
      return -1;

    default:
      return -1;
    }
}
3335
 
3336
/* Return true if there is a non-MIPS16 instruction that implements CODE
   and if that instruction accepts X as an immediate operand.  */

static int
mips_immediate_operand_p (int code, HOST_WIDE_INT x)
{
  switch (code)
    {
    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      /* All shift counts are truncated to a valid constant.  */
      return true;

    case ROTATE:
    case ROTATERT:
      /* Likewise rotates, if the target supports rotates at all.  */
      return ISA_HAS_ROR;

    case AND:
    case IOR:
    case XOR:
      /* These instructions take 16-bit unsigned immediates.  */
      return SMALL_OPERAND_UNSIGNED (x);

    case PLUS:
    case LT:
    case LTU:
      /* These instructions take 16-bit signed immediates.  */
      return SMALL_OPERAND (x);

    case EQ:
    case NE:
    case GT:
    case GTU:
      /* The "immediate" forms of these instructions are really
         implemented as comparisons with register 0.  */
      return x == 0;

    case GE:
    case GEU:
      /* Likewise, meaning that the only valid immediate operand is 1.  */
      return x == 1;

    case LE:
      /* We add 1 to the immediate and use SLT.  */
      return SMALL_OPERAND (x + 1);

    case LEU:
      /* Likewise SLTU, but reject the always-true case.  */
      return SMALL_OPERAND (x + 1) && x + 1 != 0;

    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      /* The bit position and size are immediate operands.  */
      return ISA_HAS_EXT_INS;

    default:
      /* By default assume that $0 can be used for 0.  */
      return x == 0;
    }
}
3398
 
3399
/* Return the cost of binary operation X, given that the instruction
   sequence for a word-sized or smaller operation has cost SINGLE_COST
   and that the sequence of a double-word operation has cost DOUBLE_COST.
   If SPEED is true, optimize for speed otherwise optimize for size.  */

static int
mips_binary_cost (rtx x, int single_cost, int double_cost, bool speed)
{
  int cost;

  /* Pick the base cost according to whether the operation is
     double-word sized.  */
  if (GET_MODE_SIZE (GET_MODE (x)) == UNITS_PER_WORD * 2)
    cost = double_cost;
  else
    cost = single_cost;
  /* Add the cost of evaluating both operands.  */
  return (cost
          + set_src_cost (XEXP (x, 0), speed)
          + rtx_cost (XEXP (x, 1), GET_CODE (x), 1, speed));
}
3417
 
3418
/* Return the cost of floating-point multiplications of mode MODE.  */
3419
 
3420
static int
3421
mips_fp_mult_cost (enum machine_mode mode)
3422
{
3423
  return mode == DFmode ? mips_cost->fp_mult_df : mips_cost->fp_mult_sf;
3424
}
3425
 
3426
/* Return the cost of floating-point divisions of mode MODE.  */
3427
 
3428
static int
3429
mips_fp_div_cost (enum machine_mode mode)
3430
{
3431
  return mode == DFmode ? mips_cost->fp_div_df : mips_cost->fp_div_sf;
3432
}
3433
 
3434
/* Return the cost of sign-extending OP to mode MODE, not including the
3435
   cost of OP itself.  */
3436
 
3437
static int
3438
mips_sign_extend_cost (enum machine_mode mode, rtx op)
3439
{
3440
  if (MEM_P (op))
3441
    /* Extended loads are as cheap as unextended ones.  */
3442
    return 0;
3443
 
3444
  if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
3445
    /* A sign extension from SImode to DImode in 64-bit mode is free.  */
3446
    return 0;
3447
 
3448
  if (ISA_HAS_SEB_SEH || GENERATE_MIPS16E)
3449
    /* We can use SEB or SEH.  */
3450
    return COSTS_N_INSNS (1);
3451
 
3452
  /* We need to use a shift left and a shift right.  */
3453
  return COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 2);
3454
}
3455
 
3456
/* Return the cost of zero-extending OP to mode MODE, not including the
3457
   cost of OP itself.  */
3458
 
3459
static int
3460
mips_zero_extend_cost (enum machine_mode mode, rtx op)
3461
{
3462
  if (MEM_P (op))
3463
    /* Extended loads are as cheap as unextended ones.  */
3464
    return 0;
3465
 
3466
  if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
3467
    /* We need a shift left by 32 bits and a shift right by 32 bits.  */
3468
    return COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 2);
3469
 
3470
  if (GENERATE_MIPS16E)
3471
    /* We can use ZEB or ZEH.  */
3472
    return COSTS_N_INSNS (1);
3473
 
3474
  if (TARGET_MIPS16)
3475
    /* We need to load 0xff or 0xffff into a register and use AND.  */
3476
    return COSTS_N_INSNS (GET_MODE (op) == QImode ? 2 : 3);
3477
 
3478
  /* We can use ANDI.  */
3479
  return COSTS_N_INSNS (1);
3480
}
3481
 
3482
/* Implement TARGET_RTX_COSTS.  */
3483
 
3484
static bool
3485
mips_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
3486
                int *total, bool speed)
3487
{
3488
  enum machine_mode mode = GET_MODE (x);
3489
  bool float_mode_p = FLOAT_MODE_P (mode);
3490
  int cost;
3491
  rtx addr;
3492
 
3493
  /* The cost of a COMPARE is hard to define for MIPS.  COMPAREs don't
3494
     appear in the instruction stream, and the cost of a comparison is
3495
     really the cost of the branch or scc condition.  At the time of
3496
     writing, GCC only uses an explicit outer COMPARE code when optabs
3497
     is testing whether a constant is expensive enough to force into a
3498
     register.  We want optabs to pass such constants through the MIPS
3499
     expanders instead, so make all constants very cheap here.  */
3500
  if (outer_code == COMPARE)
3501
    {
3502
      gcc_assert (CONSTANT_P (x));
3503
      *total = 0;
3504
      return true;
3505
    }
3506
 
3507
  switch (code)
3508
    {
3509
    case CONST_INT:
3510
      /* Treat *clear_upper32-style ANDs as having zero cost in the
3511
         second operand.  The cost is entirely in the first operand.
3512
 
3513
         ??? This is needed because we would otherwise try to CSE
3514
         the constant operand.  Although that's the right thing for
3515
         instructions that continue to be a register operation throughout
3516
         compilation, it is disastrous for instructions that could
3517
         later be converted into a memory operation.  */
3518
      if (TARGET_64BIT
3519
          && outer_code == AND
3520
          && UINTVAL (x) == 0xffffffff)
3521
        {
3522
          *total = 0;
3523
          return true;
3524
        }
3525
 
3526
      if (TARGET_MIPS16)
3527
        {
3528
          cost = mips16_constant_cost (outer_code, INTVAL (x));
3529
          if (cost >= 0)
3530
            {
3531
              *total = cost;
3532
              return true;
3533
            }
3534
        }
3535
      else
3536
        {
3537
          /* When not optimizing for size, we care more about the cost
3538
             of hot code, and hot code is often in a loop.  If a constant
3539
             operand needs to be forced into a register, we will often be
3540
             able to hoist the constant load out of the loop, so the load
3541
             should not contribute to the cost.  */
3542
          if (speed || mips_immediate_operand_p (outer_code, INTVAL (x)))
3543
            {
3544
              *total = 0;
3545
              return true;
3546
            }
3547
        }
3548
      /* Fall through.  */
3549
 
3550
    case CONST:
3551
    case SYMBOL_REF:
3552
    case LABEL_REF:
3553
    case CONST_DOUBLE:
3554
      if (force_to_mem_operand (x, VOIDmode))
3555
        {
3556
          *total = COSTS_N_INSNS (1);
3557
          return true;
3558
        }
3559
      cost = mips_const_insns (x);
3560
      if (cost > 0)
3561
        {
3562
          /* If the constant is likely to be stored in a GPR, SETs of
3563
             single-insn constants are as cheap as register sets; we
3564
             never want to CSE them.
3565
 
3566
             Don't reduce the cost of storing a floating-point zero in
3567
             FPRs.  If we have a zero in an FPR for other reasons, we
3568
             can get better cfg-cleanup and delayed-branch results by
3569
             using it consistently, rather than using $0 sometimes and
3570
             an FPR at other times.  Also, moves between floating-point
3571
             registers are sometimes cheaper than (D)MTC1 $0.  */
3572
          if (cost == 1
3573
              && outer_code == SET
3574
              && !(float_mode_p && TARGET_HARD_FLOAT))
3575
            cost = 0;
3576
          /* When non-MIPS16 code loads a constant N>1 times, we rarely
3577
             want to CSE the constant itself.  It is usually better to
3578
             have N copies of the last operation in the sequence and one
3579
             shared copy of the other operations.  (Note that this is
3580
             not true for MIPS16 code, where the final operation in the
3581
             sequence is often an extended instruction.)
3582
 
3583
             Also, if we have a CONST_INT, we don't know whether it is
3584
             for a word or doubleword operation, so we cannot rely on
3585
             the result of mips_build_integer.  */
3586
          else if (!TARGET_MIPS16
3587
                   && (outer_code == SET || mode == VOIDmode))
3588
            cost = 1;
3589
          *total = COSTS_N_INSNS (cost);
3590
          return true;
3591
        }
3592
      /* The value will need to be fetched from the constant pool.  */
3593
      *total = CONSTANT_POOL_COST;
3594
      return true;
3595
 
3596
    case MEM:
3597
      /* If the address is legitimate, return the number of
3598
         instructions it needs.  */
3599
      addr = XEXP (x, 0);
3600
      cost = mips_address_insns (addr, mode, true);
3601
      if (cost > 0)
3602
        {
3603
          *total = COSTS_N_INSNS (cost + 1);
3604
          return true;
3605
        }
3606
      /* Check for a scaled indexed address.  */
3607
      if (mips_lwxs_address_p (addr)
3608
          || mips_lx_address_p (addr, mode))
3609
        {
3610
          *total = COSTS_N_INSNS (2);
3611
          return true;
3612
        }
3613
      /* Otherwise use the default handling.  */
3614
      return false;
3615
 
3616
    case FFS:
3617
      *total = COSTS_N_INSNS (6);
3618
      return false;
3619
 
3620
    case NOT:
3621
      *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 2 : 1);
3622
      return false;
3623
 
3624
    case AND:
3625
      /* Check for a *clear_upper32 pattern and treat it like a zero
3626
         extension.  See the pattern's comment for details.  */
3627
      if (TARGET_64BIT
3628
          && mode == DImode
3629
          && CONST_INT_P (XEXP (x, 1))
3630
          && UINTVAL (XEXP (x, 1)) == 0xffffffff)
3631
        {
3632
          *total = (mips_zero_extend_cost (mode, XEXP (x, 0))
3633
                    + set_src_cost (XEXP (x, 0), speed));
3634
          return true;
3635
        }
3636
      /* Fall through.  */
3637
 
3638
    case IOR:
3639
    case XOR:
3640
      /* Double-word operations use two single-word operations.  */
3641
      *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (2),
3642
                                 speed);
3643
      return true;
3644
 
3645
    case ASHIFT:
3646
    case ASHIFTRT:
3647
    case LSHIFTRT:
3648
    case ROTATE:
3649
    case ROTATERT:
3650
      if (CONSTANT_P (XEXP (x, 1)))
3651
        *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4),
3652
                                   speed);
3653
      else
3654
        *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (12),
3655
                                   speed);
3656
      return true;
3657
 
3658
    case ABS:
3659
      if (float_mode_p)
3660
        *total = mips_cost->fp_add;
3661
      else
3662
        *total = COSTS_N_INSNS (4);
3663
      return false;
3664
 
3665
    case LO_SUM:
3666
      /* Low-part immediates need an extended MIPS16 instruction.  */
3667
      *total = (COSTS_N_INSNS (TARGET_MIPS16 ? 2 : 1)
3668
                + set_src_cost (XEXP (x, 0), speed));
3669
      return true;
3670
 
3671
    case LT:
3672
    case LTU:
3673
    case LE:
3674
    case LEU:
3675
    case GT:
3676
    case GTU:
3677
    case GE:
3678
    case GEU:
3679
    case EQ:
3680
    case NE:
3681
    case UNORDERED:
3682
    case LTGT:
3683
      /* Branch comparisons have VOIDmode, so use the first operand's
3684
         mode instead.  */
3685
      mode = GET_MODE (XEXP (x, 0));
3686
      if (FLOAT_MODE_P (mode))
3687
        {
3688
          *total = mips_cost->fp_add;
3689
          return false;
3690
        }
3691
      *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4),
3692
                                 speed);
3693
      return true;
3694
 
3695
    case MINUS:
3696
      if (float_mode_p
3697
          && (ISA_HAS_NMADD4_NMSUB4 (mode) || ISA_HAS_NMADD3_NMSUB3 (mode))
3698
          && TARGET_FUSED_MADD
3699
          && !HONOR_NANS (mode)
3700
          && !HONOR_SIGNED_ZEROS (mode))
3701
        {
3702
          /* See if we can use NMADD or NMSUB.  See mips.md for the
3703
             associated patterns.  */
3704
          rtx op0 = XEXP (x, 0);
3705
          rtx op1 = XEXP (x, 1);
3706
          if (GET_CODE (op0) == MULT && GET_CODE (XEXP (op0, 0)) == NEG)
3707
            {
3708
              *total = (mips_fp_mult_cost (mode)
3709
                        + set_src_cost (XEXP (XEXP (op0, 0), 0), speed)
3710
                        + set_src_cost (XEXP (op0, 1), speed)
3711
                        + set_src_cost (op1, speed));
3712
              return true;
3713
            }
3714
          if (GET_CODE (op1) == MULT)
3715
            {
3716
              *total = (mips_fp_mult_cost (mode)
3717
                        + set_src_cost (op0, speed)
3718
                        + set_src_cost (XEXP (op1, 0), speed)
3719
                        + set_src_cost (XEXP (op1, 1), speed));
3720
              return true;
3721
            }
3722
        }
3723
      /* Fall through.  */
3724
 
3725
    case PLUS:
3726
      if (float_mode_p)
3727
        {
3728
          /* If this is part of a MADD or MSUB, treat the PLUS as
3729
             being free.  */
3730
          if (ISA_HAS_FP4
3731
              && TARGET_FUSED_MADD
3732
              && GET_CODE (XEXP (x, 0)) == MULT)
3733
            *total = 0;
3734
          else
3735
            *total = mips_cost->fp_add;
3736
          return false;
3737
        }
3738
 
3739
      /* Double-word operations require three single-word operations and
3740
         an SLTU.  The MIPS16 version then needs to move the result of
3741
         the SLTU from $24 to a MIPS16 register.  */
3742
      *total = mips_binary_cost (x, COSTS_N_INSNS (1),
3743
                                 COSTS_N_INSNS (TARGET_MIPS16 ? 5 : 4),
3744
                                 speed);
3745
      return true;
3746
 
3747
    case NEG:
3748
      if (float_mode_p
3749
          && (ISA_HAS_NMADD4_NMSUB4 (mode) || ISA_HAS_NMADD3_NMSUB3 (mode))
3750
          && TARGET_FUSED_MADD
3751
          && !HONOR_NANS (mode)
3752
          && HONOR_SIGNED_ZEROS (mode))
3753
        {
3754
          /* See if we can use NMADD or NMSUB.  See mips.md for the
3755
             associated patterns.  */
3756
          rtx op = XEXP (x, 0);
3757
          if ((GET_CODE (op) == PLUS || GET_CODE (op) == MINUS)
3758
              && GET_CODE (XEXP (op, 0)) == MULT)
3759
            {
3760
              *total = (mips_fp_mult_cost (mode)
3761
                        + set_src_cost (XEXP (XEXP (op, 0), 0), speed)
3762
                        + set_src_cost (XEXP (XEXP (op, 0), 1), speed)
3763
                        + set_src_cost (XEXP (op, 1), speed));
3764
              return true;
3765
            }
3766
        }
3767
 
3768
      if (float_mode_p)
3769
        *total = mips_cost->fp_add;
3770
      else
3771
        *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 4 : 1);
3772
      return false;
3773
 
3774
    case MULT:
3775
      if (float_mode_p)
3776
        *total = mips_fp_mult_cost (mode);
3777
      else if (mode == DImode && !TARGET_64BIT)
3778
        /* Synthesized from 2 mulsi3s, 1 mulsidi3 and two additions,
3779
           where the mulsidi3 always includes an MFHI and an MFLO.  */
3780
        *total = (speed
3781
                  ? mips_cost->int_mult_si * 3 + 6
3782
                  : COSTS_N_INSNS (ISA_HAS_MUL3 ? 7 : 9));
3783
      else if (!speed)
3784
        *total = (ISA_HAS_MUL3 ? 1 : 2);
3785
      else if (mode == DImode)
3786
        *total = mips_cost->int_mult_di;
3787
      else
3788
        *total = mips_cost->int_mult_si;
3789
      return false;
3790
 
3791
    case DIV:
3792
      /* Check for a reciprocal.  */
3793
      if (float_mode_p
3794
          && ISA_HAS_FP4
3795
          && flag_unsafe_math_optimizations
3796
          && XEXP (x, 0) == CONST1_RTX (mode))
3797
        {
3798
          if (outer_code == SQRT || GET_CODE (XEXP (x, 1)) == SQRT)
3799
            /* An rsqrt<mode>a or rsqrt<mode>b pattern.  Count the
3800
               division as being free.  */
3801
            *total = set_src_cost (XEXP (x, 1), speed);
3802
          else
3803
            *total = (mips_fp_div_cost (mode)
3804
                      + set_src_cost (XEXP (x, 1), speed));
3805
          return true;
3806
        }
3807
      /* Fall through.  */
3808
 
3809
    case SQRT:
3810
    case MOD:
3811
      if (float_mode_p)
3812
        {
3813
          *total = mips_fp_div_cost (mode);
3814
          return false;
3815
        }
3816
      /* Fall through.  */
3817
 
3818
    case UDIV:
3819
    case UMOD:
3820
      if (!speed)
3821
        {
3822
          /* It is our responsibility to make division by a power of 2
3823
             as cheap as 2 register additions if we want the division
3824
             expanders to be used for such operations; see the setting
3825
             of sdiv_pow2_cheap in optabs.c.  Using (D)DIV for MIPS16
3826
             should always produce shorter code than using
3827
             expand_sdiv2_pow2.  */
3828
          if (TARGET_MIPS16
3829
              && CONST_INT_P (XEXP (x, 1))
3830
              && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
3831
            {
3832
              *total = COSTS_N_INSNS (2) + set_src_cost (XEXP (x, 0), speed);
3833
              return true;
3834
            }
3835
          *total = COSTS_N_INSNS (mips_idiv_insns ());
3836
        }
3837
      else if (mode == DImode)
3838
        *total = mips_cost->int_div_di;
3839
      else
3840
        *total = mips_cost->int_div_si;
3841
      return false;
3842
 
3843
    case SIGN_EXTEND:
3844
      *total = mips_sign_extend_cost (mode, XEXP (x, 0));
3845
      return false;
3846
 
3847
    case ZERO_EXTEND:
3848
      if (outer_code == SET
3849
          && ISA_HAS_BADDU
3850
          && (GET_CODE (XEXP (x, 0)) == TRUNCATE
3851
              || GET_CODE (XEXP (x, 0)) == SUBREG)
3852
          && GET_MODE (XEXP (x, 0)) == QImode
3853
          && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
3854
        {
3855
          *total = set_src_cost (XEXP (XEXP (x, 0), 0), speed);
3856
          return true;
3857
        }
3858
      *total = mips_zero_extend_cost (mode, XEXP (x, 0));
3859
      return false;
3860
 
3861
    case FLOAT:
3862
    case UNSIGNED_FLOAT:
3863
    case FIX:
3864
    case FLOAT_EXTEND:
3865
    case FLOAT_TRUNCATE:
3866
      *total = mips_cost->fp_add;
3867
      return false;
3868
 
3869
    default:
3870
      return false;
3871
    }
3872
}
3873
 
3874
/* Implement TARGET_ADDRESS_COST.  */
3875
 
3876
static int
3877
mips_address_cost (rtx addr, bool speed ATTRIBUTE_UNUSED)
3878
{
3879
  return mips_address_insns (addr, SImode, false);
3880
}
3881
 
3882
/* Information about a single instruction in a multi-instruction
   asm sequence.  A member is either a label definition or one
   output_asm_insn-style instruction.  */
struct mips_multi_member {
  /* True if this is a label, false if it is code.  */
  bool is_label_p;

  /* The output_asm_insn format of the instruction, or the full label
     definition (including the colon) if IS_LABEL_P.  */
  const char *format;

  /* The operands to the instruction.  */
  rtx operands[MAX_RECOG_OPERANDS];
};
typedef struct mips_multi_member mips_multi_member;

/* Vector definitions for the above.  */
DEF_VEC_O(mips_multi_member);
DEF_VEC_ALLOC_O(mips_multi_member, heap);

/* The instructions that make up the current multi-insn sequence.  */
static VEC (mips_multi_member, heap) *mips_multi_members;

/* How many instructions (as opposed to labels) are in the current
   multi-insn sequence.  */
static unsigned int mips_multi_num_insns;
3906
 
3907
/* Start a new multi-insn sequence.  */
3908
 
3909
static void
3910
mips_multi_start (void)
3911
{
3912
  VEC_truncate (mips_multi_member, mips_multi_members, 0);
3913
  mips_multi_num_insns = 0;
3914
}
3915
 
3916
/* Add a new, uninitialized member to the current multi-insn sequence.  */
3917
 
3918
static struct mips_multi_member *
3919
mips_multi_add (void)
3920
{
3921
  return VEC_safe_push (mips_multi_member, heap, mips_multi_members, 0);
3922
}
3923
 
3924
/* Add a normal insn with the given asm format to the current multi-insn
3925
   sequence.  The other arguments are a null-terminated list of operands.  */
3926
 
3927
static void
3928
mips_multi_add_insn (const char *format, ...)
3929
{
3930
  struct mips_multi_member *member;
3931
  va_list ap;
3932
  unsigned int i;
3933
  rtx op;
3934
 
3935
  member = mips_multi_add ();
3936
  member->is_label_p = false;
3937
  member->format = format;
3938
  va_start (ap, format);
3939
  i = 0;
3940
  while ((op = va_arg (ap, rtx)))
3941
    member->operands[i++] = op;
3942
  va_end (ap);
3943
  mips_multi_num_insns++;
3944
}
3945
 
3946
/* Add the given label definition to the current multi-insn sequence.
3947
   The definition should include the colon.  */
3948
 
3949
static void
3950
mips_multi_add_label (const char *label)
3951
{
3952
  struct mips_multi_member *member;
3953
 
3954
  member = mips_multi_add ();
3955
  member->is_label_p = true;
3956
  member->format = label;
3957
}
3958
 
3959
/* Return the index of the last member of the current multi-insn sequence.  */
3960
 
3961
static unsigned int
3962
mips_multi_last_index (void)
3963
{
3964
  return VEC_length (mips_multi_member, mips_multi_members) - 1;
3965
}
3966
 
3967
/* Add a copy of an existing instruction to the current multi-insn
3968
   sequence.  I is the index of the instruction that should be copied.  */
3969
 
3970
static void
3971
mips_multi_copy_insn (unsigned int i)
3972
{
3973
  struct mips_multi_member *member;
3974
 
3975
  member = mips_multi_add ();
3976
  memcpy (member, VEC_index (mips_multi_member, mips_multi_members, i),
3977
          sizeof (*member));
3978
  gcc_assert (!member->is_label_p);
3979
}
3980
 
3981
/* Change the operand of an existing instruction in the current
3982
   multi-insn sequence.  I is the index of the instruction,
3983
   OP is the index of the operand, and X is the new value.  */
3984
 
3985
static void
3986
mips_multi_set_operand (unsigned int i, unsigned int op, rtx x)
3987
{
3988
  VEC_index (mips_multi_member, mips_multi_members, i)->operands[op] = x;
3989
}
3990
 
3991
/* Write out the asm code for the current multi-insn sequence.  */
3992
 
3993
static void
3994
mips_multi_write (void)
3995
{
3996
  struct mips_multi_member *member;
3997
  unsigned int i;
3998
 
3999
  FOR_EACH_VEC_ELT (mips_multi_member, mips_multi_members, i, member)
4000
    if (member->is_label_p)
4001
      fprintf (asm_out_file, "%s\n", member->format);
4002
    else
4003
      output_asm_insn (member->format, member->operands);
4004
}
4005
 
4006
/* Return one word of double-word value OP, taking into account the fixed
4007
   endianness of certain registers.  HIGH_P is true to select the high part,
4008
   false to select the low part.  */
4009
 
4010
rtx
4011
mips_subword (rtx op, bool high_p)
4012
{
4013
  unsigned int byte, offset;
4014
  enum machine_mode mode;
4015
 
4016
  mode = GET_MODE (op);
4017
  if (mode == VOIDmode)
4018
    mode = TARGET_64BIT ? TImode : DImode;
4019
 
4020
  if (TARGET_BIG_ENDIAN ? !high_p : high_p)
4021
    byte = UNITS_PER_WORD;
4022
  else
4023
    byte = 0;
4024
 
4025
  if (FP_REG_RTX_P (op))
4026
    {
4027
      /* Paired FPRs are always ordered little-endian.  */
4028
      offset = (UNITS_PER_WORD < UNITS_PER_HWFPVALUE ? high_p : byte != 0);
4029
      return gen_rtx_REG (word_mode, REGNO (op) + offset);
4030
    }
4031
 
4032
  if (MEM_P (op))
4033
    return mips_rewrite_small_data (adjust_address (op, word_mode, byte));
4034
 
4035
  return simplify_gen_subreg (word_mode, op, mode, byte);
4036
}
4037
 
4038
/* Return true if a 64-bit move from SRC to DEST should be split into two.  */
4039
 
4040
bool
4041
mips_split_64bit_move_p (rtx dest, rtx src)
4042
{
4043
  if (TARGET_64BIT)
4044
    return false;
4045
 
4046
  /* FPR-to-FPR moves can be done in a single instruction, if they're
4047
     allowed at all.  */
4048
  if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
4049
    return false;
4050
 
4051
  /* Check for floating-point loads and stores.  */
4052
  if (ISA_HAS_LDC1_SDC1)
4053
    {
4054
      if (FP_REG_RTX_P (dest) && MEM_P (src))
4055
        return false;
4056
      if (FP_REG_RTX_P (src) && MEM_P (dest))
4057
        return false;
4058
    }
4059
  return true;
4060
}
4061
 
4062
/* Split a doubleword move from SRC to DEST.  On 32-bit targets,
4063
   this function handles 64-bit moves for which mips_split_64bit_move_p
4064
   holds.  For 64-bit targets, this function handles 128-bit moves.  */
4065
 
4066
void
mips_split_doubleword_move (rtx dest, rtx src)
{
  rtx low_dest;

  if (FP_REG_RTX_P (dest) || FP_REG_RTX_P (src))
    {
      /* Moves that involve an FPR must go through the dedicated
         move_doubleword_fpr* patterns; dispatch on the destination
         mode and target width.  */
      if (!TARGET_64BIT && GET_MODE (dest) == DImode)
        emit_insn (gen_move_doubleword_fprdi (dest, src));
      else if (!TARGET_64BIT && GET_MODE (dest) == DFmode)
        emit_insn (gen_move_doubleword_fprdf (dest, src));
      else if (!TARGET_64BIT && GET_MODE (dest) == V2SFmode)
        emit_insn (gen_move_doubleword_fprv2sf (dest, src));
      else if (!TARGET_64BIT && GET_MODE (dest) == V2SImode)
        emit_insn (gen_move_doubleword_fprv2si (dest, src));
      else if (!TARGET_64BIT && GET_MODE (dest) == V4HImode)
        emit_insn (gen_move_doubleword_fprv4hi (dest, src));
      else if (!TARGET_64BIT && GET_MODE (dest) == V8QImode)
        emit_insn (gen_move_doubleword_fprv8qi (dest, src));
      else if (TARGET_64BIT && GET_MODE (dest) == TFmode)
        emit_insn (gen_move_doubleword_fprtf (dest, src));
      else
        /* No other FPR doubleword modes are expected here.  */
        gcc_unreachable ();
    }
  else if (REG_P (dest) && REGNO (dest) == MD_REG_FIRST)
    {
      /* Destination is the HI/LO pair: move the low word first, then
         emit the mthi pattern, which also mentions LOW_DEST so the
         two halves stay correctly ordered.  */
      low_dest = mips_subword (dest, false);
      mips_emit_move (low_dest, mips_subword (src, false));
      if (TARGET_64BIT)
        emit_insn (gen_mthidi_ti (dest, mips_subword (src, true), low_dest));
      else
        emit_insn (gen_mthisi_di (dest, mips_subword (src, true), low_dest));
    }
  else if (REG_P (src) && REGNO (src) == MD_REG_FIRST)
    {
      /* Source is the HI/LO pair: read the low half with a normal
         move, then the high half with the mfhi pattern.  */
      mips_emit_move (mips_subword (dest, false), mips_subword (src, false));
      if (TARGET_64BIT)
        emit_insn (gen_mfhidi_ti (mips_subword (dest, true), src));
      else
        emit_insn (gen_mfhisi_di (mips_subword (dest, true), src));
    }
  else
    {
      /* The operation can be split into two normal moves.  Decide in
         which order to do them.  */
      low_dest = mips_subword (dest, false);
      if (REG_P (low_dest)
          && reg_overlap_mentioned_p (low_dest, src))
        {
          /* Writing the low destination word first would clobber a
             part of SRC that the high move still needs, so do the
             high word first.  */
          mips_emit_move (mips_subword (dest, true), mips_subword (src, true));
          mips_emit_move (low_dest, mips_subword (src, false));
        }
      else
        {
          mips_emit_move (low_dest, mips_subword (src, false));
          mips_emit_move (mips_subword (dest, true), mips_subword (src, true));
        }
    }
}
4125
 
4126
/* Return the appropriate instructions to move SRC into DEST.  Assume
4127
   that SRC is operand 1 and DEST is operand 0.  */
4128
 
4129
const char *
mips_output_move (rtx dest, rtx src)
{
  enum rtx_code dest_code, src_code;
  enum machine_mode mode;
  enum mips_symbol_type symbol_type;
  bool dbl_p;

  dest_code = GET_CODE (dest);
  src_code = GET_CODE (src);
  mode = GET_MODE (dest);
  dbl_p = (GET_MODE_SIZE (mode) == 8);

  /* "#" tells the output machinery that this move must be split;
     mips_split_doubleword_move does the actual work.  */
  if (dbl_p && mips_split_64bit_move_p (dest, src))
    return "#";

  /* Source is a GPR or, outside MIPS16, constant zero (which the %z
     operand prefix turns into $0).  */
  if ((src_code == REG && GP_REG_P (REGNO (src)))
      || (!TARGET_MIPS16 && src == CONST0_RTX (mode)))
    {
      if (dest_code == REG)
        {
          if (GP_REG_P (REGNO (dest)))
            return "move\t%0,%z1";

          /* Moves to HI are handled by special .md insns.  */
          if (REGNO (dest) == LO_REGNUM)
            return "mtlo\t%z1";

          if (DSP_ACC_REG_P (REGNO (dest)))
            {
              /* Patch the accumulator suffix (e.g. "hi"/"lo" from the
                 register name) into the static template in place.  */
              static char retval[] = "mt__\t%z1,%q0";

              retval[2] = reg_names[REGNO (dest)][4];
              retval[3] = reg_names[REGNO (dest)][5];
              return retval;
            }

          if (FP_REG_P (REGNO (dest)))
            return dbl_p ? "dmtc1\t%z1,%0" : "mtc1\t%z1,%0";

          if (ALL_COP_REG_P (REGNO (dest)))
            {
              /* Patch in the coprocessor number; for the 32-bit form,
                 skip the leading 'd' by returning retval + 1.  */
              static char retval[] = "dmtc_\t%z1,%0";

              retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
              return dbl_p ? retval : retval + 1;
            }
        }
      if (dest_code == MEM)
        /* Store of a GPR (or $0) to memory, sized by the mode.  */
        switch (GET_MODE_SIZE (mode))
          {
          case 1: return "sb\t%z1,%0";
          case 2: return "sh\t%z1,%0";
          case 4: return "sw\t%z1,%0";
          case 8: return "sd\t%z1,%0";
          }
    }
  /* Destination is a GPR.  */
  if (dest_code == REG && GP_REG_P (REGNO (dest)))
    {
      if (src_code == REG)
        {
          /* Moves from HI are handled by special .md insns.  */
          if (REGNO (src) == LO_REGNUM)
            {
              /* When generating VR4120 or VR4130 code, we use MACC and
                 DMACC instead of MFLO.  This avoids both the normal
                 MIPS III HI/LO hazards and the errata related to
                 -mfix-vr4130.  */
              if (ISA_HAS_MACCHI)
                return dbl_p ? "dmacc\t%0,%.,%." : "macc\t%0,%.,%.";
              return "mflo\t%0";
            }

          if (DSP_ACC_REG_P (REGNO (src)))
            {
              static char retval[] = "mf__\t%0,%q1";

              retval[2] = reg_names[REGNO (src)][4];
              retval[3] = reg_names[REGNO (src)][5];
              return retval;
            }

          if (FP_REG_P (REGNO (src)))
            return dbl_p ? "dmfc1\t%0,%1" : "mfc1\t%0,%1";

          if (ALL_COP_REG_P (REGNO (src)))
            {
              static char retval[] = "dmfc_\t%0,%1";

              retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
              return dbl_p ? retval : retval + 1;
            }

          /* Condition-code register: materialize 1.0f's high bits with
             LUI, then conditionally clear with MOVF.  */
          if (ST_REG_P (REGNO (src)) && ISA_HAS_8CC)
            return "lui\t%0,0x3f80\n\tmovf\t%0,%.,%1";
        }

      if (src_code == MEM)
        /* Load into a GPR, sized by the mode (byte/half loads are
           unsigned).  */
        switch (GET_MODE_SIZE (mode))
          {
          case 1: return "lbu\t%0,%1";
          case 2: return "lhu\t%0,%1";
          case 4: return "lw\t%0,%1";
          case 8: return "ld\t%0,%1";
          }

      if (src_code == CONST_INT)
        {
          /* Don't use the X format for the operand itself, because that
             will give out-of-range numbers for 64-bit hosts and 32-bit
             targets.  */
          if (!TARGET_MIPS16)
            return "li\t%0,%1\t\t\t# %X1";

          if (SMALL_OPERAND_UNSIGNED (INTVAL (src)))
            return "li\t%0,%1";

          /* Negatable small constants are split (li + neg) on MIPS16.  */
          if (SMALL_OPERAND_UNSIGNED (-INTVAL (src)))
            return "#";
        }

      if (src_code == HIGH)
        return TARGET_MIPS16 ? "#" : "lui\t%0,%h1";

      if (CONST_GP_P (src))
        return "move\t%0,%1";

      if (mips_symbolic_constant_p (src, SYMBOL_CONTEXT_LEA, &symbol_type)
          && mips_lo_relocs[symbol_type] != 0)
        {
          /* A signed 16-bit constant formed by applying a relocation
             operator to a symbolic address.  */
          gcc_assert (!mips_split_p[symbol_type]);
          return "li\t%0,%R1";
        }

      if (symbolic_operand (src, VOIDmode))
        {
          gcc_assert (TARGET_MIPS16
                      ? TARGET_MIPS16_TEXT_LOADS
                      : !TARGET_EXPLICIT_RELOCS);
          return dbl_p ? "dla\t%0,%1" : "la\t%0,%1";
        }
    }
  /* Source is an FPR.  */
  if (src_code == REG && FP_REG_P (REGNO (src)))
    {
      if (dest_code == REG && FP_REG_P (REGNO (dest)))
        {
          if (GET_MODE (dest) == V2SFmode)
            return "mov.ps\t%0,%1";
          else
            return dbl_p ? "mov.d\t%0,%1" : "mov.s\t%0,%1";
        }

      if (dest_code == MEM)
        return dbl_p ? "sdc1\t%1,%0" : "swc1\t%1,%0";
    }
  /* FPR load from memory.  */
  if (dest_code == REG && FP_REG_P (REGNO (dest)))
    {
      if (src_code == MEM)
        return dbl_p ? "ldc1\t%0,%1" : "lwc1\t%0,%1";
    }
  /* Coprocessor load: patch size letter and coprocessor number into
     the static template.  */
  if (dest_code == REG && ALL_COP_REG_P (REGNO (dest)) && src_code == MEM)
    {
      static char retval[] = "l_c_\t%0,%1";

      retval[1] = (dbl_p ? 'd' : 'w');
      retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
      return retval;
    }
  /* Coprocessor store: likewise.  */
  if (dest_code == MEM && src_code == REG && ALL_COP_REG_P (REGNO (src)))
    {
      static char retval[] = "s_c_\t%1,%0";

      retval[1] = (dbl_p ? 'd' : 'w');
      retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
      return retval;
    }
  /* Every valid move should have been handled above.  */
  gcc_unreachable ();
}
4309
 
4310
/* Return true if CMP1 is a suitable second operand for integer ordering
4311
   test CODE.  See also the *sCC patterns in mips.md.  */
4312
 
4313
static bool
4314
mips_int_order_operand_ok_p (enum rtx_code code, rtx cmp1)
4315
{
4316
  switch (code)
4317
    {
4318
    case GT:
4319
    case GTU:
4320
      return reg_or_0_operand (cmp1, VOIDmode);
4321
 
4322
    case GE:
4323
    case GEU:
4324
      return !TARGET_MIPS16 && cmp1 == const1_rtx;
4325
 
4326
    case LT:
4327
    case LTU:
4328
      return arith_operand (cmp1, VOIDmode);
4329
 
4330
    case LE:
4331
      return sle_operand (cmp1, VOIDmode);
4332
 
4333
    case LEU:
4334
      return sleu_operand (cmp1, VOIDmode);
4335
 
4336
    default:
4337
      gcc_unreachable ();
4338
    }
4339
}
4340
 
4341
/* Return true if *CMP1 (of mode MODE) is a valid second operand for
4342
   integer ordering test *CODE, or if an equivalent combination can
4343
   be formed by adjusting *CODE and *CMP1.  When returning true, update
4344
   *CODE and *CMP1 with the chosen code and operand, otherwise leave
4345
   them alone.  */
4346
 
4347
static bool
4348
mips_canonicalize_int_order_test (enum rtx_code *code, rtx *cmp1,
4349
                                  enum machine_mode mode)
4350
{
4351
  HOST_WIDE_INT plus_one;
4352
 
4353
  if (mips_int_order_operand_ok_p (*code, *cmp1))
4354
    return true;
4355
 
4356
  if (CONST_INT_P (*cmp1))
4357
    switch (*code)
4358
      {
4359
      case LE:
4360
        plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
4361
        if (INTVAL (*cmp1) < plus_one)
4362
          {
4363
            *code = LT;
4364
            *cmp1 = force_reg (mode, GEN_INT (plus_one));
4365
            return true;
4366
          }
4367
        break;
4368
 
4369
      case LEU:
4370
        plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
4371
        if (plus_one != 0)
4372
          {
4373
            *code = LTU;
4374
            *cmp1 = force_reg (mode, GEN_INT (plus_one));
4375
            return true;
4376
          }
4377
        break;
4378
 
4379
      default:
4380
        break;
4381
      }
4382
  return false;
4383
}
4384
 
4385
/* Compare CMP0 and CMP1 using ordering test CODE and store the result
4386
   in TARGET.  CMP0 and TARGET are register_operands.  If INVERT_PTR
4387
   is nonnull, it's OK to set TARGET to the inverse of the result and
4388
   flip *INVERT_PTR instead.  */
4389
 
4390
static void
4391
mips_emit_int_order_test (enum rtx_code code, bool *invert_ptr,
4392
                          rtx target, rtx cmp0, rtx cmp1)
4393
{
4394
  enum machine_mode mode;
4395
 
4396
  /* First see if there is a MIPS instruction that can do this operation.
4397
     If not, try doing the same for the inverse operation.  If that also
4398
     fails, force CMP1 into a register and try again.  */
4399
  mode = GET_MODE (cmp0);
4400
  if (mips_canonicalize_int_order_test (&code, &cmp1, mode))
4401
    mips_emit_binary (code, target, cmp0, cmp1);
4402
  else
4403
    {
4404
      enum rtx_code inv_code = reverse_condition (code);
4405
      if (!mips_canonicalize_int_order_test (&inv_code, &cmp1, mode))
4406
        {
4407
          cmp1 = force_reg (mode, cmp1);
4408
          mips_emit_int_order_test (code, invert_ptr, target, cmp0, cmp1);
4409
        }
4410
      else if (invert_ptr == 0)
4411
        {
4412
          rtx inv_target;
4413
 
4414
          inv_target = mips_force_binary (GET_MODE (target),
4415
                                          inv_code, cmp0, cmp1);
4416
          mips_emit_binary (XOR, target, inv_target, const1_rtx);
4417
        }
4418
      else
4419
        {
4420
          *invert_ptr = !*invert_ptr;
4421
          mips_emit_binary (inv_code, target, cmp0, cmp1);
4422
        }
4423
    }
4424
}
4425
 
4426
/* Return a register that is zero iff CMP0 and CMP1 are equal.
4427
   The register will have the same mode as CMP0.  */
4428
 
4429
static rtx
4430
mips_zero_if_equal (rtx cmp0, rtx cmp1)
4431
{
4432
  if (cmp1 == const0_rtx)
4433
    return cmp0;
4434
 
4435
  if (uns_arith_operand (cmp1, VOIDmode))
4436
    return expand_binop (GET_MODE (cmp0), xor_optab,
4437
                         cmp0, cmp1, 0, 0, OPTAB_DIRECT);
4438
 
4439
  return expand_binop (GET_MODE (cmp0), sub_optab,
4440
                       cmp0, cmp1, 0, 0, OPTAB_DIRECT);
4441
}
4442
 
4443
/* Convert *CODE into a code that can be used in a floating-point
4444
   scc instruction (C.cond.fmt).  Return true if the values of
4445
   the condition code registers will be inverted, with 0 indicating
4446
   that the condition holds.  */
4447
 
4448
static bool
4449
mips_reversed_fp_cond (enum rtx_code *code)
4450
{
4451
  switch (*code)
4452
    {
4453
    case NE:
4454
    case LTGT:
4455
    case ORDERED:
4456
      *code = reverse_condition_maybe_unordered (*code);
4457
      return true;
4458
 
4459
    default:
4460
      return false;
4461
    }
4462
}
4463
 
4464
/* Convert a comparison into something that can be used in a branch or
4465
   conditional move.  On entry, *OP0 and *OP1 are the values being
4466
   compared and *CODE is the code used to compare them.
4467
 
4468
   Update *CODE, *OP0 and *OP1 so that they describe the final comparison.
4469
   If NEED_EQ_NE_P, then only EQ or NE comparisons against zero are possible,
4470
   otherwise any standard branch condition can be used.  The standard branch
4471
   conditions are:
4472
 
4473
      - EQ or NE between two registers.
4474
      - any comparison between a register and zero.  */
4475
 
4476
static void
mips_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1, bool need_eq_ne_p)
{
  /* Keep copies of the original operands; *OP0 and *OP1 are rewritten
     in place below.  */
  rtx cmp_op0 = *op0;
  rtx cmp_op1 = *op1;

  if (GET_MODE_CLASS (GET_MODE (*op0)) == MODE_INT)
    {
      /* An integer comparison against zero is usable as-is when any
         standard branch condition is allowed.  */
      if (!need_eq_ne_p && *op1 == const0_rtx)
        ;
      else if (*code == EQ || *code == NE)
        {
          if (need_eq_ne_p)
            {
              /* Reduce to "(OP0 ^/- OP1) ==/!= 0".  */
              *op0 = mips_zero_if_equal (cmp_op0, cmp_op1);
              *op1 = const0_rtx;
            }
          else
            /* Register-register EQ/NE is a standard branch condition;
               just make sure OP1 is in a register.  */
            *op1 = force_reg (GET_MODE (cmp_op0), cmp_op1);
        }
      else
        {
          /* The comparison needs a separate scc instruction.  Store the
             result of the scc in *OP0 and compare it against zero.  */
          bool invert = false;
          *op0 = gen_reg_rtx (GET_MODE (cmp_op0));
          mips_emit_int_order_test (*code, &invert, *op0, cmp_op0, cmp_op1);
          *code = (invert ? EQ : NE);
          *op1 = const0_rtx;
        }
    }
  else if (ALL_FIXED_POINT_MODE_P (GET_MODE (cmp_op0)))
    {
      /* Fixed-point (DSP) comparisons set the CCDSP condition-code
         register; the branch then tests that register against zero.  */
      *op0 = gen_rtx_REG (CCDSPmode, CCDSP_CC_REGNUM);
      mips_emit_binary (*code, *op0, cmp_op0, cmp_op1);
      *code = NE;
      *op1 = const0_rtx;
    }
  else
    {
      enum rtx_code cmp_code;

      /* Floating-point tests use a separate C.cond.fmt comparison to
         set a condition code register.  The branch or conditional move
         will then compare that register against zero.

         Set CMP_CODE to the code of the comparison instruction and
         *CODE to the code that the branch or move should use.  */
      cmp_code = *code;
      *code = mips_reversed_fp_cond (&cmp_code) ? EQ : NE;
      /* With eight condition-code registers available, allocate a fresh
         pseudo; otherwise the single FPSW register must be used.  */
      *op0 = (ISA_HAS_8CC
              ? gen_reg_rtx (CCmode)
              : gen_rtx_REG (CCmode, FPSW_REGNUM));
      *op1 = const0_rtx;
      mips_emit_binary (cmp_code, *op0, cmp_op0, cmp_op1);
    }
}
4533
 
4534
/* Try performing the comparison in OPERANDS[1], whose arms are OPERANDS[2]
4535
   and OPERAND[3].  Store the result in OPERANDS[0].
4536
 
4537
   On 64-bit targets, the mode of the comparison and target will always be
4538
   SImode, thus possibly narrower than that of the comparison's operands.  */
4539
 
4540
void
4541
mips_expand_scc (rtx operands[])
4542
{
4543
  rtx target = operands[0];
4544
  enum rtx_code code = GET_CODE (operands[1]);
4545
  rtx op0 = operands[2];
4546
  rtx op1 = operands[3];
4547
 
4548
  gcc_assert (GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT);
4549
 
4550
  if (code == EQ || code == NE)
4551
    {
4552
      if (ISA_HAS_SEQ_SNE
4553
          && reg_imm10_operand (op1, GET_MODE (op1)))
4554
        mips_emit_binary (code, target, op0, op1);
4555
      else
4556
        {
4557
          rtx zie = mips_zero_if_equal (op0, op1);
4558
          mips_emit_binary (code, target, zie, const0_rtx);
4559
        }
4560
    }
4561
  else
4562
    mips_emit_int_order_test (code, 0, target, op0, op1);
4563
}
4564
 
4565
/* Compare OPERANDS[1] with OPERANDS[2] using comparison code
4566
   CODE and jump to OPERANDS[3] if the condition holds.  */
4567
 
4568
void
4569
mips_expand_conditional_branch (rtx *operands)
4570
{
4571
  enum rtx_code code = GET_CODE (operands[0]);
4572
  rtx op0 = operands[1];
4573
  rtx op1 = operands[2];
4574
  rtx condition;
4575
 
4576
  mips_emit_compare (&code, &op0, &op1, TARGET_MIPS16);
4577
  condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
4578
  emit_jump_insn (gen_condjump (condition, operands[3]));
4579
}
4580
 
4581
/* Implement:
4582
 
4583
   (set temp (COND:CCV2 CMP_OP0 CMP_OP1))
4584
   (set DEST (unspec [TRUE_SRC FALSE_SRC temp] UNSPEC_MOVE_TF_PS))  */
4585
 
4586
void
4587
mips_expand_vcondv2sf (rtx dest, rtx true_src, rtx false_src,
4588
                       enum rtx_code cond, rtx cmp_op0, rtx cmp_op1)
4589
{
4590
  rtx cmp_result;
4591
  bool reversed_p;
4592
 
4593
  reversed_p = mips_reversed_fp_cond (&cond);
4594
  cmp_result = gen_reg_rtx (CCV2mode);
4595
  emit_insn (gen_scc_ps (cmp_result,
4596
                         gen_rtx_fmt_ee (cond, VOIDmode, cmp_op0, cmp_op1)));
4597
  if (reversed_p)
4598
    emit_insn (gen_mips_cond_move_tf_ps (dest, false_src, true_src,
4599
                                         cmp_result));
4600
  else
4601
    emit_insn (gen_mips_cond_move_tf_ps (dest, true_src, false_src,
4602
                                         cmp_result));
4603
}
4604
 
4605
/* Perform the comparison in OPERANDS[1].  Move OPERANDS[2] into OPERANDS[0]
4606
   if the condition holds, otherwise move OPERANDS[3] into OPERANDS[0].  */
4607
 
4608
void
4609
mips_expand_conditional_move (rtx *operands)
4610
{
4611
  rtx cond;
4612
  enum rtx_code code = GET_CODE (operands[1]);
4613
  rtx op0 = XEXP (operands[1], 0);
4614
  rtx op1 = XEXP (operands[1], 1);
4615
 
4616
  mips_emit_compare (&code, &op0, &op1, true);
4617
  cond = gen_rtx_fmt_ee (code, GET_MODE (op0), op0, op1);
4618
  emit_insn (gen_rtx_SET (VOIDmode, operands[0],
4619
                          gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]), cond,
4620
                                                operands[2], operands[3])));
4621
}
4622
 
4623
/* Perform the comparison in COMPARISON, then trap if the condition holds.  */
4624
 
4625
void
4626
mips_expand_conditional_trap (rtx comparison)
4627
{
4628
  rtx op0, op1;
4629
  enum machine_mode mode;
4630
  enum rtx_code code;
4631
 
4632
  /* MIPS conditional trap instructions don't have GT or LE flavors,
4633
     so we must swap the operands and convert to LT and GE respectively.  */
4634
  code = GET_CODE (comparison);
4635
  switch (code)
4636
    {
4637
    case GT:
4638
    case LE:
4639
    case GTU:
4640
    case LEU:
4641
      code = swap_condition (code);
4642
      op0 = XEXP (comparison, 1);
4643
      op1 = XEXP (comparison, 0);
4644
      break;
4645
 
4646
    default:
4647
      op0 = XEXP (comparison, 0);
4648
      op1 = XEXP (comparison, 1);
4649
      break;
4650
    }
4651
 
4652
  mode = GET_MODE (XEXP (comparison, 0));
4653
  op0 = force_reg (mode, op0);
4654
  if (!arith_operand (op1, mode))
4655
    op1 = force_reg (mode, op1);
4656
 
4657
  emit_insn (gen_rtx_TRAP_IF (VOIDmode,
4658
                              gen_rtx_fmt_ee (code, mode, op0, op1),
4659
                              const0_rtx));
4660
}
4661
 
4662
/* Initialize *CUM for a call to a function of type FNTYPE.  */
4663
 
4664
void
4665
mips_init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype)
4666
{
4667
  memset (cum, 0, sizeof (*cum));
4668
  cum->prototype = (fntype && prototype_p (fntype));
4669
  cum->gp_reg_found = (cum->prototype && stdarg_p (fntype));
4670
}
4671
 
4672
/* Fill INFO with information about a single argument.  CUM is the
4673
   cumulative state for earlier arguments.  MODE is the mode of this
4674
   argument and TYPE is its type (if known).  NAMED is true if this
4675
   is a named (fixed) argument rather than a variable one.  */
4676
 
4677
static void
mips_get_arg_info (struct mips_arg_info *info, const CUMULATIVE_ARGS *cum,
                   enum machine_mode mode, const_tree type, bool named)
{
  bool doubleword_aligned_p;
  unsigned int num_bytes, num_words, max_regs;

  /* Work out the size of the argument.  */
  num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
  num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;

  /* Decide whether it should go in a floating-point register, assuming
     one is free.  Later code checks for availability.

     The checks against UNITS_PER_FPVALUE handle the soft-float and
     single-float cases.  */
  switch (mips_abi)
    {
    case ABI_EABI:
      /* The EABI conventions have traditionally been defined in terms
         of TYPE_MODE, regardless of the actual type.  */
      info->fpr_p = ((GET_MODE_CLASS (mode) == MODE_FLOAT
                      || mode == V2SFmode)
                     && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
      break;

    case ABI_32:
    case ABI_O64:
      /* Only leading floating-point scalars are passed in
         floating-point registers.  We also handle vector floats the same
         way, which is OK because they are not covered by the standard ABI.  */
      info->fpr_p = (!cum->gp_reg_found
                     && cum->arg_number < 2
                     && (type == 0
                         || SCALAR_FLOAT_TYPE_P (type)
                         || VECTOR_FLOAT_TYPE_P (type))
                     && (GET_MODE_CLASS (mode) == MODE_FLOAT
                         || mode == V2SFmode)
                     && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
      break;

    case ABI_N32:
    case ABI_64:
      /* Scalar, complex and vector floating-point types are passed in
         floating-point registers, as long as this is a named rather
         than a variable argument.  */
      info->fpr_p = (named
                     && (type == 0 || FLOAT_TYPE_P (type))
                     && (GET_MODE_CLASS (mode) == MODE_FLOAT
                         || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
                         || mode == V2SFmode)
                     && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);

      /* ??? According to the ABI documentation, the real and imaginary
         parts of complex floats should be passed in individual registers.
         The real and imaginary parts of stack arguments are supposed
         to be contiguous and there should be an extra word of padding
         at the end.

         This has two problems.  First, it makes it impossible to use a
         single "void *" va_list type, since register and stack arguments
         are passed differently.  (At the time of writing, MIPSpro cannot
         handle complex float varargs correctly.)  Second, it's unclear
         what should happen when there is only one register free.

         For now, we assume that named complex floats should go into FPRs
         if there are two FPRs free, otherwise they should be passed in the
         same way as a struct containing two floats.  */
      if (info->fpr_p
          && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
          && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
        {
          if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
            info->fpr_p = false;
          else
            num_words = 2;
        }
      break;

    default:
      gcc_unreachable ();
    }

  /* See whether the argument has doubleword alignment.  */
  doubleword_aligned_p = (mips_function_arg_boundary (mode, type)
                          > BITS_PER_WORD);

  /* Set REG_OFFSET to the register count we're interested in.
     The EABI allocates the floating-point registers separately,
     but the other ABIs allocate them like integer registers.  */
  info->reg_offset = (mips_abi == ABI_EABI && info->fpr_p
                      ? cum->num_fprs
                      : cum->num_gprs);

  /* Advance to an even register if the argument is doubleword-aligned.  */
  if (doubleword_aligned_p)
    info->reg_offset += info->reg_offset & 1;

  /* Work out the offset of a stack argument.  */
  info->stack_offset = cum->stack_words;
  if (doubleword_aligned_p)
    info->stack_offset += info->stack_offset & 1;

  /* MAX_REGS is the number of argument registers still available after
     REG_OFFSET of them have been claimed by earlier arguments.  */
  max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;

  /* Partition the argument between registers and stack.  */
  info->reg_words = MIN (num_words, max_regs);
  info->stack_words = num_words - info->reg_words;
}
4786
 
4787
/* INFO describes a register argument that has the normal format for the
4788
   argument's mode.  Return the register it uses, assuming that FPRs are
4789
   available if HARD_FLOAT_P.  */
4790
 
4791
static unsigned int
4792
mips_arg_regno (const struct mips_arg_info *info, bool hard_float_p)
4793
{
4794
  if (!info->fpr_p || !hard_float_p)
4795
    return GP_ARG_FIRST + info->reg_offset;
4796
  else if (mips_abi == ABI_32 && TARGET_DOUBLE_FLOAT && info->reg_offset > 0)
4797
    /* In o32, the second argument is always passed in $f14
4798
       for TARGET_DOUBLE_FLOAT, regardless of whether the
4799
       first argument was a word or doubleword.  */
4800
    return FP_ARG_FIRST + 2;
4801
  else
4802
    return FP_ARG_FIRST + info->reg_offset;
4803
}
4804
 
4805
/* Implement TARGET_STRICT_ARGUMENT_NAMING.  */
4806
 
4807
static bool
4808
mips_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
4809
{
4810
  return !TARGET_OLDABI;
4811
}
4812
 
4813
/* Implement TARGET_FUNCTION_ARG.  */
4814
 
4815
static rtx
mips_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
                   const_tree type, bool named)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  struct mips_arg_info info;

  /* We will be called with a mode of VOIDmode after the last argument
     has been seen.  Whatever we return will be passed to the call expander.
     If we need a MIPS16 fp_code, return a REG with the code stored as
     the mode.  */
  if (mode == VOIDmode)
    {
      if (TARGET_MIPS16 && cum->fp_code != 0)
        return gen_rtx_REG ((enum machine_mode) cum->fp_code, 0);
      else
        return NULL;
    }

  mips_get_arg_info (&info, cum, mode, type, named);

  /* Return straight away if the whole argument is passed on the stack.  */
  if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
    return NULL;

  /* The n32 and n64 ABIs say that if any 64-bit chunk of the structure
     contains a double in its entirety, then that 64-bit chunk is passed
     in a floating-point register.  */
  if (TARGET_NEWABI
      && TARGET_HARD_FLOAT
      && named
      && type != 0
      && TREE_CODE (type) == RECORD_TYPE
      && TYPE_SIZE_UNIT (type)
      && host_integerp (TYPE_SIZE_UNIT (type), 1))
    {
      tree field;

      /* First check to see if there is any such field.  */
      for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
        if (TREE_CODE (field) == FIELD_DECL
            && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field))
            && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
            && host_integerp (bit_position (field), 0)
            && int_bit_position (field) % BITS_PER_WORD == 0)
          break;

      if (field != 0)
        {
          /* Now handle the special case by returning a PARALLEL
             indicating where each 64-bit chunk goes.  INFO.REG_WORDS
             chunks are passed in registers.  */
          unsigned int i;
          HOST_WIDE_INT bitpos;
          rtx ret;

          /* assign_parms checks the mode of ENTRY_PARM, so we must
             use the actual mode here.  */
          ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));

          bitpos = 0;
          field = TYPE_FIELDS (type);
          for (i = 0; i < info.reg_words; i++)
            {
              rtx reg;

              /* Advance FIELD to the first field at or beyond BITPOS.
                 Fields are scanned in declaration order, so this walk
                 never restarts from the beginning.  */
              for (; field; field = DECL_CHAIN (field))
                if (TREE_CODE (field) == FIELD_DECL
                    && int_bit_position (field) >= bitpos)
                  break;

              /* A chunk gets an FPR only if a word-sized scalar float
                 starts exactly at this chunk's position.  */
              if (field
                  && int_bit_position (field) == bitpos
                  && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field))
                  && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD)
                reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
              else
                reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);

              XVECEXP (ret, 0, i)
                = gen_rtx_EXPR_LIST (VOIDmode, reg,
                                     GEN_INT (bitpos / BITS_PER_UNIT));

              bitpos += BITS_PER_WORD;
            }
          return ret;
        }
    }

  /* Handle the n32/n64 conventions for passing complex floating-point
     arguments in FPR pairs.  The real part goes in the lower register
     and the imaginary part goes in the upper register.  */
  if (TARGET_NEWABI
      && info.fpr_p
      && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
    {
      rtx real, imag;
      enum machine_mode inner;
      unsigned int regno;

      inner = GET_MODE_INNER (mode);
      regno = FP_ARG_FIRST + info.reg_offset;
      if (info.reg_words * UNITS_PER_WORD == GET_MODE_SIZE (inner))
        {
          /* Real part in registers, imaginary part on stack.  */
          gcc_assert (info.stack_words == info.reg_words);
          return gen_rtx_REG (inner, regno);
        }
      else
        {
          /* Both parts fit in registers; describe their placement with
             a two-entry PARALLEL.  */
          gcc_assert (info.stack_words == 0);
          real = gen_rtx_EXPR_LIST (VOIDmode,
                                    gen_rtx_REG (inner, regno),
                                    const0_rtx);
          imag = gen_rtx_EXPR_LIST (VOIDmode,
                                    gen_rtx_REG (inner,
                                                 regno + info.reg_words / 2),
                                    GEN_INT (GET_MODE_SIZE (inner)));
          return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
        }
    }

  /* Otherwise the argument starts in a single register of the class
     chosen by mips_get_arg_info.  */
  return gen_rtx_REG (mode, mips_arg_regno (&info, TARGET_HARD_FLOAT));
}
4939
 
4940
/* Implement TARGET_FUNCTION_ARG_ADVANCE.  */
4941
 
4942
static void
mips_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
                           const_tree type, bool named)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  struct mips_arg_info info;

  mips_get_arg_info (&info, cum, mode, type, named);

  /* Once a GPR-class argument has been seen, mips_get_arg_info stops
     assigning FPRs to later arguments under o32/o64.  */
  if (!info.fpr_p)
    cum->gp_reg_found = true;

  /* See the comment above the CUMULATIVE_ARGS structure in mips.h for
     an explanation of what this code does.  It assumes that we're using
     either the o32 or the o64 ABI, both of which pass at most 2 arguments
     in FPRs.  */
  if (cum->arg_number < 2 && info.fpr_p)
    cum->fp_code += (mode == SFmode ? 1 : 2) << (cum->arg_number * 2);

  /* Advance the register count.  This has the effect of setting
     num_gprs to MAX_ARGS_IN_REGISTERS if a doubleword-aligned
     argument required us to skip the final GPR and pass the whole
     argument on the stack.  */
  if (mips_abi != ABI_EABI || !info.fpr_p)
    cum->num_gprs = info.reg_offset + info.reg_words;
  else if (info.reg_words > 0)
    cum->num_fprs += MAX_FPRS_PER_FMT;

  /* Advance the stack word count.  */
  if (info.stack_words > 0)
    cum->stack_words = info.stack_offset + info.stack_words;

  cum->arg_number++;
}
4976
 
4977
/* Implement TARGET_ARG_PARTIAL_BYTES.  */
4978
 
4979
static int
4980
mips_arg_partial_bytes (cumulative_args_t cum,
4981
                        enum machine_mode mode, tree type, bool named)
4982
{
4983
  struct mips_arg_info info;
4984
 
4985
  mips_get_arg_info (&info, get_cumulative_args (cum), mode, type, named);
4986
  return info.stack_words > 0 ? info.reg_words * UNITS_PER_WORD : 0;
4987
}
4988
 
4989
/* Implement TARGET_FUNCTION_ARG_BOUNDARY.  Every parameter gets at
4990
   least PARM_BOUNDARY bits of alignment, but will be given anything up
4991
   to STACK_BOUNDARY bits if the type requires it.  */
4992
 
4993
static unsigned int
4994
mips_function_arg_boundary (enum machine_mode mode, const_tree type)
4995
{
4996
  unsigned int alignment;
4997
 
4998
  alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
4999
  if (alignment < PARM_BOUNDARY)
5000
    alignment = PARM_BOUNDARY;
5001
  if (alignment > STACK_BOUNDARY)
5002
    alignment = STACK_BOUNDARY;
5003
  return alignment;
5004
}
5005
 
5006
/* Return true if FUNCTION_ARG_PADDING (MODE, TYPE) should return
5007
   upward rather than downward.  In other words, return true if the
5008
   first byte of the stack slot has useful data, false if the last
5009
   byte does.  */
5010
 
5011
bool
5012
mips_pad_arg_upward (enum machine_mode mode, const_tree type)
5013
{
5014
  /* On little-endian targets, the first byte of every stack argument
5015
     is passed in the first byte of the stack slot.  */
5016
  if (!BYTES_BIG_ENDIAN)
5017
    return true;
5018
 
5019
  /* Otherwise, integral types are padded downward: the last byte of a
5020
     stack argument is passed in the last byte of the stack slot.  */
5021
  if (type != 0
5022
      ? (INTEGRAL_TYPE_P (type)
5023
         || POINTER_TYPE_P (type)
5024
         || FIXED_POINT_TYPE_P (type))
5025
      : (SCALAR_INT_MODE_P (mode)
5026
         || ALL_SCALAR_FIXED_POINT_MODE_P (mode)))
5027
    return false;
5028
 
5029
  /* Big-endian o64 pads floating-point arguments downward.  */
5030
  if (mips_abi == ABI_O64)
5031
    if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
5032
      return false;
5033
 
5034
  /* Other types are padded upward for o32, o64, n32 and n64.  */
5035
  if (mips_abi != ABI_EABI)
5036
    return true;
5037
 
5038
  /* Arguments smaller than a stack slot are padded downward.  */
5039
  if (mode != BLKmode)
5040
    return GET_MODE_BITSIZE (mode) >= PARM_BOUNDARY;
5041
  else
5042
    return int_size_in_bytes (type) >= (PARM_BOUNDARY / BITS_PER_UNIT);
5043
}
5044
 
5045
/* Likewise BLOCK_REG_PADDING (MODE, TYPE, ...).  Return !BYTES_BIG_ENDIAN
5046
   if the least significant byte of the register has useful data.  Return
5047
   the opposite if the most significant byte does.  */
5048
 
5049
bool
5050
mips_pad_reg_upward (enum machine_mode mode, tree type)
5051
{
5052
  /* No shifting is required for floating-point arguments.  */
5053
  if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
5054
    return !BYTES_BIG_ENDIAN;
5055
 
5056
  /* Otherwise, apply the same padding to register arguments as we do
5057
     to stack arguments.  */
5058
  return mips_pad_arg_upward (mode, type);
5059
}
5060
 
5061
/* Return nonzero when an argument must be passed by reference.  */
5062
 
5063
static bool
5064
mips_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
5065
                        enum machine_mode mode, const_tree type,
5066
                        bool named ATTRIBUTE_UNUSED)
5067
{
5068
  if (mips_abi == ABI_EABI)
5069
    {
5070
      int size;
5071
 
5072
      /* ??? How should SCmode be handled?  */
5073
      if (mode == DImode || mode == DFmode
5074
          || mode == DQmode || mode == UDQmode
5075
          || mode == DAmode || mode == UDAmode)
5076
        return 0;
5077
 
5078
      size = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
5079
      return size == -1 || size > UNITS_PER_WORD;
5080
    }
5081
  else
5082
    {
5083
      /* If we have a variable-sized parameter, we have no choice.  */
5084
      return targetm.calls.must_pass_in_stack (mode, type);
5085
    }
5086
}
5087
 
5088
/* Implement TARGET_CALLEE_COPIES.  */
5089
 
5090
static bool
5091
mips_callee_copies (cumulative_args_t cum ATTRIBUTE_UNUSED,
5092
                    enum machine_mode mode ATTRIBUTE_UNUSED,
5093
                    const_tree type ATTRIBUTE_UNUSED, bool named)
5094
{
5095
  return mips_abi == ABI_EABI && named;
5096
}
5097
 
5098
/* See whether VALTYPE is a record whose fields should be returned in
5099
   floating-point registers.  If so, return the number of fields and
5100
   list them in FIELDS (which should have two elements).  Return 0
5101
   otherwise.
5102
 
5103
   For n32 & n64, a structure with one or two fields is returned in
5104
   floating-point registers as long as every field has a floating-point
5105
   type.  */
5106
 
5107
static int
5108
mips_fpr_return_fields (const_tree valtype, tree *fields)
5109
{
5110
  tree field;
5111
  int i;
5112
 
5113
  if (!TARGET_NEWABI)
5114
    return 0;
5115
 
5116
  if (TREE_CODE (valtype) != RECORD_TYPE)
5117
    return 0;
5118
 
5119
  i = 0;
5120
  for (field = TYPE_FIELDS (valtype); field != 0; field = DECL_CHAIN (field))
5121
    {
5122
      if (TREE_CODE (field) != FIELD_DECL)
5123
        continue;
5124
 
5125
      if (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (field)))
5126
        return 0;
5127
 
5128
      if (i == 2)
5129
        return 0;
5130
 
5131
      fields[i++] = field;
5132
    }
5133
  return i;
5134
}
5135
 
5136
/* Implement TARGET_RETURN_IN_MSB.  For n32 & n64, we should return
5137
   a value in the most significant part of $2/$3 if:
5138
 
5139
      - the target is big-endian;
5140
 
5141
      - the value has a structure or union type (we generalize this to
5142
        cover aggregates from other languages too); and
5143
 
5144
      - the structure is not returned in floating-point registers.  */
5145
 
5146
static bool
5147
mips_return_in_msb (const_tree valtype)
5148
{
5149
  tree fields[2];
5150
 
5151
  return (TARGET_NEWABI
5152
          && TARGET_BIG_ENDIAN
5153
          && AGGREGATE_TYPE_P (valtype)
5154
          && mips_fpr_return_fields (valtype, fields) == 0);
5155
}
5156
 
5157
/* Return true if the function return value MODE will get returned in a
5158
   floating-point register.  */
5159
 
5160
static bool
5161
mips_return_mode_in_fpr_p (enum machine_mode mode)
5162
{
5163
  return ((GET_MODE_CLASS (mode) == MODE_FLOAT
5164
           || mode == V2SFmode
5165
           || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5166
          && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_HWFPVALUE);
5167
}
5168
 
5169
/* Return the representation of an FPR return register when the
5170
   value being returned in FP_RETURN has mode VALUE_MODE and the
5171
   return type itself has mode TYPE_MODE.  On NewABI targets,
5172
   the two modes may be different for structures like:
5173
 
5174
       struct __attribute__((packed)) foo { float f; }
5175
 
5176
   where we return the SFmode value of "f" in FP_RETURN, but where
5177
   the structure itself has mode BLKmode.  */
5178
 
5179
static rtx
5180
mips_return_fpr_single (enum machine_mode type_mode,
5181
                        enum machine_mode value_mode)
5182
{
5183
  rtx x;
5184
 
5185
  x = gen_rtx_REG (value_mode, FP_RETURN);
5186
  if (type_mode != value_mode)
5187
    {
5188
      x = gen_rtx_EXPR_LIST (VOIDmode, x, const0_rtx);
5189
      x = gen_rtx_PARALLEL (type_mode, gen_rtvec (1, x));
5190
    }
5191
  return x;
5192
}
5193
 
5194
/* Return a composite value in a pair of floating-point registers.
5195
   MODE1 and OFFSET1 are the mode and byte offset for the first value,
5196
   likewise MODE2 and OFFSET2 for the second.  MODE is the mode of the
5197
   complete value.
5198
 
5199
   For n32 & n64, $f0 always holds the first value and $f2 the second.
5200
   Otherwise the values are packed together as closely as possible.  */
5201
 
5202
static rtx
5203
mips_return_fpr_pair (enum machine_mode mode,
5204
                      enum machine_mode mode1, HOST_WIDE_INT offset1,
5205
                      enum machine_mode mode2, HOST_WIDE_INT offset2)
5206
{
5207
  int inc;
5208
 
5209
  inc = (TARGET_NEWABI ? 2 : MAX_FPRS_PER_FMT);
5210
  return gen_rtx_PARALLEL
5211
    (mode,
5212
     gen_rtvec (2,
5213
                gen_rtx_EXPR_LIST (VOIDmode,
5214
                                   gen_rtx_REG (mode1, FP_RETURN),
5215
                                   GEN_INT (offset1)),
5216
                gen_rtx_EXPR_LIST (VOIDmode,
5217
                                   gen_rtx_REG (mode2, FP_RETURN + inc),
5218
                                   GEN_INT (offset2))));
5219
 
5220
}
5221
 
5222
/* Implement TARGET_FUNCTION_VALUE and TARGET_LIBCALL_VALUE.
   For normal calls, VALTYPE is the return type and MODE is VOIDmode.
   For libcalls, VALTYPE is null and MODE is the mode of the return value.
   Returns an rtx (REG or PARALLEL) describing where the value lives.  */

static rtx
mips_function_value_1 (const_tree valtype, const_tree fn_decl_or_type,
                       enum machine_mode mode)
{
  if (valtype)
    {
      tree fields[2];
      int unsigned_p;
      const_tree func;

      /* FN_DECL_OR_TYPE may be the callee's decl or just its type;
         promote_function_mode only wants the decl.  */
      if (fn_decl_or_type && DECL_P (fn_decl_or_type))
        func = fn_decl_or_type;
      else
        func = NULL;

      mode = TYPE_MODE (valtype);
      unsigned_p = TYPE_UNSIGNED (valtype);

      /* Since TARGET_PROMOTE_FUNCTION_MODE unconditionally promotes
         return values, promote the mode here too.  */
      mode = promote_function_mode (valtype, mode, &unsigned_p, func, 1);

      /* Handle structures whose fields are returned in $f0/$f2.  */
      switch (mips_fpr_return_fields (valtype, fields))
        {
        case 1:
          return mips_return_fpr_single (mode,
                                         TYPE_MODE (TREE_TYPE (fields[0])));

        case 2:
          return mips_return_fpr_pair (mode,
                                       TYPE_MODE (TREE_TYPE (fields[0])),
                                       int_byte_position (fields[0]),
                                       TYPE_MODE (TREE_TYPE (fields[1])),
                                       int_byte_position (fields[1]));
        }

      /* If a value is passed in the most significant part of a register, see
         whether we have to round the mode up to a whole number of words.  */
      if (mips_return_in_msb (valtype))
        {
          HOST_WIDE_INT size = int_size_in_bytes (valtype);
          if (size % UNITS_PER_WORD != 0)
            {
              size += UNITS_PER_WORD - size % UNITS_PER_WORD;
              mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
            }
        }

      /* For EABI, the class of return register depends entirely on MODE.
         For example, "struct { some_type x; }" and "union { some_type x; }"
         are returned in the same way as a bare "some_type" would be.
         Other ABIs only use FPRs for scalar, complex or vector types.  */
      if (mips_abi != ABI_EABI && !FLOAT_TYPE_P (valtype))
        return gen_rtx_REG (mode, GP_RETURN);
    }

  if (!TARGET_MIPS16)
    {
      /* Handle long doubles for n32 & n64.  */
      if (mode == TFmode)
        return mips_return_fpr_pair (mode,
                                     DImode, 0,
                                     DImode, GET_MODE_SIZE (mode) / 2);

      if (mips_return_mode_in_fpr_p (mode))
        {
          /* Complex floats use an FPR pair; everything else fits in
             a single FPR.  */
          if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
            return mips_return_fpr_pair (mode,
                                         GET_MODE_INNER (mode), 0,
                                         GET_MODE_INNER (mode),
                                         GET_MODE_SIZE (mode) / 2);
          else
            return gen_rtx_REG (mode, FP_RETURN);
        }
    }

  /* Everything else goes in the general-purpose return register.  */
  return gen_rtx_REG (mode, GP_RETURN);
}
5305
 
5306
/* Implement TARGET_FUNCTION_VALUE.  */
5307
 
5308
static rtx
5309
mips_function_value (const_tree valtype, const_tree fn_decl_or_type,
5310
                     bool outgoing ATTRIBUTE_UNUSED)
5311
{
5312
  return mips_function_value_1 (valtype, fn_decl_or_type, VOIDmode);
5313
}
5314
 
5315
/* Implement TARGET_LIBCALL_VALUE.  */
5316
 
5317
static rtx
5318
mips_libcall_value (enum machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
5319
{
5320
  return mips_function_value_1 (NULL_TREE, NULL_TREE, mode);
5321
}
5322
 
5323
/* Implement TARGET_FUNCTION_VALUE_REGNO_P.
5324
 
5325
   On the MIPS, R2 R3 and F0 F2 are the only register thus used.
5326
   Currently, R2 and F0 are only implemented here (C has no complex type).  */
5327
 
5328
static bool
5329
mips_function_value_regno_p (const unsigned int regno)
5330
{
5331
  if (regno == GP_RETURN
5332
      || regno == FP_RETURN
5333
      || (LONG_DOUBLE_TYPE_SIZE == 128
5334
          && FP_RETURN != GP_RETURN
5335
          && regno == FP_RETURN + 2))
5336
    return true;
5337
 
5338
  return false;
5339
}
5340
 
5341
/* Implement TARGET_RETURN_IN_MEMORY.  Under the o32 and o64 ABIs,
5342
   all BLKmode objects are returned in memory.  Under the n32, n64
5343
   and embedded ABIs, small structures are returned in a register.
5344
   Objects with varying size must still be returned in memory, of
5345
   course.  */
5346
 
5347
static bool
5348
mips_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
5349
{
5350
  return (TARGET_OLDABI
5351
          ? TYPE_MODE (type) == BLKmode
5352
          : !IN_RANGE (int_size_in_bytes (type), 0, 2 * UNITS_PER_WORD));
5353
}
5354
 
5355
/* Implement TARGET_SETUP_INCOMING_VARARGS.  Save the anonymous
   register arguments of a varargs function into its stack frame so
   that va_arg can later read them from memory.  */

static void
mips_setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
                             tree type, int *pretend_size ATTRIBUTE_UNUSED,
                             int no_rtl)
{
  CUMULATIVE_ARGS local_cum;
  int gp_saved, fp_saved;

  /* The caller has advanced CUM up to, but not beyond, the last named
     argument.  Advance a local copy of CUM past the last "real" named
     argument, to find out how many registers are left over.  */
  local_cum = *get_cumulative_args (cum);
  mips_function_arg_advance (pack_cumulative_args (&local_cum), mode, type,
                             true);

  /* Found out how many registers we need to save.  FPRs are only
     saved separately for the EABI float-varargs convention.  */
  gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
  fp_saved = (EABI_FLOAT_VARARGS_P
              ? MAX_ARGS_IN_REGISTERS - local_cum.num_fprs
              : 0);

  if (!no_rtl)
    {
      if (gp_saved > 0)
        {
          rtx ptr, mem;

          /* The GPR save area is a single block just below the
             incoming-argument area.  */
          ptr = plus_constant (virtual_incoming_args_rtx,
                               REG_PARM_STACK_SPACE (cfun->decl)
                               - gp_saved * UNITS_PER_WORD);
          mem = gen_frame_mem (BLKmode, ptr);
          set_mem_alias_set (mem, get_varargs_alias_set ());

          move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
                               mem, gp_saved);
        }
      if (fp_saved > 0)
        {
          /* We can't use move_block_from_reg, because it will use
             the wrong mode.  */
          enum machine_mode mode;
          int off, i;

          /* Set OFF to the offset from virtual_incoming_args_rtx of
             the first float register.  The FP save area lies below
             the integer one, and is aligned to UNITS_PER_FPVALUE bytes.  */
          off = (-gp_saved * UNITS_PER_WORD) & -UNITS_PER_FPVALUE;
          off -= fp_saved * UNITS_PER_FPREG;

          mode = TARGET_SINGLE_FLOAT ? SFmode : DFmode;

          /* Store each remaining argument FPR individually, one
             register per argument-format slot.  */
          for (i = local_cum.num_fprs; i < MAX_ARGS_IN_REGISTERS;
               i += MAX_FPRS_PER_FMT)
            {
              rtx ptr, mem;

              ptr = plus_constant (virtual_incoming_args_rtx, off);
              mem = gen_frame_mem (mode, ptr);
              set_mem_alias_set (mem, get_varargs_alias_set ());
              mips_emit_move (mem, gen_rtx_REG (mode, FP_ARG_FIRST + i));
              off += UNITS_PER_HWFPVALUE;
            }
        }
    }
  /* Record the frame space we used, unless the ABI reserves argument
     space in the caller (in which case nothing extra is needed).  */
  if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
    cfun->machine->varargs_size = (gp_saved * UNITS_PER_WORD
                                   + fp_saved * UNITS_PER_FPREG);
}
5425
 
5426
/* Implement TARGET_BUILTIN_VA_LIST.  Build the type used to represent
   va_list: a five-field record for EABI with float varargs, otherwise
   a plain pointer type.  */

static tree
mips_build_builtin_va_list (void)
{
  if (EABI_FLOAT_VARARGS_P)
    {
      /* We keep 3 pointers, and two offsets.

         Two pointers are to the overflow area, which starts at the CFA.
         One of these is constant, for addressing into the GPR save area
         below it.  The other is advanced up the stack through the
         overflow region.

         The third pointer is to the bottom of the GPR save area.
         Since the FPR save area is just below it, we can address
         FPR slots off this pointer.

         We also keep two one-byte offsets, which are to be subtracted
         from the constant pointers to yield addresses in the GPR and
         FPR save areas.  These are downcounted as float or non-float
         arguments are used, and when they get to zero, the argument
         must be obtained from the overflow region.  */
      tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff, f_res, record;
      tree array, index;

      record = lang_hooks.types.make_type (RECORD_TYPE);

      f_ovfl = build_decl (BUILTINS_LOCATION,
                           FIELD_DECL, get_identifier ("__overflow_argptr"),
                           ptr_type_node);
      f_gtop = build_decl (BUILTINS_LOCATION,
                           FIELD_DECL, get_identifier ("__gpr_top"),
                           ptr_type_node);
      f_ftop = build_decl (BUILTINS_LOCATION,
                           FIELD_DECL, get_identifier ("__fpr_top"),
                           ptr_type_node);
      f_goff = build_decl (BUILTINS_LOCATION,
                           FIELD_DECL, get_identifier ("__gpr_offset"),
                           unsigned_char_type_node);
      f_foff = build_decl (BUILTINS_LOCATION,
                           FIELD_DECL, get_identifier ("__fpr_offset"),
                           unsigned_char_type_node);
      /* Explicitly pad to the size of a pointer, so that -Wpadded won't
         warn on every user file.  */
      index = build_int_cst (NULL_TREE, GET_MODE_SIZE (ptr_mode) - 2 - 1);
      array = build_array_type (unsigned_char_type_node,
                                build_index_type (index));
      f_res = build_decl (BUILTINS_LOCATION,
                          FIELD_DECL, get_identifier ("__reserved"), array);

      DECL_FIELD_CONTEXT (f_ovfl) = record;
      DECL_FIELD_CONTEXT (f_gtop) = record;
      DECL_FIELD_CONTEXT (f_ftop) = record;
      DECL_FIELD_CONTEXT (f_goff) = record;
      DECL_FIELD_CONTEXT (f_foff) = record;
      DECL_FIELD_CONTEXT (f_res) = record;

      /* Chain the fields in declaration order; mips_va_start and
         mips_gimplify_va_arg_expr walk them in this exact order.  */
      TYPE_FIELDS (record) = f_ovfl;
      DECL_CHAIN (f_ovfl) = f_gtop;
      DECL_CHAIN (f_gtop) = f_ftop;
      DECL_CHAIN (f_ftop) = f_goff;
      DECL_CHAIN (f_goff) = f_foff;
      DECL_CHAIN (f_foff) = f_res;

      layout_type (record);
      return record;
    }
  else if (TARGET_IRIX6)
    /* On IRIX 6, this type is 'char *'.  */
    return build_pointer_type (char_type_node);
  else
    /* Otherwise, we use 'void *'.  */
    return ptr_type_node;
}
5501
 
5502
/* Implement TARGET_EXPAND_BUILTIN_VA_START.  Initialize VALIST for the
   current function; NEXTARG points just past the last named argument.  */

static void
mips_va_start (tree valist, rtx nextarg)
{
  if (EABI_FLOAT_VARARGS_P)
    {
      const CUMULATIVE_ARGS *cum;
      tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
      tree ovfl, gtop, ftop, goff, foff;
      tree t;
      int gpr_save_area_size;
      int fpr_save_area_size;
      int fpr_offset;

      /* Sizes of the register save areas, as laid out by
         mips_setup_incoming_varargs.  */
      cum = &crtl->args.info;
      gpr_save_area_size
        = (MAX_ARGS_IN_REGISTERS - cum->num_gprs) * UNITS_PER_WORD;
      fpr_save_area_size
        = (MAX_ARGS_IN_REGISTERS - cum->num_fprs) * UNITS_PER_FPREG;

      /* Field order matches mips_build_builtin_va_list.  */
      f_ovfl = TYPE_FIELDS (va_list_type_node);
      f_gtop = DECL_CHAIN (f_ovfl);
      f_ftop = DECL_CHAIN (f_gtop);
      f_goff = DECL_CHAIN (f_ftop);
      f_foff = DECL_CHAIN (f_goff);

      ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
                     NULL_TREE);
      gtop = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
                     NULL_TREE);
      ftop = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
                     NULL_TREE);
      goff = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
                     NULL_TREE);
      foff = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
                     NULL_TREE);

      /* Emit code to initialize OVFL, which points to the next varargs
         stack argument.  CUM->STACK_WORDS gives the number of stack
         words used by named arguments.  */
      t = make_tree (TREE_TYPE (ovfl), virtual_incoming_args_rtx);
      if (cum->stack_words > 0)
        t = fold_build_pointer_plus_hwi (t, cum->stack_words * UNITS_PER_WORD);
      t = build2 (MODIFY_EXPR, TREE_TYPE (ovfl), ovfl, t);
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

      /* Emit code to initialize GTOP, the top of the GPR save area.  */
      t = make_tree (TREE_TYPE (gtop), virtual_incoming_args_rtx);
      t = build2 (MODIFY_EXPR, TREE_TYPE (gtop), gtop, t);
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

      /* Emit code to initialize FTOP, the top of the FPR save area.
         This address is gpr_save_area_bytes below GTOP, rounded
         down to the next fp-aligned boundary.  */
      t = make_tree (TREE_TYPE (ftop), virtual_incoming_args_rtx);
      fpr_offset = gpr_save_area_size + UNITS_PER_FPVALUE - 1;
      fpr_offset &= -UNITS_PER_FPVALUE;
      if (fpr_offset)
        t = fold_build_pointer_plus_hwi (t, -fpr_offset);
      t = build2 (MODIFY_EXPR, TREE_TYPE (ftop), ftop, t);
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

      /* Emit code to initialize GOFF, the offset from GTOP of the
         next GPR argument.  */
      t = build2 (MODIFY_EXPR, TREE_TYPE (goff), goff,
                  build_int_cst (TREE_TYPE (goff), gpr_save_area_size));
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

      /* Likewise emit code to initialize FOFF, the offset from FTOP
         of the next FPR argument.  */
      t = build2 (MODIFY_EXPR, TREE_TYPE (foff), foff,
                  build_int_cst (TREE_TYPE (foff), fpr_save_area_size));
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }
  else
    {
      /* Simple case: va_list is just a pointer into the save area.  */
      nextarg = plus_constant (nextarg, -cfun->machine->varargs_size);
      std_expand_builtin_va_start (valist, nextarg);
    }
}
5583
 
5584
/* Like std_gimplify_va_arg_expr, but apply alignment to zero-sized
   types as well.  Gimplifies a va_arg read of TYPE from VALIST,
   appending setup code to PRE_P and returning the dereferenced
   argument expression.  */

static tree
mips_std_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
                               gimple_seq *post_p)
{
  tree addr, t, type_size, rounded_size, valist_tmp;
  unsigned HOST_WIDE_INT align, boundary;
  bool indirect;

  /* Pass-by-reference arguments are read through a pointer slot.  */
  indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
  if (indirect)
    type = build_pointer_type (type);

  align = PARM_BOUNDARY / BITS_PER_UNIT;
  boundary = targetm.calls.function_arg_boundary (TYPE_MODE (type), type);

  /* When we align parameter on stack for caller, if the parameter
     alignment is beyond MAX_SUPPORTED_STACK_ALIGNMENT, it will be
     aligned at MAX_SUPPORTED_STACK_ALIGNMENT.  We will match callee
     here with caller.  */
  if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
    boundary = MAX_SUPPORTED_STACK_ALIGNMENT;

  boundary /= BITS_PER_UNIT;

  /* Hoist the valist value into a temporary for the moment.  */
  valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);

  /* va_list pointer is aligned to PARM_BOUNDARY.  If argument actually
     requires greater alignment, we must perform dynamic alignment.  */
  if (boundary > align)
    {
      /* valist_tmp = (valist_tmp + boundary - 1) & -boundary.  */
      t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
                  fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
      gimplify_and_add (t, pre_p);

      t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
                  fold_build2 (BIT_AND_EXPR, TREE_TYPE (valist),
                               valist_tmp,
                               build_int_cst (TREE_TYPE (valist), -boundary)));
      gimplify_and_add (t, pre_p);
    }
  else
    boundary = align;

  /* If the actual alignment is less than the alignment of the type,
     adjust the type accordingly so that we don't assume strict alignment
     when dereferencing the pointer.  */
  boundary *= BITS_PER_UNIT;
  if (boundary < TYPE_ALIGN (type))
    {
      type = build_variant_type_copy (type);
      TYPE_ALIGN (type) = boundary;
    }

  /* Compute the rounded size of the type.  */
  type_size = size_in_bytes (type);
  rounded_size = round_up (type_size, align);

  /* Reduce rounded_size so it's sharable with the postqueue.  */
  gimplify_expr (&rounded_size, pre_p, post_p, is_gimple_val, fb_rvalue);

  /* Get AP.  */
  addr = valist_tmp;
  if (PAD_VARARGS_DOWN && !integer_zerop (rounded_size))
    {
      /* Small args are padded downward.  */
      t = fold_build2_loc (input_location, GT_EXPR, sizetype,
                       rounded_size, size_int (align));
      t = fold_build3 (COND_EXPR, sizetype, t, size_zero_node,
                       size_binop (MINUS_EXPR, rounded_size, type_size));
      addr = fold_build_pointer_plus (addr, t);
    }

  /* Compute new value for AP.  */
  t = fold_build_pointer_plus (valist_tmp, rounded_size);
  t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
  gimplify_and_add (t, pre_p);

  addr = fold_convert (build_pointer_type (type), addr);

  /* An extra dereference is needed for pass-by-reference arguments.  */
  if (indirect)
    addr = build_va_arg_indirect_ref (addr);

  return build_va_arg_indirect_ref (addr);
}
5672
 
5673
/* Implement TARGET_GIMPLIFY_VA_ARG_EXPR.  Gimplify reading an argument
   of TYPE from VALIST, handling the EABI float-varargs save areas.  */

static tree
mips_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
                           gimple_seq *post_p)
{
  tree addr;
  bool indirect_p;

  indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
  if (indirect_p)
    type = build_pointer_type (type);

  if (!EABI_FLOAT_VARARGS_P)
    addr = mips_std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
  else
    {
      tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
      tree ovfl, top, off, align;
      HOST_WIDE_INT size, rsize, osize;
      tree t, u;

      /* Field order matches mips_build_builtin_va_list.  */
      f_ovfl = TYPE_FIELDS (va_list_type_node);
      f_gtop = DECL_CHAIN (f_ovfl);
      f_ftop = DECL_CHAIN (f_gtop);
      f_goff = DECL_CHAIN (f_ftop);
      f_foff = DECL_CHAIN (f_goff);

      /* Let:

         TOP be the top of the GPR or FPR save area;
         OFF be the offset from TOP of the next register;
         ADDR_RTX be the address of the argument;
         SIZE be the number of bytes in the argument type;
         RSIZE be the number of bytes used to store the argument
           when it's in the register save area; and
         OSIZE be the number of bytes used to store it when it's
           in the stack overflow area.

         The code we want is:

         1: off &= -rsize;        // round down
         2: if (off != 0)
         3:   {
         4:     addr_rtx = top - off + (BYTES_BIG_ENDIAN ? RSIZE - SIZE : 0);
         5:     off -= rsize;
         6:   }
         7: else
         8:   {
         9:     ovfl = ((intptr_t) ovfl + osize - 1) & -osize;
         10:    addr_rtx = ovfl + (BYTES_BIG_ENDIAN ? OSIZE - SIZE : 0);
         11:    ovfl += osize;
         14:  }

         [1] and [9] can sometimes be optimized away.  */

      ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
                     NULL_TREE);
      size = int_size_in_bytes (type);

      if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT
          && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FPVALUE)
        {
          /* Floating-point argument: read from the FPR save area.  */
          top = build3 (COMPONENT_REF, TREE_TYPE (f_ftop),
                        unshare_expr (valist), f_ftop, NULL_TREE);
          off = build3 (COMPONENT_REF, TREE_TYPE (f_foff),
                        unshare_expr (valist), f_foff, NULL_TREE);

          /* When va_start saves FPR arguments to the stack, each slot
             takes up UNITS_PER_HWFPVALUE bytes, regardless of the
             argument's precision.  */
          rsize = UNITS_PER_HWFPVALUE;

          /* Overflow arguments are padded to UNITS_PER_WORD bytes
             (= PARM_BOUNDARY bits).  This can be different from RSIZE
             in two cases:

             (1) On 32-bit targets when TYPE is a structure such as:

             struct s { float f; };

             Such structures are passed in paired FPRs, so RSIZE
             will be 8 bytes.  However, the structure only takes
             up 4 bytes of memory, so OSIZE will only be 4.

             (2) In combinations such as -mgp64 -msingle-float
             -fshort-double.  Doubles passed in registers will then take
             up 4 (UNITS_PER_HWFPVALUE) bytes, but those passed on the
             stack take up UNITS_PER_WORD bytes.  */
          osize = MAX (GET_MODE_SIZE (TYPE_MODE (type)), UNITS_PER_WORD);
        }
      else
        {
          /* Integer or aggregate argument: read from the GPR save area.  */
          top = build3 (COMPONENT_REF, TREE_TYPE (f_gtop),
                        unshare_expr (valist), f_gtop, NULL_TREE);
          off = build3 (COMPONENT_REF, TREE_TYPE (f_goff),
                        unshare_expr (valist), f_goff, NULL_TREE);
          rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
          if (rsize > UNITS_PER_WORD)
            {
              /* [1] Emit code for: off &= -rsize.      */
              t = build2 (BIT_AND_EXPR, TREE_TYPE (off), unshare_expr (off),
                          build_int_cst (TREE_TYPE (off), -rsize));
              gimplify_assign (unshare_expr (off), t, pre_p);
            }
          osize = rsize;
        }

      /* [2] Emit code to branch if off == 0.  */
      t = build2 (NE_EXPR, boolean_type_node, unshare_expr (off),
                  build_int_cst (TREE_TYPE (off), 0));
      addr = build3 (COND_EXPR, ptr_type_node, t, NULL_TREE, NULL_TREE);

      /* [5] Emit code for: off -= rsize.  We do this as a form of
         post-decrement not available to C.  */
      t = fold_convert (TREE_TYPE (off), build_int_cst (NULL_TREE, rsize));
      t = build2 (POSTDECREMENT_EXPR, TREE_TYPE (off), off, t);

      /* [4] Emit code for:
         addr_rtx = top - off + (BYTES_BIG_ENDIAN ? RSIZE - SIZE : 0).  */
      t = fold_convert (sizetype, t);
      t = fold_build1 (NEGATE_EXPR, sizetype, t);
      t = fold_build_pointer_plus (top, t);
      if (BYTES_BIG_ENDIAN && rsize > size)
        t = fold_build_pointer_plus_hwi (t, rsize - size);
      COND_EXPR_THEN (addr) = t;

      if (osize > UNITS_PER_WORD)
        {
          /* [9] Emit: ovfl = ((intptr_t) ovfl + osize - 1) & -osize.  */
          t = fold_build_pointer_plus_hwi (unshare_expr (ovfl), osize - 1);
          u = build_int_cst (TREE_TYPE (t), -osize);
          t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t, u);
          align = build2 (MODIFY_EXPR, TREE_TYPE (ovfl),
                          unshare_expr (ovfl), t);
        }
      else
        align = NULL;

      /* [10, 11] Emit code for:
         addr_rtx = ovfl + (BYTES_BIG_ENDIAN ? OSIZE - SIZE : 0)
         ovfl += osize.  */
      u = fold_convert (TREE_TYPE (ovfl), build_int_cst (NULL_TREE, osize));
      t = build2 (POSTINCREMENT_EXPR, TREE_TYPE (ovfl), ovfl, u);
      if (BYTES_BIG_ENDIAN && osize > size)
        t = fold_build_pointer_plus_hwi (t, osize - size);

      /* String [9] and [10, 11] together.  */
      if (align)
        t = build2 (COMPOUND_EXPR, TREE_TYPE (t), align, t);
      COND_EXPR_ELSE (addr) = t;

      addr = fold_convert (build_pointer_type (type), addr);
      addr = build_va_arg_indirect_ref (addr);
    }

  /* An extra dereference is needed for pass-by-reference arguments.  */
  if (indirect_p)
    addr = build_va_arg_indirect_ref (addr);

  return addr;
}
5834
 
5835
/* Declare a unique, locally-binding function called NAME, then start
   its definition.  */

static void
mips_start_unique_function (const char *name)
{
  tree decl;

  /* Build a dummy "void NAME (void)" decl so that the generic
     section and symbol machinery can be reused.  */
  decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
                     get_identifier (name),
                     build_function_type_list (void_type_node, NULL_TREE));
  DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
                                   NULL_TREE, void_type_node);
  TREE_PUBLIC (decl) = 1;
  TREE_STATIC (decl) = 1;

  /* Put the function in its own COMDAT group so the linker merges
     duplicate copies from different object files.  */
  DECL_COMDAT_GROUP (decl) = DECL_ASSEMBLER_NAME (decl);

  targetm.asm_out.unique_section (decl, 0);
  switch_to_section (get_named_section (decl, NULL, 0));

  /* Global but hidden, so the symbol binds locally.  */
  targetm.asm_out.globalize_label (asm_out_file, name);
  fputs ("\t.hidden\t", asm_out_file);
  assemble_name (asm_out_file, name);
  putc ('\n', asm_out_file);
}
5861
 
5862
/* Start a definition of function NAME.  MIPS16_P indicates whether the
   function contains MIPS16 code.  */

static void
mips_start_function_definition (const char *name, bool mips16_p)
{
  /* Tell the assembler which ISA mode the function body uses.  */
  if (mips16_p)
    fprintf (asm_out_file, "\t.set\tmips16\n");
  else
    fprintf (asm_out_file, "\t.set\tnomips16\n");

  /* Emit a ".ent" directive unless size directives are suppressed.  */
  if (!flag_inhibit_size_directive)
    {
      fputs ("\t.ent\t", asm_out_file);
      assemble_name (asm_out_file, name);
      fputs ("\n", asm_out_file);
    }

  ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, name, "function");

  /* Start the definition proper.  */
  assemble_name (asm_out_file, name);
  fputs (":\n", asm_out_file);
}
5886
 
5887
/* End a function definition started by mips_start_function_definition.
   Emits the matching ".end NAME" directive unless size directives are
   suppressed.  */

static void
mips_end_function_definition (const char *name)
{
  if (!flag_inhibit_size_directive)
    {
      fputs ("\t.end\t", asm_out_file);
      assemble_name (asm_out_file, name);
      fputs ("\n", asm_out_file);
    }
}
5899
 
5900
/* Output a definition of the __mips16_rdhwr function.  This is a
   non-MIPS16 stub that reads hardware register $29 (the TLS pointer)
   into $3, since the RDHWR instruction is unavailable in MIPS16 mode.  */

static void
mips_output_mips16_rdhwr (void)
{
  const char *name;

  name = "__mips16_rdhwr";
  mips_start_unique_function (name);
  mips_start_function_definition (name, false);
  /* RDHWR requires at least mips32r2; restore the previous ISA
     setting with .set push/pop.  The jump fills its own delay slot
     ordering via .set noreorder.  */
  fprintf (asm_out_file,
           "\t.set\tpush\n"
           "\t.set\tmips32r2\n"
           "\t.set\tnoreorder\n"
           "\trdhwr\t$3,$29\n"
           "\t.set\tpop\n"
           "\tj\t$31\n");
  mips_end_function_definition (name);
}
5919
 
5920
/* Return true if calls to X can use R_MIPS_CALL* relocations.  */
5921
 
5922
static bool
5923
mips_ok_for_lazy_binding_p (rtx x)
5924
{
5925
  return (TARGET_USE_GOT
5926
          && GET_CODE (x) == SYMBOL_REF
5927
          && !SYMBOL_REF_BIND_NOW_P (x)
5928
          && !mips_symbol_binds_local_p (x));
5929
}
5930
 
5931
/* Load function address ADDR into register DEST.  TYPE is as for
5932
   mips_expand_call.  Return true if we used an explicit lazy-binding
5933
   sequence.  */
5934
 
5935
static bool
5936
mips_load_call_address (enum mips_call_type type, rtx dest, rtx addr)
5937
{
5938
  /* If we're generating PIC, and this call is to a global function,
5939
     try to allow its address to be resolved lazily.  This isn't
5940
     possible for sibcalls when $gp is call-saved because the value
5941
     of $gp on entry to the stub would be our caller's gp, not ours.  */
5942
  if (TARGET_EXPLICIT_RELOCS
5943
      && !(type == MIPS_CALL_SIBCALL && TARGET_CALL_SAVED_GP)
5944
      && mips_ok_for_lazy_binding_p (addr))
5945
    {
5946
      addr = mips_got_load (dest, addr, SYMBOL_GOTOFF_CALL);
5947
      emit_insn (gen_rtx_SET (VOIDmode, dest, addr));
5948
      return true;
5949
    }
5950
  else
5951
    {
5952
      mips_emit_move (dest, addr);
5953
      return false;
5954
    }
5955
}
5956
 
5957
/* Each locally-defined hard-float MIPS16 function has a local symbol
5958
   associated with it.  This hash table maps the function symbol (FUNC)
5959
   to the local symbol (LOCAL). */
5960
struct GTY(()) mips16_local_alias {
5961
  rtx func;
5962
  rtx local;
5963
};
5964
static GTY ((param_is (struct mips16_local_alias))) htab_t mips16_local_aliases;
5965
 
5966
/* Hash table callbacks for mips16_local_aliases.  */
5967
 
5968
static hashval_t
5969
mips16_local_aliases_hash (const void *entry)
5970
{
5971
  const struct mips16_local_alias *alias;
5972
 
5973
  alias = (const struct mips16_local_alias *) entry;
5974
  return htab_hash_string (XSTR (alias->func, 0));
5975
}
5976
 
5977
static int
5978
mips16_local_aliases_eq (const void *entry1, const void *entry2)
5979
{
5980
  const struct mips16_local_alias *alias1, *alias2;
5981
 
5982
  alias1 = (const struct mips16_local_alias *) entry1;
5983
  alias2 = (const struct mips16_local_alias *) entry2;
5984
  return rtx_equal_p (alias1->func, alias2->func);
5985
}
5986
 
5987
/* FUNC is the symbol for a locally-defined hard-float MIPS16 function.
5988
   Return a local alias for it, creating a new one if necessary.  */
5989
 
5990
static rtx
5991
mips16_local_alias (rtx func)
5992
{
5993
  struct mips16_local_alias *alias, tmp_alias;
5994
  void **slot;
5995
 
5996
  /* Create the hash table if this is the first call.  */
5997
  if (mips16_local_aliases == NULL)
5998
    mips16_local_aliases = htab_create_ggc (37, mips16_local_aliases_hash,
5999
                                            mips16_local_aliases_eq, NULL);
6000
 
6001
  /* Look up the function symbol, creating a new entry if need be.  */
6002
  tmp_alias.func = func;
6003
  slot = htab_find_slot (mips16_local_aliases, &tmp_alias, INSERT);
6004
  gcc_assert (slot != NULL);
6005
 
6006
  alias = (struct mips16_local_alias *) *slot;
6007
  if (alias == NULL)
6008
    {
6009
      const char *func_name, *local_name;
6010
      rtx local;
6011
 
6012
      /* Create a new SYMBOL_REF for the local symbol.  The choice of
6013
         __fn_local_* is based on the __fn_stub_* names that we've
6014
         traditionally used for the non-MIPS16 stub.  */
6015
      func_name = targetm.strip_name_encoding (XSTR (func, 0));
6016
      local_name = ACONCAT (("__fn_local_", func_name, NULL));
6017
      local = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (local_name));
6018
      SYMBOL_REF_FLAGS (local) = SYMBOL_REF_FLAGS (func) | SYMBOL_FLAG_LOCAL;
6019
 
6020
      /* Create a new structure to represent the mapping.  */
6021
      alias = ggc_alloc_mips16_local_alias ();
6022
      alias->func = func;
6023
      alias->local = local;
6024
      *slot = alias;
6025
    }
6026
  return alias->local;
6027
}
6028
 
6029
/* A chained list of functions for which mips16_build_call_stub has already
6030
   generated a stub.  NAME is the name of the function and FP_RET_P is true
6031
   if the function returns a value in floating-point registers.  */
6032
struct mips16_stub {
6033
  struct mips16_stub *next;
6034
  char *name;
6035
  bool fp_ret_p;
6036
};
6037
static struct mips16_stub *mips16_stubs;
6038
 
6039
/* Return the two-character string that identifies floating-point
6040
   return mode MODE in the name of a MIPS16 function stub.  */
6041
 
6042
static const char *
6043
mips16_call_stub_mode_suffix (enum machine_mode mode)
6044
{
6045
  if (mode == SFmode)
6046
    return "sf";
6047
  else if (mode == DFmode)
6048
    return "df";
6049
  else if (mode == SCmode)
6050
    return "sc";
6051
  else if (mode == DCmode)
6052
    return "dc";
6053
  else if (mode == V2SFmode)
6054
    return "df";
6055
  else
6056
    gcc_unreachable ();
6057
}
6058
 
6059
/* Write instructions to move a 32-bit value between general register
6060
   GPREG and floating-point register FPREG.  DIRECTION is 't' to move
6061
   from GPREG to FPREG and 'f' to move in the opposite direction.  */
6062
 
6063
static void
6064
mips_output_32bit_xfer (char direction, unsigned int gpreg, unsigned int fpreg)
6065
{
6066
  fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
6067
           reg_names[gpreg], reg_names[fpreg]);
6068
}
6069
 
6070
/* Likewise for 64-bit values.  */
6071
 
6072
static void
6073
mips_output_64bit_xfer (char direction, unsigned int gpreg, unsigned int fpreg)
6074
{
6075
  if (TARGET_64BIT)
6076
    fprintf (asm_out_file, "\tdm%cc1\t%s,%s\n", direction,
6077
             reg_names[gpreg], reg_names[fpreg]);
6078
  else if (TARGET_FLOAT64)
6079
    {
6080
      fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
6081
               reg_names[gpreg + TARGET_BIG_ENDIAN], reg_names[fpreg]);
6082
      fprintf (asm_out_file, "\tm%chc1\t%s,%s\n", direction,
6083
               reg_names[gpreg + TARGET_LITTLE_ENDIAN], reg_names[fpreg]);
6084
    }
6085
  else
6086
    {
6087
      /* Move the least-significant word.  */
6088
      fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
6089
               reg_names[gpreg + TARGET_BIG_ENDIAN], reg_names[fpreg]);
6090
      /* ...then the most significant word.  */
6091
      fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
6092
               reg_names[gpreg + TARGET_LITTLE_ENDIAN], reg_names[fpreg + 1]);
6093
    }
6094
}
6095
 
6096
/* Write out code to move floating-point arguments into or out of
6097
   general registers.  FP_CODE is the code describing which arguments
6098
   are present (see the comment above the definition of CUMULATIVE_ARGS
6099
   in mips.h).  DIRECTION is as for mips_output_32bit_xfer.  */
6100
 
6101
static void
6102
mips_output_args_xfer (int fp_code, char direction)
6103
{
6104
  unsigned int gparg, fparg, f;
6105
  CUMULATIVE_ARGS cum;
6106
 
6107
  /* This code only works for o32 and o64.  */
6108
  gcc_assert (TARGET_OLDABI);
6109
 
6110
  mips_init_cumulative_args (&cum, NULL);
6111
 
6112
  for (f = (unsigned int) fp_code; f != 0; f >>= 2)
6113
    {
6114
      enum machine_mode mode;
6115
      struct mips_arg_info info;
6116
 
6117
      if ((f & 3) == 1)
6118
        mode = SFmode;
6119
      else if ((f & 3) == 2)
6120
        mode = DFmode;
6121
      else
6122
        gcc_unreachable ();
6123
 
6124
      mips_get_arg_info (&info, &cum, mode, NULL, true);
6125
      gparg = mips_arg_regno (&info, false);
6126
      fparg = mips_arg_regno (&info, true);
6127
 
6128
      if (mode == SFmode)
6129
        mips_output_32bit_xfer (direction, gparg, fparg);
6130
      else
6131
        mips_output_64bit_xfer (direction, gparg, fparg);
6132
 
6133
      mips_function_arg_advance (pack_cumulative_args (&cum), mode, NULL, true);
6134
    }
6135
}
6136
 
6137
/* Write a MIPS16 stub for the current function.  This stub is used
6138
   for functions which take arguments in the floating-point registers.
6139
   It is normal-mode code that moves the floating-point arguments
6140
   into the general registers and then jumps to the MIPS16 code.  */
6141
 
6142
static void
6143
mips16_build_function_stub (void)
6144
{
6145
  const char *fnname, *alias_name, *separator;
6146
  char *secname, *stubname;
6147
  tree stubdecl;
6148
  unsigned int f;
6149
  rtx symbol, alias;
6150
 
6151
  /* Create the name of the stub, and its unique section.  */
6152
  symbol = XEXP (DECL_RTL (current_function_decl), 0);
6153
  alias = mips16_local_alias (symbol);
6154
 
6155
  fnname = targetm.strip_name_encoding (XSTR (symbol, 0));
6156
  alias_name = targetm.strip_name_encoding (XSTR (alias, 0));
6157
  secname = ACONCAT ((".mips16.fn.", fnname, NULL));
6158
  stubname = ACONCAT (("__fn_stub_", fnname, NULL));
6159
 
6160
  /* Build a decl for the stub.  */
6161
  stubdecl = build_decl (BUILTINS_LOCATION,
6162
                         FUNCTION_DECL, get_identifier (stubname),
6163
                         build_function_type_list (void_type_node, NULL_TREE));
6164
  DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
6165
  DECL_RESULT (stubdecl) = build_decl (BUILTINS_LOCATION,
6166
                                       RESULT_DECL, NULL_TREE, void_type_node);
6167
 
6168
  /* Output a comment.  */
6169
  fprintf (asm_out_file, "\t# Stub function for %s (",
6170
           current_function_name ());
6171
  separator = "";
6172
  for (f = (unsigned int) crtl->args.info.fp_code; f != 0; f >>= 2)
6173
    {
6174
      fprintf (asm_out_file, "%s%s", separator,
6175
               (f & 3) == 1 ? "float" : "double");
6176
      separator = ", ";
6177
    }
6178
  fprintf (asm_out_file, ")\n");
6179
 
6180
  /* Start the function definition.  */
6181
  assemble_start_function (stubdecl, stubname);
6182
  mips_start_function_definition (stubname, false);
6183
 
6184
  /* If generating pic2 code, either set up the global pointer or
6185
     switch to pic0.  */
6186
  if (TARGET_ABICALLS_PIC2)
6187
    {
6188
      if (TARGET_ABSOLUTE_ABICALLS)
6189
        fprintf (asm_out_file, "\t.option\tpic0\n");
6190
      else
6191
        {
6192
          output_asm_insn ("%(.cpload\t%^%)", NULL);
6193
          /* Emit an R_MIPS_NONE relocation to tell the linker what the
6194
             target function is.  Use a local GOT access when loading the
6195
             symbol, to cut down on the number of unnecessary GOT entries
6196
             for stubs that aren't needed.  */
6197
          output_asm_insn (".reloc\t0,R_MIPS_NONE,%0", &symbol);
6198
          symbol = alias;
6199
        }
6200
    }
6201
 
6202
  /* Load the address of the MIPS16 function into $25.  Do this first so
6203
     that targets with coprocessor interlocks can use an MFC1 to fill the
6204
     delay slot.  */
6205
  output_asm_insn ("la\t%^,%0", &symbol);
6206
 
6207
  /* Move the arguments from floating-point registers to general registers.  */
6208
  mips_output_args_xfer (crtl->args.info.fp_code, 'f');
6209
 
6210
  /* Jump to the MIPS16 function.  */
6211
  output_asm_insn ("jr\t%^", NULL);
6212
 
6213
  if (TARGET_ABICALLS_PIC2 && TARGET_ABSOLUTE_ABICALLS)
6214
    fprintf (asm_out_file, "\t.option\tpic2\n");
6215
 
6216
  mips_end_function_definition (stubname);
6217
 
6218
  /* If the linker needs to create a dynamic symbol for the target
6219
     function, it will associate the symbol with the stub (which,
6220
     unlike the target function, follows the proper calling conventions).
6221
     It is therefore useful to have a local alias for the target function,
6222
     so that it can still be identified as MIPS16 code.  As an optimization,
6223
     this symbol can also be used for indirect MIPS16 references from
6224
     within this file.  */
6225
  ASM_OUTPUT_DEF (asm_out_file, alias_name, fnname);
6226
 
6227
  switch_to_section (function_section (current_function_decl));
6228
}
6229
 
6230
/* The current function is a MIPS16 function that returns a value in an FPR.
6231
   Copy the return value from its soft-float to its hard-float location.
6232
   libgcc2 has special non-MIPS16 helper functions for each case.  */
6233
 
6234
static void
6235
mips16_copy_fpr_return_value (void)
6236
{
6237
  rtx fn, insn, retval;
6238
  tree return_type;
6239
  enum machine_mode return_mode;
6240
  const char *name;
6241
 
6242
  return_type = DECL_RESULT (current_function_decl);
6243
  return_mode = DECL_MODE (return_type);
6244
 
6245
  name = ACONCAT (("__mips16_ret_",
6246
                   mips16_call_stub_mode_suffix (return_mode),
6247
                   NULL));
6248
  fn = mips16_stub_function (name);
6249
 
6250
  /* The function takes arguments in $2 (and possibly $3), so calls
6251
     to it cannot be lazily bound.  */
6252
  SYMBOL_REF_FLAGS (fn) |= SYMBOL_FLAG_BIND_NOW;
6253
 
6254
  /* Model the call as something that takes the GPR return value as
6255
     argument and returns an "updated" value.  */
6256
  retval = gen_rtx_REG (return_mode, GP_RETURN);
6257
  insn = mips_expand_call (MIPS_CALL_EPILOGUE, retval, fn,
6258
                           const0_rtx, NULL_RTX, false);
6259
  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), retval);
6260
}
6261
 
6262
/* Consider building a stub for a MIPS16 call to function *FN_PTR.
6263
   RETVAL is the location of the return value, or null if this is
6264
   a "call" rather than a "call_value".  ARGS_SIZE is the size of the
6265
   arguments and FP_CODE is the code built by mips_function_arg;
6266
   see the comment before the fp_code field in CUMULATIVE_ARGS for details.
6267
 
6268
   There are three alternatives:
6269
 
6270
   - If a stub was needed, emit the call and return the call insn itself.
6271
 
6272
   - If we can avoid using a stub by redirecting the call, set *FN_PTR
6273
     to the new target and return null.
6274
 
6275
   - If *FN_PTR doesn't need a stub, return null and leave *FN_PTR
6276
     unmodified.
6277
 
6278
   A stub is needed for calls to functions that, in normal mode,
6279
   receive arguments in FPRs or return values in FPRs.  The stub
6280
   copies the arguments from their soft-float positions to their
6281
   hard-float positions, calls the real function, then copies the
6282
   return value from its hard-float position to its soft-float
6283
   position.
6284
 
6285
   We can emit a JAL to *FN_PTR even when *FN_PTR might need a stub.
6286
   If *FN_PTR turns out to be to a non-MIPS16 function, the linker
6287
   automatically redirects the JAL to the stub, otherwise the JAL
6288
   continues to call FN directly.  */
6289
 
6290
static rtx
6291
mips16_build_call_stub (rtx retval, rtx *fn_ptr, rtx args_size, int fp_code)
6292
{
6293
  const char *fnname;
6294
  bool fp_ret_p;
6295
  struct mips16_stub *l;
6296
  rtx insn, fn;
6297
 
6298
  /* We don't need to do anything if we aren't in MIPS16 mode, or if
6299
     we were invoked with the -msoft-float option.  */
6300
  if (!TARGET_MIPS16 || TARGET_SOFT_FLOAT_ABI)
6301
    return NULL_RTX;
6302
 
6303
  /* Figure out whether the value might come back in a floating-point
6304
     register.  */
6305
  fp_ret_p = retval && mips_return_mode_in_fpr_p (GET_MODE (retval));
6306
 
6307
  /* We don't need to do anything if there were no floating-point
6308
     arguments and the value will not be returned in a floating-point
6309
     register.  */
6310
  if (fp_code == 0 && !fp_ret_p)
6311
    return NULL_RTX;
6312
 
6313
  /* We don't need to do anything if this is a call to a special
6314
     MIPS16 support function.  */
6315
  fn = *fn_ptr;
6316
  if (mips16_stub_function_p (fn))
6317
    return NULL_RTX;
6318
 
6319
  /* If we're calling a locally-defined MIPS16 function, we know that
6320
     it will return values in both the "soft-float" and "hard-float"
6321
     registers.  There is no need to use a stub to move the latter
6322
     to the former.  */
6323
  if (fp_code == 0 && mips16_local_function_p (fn))
6324
    return NULL_RTX;
6325
 
6326
  /* This code will only work for o32 and o64 abis.  The other ABI's
6327
     require more sophisticated support.  */
6328
  gcc_assert (TARGET_OLDABI);
6329
 
6330
  /* If we're calling via a function pointer, use one of the magic
6331
     libgcc.a stubs provided for each (FP_CODE, FP_RET_P) combination.
6332
     Each stub expects the function address to arrive in register $2.  */
6333
  if (GET_CODE (fn) != SYMBOL_REF
6334
      || !call_insn_operand (fn, VOIDmode))
6335
    {
6336
      char buf[30];
6337
      rtx stub_fn, insn, addr;
6338
      bool lazy_p;
6339
 
6340
      /* If this is a locally-defined and locally-binding function,
6341
         avoid the stub by calling the local alias directly.  */
6342
      if (mips16_local_function_p (fn))
6343
        {
6344
          *fn_ptr = mips16_local_alias (fn);
6345
          return NULL_RTX;
6346
        }
6347
 
6348
      /* Create a SYMBOL_REF for the libgcc.a function.  */
6349
      if (fp_ret_p)
6350
        sprintf (buf, "__mips16_call_stub_%s_%d",
6351
                 mips16_call_stub_mode_suffix (GET_MODE (retval)),
6352
                 fp_code);
6353
      else
6354
        sprintf (buf, "__mips16_call_stub_%d", fp_code);
6355
      stub_fn = mips16_stub_function (buf);
6356
 
6357
      /* The function uses $2 as an argument, so calls to it
6358
         cannot be lazily bound.  */
6359
      SYMBOL_REF_FLAGS (stub_fn) |= SYMBOL_FLAG_BIND_NOW;
6360
 
6361
      /* Load the target function into $2.  */
6362
      addr = gen_rtx_REG (Pmode, GP_REG_FIRST + 2);
6363
      lazy_p = mips_load_call_address (MIPS_CALL_NORMAL, addr, fn);
6364
 
6365
      /* Emit the call.  */
6366
      insn = mips_expand_call (MIPS_CALL_NORMAL, retval, stub_fn,
6367
                               args_size, NULL_RTX, lazy_p);
6368
 
6369
      /* Tell GCC that this call does indeed use the value of $2.  */
6370
      use_reg (&CALL_INSN_FUNCTION_USAGE (insn), addr);
6371
 
6372
      /* If we are handling a floating-point return value, we need to
6373
         save $18 in the function prologue.  Putting a note on the
6374
         call will mean that df_regs_ever_live_p ($18) will be true if the
6375
         call is not eliminated, and we can check that in the prologue
6376
         code.  */
6377
      if (fp_ret_p)
6378
        CALL_INSN_FUNCTION_USAGE (insn) =
6379
          gen_rtx_EXPR_LIST (VOIDmode,
6380
                             gen_rtx_CLOBBER (VOIDmode,
6381
                                              gen_rtx_REG (word_mode, 18)),
6382
                             CALL_INSN_FUNCTION_USAGE (insn));
6383
 
6384
      return insn;
6385
    }
6386
 
6387
  /* We know the function we are going to call.  If we have already
6388
     built a stub, we don't need to do anything further.  */
6389
  fnname = targetm.strip_name_encoding (XSTR (fn, 0));
6390
  for (l = mips16_stubs; l != NULL; l = l->next)
6391
    if (strcmp (l->name, fnname) == 0)
6392
      break;
6393
 
6394
  if (l == NULL)
6395
    {
6396
      const char *separator;
6397
      char *secname, *stubname;
6398
      tree stubid, stubdecl;
6399
      unsigned int f;
6400
 
6401
      /* If the function does not return in FPRs, the special stub
6402
         section is named
6403
             .mips16.call.FNNAME
6404
 
6405
         If the function does return in FPRs, the stub section is named
6406
             .mips16.call.fp.FNNAME
6407
 
6408
         Build a decl for the stub.  */
6409
      secname = ACONCAT ((".mips16.call.", fp_ret_p ? "fp." : "",
6410
                          fnname, NULL));
6411
      stubname = ACONCAT (("__call_stub_", fp_ret_p ? "fp_" : "",
6412
                           fnname, NULL));
6413
      stubid = get_identifier (stubname);
6414
      stubdecl = build_decl (BUILTINS_LOCATION,
6415
                             FUNCTION_DECL, stubid,
6416
                             build_function_type_list (void_type_node,
6417
                                                       NULL_TREE));
6418
      DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
6419
      DECL_RESULT (stubdecl) = build_decl (BUILTINS_LOCATION,
6420
                                           RESULT_DECL, NULL_TREE,
6421
                                           void_type_node);
6422
 
6423
      /* Output a comment.  */
6424
      fprintf (asm_out_file, "\t# Stub function to call %s%s (",
6425
               (fp_ret_p
6426
                ? (GET_MODE (retval) == SFmode ? "float " : "double ")
6427
                : ""),
6428
               fnname);
6429
      separator = "";
6430
      for (f = (unsigned int) fp_code; f != 0; f >>= 2)
6431
        {
6432
          fprintf (asm_out_file, "%s%s", separator,
6433
                   (f & 3) == 1 ? "float" : "double");
6434
          separator = ", ";
6435
        }
6436
      fprintf (asm_out_file, ")\n");
6437
 
6438
      /* Start the function definition.  */
6439
      assemble_start_function (stubdecl, stubname);
6440
      mips_start_function_definition (stubname, false);
6441
 
6442
      if (fp_ret_p)
6443
        {
6444
          fprintf (asm_out_file, "\t.cfi_startproc\n");
6445
 
6446
          /* Create a fake CFA 4 bytes below the stack pointer.
6447
             This works around unwinders (like libgcc's) that expect
6448
             the CFA for non-signal frames to be unique.  */
6449
          fprintf (asm_out_file, "\t.cfi_def_cfa 29,-4\n");
6450
 
6451
          /* "Save" $sp in itself so we don't use the fake CFA.
6452
             This is: DW_CFA_val_expression r29, { DW_OP_reg29 }.  */
6453
          fprintf (asm_out_file, "\t.cfi_escape 0x16,29,1,0x6d\n");
6454
        }
6455
      else
6456
        {
6457
          /* Load the address of the MIPS16 function into $25.  Do this
6458
             first so that targets with coprocessor interlocks can use
6459
             an MFC1 to fill the delay slot.  */
6460
          if (TARGET_EXPLICIT_RELOCS)
6461
            {
6462
              output_asm_insn ("lui\t%^,%%hi(%0)", &fn);
6463
              output_asm_insn ("addiu\t%^,%^,%%lo(%0)", &fn);
6464
            }
6465
          else
6466
            output_asm_insn ("la\t%^,%0", &fn);
6467
        }
6468
 
6469
      /* Move the arguments from general registers to floating-point
6470
         registers.  */
6471
      mips_output_args_xfer (fp_code, 't');
6472
 
6473
      if (fp_ret_p)
6474
        {
6475
          /* Save the return address in $18 and call the non-MIPS16 function.
6476
             The stub's caller knows that $18 might be clobbered, even though
6477
             $18 is usually a call-saved register.  */
6478
          fprintf (asm_out_file, "\tmove\t%s,%s\n",
6479
                   reg_names[GP_REG_FIRST + 18], reg_names[RETURN_ADDR_REGNUM]);
6480
          output_asm_insn (MIPS_CALL ("jal", &fn, 0, -1), &fn);
6481
          fprintf (asm_out_file, "\t.cfi_register 31,18\n");
6482
 
6483
          /* Move the result from floating-point registers to
6484
             general registers.  */
6485
          switch (GET_MODE (retval))
6486
            {
6487
            case SCmode:
6488
              mips_output_32bit_xfer ('f', GP_RETURN + TARGET_BIG_ENDIAN,
6489
                                      TARGET_BIG_ENDIAN
6490
                                      ? FP_REG_FIRST + MAX_FPRS_PER_FMT
6491
                                      : FP_REG_FIRST);
6492
              mips_output_32bit_xfer ('f', GP_RETURN + TARGET_LITTLE_ENDIAN,
6493
                                      TARGET_LITTLE_ENDIAN
6494
                                      ? FP_REG_FIRST + MAX_FPRS_PER_FMT
6495
                                      : FP_REG_FIRST);
6496
              if (GET_MODE (retval) == SCmode && TARGET_64BIT)
6497
                {
6498
                  /* On 64-bit targets, complex floats are returned in
6499
                     a single GPR, such that "sd" on a suitably-aligned
6500
                     target would store the value correctly.  */
6501
                  fprintf (asm_out_file, "\tdsll\t%s,%s,32\n",
6502
                           reg_names[GP_RETURN + TARGET_BIG_ENDIAN],
6503
                           reg_names[GP_RETURN + TARGET_BIG_ENDIAN]);
6504
                  fprintf (asm_out_file, "\tdsll\t%s,%s,32\n",
6505
                           reg_names[GP_RETURN + TARGET_LITTLE_ENDIAN],
6506
                           reg_names[GP_RETURN + TARGET_LITTLE_ENDIAN]);
6507
                  fprintf (asm_out_file, "\tdsrl\t%s,%s,32\n",
6508
                           reg_names[GP_RETURN + TARGET_BIG_ENDIAN],
6509
                           reg_names[GP_RETURN + TARGET_BIG_ENDIAN]);
6510
                  fprintf (asm_out_file, "\tor\t%s,%s,%s\n",
6511
                           reg_names[GP_RETURN],
6512
                           reg_names[GP_RETURN],
6513
                           reg_names[GP_RETURN + 1]);
6514
                }
6515
              break;
6516
 
6517
            case SFmode:
6518
              mips_output_32bit_xfer ('f', GP_RETURN, FP_REG_FIRST);
6519
              break;
6520
 
6521
            case DCmode:
6522
              mips_output_64bit_xfer ('f', GP_RETURN + (8 / UNITS_PER_WORD),
6523
                                      FP_REG_FIRST + MAX_FPRS_PER_FMT);
6524
              /* Fall though.  */
6525
            case DFmode:
6526
            case V2SFmode:
6527
              mips_output_64bit_xfer ('f', GP_RETURN, FP_REG_FIRST);
6528
              break;
6529
 
6530
            default:
6531
              gcc_unreachable ();
6532
            }
6533
          fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 18]);
6534
          fprintf (asm_out_file, "\t.cfi_endproc\n");
6535
        }
6536
      else
6537
        {
6538
          /* Jump to the previously-loaded address.  */
6539
          output_asm_insn ("jr\t%^", NULL);
6540
        }
6541
 
6542
#ifdef ASM_DECLARE_FUNCTION_SIZE
6543
      ASM_DECLARE_FUNCTION_SIZE (asm_out_file, stubname, stubdecl);
6544
#endif
6545
 
6546
      mips_end_function_definition (stubname);
6547
 
6548
      /* Record this stub.  */
6549
      l = XNEW (struct mips16_stub);
6550
      l->name = xstrdup (fnname);
6551
      l->fp_ret_p = fp_ret_p;
6552
      l->next = mips16_stubs;
6553
      mips16_stubs = l;
6554
    }
6555
 
6556
  /* If we expect a floating-point return value, but we've built a
6557
     stub which does not expect one, then we're in trouble.  We can't
6558
     use the existing stub, because it won't handle the floating-point
6559
     value.  We can't build a new stub, because the linker won't know
6560
     which stub to use for the various calls in this object file.
6561
     Fortunately, this case is illegal, since it means that a function
6562
     was declared in two different ways in a single compilation.  */
6563
  if (fp_ret_p && !l->fp_ret_p)
6564
    error ("cannot handle inconsistent calls to %qs", fnname);
6565
 
6566
  if (retval == NULL_RTX)
6567
    insn = gen_call_internal_direct (fn, args_size);
6568
  else
6569
    insn = gen_call_value_internal_direct (retval, fn, args_size);
6570
  insn = mips_emit_call_insn (insn, fn, fn, false);
6571
 
6572
  /* If we are calling a stub which handles a floating-point return
6573
     value, we need to arrange to save $18 in the prologue.  We do this
6574
     by marking the function call as using the register.  The prologue
6575
     will later see that it is used, and emit code to save it.  */
6576
  if (fp_ret_p)
6577
    CALL_INSN_FUNCTION_USAGE (insn) =
6578
      gen_rtx_EXPR_LIST (VOIDmode,
6579
                         gen_rtx_CLOBBER (VOIDmode,
6580
                                          gen_rtx_REG (word_mode, 18)),
6581
                         CALL_INSN_FUNCTION_USAGE (insn));
6582
 
6583
  return insn;
6584
}
6585
 
6586
/* Expand a call of type TYPE.  RESULT is where the result will go (null
6587
   for "call"s and "sibcall"s), ADDR is the address of the function,
6588
   ARGS_SIZE is the size of the arguments and AUX is the value passed
6589
   to us by mips_function_arg.  LAZY_P is true if this call already
6590
   involves a lazily-bound function address (such as when calling
6591
   functions through a MIPS16 hard-float stub).
6592
 
6593
   Return the call itself.  */
6594
 
6595
rtx
6596
mips_expand_call (enum mips_call_type type, rtx result, rtx addr,
6597
                  rtx args_size, rtx aux, bool lazy_p)
6598
{
6599
  rtx orig_addr, pattern, insn;
6600
  int fp_code;
6601
 
6602
  fp_code = aux == 0 ? 0 : (int) GET_MODE (aux);
6603
  insn = mips16_build_call_stub (result, &addr, args_size, fp_code);
6604
  if (insn)
6605
    {
6606
      gcc_assert (!lazy_p && type == MIPS_CALL_NORMAL);
6607
      return insn;
6608
    }
6609
                                 ;
6610
  orig_addr = addr;
6611
  if (!call_insn_operand (addr, VOIDmode))
6612
    {
6613
      if (type == MIPS_CALL_EPILOGUE)
6614
        addr = MIPS_EPILOGUE_TEMP (Pmode);
6615
      else
6616
        addr = gen_reg_rtx (Pmode);
6617
      lazy_p |= mips_load_call_address (type, addr, orig_addr);
6618
    }
6619
 
6620
  if (result == 0)
6621
    {
6622
      rtx (*fn) (rtx, rtx);
6623
 
6624
      if (type == MIPS_CALL_SIBCALL)
6625
        fn = gen_sibcall_internal;
6626
      else
6627
        fn = gen_call_internal;
6628
 
6629
      pattern = fn (addr, args_size);
6630
    }
6631
  else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
6632
    {
6633
      /* Handle return values created by mips_return_fpr_pair.  */
6634
      rtx (*fn) (rtx, rtx, rtx, rtx);
6635
      rtx reg1, reg2;
6636
 
6637
      if (type == MIPS_CALL_SIBCALL)
6638
        fn = gen_sibcall_value_multiple_internal;
6639
      else
6640
        fn = gen_call_value_multiple_internal;
6641
 
6642
      reg1 = XEXP (XVECEXP (result, 0, 0), 0);
6643
      reg2 = XEXP (XVECEXP (result, 0, 1), 0);
6644
      pattern = fn (reg1, addr, args_size, reg2);
6645
    }
6646
  else
6647
    {
6648
      rtx (*fn) (rtx, rtx, rtx);
6649
 
6650
      if (type == MIPS_CALL_SIBCALL)
6651
        fn = gen_sibcall_value_internal;
6652
      else
6653
        fn = gen_call_value_internal;
6654
 
6655
      /* Handle return values created by mips_return_fpr_single.  */
6656
      if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 1)
6657
        result = XEXP (XVECEXP (result, 0, 0), 0);
6658
      pattern = fn (result, addr, args_size);
6659
    }
6660
 
6661
  return mips_emit_call_insn (pattern, orig_addr, addr, lazy_p);
6662
}
6663
 
6664
/* Split call instruction INSN into a $gp-clobbering call and
6665
   (where necessary) an instruction to restore $gp from its save slot.
6666
   CALL_PATTERN is the pattern of the new call.  */
6667
 
6668
void
6669
mips_split_call (rtx insn, rtx call_pattern)
6670
{
6671
  emit_call_insn (call_pattern);
6672
  if (!find_reg_note (insn, REG_NORETURN, 0))
6673
    /* Pick a temporary register that is suitable for both MIPS16 and
6674
       non-MIPS16 code.  $4 and $5 are used for returning complex double
6675
       values in soft-float code, so $6 is the first suitable candidate.  */
6676
    mips_restore_gp_from_cprestore_slot (gen_rtx_REG (Pmode, GP_ARG_FIRST + 2));
6677
}
6678
 
6679
/* Implement TARGET_FUNCTION_OK_FOR_SIBCALL.  */
6680
 
6681
static bool
6682
mips_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
6683
{
6684
  if (!TARGET_SIBCALLS)
6685
    return false;
6686
 
6687
  /* Interrupt handlers need special epilogue code and therefore can't
6688
     use sibcalls.  */
6689
  if (mips_interrupt_type_p (TREE_TYPE (current_function_decl)))
6690
    return false;
6691
 
6692
  /* We can't do a sibcall if the called function is a MIPS16 function
6693
     because there is no direct "jx" instruction equivalent to "jalx" to
6694
     switch the ISA mode.  We only care about cases where the sibling
6695
     and normal calls would both be direct.  */
6696
  if (decl
6697
      && mips_use_mips16_mode_p (decl)
6698
      && const_call_insn_operand (XEXP (DECL_RTL (decl), 0), VOIDmode))
6699
    return false;
6700
 
6701
  /* When -minterlink-mips16 is in effect, assume that non-locally-binding
6702
     functions could be MIPS16 ones unless an attribute explicitly tells
6703
     us otherwise.  */
6704
  if (TARGET_INTERLINK_MIPS16
6705
      && decl
6706
      && (DECL_EXTERNAL (decl) || !targetm.binds_local_p (decl))
6707
      && !mips_nomips16_decl_p (decl)
6708
      && const_call_insn_operand (XEXP (DECL_RTL (decl), 0), VOIDmode))
6709
    return false;
6710
 
6711
  /* Otherwise OK.  */
6712
  return true;
6713
}
6714
 
6715
/* Emit code to move general operand SRC into condition-code
6716
   register DEST given that SCRATCH is a scratch TFmode FPR.
6717
   The sequence is:
6718
 
6719
        FP1 = SRC
6720
        FP2 = 0.0f
6721
        DEST = FP2 < FP1
6722
 
6723
   where FP1 and FP2 are single-precision FPRs taken from SCRATCH.  */
6724
 
6725
void
mips_expand_fcc_reload (rtx dest, rtx src, rtx scratch)
{
  rtx fp1, fp2;

  /* Change the source to SFmode.  */
  if (MEM_P (src))
    src = adjust_address (src, SFmode, 0);
  else if (REG_P (src) || GET_CODE (src) == SUBREG)
    src = gen_rtx_REG (SFmode, true_regnum (src));

  /* Carve two single-precision registers out of the TFmode SCRATCH;
     the second lives MAX_FPRS_PER_FMT hard registers further on.  */
  fp1 = gen_rtx_REG (SFmode, REGNO (scratch));
  fp2 = gen_rtx_REG (SFmode, REGNO (scratch) + MAX_FPRS_PER_FMT);

  /* FP1 = SRC, FP2 = 0.0f, then DEST = FP2 < FP1 (see the comment
     above the function).  */
  mips_emit_move (copy_rtx (fp1), src);
  mips_emit_move (copy_rtx (fp2), CONST0_RTX (SFmode));
  emit_insn (gen_slt_sf (dest, fp2, fp1));
}
6743
 
6744
/* Implement MOVE_BY_PIECES_P.  */
6745
 
6746
bool
6747
mips_move_by_pieces_p (unsigned HOST_WIDE_INT size, unsigned int align)
6748
{
6749
  if (HAVE_movmemsi)
6750
    {
6751
      /* movmemsi is meant to generate code that is at least as good as
6752
         move_by_pieces.  However, movmemsi effectively uses a by-pieces
6753
         implementation both for moves smaller than a word and for
6754
         word-aligned moves of no more than MIPS_MAX_MOVE_BYTES_STRAIGHT
6755
         bytes.  We should allow the tree-level optimisers to do such
6756
         moves by pieces, as it often exposes other optimization
6757
         opportunities.  We might as well continue to use movmemsi at
6758
         the rtl level though, as it produces better code when
6759
         scheduling is disabled (such as at -O).  */
6760
      if (currently_expanding_to_rtl)
6761
        return false;
6762
      if (align < BITS_PER_WORD)
6763
        return size < UNITS_PER_WORD;
6764
      return size <= MIPS_MAX_MOVE_BYTES_STRAIGHT;
6765
    }
6766
  /* The default value.  If this becomes a target hook, we should
6767
     call the default definition instead.  */
6768
  return (move_by_pieces_ninsns (size, align, MOVE_MAX_PIECES + 1)
6769
          < (unsigned int) MOVE_RATIO (optimize_insn_for_speed_p ()));
6770
}
6771
 
6772
/* Implement STORE_BY_PIECES_P.  */
6773
 
6774
bool
6775
mips_store_by_pieces_p (unsigned HOST_WIDE_INT size, unsigned int align)
6776
{
6777
  /* Storing by pieces involves moving constants into registers
6778
     of size MIN (ALIGN, BITS_PER_WORD), then storing them.
6779
     We need to decide whether it is cheaper to load the address of
6780
     constant data into a register and use a block move instead.  */
6781
 
6782
  /* If the data is only byte aligned, then:
6783
 
6784
     (a1) A block move of less than 4 bytes would involve three 3 LBs and
6785
          3 SBs.  We might as well use 3 single-instruction LIs and 3 SBs
6786
          instead.
6787
 
6788
     (a2) A block move of 4 bytes from aligned source data can use an
6789
          LW/SWL/SWR sequence.  This is often better than the 4 LIs and
6790
          4 SBs that we would generate when storing by pieces.  */
6791
  if (align <= BITS_PER_UNIT)
6792
    return size < 4;
6793
 
6794
  /* If the data is 2-byte aligned, then:
6795
 
6796
     (b1) A block move of less than 4 bytes would use a combination of LBs,
6797
          LHs, SBs and SHs.  We get better code by using single-instruction
6798
          LIs, SBs and SHs instead.
6799
 
6800
     (b2) A block move of 4 bytes from aligned source data would again use
6801
          an LW/SWL/SWR sequence.  In most cases, loading the address of
6802
          the source data would require at least one extra instruction.
6803
          It is often more efficient to use 2 single-instruction LIs and
6804
          2 SHs instead.
6805
 
6806
     (b3) A block move of up to 3 additional bytes would be like (b1).
6807
 
6808
     (b4) A block move of 8 bytes from aligned source data can use two
6809
          LW/SWL/SWR sequences or a single LD/SDL/SDR sequence.  Both
6810
          sequences are better than the 4 LIs and 4 SHs that we'd generate
6811
          when storing by pieces.
6812
 
6813
     The reasoning for higher alignments is similar:
6814
 
6815
     (c1) A block move of less than 4 bytes would be the same as (b1).
6816
 
6817
     (c2) A block move of 4 bytes would use an LW/SW sequence.  Again,
6818
          loading the address of the source data would typically require
6819
          at least one extra instruction.  It is generally better to use
6820
          LUI/ORI/SW instead.
6821
 
6822
     (c3) A block move of up to 3 additional bytes would be like (b1).
6823
 
6824
     (c4) A block move of 8 bytes can use two LW/SW sequences or a single
6825
          LD/SD sequence, and in these cases we've traditionally preferred
6826
          the memory copy over the more bulky constant moves.  */
6827
  return size < 8;
6828
}
6829
 
6830
/* Emit straight-line code to move LENGTH bytes from SRC to DEST.
6831
   Assume that the areas do not overlap.  */
6832
 
6833
static void
mips_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
{
  HOST_WIDE_INT offset, delta;
  unsigned HOST_WIDE_INT bits;
  int i;
  enum machine_mode mode;
  rtx *regs;

  /* Work out how many bits to move at a time.  If both operands have
     half-word alignment, it is usually better to move in half words.
     For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr
     and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr.
     Otherwise move word-sized chunks.  */
  if (MEM_ALIGN (src) == BITS_PER_WORD / 2
      && MEM_ALIGN (dest) == BITS_PER_WORD / 2)
    bits = BITS_PER_WORD / 2;
  else
    bits = BITS_PER_WORD;

  mode = mode_for_size (bits, MODE_INT, 0);
  delta = bits / BITS_PER_UNIT;

  /* Allocate a buffer for the temporary registers.  */
  regs = XALLOCAVEC (rtx, length / delta);

  /* Load as many BITS-sized chunks as possible.  Use a normal load if
     the source has enough alignment, otherwise use left/right pairs.  */
  for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
    {
      regs[i] = gen_reg_rtx (mode);
      if (MEM_ALIGN (src) >= bits)
        mips_emit_move (regs[i], adjust_address (src, mode, offset));
      else
        {
          rtx part = adjust_address (src, BLKmode, offset);
          if (!mips_expand_ext_as_unaligned_load (regs[i], part, bits, 0))
            gcc_unreachable ();
        }
    }

  /* Copy the chunks to the destination.  All the loads above were
     emitted first, so the stores here cannot clobber pending source
     data.  */
  for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
    if (MEM_ALIGN (dest) >= bits)
      mips_emit_move (adjust_address (dest, mode, offset), regs[i]);
    else
      {
        rtx part = adjust_address (dest, BLKmode, offset);
        if (!mips_expand_ins_as_unaligned_store (part, regs[i], bits, 0))
          gcc_unreachable ();
      }

  /* Mop up any left-over bytes (fewer than DELTA) with a generic
     by-pieces move.  */
  if (offset < length)
    {
      src = adjust_address (src, BLKmode, offset);
      dest = adjust_address (dest, BLKmode, offset);
      move_by_pieces (dest, src, length - offset,
                      MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
    }
}
6894
 
6895
/* Helper function for doing a loop-based block operation on memory
6896
   reference MEM.  Each iteration of the loop will operate on LENGTH
6897
   bytes of MEM.
6898
 
6899
   Create a new base register for use within the loop and point it to
6900
   the start of MEM.  Create a new memory reference that uses this
6901
   register.  Store them in *LOOP_REG and *LOOP_MEM respectively.  */
6902
 
6903
static void
mips_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
                       rtx *loop_reg, rtx *loop_mem)
{
  /* Copy MEM's address into a fresh pseudo that the loop can advance.  */
  *loop_reg = copy_addr_to_reg (XEXP (mem, 0));

  /* Although the new mem does not refer to a known location,
     it does keep up to LENGTH bytes of alignment.  */
  *loop_mem = change_address (mem, BLKmode, *loop_reg);
  set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
}
6914
 
6915
/* Move LENGTH bytes from SRC to DEST using a loop that moves BYTES_PER_ITER
6916
   bytes at a time.  LENGTH must be at least BYTES_PER_ITER.  Assume that
6917
   the memory regions do not overlap.  */
6918
 
6919
static void
mips_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length,
                      HOST_WIDE_INT bytes_per_iter)
{
  rtx label, src_reg, dest_reg, final_src, test;
  HOST_WIDE_INT leftover;

  /* Only whole iterations go through the loop; the remainder is
     handled straight-line after it.  */
  leftover = length % bytes_per_iter;
  length -= leftover;

  /* Create registers and memory references for use within the loop.  */
  mips_adjust_block_mem (src, bytes_per_iter, &src_reg, &src);
  mips_adjust_block_mem (dest, bytes_per_iter, &dest_reg, &dest);

  /* Calculate the value that SRC_REG should have after the last iteration
     of the loop.  */
  final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
                                   0, 0, OPTAB_WIDEN);

  /* Emit the start of the loop.  */
  label = gen_label_rtx ();
  emit_label (label);

  /* Emit the loop body.  */
  mips_block_move_straight (dest, src, bytes_per_iter);

  /* Move on to the next block.  */
  mips_emit_move (src_reg, plus_constant (src_reg, bytes_per_iter));
  mips_emit_move (dest_reg, plus_constant (dest_reg, bytes_per_iter));

  /* Emit the loop condition: branch back while SRC_REG has not yet
     reached FINAL_SRC.  */
  test = gen_rtx_NE (VOIDmode, src_reg, final_src);
  if (Pmode == DImode)
    emit_jump_insn (gen_cbranchdi4 (test, src_reg, final_src, label));
  else
    emit_jump_insn (gen_cbranchsi4 (test, src_reg, final_src, label));

  /* Mop up any left-over bytes.  */
  if (leftover)
    mips_block_move_straight (dest, src, leftover);
}
6960
 
6961
/* Expand a movmemsi instruction, which copies LENGTH bytes from
6962
   memory reference SRC to memory reference DEST.  */
6963
 
6964
bool
6965
mips_expand_block_move (rtx dest, rtx src, rtx length)
6966
{
6967
  if (CONST_INT_P (length))
6968
    {
6969
      if (INTVAL (length) <= MIPS_MAX_MOVE_BYTES_STRAIGHT)
6970
        {
6971
          mips_block_move_straight (dest, src, INTVAL (length));
6972
          return true;
6973
        }
6974
      else if (optimize)
6975
        {
6976
          mips_block_move_loop (dest, src, INTVAL (length),
6977
                                MIPS_MAX_MOVE_BYTES_PER_LOOP_ITER);
6978
          return true;
6979
        }
6980
    }
6981
  return false;
6982
}
6983
 
6984
/* Expand a loop of synci insns for the address range [BEGIN, END).  */
6985
 
6986
void
mips_expand_synci_loop (rtx begin, rtx end)
{
  rtx inc, label, end_label, cmp_result, mask, length;

  /* Create end_label.  */
  end_label = gen_label_rtx ();

  /* Check if begin equals end; an empty range needs no flushing.  */
  cmp_result = gen_rtx_EQ (VOIDmode, begin, end);
  emit_jump_insn (gen_condjump (cmp_result, end_label));

  /* Load INC with the cache line size (rdhwr INC,$1).  */
  inc = gen_reg_rtx (Pmode);
  emit_insn (PMODE_INSN (gen_rdhwr_synci_step, (inc)));

  /* Check if inc is 0; if so, skip the synci loop entirely.  */
  cmp_result = gen_rtx_EQ (VOIDmode, inc, const0_rtx);
  emit_jump_insn (gen_condjump (cmp_result, end_label));

  /* Calculate mask: negating the (power-of-two) line size gives a
     mask of the line-offset bits.  */
  mask = mips_force_unary (Pmode, NEG, inc);

  /* Mask out begin by mask, aligning it down to a line boundary.  */
  begin = mips_force_binary (Pmode, AND, begin, mask);

  /* Calculate length.  */
  length = mips_force_binary (Pmode, MINUS, end, begin);

  /* Loop back to here.  */
  label = gen_label_rtx ();
  emit_label (label);

  emit_insn (gen_synci (begin));

  /* Update length.  */
  mips_emit_binary (MINUS, length, length, inc);

  /* Update begin.  */
  mips_emit_binary (PLUS, begin, begin, inc);

  /* Check if length is greater than 0.  */
  cmp_result = gen_rtx_GT (VOIDmode, length, const0_rtx);
  emit_jump_insn (gen_condjump (cmp_result, label));

  emit_label (end_label);
}
7033
 
7034
/* Expand a QI or HI mode atomic memory operation.
7035
 
7036
   GENERATOR contains a pointer to the gen_* function that generates
7037
   the SI mode underlying atomic operation using masks that we
7038
   calculate.
7039
 
7040
   RESULT is the return register for the operation.  Its value is NULL
7041
   if unused.
7042
 
7043
   MEM is the location of the atomic access.
7044
 
7045
   OLDVAL is the first operand for the operation.
7046
 
7047
   NEWVAL is the optional second operand for the operation.  Its value
7048
   is NULL if unused.  */
7049
 
7050
void
mips_expand_atomic_qihi (union mips_gen_fn_ptrs generator,
                         rtx result, rtx mem, rtx oldval, rtx newval)
{
  rtx orig_addr, memsi_addr, memsi, shift, shiftsi, unshifted_mask;
  rtx unshifted_mask_reg, mask, inverted_mask, si_op;
  rtx res = NULL;
  enum machine_mode mode;

  mode = GET_MODE (mem);

  /* Compute the address of the containing SImode value by masking off
     the low two address bits.  */
  orig_addr = force_reg (Pmode, XEXP (mem, 0));
  memsi_addr = mips_force_binary (Pmode, AND, orig_addr,
                                  force_reg (Pmode, GEN_INT (-4)));

  /* Create a memory reference for it.  */
  memsi = gen_rtx_MEM (SImode, memsi_addr);
  set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
  MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);

  /* Work out the byte offset of the QImode or HImode value,
     counting from the least significant byte.  */
  shift = mips_force_binary (Pmode, AND, orig_addr, GEN_INT (3));
  if (TARGET_BIG_ENDIAN)
    mips_emit_binary (XOR, shift, shift, GEN_INT (mode == QImode ? 3 : 2));

  /* Multiply by eight to convert the shift value from bytes to bits.  */
  mips_emit_binary (ASHIFT, shift, shift, GEN_INT (3));

  /* Make the final shift an SImode value, so that it can be used in
     SImode operations.  */
  shiftsi = force_reg (SImode, gen_lowpart (SImode, shift));

  /* Set MASK to an inclusive mask of the QImode or HImode value.  */
  unshifted_mask = GEN_INT (GET_MODE_MASK (mode));
  unshifted_mask_reg = force_reg (SImode, unshifted_mask);
  mask = mips_force_binary (SImode, ASHIFT, unshifted_mask_reg, shiftsi);

  /* Compute the equivalent exclusive mask.  */
  inverted_mask = gen_reg_rtx (SImode);
  emit_insn (gen_rtx_SET (VOIDmode, inverted_mask,
                          gen_rtx_NOT (SImode, mask)));

  /* Shift the old value into place.  A zero operand needs no shift.  */
  if (oldval != const0_rtx)
    {
      oldval = convert_modes (SImode, mode, oldval, true);
      oldval = force_reg (SImode, oldval);
      oldval = mips_force_binary (SImode, ASHIFT, oldval, shiftsi);
    }

  /* Do the same for the new value.  */
  if (newval && newval != const0_rtx)
    {
      newval = convert_modes (SImode, mode, newval, true);
      newval = force_reg (SImode, newval);
      newval = mips_force_binary (SImode, ASHIFT, newval, shiftsi);
    }

  /* Do the SImode atomic access, picking the generator whose arity
     matches the operands we actually have.  */
  if (result)
    res = gen_reg_rtx (SImode);
  if (newval)
    si_op = generator.fn_6 (res, memsi, mask, inverted_mask, oldval, newval);
  else if (result)
    si_op = generator.fn_5 (res, memsi, mask, inverted_mask, oldval);
  else
    si_op = generator.fn_4 (memsi, mask, inverted_mask, oldval);

  emit_insn (si_op);

  if (result)
    {
      /* Shift and convert the result.  */
      mips_emit_binary (AND, res, res, mask);
      mips_emit_binary (LSHIFTRT, res, res, shiftsi);
      mips_emit_move (result, gen_lowpart (GET_MODE (result), res));
    }
}
7130
 
7131
/* Return true if it is possible to use left/right accesses for a
7132
   bitfield of WIDTH bits starting BITPOS bits into *OP.  When
7133
   returning true, update *OP, *LEFT and *RIGHT as follows:
7134
 
7135
   *OP is a BLKmode reference to the whole field.
7136
 
7137
   *LEFT is a QImode reference to the first byte if big endian or
7138
   the last byte if little endian.  This address can be used in the
7139
   left-side instructions (LWL, SWL, LDL, SDL).
7140
 
7141
   *RIGHT is a QImode reference to the opposite end of the field and
7142
   can be used in the patterning right-side instruction.  */
7143
 
7144
static bool
mips_get_unaligned_mem (rtx *op, HOST_WIDE_INT width, HOST_WIDE_INT bitpos,
                        rtx *left, rtx *right)
{
  rtx first, last;

  /* Check that the operand really is a MEM.  Not all the extv and
     extzv predicates are checked.  */
  if (!MEM_P (*op))
    return false;

  /* Check that the size is valid: 32 bits always, 64 bits only on
     64-bit targets.  */
  if (width != 32 && (!TARGET_64BIT || width != 64))
    return false;

  /* We can only access byte-aligned values.  Since we are always passed
     a reference to the first byte of the field, it is not necessary to
     do anything with BITPOS after this check.  */
  if (bitpos % BITS_PER_UNIT != 0)
    return false;

  /* Reject aligned bitfields: we want to use a normal load or store
     instead of a left/right pair.  */
  if (MEM_ALIGN (*op) >= width)
    return false;

  /* Adjust *OP to refer to the whole field.  This also has the effect
     of legitimizing *OP's address for BLKmode, possibly simplifying it.  */
  *op = adjust_address (*op, BLKmode, 0);
  set_mem_size (*op, width / BITS_PER_UNIT);

  /* Get references to both ends of the field.  We deliberately don't
     use the original QImode *OP for FIRST since the new BLKmode one
     might have a simpler address.  */
  first = adjust_address (*op, QImode, 0);
  last = adjust_address (*op, QImode, width / BITS_PER_UNIT - 1);

  /* Allocate to LEFT and RIGHT according to endianness.  LEFT should
     correspond to the MSB and RIGHT to the LSB.  */
  if (TARGET_BIG_ENDIAN)
    *left = first, *right = last;
  else
    *left = last, *right = first;

  return true;
}
7190
 
7191
/* Try to use left/right loads to expand an "extv" or "extzv" pattern.
7192
   DEST, SRC, WIDTH and BITPOS are the operands passed to the expander;
7193
   the operation is the equivalent of:
7194
 
7195
      (set DEST (*_extract SRC WIDTH BITPOS))
7196
 
7197
   Return true on success.  */
7198
 
7199
bool
mips_expand_ext_as_unaligned_load (rtx dest, rtx src, HOST_WIDE_INT width,
                                   HOST_WIDE_INT bitpos)
{
  rtx left, right, temp;

  /* If TARGET_64BIT, the destination of a 32-bit "extz" or "extzv" will
     be a paradoxical word_mode subreg.  This is the only case in which
     we allow the destination to be larger than the source.  */
  if (GET_CODE (dest) == SUBREG
      && GET_MODE (dest) == DImode
      && GET_MODE (SUBREG_REG (dest)) == SImode)
    dest = SUBREG_REG (dest);

  /* After the above adjustment, the destination must be the same
     width as the source.  */
  if (GET_MODE_BITSIZE (GET_MODE (dest)) != width)
    return false;

  if (!mips_get_unaligned_mem (&src, width, bitpos, &left, &right))
    return false;

  /* Emit the left-part load into a fresh TEMP, then combine it with
     the right-part load into DEST.  */
  temp = gen_reg_rtx (GET_MODE (dest));
  if (GET_MODE (dest) == DImode)
    {
      emit_insn (gen_mov_ldl (temp, src, left));
      emit_insn (gen_mov_ldr (dest, copy_rtx (src), right, temp));
    }
  else
    {
      emit_insn (gen_mov_lwl (temp, src, left));
      emit_insn (gen_mov_lwr (dest, copy_rtx (src), right, temp));
    }
  return true;
}
7234
 
7235
/* Try to use left/right stores to expand an "ins" pattern.  DEST, WIDTH,
7236
   BITPOS and SRC are the operands passed to the expander; the operation
7237
   is the equivalent of:
7238
 
7239
       (set (zero_extract DEST WIDTH BITPOS) SRC)
7240
 
7241
   Return true on success.  */
7242
 
7243
bool
mips_expand_ins_as_unaligned_store (rtx dest, rtx src, HOST_WIDE_INT width,
                                    HOST_WIDE_INT bitpos)
{
  rtx left, right;
  enum machine_mode mode;

  if (!mips_get_unaligned_mem (&dest, width, bitpos, &left, &right))
    return false;

  /* View SRC as an integer of exactly WIDTH bits before storing.  */
  mode = mode_for_size (width, MODE_INT, 0);
  src = gen_lowpart (mode, src);
  if (mode == DImode)
    {
      emit_insn (gen_mov_sdl (dest, src, left));
      emit_insn (gen_mov_sdr (copy_rtx (dest), copy_rtx (src), right));
    }
  else
    {
      emit_insn (gen_mov_swl (dest, src, left));
      emit_insn (gen_mov_swr (copy_rtx (dest), copy_rtx (src), right));
    }
  return true;
}
7267
 
7268
/* Return true if X is a MEM with the same size as MODE.  */
7269
 
7270
bool
7271
mips_mem_fits_mode_p (enum machine_mode mode, rtx x)
7272
{
7273
  return (MEM_P (x)
7274
          && MEM_SIZE_KNOWN_P (x)
7275
          && MEM_SIZE (x) == GET_MODE_SIZE (mode));
7276
}
7277
 
7278
/* Return true if (zero_extract OP WIDTH BITPOS) can be used as the
   source of an "ext" instruction or the destination of an "ins"
   instruction.  OP must be a register operand and the following
   conditions must hold:

     0 <= BITPOS < GET_MODE_BITSIZE (GET_MODE (op))
     0 < WIDTH <= GET_MODE_BITSIZE (GET_MODE (op))
     0 <= BITPOS + WIDTH <= GET_MODE_BITSIZE (GET_MODE (op))

   Also reject lengths equal to a word as they are better handled
   by the move patterns.  */
7289
 
7290
bool
7291
mips_use_ins_ext_p (rtx op, HOST_WIDE_INT width, HOST_WIDE_INT bitpos)
7292
{
7293
  if (!ISA_HAS_EXT_INS
7294
      || !register_operand (op, VOIDmode)
7295
      || GET_MODE_BITSIZE (GET_MODE (op)) > BITS_PER_WORD)
7296
    return false;
7297
 
7298
  if (!IN_RANGE (width, 1, GET_MODE_BITSIZE (GET_MODE (op)) - 1))
7299
    return false;
7300
 
7301
  if (bitpos < 0 || bitpos + width > GET_MODE_BITSIZE (GET_MODE (op)))
7302
    return false;
7303
 
7304
  return true;
7305
}
7306
 
7307
/* Check if MASK and SHIFT are valid in mask-low-and-shift-left
7308
   operation if MAXLEN is the maxium length of consecutive bits that
7309
   can make up MASK.  MODE is the mode of the operation.  See
7310
   mask_low_and_shift_len for the actual definition.  */
7311
 
7312
bool
7313
mask_low_and_shift_p (enum machine_mode mode, rtx mask, rtx shift, int maxlen)
7314
{
7315
  return IN_RANGE (mask_low_and_shift_len (mode, mask, shift), 1, maxlen);
7316
}
7317
 
7318
/* Return true iff OP1 and OP2 are valid operands together for the
7319
   *and<MODE>3 and *and<MODE>3_mips16 patterns.  For the cases to consider,
7320
   see the table in the comment before the pattern.  */
7321
 
7322
bool
7323
and_operands_ok (enum machine_mode mode, rtx op1, rtx op2)
7324
{
7325
  return (memory_operand (op1, mode)
7326
          ? and_load_operand (op2, mode)
7327
          : and_reg_operand (op2, mode));
7328
}
7329
 
7330
/* The canonical form of a mask-low-and-shift-left operation is
7331
   (and (ashift X SHIFT) MASK) where MASK has the lower SHIFT number of bits
7332
   cleared.  Thus we need to shift MASK to the right before checking if it
7333
   is a valid mask value.  MODE is the mode of the operation.  If true
7334
   return the length of the mask, otherwise return -1.  */
7335
 
7336
int
7337
mask_low_and_shift_len (enum machine_mode mode, rtx mask, rtx shift)
7338
{
7339
  HOST_WIDE_INT shval;
7340
 
7341
  shval = INTVAL (shift) & (GET_MODE_BITSIZE (mode) - 1);
7342
  return exact_log2 ((UINTVAL (mask) >> shval) + 1);
7343
}
7344
 
7345
/* Return true if -msplit-addresses is selected and should be honored.
7346
 
7347
   -msplit-addresses is a half-way house between explicit relocations
7348
   and the traditional assembler macros.  It can split absolute 32-bit
7349
   symbolic constants into a high/lo_sum pair but uses macros for other
7350
   sorts of access.
7351
 
7352
   Like explicit relocation support for REL targets, it relies
7353
   on GNU extensions in the assembler and the linker.
7354
 
7355
   Although this code should work for -O0, it has traditionally
7356
   been treated as an optimization.  */
7357
 
7358
static bool
7359
mips_split_addresses_p (void)
7360
{
7361
  return (TARGET_SPLIT_ADDRESSES
7362
          && optimize
7363
          && !TARGET_MIPS16
7364
          && !flag_pic
7365
          && !ABI_HAS_64BIT_SYMBOLS);
7366
}
7367
 
7368
/* (Re-)Initialize mips_split_p, mips_lo_relocs and mips_hi_relocs.  */
7369
 
7370
static void
mips_init_relocs (void)
{
  /* Start from a clean slate; entries not set below stay false/null.  */
  memset (mips_split_p, '\0', sizeof (mips_split_p));
  memset (mips_split_hi_p, '\0', sizeof (mips_split_hi_p));
  memset (mips_use_pcrel_pool_p, '\0', sizeof (mips_use_pcrel_pool_p));
  memset (mips_hi_relocs, '\0', sizeof (mips_hi_relocs));
  memset (mips_lo_relocs, '\0', sizeof (mips_lo_relocs));

  if (TARGET_MIPS16_PCREL_LOADS)
    mips_use_pcrel_pool_p[SYMBOL_ABSOLUTE] = true;
  else
    {
      if (ABI_HAS_64BIT_SYMBOLS)
        {
          if (TARGET_EXPLICIT_RELOCS)
            {
              /* 64-bit addresses are built from four 16-bit pieces:
                 %highest/%higher/%hi/%lo.  */
              mips_split_p[SYMBOL_64_HIGH] = true;
              mips_hi_relocs[SYMBOL_64_HIGH] = "%highest(";
              mips_lo_relocs[SYMBOL_64_HIGH] = "%higher(";

              mips_split_p[SYMBOL_64_MID] = true;
              mips_hi_relocs[SYMBOL_64_MID] = "%higher(";
              mips_lo_relocs[SYMBOL_64_MID] = "%hi(";

              mips_split_p[SYMBOL_64_LOW] = true;
              mips_hi_relocs[SYMBOL_64_LOW] = "%hi(";
              mips_lo_relocs[SYMBOL_64_LOW] = "%lo(";

              mips_split_p[SYMBOL_ABSOLUTE] = true;
              mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
            }
        }
      else
        {
          if (TARGET_EXPLICIT_RELOCS
              || mips_split_addresses_p ()
              || TARGET_MIPS16)
            {
              /* 32-bit absolute addresses split into a %hi/%lo pair.  */
              mips_split_p[SYMBOL_ABSOLUTE] = true;
              mips_hi_relocs[SYMBOL_ABSOLUTE] = "%hi(";
              mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
            }
        }
    }

  if (TARGET_MIPS16)
    {
      /* The high part is provided by a pseudo copy of $gp.  */
      mips_split_p[SYMBOL_GP_RELATIVE] = true;
      mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gprel(";
    }
  else if (TARGET_EXPLICIT_RELOCS)
    /* Small data constants are kept whole until after reload,
       then lowered by mips_rewrite_small_data.  */
    mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gp_rel(";

  if (TARGET_EXPLICIT_RELOCS)
    {
      mips_split_p[SYMBOL_GOT_PAGE_OFST] = true;
      /* The new and old ABIs use different GOT relocation operators.  */
      if (TARGET_NEWABI)
        {
          mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got_page(";
          mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%got_ofst(";
        }
      else
        {
          mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got(";
          mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%lo(";
        }
      if (TARGET_MIPS16)
        /* Expose the use of $28 as soon as possible.  */
        mips_split_hi_p[SYMBOL_GOT_PAGE_OFST] = true;

      if (TARGET_XGOT)
        {
          /* The HIGH and LO_SUM are matched by special .md patterns.  */
          mips_split_p[SYMBOL_GOT_DISP] = true;

          mips_split_p[SYMBOL_GOTOFF_DISP] = true;
          mips_hi_relocs[SYMBOL_GOTOFF_DISP] = "%got_hi(";
          mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_lo(";

          mips_split_p[SYMBOL_GOTOFF_CALL] = true;
          mips_hi_relocs[SYMBOL_GOTOFF_CALL] = "%call_hi(";
          mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call_lo(";
        }
      else
        {
          if (TARGET_NEWABI)
            mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_disp(";
          else
            mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got(";
          mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call16(";
          if (TARGET_MIPS16)
            /* Expose the use of $28 as soon as possible.  */
            mips_split_p[SYMBOL_GOT_DISP] = true;
        }
    }

  if (TARGET_NEWABI)
    {
      mips_split_p[SYMBOL_GOTOFF_LOADGP] = true;
      mips_hi_relocs[SYMBOL_GOTOFF_LOADGP] = "%hi(%neg(%gp_rel(";
      mips_lo_relocs[SYMBOL_GOTOFF_LOADGP] = "%lo(%neg(%gp_rel(";
    }

  /* TLS relocations are independent of the target options above.  */
  mips_lo_relocs[SYMBOL_TLSGD] = "%tlsgd(";
  mips_lo_relocs[SYMBOL_TLSLDM] = "%tlsldm(";

  if (TARGET_MIPS16_PCREL_LOADS)
    {
      mips_use_pcrel_pool_p[SYMBOL_DTPREL] = true;
      mips_use_pcrel_pool_p[SYMBOL_TPREL] = true;
    }
  else
    {
      mips_split_p[SYMBOL_DTPREL] = true;
      mips_hi_relocs[SYMBOL_DTPREL] = "%dtprel_hi(";
      mips_lo_relocs[SYMBOL_DTPREL] = "%dtprel_lo(";

      mips_split_p[SYMBOL_TPREL] = true;
      mips_hi_relocs[SYMBOL_TPREL] = "%tprel_hi(";
      mips_lo_relocs[SYMBOL_TPREL] = "%tprel_lo(";
    }

  mips_lo_relocs[SYMBOL_GOTTPREL] = "%gottprel(";
  mips_lo_relocs[SYMBOL_HALF] = "%half(";
}
7499
 
7500
/* Print symbolic operand OP, which is part of a HIGH or LO_SUM
7501
   in context CONTEXT.  RELOCS is the array of relocations to use.  */
7502
 
7503
static void
7504
mips_print_operand_reloc (FILE *file, rtx op, enum mips_symbol_context context,
7505
                          const char **relocs)
7506
{
7507
  enum mips_symbol_type symbol_type;
7508
  const char *p;
7509
 
7510
  symbol_type = mips_classify_symbolic_expression (op, context);
7511
  gcc_assert (relocs[symbol_type]);
7512
 
7513
  fputs (relocs[symbol_type], file);
7514
  output_addr_const (file, mips_strip_unspec_address (op));
7515
  for (p = relocs[symbol_type]; *p != 0; p++)
7516
    if (*p == '(')
7517
      fputc (')', file);
7518
}
7519
 
7520
/* Start a new block with the given asm switch enabled.  If we need
7521
   to print a directive, emit PREFIX before it and SUFFIX after it.  */
7522
 
7523
static void
mips_push_asm_switch_1 (struct mips_asm_switch *asm_switch,
                        const char *prefix, const char *suffix)
{
  /* Only the outermost push emits the ".set no<name>" directive;
     nested pushes simply bump the reference count.  */
  if (asm_switch->nesting_level == 0)
    fprintf (asm_out_file, "%s.set\tno%s%s", prefix, asm_switch->name, suffix);
  asm_switch->nesting_level++;
}
7531
 
7532
/* Likewise, but end a block.  */
7533
 
7534
static void
mips_pop_asm_switch_1 (struct mips_asm_switch *asm_switch,
                       const char *prefix, const char *suffix)
{
  /* A pop with no matching push indicates unbalanced %(/%)-style
     operand punctuation in a template.  */
  gcc_assert (asm_switch->nesting_level);
  asm_switch->nesting_level--;
  /* Re-enable the assembler feature once the outermost region closes.  */
  if (asm_switch->nesting_level == 0)
    fprintf (asm_out_file, "%s.set\t%s%s", prefix, asm_switch->name, suffix);
}
7543
 
7544
/* Wrappers around mips_push_asm_switch_1 and mips_pop_asm_switch_1
7545
   that either print a complete line or print nothing.  */
7546
 
7547
void
mips_push_asm_switch (struct mips_asm_switch *asm_switch)
{
  /* Emit any ".set no<name>" directive on a line of its own.  */
  mips_push_asm_switch_1 (asm_switch, "\t", "\n");
}
7552
 
7553
void
mips_pop_asm_switch (struct mips_asm_switch *asm_switch)
{
  /* Emit any ".set <name>" directive on a line of its own.  */
  mips_pop_asm_switch_1 (asm_switch, "\t", "\n");
}
7558
 
7559
/* Print the text for PRINT_OPERAND punctuation character CH to FILE.
7560
   The punctuation characters are:
7561
 
7562
   '('  Start a nested ".set noreorder" block.
7563
   ')'  End a nested ".set noreorder" block.
7564
   '['  Start a nested ".set noat" block.
7565
   ']'  End a nested ".set noat" block.
7566
   '<'  Start a nested ".set nomacro" block.
7567
   '>'  End a nested ".set nomacro" block.
7568
   '*'  Behave like %(%< if generating a delayed-branch sequence.
7569
   '#'  Print a nop if in a ".set noreorder" block.
7570
   '/'  Like '#', but do nothing within a delayed-branch sequence.
7571
   '?'  Print "l" if mips_branch_likely is true
7572
   '~'  Print a nop if mips_branch_likely is true
7573
   '.'  Print the name of the register with a hard-wired zero (zero or $0).
7574
   '@'  Print the name of the assembler temporary register (at or $1).
7575
   '^'  Print the name of the pic call-through register (t9 or $25).
7576
   '+'  Print the name of the gp register (usually gp or $28).
7577
   '$'  Print the name of the stack pointer register (sp or $29).
7578
 
7579
   See also mips_init_print_operand_punct.  */
7580
 
7581
static void
mips_print_operand_punctuation (FILE *file, int ch)
{
  switch (ch)
    {
    case '(':
      /* The directive shares a line with the following instruction,
         hence the "\n\t" suffix and empty prefix.  */
      mips_push_asm_switch_1 (&mips_noreorder, "", "\n\t");
      break;

    case ')':
      mips_pop_asm_switch_1 (&mips_noreorder, "\n\t", "");
      break;

    case '[':
      mips_push_asm_switch_1 (&mips_noat, "", "\n\t");
      break;

    case ']':
      mips_pop_asm_switch_1 (&mips_noat, "\n\t", "");
      break;

    case '<':
      mips_push_asm_switch_1 (&mips_nomacro, "", "\n\t");
      break;

    case '>':
      mips_pop_asm_switch_1 (&mips_nomacro, "\n\t", "");
      break;

    case '*':
      /* Within a delayed-branch sequence, open both a noreorder and
         a nomacro region, exactly as %(%< would.  */
      if (final_sequence != 0)
        {
          mips_print_operand_punctuation (file, '(');
          mips_print_operand_punctuation (file, '<');
        }
      break;

    case '#':
      /* A nop is only needed when the assembler cannot fill the
         delay slot itself, i.e. inside ".set noreorder".  */
      if (mips_noreorder.nesting_level > 0)
        fputs ("\n\tnop", file);
      break;

    case '/':
      /* Print an extra newline so that the delayed insn is separated
         from the following ones.  This looks neater and is consistent
         with non-nop delayed sequences.  */
      if (mips_noreorder.nesting_level > 0 && final_sequence == 0)
        fputs ("\n\tnop\n", file);
      break;

    case '?':
      /* "l" turns a branch mnemonic into its branch-likely form.  */
      if (mips_branch_likely)
        putc ('l', file);
      break;

    case '~':
      /* Branch-likely variants annul the delay slot on a not-taken
         branch, so a nop is emitted to fill it.  */
      if (mips_branch_likely)
        fputs ("\n\tnop", file);
      break;

    case '.':
      fputs (reg_names[GP_REG_FIRST + 0], file);
      break;

    case '@':
      fputs (reg_names[AT_REGNUM], file);
      break;

    case '^':
      fputs (reg_names[PIC_FUNCTION_ADDR_REGNUM], file);
      break;

    case '+':
      fputs (reg_names[PIC_OFFSET_TABLE_REGNUM], file);
      break;

    case '$':
      fputs (reg_names[STACK_POINTER_REGNUM], file);
      break;

    default:
      /* mips_print_operand only dispatches here for characters that
         mips_init_print_operand_punct registered, so this is a bug.  */
      gcc_unreachable ();
      break;
    }
}
7666
 
7667
/* Initialize mips_print_operand_punct.  */
7668
 
7669
static void
7670
mips_init_print_operand_punct (void)
7671
{
7672
  const char *p;
7673
 
7674
  for (p = "()[]<>*#/?~.@^+$"; *p; p++)
7675
    mips_print_operand_punct[(unsigned char) *p] = true;
7676
}
7677
 
7678
/* PRINT_OPERAND prefix LETTER refers to the integer branch instruction
7679
   associated with condition CODE.  Print the condition part of the
7680
   opcode to FILE.  */
7681
 
7682
static void
mips_print_int_branch_condition (FILE *file, enum rtx_code code, int letter)
{
  switch (code)
    {
    case EQ:
    case NE:
    case GT:
    case GE:
    case LT:
    case LE:
    case GTU:
    case GEU:
    case LTU:
    case LEU:
      /* Conveniently, the MIPS names for these conditions are the same
         as their RTL equivalents.  */
      fputs (GET_RTX_NAME (code), file);
      break;

    default:
      /* Non-comparison codes indicate a bad '%C'/'%N' operand in a
         template; report it rather than asserting.  */
      output_operand_lossage ("'%%%c' is not a valid operand prefix", letter);
      break;
    }
}
7707
 
7708
/* Likewise floating-point branches.  */
7709
 
7710
static void
7711
mips_print_float_branch_condition (FILE *file, enum rtx_code code, int letter)
7712
{
7713
  switch (code)
7714
    {
7715
    case EQ:
7716
      fputs ("c1f", file);
7717
      break;
7718
 
7719
    case NE:
7720
      fputs ("c1t", file);
7721
      break;
7722
 
7723
    default:
7724
      output_operand_lossage ("'%%%c' is not a valid operand prefix", letter);
7725
      break;
7726
    }
7727
}
7728
 
7729
/* Implement TARGET_PRINT_OPERAND_PUNCT_VALID_P.  */
7730
 
7731
static bool
mips_print_operand_punct_valid_p (unsigned char code)
{
  /* The table is populated once by mips_init_print_operand_punct.  */
  return mips_print_operand_punct[code];
}
7736
 
7737
/* Implement TARGET_PRINT_OPERAND.  The MIPS-specific operand codes are:
7738
 
7739
   'X'  Print CONST_INT OP in hexadecimal format.
7740
   'x'  Print the low 16 bits of CONST_INT OP in hexadecimal format.
7741
   'd'  Print CONST_INT OP in decimal.
7742
   'm'  Print one less than CONST_INT OP in decimal.
7743
   'h'  Print the high-part relocation associated with OP, after stripping
7744
          any outermost HIGH.
7745
   'R'  Print the low-part relocation associated with OP.
7746
   'C'  Print the integer branch condition for comparison OP.
7747
   'N'  Print the inverse of the integer branch condition for comparison OP.
7748
   'F'  Print the FPU branch condition for comparison OP.
7749
   'W'  Print the inverse of the FPU branch condition for comparison OP.
7750
   'T'  Print 'f' for (eq:CC ...), 't' for (ne:CC ...),
7751
              'z' for (eq:?I ...), 'n' for (ne:?I ...).
7752
   't'  Like 'T', but with the EQ/NE cases reversed
7753
   'Y'  Print mips_fp_conditions[INTVAL (OP)]
7754
   'Z'  Print OP and a comma for ISA_HAS_8CC, otherwise print nothing.
7755
   'q'  Print a DSP accumulator register.
7756
   'D'  Print the second part of a double-word register or memory operand.
7757
   'L'  Print the low-order register in a double-word register operand.
7758
   'M'  Print high-order register in a double-word register operand.
7759
   'z'  Print $0 if OP is zero, otherwise print OP normally.  */
7760
 
7761
static void
mips_print_operand (FILE *file, rtx op, int letter)
{
  enum rtx_code code;

  /* Punctuation "operands" carry no rtx; handle them first.  */
  if (mips_print_operand_punct_valid_p (letter))
    {
      mips_print_operand_punctuation (file, letter);
      return;
    }

  gcc_assert (op);
  code = GET_CODE (op);

  switch (letter)
    {
    case 'X':
      if (CONST_INT_P (op))
        fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op));
      else
        output_operand_lossage ("invalid use of '%%%c'", letter);
      break;

    case 'x':
      if (CONST_INT_P (op))
        fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op) & 0xffff);
      else
        output_operand_lossage ("invalid use of '%%%c'", letter);
      break;

    case 'd':
      if (CONST_INT_P (op))
        fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (op));
      else
        output_operand_lossage ("invalid use of '%%%c'", letter);
      break;

    case 'm':
      if (CONST_INT_P (op))
        fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (op) - 1);
      else
        output_operand_lossage ("invalid use of '%%%c'", letter);
      break;

    case 'h':
      /* Strip any outermost HIGH before printing the hi-part reloc.  */
      if (code == HIGH)
        op = XEXP (op, 0);
      mips_print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_hi_relocs);
      break;

    case 'R':
      mips_print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_lo_relocs);
      break;

    case 'C':
      mips_print_int_branch_condition (file, code, letter);
      break;

    case 'N':
      mips_print_int_branch_condition (file, reverse_condition (code), letter);
      break;

    case 'F':
      mips_print_float_branch_condition (file, code, letter);
      break;

    case 'W':
      mips_print_float_branch_condition (file, reverse_condition (code),
                                         letter);
      break;

    case 'T':
    case 't':
      {
        /* Index into "zfnt": TRUTH selects the n/t half, CCmode
           selects the FPU letter ('f'/'t') over the GPR one ('z'/'n').  */
        int truth = (code == NE) == (letter == 'T');
        fputc ("zfnt"[truth * 2 + (GET_MODE (op) == CCmode)], file);
      }
      break;

    case 'Y':
      if (code == CONST_INT && UINTVAL (op) < ARRAY_SIZE (mips_fp_conditions))
        fputs (mips_fp_conditions[UINTVAL (op)], file);
      else
        output_operand_lossage ("'%%%c' is not a valid operand prefix",
                                letter);
      break;

    case 'Z':
      /* Only ISA_HAS_8CC targets name a condition-code register in the
         instruction; otherwise the operand is omitted entirely.  */
      if (ISA_HAS_8CC)
        {
          mips_print_operand (file, op, 0);
          fputc (',', file);
        }
      break;

    case 'q':
      /* DSP accumulators: the MD (hi/lo) pair is $ac0; otherwise take
         the digit from the "$acNhi"/"$acNlo" register name.  */
      if (code == REG && MD_REG_P (REGNO (op)))
        fprintf (file, "$ac0");
      else if (code == REG && DSP_ACC_REG_P (REGNO (op)))
        fprintf (file, "$ac%c", reg_names[REGNO (op)][3]);
      else
        output_operand_lossage ("invalid use of '%%%c'", letter);
      break;

    default:
      switch (code)
        {
        case REG:
          {
            unsigned int regno = REGNO (op);
            /* 'M'/'L' pick the high/low word of a double-word pair
               depending on endianness; 'D' always picks the second
               register of the pair.  */
            if ((letter == 'M' && TARGET_LITTLE_ENDIAN)
                || (letter == 'L' && TARGET_BIG_ENDIAN)
                || letter == 'D')
              regno++;
            else if (letter && letter != 'z' && letter != 'M' && letter != 'L')
              output_operand_lossage ("invalid use of '%%%c'", letter);
            /* We need to print $0 .. $31 for COP0 registers.  */
            if (COP0_REG_P (regno))
              fprintf (file, "$%s", &reg_names[regno][4]);
            else
              fprintf (file, "%s", reg_names[regno]);
          }
          break;

        case MEM:
          /* 'D' addresses the second word of a double-word operand.  */
          if (letter == 'D')
            output_address (plus_constant (XEXP (op, 0), 4));
          else if (letter && letter != 'z')
            output_operand_lossage ("invalid use of '%%%c'", letter);
          else
            output_address (XEXP (op, 0));
          break;

        default:
          /* '%z0' prints the zero register for a zero operand.  */
          if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
            fputs (reg_names[GP_REG_FIRST], file);
          else if (letter && letter != 'z')
            output_operand_lossage ("invalid use of '%%%c'", letter);
          else if (CONST_GP_P (op))
            fputs (reg_names[GLOBAL_POINTER_REGNUM], file);
          else
            output_addr_const (file, mips_strip_unspec_address (op));
          break;
        }
    }
}
7907
 
7908
/* Implement TARGET_PRINT_OPERAND_ADDRESS.  */
7909
 
7910
static void
mips_print_operand_address (FILE *file, rtx x)
{
  struct mips_address_info addr;

  if (mips_classify_address (&addr, x, word_mode, true))
    switch (addr.type)
      {
      case ADDRESS_REG:
        /* "offset(base)" form.  */
        mips_print_operand (file, addr.offset, 0);
        fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
        return;

      case ADDRESS_LO_SUM:
        /* Low-part relocation applied to the base register.  */
        mips_print_operand_reloc (file, addr.offset, SYMBOL_CONTEXT_MEM,
                                  mips_lo_relocs);
        fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
        return;

      case ADDRESS_CONST_INT:
        /* An absolute constant, addressed relative to $0.  */
        output_addr_const (file, x);
        fprintf (file, "(%s)", reg_names[GP_REG_FIRST]);
        return;

      case ADDRESS_SYMBOLIC:
        output_addr_const (file, mips_strip_unspec_address (x));
        return;
      }
  /* Every legitimate address should have been classified above.  */
  gcc_unreachable ();
}
7940
 
7941
/* Implement TARGET_ENCODE_SECTION_INFO.  */
7942
 
7943
static void
mips_encode_section_info (tree decl, rtx rtl, int first)
{
  default_encode_section_info (decl, rtl, first);

  if (TREE_CODE (decl) == FUNCTION_DECL)
    {
      rtx symbol = XEXP (rtl, 0);
      tree type = TREE_TYPE (decl);

      /* Encode whether the symbol is short or long.  "near" overrides
         -mlong-calls; "far" forces a long call regardless.  */
      if ((TARGET_LONG_CALLS && !mips_near_type_p (type))
          || mips_far_type_p (type))
        SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_LONG_CALL;
    }
}
7959
 
7960
/* Implement TARGET_SELECT_RTX_SECTION.  */
7961
 
7962
static section *
7963
mips_select_rtx_section (enum machine_mode mode, rtx x,
7964
                         unsigned HOST_WIDE_INT align)
7965
{
7966
  /* ??? Consider using mergeable small data sections.  */
7967
  if (mips_rtx_constant_in_small_data_p (mode))
7968
    return get_named_section (NULL, ".sdata", 0);
7969
 
7970
  return default_elf_select_rtx_section (mode, x, align);
7971
}
7972
 
7973
/* Implement TARGET_ASM_FUNCTION_RODATA_SECTION.
7974
 
7975
   The complication here is that, with the combination TARGET_ABICALLS
7976
   && !TARGET_ABSOLUTE_ABICALLS && !TARGET_GPWORD, jump tables will use
7977
   absolute addresses, and should therefore not be included in the
7978
   read-only part of a DSO.  Handle such cases by selecting a normal
7979
   data section instead of a read-only one.  The logic apes that in
7980
   default_function_rodata_section.  */
7981
 
7982
static section *
mips_function_rodata_section (tree decl)
{
  if (!TARGET_ABICALLS || TARGET_ABSOLUTE_ABICALLS || TARGET_GPWORD)
    return default_function_rodata_section (decl);

  if (decl && DECL_SECTION_NAME (decl))
    {
      const char *name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
      if (DECL_ONE_ONLY (decl) && strncmp (name, ".gnu.linkonce.t.", 16) == 0)
        {
          /* Rewrite ".gnu.linkonce.t.foo" to ".gnu.linkonce.d.foo":
             index 14 is the 't' in the section prefix.  */
          char *rname = ASTRDUP (name);
          rname[14] = 'd';
          return get_section (rname, SECTION_LINKONCE | SECTION_WRITE, decl);
        }
      else if (flag_function_sections
               && flag_data_sections
               && strncmp (name, ".text.", 6) == 0)
        {
          /* Rewrite ".text.foo" to ".data.foo".  */
          char *rname = ASTRDUP (name);
          memcpy (rname + 1, "data", 4);
          return get_section (rname, SECTION_WRITE, decl);
        }
    }
  /* No per-function section: fall back to writable data.  */
  return data_section;
}
8008
 
8009
/* Implement TARGET_IN_SMALL_DATA_P.  */
8010
 
8011
static bool
mips_in_small_data_p (const_tree decl)
{
  unsigned HOST_WIDE_INT size;

  /* Strings and functions are never small data.  */
  if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL)
    return false;

  /* We don't yet generate small-data references for -mabicalls
     or VxWorks RTP code.  See the related -G handling in
     mips_option_override.  */
  if (TARGET_ABICALLS || TARGET_VXWORKS_RTP)
    return false;

  if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0)
    {
      const char *name;

      /* Reject anything that isn't in a known small-data section.  */
      name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
      if (strcmp (name, ".sdata") != 0 && strcmp (name, ".sbss") != 0)
        return false;

      /* If a symbol is defined externally, the assembler will use the
         usual -G rules when deciding how to implement macros.  */
      if (mips_lo_relocs[SYMBOL_GP_RELATIVE] || !DECL_EXTERNAL (decl))
        return true;
    }
  else if (TARGET_EMBEDDED_DATA)
    {
      /* Don't put constants into the small data section: we want them
         to be in ROM rather than RAM.  */
      if (TREE_CODE (decl) != VAR_DECL)
        return false;

      if (TREE_READONLY (decl)
          && !TREE_SIDE_EFFECTS (decl)
          && (!DECL_INITIAL (decl) || TREE_CONSTANT (DECL_INITIAL (decl))))
        return false;
    }

  /* Enforce -mlocal-sdata.  */
  if (!TARGET_LOCAL_SDATA && !TREE_PUBLIC (decl))
    return false;

  /* Enforce -mextern-sdata.  */
  if (!TARGET_EXTERN_SDATA && DECL_P (decl))
    {
      if (DECL_EXTERNAL (decl))
        return false;
      if (DECL_COMMON (decl) && DECL_INITIAL (decl) == NULL)
        return false;
    }

  /* We have traditionally not treated zero-sized objects as small data,
     so this is now effectively part of the ABI.  */
  size = int_size_in_bytes (TREE_TYPE (decl));
  return size > 0 && size <= mips_small_data_threshold;
}
8070
 
8071
/* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P.  We don't want to use
8072
   anchors for small data: the GP register acts as an anchor in that
8073
   case.  We also don't want to use them for PC-relative accesses,
8074
   where the PC acts as an anchor.  */
8075
 
8076
static bool
8077
mips_use_anchors_for_symbol_p (const_rtx symbol)
8078
{
8079
  switch (mips_classify_symbol (symbol, SYMBOL_CONTEXT_MEM))
8080
    {
8081
    case SYMBOL_PC_RELATIVE:
8082
    case SYMBOL_GP_RELATIVE:
8083
      return false;
8084
 
8085
    default:
8086
      return default_use_anchors_for_symbol_p (symbol);
8087
    }
8088
}
8089
 
8090
/* The MIPS debug format wants all automatic variables and arguments
8091
   to be in terms of the virtual frame pointer (stack pointer before
8092
   any adjustment in the function), while the MIPS 3.0 linker wants
8093
   the frame pointer to be the stack pointer after the initial
8094
   adjustment.  So, we do the adjustment here.  The arg pointer (which
8095
   is eliminated) points to the virtual frame pointer, while the frame
8096
   pointer (which may be eliminated) points to the stack pointer after
8097
   the initial adjustments.  */
8098
 
8099
HOST_WIDE_INT
mips_debugger_offset (rtx addr, HOST_WIDE_INT offset)
{
  /* Split ADDR into a base register and a constant term.  */
  rtx offset2 = const0_rtx;
  rtx reg = eliminate_constant_term (addr, &offset2);

  /* If the caller passed no explicit offset, use the one from ADDR.  */
  if (offset == 0)
    offset = INTVAL (offset2);

  if (reg == stack_pointer_rtx
      || reg == frame_pointer_rtx
      || reg == hard_frame_pointer_rtx)
    {
      /* Rebase from the post-prologue stack pointer to the virtual
         frame pointer (the SP before any adjustment).  */
      offset -= cfun->machine->frame.total_size;
      if (reg == hard_frame_pointer_rtx)
        offset += cfun->machine->frame.hard_frame_pointer_offset;
    }

  /* sdbout_parms does not want this to crash for unrecognized cases.  */
#if 0
  else if (reg != arg_pointer_rtx)
    fatal_insn ("mips_debugger_offset called with non stack/frame/arg pointer",
                addr);
#endif

  return offset;
}
8126
 
8127
/* Implement ASM_OUTPUT_EXTERNAL.  */
8128
 
8129
void
8130
mips_output_external (FILE *file, tree decl, const char *name)
8131
{
8132
  default_elf_asm_output_external (file, decl, name);
8133
 
8134
  /* We output the name if and only if TREE_SYMBOL_REFERENCED is
8135
     set in order to avoid putting out names that are never really
8136
     used. */
8137
  if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
8138
    {
8139
      if (!TARGET_EXPLICIT_RELOCS && mips_in_small_data_p (decl))
8140
        {
8141
          /* When using assembler macros, emit .extern directives for
8142
             all small-data externs so that the assembler knows how
8143
             big they are.
8144
 
8145
             In most cases it would be safe (though pointless) to emit
8146
             .externs for other symbols too.  One exception is when an
8147
             object is within the -G limit but declared by the user to
8148
             be in a section other than .sbss or .sdata.  */
8149
          fputs ("\t.extern\t", file);
8150
          assemble_name (file, name);
8151
          fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC "\n",
8152
                   int_size_in_bytes (TREE_TYPE (decl)));
8153
        }
8154
    }
8155
}
8156
 
8157
/* Implement TARGET_ASM_OUTPUT_SOURCE_FILENAME.  */
8158
 
8159
static void
mips_output_filename (FILE *stream, const char *name)
{
  /* If we are emitting DWARF-2, let dwarf2out handle the ".file"
     directives.  */
  if (write_symbols == DWARF2_DEBUG)
    return;
  else if (mips_output_filename_first_time)
    {
      /* First filename seen: always emit a ".file" directive.  */
      mips_output_filename_first_time = 0;
      num_source_filenames += 1;
      current_function_file = name;
      fprintf (stream, "\t.file\t%d ", num_source_filenames);
      output_quoted_string (stream, name);
      putc ('\n', stream);
    }
  /* If we are emitting stabs, let dbxout.c handle this (except for
     the mips_output_filename_first_time case).  */
  else if (write_symbols == DBX_DEBUG)
    return;
  else if (name != current_function_file
           && strcmp (name, current_function_file) != 0)
    {
      /* The filename changed: allocate a new file number.  */
      num_source_filenames += 1;
      current_function_file = name;
      fprintf (stream, "\t.file\t%d ", num_source_filenames);
      output_quoted_string (stream, name);
      putc ('\n', stream);
    }
}
8189
 
8190
/* Implement TARGET_ASM_OUTPUT_DWARF_DTPREL.  */
8191
 
8192
static void ATTRIBUTE_UNUSED
8193
mips_output_dwarf_dtprel (FILE *file, int size, rtx x)
8194
{
8195
  switch (size)
8196
    {
8197
    case 4:
8198
      fputs ("\t.dtprelword\t", file);
8199
      break;
8200
 
8201
    case 8:
8202
      fputs ("\t.dtpreldword\t", file);
8203
      break;
8204
 
8205
    default:
8206
      gcc_unreachable ();
8207
    }
8208
  output_addr_const (file, x);
8209
  fputs ("+0x8000", file);
8210
}
8211
 
8212
/* Implement TARGET_DWARF_REGISTER_SPAN.  */
8213
 
8214
static rtx
mips_dwarf_register_span (rtx reg)
{
  rtx high, low;
  enum machine_mode mode;

  /* By default, GCC maps increasing register numbers to increasing
     memory locations, but paired FPRs are always little-endian,
     regardless of the prevailing endianness.  */
  mode = GET_MODE (reg);
  if (FP_REG_P (REGNO (reg))
      && TARGET_BIG_ENDIAN
      && MAX_FPRS_PER_FMT > 1
      && GET_MODE_SIZE (mode) > UNITS_PER_FPREG)
    {
      gcc_assert (GET_MODE_SIZE (mode) == UNITS_PER_HWFPVALUE);
      /* Describe the pair explicitly, high word first.  */
      high = mips_subword (reg, true);
      low = mips_subword (reg, false);
      return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, high, low));
    }

  /* NULL means "use the default single-register description".  */
  return NULL_RTX;
}
8237
 
8238
/* Implement ASM_OUTPUT_ASCII.  */
8239
 
8240
void
mips_output_ascii (FILE *stream, const char *string, size_t len)
{
  size_t idx;
  /* Current output column; 17 approximates the width of the
     "\t.ascii\t\"" prefix already printed.  */
  int column = 17;

  fprintf (stream, "\t.ascii\t\"");
  for (idx = 0; idx < len; idx++)
    {
      int ch = (unsigned char) string[idx];

      if (!ISPRINT (ch))
        {
          /* Non-printable bytes are emitted as octal escapes.  */
          fprintf (stream, "\\%03o", ch);
          column += 4;
        }
      else
        {
          /* Backslash-escape the two characters special inside
             an .ascii string.  */
          if (ch == '\\' || ch == '\"')
            {
              putc ('\\', stream);
              column++;
            }
          putc (ch, stream);
          column++;
        }

      /* Break overly long lines by starting a fresh .ascii directive,
         unless this was the final byte.  */
      if (column > 72 && idx + 1 < len)
        {
          column = 17;
          fprintf (stream, "\"\n\t.ascii\t\"");
        }
    }
  fprintf (stream, "\"\n");
}
8277
 
8278
/* Return the pseudo-op for full SYMBOL_(D)TPREL address *ADDR.
8279
   Update *ADDR with the operand that should be printed.  */
8280
 
8281
const char *
mips_output_tls_reloc_directive (rtx *addr)
{
  enum mips_symbol_type type;

  /* Classify before stripping, since the UNSPEC wrapper carries the
     symbol-type information.  */
  type = mips_classify_symbolic_expression (*addr, SYMBOL_CONTEXT_LEA);
  *addr = mips_strip_unspec_address (*addr);
  switch (type)
    {
    case SYMBOL_DTPREL:
      /* Word-sized directive for 32-bit pointers, doubleword otherwise.  */
      return Pmode == SImode ? ".dtprelword\t%0" : ".dtpreldword\t%0";

    case SYMBOL_TPREL:
      return Pmode == SImode ? ".tprelword\t%0" : ".tpreldword\t%0";

    default:
      gcc_unreachable ();
    }
}
8300
 
8301
/* Emit either a label, .comm, or .lcomm directive.  When using assembler
8302
   macros, mark the symbol as written so that mips_asm_output_external
8303
   won't emit an .extern for it.  STREAM is the output file, NAME is the
8304
   name of the symbol, INIT_STRING is the string that should be written
8305
   before the symbol and FINAL_STRING is the string that should be
8306
   written after it.  FINAL_STRING is a printf format that consumes the
8307
   remaining arguments.  */
8308
 
8309
void
mips_declare_object (FILE *stream, const char *name, const char *init_string,
                     const char *final_string, ...)
{
  va_list ap;

  fputs (init_string, stream);
  assemble_name (stream, name);
  /* FINAL_STRING is a printf format consuming the trailing arguments.  */
  va_start (ap, final_string);
  vfprintf (stream, final_string, ap);
  va_end (ap);

  if (!TARGET_EXPLICIT_RELOCS)
    {
      /* Mark the symbol as written so that mips_output_external
         won't emit an .extern for it.  */
      tree name_tree = get_identifier (name);
      TREE_ASM_WRITTEN (name_tree) = 1;
    }
}
8327
 
8328
/* Declare a common object of SIZE bytes using asm directive INIT_STRING.
8329
   NAME is the name of the object and ALIGN is the required alignment
8330
   in bytes.  TAKES_ALIGNMENT_P is true if the directive takes a third
8331
   alignment argument.  */
8332
 
8333
void
8334
mips_declare_common_object (FILE *stream, const char *name,
8335
                            const char *init_string,
8336
                            unsigned HOST_WIDE_INT size,
8337
                            unsigned int align, bool takes_alignment_p)
8338
{
8339
  if (!takes_alignment_p)
8340
    {
8341
      size += (align / BITS_PER_UNIT) - 1;
8342
      size -= size % (align / BITS_PER_UNIT);
8343
      mips_declare_object (stream, name, init_string,
8344
                           "," HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
8345
    }
8346
  else
8347
    mips_declare_object (stream, name, init_string,
8348
                         "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
8349
                         size, align / BITS_PER_UNIT);
8350
}
8351
 
8352
/* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON.  This is usually the same as the
8353
   elfos.h version, but we also need to handle -muninit-const-in-rodata.  */
8354
 
8355
void
mips_output_aligned_decl_common (FILE *stream, tree decl, const char *name,
                                 unsigned HOST_WIDE_INT size,
                                 unsigned int align)
{
  /* If the target wants uninitialized const declarations in
     .rdata then don't put them in .comm.  */
  if (TARGET_EMBEDDED_DATA
      && TARGET_UNINIT_CONST_IN_RODATA
      && TREE_CODE (decl) == VAR_DECL
      && TREE_READONLY (decl)
      && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node))
    {
      if (TREE_PUBLIC (decl) && DECL_NAME (decl))
        targetm.asm_out.globalize_label (stream, name);

      /* Reserve the space explicitly in .rodata instead of
         emitting a common symbol.  */
      switch_to_section (readonly_data_section);
      ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
      mips_declare_object (stream, name, "",
                           ":\n\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
                           size);
    }
  else
    mips_declare_common_object (stream, name, "\n\t.comm\t",
                                size, align, true);
}
8381
 
8382
#ifdef ASM_OUTPUT_SIZE_DIRECTIVE
8383
extern int size_directive_output;
8384
 
8385
/* Implement ASM_DECLARE_OBJECT_NAME.  This is like most of the standard ELF
8386
   definitions except that it uses mips_declare_object to emit the label.  */
8387
 
8388
void
8389
mips_declare_object_name (FILE *stream, const char *name,
8390
                          tree decl ATTRIBUTE_UNUSED)
8391
{
8392
#ifdef ASM_OUTPUT_TYPE_DIRECTIVE
8393
  ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
8394
#endif
8395
 
8396
  size_directive_output = 0;
8397
  if (!flag_inhibit_size_directive && DECL_SIZE (decl))
8398
    {
8399
      HOST_WIDE_INT size;
8400
 
8401
      size_directive_output = 1;
8402
      size = int_size_in_bytes (TREE_TYPE (decl));
8403
      ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
8404
    }
8405
 
8406
  mips_declare_object (stream, name, "", ":\n");
8407
}
8408
 
8409
/* Implement ASM_FINISH_DECLARE_OBJECT.  This is generic ELF stuff.  */
8410
 
8411
void
mips_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end)
{
  const char *name;

  name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
  /* Emit a deferred .size directive for tentative definitions whose
     size only became known at the end of the translation unit, and
     only if mips_declare_object_name didn't already emit one.  */
  if (!flag_inhibit_size_directive
      && DECL_SIZE (decl) != 0
      && !at_end
      && top_level
      && DECL_INITIAL (decl) == error_mark_node
      && !size_directive_output)
    {
      HOST_WIDE_INT size;

      size_directive_output = 1;
      size = int_size_in_bytes (TREE_TYPE (decl));
      ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
    }
}
8431
#endif
8432
 
8433
/* Return the FOO in the name of the ".mdebug.FOO" section associated
8434
   with the current ABI.  */
8435
 
8436
static const char *
8437
mips_mdebug_abi_name (void)
8438
{
8439
  switch (mips_abi)
8440
    {
8441
    case ABI_32:
8442
      return "abi32";
8443
    case ABI_O64:
8444
      return "abiO64";
8445
    case ABI_N32:
8446
      return "abiN32";
8447
    case ABI_64:
8448
      return "abi64";
8449
    case ABI_EABI:
8450
      return TARGET_64BIT ? "eabi64" : "eabi32";
8451
    default:
8452
      gcc_unreachable ();
8453
    }
8454
}
8455
 
8456
/* Implement TARGET_ASM_FILE_START.  */
8457
 
8458
static void
mips_file_start (void)
{
  default_file_start ();

  /* Generate a special section to describe the ABI switches used to
     produce the resultant binary.  This is unnecessary on IRIX and
     causes unwanted warnings from the native linker.  */
  if (!TARGET_IRIX6)
    {
      /* Record the ABI itself.  Modern versions of binutils encode
         this information in the ELF header flags, but GDB needs the
         information in order to correctly debug binaries produced by
         older binutils.  See the function mips_gdbarch_init in
         gdb/mips-tdep.c.  */
      fprintf (asm_out_file, "\t.section .mdebug.%s\n\t.previous\n",
               mips_mdebug_abi_name ());

      /* There is no ELF header flag to distinguish long32 forms of the
         EABI from long64 forms.  Emit a special section to help tools
         such as GDB.  Do the same for o64, which is sometimes used with
         -mlong64.  */
      if (mips_abi == ABI_EABI || mips_abi == ABI_O64)
        fprintf (asm_out_file, "\t.section .gcc_compiled_long%d\n"
                 "\t.previous\n", TARGET_LONG64 ? 64 : 32);

#ifdef HAVE_AS_GNU_ATTRIBUTE
      {
        int attr;

        /* Classify the floating-point ABI; the first matching case wins.  */
        /* No floating-point operations, -mno-float.  */
        if (TARGET_NO_FLOAT)
          attr = 0;
        /* Soft-float code, -msoft-float.  */
        else if (!TARGET_HARD_FLOAT_ABI)
          attr = 3;
        /* Single-float code, -msingle-float.  */
        else if (!TARGET_DOUBLE_FLOAT)
          attr = 2;
        /* 64-bit FP registers on a 32-bit target, -mips32r2 -mfp64.  */
        else if (!TARGET_64BIT && TARGET_FLOAT64)
          attr = 4;
        /* Regular FP code, FP regs same size as GP regs, -mdouble-float.  */
        else
          attr = 1;

        /* Record the classification as GNU object attribute 4 so the
           linker and other tools can detect FP-ABI mismatches.  */
        fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n", attr);
      }
#endif
    }

  /* If TARGET_ABICALLS, tell GAS to generate -KPIC code.  */
  if (TARGET_ABICALLS)
    {
      fprintf (asm_out_file, "\t.abicalls\n");
      /* ...but suppress the PIC prologue for functions compiled
         without abicalls PIC (-mno-abicalls-style PIC0 calls).  */
      if (TARGET_ABICALLS_PIC0)
        fprintf (asm_out_file, "\t.option\tpic0\n");
    }

  /* Leave a human-readable summary of the key code-generation options
     in the assembly output when -fverbose-asm is in effect.  */
  if (flag_verbose_asm)
    fprintf (asm_out_file, "\n%s -G value = %d, Arch = %s, ISA = %d\n",
             ASM_COMMENT_START,
             mips_small_data_threshold, mips_arch_info->name, mips_isa);
}
8522
 
8523
/* Implement TARGET_ASM_CODE_END.  */
8524
 
8525
static void
8526
mips_code_end (void)
8527
{
8528
  if (mips_need_mips16_rdhwr_p)
8529
    mips_output_mips16_rdhwr ();
8530
}
8531
 
8532
/* Make the last instruction frame-related and note that it performs
8533
   the operation described by FRAME_PATTERN.  */
8534
 
8535
static void
8536
mips_set_frame_expr (rtx frame_pattern)
8537
{
8538
  rtx insn;
8539
 
8540
  insn = get_last_insn ();
8541
  RTX_FRAME_RELATED_P (insn) = 1;
8542
  REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
8543
                                      frame_pattern,
8544
                                      REG_NOTES (insn));
8545
}
8546
 
8547
/* Return a frame-related rtx that stores REG at MEM.
8548
   REG must be a single register.  */
8549
 
8550
static rtx
8551
mips_frame_set (rtx mem, rtx reg)
8552
{
8553
  rtx set;
8554
 
8555
  set = gen_rtx_SET (VOIDmode, mem, reg);
8556
  RTX_FRAME_RELATED_P (set) = 1;
8557
 
8558
  return set;
8559
}
8560
 
8561
/* Record that the epilogue has restored call-saved register REG.  */
8562
 
8563
static void
8564
mips_add_cfa_restore (rtx reg)
8565
{
8566
  mips_epilogue.cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
8567
                                               mips_epilogue.cfa_restores);
8568
}
8569
 
8570
/* If a MIPS16e SAVE or RESTORE instruction saves or restores register
   mips16e_s2_s8_regs[X], it must also save the registers in indexes
   X + 1 onwards.  Likewise mips16e_a0_a3_regs.  */

/* Hardware numbers for the software range $s2...$s8, listed from the
   highest ($s8, hard register 30) down to the lowest ($s2, hard
   register 18); the hardware numbering is not consecutive.  */
static const unsigned char mips16e_s2_s8_regs[] = {
  30, 23, 22, 21, 20, 19, 18
};
/* Hardware numbers for the argument registers $a0...$a3.  */
static const unsigned char mips16e_a0_a3_regs[] = {
  4, 5, 6, 7
};

/* A list of the registers that can be saved by the MIPS16e SAVE instruction,
   ordered from the uppermost in memory to the lowest in memory.  */
static const unsigned char mips16e_save_restore_regs[] = {
  31, 30, 23, 22, 21, 20, 19, 18, 17, 16, 7, 6, 5, 4
};
8585
 
8586
/* Return the index of the lowest X in the range [0, SIZE) for which
8587
   bit REGS[X] is set in MASK.  Return SIZE if there is no such X.  */
8588
 
8589
static unsigned int
mips16e_find_first_register (unsigned int mask, const unsigned char *regs,
                             unsigned int size)
{
  /* Scan REGS[0..SIZE) for the first register whose bit is set in MASK;
     the result is SIZE when no such register exists.  */
  unsigned int i = 0;

  while (i < size && !BITSET_P (mask, regs[i]))
    i++;
  return i;
}
8601
 
8602
/* *MASK_PTR is a mask of general-purpose registers and *NUM_REGS_PTR
8603
   is the number of set bits.  If *MASK_PTR contains REGS[X] for some X
8604
   in [0, SIZE), adjust *MASK_PTR and *NUM_REGS_PTR so that the same
8605
   is true for all indexes (X, SIZE).  */
8606
 
8607
static void
mips16e_mask_registers (unsigned int *mask_ptr, const unsigned char *regs,
                        unsigned int size, unsigned int *num_regs_ptr)
{
  unsigned int first = mips16e_find_first_register (*mask_ptr, regs, size);
  unsigned int i;

  /* Once any register in REGS is present in the mask, force every
     later entry of REGS into the mask too, counting each addition.  */
  for (i = first + 1; i < size; i++)
    if (!BITSET_P (*mask_ptr, regs[i]))
      {
        *mask_ptr |= 1 << regs[i];
        *num_regs_ptr += 1;
      }
}
8621
 
8622
/* Return a simplified form of X using the register values in REG_VALUES.
8623
   REG_VALUES[R] is the last value assigned to hard register R, or null
8624
   if R has not been modified.
8625
 
8626
   This function is rather limited, but is good enough for our purposes.  */
8627
 
8628
static rtx
mips16e_collect_propagate_value (rtx x, rtx *reg_values)
{
  /* Resolve references to the constant pool into the constants
     themselves before looking at the expression's shape.  */
  x = avoid_constant_pool_reference (x);

  if (UNARY_P (x))
    {
      /* Simplify the operand recursively, then try to fold the
         unary operation over the simplified operand.  */
      rtx x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
      return simplify_gen_unary (GET_CODE (x), GET_MODE (x),
                                 x0, GET_MODE (XEXP (x, 0)));
    }

  if (ARITHMETIC_P (x))
    {
      /* Likewise for binary arithmetic: simplify both operands first.  */
      rtx x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
      rtx x1 = mips16e_collect_propagate_value (XEXP (x, 1), reg_values);
      return simplify_gen_binary (GET_CODE (x), GET_MODE (x), x0, x1);
    }

  /* Substitute a register with its recorded value, but only when that
     value cannot change behind our back (e.g. is not volatile).  */
  if (REG_P (x)
      && reg_values[REGNO (x)]
      && !rtx_unstable_p (reg_values[REGNO (x)]))
    return reg_values[REGNO (x)];

  return x;
}
8654
 
8655
/* Return true if (set DEST SRC) stores an argument register into its
8656
   caller-allocated save slot, storing the number of that argument
8657
   register in *REGNO_PTR if so.  REG_VALUES is as for
8658
   mips16e_collect_propagate_value.  */
8659
 
8660
static bool
mips16e_collect_argument_save_p (rtx dest, rtx src, rtx *reg_values,
                                 unsigned int *regno_ptr)
{
  unsigned int argno, regno;
  HOST_WIDE_INT offset, required_offset;
  rtx addr, base;

  /* Check that this is a word-mode store.  */
  if (!MEM_P (dest) || !REG_P (src) || GET_MODE (dest) != word_mode)
    return false;

  /* Check that the register being saved is an unmodified argument
     register.  (A nonnull REG_VALUES entry means the register has
     been written since function entry.)  */
  regno = REGNO (src);
  if (!IN_RANGE (regno, GP_ARG_FIRST, GP_ARG_LAST) || reg_values[regno])
    return false;
  argno = regno - GP_ARG_FIRST;

  /* Check whether the address is an appropriate stack-pointer or
     frame-pointer access.  The caller-allocated save slot for argument
     ARGNO lives at TOTAL_SIZE + ARGNO * UNITS_PER_WORD from the value
     of the stack pointer on entry; rebase frame-pointer addresses.  */
  addr = mips16e_collect_propagate_value (XEXP (dest, 0), reg_values);
  mips_split_plus (addr, &base, &offset);
  required_offset = cfun->machine->frame.total_size + argno * UNITS_PER_WORD;
  if (base == hard_frame_pointer_rtx)
    required_offset -= cfun->machine->frame.hard_frame_pointer_offset;
  else if (base != stack_pointer_rtx)
    return false;
  if (offset != required_offset)
    return false;

  *regno_ptr = regno;
  return true;
}
8694
 
8695
/* A subroutine of mips_expand_prologue, called only when generating
8696
   MIPS16e SAVE instructions.  Search the start of the function for any
8697
   instructions that save argument registers into their caller-allocated
8698
   save slots.  Delete such instructions and return a value N such that
8699
   saving [GP_ARG_FIRST, GP_ARG_FIRST + N) would make all the deleted
8700
   instructions redundant.  */
8701
 
8702
static unsigned int
mips16e_collect_argument_saves (void)
{
  /* reg_values[R] is the last value written to hard register R during
     this scan, or null if R is still unmodified.  */
  rtx reg_values[FIRST_PSEUDO_REGISTER];
  rtx insn, next, set, dest, src;
  unsigned int nargs, regno;

  push_topmost_sequence ();
  nargs = 0;
  memset (reg_values, 0, sizeof (reg_values));
  for (insn = get_insns (); insn; insn = next)
    {
      /* Fetch NEXT first: the current insn may be deleted below.  */
      next = NEXT_INSN (insn);
      if (NOTE_P (insn) || DEBUG_INSN_P (insn))
        continue;

      /* Stop at the first real non-insn (e.g. a barrier or label).  */
      if (!INSN_P (insn))
        break;

      /* Only simple single-SET patterns are analyzable here.  */
      set = PATTERN (insn);
      if (GET_CODE (set) != SET)
        break;

      dest = SET_DEST (set);
      src = SET_SRC (set);
      if (mips16e_collect_argument_save_p (dest, src, reg_values, &regno))
        {
          /* Only delete the save if the prologue isn't going to save
             the register for other reasons anyway.  */
          if (!BITSET_P (cfun->machine->frame.mask, regno))
            {
              delete_insn (insn);
              nargs = MAX (nargs, (regno - GP_ARG_FIRST) + 1);
            }
        }
      else if (REG_P (dest) && GET_MODE (dest) == word_mode)
        /* Track the new value of DEST so later addresses can be
           propagated through it.  */
        reg_values[REGNO (dest)]
          = mips16e_collect_propagate_value (src, reg_values);
      else
        /* Any other kind of instruction ends the prologue scan.  */
        break;
    }
  pop_topmost_sequence ();

  return nargs;
}
8745
 
8746
/* Return a move between register REGNO and memory location SP + OFFSET.
8747
   REG_PARM_P is true if SP + OFFSET belongs to REG_PARM_STACK_SPACE.
8748
   Make the move a load if RESTORE_P, otherwise make it a store.  */
8749
 
8750
static rtx
8751
mips16e_save_restore_reg (bool restore_p, bool reg_parm_p,
8752
                          HOST_WIDE_INT offset, unsigned int regno)
8753
{
8754
  rtx reg, mem;
8755
 
8756
  mem = gen_frame_mem (SImode, plus_constant (stack_pointer_rtx, offset));
8757
  reg = gen_rtx_REG (SImode, regno);
8758
  if (restore_p)
8759
    {
8760
      mips_add_cfa_restore (reg);
8761
      return gen_rtx_SET (VOIDmode, reg, mem);
8762
    }
8763
  if (reg_parm_p)
8764
    return gen_rtx_SET (VOIDmode, mem, reg);
8765
  return mips_frame_set (mem, reg);
8766
}
8767
 
8768
/* Return RTL for a MIPS16e SAVE or RESTORE instruction; RESTORE_P says which.
8769
   The instruction must:
8770
 
8771
     - Allocate or deallocate SIZE bytes in total; SIZE is known
8772
       to be nonzero.
8773
 
8774
     - Save or restore as many registers in *MASK_PTR as possible.
8775
       The instruction saves the first registers at the top of the
8776
       allocated area, with the other registers below it.
8777
 
8778
     - Save NARGS argument registers above the allocated area.
8779
 
8780
   (NARGS is always zero if RESTORE_P.)
8781
 
8782
   The SAVE and RESTORE instructions cannot save and restore all general
8783
   registers, so there may be some registers left over for the caller to
8784
   handle.  Destructively modify *MASK_PTR so that it contains the registers
8785
   that still need to be saved or restored.  The caller can save these
8786
   registers in the memory immediately below *OFFSET_PTR, which is a
8787
   byte offset from the bottom of the allocated stack area.  */
8788
 
8789
static rtx
mips16e_build_save_restore (bool restore_p, unsigned int *mask_ptr,
                            HOST_WIDE_INT *offset_ptr, unsigned int nargs,
                            HOST_WIDE_INT size)
{
  rtx pattern, set;
  HOST_WIDE_INT offset, top_offset;
  unsigned int i, regno;
  int n;

  /* MIPS16e SAVE/RESTORE handles GPRs only; the frame must not
     require any FP register saves.  */
  gcc_assert (cfun->machine->frame.num_fp == 0);

  /* Calculate the number of elements in the PARALLEL.  We need one element
     for the stack adjustment, one for each argument register save, and one
     for each additional register move.  */
  n = 1 + nargs;
  for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
    if (BITSET_P (*mask_ptr, mips16e_save_restore_regs[i]))
      n++;

  /* Create the final PARALLEL.  */
  pattern = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (n));
  n = 0;

  /* Add the stack pointer adjustment.  RESTORE deallocates (adds SIZE),
     SAVE allocates (subtracts SIZE).  */
  set = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
                     plus_constant (stack_pointer_rtx,
                                    restore_p ? size : -size));
  RTX_FRAME_RELATED_P (set) = 1;
  XVECEXP (pattern, 0, n++) = set;

  /* Stack offsets in the PARALLEL are relative to the old stack pointer.  */
  top_offset = restore_p ? size : 0;

  /* Save the arguments.  The argument slots sit above the allocated
     area, in increasing order from TOP_OFFSET.  */
  for (i = 0; i < nargs; i++)
    {
      offset = top_offset + i * UNITS_PER_WORD;
      set = mips16e_save_restore_reg (restore_p, true, offset,
                                      GP_ARG_FIRST + i);
      XVECEXP (pattern, 0, n++) = set;
    }

  /* Then fill in the other register moves, working downwards from the
     top of the allocated area in mips16e_save_restore_regs order.  */
  offset = top_offset;
  for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
    {
      regno = mips16e_save_restore_regs[i];
      if (BITSET_P (*mask_ptr, regno))
        {
          offset -= UNITS_PER_WORD;
          set = mips16e_save_restore_reg (restore_p, false, offset, regno);
          XVECEXP (pattern, 0, n++) = set;
          /* This register is now handled; remove it from the mask of
             registers the caller still has to deal with.  */
          *mask_ptr &= ~(1 << regno);
        }
    }

  /* Tell the caller what offset it should use for the remaining registers.  */
  *offset_ptr = size + (offset - top_offset);

  gcc_assert (n == XVECLEN (pattern, 0));

  return pattern;
}
8853
 
8854
/* PATTERN is a PARALLEL whose first element adds ADJUST to the stack
8855
   pointer.  Return true if PATTERN matches the kind of instruction
8856
   generated by mips16e_build_save_restore.  If INFO is nonnull,
8857
   initialize it when returning true.  */
8858
 
8859
bool
mips16e_save_restore_pattern_p (rtx pattern, HOST_WIDE_INT adjust,
                                struct mips16e_save_restore_info *info)
{
  unsigned int i, nargs, mask, extra;
  HOST_WIDE_INT top_offset, save_offset, offset;
  rtx set, reg, mem, base;
  int n;

  if (!GENERATE_MIPS16E_SAVE_RESTORE)
    return false;

  /* ADJUST > 0 means a RESTORE (stack deallocation); ADJUST < 0 means
     a SAVE.  Stack offsets in the PARALLEL are relative to the old
     stack pointer.  */
  top_offset = adjust > 0 ? adjust : 0;

  /* Interpret all other members of the PARALLEL.  */
  save_offset = top_offset - UNITS_PER_WORD;
  mask = 0;
  nargs = 0;
  /* I indexes mips16e_save_restore_regs; saved registers must appear
     in that array's order.  */
  i = 0;
  for (n = 1; n < XVECLEN (pattern, 0); n++)
    {
      /* Check that we have a SET.  */
      set = XVECEXP (pattern, 0, n);
      if (GET_CODE (set) != SET)
        return false;

      /* Check that the SET is a load (if restoring) or a store
         (if saving).  */
      mem = adjust > 0 ? SET_SRC (set) : SET_DEST (set);
      if (!MEM_P (mem))
        return false;

      /* Check that the address is the sum of the stack pointer and a
         possibly-zero constant offset.  */
      mips_split_plus (XEXP (mem, 0), &base, &offset);
      if (base != stack_pointer_rtx)
        return false;

      /* Check that SET's other operand is a register.  */
      reg = adjust > 0 ? SET_DEST (set) : SET_SRC (set);
      if (!REG_P (reg))
        return false;

      /* Check for argument saves.  These must use consecutive argument
         registers and consecutive slots above the allocated area.  */
      if (offset == top_offset + nargs * UNITS_PER_WORD
          && REGNO (reg) == GP_ARG_FIRST + nargs)
        nargs++;
      else if (offset == save_offset)
        {
          while (mips16e_save_restore_regs[i++] != REGNO (reg))
            if (i == ARRAY_SIZE (mips16e_save_restore_regs))
              return false;

          mask |= 1 << REGNO (reg);
          save_offset -= UNITS_PER_WORD;
        }
      else
        return false;
    }

  /* Check that the restrictions on register ranges are met.  EXTRA
     counts registers that would have to be added to MASK to make it a
     legal SAVE/RESTORE range; any addition means PATTERN does not
     match the instruction.  */
  extra = 0;
  mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
                          ARRAY_SIZE (mips16e_s2_s8_regs), &extra);
  mips16e_mask_registers (&mask, mips16e_a0_a3_regs,
                          ARRAY_SIZE (mips16e_a0_a3_regs), &extra);
  if (extra != 0)
    return false;

  /* Make sure that the topmost argument register is not saved twice.
     The checks above ensure that the same is then true for the other
     argument registers.  */
  if (nargs > 0 && BITSET_P (mask, GP_ARG_FIRST + nargs - 1))
    return false;

  /* Pass back information, if requested.  */
  if (info)
    {
      info->nargs = nargs;
      info->mask = mask;
      info->size = (adjust > 0 ? adjust : -adjust);
    }

  return true;
}
8945
 
8946
/* Add a MIPS16e SAVE or RESTORE register-range argument to string S
8947
   for the register range [MIN_REG, MAX_REG].  Return a pointer to
8948
   the null terminator.  */
8949
 
8950
static char *
8951
mips16e_add_register_range (char *s, unsigned int min_reg,
8952
                            unsigned int max_reg)
8953
{
8954
  if (min_reg != max_reg)
8955
    s += sprintf (s, ",%s-%s", reg_names[min_reg], reg_names[max_reg]);
8956
  else
8957
    s += sprintf (s, ",%s", reg_names[min_reg]);
8958
  return s;
8959
}
8960
 
8961
/* Return the assembly instruction for a MIPS16e SAVE or RESTORE instruction.
8962
   PATTERN and ADJUST are as for mips16e_save_restore_pattern_p.  */
8963
 
8964
const char *
mips16e_output_save_restore (rtx pattern, HOST_WIDE_INT adjust)
{
  /* The result is built in a static buffer and is only valid until
     the next call.  */
  static char buffer[300];

  struct mips16e_save_restore_info info;
  unsigned int i, end;
  char *s;

  /* Parse the pattern.  It must already have been validated, so any
     failure here is a compiler bug.  */
  if (!mips16e_save_restore_pattern_p (pattern, adjust, &info))
    gcc_unreachable ();

  /* Add the mnemonic.  ADJUST > 0 is a deallocation, i.e. a RESTORE.  */
  s = strcpy (buffer, adjust > 0 ? "restore\t" : "save\t");
  s += strlen (s);

  /* Save the arguments.  */
  if (info.nargs > 1)
    s += sprintf (s, "%s-%s,", reg_names[GP_ARG_FIRST],
                  reg_names[GP_ARG_FIRST + info.nargs - 1]);
  else if (info.nargs == 1)
    s += sprintf (s, "%s,", reg_names[GP_ARG_FIRST]);

  /* Emit the amount of stack space to allocate or deallocate.  */
  s += sprintf (s, "%d", (int) info.size);

  /* Save or restore $16.  */
  if (BITSET_P (info.mask, 16))
    s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 16]);

  /* Save or restore $17.  */
  if (BITSET_P (info.mask, 17))
    s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 17]);

  /* Save or restore registers in the range $s2...$s8, which
     mips16e_s2_s8_regs lists in decreasing order.  Note that this
     is a software register range; the hardware registers are not
     numbered consecutively.  */
  end = ARRAY_SIZE (mips16e_s2_s8_regs);
  i = mips16e_find_first_register (info.mask, mips16e_s2_s8_regs, end);
  if (i < end)
    s = mips16e_add_register_range (s, mips16e_s2_s8_regs[end - 1],
                                    mips16e_s2_s8_regs[i]);

  /* Save or restore registers in the range $a0...$a3.  */
  end = ARRAY_SIZE (mips16e_a0_a3_regs);
  i = mips16e_find_first_register (info.mask, mips16e_a0_a3_regs, end);
  if (i < end)
    s = mips16e_add_register_range (s, mips16e_a0_a3_regs[i],
                                    mips16e_a0_a3_regs[end - 1]);

  /* Save or restore $31.  */
  if (BITSET_P (info.mask, RETURN_ADDR_REGNUM))
    s += sprintf (s, ",%s", reg_names[RETURN_ADDR_REGNUM]);

  return buffer;
}
9022
 
9023
/* Return true if the current function returns its value in a floating-point
9024
   register in MIPS16 mode.  */
9025
 
9026
static bool
9027
mips16_cfun_returns_in_fpr_p (void)
9028
{
9029
  tree return_type = DECL_RESULT (current_function_decl);
9030
  return (TARGET_MIPS16
9031
          && TARGET_HARD_FLOAT_ABI
9032
          && !aggregate_value_p (return_type, current_function_decl)
9033
          && mips_return_mode_in_fpr_p (DECL_MODE (return_type)));
9034
}
9035
 
9036
/* Return true if predicate PRED is true for at least one instruction.
9037
   Cache the result in *CACHE, and assume that the result is true
9038
   if *CACHE is already true.  */
9039
 
9040
static bool
9041
mips_find_gp_ref (bool *cache, bool (*pred) (rtx))
9042
{
9043
  rtx insn;
9044
 
9045
  if (!*cache)
9046
    {
9047
      push_topmost_sequence ();
9048
      for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9049
        if (USEFUL_INSN_P (insn) && pred (insn))
9050
          {
9051
            *cache = true;
9052
            break;
9053
          }
9054
      pop_topmost_sequence ();
9055
    }
9056
  return *cache;
9057
}
9058
 
9059
/* Return true if INSN refers to the global pointer in an "inflexible" way.
9060
   See mips_cfun_has_inflexible_gp_ref_p for details.  */
9061
 
9062
static bool
mips_insn_has_inflexible_gp_ref_p (rtx insn)
{
  /* Uses of pic_offset_table_rtx in CALL_INSN_FUNCTION_USAGE
     indicate that the target could be a traditional MIPS
     lazily-binding stub.  (NOTE(review): find_reg_fusage only looks
     at call insns' usage lists, so non-call insns should always
     yield false here — confirm against rtlanal.c.)  */
  return find_reg_fusage (insn, USE, pic_offset_table_rtx);
}
9070
 
9071
/* Return true if the current function refers to the global pointer
9072
   in a way that forces $28 to be valid.  This means that we can't
9073
   change the choice of global pointer, even for NewABI code.
9074
 
9075
   One example of this (and one which needs several checks) is that
9076
   $28 must be valid when calling traditional MIPS lazy-binding stubs.
9077
   (This restriction does not apply to PLTs.)  */
9078
 
9079
static bool
9080
mips_cfun_has_inflexible_gp_ref_p (void)
9081
{
9082
  /* If the function has a nonlocal goto, $28 must hold the correct
9083
     global pointer for the target function.  That is, the target
9084
     of the goto implicitly uses $28.  */
9085
  if (crtl->has_nonlocal_goto)
9086
    return true;
9087
 
9088
  if (TARGET_ABICALLS_PIC2)
9089
    {
9090
      /* Symbolic accesses implicitly use the global pointer unless
9091
         -mexplicit-relocs is in effect.  JAL macros to symbolic addresses
9092
         might go to traditional MIPS lazy-binding stubs.  */
9093
      if (!TARGET_EXPLICIT_RELOCS)
9094
        return true;
9095
 
9096
      /* FUNCTION_PROFILER includes a JAL to _mcount, which again
9097
         can be lazily-bound.  */
9098
      if (crtl->profile)
9099
        return true;
9100
 
9101
      /* MIPS16 functions that return in FPRs need to call an
9102
         external libgcc routine.  This call is only made explict
9103
         during mips_expand_epilogue, and it too might be lazily bound.  */
9104
      if (mips16_cfun_returns_in_fpr_p ())
9105
        return true;
9106
    }
9107
 
9108
  return mips_find_gp_ref (&cfun->machine->has_inflexible_gp_insn_p,
9109
                           mips_insn_has_inflexible_gp_ref_p);
9110
}
9111
 
9112
/* Return true if INSN refers to the global pointer in a "flexible" way.
9113
   See mips_cfun_has_flexible_gp_ref_p for details.  */
9114
 
9115
static bool
9116
mips_insn_has_flexible_gp_ref_p (rtx insn)
9117
{
9118
  return (get_attr_got (insn) != GOT_UNSET
9119
          || mips_small_data_pattern_p (PATTERN (insn))
9120
          || reg_overlap_mentioned_p (pic_offset_table_rtx, PATTERN (insn)));
9121
}
9122
 
9123
/* Return true if the current function references the global pointer,
9124
   but if those references do not inherently require the global pointer
9125
   to be $28.  Assume !mips_cfun_has_inflexible_gp_ref_p ().  */
9126
 
9127
static bool
mips_cfun_has_flexible_gp_ref_p (void)
{
  /* Reload can sometimes introduce constant pool references
     into a function that otherwise didn't need them.  For example,
     suppose we have an instruction like:

        (set (reg:DF R1) (float:DF (reg:SI R2)))

     If R2 turns out to be a constant such as 1, the instruction may
     have a REG_EQUAL note saying that R1 == 1.0.  Reload then has
     the option of using this constant if R2 doesn't get allocated
     to a register.

     In cases like these, reload will have added the constant to the
     pool but no instruction will yet refer to it.  */
  if (TARGET_ABICALLS_PIC2 && !reload_completed && crtl->uses_const_pool)
    return true;

  /* Otherwise scan the instruction stream for GOT, small-data or
     direct $28 references; the result is cached in cfun->machine.  */
  return mips_find_gp_ref (&cfun->machine->has_flexible_gp_insn_p,
                           mips_insn_has_flexible_gp_ref_p);
}
9149
 
9150
/* Return the register that should be used as the global pointer
9151
   within this function.  Return INVALID_REGNUM if the function
9152
   doesn't need a global pointer.  */
9153
 
9154
static unsigned int
9155
mips_global_pointer (void)
9156
{
9157
  unsigned int regno;
9158
 
9159
  /* $gp is always available unless we're using a GOT.  */
9160
  if (!TARGET_USE_GOT)
9161
    return GLOBAL_POINTER_REGNUM;
9162
 
9163
  /* If there are inflexible references to $gp, we must use the
9164
     standard register.  */
9165
  if (mips_cfun_has_inflexible_gp_ref_p ())
9166
    return GLOBAL_POINTER_REGNUM;
9167
 
9168
  /* If there are no current references to $gp, then the only uses
9169
     we can introduce later are those involved in long branches.  */
9170
  if (TARGET_ABSOLUTE_JUMPS && !mips_cfun_has_flexible_gp_ref_p ())
9171
    return INVALID_REGNUM;
9172
 
9173
  /* If the global pointer is call-saved, try to use a call-clobbered
9174
     alternative.  */
9175
  if (TARGET_CALL_SAVED_GP && current_function_is_leaf)
9176
    for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
9177
      if (!df_regs_ever_live_p (regno)
9178
          && call_really_used_regs[regno]
9179
          && !fixed_regs[regno]
9180
          && regno != PIC_FUNCTION_ADDR_REGNUM)
9181
        return regno;
9182
 
9183
  return GLOBAL_POINTER_REGNUM;
9184
}
9185
 
9186
/* Return true if the current function's prologue must load the global
9187
   pointer value into pic_offset_table_rtx and store the same value in
9188
   the function's cprestore slot (if any).
9189
 
9190
   One problem we have to deal with is that, when emitting GOT-based
9191
   position independent code, long-branch sequences will need to load
9192
   the address of the branch target from the GOT.  We don't know until
9193
   the very end of compilation whether (and where) the function needs
9194
   long branches, so we must ensure that _any_ branch can access the
9195
   global pointer in some form.  However, we do not want to pessimize
9196
   the usual case in which all branches are short.
9197
 
9198
   We handle this as follows:
9199
 
9200
   (1) During reload, we set cfun->machine->global_pointer to
9201
       INVALID_REGNUM if we _know_ that the current function
9202
       doesn't need a global pointer.  This is only valid if
9203
       long branches don't need the GOT.
9204
 
9205
       Otherwise, we assume that we might need a global pointer
9206
       and pick an appropriate register.
9207
 
9208
   (2) If cfun->machine->global_pointer != INVALID_REGNUM,
9209
       we ensure that the global pointer is available at every
9210
       block boundary bar entry and exit.  We do this in one of two ways:
9211
 
9212
       - If the function has a cprestore slot, we ensure that this
9213
         slot is valid at every branch.  However, as explained in
9214
         point (6) below, there is no guarantee that pic_offset_table_rtx
9215
         itself is valid if new uses of the global pointer are introduced
9216
         after the first post-epilogue split.
9217
 
9218
         We guarantee that the cprestore slot is valid by loading it
9219
         into a fake register, CPRESTORE_SLOT_REGNUM.  We then make
9220
         this register live at every block boundary bar function entry
9221
         and exit.  It is then invalid to move the load (and thus the
9222
         preceding store) across a block boundary.
9223
 
9224
       - If the function has no cprestore slot, we guarantee that
9225
         pic_offset_table_rtx itself is valid at every branch.
9226
 
9227
       See mips_eh_uses for the handling of the register liveness.
9228
 
9229
   (3) During prologue and epilogue generation, we emit "ghost"
9230
       placeholder instructions to manipulate the global pointer.
9231
 
9232
   (4) During prologue generation, we set cfun->machine->must_initialize_gp_p
9233
       and cfun->machine->must_restore_gp_when_clobbered_p if we already know
9234
       that the function needs a global pointer.  (There is no need to set
9235
       them earlier than this, and doing it as late as possible leads to
9236
       fewer false positives.)
9237
 
9238
   (5) If cfun->machine->must_initialize_gp_p is true during a
9239
       split_insns pass, we split the ghost instructions into real
9240
       instructions.  These split instructions can then be optimized in
9241
       the usual way.  Otherwise, we keep the ghost instructions intact,
9242
       and optimize for the case where they aren't needed.  We still
9243
       have the option of splitting them later, if we need to introduce
9244
       new uses of the global pointer.
9245
 
9246
       For example, the scheduler ignores a ghost instruction that
9247
       stores $28 to the stack, but it handles the split form of
9248
       the ghost instruction as an ordinary store.
9249
 
9250
   (6) [OldABI only.]  If cfun->machine->must_restore_gp_when_clobbered_p
9251
       is true during the first post-epilogue split_insns pass, we split
9252
       calls and restore_gp patterns into instructions that explicitly
9253
       load pic_offset_table_rtx from the cprestore slot.  Otherwise,
9254
       we split these patterns into instructions that _don't_ load from
9255
       the cprestore slot.
9256
 
9257
       If cfun->machine->must_restore_gp_when_clobbered_p is true at the
9258
       time of the split, then any instructions that exist at that time
9259
       can make free use of pic_offset_table_rtx.  However, if we want
9260
       to introduce new uses of the global pointer after the split,
9261
       we must explicitly load the value from the cprestore slot, since
9262
       pic_offset_table_rtx itself might not be valid at a given point
9263
       in the function.
9264
 
9265
       The idea is that we want to be able to delete redundant
9266
       loads from the cprestore slot in the usual case where no
9267
       long branches are needed.
9268
 
9269
   (7) If cfun->machine->must_initialize_gp_p is still false at the end
9270
       of md_reorg, we decide whether the global pointer is needed for
9271
       long branches.  If so, we set cfun->machine->must_initialize_gp_p
9272
       to true and split the ghost instructions into real instructions
9273
       at that stage.
9274
 
9275
   Note that the ghost instructions must have a zero length for three reasons:
9276
 
9277
   - Giving the length of the underlying $gp sequence might cause
9278
     us to use long branches in cases where they aren't really needed.
9279
 
9280
   - They would perturb things like alignment calculations.
9281
 
9282
   - More importantly, the hazard detection in md_reorg relies on
9283
     empty instructions having a zero length.
9284
 
9285
   If we find a long branch and split the ghost instructions at the
9286
   end of md_reorg, the split could introduce more long branches.
9287
   That isn't a problem though, because we still do the split before
9288
   the final shorten_branches pass.
9289
 
9290
   This is extremely ugly, but it seems like the best compromise between
9291
   correctness and efficiency.  */
9292
 
9293
bool
mips_must_initialize_gp_p (void)
{
  /* Set during prologue generation, or at the end of md_reorg if long
     branches turn out to need the GOT; see the commentary above.  */
  return cfun->machine->must_initialize_gp_p;
}
9298
 
9299
/* Return true if REGNO is a register that is ordinarily call-clobbered
9300
   but must nevertheless be preserved by an interrupt handler.  */
9301
 
9302
static bool
9303
mips_interrupt_extra_call_saved_reg_p (unsigned int regno)
9304
{
9305
  if (MD_REG_P (regno))
9306
    return true;
9307
 
9308
  if (TARGET_DSP && DSP_ACC_REG_P (regno))
9309
    return true;
9310
 
9311
  if (GP_REG_P (regno) && !cfun->machine->use_shadow_register_set_p)
9312
    {
9313
      /* $0 is hard-wired.  */
9314
      if (regno == GP_REG_FIRST)
9315
        return false;
9316
 
9317
      /* The interrupt handler can treat kernel registers as
9318
         scratch registers.  */
9319
      if (KERNEL_REG_P (regno))
9320
        return false;
9321
 
9322
      /* The function will return the stack pointer to its original value
9323
         anyway.  */
9324
      if (regno == STACK_POINTER_REGNUM)
9325
        return false;
9326
 
9327
      /* Otherwise, return true for registers that aren't ordinarily
9328
         call-clobbered.  */
9329
      return call_really_used_regs[regno];
9330
    }
9331
 
9332
  return false;
9333
}
9334
 
9335
/* Return true if the current function should treat register REGNO
9336
   as call-saved.  */
9337
 
9338
static bool
9339
mips_cfun_call_saved_reg_p (unsigned int regno)
9340
{
9341
  /* If the user makes an ordinarily-call-saved register global,
9342
     that register is no longer call-saved.  */
9343
  if (global_regs[regno])
9344
    return false;
9345
 
9346
  /* Interrupt handlers need to save extra registers.  */
9347
  if (cfun->machine->interrupt_handler_p
9348
      && mips_interrupt_extra_call_saved_reg_p (regno))
9349
    return true;
9350
 
9351
  /* call_insns preserve $28 unless they explicitly say otherwise,
9352
     so call_really_used_regs[] treats $28 as call-saved.  However,
9353
     we want the ABI property rather than the default call_insn
9354
     property here.  */
9355
  return (regno == GLOBAL_POINTER_REGNUM
9356
          ? TARGET_CALL_SAVED_GP
9357
          : !call_really_used_regs[regno]);
9358
}
9359
 
9360
/* Return true if the function body might clobber register REGNO.
9361
   We know that REGNO is call-saved.  */
9362
 
9363
static bool
9364
mips_cfun_might_clobber_call_saved_reg_p (unsigned int regno)
9365
{
9366
  /* Some functions should be treated as clobbering all call-saved
9367
     registers.  */
9368
  if (crtl->saves_all_registers)
9369
    return true;
9370
 
9371
  /* DF handles cases where a register is explicitly referenced in
9372
     the rtl.  Incoming values are passed in call-clobbered registers,
9373
     so we can assume that any live call-saved register is set within
9374
     the function.  */
9375
  if (df_regs_ever_live_p (regno))
9376
    return true;
9377
 
9378
  /* Check for registers that are clobbered by FUNCTION_PROFILER.
9379
     These clobbers are not explicit in the rtl.  */
9380
  if (crtl->profile && MIPS_SAVE_REG_FOR_PROFILING_P (regno))
9381
    return true;
9382
 
9383
  /* If we're using a call-saved global pointer, the function's
9384
     prologue will need to set it up.  */
9385
  if (cfun->machine->global_pointer == regno)
9386
    return true;
9387
 
9388
  /* The function's prologue will need to set the frame pointer if
9389
     frame_pointer_needed.  */
9390
  if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
9391
    return true;
9392
 
9393
  /* If a MIPS16 function returns a value in FPRs, its epilogue
9394
     will need to call an external libgcc routine.  This yet-to-be
9395
     generated call_insn will clobber $31.  */
9396
  if (regno == RETURN_ADDR_REGNUM && mips16_cfun_returns_in_fpr_p ())
9397
    return true;
9398
 
9399
  /* If REGNO is ordinarily call-clobbered, we must assume that any
9400
     called function could modify it.  */
9401
  if (cfun->machine->interrupt_handler_p
9402
      && !current_function_is_leaf
9403
      && mips_interrupt_extra_call_saved_reg_p (regno))
9404
    return true;
9405
 
9406
  return false;
9407
}
9408
 
9409
/* Return true if the current function must save register REGNO.  */
9410
 
9411
static bool
9412
mips_save_reg_p (unsigned int regno)
9413
{
9414
  if (mips_cfun_call_saved_reg_p (regno))
9415
    {
9416
      if (mips_cfun_might_clobber_call_saved_reg_p (regno))
9417
        return true;
9418
 
9419
      /* Save both registers in an FPR pair if either one is used.  This is
9420
         needed for the case when MIN_FPRS_PER_FMT == 1, which allows the odd
9421
         register to be used without the even register.  */
9422
      if (FP_REG_P (regno)
9423
          && MAX_FPRS_PER_FMT == 2
9424
          && mips_cfun_might_clobber_call_saved_reg_p (regno + 1))
9425
        return true;
9426
    }
9427
 
9428
  /* We need to save the incoming return address if __builtin_eh_return
9429
     is being used to set a different return address.  */
9430
  if (regno == RETURN_ADDR_REGNUM && crtl->calls_eh_return)
9431
    return true;
9432
 
9433
  return false;
9434
}
9435
 
9436
/* Populate the current function's mips_frame_info structure.
9437
 
9438
   MIPS stack frames look like:
9439
 
9440
        +-------------------------------+
9441
        |                               |
9442
        |  incoming stack arguments     |
9443
        |                               |
9444
        +-------------------------------+
9445
        |                               |
9446
        |  caller-allocated save area   |
9447
      A |  for register arguments       |
9448
        |                               |
9449
        +-------------------------------+ <-- incoming stack pointer
9450
        |                               |
9451
        |  callee-allocated save area   |
9452
      B |  for arguments that are       |
9453
        |  split between registers and  |
9454
        |  the stack                    |
9455
        |                               |
9456
        +-------------------------------+ <-- arg_pointer_rtx
9457
        |                               |
9458
      C |  callee-allocated save area   |
9459
        |  for register varargs         |
9460
        |                               |
9461
        +-------------------------------+ <-- frame_pointer_rtx
9462
        |                               |       + cop0_sp_offset
9463
        |  COP0 reg save area           |       + UNITS_PER_WORD
9464
        |                               |
9465
        +-------------------------------+ <-- frame_pointer_rtx + acc_sp_offset
9466
        |                               |       + UNITS_PER_WORD
9467
        |  accumulator save area        |
9468
        |                               |
9469
        +-------------------------------+ <-- stack_pointer_rtx + fp_sp_offset
9470
        |                               |       + UNITS_PER_HWFPVALUE
9471
        |  FPR save area                |
9472
        |                               |
9473
        +-------------------------------+ <-- stack_pointer_rtx + gp_sp_offset
9474
        |                               |       + UNITS_PER_WORD
9475
        |  GPR save area                |
9476
        |                               |
9477
        +-------------------------------+ <-- frame_pointer_rtx with
9478
        |                               | \     -fstack-protector
9479
        |  local variables              |  | var_size
9480
        |                               | /
9481
        +-------------------------------+
9482
        |                               | \
9483
        |  $gp save area                |  | cprestore_size
9484
        |                               | /
9485
      P +-------------------------------+ <-- hard_frame_pointer_rtx for
9486
        |                               | \     MIPS16 code
9487
        |  outgoing stack arguments     |  |
9488
        |                               |  |
9489
        +-------------------------------+  | args_size
9490
        |                               |  |
9491
        |  caller-allocated save area   |  |
9492
        |  for register arguments       |  |
9493
        |                               | /
9494
        +-------------------------------+ <-- stack_pointer_rtx
9495
                                              frame_pointer_rtx without
9496
                                                -fstack-protector
9497
                                              hard_frame_pointer_rtx for
9498
                                                non-MIPS16 code.
9499
 
9500
   At least two of A, B and C will be empty.
9501
 
9502
   Dynamic stack allocations such as alloca insert data at point P.
9503
   They decrease stack_pointer_rtx but leave frame_pointer_rtx and
9504
   hard_frame_pointer_rtx unchanged.  */
9505
 
9506
/* Fill in cfun->machine->frame according to the layout pictured above.
   OFFSET accumulates the frame size upwards from the outgoing stack
   pointer; each save area is placed and then OFFSET is moved past it.  */
static void
mips_compute_frame_info (void)
{
  struct mips_frame_info *frame;
  HOST_WIDE_INT offset, size;
  unsigned int regno, i;

  /* Set this function's interrupt properties.  */
  if (mips_interrupt_type_p (TREE_TYPE (current_function_decl)))
    {
      if (!ISA_MIPS32R2)
        error ("the %<interrupt%> attribute requires a MIPS32r2 processor");
      else if (TARGET_HARD_FLOAT)
        error ("the %<interrupt%> attribute requires %<-msoft-float%>");
      else if (TARGET_MIPS16)
        error ("interrupt handlers cannot be MIPS16 functions");
      else
        {
          cfun->machine->interrupt_handler_p = true;
          cfun->machine->use_shadow_register_set_p =
            mips_use_shadow_register_set_p (TREE_TYPE (current_function_decl));
          cfun->machine->keep_interrupts_masked_p =
            mips_keep_interrupts_masked_p (TREE_TYPE (current_function_decl));
          cfun->machine->use_debug_exception_return_p =
            mips_use_debug_exception_return_p (TREE_TYPE
                                               (current_function_decl));
        }
    }

  frame = &cfun->machine->frame;
  memset (frame, 0, sizeof (*frame));
  size = get_frame_size ();

  cfun->machine->global_pointer = mips_global_pointer ();

  /* The first two blocks contain the outgoing argument area and the $gp save
     slot.  This area isn't needed in leaf functions, but if the
     target-independent frame size is nonzero, we have already committed to
     allocating these in STARTING_FRAME_OFFSET for !FRAME_GROWS_DOWNWARD.  */
  if ((size == 0 || FRAME_GROWS_DOWNWARD) && current_function_is_leaf)
    {
      /* The MIPS 3.0 linker does not like functions that dynamically
         allocate the stack and have 0 for STACK_DYNAMIC_OFFSET, since it
         looks like we are trying to create a second frame pointer to the
         function, so allocate some stack space to make it happy.  */
      if (cfun->calls_alloca)
        frame->args_size = REG_PARM_STACK_SPACE (cfun->decl);
      else
        frame->args_size = 0;
      frame->cprestore_size = 0;
    }
  else
    {
      frame->args_size = crtl->outgoing_args_size;
      frame->cprestore_size = MIPS_GP_SAVE_AREA_SIZE;
    }
  offset = frame->args_size + frame->cprestore_size;

  /* Move above the local variables.  */
  frame->var_size = MIPS_STACK_ALIGN (size);
  offset += frame->var_size;

  /* Find out which GPRs we need to save.  */
  for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
    if (mips_save_reg_p (regno))
      {
        frame->num_gp++;
        frame->mask |= 1 << (regno - GP_REG_FIRST);
      }

  /* If this function calls eh_return, we must also save and restore the
     EH data registers.  */
  if (crtl->calls_eh_return)
    for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; i++)
      {
        frame->num_gp++;
        frame->mask |= 1 << (EH_RETURN_DATA_REGNO (i) - GP_REG_FIRST);
      }

  /* The MIPS16e SAVE and RESTORE instructions have two ranges of registers:
     $a3-$a0 and $s2-$s8.  If we save one register in the range, we must
     save all later registers too.  */
  if (GENERATE_MIPS16E_SAVE_RESTORE)
    {
      mips16e_mask_registers (&frame->mask, mips16e_s2_s8_regs,
                              ARRAY_SIZE (mips16e_s2_s8_regs), &frame->num_gp);
      mips16e_mask_registers (&frame->mask, mips16e_a0_a3_regs,
                              ARRAY_SIZE (mips16e_a0_a3_regs), &frame->num_gp);
    }

  /* Move above the GPR save area.  */
  if (frame->num_gp > 0)
    {
      offset += MIPS_STACK_ALIGN (frame->num_gp * UNITS_PER_WORD);
      frame->gp_sp_offset = offset - UNITS_PER_WORD;
    }

  /* Find out which FPRs we need to save.  This loop must iterate over
     the same space as its companion in mips_for_each_saved_gpr_and_fpr.  */
  if (TARGET_HARD_FLOAT)
    for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno += MAX_FPRS_PER_FMT)
      if (mips_save_reg_p (regno))
        {
          frame->num_fp += MAX_FPRS_PER_FMT;
          /* Set MAX_FPRS_PER_FMT consecutive fmask bits for the FPR
             group starting at REGNO.  */
          frame->fmask |= ~(~0 << MAX_FPRS_PER_FMT) << (regno - FP_REG_FIRST);
        }

  /* Move above the FPR save area.  */
  if (frame->num_fp > 0)
    {
      offset += MIPS_STACK_ALIGN (frame->num_fp * UNITS_PER_FPREG);
      frame->fp_sp_offset = offset - UNITS_PER_HWFPVALUE;
    }

  /* Add in space for the interrupt context information.  */
  if (cfun->machine->interrupt_handler_p)
    {
      /* Check HI/LO.  Bit 0 of acc_mask stands for the HI/LO pair.  */
      if (mips_save_reg_p (LO_REGNUM) || mips_save_reg_p (HI_REGNUM))
        {
          frame->num_acc++;
          frame->acc_mask |= (1 << 0);
        }

      /* Check accumulators 1, 2, 3.  */
      for (i = DSP_ACC_REG_FIRST; i <= DSP_ACC_REG_LAST; i += 2)
        if (mips_save_reg_p (i) || mips_save_reg_p (i + 1))
          {
            frame->num_acc++;
            frame->acc_mask |= 1 << (((i - DSP_ACC_REG_FIRST) / 2) + 1);
          }

      /* All interrupt context functions need space to preserve STATUS.  */
      frame->num_cop0_regs++;

      /* If we don't keep interrupts masked, we need to save EPC.  */
      if (!cfun->machine->keep_interrupts_masked_p)
        frame->num_cop0_regs++;
    }

  /* Move above the accumulator save area.  */
  if (frame->num_acc > 0)
    {
      /* Each accumulator needs 2 words.  */
      offset += frame->num_acc * 2 * UNITS_PER_WORD;
      frame->acc_sp_offset = offset - UNITS_PER_WORD;
    }

  /* Move above the COP0 register save area.  */
  if (frame->num_cop0_regs > 0)
    {
      offset += frame->num_cop0_regs * UNITS_PER_WORD;
      frame->cop0_sp_offset = offset - UNITS_PER_WORD;
    }

  /* Move above the callee-allocated varargs save area.  */
  offset += MIPS_STACK_ALIGN (cfun->machine->varargs_size);
  frame->arg_pointer_offset = offset;

  /* Move above the callee-allocated area for pretend stack arguments.  */
  offset += crtl->args.pretend_args_size;
  frame->total_size = offset;

  /* Work out the offsets of the save areas from the top of the frame;
     these are negative or zero since OFFSET is now the full frame size.  */
  if (frame->gp_sp_offset > 0)
    frame->gp_save_offset = frame->gp_sp_offset - offset;
  if (frame->fp_sp_offset > 0)
    frame->fp_save_offset = frame->fp_sp_offset - offset;
  if (frame->acc_sp_offset > 0)
    frame->acc_save_offset = frame->acc_sp_offset - offset;
  if (frame->num_cop0_regs > 0)
    frame->cop0_save_offset = frame->cop0_sp_offset - offset;

  /* MIPS16 code offsets the frame pointer by the size of the outgoing
     arguments.  This tends to increase the chances of using unextended
     instructions for local variables and incoming arguments.  */
  if (TARGET_MIPS16)
    frame->hard_frame_pointer_offset = frame->args_size;
}
9685
 
9686
/* Return the style of GP load sequence that is being used for the
9687
   current function.  */
9688
 
9689
enum mips_loadgp_style
9690
mips_current_loadgp_style (void)
9691
{
9692
  if (!TARGET_USE_GOT || cfun->machine->global_pointer == INVALID_REGNUM)
9693
    return LOADGP_NONE;
9694
 
9695
  if (TARGET_RTP_PIC)
9696
    return LOADGP_RTP;
9697
 
9698
  if (TARGET_ABSOLUTE_ABICALLS)
9699
    return LOADGP_ABSOLUTE;
9700
 
9701
  return TARGET_NEWABI ? LOADGP_NEWABI : LOADGP_OLDABI;
9702
}
9703
 
9704
/* Implement TARGET_FRAME_POINTER_REQUIRED.  */
9705
 
9706
static bool
9707
mips_frame_pointer_required (void)
9708
{
9709
  /* If the function contains dynamic stack allocations, we need to
9710
     use the frame pointer to access the static parts of the frame.  */
9711
  if (cfun->calls_alloca)
9712
    return true;
9713
 
9714
  /* In MIPS16 mode, we need a frame pointer for a large frame; otherwise,
9715
     reload may be unable to compute the address of a local variable,
9716
     since there is no way to add a large constant to the stack pointer
9717
     without using a second temporary register.  */
9718
  if (TARGET_MIPS16)
9719
    {
9720
      mips_compute_frame_info ();
9721
      if (!SMALL_OPERAND (cfun->machine->frame.total_size))
9722
        return true;
9723
    }
9724
 
9725
  return false;
9726
}
9727
 
9728
/* Make sure that we're not trying to eliminate to the wrong hard frame
9729
   pointer.  */
9730
 
9731
static bool
9732
mips_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
9733
{
9734
  return (to == HARD_FRAME_POINTER_REGNUM || to == STACK_POINTER_REGNUM);
9735
}
9736
 
9737
/* Implement INITIAL_ELIMINATION_OFFSET.  FROM is either the frame pointer
9738
   or argument pointer.  TO is either the stack pointer or hard frame
9739
   pointer.  */
9740
 
9741
HOST_WIDE_INT
9742
mips_initial_elimination_offset (int from, int to)
9743
{
9744
  HOST_WIDE_INT offset;
9745
 
9746
  mips_compute_frame_info ();
9747
 
9748
  /* Set OFFSET to the offset from the end-of-prologue stack pointer.  */
9749
  switch (from)
9750
    {
9751
    case FRAME_POINTER_REGNUM:
9752
      if (FRAME_GROWS_DOWNWARD)
9753
        offset = (cfun->machine->frame.args_size
9754
                  + cfun->machine->frame.cprestore_size
9755
                  + cfun->machine->frame.var_size);
9756
      else
9757
        offset = 0;
9758
      break;
9759
 
9760
    case ARG_POINTER_REGNUM:
9761
      offset = cfun->machine->frame.arg_pointer_offset;
9762
      break;
9763
 
9764
    default:
9765
      gcc_unreachable ();
9766
    }
9767
 
9768
  if (to == HARD_FRAME_POINTER_REGNUM)
9769
    offset -= cfun->machine->frame.hard_frame_pointer_offset;
9770
 
9771
  return offset;
9772
}
9773
 
9774
/* Implement TARGET_EXTRA_LIVE_ON_ENTRY.  */
9775
 
9776
static void
9777
mips_extra_live_on_entry (bitmap regs)
9778
{
9779
  if (TARGET_USE_GOT)
9780
    {
9781
      /* PIC_FUNCTION_ADDR_REGNUM is live if we need it to set up
9782
         the global pointer.   */
9783
      if (!TARGET_ABSOLUTE_ABICALLS)
9784
        bitmap_set_bit (regs, PIC_FUNCTION_ADDR_REGNUM);
9785
 
9786
      /* The prologue may set MIPS16_PIC_TEMP_REGNUM to the value of
9787
         the global pointer.  */
9788
      if (TARGET_MIPS16)
9789
        bitmap_set_bit (regs, MIPS16_PIC_TEMP_REGNUM);
9790
 
9791
      /* See the comment above load_call<mode> for details.  */
9792
      bitmap_set_bit (regs, GOT_VERSION_REGNUM);
9793
    }
9794
}
9795
 
9796
/* Implement RETURN_ADDR_RTX.  We do not support moving back to a
9797
   previous frame.  */
9798
 
9799
rtx
9800
mips_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
9801
{
9802
  if (count != 0)
9803
    return const0_rtx;
9804
 
9805
  return get_hard_reg_initial_val (Pmode, RETURN_ADDR_REGNUM);
9806
}
9807
 
9808
/* Emit code to change the current function's return address to
9809
   ADDRESS.  SCRATCH is available as a scratch register, if needed.
9810
   ADDRESS and SCRATCH are both word-mode GPRs.  */
9811
 
9812
void
9813
mips_set_return_address (rtx address, rtx scratch)
9814
{
9815
  rtx slot_address;
9816
 
9817
  gcc_assert (BITSET_P (cfun->machine->frame.mask, RETURN_ADDR_REGNUM));
9818
  slot_address = mips_add_offset (scratch, stack_pointer_rtx,
9819
                                  cfun->machine->frame.gp_sp_offset);
9820
  mips_emit_move (gen_frame_mem (GET_MODE (address), slot_address), address);
9821
}
9822
 
9823
/* Return true if the current function has a cprestore slot.  */
9824
 
9825
bool
9826
mips_cfun_has_cprestore_slot_p (void)
9827
{
9828
  return (cfun->machine->global_pointer != INVALID_REGNUM
9829
          && cfun->machine->frame.cprestore_size > 0);
9830
}
9831
 
9832
/* Fill *BASE and *OFFSET such that *BASE + *OFFSET refers to the
9833
   cprestore slot.  LOAD_P is true if the caller wants to load from
9834
   the cprestore slot; it is false if the caller wants to store to
9835
   the slot.  */
9836
 
9837
static void
9838
mips_get_cprestore_base_and_offset (rtx *base, HOST_WIDE_INT *offset,
9839
                                    bool load_p)
9840
{
9841
  const struct mips_frame_info *frame;
9842
 
9843
  frame = &cfun->machine->frame;
9844
  /* .cprestore always uses the stack pointer instead of the frame pointer.
9845
     We have a free choice for direct stores for non-MIPS16 functions,
9846
     and for MIPS16 functions whose cprestore slot is in range of the
9847
     stack pointer.  Using the stack pointer would sometimes give more
9848
     (early) scheduling freedom, but using the frame pointer would
9849
     sometimes give more (late) scheduling freedom.  It's hard to
9850
     predict which applies to a given function, so let's keep things
9851
     simple.
9852
 
9853
     Loads must always use the frame pointer in functions that call
9854
     alloca, and there's little benefit to using the stack pointer
9855
     otherwise.  */
9856
  if (frame_pointer_needed && !(TARGET_CPRESTORE_DIRECTIVE && !load_p))
9857
    {
9858
      *base = hard_frame_pointer_rtx;
9859
      *offset = frame->args_size - frame->hard_frame_pointer_offset;
9860
    }
9861
  else
9862
    {
9863
      *base = stack_pointer_rtx;
9864
      *offset = frame->args_size;
9865
    }
9866
}
9867
 
9868
/* Return true if X is the load or store address of the cprestore slot;
9869
   LOAD_P says which.  */
9870
 
9871
bool
9872
mips_cprestore_address_p (rtx x, bool load_p)
9873
{
9874
  rtx given_base, required_base;
9875
  HOST_WIDE_INT given_offset, required_offset;
9876
 
9877
  mips_split_plus (x, &given_base, &given_offset);
9878
  mips_get_cprestore_base_and_offset (&required_base, &required_offset, load_p);
9879
  return given_base == required_base && given_offset == required_offset;
9880
}
9881
 
9882
/* Return a MEM rtx for the cprestore slot.  LOAD_P is true if we are
9883
   going to load from it, false if we are going to store to it.
9884
   Use TEMP as a temporary register if need be.  */
9885
 
9886
static rtx
9887
mips_cprestore_slot (rtx temp, bool load_p)
9888
{
9889
  rtx base;
9890
  HOST_WIDE_INT offset;
9891
 
9892
  mips_get_cprestore_base_and_offset (&base, &offset, load_p);
9893
  return gen_frame_mem (Pmode, mips_add_offset (temp, base, offset));
9894
}
9895
 
9896
/* Emit instructions to save global pointer value GP into cprestore
9897
   slot MEM.  OFFSET is the offset that MEM applies to the base register.
9898
 
9899
   MEM may not be a legitimate address.  If it isn't, TEMP is a
9900
   temporary register that can be used, otherwise it is a SCRATCH.  */
9901
 
9902
void
9903
mips_save_gp_to_cprestore_slot (rtx mem, rtx offset, rtx gp, rtx temp)
9904
{
9905
  if (TARGET_CPRESTORE_DIRECTIVE)
9906
    {
9907
      gcc_assert (gp == pic_offset_table_rtx);
9908
      emit_insn (PMODE_INSN (gen_cprestore, (mem, offset)));
9909
    }
9910
  else
9911
    mips_emit_move (mips_cprestore_slot (temp, false), gp);
9912
}
9913
 
9914
/* Restore $gp from its save slot, using TEMP as a temporary base register
   if need be.  This function is for o32 and o64 abicalls only.

   See mips_must_initialize_gp_p for details about how we manage the
   global pointer.  */

void
mips_restore_gp_from_cprestore_slot (rtx temp)
{
  gcc_assert (TARGET_ABICALLS && TARGET_OLDABI && epilogue_completed);

  /* If no restore is needed, emit a placeholder note so the caller
     still gets an insn back.  */
  if (!cfun->machine->must_restore_gp_when_clobbered_p)
    {
      emit_note (NOTE_INSN_DELETED);
      return;
    }

  /* NOTE(review): the MIPS16 path goes through TEMP in two moves,
     presumably because $gp cannot be loaded directly in MIPS16 code
     — confirm against the MIPS16 move patterns.  */
  if (TARGET_MIPS16)
    {
      mips_emit_move (temp, mips_cprestore_slot (temp, true));
      mips_emit_move (pic_offset_table_rtx, temp);
    }
  else
    mips_emit_move (pic_offset_table_rtx, mips_cprestore_slot (temp, true));
  if (!TARGET_EXPLICIT_RELOCS)
    emit_insn (gen_blockage ());
}
9941
 
9942
/* A function to save or store a register.  The first argument is the
9943
   register and the second is the stack slot.  */
9944
typedef void (*mips_save_restore_fn) (rtx, rtx);
9945
 
9946
/* Use FN to save or restore register REGNO.  MODE is the register's
9947
   mode and OFFSET is the offset of its save slot from the current
9948
   stack pointer.  */
9949
 
9950
static void
9951
mips_save_restore_reg (enum machine_mode mode, int regno,
9952
                       HOST_WIDE_INT offset, mips_save_restore_fn fn)
9953
{
9954
  rtx mem;
9955
 
9956
  mem = gen_frame_mem (mode, plus_constant (stack_pointer_rtx, offset));
9957
  fn (gen_rtx_REG (mode, regno), mem);
9958
}
9959
 
9960
/* Call FN for each accumulator that is saved by the current function.
   SP_OFFSET is the offset of the current stack pointer from the start
   of the frame.  */

static void
mips_for_each_saved_acc (HOST_WIDE_INT sp_offset, mips_save_restore_fn fn)
{
  HOST_WIDE_INT offset;
  int regno;

  /* Walk the accumulator save area from the top down, matching the
     layout chosen by mips_compute_frame_info.  */
  offset = cfun->machine->frame.acc_sp_offset - sp_offset;
  /* Bit 0 of acc_mask stands for the HI/LO pair.  */
  if (BITSET_P (cfun->machine->frame.acc_mask, 0))
    {
      mips_save_restore_reg (word_mode, LO_REGNUM, offset, fn);
      offset -= UNITS_PER_WORD;
      mips_save_restore_reg (word_mode, HI_REGNUM, offset, fn);
      offset -= UNITS_PER_WORD;
    }

  /* Bits 1, 2 and 3 of acc_mask stand for the DSP accumulator pairs;
     each register in a marked pair gets its own word-sized slot.  */
  for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno++)
    if (BITSET_P (cfun->machine->frame.acc_mask,
                  ((regno - DSP_ACC_REG_FIRST) / 2) + 1))
      {
        mips_save_restore_reg (word_mode, regno, offset, fn);
        offset -= UNITS_PER_WORD;
      }
}
9987
 
9988
/* Call FN for each register that is saved by the current function.
   SP_OFFSET is the offset of the current stack pointer from the start
   of the frame.  */

static void
mips_for_each_saved_gpr_and_fpr (HOST_WIDE_INT sp_offset,
                                 mips_save_restore_fn fn)
{
  enum machine_mode fpr_mode;
  HOST_WIDE_INT offset;
  int regno;

  /* Save registers starting from high to low.  The debuggers prefer at least
     the return register be stored at func+4, and also it allows us not to
     need a nop in the epilogue if at least one register is reloaded in
     addition to return address.  */
  offset = cfun->machine->frame.gp_sp_offset - sp_offset;
  for (regno = GP_REG_LAST; regno >= GP_REG_FIRST; regno--)
    if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
      {
        /* Record the ra offset for use by mips_function_profiler.  */
        if (regno == RETURN_ADDR_REGNUM)
          cfun->machine->frame.ra_fp_offset = offset + sp_offset;
        mips_save_restore_reg (word_mode, regno, offset, fn);
        offset -= UNITS_PER_WORD;
      }

  /* This loop must iterate over the same space as its companion in
     mips_compute_frame_info.  */
  offset = cfun->machine->frame.fp_sp_offset - sp_offset;
  /* FPRs are saved as doubles unless the target only has
     single-precision registers.  */
  fpr_mode = (TARGET_SINGLE_FLOAT ? SFmode : DFmode);
  for (regno = FP_REG_LAST - MAX_FPRS_PER_FMT + 1;
       regno >= FP_REG_FIRST;
       regno -= MAX_FPRS_PER_FMT)
    if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
      {
        mips_save_restore_reg (fpr_mode, regno, offset, fn);
        offset -= GET_MODE_SIZE (fpr_mode);
      }
}
10028
 
10029
/* Return true if a move between register REGNO and its save slot (MEM)
   can be done in a single move.  LOAD_P is true if we are loading
   from the slot, false if we are storing to it.  */

static bool
mips_direct_save_slot_move_p (unsigned int regno, rtx mem, bool load_p)
{
  /* There is a specific MIPS16 instruction for saving $31 to the stack.  */
  if (TARGET_MIPS16 && !load_p && regno == RETURN_ADDR_REGNUM)
    return false;

  /* Otherwise the move is direct exactly when no secondary reload
     register would be needed for it.  */
  return mips_secondary_reload_class (REGNO_REG_CLASS (regno),
                                      GET_MODE (mem), mem, load_p) == NO_REGS;
}
10043
 
10044
/* Emit a move from SRC to DEST, given that one of them is a register
   save slot and that the other is a register.  TEMP is a temporary
   GPR of the same mode that is available if need be.  */

void
mips_emit_save_slot_move (rtx dest, rtx src, rtx temp)
{
  unsigned int regno;
  rtx mem;

  /* Work out which operand is the register and which is the slot.  */
  if (REG_P (src))
    {
      regno = REGNO (src);
      mem = dest;
    }
  else
    {
      regno = REGNO (dest);
      mem = src;
    }

  if (regno == cfun->machine->global_pointer && !mips_must_initialize_gp_p ())
    {
      /* We don't yet know whether we'll need this instruction or not.
         Postpone the decision by emitting a ghost move.  This move
         is specifically not frame-related; only the split version is.  */
      if (TARGET_64BIT)
        emit_insn (gen_move_gpdi (dest, src));
      else
        emit_insn (gen_move_gpsi (dest, src));
      return;
    }

  if (regno == HI_REGNUM)
    {
      /* HI is moved through TEMP using the mthi/mfhi patterns rather
         than a plain move.  */
      if (REG_P (dest))
        {
          mips_emit_move (temp, src);
          if (TARGET_64BIT)
            emit_insn (gen_mthisi_di (gen_rtx_REG (TImode, MD_REG_FIRST),
                                      temp, gen_rtx_REG (DImode, LO_REGNUM)));
          else
            emit_insn (gen_mthisi_di (gen_rtx_REG (DImode, MD_REG_FIRST),
                                      temp, gen_rtx_REG (SImode, LO_REGNUM)));
        }
      else
        {
          if (TARGET_64BIT)
            emit_insn (gen_mfhidi_ti (temp,
                                      gen_rtx_REG (TImode, MD_REG_FIRST)));
          else
            emit_insn (gen_mfhisi_di (temp,
                                      gen_rtx_REG (DImode, MD_REG_FIRST)));
          mips_emit_move (dest, temp);
        }
    }
  else if (mips_direct_save_slot_move_p (regno, mem, mem == src))
    mips_emit_move (dest, src);
  else
    {
      /* An indirect move must go through TEMP; TEMP must therefore be
         distinct from DEST.  */
      gcc_assert (!reg_overlap_mentioned_p (dest, temp));
      mips_emit_move (temp, src);
      mips_emit_move (dest, temp);
    }
  /* For stores, record the save in the CFI frame information.  */
  if (MEM_P (dest))
    mips_set_frame_expr (mips_frame_set (dest, src));
}
10111
 
10112
/* If we're generating n32 or n64 abicalls, and the current function
10113
   does not use $28 as its global pointer, emit a cplocal directive.
10114
   Use pic_offset_table_rtx as the argument to the directive.  */
10115
 
10116
static void
10117
mips_output_cplocal (void)
10118
{
10119
  if (!TARGET_EXPLICIT_RELOCS
10120
      && mips_must_initialize_gp_p ()
10121
      && cfun->machine->global_pointer != GLOBAL_POINTER_REGNUM)
10122
    output_asm_insn (".cplocal %+", 0);
10123
}
10124
 
10125
/* Implement TARGET_OUTPUT_FUNCTION_PROLOGUE.  Emit the textual parts
   of the prologue: the function start directives, the .frame/.mask/.fmask
   information and, for the old ABIs, the fixed $gp initialization
   sequence.  The RTL parts are emitted by mips_expand_prologue.  */

static void
mips_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  const char *fnname;

#ifdef SDB_DEBUGGING_INFO
  if (debug_info_level != DINFO_LEVEL_TERSE && write_symbols == SDB_DEBUG)
    SDB_OUTPUT_SOURCE_LINE (file, DECL_SOURCE_LINE (current_function_decl));
#endif

  /* In MIPS16 mode, we may need to generate a non-MIPS16 stub to handle
     floating-point arguments.  */
  if (TARGET_MIPS16
      && TARGET_HARD_FLOAT_ABI
      && crtl->args.info.fp_code != 0)
    mips16_build_function_stub ();

  /* Get the function name the same way that toplev.c does before calling
     assemble_start_function.  This is needed so that the name used here
     exactly matches the name used in ASM_DECLARE_FUNCTION_NAME.  */
  fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
  mips_start_function_definition (fnname, TARGET_MIPS16);

  /* Output MIPS-specific frame information.  */
  if (!flag_inhibit_size_directive)
    {
      const struct mips_frame_info *frame;

      frame = &cfun->machine->frame;

      /* .frame FRAMEREG, FRAMESIZE, RETREG.  The trailing "#" comment
         documents the frame layout for human readers of the assembly.  */
      fprintf (file,
               "\t.frame\t%s," HOST_WIDE_INT_PRINT_DEC ",%s\t\t"
               "# vars= " HOST_WIDE_INT_PRINT_DEC
               ", regs= %d/%d"
               ", args= " HOST_WIDE_INT_PRINT_DEC
               ", gp= " HOST_WIDE_INT_PRINT_DEC "\n",
               reg_names[frame_pointer_needed
                         ? HARD_FRAME_POINTER_REGNUM
                         : STACK_POINTER_REGNUM],
               (frame_pointer_needed
                ? frame->total_size - frame->hard_frame_pointer_offset
                : frame->total_size),
               reg_names[RETURN_ADDR_REGNUM],
               frame->var_size,
               frame->num_gp, frame->num_fp,
               frame->args_size,
               frame->cprestore_size);

      /* .mask MASK, OFFSET: which GPRs are saved and where.  */
      fprintf (file, "\t.mask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
               frame->mask, frame->gp_save_offset);

      /* .fmask MASK, OFFSET: likewise for FPRs.  */
      fprintf (file, "\t.fmask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
               frame->fmask, frame->fp_save_offset);
    }

  /* Handle the initialization of $gp for SVR4 PIC, if applicable.
     Also emit the ".set noreorder; .set nomacro" sequence for functions
     that need it.  */
  if (mips_must_initialize_gp_p ()
      && mips_current_loadgp_style () == LOADGP_OLDABI)
    {
      if (TARGET_MIPS16)
        {
          /* This is a fixed-form sequence.  The position of the
             first two instructions is important because of the
             way _gp_disp is defined.  */
          output_asm_insn ("li\t$2,%%hi(_gp_disp)", 0);
          output_asm_insn ("addiu\t$3,$pc,%%lo(_gp_disp)", 0);
          output_asm_insn ("sll\t$2,16", 0);
          output_asm_insn ("addu\t$2,$3", 0);
        }
      else
        {
          /* .cpload must be in a .set noreorder but not a
             .set nomacro block.  */
          mips_push_asm_switch (&mips_noreorder);
          output_asm_insn (".cpload\t%^", 0);
          if (!cfun->machine->all_noreorder_p)
            mips_pop_asm_switch (&mips_noreorder);
          else
            /* Leave noreorder active for the whole function and add
               nomacro; both are popped in the epilogue.  */
            mips_push_asm_switch (&mips_nomacro);
        }
    }
  else if (cfun->machine->all_noreorder_p)
    {
      mips_push_asm_switch (&mips_noreorder);
      mips_push_asm_switch (&mips_nomacro);
    }

  /* Tell the assembler which register we're using as the global
     pointer.  This is needed for thunks, since they can use either
     explicit relocs or assembler macros.  */
  mips_output_cplocal ();
}
10224
 
10225
/* Implement TARGET_OUTPUT_FUNCTION_EPILOGUE.  Undo the textual state
   set up by mips_output_function_prologue and close the function
   definition.  */

static void
mips_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
                               HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  const char *fnname;

  /* Reinstate the normal $gp.  */
  SET_REGNO (pic_offset_table_rtx, GLOBAL_POINTER_REGNUM);
  mips_output_cplocal ();

  if (cfun->machine->all_noreorder_p)
    {
      /* Pop the asm switches in the reverse of the order in which the
         prologue pushed them.  */
      mips_pop_asm_switch (&mips_nomacro);
      mips_pop_asm_switch (&mips_noreorder);
    }

  /* Get the function name the same way that toplev.c does before calling
     assemble_start_function.  This is needed so that the name used here
     exactly matches the name used in ASM_DECLARE_FUNCTION_NAME.  */
  fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
  mips_end_function_definition (fnname);
}
10249
 
10250
/* Emit an optimisation barrier for accesses to the current frame.  */
10251
 
10252
static void
10253
mips_frame_barrier (void)
10254
{
10255
  emit_clobber (gen_frame_mem (BLKmode, stack_pointer_rtx));
10256
}
10257
 
10258
/* Save register REG to MEM.  Make the instruction frame-related.  */

static void
mips_save_reg (rtx reg, rtx mem)
{
  if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
    {
      rtx x1, x2;

      /* Without 64-bit FPRs, the DFmode store may have to be emitted
         as two word moves.  */
      if (mips_split_64bit_move_p (mem, reg))
        mips_split_doubleword_move (mem, reg);
      else
        mips_emit_move (mem, reg);

      /* Describe the save for CFI purposes as two separate word-sized
         stores, one per subword, regardless of how it was emitted.  */
      x1 = mips_frame_set (mips_subword (mem, false),
                           mips_subword (reg, false));
      x2 = mips_frame_set (mips_subword (mem, true),
                           mips_subword (reg, true));
      mips_set_frame_expr (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x1, x2)));
    }
  else
    /* mips_emit_save_slot_move attaches the frame-related information
       itself when MEM is the destination.  */
    mips_emit_save_slot_move (mem, reg, MIPS_PROLOGUE_TEMP (GET_MODE (reg)));
}
10281
 
10282
/* The __gnu_local_gp symbol.  Created lazily by mips_emit_loadgp for
   LOADGP_ABSOLUTE and cached here; GTY-marked so the rtx survives
   garbage collection.  */

static GTY(()) rtx mips_gnu_local_gp;
10285
 
10286
/* If we're generating n32 or n64 abicalls, emit instructions
   to set up the global pointer.  */

static void
mips_emit_loadgp (void)
{
  rtx addr, offset, incoming_address, base, index, pic_reg;

  /* In MIPS16 code the global pointer is first built in a temporary
     register and copied to $gp below.  */
  pic_reg = TARGET_MIPS16 ? MIPS16_PIC_TEMP : pic_offset_table_rtx;
  switch (mips_current_loadgp_style ())
    {
    case LOADGP_ABSOLUTE:
      /* Lazily create and cache the __gnu_local_gp symbol.  */
      if (mips_gnu_local_gp == NULL)
        {
          mips_gnu_local_gp = gen_rtx_SYMBOL_REF (Pmode, "__gnu_local_gp");
          SYMBOL_REF_FLAGS (mips_gnu_local_gp) |= SYMBOL_FLAG_LOCAL;
        }
      emit_insn (PMODE_INSN (gen_loadgp_absolute,
                             (pic_reg, mips_gnu_local_gp)));
      break;

    case LOADGP_OLDABI:
      /* Added by mips_output_function_prologue.  */
      break;

    case LOADGP_NEWABI:
      /* Compute $gp from the incoming function address and a
         GOT-relative offset for the current function.  */
      addr = XEXP (DECL_RTL (current_function_decl), 0);
      offset = mips_unspec_address (addr, SYMBOL_GOTOFF_LOADGP);
      incoming_address = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
      emit_insn (PMODE_INSN (gen_loadgp_newabi,
                             (pic_reg, offset, incoming_address)));
      break;

    case LOADGP_RTP:
      /* VxWorks RTP mode: load $gp via the GOTT_BASE/GOTT_INDEX
         symbols.  */
      base = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_BASE));
      index = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_INDEX));
      emit_insn (PMODE_INSN (gen_loadgp_rtp, (pic_reg, base, index)));
      break;

    default:
      return;
    }

  /* Copy the value from the temporary into the real $gp.  */
  if (TARGET_MIPS16)
    emit_insn (PMODE_INSN (gen_copygp_mips16,
                           (pic_offset_table_rtx, pic_reg)));

  /* Emit a blockage if there are implicit uses of the GP register.
     This includes profiled functions, because FUNCTION_PROFILE uses
     a jal macro.  */
  if (!TARGET_EXPLICIT_RELOCS || crtl->profile)
    emit_insn (gen_loadgp_blockage ());
}
10339
 
10340
/* A for_each_rtx callback.  Stop the search if *X is a kernel register.  */
10341
 
10342
static int
10343
mips_kernel_reg_p (rtx *x, void *data ATTRIBUTE_UNUSED)
10344
{
10345
  return REG_P (*x) && KERNEL_REG_P (REGNO (*x));
10346
}
10347
 
10348
/* Expand the "prologue" pattern.  Emit RTL to save registers, allocate
   the stack frame, set up the frame pointer and $gp, and, for interrupt
   handlers, save and adjust the relevant COP0 state.  */

void
mips_expand_prologue (void)
{
  const struct mips_frame_info *frame;
  HOST_WIDE_INT size;
  unsigned int nargs;
  rtx insn;

  if (cfun->machine->global_pointer != INVALID_REGNUM)
    {
      /* Check whether an insn uses pic_offset_table_rtx, either explicitly
         or implicitly.  If so, we can commit to using a global pointer
         straight away, otherwise we need to defer the decision.  */
      if (mips_cfun_has_inflexible_gp_ref_p ()
          || mips_cfun_has_flexible_gp_ref_p ())
        {
          cfun->machine->must_initialize_gp_p = true;
          cfun->machine->must_restore_gp_when_clobbered_p = true;
        }

      SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
    }

  frame = &cfun->machine->frame;
  size = frame->total_size;

  if (flag_stack_usage_info)
    current_function_static_stack_size = size;

  /* Save the registers.  Allocate up to MIPS_MAX_FIRST_STACK_STEP
     bytes beforehand; this is enough to cover the register save area
     without going out of range.  */
  if (((frame->mask | frame->fmask | frame->acc_mask) != 0)
      || frame->num_cop0_regs > 0)
    {
      HOST_WIDE_INT step1;

      step1 = MIN (size, MIPS_MAX_FIRST_STACK_STEP);
      if (GENERATE_MIPS16E_SAVE_RESTORE)
        {
          HOST_WIDE_INT offset;
          unsigned int mask, regno;

          /* Try to merge argument stores into the save instruction.  */
          nargs = mips16e_collect_argument_saves ();

          /* Build the save instruction.  MASK is updated to the set of
             registers the SAVE instruction could not handle.  */
          mask = frame->mask;
          insn = mips16e_build_save_restore (false, &mask, &offset,
                                             nargs, step1);
          RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
          mips_frame_barrier ();
          size -= step1;

          /* Check if we need to save other registers.  */
          for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
            if (BITSET_P (mask, regno - GP_REG_FIRST))
              {
                offset -= UNITS_PER_WORD;
                mips_save_restore_reg (word_mode, regno,
                                       offset, mips_save_reg);
              }
        }
      else
        {
          if (cfun->machine->interrupt_handler_p)
            {
              HOST_WIDE_INT offset;
              rtx mem;

              /* If this interrupt is using a shadow register set, we need to
                 get the stack pointer from the previous register set.  */
              if (cfun->machine->use_shadow_register_set_p)
                emit_insn (gen_mips_rdpgpr (stack_pointer_rtx,
                                            stack_pointer_rtx));

              if (!cfun->machine->keep_interrupts_masked_p)
                {
                  /* Move from COP0 Cause to K0.  */
                  emit_insn (gen_cop0_move (gen_rtx_REG (SImode, K0_REG_NUM),
                                            gen_rtx_REG (SImode,
                                                         COP0_CAUSE_REG_NUM)));
                  /* Move from COP0 EPC to K1.  */
                  emit_insn (gen_cop0_move (gen_rtx_REG (SImode, K1_REG_NUM),
                                            gen_rtx_REG (SImode,
                                                         COP0_EPC_REG_NUM)));
                }

              /* Allocate the first part of the frame.  */
              insn = gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
                                    GEN_INT (-step1));
              RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
              mips_frame_barrier ();
              size -= step1;

              /* Start at the uppermost location for saving.  */
              offset = frame->cop0_sp_offset - size;
              if (!cfun->machine->keep_interrupts_masked_p)
                {
                  /* Push EPC into its stack slot.  */
                  mem = gen_frame_mem (word_mode,
                                       plus_constant (stack_pointer_rtx,
                                                      offset));
                  mips_emit_move (mem, gen_rtx_REG (word_mode, K1_REG_NUM));
                  offset -= UNITS_PER_WORD;
                }

              /* Move from COP0 Status to K1.  */
              emit_insn (gen_cop0_move (gen_rtx_REG (SImode, K1_REG_NUM),
                                        gen_rtx_REG (SImode,
                                                     COP0_STATUS_REG_NUM)));

              /* Right justify the RIPL in k0.  */
              if (!cfun->machine->keep_interrupts_masked_p)
                emit_insn (gen_lshrsi3 (gen_rtx_REG (SImode, K0_REG_NUM),
                                        gen_rtx_REG (SImode, K0_REG_NUM),
                                        GEN_INT (CAUSE_IPL)));

              /* Push Status into its stack slot.  */
              mem = gen_frame_mem (word_mode,
                                   plus_constant (stack_pointer_rtx, offset));
              mips_emit_move (mem, gen_rtx_REG (word_mode, K1_REG_NUM));
              offset -= UNITS_PER_WORD;

              /* Insert the RIPL into our copy of SR (k1) as the new IPL.  */
              if (!cfun->machine->keep_interrupts_masked_p)
                emit_insn (gen_insvsi (gen_rtx_REG (SImode, K1_REG_NUM),
                                       GEN_INT (6),
                                       GEN_INT (SR_IPL),
                                       gen_rtx_REG (SImode, K0_REG_NUM)));

              if (!cfun->machine->keep_interrupts_masked_p)
                /* Enable interrupts by clearing the KSU ERL and EXL bits.
                   IE is already the correct value, so we don't have to do
                   anything explicit.  */
                emit_insn (gen_insvsi (gen_rtx_REG (SImode, K1_REG_NUM),
                                       GEN_INT (4),
                                       GEN_INT (SR_EXL),
                                       gen_rtx_REG (SImode, GP_REG_FIRST)));
              else
                /* Disable interrupts by clearing the KSU, ERL, EXL,
                   and IE bits.  */
                emit_insn (gen_insvsi (gen_rtx_REG (SImode, K1_REG_NUM),
                                       GEN_INT (5),
                                       GEN_INT (SR_IE),
                                       gen_rtx_REG (SImode, GP_REG_FIRST)));
            }
          else
            {
              /* Ordinary function: just allocate the first chunk of the
                 frame.  */
              insn = gen_add3_insn (stack_pointer_rtx,
                                    stack_pointer_rtx,
                                    GEN_INT (-step1));
              RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
              mips_frame_barrier ();
              size -= step1;
            }
          mips_for_each_saved_acc (size, mips_save_reg);
          mips_for_each_saved_gpr_and_fpr (size, mips_save_reg);
        }
    }

  /* Allocate the rest of the frame.  */
  if (size > 0)
    {
      if (SMALL_OPERAND (-size))
        RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
                                                       stack_pointer_rtx,
                                                       GEN_INT (-size)))) = 1;
      else
        {
          mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (size));
          if (TARGET_MIPS16)
            {
              /* There are no instructions to add or subtract registers
                 from the stack pointer, so use the frame pointer as a
                 temporary.  We should always be using a frame pointer
                 in this case anyway.  */
              gcc_assert (frame_pointer_needed);
              mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
              emit_insn (gen_sub3_insn (hard_frame_pointer_rtx,
                                        hard_frame_pointer_rtx,
                                        MIPS_PROLOGUE_TEMP (Pmode)));
              mips_emit_move (stack_pointer_rtx, hard_frame_pointer_rtx);
            }
          else
            emit_insn (gen_sub3_insn (stack_pointer_rtx,
                                      stack_pointer_rtx,
                                      MIPS_PROLOGUE_TEMP (Pmode)));

          /* Describe the combined effect of the previous instructions.  */
          mips_set_frame_expr
            (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
                          plus_constant (stack_pointer_rtx, -size)));
        }
      mips_frame_barrier ();
    }

  /* Set up the frame pointer, if we're using one.  */
  if (frame_pointer_needed)
    {
      HOST_WIDE_INT offset;

      offset = frame->hard_frame_pointer_offset;
      if (offset == 0)
        {
          insn = mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
          RTX_FRAME_RELATED_P (insn) = 1;
        }
      else if (SMALL_OPERAND (offset))
        {
          insn = gen_add3_insn (hard_frame_pointer_rtx,
                                stack_pointer_rtx, GEN_INT (offset));
          RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
        }
      else
        {
          /* OFFSET does not fit in an immediate; build it in a
             temporary and describe the combined effect for CFI.  */
          mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (offset));
          mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
          emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
                                    hard_frame_pointer_rtx,
                                    MIPS_PROLOGUE_TEMP (Pmode)));
          mips_set_frame_expr
            (gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
                          plus_constant (stack_pointer_rtx, offset)));
        }
    }

  mips_emit_loadgp ();

  /* Initialize the $gp save slot.  */
  if (mips_cfun_has_cprestore_slot_p ())
    {
      rtx base, mem, gp, temp;
      HOST_WIDE_INT offset;

      mips_get_cprestore_base_and_offset (&base, &offset, false);
      mem = gen_frame_mem (Pmode, plus_constant (base, offset));
      gp = TARGET_MIPS16 ? MIPS16_PIC_TEMP : pic_offset_table_rtx;
      temp = (SMALL_OPERAND (offset)
              ? gen_rtx_SCRATCH (Pmode)
              : MIPS_PROLOGUE_TEMP (Pmode));
      emit_insn (PMODE_INSN (gen_potential_cprestore,
                             (mem, GEN_INT (offset), gp, temp)));

      mips_get_cprestore_base_and_offset (&base, &offset, true);
      mem = gen_frame_mem (Pmode, plus_constant (base, offset));
      emit_insn (PMODE_INSN (gen_use_cprestore, (mem)));
    }

  /* We need to search back to the last use of K0 or K1.  */
  if (cfun->machine->interrupt_handler_p)
    {
      for (insn = get_last_insn (); insn != NULL_RTX; insn = PREV_INSN (insn))
        if (INSN_P (insn)
            && for_each_rtx (&PATTERN (insn), mips_kernel_reg_p, NULL))
          break;
      /* Emit a move from K1 to COP0 Status after insn.  */
      gcc_assert (insn != NULL_RTX);
      emit_insn_after (gen_cop0_move (gen_rtx_REG (SImode, COP0_STATUS_REG_NUM),
                                      gen_rtx_REG (SImode, K1_REG_NUM)),
                       insn);
    }

  /* If we are profiling, make sure no instructions are scheduled before
     the call to mcount.  */
  if (crtl->profile)
    emit_insn (gen_blockage ());
}
10618
 
10619
/* Attach all pending register saves to the previous instruction.
10620
   Return that instruction.  */
10621
 
10622
static rtx
10623
mips_epilogue_emit_cfa_restores (void)
10624
{
10625
  rtx insn;
10626
 
10627
  insn = get_last_insn ();
10628
  gcc_assert (insn && !REG_NOTES (insn));
10629
  if (mips_epilogue.cfa_restores)
10630
    {
10631
      RTX_FRAME_RELATED_P (insn) = 1;
10632
      REG_NOTES (insn) = mips_epilogue.cfa_restores;
10633
      mips_epilogue.cfa_restores = 0;
10634
    }
10635
  return insn;
10636
}
10637
 
10638
/* Like mips_epilogue_emit_cfa_restores, but also record that the CFA is
   now at REG + OFFSET.  */

static void
mips_epilogue_set_cfa (rtx reg, HOST_WIDE_INT offset)
{
  rtx insn;

  insn = mips_epilogue_emit_cfa_restores ();
  /* Only emit a REG_CFA_DEF_CFA note when the definition actually
     changes; mips_epilogue caches the current CFA register/offset.  */
  if (reg != mips_epilogue.cfa_reg || offset != mips_epilogue.cfa_offset)
    {
      RTX_FRAME_RELATED_P (insn) = 1;
      REG_NOTES (insn) = alloc_reg_note (REG_CFA_DEF_CFA,
                                         plus_constant (reg, offset),
                                         REG_NOTES (insn));
      mips_epilogue.cfa_reg = reg;
      mips_epilogue.cfa_offset = offset;
    }
}
10657
 
10658
/* Emit instructions to restore register REG from slot MEM.  Also update
   the cfa_restores list.  */

static void
mips_restore_reg (rtx reg, rtx mem)
{
  /* There's no MIPS16 instruction to load $31 directly.  Load into
     $7 instead and adjust the return insn appropriately.  */
  if (TARGET_MIPS16 && REGNO (reg) == RETURN_ADDR_REGNUM)
    reg = gen_rtx_REG (GET_MODE (reg), GP_REG_FIRST + 7);
  else if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
    {
      /* Without 64-bit FPRs the DFmode value is restored as two
         subwords, so queue a CFA restore note for each half.  */
      mips_add_cfa_restore (mips_subword (reg, true));
      mips_add_cfa_restore (mips_subword (reg, false));
    }
  else
    mips_add_cfa_restore (reg);

  mips_emit_save_slot_move (reg, mem, MIPS_EPILOGUE_TEMP (GET_MODE (reg)));
  if (REGNO (reg) == REGNO (mips_epilogue.cfa_reg))
    /* The CFA is currently defined in terms of the register whose
       value we have just restored.  Redefine the CFA in terms of
       the stack pointer.  */
    mips_epilogue_set_cfa (stack_pointer_rtx,
                           mips_epilogue.cfa_restore_sp_offset);
}
10684
 
10685
/* Emit code to set the stack pointer to BASE + OFFSET, given that
   BASE + OFFSET is NEW_FRAME_SIZE bytes below the top of the frame.
   BASE, if not the stack pointer, is available as a temporary.  */

static void
mips_deallocate_stack (rtx base, rtx offset, HOST_WIDE_INT new_frame_size)
{
  /* Nothing to do if the stack pointer already has the desired value.  */
  if (base == stack_pointer_rtx && offset == const0_rtx)
    return;

  mips_frame_barrier ();
  if (offset == const0_rtx)
    {
      /* Simple register copy: $sp = BASE.  */
      emit_move_insn (stack_pointer_rtx, base);
      mips_epilogue_set_cfa (stack_pointer_rtx, new_frame_size);
    }
  else if (TARGET_MIPS16 && base != stack_pointer_rtx)
    {
      /* MIPS16 cannot add OFFSET into $sp directly; compute the new
         value in BASE first, redefining the CFA in terms of BASE for
         the window between the add and the copy.  */
      emit_insn (gen_add3_insn (base, base, offset));
      mips_epilogue_set_cfa (base, new_frame_size);
      emit_move_insn (stack_pointer_rtx, base);
    }
  else
    {
      emit_insn (gen_add3_insn (stack_pointer_rtx, base, offset));
      mips_epilogue_set_cfa (stack_pointer_rtx, new_frame_size);
    }
}
10713
 
10714
/* Emit any instructions needed before a return.  */
10715
 
10716
void
10717
mips_expand_before_return (void)
10718
{
10719
  /* When using a call-clobbered gp, we start out with unified call
10720
     insns that include instructions to restore the gp.  We then split
10721
     these unified calls after reload.  These split calls explicitly
10722
     clobber gp, so there is no need to define
10723
     PIC_OFFSET_TABLE_REG_CALL_CLOBBERED.
10724
 
10725
     For consistency, we should also insert an explicit clobber of $28
10726
     before return insns, so that the post-reload optimizers know that
10727
     the register is not live on exit.  */
10728
  if (TARGET_CALL_CLOBBERED_GP)
10729
    emit_clobber (pic_offset_table_rtx);
10730
}
10731
 
10732
/* Expand an "epilogue" or "sibcall_epilogue" pattern; SIBCALL_P
10733
   says which.  */
10734
 
10735
void
10736
mips_expand_epilogue (bool sibcall_p)
10737
{
10738
  const struct mips_frame_info *frame;
10739
  HOST_WIDE_INT step1, step2;
10740
  rtx base, adjust, insn;
10741
 
10742
  if (!sibcall_p && mips_can_use_return_insn ())
10743
    {
10744
      emit_jump_insn (gen_return ());
10745
      return;
10746
    }
10747
 
10748
  /* In MIPS16 mode, if the return value should go into a floating-point
10749
     register, we need to call a helper routine to copy it over.  */
10750
  if (mips16_cfun_returns_in_fpr_p ())
10751
    mips16_copy_fpr_return_value ();
10752
 
10753
  /* Split the frame into two.  STEP1 is the amount of stack we should
10754
     deallocate before restoring the registers.  STEP2 is the amount we
10755
     should deallocate afterwards.
10756
 
10757
     Start off by assuming that no registers need to be restored.  */
10758
  frame = &cfun->machine->frame;
10759
  step1 = frame->total_size;
10760
  step2 = 0;
10761
 
10762
  /* Work out which register holds the frame address.  */
10763
  if (!frame_pointer_needed)
10764
    base = stack_pointer_rtx;
10765
  else
10766
    {
10767
      base = hard_frame_pointer_rtx;
10768
      step1 -= frame->hard_frame_pointer_offset;
10769
    }
10770
  mips_epilogue.cfa_reg = base;
10771
  mips_epilogue.cfa_offset = step1;
10772
  mips_epilogue.cfa_restores = NULL_RTX;
10773
 
10774
  /* If we need to restore registers, deallocate as much stack as
10775
     possible in the second step without going out of range.  */
10776
  if ((frame->mask | frame->fmask | frame->acc_mask) != 0
10777
      || frame->num_cop0_regs > 0)
10778
    {
10779
      step2 = MIN (step1, MIPS_MAX_FIRST_STACK_STEP);
10780
      step1 -= step2;
10781
    }
10782
 
10783
  /* Get an rtx for STEP1 that we can add to BASE.  */
10784
  adjust = GEN_INT (step1);
10785
  if (!SMALL_OPERAND (step1))
10786
    {
10787
      mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), adjust);
10788
      adjust = MIPS_EPILOGUE_TEMP (Pmode);
10789
    }
10790
  mips_deallocate_stack (base, adjust, step2);
10791
 
10792
  /* If we're using addressing macros, $gp is implicitly used by all
10793
     SYMBOL_REFs.  We must emit a blockage insn before restoring $gp
10794
     from the stack.  */
10795
  if (TARGET_CALL_SAVED_GP && !TARGET_EXPLICIT_RELOCS)
10796
    emit_insn (gen_blockage ());
10797
 
10798
  mips_epilogue.cfa_restore_sp_offset = step2;
10799
  if (GENERATE_MIPS16E_SAVE_RESTORE && frame->mask != 0)
10800
    {
10801
      unsigned int regno, mask;
10802
      HOST_WIDE_INT offset;
10803
      rtx restore;
10804
 
10805
      /* Generate the restore instruction.  */
10806
      mask = frame->mask;
10807
      restore = mips16e_build_save_restore (true, &mask, &offset, 0, step2);
10808
 
10809
      /* Restore any other registers manually.  */
10810
      for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
10811
        if (BITSET_P (mask, regno - GP_REG_FIRST))
10812
          {
10813
            offset -= UNITS_PER_WORD;
10814
            mips_save_restore_reg (word_mode, regno, offset, mips_restore_reg);
10815
          }
10816
 
10817
      /* Restore the remaining registers and deallocate the final bit
10818
         of the frame.  */
10819
      mips_frame_barrier ();
10820
      emit_insn (restore);
10821
      mips_epilogue_set_cfa (stack_pointer_rtx, 0);
10822
    }
10823
  else
10824
    {
10825
      /* Restore the registers.  */
10826
      mips_for_each_saved_acc (frame->total_size - step2, mips_restore_reg);
10827
      mips_for_each_saved_gpr_and_fpr (frame->total_size - step2,
10828
                                       mips_restore_reg);
10829
 
10830
      if (cfun->machine->interrupt_handler_p)
10831
        {
10832
          HOST_WIDE_INT offset;
10833
          rtx mem;
10834
 
10835
          offset = frame->cop0_sp_offset - (frame->total_size - step2);
10836
          if (!cfun->machine->keep_interrupts_masked_p)
10837
            {
10838
              /* Restore the original EPC.  */
10839
              mem = gen_frame_mem (word_mode,
10840
                                   plus_constant (stack_pointer_rtx, offset));
10841
              mips_emit_move (gen_rtx_REG (word_mode, K0_REG_NUM), mem);
10842
              offset -= UNITS_PER_WORD;
10843
 
10844
              /* Move to COP0 EPC.  */
10845
              emit_insn (gen_cop0_move (gen_rtx_REG (SImode, COP0_EPC_REG_NUM),
10846
                                        gen_rtx_REG (SImode, K0_REG_NUM)));
10847
            }
10848
 
10849
          /* Restore the original Status.  */
10850
          mem = gen_frame_mem (word_mode,
10851
                               plus_constant (stack_pointer_rtx, offset));
10852
          mips_emit_move (gen_rtx_REG (word_mode, K0_REG_NUM), mem);
10853
          offset -= UNITS_PER_WORD;
10854
 
10855
          /* If we don't use a shadow register set, we need to update SP.  */
10856
          if (!cfun->machine->use_shadow_register_set_p)
10857
            mips_deallocate_stack (stack_pointer_rtx, GEN_INT (step2), 0);
10858
          else
10859
            /* The choice of position is somewhat arbitrary in this case.  */
10860
            mips_epilogue_emit_cfa_restores ();
10861
 
10862
          /* Move to COP0 Status.  */
10863
          emit_insn (gen_cop0_move (gen_rtx_REG (SImode, COP0_STATUS_REG_NUM),
10864
                                    gen_rtx_REG (SImode, K0_REG_NUM)));
10865
        }
10866
      else
10867
        /* Deallocate the final bit of the frame.  */
10868
        mips_deallocate_stack (stack_pointer_rtx, GEN_INT (step2), 0);
10869
    }
10870
  gcc_assert (!mips_epilogue.cfa_restores);
10871
 
10872
  /* Add in the __builtin_eh_return stack adjustment.  We need to
10873
     use a temporary in MIPS16 code.  */
10874
  if (crtl->calls_eh_return)
10875
    {
10876
      if (TARGET_MIPS16)
10877
        {
10878
          mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), stack_pointer_rtx);
10879
          emit_insn (gen_add3_insn (MIPS_EPILOGUE_TEMP (Pmode),
10880
                                    MIPS_EPILOGUE_TEMP (Pmode),
10881
                                    EH_RETURN_STACKADJ_RTX));
10882
          mips_emit_move (stack_pointer_rtx, MIPS_EPILOGUE_TEMP (Pmode));
10883
        }
10884
      else
10885
        emit_insn (gen_add3_insn (stack_pointer_rtx,
10886
                                  stack_pointer_rtx,
10887
                                  EH_RETURN_STACKADJ_RTX));
10888
    }
10889
 
10890
  if (!sibcall_p)
10891
    {
10892
      mips_expand_before_return ();
10893
      if (cfun->machine->interrupt_handler_p)
10894
        {
10895
          /* Interrupt handlers generate eret or deret.  */
10896
          if (cfun->machine->use_debug_exception_return_p)
10897
            emit_jump_insn (gen_mips_deret ());
10898
          else
10899
            emit_jump_insn (gen_mips_eret ());
10900
        }
10901
      else
10902
        {
10903
          rtx pat;
10904
 
10905
          /* When generating MIPS16 code, the normal
10906
             mips_for_each_saved_gpr_and_fpr path will restore the return
10907
             address into $7 rather than $31.  */
10908
          if (TARGET_MIPS16
10909
              && !GENERATE_MIPS16E_SAVE_RESTORE
10910
              && BITSET_P (frame->mask, RETURN_ADDR_REGNUM))
10911
            {
10912
              /* simple_returns cannot rely on values that are only available
10913
                 on paths through the epilogue (because return paths that do
10914
                 not pass through the epilogue may nevertheless reuse a
10915
                 simple_return that occurs at the end of the epilogue).
10916
                 Use a normal return here instead.  */
10917
              rtx reg = gen_rtx_REG (Pmode, GP_REG_FIRST + 7);
10918
              pat = gen_return_internal (reg);
10919
            }
10920
          else
10921
            {
10922
              rtx reg = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
10923
              pat = gen_simple_return_internal (reg);
10924
            }
10925
          emit_jump_insn (pat);
10926
        }
10927
    }
10928
 
10929
  /* Search from the beginning to the first use of K0 or K1.  */
10930
  if (cfun->machine->interrupt_handler_p
10931
      && !cfun->machine->keep_interrupts_masked_p)
10932
    {
10933
      for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn))
10934
        if (INSN_P (insn)
10935
            && for_each_rtx (&PATTERN(insn), mips_kernel_reg_p, NULL))
10936
          break;
10937
      gcc_assert (insn != NULL_RTX);
10938
      /* Insert disable interrupts before the first use of K0 or K1.  */
10939
      emit_insn_before (gen_mips_di (), insn);
10940
      emit_insn_before (gen_mips_ehb (), insn);
10941
    }
10942
}
10943
 
10944
/* Return nonzero if this function is known to have a null epilogue.
10945
   This allows the optimizer to omit jumps to jumps if no stack
10946
   was created.  */
10947
 
10948
bool
10949
mips_can_use_return_insn (void)
10950
{
10951
  /* Interrupt handlers need to go through the epilogue.  */
10952
  if (cfun->machine->interrupt_handler_p)
10953
    return false;
10954
 
10955
  if (!reload_completed)
10956
    return false;
10957
 
10958
  if (crtl->profile)
10959
    return false;
10960
 
10961
  /* In MIPS16 mode, a function that returns a floating-point value
10962
     needs to arrange to copy the return value into the floating-point
10963
     registers.  */
10964
  if (mips16_cfun_returns_in_fpr_p ())
10965
    return false;
10966
 
10967
  return cfun->machine->frame.total_size == 0;
10968
}
10969
 
10970
/* Return true if register REGNO can store a value of mode MODE.
   The result of this function is cached in mips_hard_regno_mode_ok.  */

static bool
mips_hard_regno_mode_ok_p (unsigned int regno, enum machine_mode mode)
{
  unsigned int size;
  enum mode_class mclass;

  /* A CCV2mode value needs a pair of condition-code registers, so the
     first one must be even-aligned within the ST register bank.  */
  if (mode == CCV2mode)
    return (ISA_HAS_8CC
            && ST_REG_P (regno)
            && (regno - ST_REG_FIRST) % 2 == 0);

  /* Likewise CCV4mode needs a four-register group, aligned to four.  */
  if (mode == CCV4mode)
    return (ISA_HAS_8CC
            && ST_REG_P (regno)
            && (regno - ST_REG_FIRST) % 4 == 0);

  if (mode == CCmode)
    {
      /* Without the eight condition-code registers, the only home for
         a CCmode value is the floating-point status word.  */
      if (!ISA_HAS_8CC)
        return regno == FPSW_REGNUM;

      return (ST_REG_P (regno)
              || GP_REG_P (regno)
              || FP_REG_P (regno));
    }

  size = GET_MODE_SIZE (mode);
  mclass = GET_MODE_CLASS (mode);

  /* Multiword values in GPRs must start on an even register number;
     anything that fits in one word can go anywhere.  */
  if (GP_REG_P (regno))
    return ((regno - GP_REG_FIRST) & 1) == 0 || size <= UNITS_PER_WORD;

  if (FP_REG_P (regno)
      && (((regno - FP_REG_FIRST) % MAX_FPRS_PER_FMT) == 0
          || (MIN_FPRS_PER_FMT == 1 && size <= UNITS_PER_FPREG)))
    {
      /* Allow TFmode for CCmode reloads.  */
      if (mode == TFmode && ISA_HAS_8CC)
        return true;

      /* Allow 64-bit vector modes for Loongson-2E/2F.  */
      if (TARGET_LOONGSON_VECTORS
          && (mode == V2SImode
              || mode == V4HImode
              || mode == V8QImode
              || mode == DImode))
        return true;

      if (mclass == MODE_FLOAT
          || mclass == MODE_COMPLEX_FLOAT
          || mclass == MODE_VECTOR_FLOAT)
        return size <= UNITS_PER_FPVALUE;

      /* Allow integer modes that fit into a single register.  We need
         to put integers into FPRs when using instructions like CVT
         and TRUNC.  There's no point allowing sizes smaller than a word,
         because the FPU has no appropriate load/store instructions.  */
      if (mclass == MODE_INT)
        return size >= MIN_UNITS_PER_WORD && size <= UNITS_PER_FPREG;
    }

  if (ACC_REG_P (regno)
      && (INTEGRAL_MODE_P (mode) || ALL_FIXED_POINT_MODE_P (mode)))
    {
      if (MD_REG_P (regno))
        {
          /* After a multiplication or division, clobbering HI makes
             the value of LO unpredictable, and vice versa.  This means
             that, for all interesting cases, HI and LO are effectively
             a single register.

             We model this by requiring that any value that uses HI
             also uses LO.  */
          if (size <= UNITS_PER_WORD * 2)
            return regno == (size <= UNITS_PER_WORD ? LO_REGNUM : MD_REG_FIRST);
        }
      else
        {
          /* DSP accumulators do not have the same restrictions as
             HI and LO, so we can treat them as normal doubleword
             registers.  */
          if (size <= UNITS_PER_WORD)
            return true;

          if (size <= UNITS_PER_WORD * 2
              && ((regno - DSP_ACC_REG_FIRST) & 1) == 0)
            return true;
        }
    }

  /* Coprocessor registers hold single-word integers only.  */
  if (ALL_COP_REG_P (regno))
    return mclass == MODE_INT && size <= UNITS_PER_WORD;

  if (regno == GOT_VERSION_REGNUM)
    return mode == SImode;

  return false;
}
11071
 
11072
/* Implement HARD_REGNO_NREGS.  */
11073
 
11074
unsigned int
11075
mips_hard_regno_nregs (int regno, enum machine_mode mode)
11076
{
11077
  if (ST_REG_P (regno))
11078
    /* The size of FP status registers is always 4, because they only hold
11079
       CCmode values, and CCmode is always considered to be 4 bytes wide.  */
11080
    return (GET_MODE_SIZE (mode) + 3) / 4;
11081
 
11082
  if (FP_REG_P (regno))
11083
    return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG;
11084
 
11085
  /* All other registers are word-sized.  */
11086
  return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
11087
}
11088
 
11089
/* Implement CLASS_MAX_NREGS, taking the maximum of the cases
   in mips_hard_regno_nregs.  */

int
mips_class_max_nregs (enum reg_class rclass, enum machine_mode mode)
{
  int size;
  HARD_REG_SET left;

  /* SIZE tracks the smallest per-register byte capacity seen among the
     registers of RCLASS that can hold MODE; 0x8000 is a sentinel larger
     than any real register.  */
  size = 0x8000;
  COPY_HARD_REG_SET (left, reg_class_contents[(int) rclass]);
  if (hard_reg_set_intersect_p (left, reg_class_contents[(int) ST_REGS]))
    {
      /* FP status registers are 4 bytes wide (they hold CCmode only).  */
      if (HARD_REGNO_MODE_OK (ST_REG_FIRST, mode))
        size = MIN (size, 4);
      AND_COMPL_HARD_REG_SET (left, reg_class_contents[(int) ST_REGS]);
    }
  if (hard_reg_set_intersect_p (left, reg_class_contents[(int) FP_REGS]))
    {
      if (HARD_REGNO_MODE_OK (FP_REG_FIRST, mode))
        size = MIN (size, UNITS_PER_FPREG);
      AND_COMPL_HARD_REG_SET (left, reg_class_contents[(int) FP_REGS]);
    }
  /* Anything left after removing ST and FP registers is word-sized.  */
  if (!hard_reg_set_empty_p (left))
    size = MIN (size, UNITS_PER_WORD);
  /* Round up to a whole number of registers of the smallest size.  */
  return (GET_MODE_SIZE (mode) + size - 1) / size;
}
11116
 
11117
/* Implement CANNOT_CHANGE_MODE_CLASS.  */
11118
 
11119
bool
11120
mips_cannot_change_mode_class (enum machine_mode from,
11121
                               enum machine_mode to,
11122
                               enum reg_class rclass)
11123
{
11124
  /* Allow conversions between different Loongson integer vectors,
11125
     and between those vectors and DImode.  */
11126
  if (GET_MODE_SIZE (from) == 8 && GET_MODE_SIZE (to) == 8
11127
      && INTEGRAL_MODE_P (from) && INTEGRAL_MODE_P (to))
11128
    return false;
11129
 
11130
  /* Otherwise, there are several problems with changing the modes of
11131
     values in floating-point registers:
11132
 
11133
     - When a multi-word value is stored in paired floating-point
11134
       registers, the first register always holds the low word.  We
11135
       therefore can't allow FPRs to change between single-word and
11136
       multi-word modes on big-endian targets.
11137
 
11138
     - GCC assumes that each word of a multiword register can be
11139
       accessed individually using SUBREGs.  This is not true for
11140
       floating-point registers if they are bigger than a word.
11141
 
11142
     - Loading a 32-bit value into a 64-bit floating-point register
11143
       will not sign-extend the value, despite what LOAD_EXTEND_OP
11144
       says.  We can't allow FPRs to change from SImode to a wider
11145
       mode on 64-bit targets.
11146
 
11147
     - If the FPU has already interpreted a value in one format, we
11148
       must not ask it to treat the value as having a different
11149
       format.
11150
 
11151
     We therefore disallow all mode changes involving FPRs.  */
11152
 
11153
  return reg_classes_intersect_p (FP_REGS, rclass);
11154
}
11155
 
11156
/* Implement target hook small_register_classes_for_mode_p.  */
11157
 
11158
static bool
11159
mips_small_register_classes_for_mode_p (enum machine_mode mode
11160
                                        ATTRIBUTE_UNUSED)
11161
{
11162
  return TARGET_MIPS16;
11163
}
11164
 
11165
/* Return true if moves in mode MODE can use the FPU's mov.fmt instruction.  */
11166
 
11167
static bool
11168
mips_mode_ok_for_mov_fmt_p (enum machine_mode mode)
11169
{
11170
  switch (mode)
11171
    {
11172
    case SFmode:
11173
      return TARGET_HARD_FLOAT;
11174
 
11175
    case DFmode:
11176
      return TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT;
11177
 
11178
    case V2SFmode:
11179
      return TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT;
11180
 
11181
    default:
11182
      return false;
11183
    }
11184
}
11185
 
11186
/* Implement MODES_TIEABLE_P.  */
11187
 
11188
bool
11189
mips_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
11190
{
11191
  /* FPRs allow no mode punning, so it's not worth tying modes if we'd
11192
     prefer to put one of them in FPRs.  */
11193
  return (mode1 == mode2
11194
          || (!mips_mode_ok_for_mov_fmt_p (mode1)
11195
              && !mips_mode_ok_for_mov_fmt_p (mode2)));
11196
}
11197
 
11198
/* Implement TARGET_PREFERRED_RELOAD_CLASS.  */

static reg_class_t
mips_preferred_reload_class (rtx x, reg_class_t rclass)
{
  /* Constants that are unsafe for $25 must go to another GPR; prefer
     LEA_REGS whenever the caller's class allows it.  */
  if (mips_dangerous_for_la25_p (x) && reg_class_subset_p (LEA_REGS, rclass))
    return LEA_REGS;

  /* Prefer FPRs for values that the FPU can move with mov.fmt.  */
  if (reg_class_subset_p (FP_REGS, rclass)
      && mips_mode_ok_for_mov_fmt_p (GET_MODE (x)))
    return FP_REGS;

  /* Otherwise narrow toward general registers, and further toward
     M16_REGS in MIPS16 mode.  */
  if (reg_class_subset_p (GR_REGS, rclass))
    rclass = GR_REGS;

  if (TARGET_MIPS16 && reg_class_subset_p (M16_REGS, rclass))
    rclass = M16_REGS;

  return rclass;
}
11218
 
11219
/* RCLASS is a class involved in a REGISTER_MOVE_COST calculation.
11220
   Return a "canonical" class to represent it in later calculations.  */
11221
 
11222
static reg_class_t
11223
mips_canonicalize_move_class (reg_class_t rclass)
11224
{
11225
  /* All moves involving accumulator registers have the same cost.  */
11226
  if (reg_class_subset_p (rclass, ACC_REGS))
11227
    rclass = ACC_REGS;
11228
 
11229
  /* Likewise promote subclasses of general registers to the most
11230
     interesting containing class.  */
11231
  if (TARGET_MIPS16 && reg_class_subset_p (rclass, M16_REGS))
11232
    rclass = M16_REGS;
11233
  else if (reg_class_subset_p (rclass, GENERAL_REGS))
11234
    rclass = GENERAL_REGS;
11235
 
11236
  return rclass;
11237
}
11238
 
11239
/* Return the cost of moving a value of mode MODE from a register of
11240
   class FROM to a GPR.  Return 0 for classes that are unions of other
11241
   classes handled by this function.  */
11242
 
11243
static int
11244
mips_move_to_gpr_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
11245
                       reg_class_t from)
11246
{
11247
  switch (from)
11248
    {
11249
    case GENERAL_REGS:
11250
      /* A MIPS16 MOVE instruction, or a non-MIPS16 MOVE macro.  */
11251
      return 2;
11252
 
11253
    case ACC_REGS:
11254
      /* MFLO and MFHI.  */
11255
      return 6;
11256
 
11257
    case FP_REGS:
11258
      /* MFC1, etc.  */
11259
      return 4;
11260
 
11261
    case ST_REGS:
11262
      /* LUI followed by MOVF.  */
11263
      return 4;
11264
 
11265
    case COP0_REGS:
11266
    case COP2_REGS:
11267
    case COP3_REGS:
11268
      /* This choice of value is historical.  */
11269
      return 5;
11270
 
11271
    default:
11272
      return 0;
11273
    }
11274
}
11275
 
11276
/* Return the cost of moving a value of mode MODE from a GPR to a
11277
   register of class TO.  Return 0 for classes that are unions of
11278
   other classes handled by this function.  */
11279
 
11280
static int
11281
mips_move_from_gpr_cost (enum machine_mode mode, reg_class_t to)
11282
{
11283
  switch (to)
11284
    {
11285
    case GENERAL_REGS:
11286
      /* A MIPS16 MOVE instruction, or a non-MIPS16 MOVE macro.  */
11287
      return 2;
11288
 
11289
    case ACC_REGS:
11290
      /* MTLO and MTHI.  */
11291
      return 6;
11292
 
11293
    case FP_REGS:
11294
      /* MTC1, etc.  */
11295
      return 4;
11296
 
11297
    case ST_REGS:
11298
      /* A secondary reload through an FPR scratch.  */
11299
      return (mips_register_move_cost (mode, GENERAL_REGS, FP_REGS)
11300
              + mips_register_move_cost (mode, FP_REGS, ST_REGS));
11301
 
11302
    case COP0_REGS:
11303
    case COP2_REGS:
11304
    case COP3_REGS:
11305
      /* This choice of value is historical.  */
11306
      return 5;
11307
 
11308
    default:
11309
      return 0;
11310
    }
11311
}
11312
 
11313
/* Implement TARGET_REGISTER_MOVE_COST.  Return 0 for classes that are the
   maximum of the move costs for subclasses; regclass will work out
   the maximum for us.  */

static int
mips_register_move_cost (enum machine_mode mode,
                         reg_class_t from, reg_class_t to)
{
  reg_class_t dregs;
  int cost1, cost2;

  /* Collapse subclasses so the comparisons below only see the
     representative classes.  */
  from = mips_canonicalize_move_class (from);
  to = mips_canonicalize_move_class (to);

  /* Handle moves that can be done without using general-purpose registers.  */
  if (from == FP_REGS)
    {
      if (to == FP_REGS && mips_mode_ok_for_mov_fmt_p (mode))
        /* MOV.FMT.  */
        return 4;
      if (to == ST_REGS)
        /* The sequence generated by mips_expand_fcc_reload.  */
        return 8;
    }

  /* Handle cases in which only one class deviates from the ideal.
     DREGS is the "ideal" (directly accessible) GPR class for this ISA.  */
  dregs = TARGET_MIPS16 ? M16_REGS : GENERAL_REGS;
  if (from == dregs)
    return mips_move_from_gpr_cost (mode, to);
  if (to == dregs)
    return mips_move_to_gpr_cost (mode, from);

  /* Handles cases that require a GPR temporary: the total cost is
     class -> GPR plus GPR -> class, when both legs are known.  */
  cost1 = mips_move_to_gpr_cost (mode, from);
  if (cost1 != 0)
    {
      cost2 = mips_move_from_gpr_cost (mode, to);
      if (cost2 != 0)
        return cost1 + cost2;
    }

  /* 0 means "union class"; regclass maximizes over the members.  */
  return 0;
}
11356
 
11357
/* Implement TARGET_MEMORY_MOVE_COST.  */
11358
 
11359
static int
11360
mips_memory_move_cost (enum machine_mode mode, reg_class_t rclass, bool in)
11361
{
11362
  return (mips_cost->memory_latency
11363
          + memory_move_secondary_cost (mode, rclass, in));
11364
}
11365
 
11366
/* Return the register class required for a secondary register when
   copying between one of the registers in RCLASS and value X, which
   has mode MODE.  X is the source of the move if IN_P, otherwise it
   is the destination.  Return NO_REGS if no secondary register is
   needed.  */

enum reg_class
mips_secondary_reload_class (enum reg_class rclass,
                             enum machine_mode mode, rtx x, bool in_p)
{
  int regno;

  /* If X is a constant that cannot be loaded into $25, it must be loaded
     into some other GPR.  No other register class allows a direct move.  */
  if (mips_dangerous_for_la25_p (x))
    return reg_class_subset_p (rclass, LEA_REGS) ? NO_REGS : LEA_REGS;

  /* NOTE(review): when X is not a register, true_regnum is assumed to
     yield a value that fails every *_REG_P test below -- confirm.  */
  regno = true_regnum (x);
  if (TARGET_MIPS16)
    {
      /* In MIPS16 mode, every move must involve a member of M16_REGS.  */
      if (!reg_class_subset_p (rclass, M16_REGS) && !M16_REG_P (regno))
        return M16_REGS;

      return NO_REGS;
    }

  /* Copying from accumulator registers to anywhere other than a general
     register requires a temporary general register.  */
  if (reg_class_subset_p (rclass, ACC_REGS))
    return GP_REG_P (regno) ? NO_REGS : GR_REGS;
  if (ACC_REG_P (regno))
    return reg_class_subset_p (rclass, GR_REGS) ? NO_REGS : GR_REGS;

  /* We can only copy a value to a condition code register from a
     floating-point register, and even then we require a scratch
     floating-point register.  We can only copy a value out of a
     condition-code register into a general register.  */
  if (reg_class_subset_p (rclass, ST_REGS))
    {
      if (in_p)
        return FP_REGS;
      return GP_REG_P (regno) ? NO_REGS : GR_REGS;
    }
  if (ST_REG_P (regno))
    {
      if (!in_p)
        return FP_REGS;
      return reg_class_subset_p (rclass, GR_REGS) ? NO_REGS : GR_REGS;
    }

  if (reg_class_subset_p (rclass, FP_REGS))
    {
      if (MEM_P (x)
          && (GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8))
        /* In this case we can use lwc1, swc1, ldc1 or sdc1.  We'll use
           pairs of lwc1s and swc1s if ldc1 and sdc1 are not supported.  */
        return NO_REGS;

      if (GP_REG_P (regno) || x == CONST0_RTX (mode))
        /* In this case we can use mtc1, mfc1, dmtc1 or dmfc1.  */
        return NO_REGS;

      if (CONSTANT_P (x) && !targetm.cannot_force_const_mem (mode, x))
        /* We can force the constant to memory and use lwc1
           and ldc1.  As above, we will use pairs of lwc1s if
           ldc1 is not supported.  */
        return NO_REGS;

      if (FP_REG_P (regno) && mips_mode_ok_for_mov_fmt_p (mode))
        /* In this case we can use mov.fmt.  */
        return NO_REGS;

      /* Otherwise, we need to reload through an integer register.  */
      return GR_REGS;
    }
  /* FPR -> non-GPR needs a GPR intermediary.  */
  if (FP_REG_P (regno))
    return reg_class_subset_p (rclass, GR_REGS) ? NO_REGS : GR_REGS;

  return NO_REGS;
}
11447
 
11448
/* Implement TARGET_MODE_REP_EXTENDED.  */
11449
 
11450
static int
11451
mips_mode_rep_extended (enum machine_mode mode, enum machine_mode mode_rep)
11452
{
11453
  /* On 64-bit targets, SImode register values are sign-extended to DImode.  */
11454
  if (TARGET_64BIT && mode == SImode && mode_rep == DImode)
11455
    return SIGN_EXTEND;
11456
 
11457
  return UNKNOWN;
11458
}
11459
 
11460
/* Implement TARGET_VALID_POINTER_MODE.  */
11461
 
11462
static bool
11463
mips_valid_pointer_mode (enum machine_mode mode)
11464
{
11465
  return mode == SImode || (TARGET_64BIT && mode == DImode);
11466
}
11467
 
11468
/* Implement TARGET_VECTOR_MODE_SUPPORTED_P.  */
11469
 
11470
static bool
11471
mips_vector_mode_supported_p (enum machine_mode mode)
11472
{
11473
  switch (mode)
11474
    {
11475
    case V2SFmode:
11476
      return TARGET_PAIRED_SINGLE_FLOAT;
11477
 
11478
    case V2HImode:
11479
    case V4QImode:
11480
    case V2HQmode:
11481
    case V2UHQmode:
11482
    case V2HAmode:
11483
    case V2UHAmode:
11484
    case V4QQmode:
11485
    case V4UQQmode:
11486
      return TARGET_DSP;
11487
 
11488
    case V2SImode:
11489
    case V4HImode:
11490
    case V8QImode:
11491
      return TARGET_LOONGSON_VECTORS;
11492
 
11493
    default:
11494
      return false;
11495
    }
11496
}
11497
 
11498
/* Implement TARGET_SCALAR_MODE_SUPPORTED_P.  */
11499
 
11500
static bool
11501
mips_scalar_mode_supported_p (enum machine_mode mode)
11502
{
11503
  if (ALL_FIXED_POINT_MODE_P (mode)
11504
      && GET_MODE_PRECISION (mode) <= 2 * BITS_PER_WORD)
11505
    return true;
11506
 
11507
  return default_scalar_mode_supported_p (mode);
11508
}
11509
 
11510
/* Implement TARGET_VECTORIZE_PREFERRED_SIMD_MODE.  */
11511
 
11512
static enum machine_mode
11513
mips_preferred_simd_mode (enum machine_mode mode ATTRIBUTE_UNUSED)
11514
{
11515
  if (TARGET_PAIRED_SINGLE_FLOAT
11516
      && mode == SFmode)
11517
    return V2SFmode;
11518
  return word_mode;
11519
}
11520
 
11521
/* Implement TARGET_INIT_LIBFUNCS.  Register MIPS-specific library
   routines: VR4120 division workarounds, MIPS16 hard-float stubs, and
   out-of-line synchronization helpers for MIPS16.  */

static void
mips_init_libfuncs (void)
{
  if (TARGET_FIX_VR4120)
    {
      /* Register the special divsi3 and modsi3 functions needed to work
         around VR4120 division errata.  */
      set_optab_libfunc (sdiv_optab, SImode, "__vr4120_divsi3");
      set_optab_libfunc (smod_optab, SImode, "__vr4120_modsi3");
    }

  if (TARGET_MIPS16 && TARGET_HARD_FLOAT_ABI)
    {
      /* Register the MIPS16 -mhard-float stubs.  */
      set_optab_libfunc (add_optab, SFmode, "__mips16_addsf3");
      set_optab_libfunc (sub_optab, SFmode, "__mips16_subsf3");
      set_optab_libfunc (smul_optab, SFmode, "__mips16_mulsf3");
      set_optab_libfunc (sdiv_optab, SFmode, "__mips16_divsf3");

      /* Single-precision comparisons.  */
      set_optab_libfunc (eq_optab, SFmode, "__mips16_eqsf2");
      set_optab_libfunc (ne_optab, SFmode, "__mips16_nesf2");
      set_optab_libfunc (gt_optab, SFmode, "__mips16_gtsf2");
      set_optab_libfunc (ge_optab, SFmode, "__mips16_gesf2");
      set_optab_libfunc (lt_optab, SFmode, "__mips16_ltsf2");
      set_optab_libfunc (le_optab, SFmode, "__mips16_lesf2");
      set_optab_libfunc (unord_optab, SFmode, "__mips16_unordsf2");

      /* Single-precision conversions.  */
      set_conv_libfunc (sfix_optab, SImode, SFmode, "__mips16_fix_truncsfsi");
      set_conv_libfunc (sfloat_optab, SFmode, SImode, "__mips16_floatsisf");
      set_conv_libfunc (ufloat_optab, SFmode, SImode, "__mips16_floatunsisf");

      if (TARGET_DOUBLE_FLOAT)
        {
          /* Double-precision arithmetic.  */
          set_optab_libfunc (add_optab, DFmode, "__mips16_adddf3");
          set_optab_libfunc (sub_optab, DFmode, "__mips16_subdf3");
          set_optab_libfunc (smul_optab, DFmode, "__mips16_muldf3");
          set_optab_libfunc (sdiv_optab, DFmode, "__mips16_divdf3");

          /* Double-precision comparisons.  */
          set_optab_libfunc (eq_optab, DFmode, "__mips16_eqdf2");
          set_optab_libfunc (ne_optab, DFmode, "__mips16_nedf2");
          set_optab_libfunc (gt_optab, DFmode, "__mips16_gtdf2");
          set_optab_libfunc (ge_optab, DFmode, "__mips16_gedf2");
          set_optab_libfunc (lt_optab, DFmode, "__mips16_ltdf2");
          set_optab_libfunc (le_optab, DFmode, "__mips16_ledf2");
          set_optab_libfunc (unord_optab, DFmode, "__mips16_unorddf2");

          /* Double-precision conversions.  */
          set_conv_libfunc (sext_optab, DFmode, SFmode,
                            "__mips16_extendsfdf2");
          set_conv_libfunc (trunc_optab, SFmode, DFmode,
                            "__mips16_truncdfsf2");
          set_conv_libfunc (sfix_optab, SImode, DFmode,
                            "__mips16_fix_truncdfsi");
          set_conv_libfunc (sfloat_optab, DFmode, SImode,
                            "__mips16_floatsidf");
          set_conv_libfunc (ufloat_optab, DFmode, SImode,
                            "__mips16_floatunsidf");
        }
    }

  /* The MIPS16 ISA does not have an encoding for "sync", so we rely
     on an external non-MIPS16 routine to implement __sync_synchronize.
     Similarly for the rest of the ll/sc libfuncs.  */
  if (TARGET_MIPS16)
    {
      synchronize_libfunc = init_one_libfunc ("__sync_synchronize");
      init_sync_libfuncs (UNITS_PER_WORD);
    }
}
11591
 
11592
/* Build up a multi-insn sequence that loads label TARGET into $AT.  */

static void
mips_process_load_label (rtx target)
{
  rtx base, gp, intop;
  HOST_WIDE_INT offset;

  mips_multi_start ();
  switch (mips_abi)
    {
    case ABI_N32:
      /* n32: GOT page + offset, 32-bit arithmetic.  */
      mips_multi_add_insn ("lw\t%@,%%got_page(%0)(%+)", target, 0);
      mips_multi_add_insn ("addiu\t%@,%@,%%got_ofst(%0)", target, 0);
      break;

    case ABI_64:
      /* n64: same scheme with 64-bit loads and adds.  */
      mips_multi_add_insn ("ld\t%@,%%got_page(%0)(%+)", target, 0);
      mips_multi_add_insn ("daddiu\t%@,%@,%%got_ofst(%0)", target, 0);
      break;

    default:
      /* o32 and friends: go through the GOT register.  If $gp was
         spilled to the cprestore slot, reload it into $AT first.  */
      gp = pic_offset_table_rtx;
      if (mips_cfun_has_cprestore_slot_p ())
        {
          gp = gen_rtx_REG (Pmode, AT_REGNUM);
          mips_get_cprestore_base_and_offset (&base, &offset, true);
          if (!SMALL_OPERAND (offset))
            {
              /* The slot offset does not fit in 16 bits; build the
                 high part into GP and keep only the low part.  */
              intop = GEN_INT (CONST_HIGH_PART (offset));
              mips_multi_add_insn ("lui\t%0,%1", gp, intop, 0);
              mips_multi_add_insn ("addu\t%0,%0,%1", gp, base, 0);

              base = gp;
              offset = CONST_LOW_PART (offset);
            }
          intop = GEN_INT (offset);
          /* %# appends a nop when the ISA has a load delay slot.  */
          if (ISA_HAS_LOAD_DELAY)
            mips_multi_add_insn ("lw\t%0,%1(%2)%#", gp, intop, base, 0);
          else
            mips_multi_add_insn ("lw\t%0,%1(%2)", gp, intop, base, 0);
        }
      if (ISA_HAS_LOAD_DELAY)
        mips_multi_add_insn ("lw\t%@,%%got(%0)(%1)%#", target, gp, 0);
      else
        mips_multi_add_insn ("lw\t%@,%%got(%0)(%1)", target, gp, 0);
      mips_multi_add_insn ("addiu\t%@,%@,%%lo(%0)", target, 0);
      break;
    }
}
11642
 
11643
/* Return the number of instructions needed to load a label into $AT.  */
11644
 
11645
static unsigned int
11646
mips_load_label_num_insns (void)
11647
{
11648
  if (cfun->machine->load_label_num_insns == 0)
11649
    {
11650
      mips_process_load_label (pc_rtx);
11651
      cfun->machine->load_label_num_insns = mips_multi_num_insns;
11652
    }
11653
  return cfun->machine->load_label_num_insns;
11654
}
11655
 
11656
/* Emit an asm sequence to start a noat block and load the address
11657
   of a label into $1.  */
11658
 
11659
void
11660
mips_output_load_label (rtx target)
11661
{
11662
  mips_push_asm_switch (&mips_noat);
11663
  if (TARGET_EXPLICIT_RELOCS)
11664
    {
11665
      mips_process_load_label (target);
11666
      mips_multi_write ();
11667
    }
11668
  else
11669
    {
11670
      if (Pmode == DImode)
11671
        output_asm_insn ("dla\t%@,%0", &target);
11672
      else
11673
        output_asm_insn ("la\t%@,%0", &target);
11674
    }
11675
}
11676
 
11677
/* Return the length of INSN.  LENGTH is the initial length computed by
   attributes in the machine-description file.  */

int
mips_adjust_insn_length (rtx insn, int length)
{
  /* mips.md uses MAX_PIC_BRANCH_LENGTH as a placeholder for the length
     of a PIC long-branch sequence.  Substitute the correct value.  */
  if (length == MAX_PIC_BRANCH_LENGTH
      && INSN_CODE (insn) >= 0
      && get_attr_type (insn) == TYPE_BRANCH)
    {
      /* Add the branch-over instruction and its delay slot, if this
         is a conditional branch.  */
      length = simplejump_p (insn) ? 0 : 8;

      /* Load the label into $AT and jump to it.  Ignore the delay
         slot of the jump.  */
      length += 4 * mips_load_label_num_insns() + 4;
    }

  /* An unconditional jump has an unfilled delay slot if it is not part
     of a sequence.  A conditional jump normally has a delay slot, but
     does not on MIPS16.  */
  if (CALL_P (insn) || (TARGET_MIPS16 ? simplejump_p (insn) : JUMP_P (insn)))
    length += 4;

  /* See how many nops might be needed to avoid hardware hazards.  */
  if (!cfun->machine->ignore_hazard_length_p && INSN_CODE (insn) >= 0)
    switch (get_attr_hazard (insn))
      {
      case HAZARD_NONE:
        break;

      case HAZARD_DELAY:
        /* One nop.  */
        length += 4;
        break;

      case HAZARD_HILO:
        /* Two nops.  */
        length += 8;
        break;
      }

  /* In order to make it easier to share MIPS16 and non-MIPS16 patterns,
     the .md file length attributes are 4-based for both modes.
     Adjust the MIPS16 ones here.  */
  if (TARGET_MIPS16)
    length /= 2;

  return length;
}
11728
 
11729
/* Return the assembly code for INSN, which has the operands given by
11730
   OPERANDS, and which branches to OPERANDS[0] if some condition is true.
11731
   BRANCH_IF_TRUE is the asm template that should be used if OPERANDS[0]
11732
   is in range of a direct branch.  BRANCH_IF_FALSE is an inverted
11733
   version of BRANCH_IF_TRUE.  */
11734
 
11735
const char *
11736
mips_output_conditional_branch (rtx insn, rtx *operands,
11737
                                const char *branch_if_true,
11738
                                const char *branch_if_false)
11739
{
11740
  unsigned int length;
11741
  rtx taken, not_taken;
11742
 
11743
  gcc_assert (LABEL_P (operands[0]));
11744
 
11745
  length = get_attr_length (insn);
11746
  if (length <= 8)
11747
    {
11748
      /* Just a simple conditional branch.  */
11749
      mips_branch_likely = (final_sequence && INSN_ANNULLED_BRANCH_P (insn));
11750
      return branch_if_true;
11751
    }
11752
 
11753
  /* Generate a reversed branch around a direct jump.  This fallback does
11754
     not use branch-likely instructions.  */
11755
  mips_branch_likely = false;
11756
  not_taken = gen_label_rtx ();
11757
  taken = operands[0];
11758
 
11759
  /* Generate the reversed branch to NOT_TAKEN.  */
11760
  operands[0] = not_taken;
11761
  output_asm_insn (branch_if_false, operands);
11762
 
11763
  /* If INSN has a delay slot, we must provide delay slots for both the
11764
     branch to NOT_TAKEN and the conditional jump.  We must also ensure
11765
     that INSN's delay slot is executed in the appropriate cases.  */
11766
  if (final_sequence)
11767
    {
11768
      /* This first delay slot will always be executed, so use INSN's
11769
         delay slot if is not annulled.  */
11770
      if (!INSN_ANNULLED_BRANCH_P (insn))
11771
        {
11772
          final_scan_insn (XVECEXP (final_sequence, 0, 1),
11773
                           asm_out_file, optimize, 1, NULL);
11774
          INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
11775
        }
11776
      else
11777
        output_asm_insn ("nop", 0);
11778
      fprintf (asm_out_file, "\n");
11779
    }
11780
 
11781
  /* Output the unconditional branch to TAKEN.  */
11782
  if (TARGET_ABSOLUTE_JUMPS)
11783
    output_asm_insn (MIPS_ABSOLUTE_JUMP ("j\t%0%/"), &taken);
11784
  else
11785
    {
11786
      mips_output_load_label (taken);
11787
      output_asm_insn ("jr\t%@%]%/", 0);
11788
    }
11789
 
11790
  /* Now deal with its delay slot; see above.  */
11791
  if (final_sequence)
11792
    {
11793
      /* This delay slot will only be executed if the branch is taken.
11794
         Use INSN's delay slot if is annulled.  */
11795
      if (INSN_ANNULLED_BRANCH_P (insn))
11796
        {
11797
          final_scan_insn (XVECEXP (final_sequence, 0, 1),
11798
                           asm_out_file, optimize, 1, NULL);
11799
          INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
11800
        }
11801
      else
11802
        output_asm_insn ("nop", 0);
11803
      fprintf (asm_out_file, "\n");
11804
    }
11805
 
11806
  /* Output NOT_TAKEN.  */
11807
  targetm.asm_out.internal_label (asm_out_file, "L",
11808
                                  CODE_LABEL_NUMBER (not_taken));
11809
  return "";
11810
}
11811
 
11812
/* Return the assembly code for INSN, which branches to OPERANDS[0]
11813
   if some ordering condition is true.  The condition is given by
11814
   OPERANDS[1] if !INVERTED_P, otherwise it is the inverse of
11815
   OPERANDS[1].  OPERANDS[2] is the comparison's first operand;
11816
   its second is always zero.  */
11817
 
11818
const char *
11819
mips_output_order_conditional_branch (rtx insn, rtx *operands, bool inverted_p)
11820
{
11821
  const char *branch[2];
11822
 
11823
  /* Make BRANCH[1] branch to OPERANDS[0] when the condition is true.
11824
     Make BRANCH[0] branch on the inverse condition.  */
11825
  switch (GET_CODE (operands[1]))
11826
    {
11827
      /* These cases are equivalent to comparisons against zero.  */
11828
    case LEU:
11829
      inverted_p = !inverted_p;
11830
      /* Fall through.  */
11831
    case GTU:
11832
      branch[!inverted_p] = MIPS_BRANCH ("bne", "%2,%.,%0");
11833
      branch[inverted_p] = MIPS_BRANCH ("beq", "%2,%.,%0");
11834
      break;
11835
 
11836
      /* These cases are always true or always false.  */
11837
    case LTU:
11838
      inverted_p = !inverted_p;
11839
      /* Fall through.  */
11840
    case GEU:
11841
      branch[!inverted_p] = MIPS_BRANCH ("beq", "%.,%.,%0");
11842
      branch[inverted_p] = MIPS_BRANCH ("bne", "%.,%.,%0");
11843
      break;
11844
 
11845
    default:
11846
      branch[!inverted_p] = MIPS_BRANCH ("b%C1z", "%2,%0");
11847
      branch[inverted_p] = MIPS_BRANCH ("b%N1z", "%2,%0");
11848
      break;
11849
    }
11850
  return mips_output_conditional_branch (insn, operands, branch[1], branch[0]);
11851
}
11852
 
11853
/* Start a block of code that needs access to the LL, SC and SYNC
11854
   instructions.  */
11855
 
11856
static void
11857
mips_start_ll_sc_sync_block (void)
11858
{
11859
  if (!ISA_HAS_LL_SC)
11860
    {
11861
      output_asm_insn (".set\tpush", 0);
11862
      output_asm_insn (".set\tmips2", 0);
11863
    }
11864
}
11865
 
11866
/* End a block started by mips_start_ll_sc_sync_block.  */

static void
mips_end_ll_sc_sync_block (void)
{
  /* Restore the assembler's ISA setting if the start of the block
     had to override it.  */
  if (!ISA_HAS_LL_SC)
    output_asm_insn (".set\tpop", 0);
}
11874
 
11875
/* Output and/or return the asm template for a sync instruction.  */

const char *
mips_output_sync (void)
{
  /* SYNC may require temporarily selecting an ISA that has it.  */
  mips_start_ll_sc_sync_block ();
  output_asm_insn ("sync", 0);
  mips_end_ll_sc_sync_block ();
  return "";
}
11885
 
11886
/* Return the asm template associated with sync_insn1 value TYPE.
   IS_64BIT_P is true if we want a 64-bit rather than 32-bit operation.  */

static const char *
mips_sync_insn1_template (enum attr_sync_insn1 type, bool is_64bit_p)
{
  switch (type)
    {
    case SYNC_INSN1_MOVE:
      return "move\t%0,%z2";
    case SYNC_INSN1_LI:
      return "li\t%0,%2";
    case SYNC_INSN1_ADDU:
      return is_64bit_p ? "daddu\t%0,%1,%z2" : "addu\t%0,%1,%z2";
    case SYNC_INSN1_ADDIU:
      return is_64bit_p ? "daddiu\t%0,%1,%2" : "addiu\t%0,%1,%2";
    case SYNC_INSN1_SUBU:
      return is_64bit_p ? "dsubu\t%0,%1,%z2" : "subu\t%0,%1,%z2";
    case SYNC_INSN1_AND:
      return "and\t%0,%1,%z2";
    case SYNC_INSN1_ANDI:
      return "andi\t%0,%1,%2";
    case SYNC_INSN1_OR:
      return "or\t%0,%1,%z2";
    case SYNC_INSN1_ORI:
      return "ori\t%0,%1,%2";
    case SYNC_INSN1_XOR:
      return "xor\t%0,%1,%z2";
    case SYNC_INSN1_XORI:
      return "xori\t%0,%1,%2";
    }
  /* All enumeration values are handled above.  */
  gcc_unreachable ();
}
11919
 
11920
/* Return the asm template associated with sync_insn2 value TYPE.
   SYNC_INSN2_NOP has no template; callers must filter it out first.  */

static const char *
mips_sync_insn2_template (enum attr_sync_insn2 type)
{
  switch (type)
    {
    case SYNC_INSN2_NOP:
      gcc_unreachable ();
    case SYNC_INSN2_AND:
      return "and\t%0,%1,%z2";
    case SYNC_INSN2_XOR:
      return "xor\t%0,%1,%z2";
    case SYNC_INSN2_NOT:
      return "nor\t%0,%1,%.";
    }
  /* All enumeration values are handled above.  */
  gcc_unreachable ();
}
11938
 
11939
/* OPERANDS are the operands to a sync loop instruction and INDEX is
11940
   the value of the one of the sync_* attributes.  Return the operand
11941
   referred to by the attribute, or DEFAULT_VALUE if the insn doesn't
11942
   have the associated attribute.  */
11943
 
11944
static rtx
11945
mips_get_sync_operand (rtx *operands, int index, rtx default_value)
11946
{
11947
  if (index > 0)
11948
    default_value = operands[index - 1];
11949
  return default_value;
11950
}
11951
 
11952
/* INSN is a sync loop with operands OPERANDS.  Build up a multi-insn
   sequence for it.  */

static void
mips_process_sync_loop (rtx insn, rtx *operands)
{
  rtx at, mem, oldval, newval, inclusive_mask, exclusive_mask;
  rtx required_oldval, insn1_op2, tmp1, tmp2, tmp3;
  unsigned int tmp3_insn;
  enum attr_sync_insn1 insn1;
  enum attr_sync_insn2 insn2;
  bool is_64bit_p;

  /* Fetch the operand named by the sync_WHAT attribute into variable
     WHAT, falling back to DEFAULT if the insn lacks the attribute.  */
#define READ_OPERAND(WHAT, DEFAULT) \
  WHAT = mips_get_sync_operand (operands, (int) get_attr_sync_##WHAT (insn), \
                                DEFAULT)

  /* Read the memory.  */
  READ_OPERAND (mem, 0);
  gcc_assert (mem);
  is_64bit_p = (GET_MODE_BITSIZE (GET_MODE (mem)) == 64);

  /* Read the other attributes.  */
  at = gen_rtx_REG (GET_MODE (mem), AT_REGNUM);
  READ_OPERAND (oldval, at);
  READ_OPERAND (newval, at);
  READ_OPERAND (inclusive_mask, 0);
  READ_OPERAND (exclusive_mask, 0);
  READ_OPERAND (required_oldval, 0);
  READ_OPERAND (insn1_op2, 0);
  insn1 = get_attr_sync_insn1 (insn);
  insn2 = get_attr_sync_insn2 (insn);

  mips_multi_start ();

  /* Output the release side of the memory barrier.  */
  if (get_attr_sync_release_barrier (insn) == SYNC_RELEASE_BARRIER_YES)
    {
      if (required_oldval == 0 && TARGET_OCTEON)
        {
          /* Octeon doesn't reorder reads, so SYNCW (which orders
             writes) combined with the write from the following SC
             forms a full barrier: once the SC succeeds, all preceding
             writes are committed to the coherent memory system.  A
             single SYNCW can fail, but a pair of them never will,
             hence two.  */
          mips_multi_add_insn ("syncw", NULL);
          mips_multi_add_insn ("syncw", NULL);
        }
      else
        mips_multi_add_insn ("sync", NULL);
    }

  /* Output the branch-back label.  */
  mips_multi_add_label ("1:");

  /* OLDVAL = *MEM.  */
  mips_multi_add_insn (is_64bit_p ? "lld\t%0,%1" : "ll\t%0,%1",
                       oldval, mem, NULL);

  /* if ((OLDVAL & INCLUSIVE_MASK) != REQUIRED_OLDVAL) goto 2.  */
  if (required_oldval)
    {
      if (inclusive_mask == 0)
        tmp1 = oldval;
      else
        {
          gcc_assert (oldval != at);
          mips_multi_add_insn ("and\t%0,%1,%2",
                               at, oldval, inclusive_mask, NULL);
          tmp1 = at;
        }
      mips_multi_add_insn ("bne\t%0,%z1,2f", tmp1, required_oldval, NULL);
    }

  /* $TMP1 = OLDVAL & EXCLUSIVE_MASK.  */
  if (exclusive_mask == 0)
    tmp1 = const0_rtx;
  else
    {
      gcc_assert (oldval != at);
      mips_multi_add_insn ("and\t%0,%1,%z2",
                           at, oldval, exclusive_mask, NULL);
      tmp1 = at;
    }

  /* $TMP2 = INSN1 (OLDVAL, INSN1_OP2).

     A plain move can be elided when $TMP1 != INSN1_OP2, since at
     least one instruction will still be emitted below.  */
  if (insn1 == SYNC_INSN1_MOVE
      && (tmp1 != const0_rtx || insn2 != SYNC_INSN2_NOP))
    tmp2 = insn1_op2;
  else
    {
      mips_multi_add_insn (mips_sync_insn1_template (insn1, is_64bit_p),
                           newval, oldval, insn1_op2, NULL);
      tmp2 = newval;
    }

  /* $TMP3 = INSN2 ($TMP2, INCLUSIVE_MASK).  */
  if (insn2 == SYNC_INSN2_NOP)
    tmp3 = tmp2;
  else
    {
      mips_multi_add_insn (mips_sync_insn2_template (insn2),
                           newval, tmp2, inclusive_mask, NULL);
      tmp3 = newval;
    }
  /* Remember this insn so its destination can be retargeted later.  */
  tmp3_insn = mips_multi_last_index ();

  /* $AT = $TMP1 | $TMP3.  */
  if (tmp1 == const0_rtx || tmp3 == const0_rtx)
    {
      /* One side is zero: retarget the TMP3 insn straight into $AT
         rather than emitting an OR.  */
      mips_multi_set_operand (tmp3_insn, 0, at);
      tmp3 = at;
    }
  else
    {
      gcc_assert (tmp1 != tmp3);
      mips_multi_add_insn ("or\t%0,%1,%2", at, tmp1, tmp3, NULL);
    }

  /* if (!commit (*MEM = $AT)) goto 1.

     This will sometimes be a delayed branch; see the write code below
     for details.  */
  mips_multi_add_insn (is_64bit_p ? "scd\t%0,%1" : "sc\t%0,%1", at, mem, NULL);
  mips_multi_add_insn ("beq%?\t%0,%.,1b", at, NULL);

  /* if (INSN1 != MOVE && INSN1 != LI) NEWVAL = $TMP3 [delay slot].  */
  if (insn1 != SYNC_INSN1_MOVE && insn1 != SYNC_INSN1_LI && tmp3 != newval)
    {
      mips_multi_copy_insn (tmp3_insn);
      mips_multi_set_operand (mips_multi_last_index (), 0, newval);
    }
  else
    mips_multi_add_insn ("nop", NULL);

  /* Output the acquire side of the memory barrier.  */
  if (TARGET_SYNC_AFTER_SC)
    mips_multi_add_insn ("sync", NULL);

  /* Output the exit label, if needed.  */
  if (required_oldval)
    mips_multi_add_label ("2:");

#undef READ_OPERAND
}
12105
 
12106
/* Output and/or return the asm template for sync loop INSN, which has
   the operands given by OPERANDS.  */

const char *
mips_output_sync_loop (rtx insn, rtx *operands)
{
  /* Build the instruction sequence into the mips_multi buffer.  */
  mips_process_sync_loop (insn, operands);

  /* Use branch-likely instructions to work around the LL/SC R10000
     errata.  */
  mips_branch_likely = TARGET_FIX_R10000;

  /* Emit the loop verbatim: no assembler reordering, no macro
     expansion, and with $AT reserved for our own use.  */
  mips_push_asm_switch (&mips_noreorder);
  mips_push_asm_switch (&mips_nomacro);
  mips_push_asm_switch (&mips_noat);
  mips_start_ll_sc_sync_block ();

  mips_multi_write ();

  /* Pop the switches in the reverse of the order they were pushed.  */
  mips_end_ll_sc_sync_block ();
  mips_pop_asm_switch (&mips_noat);
  mips_pop_asm_switch (&mips_nomacro);
  mips_pop_asm_switch (&mips_noreorder);

  return "";
}
12132
 
12133
/* Return the number of individual instructions in sync loop INSN,
   which has the operands given by OPERANDS.  */

unsigned int
mips_sync_loop_insns (rtx insn, rtx *operands)
{
  /* Building the sequence records its length as a side effect.  */
  mips_process_sync_loop (insn, operands);
  return mips_multi_num_insns;
}
12142
 
12143
/* Return the assembly code for DIV or DDIV instruction DIVISION, which has
12144
   the operands given by OPERANDS.  Add in a divide-by-zero check if needed.
12145
 
12146
   When working around R4000 and R4400 errata, we need to make sure that
12147
   the division is not immediately followed by a shift[1][2].  We also
12148
   need to stop the division from being put into a branch delay slot[3].
12149
   The easiest way to avoid both problems is to add a nop after the
12150
   division.  When a divide-by-zero check is needed, this nop can be
12151
   used to fill the branch delay slot.
12152
 
12153
   [1] If a double-word or a variable shift executes immediately
12154
       after starting an integer division, the shift may give an
12155
       incorrect result.  See quotations of errata #16 and #28 from
12156
       "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
12157
       in mips.md for details.
12158
 
12159
   [2] A similar bug to [1] exists for all revisions of the
12160
       R4000 and the R4400 when run in an MC configuration.
12161
       From "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0":
12162
 
12163
       "19. In this following sequence:
12164
 
12165
                    ddiv                (or ddivu or div or divu)
12166
                    dsll32              (or dsrl32, dsra32)
12167
 
12168
            if an MPT stall occurs, while the divide is slipping the cpu
12169
            pipeline, then the following double shift would end up with an
12170
            incorrect result.
12171
 
12172
            Workaround: The compiler needs to avoid generating any
12173
            sequence with divide followed by extended double shift."
12174
 
12175
       This erratum is also present in "MIPS R4400MC Errata, Processor
12176
       Revision 1.0" and "MIPS R4400MC Errata, Processor Revision 2.0
12177
       & 3.0" as errata #10 and #4, respectively.
12178
 
12179
   [3] From "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
12180
       (also valid for MIPS R4000MC processors):
12181
 
12182
       "52. R4000SC: This bug does not apply for the R4000PC.
12183
 
12184
            There are two flavors of this bug:
12185
 
12186
            1) If the instruction just after divide takes an RF exception
12187
               (tlb-refill, tlb-invalid) and gets an instruction cache
12188
               miss (both primary and secondary) and the line which is
12189
               currently in secondary cache at this index had the first
12190
               data word, where the bits 5..2 are set, then R4000 would
12191
               get a wrong result for the div.
12192
 
12193
            ##1
12194
                    nop
12195
                    div r8, r9
12196
                    -------------------         # end-of page. -tlb-refill
12197
                    nop
12198
            ##2
12199
                    nop
12200
                    div r8, r9
12201
                    -------------------         # end-of page. -tlb-invalid
12202
                    nop
12203
 
12204
            2) If the divide is in the taken branch delay slot, where the
12205
               target takes RF exception and gets an I-cache miss for the
12206
               exception vector or where I-cache miss occurs for the
12207
               target address, under the above mentioned scenarios, the
12208
               div would get wrong results.
12209
 
12210
            ##1
12211
                    j   r2              # to next page mapped or unmapped
12212
                    div r8,r9           # this bug would be there as long
12213
                                        # as there is an ICache miss and
12214
                    nop                 # the "data pattern" is present
12215
 
12216
            ##2
12217
                    beq r0, r0, NextPage        # to Next page
12218
                    div r8,r9
12219
                    nop
12220
 
12221
            This bug is present for div, divu, ddiv, and ddivu
12222
            instructions.
12223
 
12224
            Workaround: For item 1), OS could make sure that the next page
12225
            after the divide instruction is also mapped.  For item 2), the
12226
            compiler could make sure that the divide instruction is not in
12227
            the branch delay slot."
12228
 
12229
       These processors have PRId values of 0x00004220 and 0x00004300 for
12230
       the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400.  */
12231
 
12232
const char *
12233
mips_output_division (const char *division, rtx *operands)
12234
{
12235
  const char *s;
12236
 
12237
  s = division;
12238
  if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
12239
    {
12240
      output_asm_insn (s, operands);
12241
      s = "nop";
12242
    }
12243
  if (TARGET_CHECK_ZERO_DIV)
12244
    {
12245
      if (TARGET_MIPS16)
12246
        {
12247
          output_asm_insn (s, operands);
12248
          s = "bnez\t%2,1f\n\tbreak\t7\n1:";
12249
        }
12250
      else if (GENERATE_DIVIDE_TRAPS)
12251
        {
12252
          /* Avoid long replay penalty on load miss by putting the trap before
12253
             the divide.  */
12254
          if (TUNE_74K)
12255
            output_asm_insn ("teq\t%2,%.,7", operands);
12256
          else
12257
            {
12258
              output_asm_insn (s, operands);
12259
              s = "teq\t%2,%.,7";
12260
            }
12261
        }
12262
      else
12263
        {
12264
          output_asm_insn ("%(bne\t%2,%.,1f", operands);
12265
          output_asm_insn (s, operands);
12266
          s = "break\t7%)\n1:";
12267
        }
12268
    }
12269
  return s;
12270
}
12271
 
12272
/* Return true if IN_INSN is a multiply-add or multiply-subtract
12273
   instruction and if OUT_INSN assigns to the accumulator operand.  */
12274
 
12275
bool
12276
mips_linked_madd_p (rtx out_insn, rtx in_insn)
12277
{
12278
  rtx x;
12279
 
12280
  x = single_set (in_insn);
12281
  if (x == 0)
12282
    return false;
12283
 
12284
  x = SET_SRC (x);
12285
 
12286
  if (GET_CODE (x) == PLUS
12287
      && GET_CODE (XEXP (x, 0)) == MULT
12288
      && reg_set_p (XEXP (x, 1), out_insn))
12289
    return true;
12290
 
12291
  if (GET_CODE (x) == MINUS
12292
      && GET_CODE (XEXP (x, 1)) == MULT
12293
      && reg_set_p (XEXP (x, 0), out_insn))
12294
    return true;
12295
 
12296
  return false;
12297
}
12298
 
12299
/* True if the dependency between OUT_INSN and IN_INSN is on the store
12300
   data rather than the address.  We need this because the cprestore
12301
   pattern is type "store", but is defined using an UNSPEC_VOLATILE,
12302
   which causes the default routine to abort.  We just return false
12303
   for that case.  */
12304
 
12305
bool
12306
mips_store_data_bypass_p (rtx out_insn, rtx in_insn)
12307
{
12308
  if (GET_CODE (PATTERN (in_insn)) == UNSPEC_VOLATILE)
12309
    return false;
12310
 
12311
  return !store_data_bypass_p (out_insn, in_insn);
12312
}
12313
 
12314
 
12315
/* Variables and flags used in scheduler hooks when tuning for
   Loongson 2E/2F.  */
static struct
{
  /* Variables to support Loongson 2E/2F round-robin [F]ALU1/2 dispatch
     strategy.  */

  /* If true, then next ALU1/2 instruction will go to ALU1.  */
  bool alu1_turn_p;

  /* If true, then next FALU1/2 instruction will go to FALU1.  */
  bool falu1_turn_p;

  /* Codes to query if [f]alu{1,2}_core units are subscribed or not.  */
  int alu1_core_unit_code;
  int alu2_core_unit_code;
  int falu1_core_unit_code;
  int falu2_core_unit_code;

  /* True if current cycle has a multi instruction.
     This flag is used in mips_ls2_dfa_post_advance_cycle.  */
  bool cycle_has_multi_p;

  /* Instructions to subscribe ls2_[f]alu{1,2}_turn_enabled units.
     These are used in mips_ls2_dfa_post_advance_cycle to initialize
     DFA state.
     E.g., when alu1_turn_enabled_insn is issued it makes next ALU1/2
     instruction to go ALU1.  */
  rtx alu1_turn_enabled_insn;
  rtx alu2_turn_enabled_insn;
  rtx falu1_turn_enabled_insn;
  rtx falu2_turn_enabled_insn;
} mips_ls2;
12348
 
12349
/* Implement TARGET_SCHED_ADJUST_COST.  We assume that anti and output
12350
   dependencies have no cost, except on the 20Kc where output-dependence
12351
   is treated like input-dependence.  */
12352
 
12353
static int
12354
mips_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
12355
                  rtx dep ATTRIBUTE_UNUSED, int cost)
12356
{
12357
  if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
12358
      && TUNE_20KC)
12359
    return cost;
12360
  if (REG_NOTE_KIND (link) != 0)
12361
    return 0;
12362
  return cost;
12363
}
12364
 
12365
/* Return the number of instructions that can be issued per cycle.  */

static int
mips_issue_rate (void)
{
  switch (mips_tune)
    {
    case PROCESSOR_74KC:
    case PROCESSOR_74KF2_1:
    case PROCESSOR_74KF1_1:
    case PROCESSOR_74KF3_2:
      /* The 74k is not strictly quad-issue cpu, but can be seen as one
         by the scheduler.  It can issue 1 ALU, 1 AGEN and 2 FPU insns,
         but in reality only a maximum of 3 insns can be issued as
         floating-point loads and stores also require a slot in the
         AGEN pipe.  */
    case PROCESSOR_R10000:
      /* All R10K Processors are quad-issue (being the first MIPS
         processors to support this feature). */
      return 4;

    case PROCESSOR_20KC:
    case PROCESSOR_R4130:
    case PROCESSOR_R5400:
    case PROCESSOR_R5500:
    case PROCESSOR_R7000:
    case PROCESSOR_R9000:
    case PROCESSOR_OCTEON:
    case PROCESSOR_OCTEON2:
      return 2;

    case PROCESSOR_SB1:
    case PROCESSOR_SB1A:
      /* This is actually 4, but we get better performance if we claim 3.
         This is partly because of unwanted speculative code motion with the
         larger number, and partly because in most common cases we can't
         reach the theoretical max of 4.  */
      return 3;

    case PROCESSOR_LOONGSON_2E:
    case PROCESSOR_LOONGSON_2F:
    case PROCESSOR_LOONGSON_3A:
      return 4;

    default:
      /* Be conservative for processors without a documented rate.  */
      return 1;
    }
}
12413
 
12414
/* Implement TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN hook for Loongson2.  */

static void
mips_ls2_init_dfa_post_cycle_insn (void)
{
  /* Materialize one dummy instruction per ls2_[f]alu{1,2}_turn_enabled
     unit; mips_ls2_dfa_post_advance_cycle replays these against the
     DFA state to implement round-robin dispatch.  */
  start_sequence ();
  emit_insn (gen_ls2_alu1_turn_enabled_insn ());
  mips_ls2.alu1_turn_enabled_insn = get_insns ();
  end_sequence ();

  start_sequence ();
  emit_insn (gen_ls2_alu2_turn_enabled_insn ());
  mips_ls2.alu2_turn_enabled_insn = get_insns ();
  end_sequence ();

  start_sequence ();
  emit_insn (gen_ls2_falu1_turn_enabled_insn ());
  mips_ls2.falu1_turn_enabled_insn = get_insns ();
  end_sequence ();

  start_sequence ();
  emit_insn (gen_ls2_falu2_turn_enabled_insn ());
  mips_ls2.falu2_turn_enabled_insn = get_insns ();
  end_sequence ();

  /* Cache the DFA unit codes used to query core occupancy.  */
  mips_ls2.alu1_core_unit_code = get_cpu_unit_code ("ls2_alu1_core");
  mips_ls2.alu2_core_unit_code = get_cpu_unit_code ("ls2_alu2_core");
  mips_ls2.falu1_core_unit_code = get_cpu_unit_code ("ls2_falu1_core");
  mips_ls2.falu2_core_unit_code = get_cpu_unit_code ("ls2_falu2_core");
}
12444
 
12445
/* Implement TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN hook.
   Init data used in mips_dfa_post_advance_cycle.  */

static void
mips_init_dfa_post_cycle_insn (void)
{
  /* Only the Loongson 2E/2F scheduling model needs this state.  */
  if (TUNE_LOONGSON_2EF)
    mips_ls2_init_dfa_post_cycle_insn ();
}
12454
 
12455
/* Initialize STATE when scheduling for Loongson 2E/2F.
12456
   Support round-robin dispatch scheme by enabling only one of
12457
   ALU1/ALU2 and one of FALU1/FALU2 units for ALU1/2 and FALU1/2 instructions
12458
   respectively.  */
12459
 
12460
static void
12461
mips_ls2_dfa_post_advance_cycle (state_t state)
12462
{
12463
  if (cpu_unit_reservation_p (state, mips_ls2.alu1_core_unit_code))
12464
    {
12465
      /* Though there are no non-pipelined ALU1 insns,
12466
         we can get an instruction of type 'multi' before reload.  */
12467
      gcc_assert (mips_ls2.cycle_has_multi_p);
12468
      mips_ls2.alu1_turn_p = false;
12469
    }
12470
 
12471
  mips_ls2.cycle_has_multi_p = false;
12472
 
12473
  if (cpu_unit_reservation_p (state, mips_ls2.alu2_core_unit_code))
12474
    /* We have a non-pipelined alu instruction in the core,
12475
       adjust round-robin counter.  */
12476
    mips_ls2.alu1_turn_p = true;
12477
 
12478
  if (mips_ls2.alu1_turn_p)
12479
    {
12480
      if (state_transition (state, mips_ls2.alu1_turn_enabled_insn) >= 0)
12481
        gcc_unreachable ();
12482
    }
12483
  else
12484
    {
12485
      if (state_transition (state, mips_ls2.alu2_turn_enabled_insn) >= 0)
12486
        gcc_unreachable ();
12487
    }
12488
 
12489
  if (cpu_unit_reservation_p (state, mips_ls2.falu1_core_unit_code))
12490
    {
12491
      /* There are no non-pipelined FALU1 insns.  */
12492
      gcc_unreachable ();
12493
      mips_ls2.falu1_turn_p = false;
12494
    }
12495
 
12496
  if (cpu_unit_reservation_p (state, mips_ls2.falu2_core_unit_code))
12497
    /* We have a non-pipelined falu instruction in the core,
12498
       adjust round-robin counter.  */
12499
    mips_ls2.falu1_turn_p = true;
12500
 
12501
  if (mips_ls2.falu1_turn_p)
12502
    {
12503
      if (state_transition (state, mips_ls2.falu1_turn_enabled_insn) >= 0)
12504
        gcc_unreachable ();
12505
    }
12506
  else
12507
    {
12508
      if (state_transition (state, mips_ls2.falu2_turn_enabled_insn) >= 0)
12509
        gcc_unreachable ();
12510
    }
12511
}
12512
 
12513
/* Implement TARGET_SCHED_DFA_POST_ADVANCE_CYCLE.
   This hook is being called at the start of each cycle.  */

static void
mips_dfa_post_advance_cycle (void)
{
  /* Only the Loongson 2E/2F round-robin model needs per-cycle work.  */
  if (TUNE_LOONGSON_2EF)
    mips_ls2_dfa_post_advance_cycle (curr_state);
}
12522
 
12523
/* Implement TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD.  This should
12524
   be as wide as the scheduling freedom in the DFA.  */
12525
 
12526
static int
12527
mips_multipass_dfa_lookahead (void)
12528
{
12529
  /* Can schedule up to 4 of the 6 function units in any one cycle.  */
12530
  if (TUNE_SB1)
12531
    return 4;
12532
 
12533
  if (TUNE_LOONGSON_2EF || TUNE_LOONGSON_3A)
12534
    return 4;
12535
 
12536
  if (TUNE_OCTEON)
12537
    return 2;
12538
 
12539
  return 0;
12540
}
12541
 
12542
/* Remove the instruction at index LOWER from ready queue READY and
12543
   reinsert it in front of the instruction at index HIGHER.  LOWER must
12544
   be <= HIGHER.  */
12545
 
12546
static void
12547
mips_promote_ready (rtx *ready, int lower, int higher)
12548
{
12549
  rtx new_head;
12550
  int i;
12551
 
12552
  new_head = ready[lower];
12553
  for (i = lower; i < higher; i++)
12554
    ready[i] = ready[i + 1];
12555
  ready[i] = new_head;
12556
}
12557
 
12558
/* If the priority of the instruction at POS2 in the ready queue READY
12559
   is within LIMIT units of that of the instruction at POS1, swap the
12560
   instructions if POS2 is not already less than POS1.  */
12561
 
12562
static void
12563
mips_maybe_swap_ready (rtx *ready, int pos1, int pos2, int limit)
12564
{
12565
  if (pos1 < pos2
12566
      && INSN_PRIORITY (ready[pos1]) + limit >= INSN_PRIORITY (ready[pos2]))
12567
    {
12568
      rtx temp;
12569
 
12570
      temp = ready[pos1];
12571
      ready[pos1] = ready[pos2];
12572
      ready[pos2] = temp;
12573
    }
12574
}
12575
 
12576
/* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
   that may clobber hi or lo.  */
static rtx mips_macc_chains_last_hilo;

/* A TUNE_MACC_CHAINS helper function.  Record that instruction INSN has
   been scheduled, updating mips_macc_chains_last_hilo appropriately.  */

static void
mips_macc_chains_record (rtx insn)
{
  if (get_attr_may_clobber_hilo (insn))
    mips_macc_chains_last_hilo = insn;
}
12589
 
12590
/* A TUNE_MACC_CHAINS helper function.  Search ready queue READY, which
12591
   has NREADY elements, looking for a multiply-add or multiply-subtract
12592
   instruction that is cumulative with mips_macc_chains_last_hilo.
12593
   If there is one, promote it ahead of anything else that might
12594
   clobber hi or lo.  */
12595
 
12596
static void
12597
mips_macc_chains_reorder (rtx *ready, int nready)
12598
{
12599
  int i, j;
12600
 
12601
  if (mips_macc_chains_last_hilo != 0)
12602
    for (i = nready - 1; i >= 0; i--)
12603
      if (mips_linked_madd_p (mips_macc_chains_last_hilo, ready[i]))
12604
        {
12605
          for (j = nready - 1; j > i; j--)
12606
            if (recog_memoized (ready[j]) >= 0
12607
                && get_attr_may_clobber_hilo (ready[j]))
12608
              {
12609
                mips_promote_ready (ready, i, j);
12610
                break;
12611
              }
12612
          break;
12613
        }
12614
}
12615
 
12616
/* The last instruction to be scheduled.  Set by mips_variable_issue
   and examined by the VR4130 reordering heuristics below.  */
static rtx vr4130_last_insn;
12618
 
12619
/* A note_stores callback used by vr4130_true_reg_dependence_p.  DATA
12620
   points to an rtx that is initially an instruction.  Nullify the rtx
12621
   if the instruction uses the value of register X.  */
12622
 
12623
static void
12624
vr4130_true_reg_dependence_p_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED,
12625
                                void *data)
12626
{
12627
  rtx *insn_ptr;
12628
 
12629
  insn_ptr = (rtx *) data;
12630
  if (REG_P (x)
12631
      && *insn_ptr != 0
12632
      && reg_referenced_p (x, PATTERN (*insn_ptr)))
12633
    *insn_ptr = 0;
12634
}
12635
 
12636
/* Return true if there is true register dependence between vr4130_last_insn
12637
   and INSN.  */
12638
 
12639
static bool
12640
vr4130_true_reg_dependence_p (rtx insn)
12641
{
12642
  note_stores (PATTERN (vr4130_last_insn),
12643
               vr4130_true_reg_dependence_p_1, &insn);
12644
  return insn == 0;
12645
}
12646
 
12647
/* A TUNE_MIPS4130 helper function.  Given that INSN1 is at the head of
12648
   the ready queue and that INSN2 is the instruction after it, return
12649
   true if it is worth promoting INSN2 ahead of INSN1.  Look for cases
12650
   in which INSN1 and INSN2 can probably issue in parallel, but for
12651
   which (INSN2, INSN1) should be less sensitive to instruction
12652
   alignment than (INSN1, INSN2).  See 4130.md for more details.  */
12653
 
12654
static bool
12655
vr4130_swap_insns_p (rtx insn1, rtx insn2)
12656
{
12657
  sd_iterator_def sd_it;
12658
  dep_t dep;
12659
 
12660
  /* Check for the following case:
12661
 
12662
     1) there is some other instruction X with an anti dependence on INSN1;
12663
     2) X has a higher priority than INSN2; and
12664
     3) X is an arithmetic instruction (and thus has no unit restrictions).
12665
 
12666
     If INSN1 is the last instruction blocking X, it would better to
12667
     choose (INSN1, X) over (INSN2, INSN1).  */
12668
  FOR_EACH_DEP (insn1, SD_LIST_FORW, sd_it, dep)
12669
    if (DEP_TYPE (dep) == REG_DEP_ANTI
12670
        && INSN_PRIORITY (DEP_CON (dep)) > INSN_PRIORITY (insn2)
12671
        && recog_memoized (DEP_CON (dep)) >= 0
12672
        && get_attr_vr4130_class (DEP_CON (dep)) == VR4130_CLASS_ALU)
12673
      return false;
12674
 
12675
  if (vr4130_last_insn != 0
12676
      && recog_memoized (insn1) >= 0
12677
      && recog_memoized (insn2) >= 0)
12678
    {
12679
      /* See whether INSN1 and INSN2 use different execution units,
12680
         or if they are both ALU-type instructions.  If so, they can
12681
         probably execute in parallel.  */
12682
      enum attr_vr4130_class class1 = get_attr_vr4130_class (insn1);
12683
      enum attr_vr4130_class class2 = get_attr_vr4130_class (insn2);
12684
      if (class1 != class2 || class1 == VR4130_CLASS_ALU)
12685
        {
12686
          /* If only one of the instructions has a dependence on
12687
             vr4130_last_insn, prefer to schedule the other one first.  */
12688
          bool dep1_p = vr4130_true_reg_dependence_p (insn1);
12689
          bool dep2_p = vr4130_true_reg_dependence_p (insn2);
12690
          if (dep1_p != dep2_p)
12691
            return dep1_p;
12692
 
12693
          /* Prefer to schedule INSN2 ahead of INSN1 if vr4130_last_insn
12694
             is not an ALU-type instruction and if INSN1 uses the same
12695
             execution unit.  (Note that if this condition holds, we already
12696
             know that INSN2 uses a different execution unit.)  */
12697
          if (class1 != VR4130_CLASS_ALU
12698
              && recog_memoized (vr4130_last_insn) >= 0
12699
              && class1 == get_attr_vr4130_class (vr4130_last_insn))
12700
            return true;
12701
        }
12702
    }
12703
  return false;
12704
}
12705
 
12706
/* A TUNE_MIPS4130 helper function.  (READY, NREADY) describes a ready
12707
   queue with at least two instructions.  Swap the first two if
12708
   vr4130_swap_insns_p says that it could be worthwhile.  */
12709
 
12710
static void
12711
vr4130_reorder (rtx *ready, int nready)
12712
{
12713
  if (vr4130_swap_insns_p (ready[nready - 1], ready[nready - 2]))
12714
    mips_promote_ready (ready, nready - 2, nready - 1);
12715
}
12716
 
12717
/* Record whether the last 74k AGEN instruction was a load or store.
   TYPE_UNKNOWN means neither, or that the state has been reset.  */
static enum attr_type mips_last_74k_agen_insn = TYPE_UNKNOWN;
12719
 
12720
/* Initialize mips_last_74k_agen_insn from INSN.  A null argument
12721
   resets to TYPE_UNKNOWN state.  */
12722
 
12723
static void
12724
mips_74k_agen_init (rtx insn)
12725
{
12726
  if (!insn || CALL_P (insn) || JUMP_P (insn))
12727
    mips_last_74k_agen_insn = TYPE_UNKNOWN;
12728
  else
12729
    {
12730
      enum attr_type type = get_attr_type (insn);
12731
      if (type == TYPE_LOAD || type == TYPE_STORE)
12732
        mips_last_74k_agen_insn = type;
12733
    }
12734
}
12735
 
12736
/* A TUNE_74K helper function.  The 74K AGEN pipeline likes multiple
12737
   loads to be grouped together, and multiple stores to be grouped
12738
   together.  Swap things around in the ready queue to make this happen.  */
12739
 
12740
static void
12741
mips_74k_agen_reorder (rtx *ready, int nready)
12742
{
12743
  int i;
12744
  int store_pos, load_pos;
12745
 
12746
  store_pos = -1;
12747
  load_pos = -1;
12748
 
12749
  for (i = nready - 1; i >= 0; i--)
12750
    {
12751
      rtx insn = ready[i];
12752
      if (USEFUL_INSN_P (insn))
12753
        switch (get_attr_type (insn))
12754
          {
12755
          case TYPE_STORE:
12756
            if (store_pos == -1)
12757
              store_pos = i;
12758
            break;
12759
 
12760
          case TYPE_LOAD:
12761
            if (load_pos == -1)
12762
              load_pos = i;
12763
            break;
12764
 
12765
          default:
12766
            break;
12767
          }
12768
    }
12769
 
12770
  if (load_pos == -1 || store_pos == -1)
12771
    return;
12772
 
12773
  switch (mips_last_74k_agen_insn)
12774
    {
12775
    case TYPE_UNKNOWN:
12776
      /* Prefer to schedule loads since they have a higher latency.  */
12777
    case TYPE_LOAD:
12778
      /* Swap loads to the front of the queue.  */
12779
      mips_maybe_swap_ready (ready, load_pos, store_pos, 4);
12780
      break;
12781
    case TYPE_STORE:
12782
      /* Swap stores to the front of the queue.  */
12783
      mips_maybe_swap_ready (ready, store_pos, load_pos, 4);
12784
      break;
12785
    default:
12786
      break;
12787
    }
12788
}
12789
 
12790
/* Implement TARGET_SCHED_INIT.  */
12791
 
12792
static void
12793
mips_sched_init (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
12794
                 int max_ready ATTRIBUTE_UNUSED)
12795
{
12796
  mips_macc_chains_last_hilo = 0;
12797
  vr4130_last_insn = 0;
12798
  mips_74k_agen_init (NULL_RTX);
12799
 
12800
  /* When scheduling for Loongson2, branch instructions go to ALU1,
12801
     therefore basic block is most likely to start with round-robin counter
12802
     pointed to ALU2.  */
12803
  mips_ls2.alu1_turn_p = false;
12804
  mips_ls2.falu1_turn_p = true;
12805
}
12806
 
12807
/* Subroutine used by TARGET_SCHED_REORDER and TARGET_SCHED_REORDER2.  */
12808
 
12809
static void
12810
mips_sched_reorder_1 (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
12811
                      rtx *ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED)
12812
{
12813
  if (!reload_completed
12814
      && TUNE_MACC_CHAINS
12815
      && *nreadyp > 0)
12816
    mips_macc_chains_reorder (ready, *nreadyp);
12817
 
12818
  if (reload_completed
12819
      && TUNE_MIPS4130
12820
      && !TARGET_VR4130_ALIGN
12821
      && *nreadyp > 1)
12822
    vr4130_reorder (ready, *nreadyp);
12823
 
12824
  if (TUNE_74K)
12825
    mips_74k_agen_reorder (ready, *nreadyp);
12826
}
12827
 
12828
/* Implement TARGET_SCHED_REORDER.  */
12829
 
12830
static int
12831
mips_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
12832
                    rtx *ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED)
12833
{
12834
  mips_sched_reorder_1 (file, verbose, ready, nreadyp, cycle);
12835
  return mips_issue_rate ();
12836
}
12837
 
12838
/* Implement TARGET_SCHED_REORDER2.  */
12839
 
12840
static int
12841
mips_sched_reorder2 (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
12842
                     rtx *ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED)
12843
{
12844
  mips_sched_reorder_1 (file, verbose, ready, nreadyp, cycle);
12845
  return cached_can_issue_more;
12846
}
12847
 
12848
/* Update round-robin counters for ALU1/2 and FALU1/2.  */
12849
 
12850
static void
12851
mips_ls2_variable_issue (rtx insn)
12852
{
12853
  if (mips_ls2.alu1_turn_p)
12854
    {
12855
      if (cpu_unit_reservation_p (curr_state, mips_ls2.alu1_core_unit_code))
12856
        mips_ls2.alu1_turn_p = false;
12857
    }
12858
  else
12859
    {
12860
      if (cpu_unit_reservation_p (curr_state, mips_ls2.alu2_core_unit_code))
12861
        mips_ls2.alu1_turn_p = true;
12862
    }
12863
 
12864
  if (mips_ls2.falu1_turn_p)
12865
    {
12866
      if (cpu_unit_reservation_p (curr_state, mips_ls2.falu1_core_unit_code))
12867
        mips_ls2.falu1_turn_p = false;
12868
    }
12869
  else
12870
    {
12871
      if (cpu_unit_reservation_p (curr_state, mips_ls2.falu2_core_unit_code))
12872
        mips_ls2.falu1_turn_p = true;
12873
    }
12874
 
12875
  if (recog_memoized (insn) >= 0)
12876
    mips_ls2.cycle_has_multi_p |= (get_attr_type (insn) == TYPE_MULTI);
12877
}
12878
 
12879
/* Implement TARGET_SCHED_VARIABLE_ISSUE.  */
12880
 
12881
static int
12882
mips_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
12883
                     rtx insn, int more)
12884
{
12885
  /* Ignore USEs and CLOBBERs; don't count them against the issue rate.  */
12886
  if (USEFUL_INSN_P (insn))
12887
    {
12888
      if (get_attr_type (insn) != TYPE_GHOST)
12889
        more--;
12890
      if (!reload_completed && TUNE_MACC_CHAINS)
12891
        mips_macc_chains_record (insn);
12892
      vr4130_last_insn = insn;
12893
      if (TUNE_74K)
12894
        mips_74k_agen_init (insn);
12895
      else if (TUNE_LOONGSON_2EF)
12896
        mips_ls2_variable_issue (insn);
12897
    }
12898
 
12899
  /* Instructions of type 'multi' should all be split before
12900
     the second scheduling pass.  */
12901
  gcc_assert (!reload_completed
12902
              || recog_memoized (insn) < 0
12903
              || get_attr_type (insn) != TYPE_MULTI);
12904
 
12905
  cached_can_issue_more = more;
12906
  return more;
12907
}
12908
 
12909
/* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY),
12910
   return the first operand of the associated PREF or PREFX insn.  */
12911
 
12912
rtx
12913
mips_prefetch_cookie (rtx write, rtx locality)
12914
{
12915
  /* store_streamed / load_streamed.  */
12916
  if (INTVAL (locality) <= 0)
12917
    return GEN_INT (INTVAL (write) + 4);
12918
 
12919
  /* store / load.  */
12920
  if (INTVAL (locality) <= 2)
12921
    return write;
12922
 
12923
  /* store_retained / load_retained.  */
12924
  return GEN_INT (INTVAL (write) + 6);
12925
}
12926
 
12927
/* Flags that indicate when a built-in function is available.

   BUILTIN_AVAIL_NON_MIPS16
	The function is available on the current target, but only
	in non-MIPS16 mode.  */
#define BUILTIN_AVAIL_NON_MIPS16 1

/* Declare an availability predicate for built-in functions that
   require non-MIPS16 mode and also require COND to be true.
   NAME is the main part of the predicate's name.  The predicate
   returns a mask of BUILTIN_AVAIL_* flags, or 0 if the built-in
   functions are not available at all.  */
#define AVAIL_NON_MIPS16(NAME, COND)					\
 static unsigned int							\
 mips_builtin_avail_##NAME (void)					\
 {									\
   return (COND) ? BUILTIN_AVAIL_NON_MIPS16 : 0;			\
 }
12943
 
12944
/* This structure describes a single built-in function.  */
struct mips_builtin_description {
  /* The code of the main .md file instruction.  See mips_builtin_type
     for more information.  */
  enum insn_code icode;

  /* The floating-point comparison code to use with ICODE, if any.  */
  enum mips_fp_condition cond;

  /* The name of the built-in function, including the "__builtin_"
     prefix.  */
  const char *name;

  /* Specifies how the function should be expanded.  */
  enum mips_builtin_type builtin_type;

  /* The function's prototype.  */
  enum mips_function_type function_type;

  /* Whether the function is available; returns a mask of
     BUILTIN_AVAIL_* flags, or 0 when unavailable.  */
  unsigned int (*avail) (void);
};
12965
 
12966
/* Availability predicates for each class of built-in function,
   defined via AVAIL_NON_MIPS16 above.  */
AVAIL_NON_MIPS16 (paired_single, TARGET_PAIRED_SINGLE_FLOAT)
AVAIL_NON_MIPS16 (sb1_paired_single, TARGET_SB1 && TARGET_PAIRED_SINGLE_FLOAT)
AVAIL_NON_MIPS16 (mips3d, TARGET_MIPS3D)
AVAIL_NON_MIPS16 (dsp, TARGET_DSP)
AVAIL_NON_MIPS16 (dspr2, TARGET_DSPR2)
AVAIL_NON_MIPS16 (dsp_32, !TARGET_64BIT && TARGET_DSP)
AVAIL_NON_MIPS16 (dsp_64, TARGET_64BIT && TARGET_DSP)
AVAIL_NON_MIPS16 (dspr2_32, !TARGET_64BIT && TARGET_DSPR2)
AVAIL_NON_MIPS16 (loongson, TARGET_LOONGSON_VECTORS)
AVAIL_NON_MIPS16 (cache, TARGET_CACHE_BUILTIN)
12976
 
12977
/* Construct a mips_builtin_description from the given arguments.

   INSN is the name of the associated instruction pattern, without the
   leading CODE_FOR_mips_.

   COND is the floating-point condition code associated with the
   function.  It can be 'f' if the field is not applicable.

   NAME is the name of the function itself, without the leading
   "__builtin_mips_".

   BUILTIN_TYPE and FUNCTION_TYPE are mips_builtin_description fields.

   AVAIL is the name of the availability predicate, without the leading
   mips_builtin_avail_.  */
#define MIPS_BUILTIN(INSN, COND, NAME, BUILTIN_TYPE,			\
		     FUNCTION_TYPE, AVAIL)				\
  { CODE_FOR_mips_ ## INSN, MIPS_FP_COND_ ## COND,			\
    "__builtin_mips_" NAME, BUILTIN_TYPE, FUNCTION_TYPE,		\
    mips_builtin_avail_ ## AVAIL }

/* Define __builtin_mips_<INSN>, which is a MIPS_BUILTIN_DIRECT function
   mapped to instruction CODE_FOR_mips_<INSN>.  FUNCTION_TYPE and AVAIL
   are as for MIPS_BUILTIN.  */
#define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, AVAIL)			\
  MIPS_BUILTIN (INSN, f, #INSN, MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, AVAIL)
13003
 
13004
/* Define __builtin_mips_<INSN>_<COND>_{s,d} functions (single and
   double precision scalar comparisons), both of which are subject to
   mips_builtin_avail_<AVAIL>.  */
#define CMP_SCALAR_BUILTINS(INSN, COND, AVAIL)				\
  MIPS_BUILTIN (INSN ## _cond_s, COND, #INSN "_" #COND "_s",		\
		MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_SF_SF, AVAIL),	\
  MIPS_BUILTIN (INSN ## _cond_d, COND, #INSN "_" #COND "_d",		\
		MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_DF_DF, AVAIL)

/* Define __builtin_mips_{any,all,upper,lower}_<INSN>_<COND>_ps.
   The lower and upper forms are subject to mips_builtin_avail_<AVAIL>
   while the any and all forms are subject to mips_builtin_avail_mips3d.  */
#define CMP_PS_BUILTINS(INSN, COND, AVAIL)				\
  MIPS_BUILTIN (INSN ## _cond_ps, COND, "any_" #INSN "_" #COND "_ps",	\
		MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF,		\
		mips3d),						\
  MIPS_BUILTIN (INSN ## _cond_ps, COND, "all_" #INSN "_" #COND "_ps",	\
		MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF,		\
		mips3d),						\
  MIPS_BUILTIN (INSN ## _cond_ps, COND, "lower_" #INSN "_" #COND "_ps", \
		MIPS_BUILTIN_CMP_LOWER, MIPS_INT_FTYPE_V2SF_V2SF,	\
		AVAIL),							\
  MIPS_BUILTIN (INSN ## _cond_ps, COND, "upper_" #INSN "_" #COND "_ps", \
		MIPS_BUILTIN_CMP_UPPER, MIPS_INT_FTYPE_V2SF_V2SF,	\
		AVAIL)

/* Define __builtin_mips_{any,all}_<INSN>_<COND>_4s.  The functions
   are subject to mips_builtin_avail_mips3d.  */
#define CMP_4S_BUILTINS(INSN, COND)					\
  MIPS_BUILTIN (INSN ## _cond_4s, COND, "any_" #INSN "_" #COND "_4s",	\
		MIPS_BUILTIN_CMP_ANY,					\
		MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, mips3d),		\
  MIPS_BUILTIN (INSN ## _cond_4s, COND, "all_" #INSN "_" #COND "_4s",	\
		MIPS_BUILTIN_CMP_ALL,					\
		MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, mips3d)

/* Define __builtin_mips_mov{t,f}_<INSN>_<COND>_ps.  The comparison
   instruction requires mips_builtin_avail_<AVAIL>.  */
#define MOVTF_BUILTINS(INSN, COND, AVAIL)				\
  MIPS_BUILTIN (INSN ## _cond_ps, COND, "movt_" #INSN "_" #COND "_ps",	\
		MIPS_BUILTIN_MOVT, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
		AVAIL),							\
  MIPS_BUILTIN (INSN ## _cond_ps, COND, "movf_" #INSN "_" #COND "_ps",	\
		MIPS_BUILTIN_MOVF, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
		AVAIL)
13048
 
13049
/* Define all the built-in functions related to C.cond.fmt condition COND.  */
#define CMP_BUILTINS(COND)						\
  MOVTF_BUILTINS (c, COND, paired_single),				\
  MOVTF_BUILTINS (cabs, COND, mips3d),					\
  CMP_SCALAR_BUILTINS (cabs, COND, mips3d),				\
  CMP_PS_BUILTINS (c, COND, paired_single),				\
  CMP_PS_BUILTINS (cabs, COND, mips3d),					\
  CMP_4S_BUILTINS (c, COND),						\
  CMP_4S_BUILTINS (cabs, COND)

/* Define __builtin_mips_<INSN>, which is a MIPS_BUILTIN_DIRECT_NO_TARGET
   function mapped to instruction CODE_FOR_mips_<INSN>.  FUNCTION_TYPE
   and AVAIL are as for MIPS_BUILTIN.  */
#define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, AVAIL)		\
  MIPS_BUILTIN (INSN, f, #INSN, MIPS_BUILTIN_DIRECT_NO_TARGET,		\
		FUNCTION_TYPE, AVAIL)

/* Define __builtin_mips_bposge<VALUE>.  <VALUE> is 32 for the MIPS32 DSP
   branch instruction.  AVAIL is as for MIPS_BUILTIN.  */
#define BPOSGE_BUILTIN(VALUE, AVAIL)					\
  MIPS_BUILTIN (bposge, f, "bposge" #VALUE,				\
		MIPS_BUILTIN_BPOSGE ## VALUE, MIPS_SI_FTYPE_VOID, AVAIL)
13071
 
13072
/* Define a Loongson MIPS_BUILTIN_DIRECT function __builtin_loongson_<FN_NAME>
   for instruction CODE_FOR_loongson_<INSN>.  FUNCTION_TYPE is a
   builtin_description field.  */
#define LOONGSON_BUILTIN_ALIAS(INSN, FN_NAME, FUNCTION_TYPE)		\
  { CODE_FOR_loongson_ ## INSN, MIPS_FP_COND_f,				\
    "__builtin_loongson_" #FN_NAME, MIPS_BUILTIN_DIRECT,		\
    FUNCTION_TYPE, mips_builtin_avail_loongson }

/* Define a Loongson MIPS_BUILTIN_DIRECT function __builtin_loongson_<INSN>
   for instruction CODE_FOR_loongson_<INSN>.  FUNCTION_TYPE is a
   builtin_description field.  */
#define LOONGSON_BUILTIN(INSN, FUNCTION_TYPE)				\
  LOONGSON_BUILTIN_ALIAS (INSN, INSN, FUNCTION_TYPE)

/* Like LOONGSON_BUILTIN, but add _<SUFFIX> to the end of the function name.
   We use functions of this form when the same insn can be usefully applied
   to more than one datatype.  */
#define LOONGSON_BUILTIN_SUFFIX(INSN, SUFFIX, FUNCTION_TYPE)		\
  LOONGSON_BUILTIN_ALIAS (INSN, INSN ## _ ## SUFFIX, FUNCTION_TYPE)
13091
 
13092
/* The built-in tables below refer to some instructions by
   CODE_FOR_mips_* and CODE_FOR_loongson_* names.  Map those names
   onto the pattern names that the .md files actually provide.  */
#define CODE_FOR_mips_sqrt_ps CODE_FOR_sqrtv2sf2
#define CODE_FOR_mips_addq_ph CODE_FOR_addv2hi3
#define CODE_FOR_mips_addu_qb CODE_FOR_addv4qi3
#define CODE_FOR_mips_subq_ph CODE_FOR_subv2hi3
#define CODE_FOR_mips_subu_qb CODE_FOR_subv4qi3
#define CODE_FOR_mips_mul_ph CODE_FOR_mulv2hi3
#define CODE_FOR_mips_mult CODE_FOR_mulsidi3_32bit
#define CODE_FOR_mips_multu CODE_FOR_umulsidi3_32bit

#define CODE_FOR_loongson_packsswh CODE_FOR_vec_pack_ssat_v2si
#define CODE_FOR_loongson_packsshb CODE_FOR_vec_pack_ssat_v4hi
#define CODE_FOR_loongson_packushb CODE_FOR_vec_pack_usat_v4hi
#define CODE_FOR_loongson_paddw CODE_FOR_addv2si3
#define CODE_FOR_loongson_paddh CODE_FOR_addv4hi3
#define CODE_FOR_loongson_paddb CODE_FOR_addv8qi3
#define CODE_FOR_loongson_paddsh CODE_FOR_ssaddv4hi3
#define CODE_FOR_loongson_paddsb CODE_FOR_ssaddv8qi3
#define CODE_FOR_loongson_paddush CODE_FOR_usaddv4hi3
#define CODE_FOR_loongson_paddusb CODE_FOR_usaddv8qi3
#define CODE_FOR_loongson_pmaxsh CODE_FOR_smaxv4hi3
#define CODE_FOR_loongson_pmaxub CODE_FOR_umaxv8qi3
#define CODE_FOR_loongson_pminsh CODE_FOR_sminv4hi3
#define CODE_FOR_loongson_pminub CODE_FOR_uminv8qi3
#define CODE_FOR_loongson_pmulhuh CODE_FOR_umulv4hi3_highpart
#define CODE_FOR_loongson_pmulhh CODE_FOR_smulv4hi3_highpart
#define CODE_FOR_loongson_pmullh CODE_FOR_mulv4hi3
#define CODE_FOR_loongson_psllh CODE_FOR_ashlv4hi3
#define CODE_FOR_loongson_psllw CODE_FOR_ashlv2si3
#define CODE_FOR_loongson_psrlh CODE_FOR_lshrv4hi3
#define CODE_FOR_loongson_psrlw CODE_FOR_lshrv2si3
#define CODE_FOR_loongson_psrah CODE_FOR_ashrv4hi3
#define CODE_FOR_loongson_psraw CODE_FOR_ashrv2si3
#define CODE_FOR_loongson_psubw CODE_FOR_subv2si3
#define CODE_FOR_loongson_psubh CODE_FOR_subv4hi3
#define CODE_FOR_loongson_psubb CODE_FOR_subv8qi3
#define CODE_FOR_loongson_psubsh CODE_FOR_sssubv4hi3
#define CODE_FOR_loongson_psubsb CODE_FOR_sssubv8qi3
#define CODE_FOR_loongson_psubush CODE_FOR_ussubv4hi3
#define CODE_FOR_loongson_psubusb CODE_FOR_ussubv8qi3
13131
 
13132
/* The master table of MIPS built-in functions.  An entry's index in
   this array is also its function code (see mips_init_builtins), and
   mips_builtin_decls below is indexed the same way.  */
static const struct mips_builtin_description mips_builtins[] = {
  DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single),
  DIRECT_BUILTIN (pul_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single),
  DIRECT_BUILTIN (plu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single),
  DIRECT_BUILTIN (puu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single),
  DIRECT_BUILTIN (cvt_ps_s, MIPS_V2SF_FTYPE_SF_SF, paired_single),
  DIRECT_BUILTIN (cvt_s_pl, MIPS_SF_FTYPE_V2SF, paired_single),
  DIRECT_BUILTIN (cvt_s_pu, MIPS_SF_FTYPE_V2SF, paired_single),
  DIRECT_BUILTIN (abs_ps, MIPS_V2SF_FTYPE_V2SF, paired_single),

  DIRECT_BUILTIN (alnv_ps, MIPS_V2SF_FTYPE_V2SF_V2SF_INT, paired_single),
  DIRECT_BUILTIN (addr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, mips3d),
  DIRECT_BUILTIN (mulr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, mips3d),
  DIRECT_BUILTIN (cvt_pw_ps, MIPS_V2SF_FTYPE_V2SF, mips3d),
  DIRECT_BUILTIN (cvt_ps_pw, MIPS_V2SF_FTYPE_V2SF, mips3d),

  DIRECT_BUILTIN (recip1_s, MIPS_SF_FTYPE_SF, mips3d),
  DIRECT_BUILTIN (recip1_d, MIPS_DF_FTYPE_DF, mips3d),
  DIRECT_BUILTIN (recip1_ps, MIPS_V2SF_FTYPE_V2SF, mips3d),
  DIRECT_BUILTIN (recip2_s, MIPS_SF_FTYPE_SF_SF, mips3d),
  DIRECT_BUILTIN (recip2_d, MIPS_DF_FTYPE_DF_DF, mips3d),
  DIRECT_BUILTIN (recip2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, mips3d),

  DIRECT_BUILTIN (rsqrt1_s, MIPS_SF_FTYPE_SF, mips3d),
  DIRECT_BUILTIN (rsqrt1_d, MIPS_DF_FTYPE_DF, mips3d),
  DIRECT_BUILTIN (rsqrt1_ps, MIPS_V2SF_FTYPE_V2SF, mips3d),
  DIRECT_BUILTIN (rsqrt2_s, MIPS_SF_FTYPE_SF_SF, mips3d),
  DIRECT_BUILTIN (rsqrt2_d, MIPS_DF_FTYPE_DF_DF, mips3d),
  DIRECT_BUILTIN (rsqrt2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, mips3d),

  MIPS_FP_CONDITIONS (CMP_BUILTINS),

  /* Built-in functions for the SB-1 processor.  */
  DIRECT_BUILTIN (sqrt_ps, MIPS_V2SF_FTYPE_V2SF, sb1_paired_single),

  /* Built-in functions for the DSP ASE (32-bit and 64-bit).  */
  DIRECT_BUILTIN (addq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
  DIRECT_BUILTIN (addq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
  DIRECT_BUILTIN (addq_s_w, MIPS_SI_FTYPE_SI_SI, dsp),
  DIRECT_BUILTIN (addu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
  DIRECT_BUILTIN (addu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
  DIRECT_BUILTIN (subq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
  DIRECT_BUILTIN (subq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
  DIRECT_BUILTIN (subq_s_w, MIPS_SI_FTYPE_SI_SI, dsp),
  DIRECT_BUILTIN (subu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
  DIRECT_BUILTIN (subu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
  DIRECT_BUILTIN (addsc, MIPS_SI_FTYPE_SI_SI, dsp),
  DIRECT_BUILTIN (addwc, MIPS_SI_FTYPE_SI_SI, dsp),
  DIRECT_BUILTIN (modsub, MIPS_SI_FTYPE_SI_SI, dsp),
  DIRECT_BUILTIN (raddu_w_qb, MIPS_SI_FTYPE_V4QI, dsp),
  DIRECT_BUILTIN (absq_s_ph, MIPS_V2HI_FTYPE_V2HI, dsp),
  DIRECT_BUILTIN (absq_s_w, MIPS_SI_FTYPE_SI, dsp),
  DIRECT_BUILTIN (precrq_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, dsp),
  DIRECT_BUILTIN (precrq_ph_w, MIPS_V2HI_FTYPE_SI_SI, dsp),
  DIRECT_BUILTIN (precrq_rs_ph_w, MIPS_V2HI_FTYPE_SI_SI, dsp),
  DIRECT_BUILTIN (precrqu_s_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, dsp),
  DIRECT_BUILTIN (preceq_w_phl, MIPS_SI_FTYPE_V2HI, dsp),
  DIRECT_BUILTIN (preceq_w_phr, MIPS_SI_FTYPE_V2HI, dsp),
  DIRECT_BUILTIN (precequ_ph_qbl, MIPS_V2HI_FTYPE_V4QI, dsp),
  DIRECT_BUILTIN (precequ_ph_qbr, MIPS_V2HI_FTYPE_V4QI, dsp),
  DIRECT_BUILTIN (precequ_ph_qbla, MIPS_V2HI_FTYPE_V4QI, dsp),
  DIRECT_BUILTIN (precequ_ph_qbra, MIPS_V2HI_FTYPE_V4QI, dsp),
  DIRECT_BUILTIN (preceu_ph_qbl, MIPS_V2HI_FTYPE_V4QI, dsp),
  DIRECT_BUILTIN (preceu_ph_qbr, MIPS_V2HI_FTYPE_V4QI, dsp),
  DIRECT_BUILTIN (preceu_ph_qbla, MIPS_V2HI_FTYPE_V4QI, dsp),
  DIRECT_BUILTIN (preceu_ph_qbra, MIPS_V2HI_FTYPE_V4QI, dsp),
  DIRECT_BUILTIN (shll_qb, MIPS_V4QI_FTYPE_V4QI_SI, dsp),
  DIRECT_BUILTIN (shll_ph, MIPS_V2HI_FTYPE_V2HI_SI, dsp),
  DIRECT_BUILTIN (shll_s_ph, MIPS_V2HI_FTYPE_V2HI_SI, dsp),
  DIRECT_BUILTIN (shll_s_w, MIPS_SI_FTYPE_SI_SI, dsp),
  DIRECT_BUILTIN (shrl_qb, MIPS_V4QI_FTYPE_V4QI_SI, dsp),
  DIRECT_BUILTIN (shra_ph, MIPS_V2HI_FTYPE_V2HI_SI, dsp),
  DIRECT_BUILTIN (shra_r_ph, MIPS_V2HI_FTYPE_V2HI_SI, dsp),
  DIRECT_BUILTIN (shra_r_w, MIPS_SI_FTYPE_SI_SI, dsp),
  DIRECT_BUILTIN (muleu_s_ph_qbl, MIPS_V2HI_FTYPE_V4QI_V2HI, dsp),
  DIRECT_BUILTIN (muleu_s_ph_qbr, MIPS_V2HI_FTYPE_V4QI_V2HI, dsp),
  DIRECT_BUILTIN (mulq_rs_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
  DIRECT_BUILTIN (muleq_s_w_phl, MIPS_SI_FTYPE_V2HI_V2HI, dsp),
  DIRECT_BUILTIN (muleq_s_w_phr, MIPS_SI_FTYPE_V2HI_V2HI, dsp),
  DIRECT_BUILTIN (bitrev, MIPS_SI_FTYPE_SI, dsp),
  DIRECT_BUILTIN (insv, MIPS_SI_FTYPE_SI_SI, dsp),
  DIRECT_BUILTIN (repl_qb, MIPS_V4QI_FTYPE_SI, dsp),
  DIRECT_BUILTIN (repl_ph, MIPS_V2HI_FTYPE_SI, dsp),
  DIRECT_NO_TARGET_BUILTIN (cmpu_eq_qb, MIPS_VOID_FTYPE_V4QI_V4QI, dsp),
  DIRECT_NO_TARGET_BUILTIN (cmpu_lt_qb, MIPS_VOID_FTYPE_V4QI_V4QI, dsp),
  DIRECT_NO_TARGET_BUILTIN (cmpu_le_qb, MIPS_VOID_FTYPE_V4QI_V4QI, dsp),
  DIRECT_BUILTIN (cmpgu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, dsp),
  DIRECT_BUILTIN (cmpgu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, dsp),
  DIRECT_BUILTIN (cmpgu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, dsp),
  DIRECT_NO_TARGET_BUILTIN (cmp_eq_ph, MIPS_VOID_FTYPE_V2HI_V2HI, dsp),
  DIRECT_NO_TARGET_BUILTIN (cmp_lt_ph, MIPS_VOID_FTYPE_V2HI_V2HI, dsp),
  DIRECT_NO_TARGET_BUILTIN (cmp_le_ph, MIPS_VOID_FTYPE_V2HI_V2HI, dsp),
  DIRECT_BUILTIN (pick_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
  DIRECT_BUILTIN (pick_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
  DIRECT_BUILTIN (packrl_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
  DIRECT_NO_TARGET_BUILTIN (wrdsp, MIPS_VOID_FTYPE_SI_SI, dsp),
  DIRECT_BUILTIN (rddsp, MIPS_SI_FTYPE_SI, dsp),
  DIRECT_BUILTIN (lbux, MIPS_SI_FTYPE_POINTER_SI, dsp),
  DIRECT_BUILTIN (lhx, MIPS_SI_FTYPE_POINTER_SI, dsp),
  DIRECT_BUILTIN (lwx, MIPS_SI_FTYPE_POINTER_SI, dsp),
  BPOSGE_BUILTIN (32, dsp),

  /* The following are for the MIPS DSP ASE REV 2 (32-bit and 64-bit).  */
  DIRECT_BUILTIN (absq_s_qb, MIPS_V4QI_FTYPE_V4QI, dspr2),
  DIRECT_BUILTIN (addu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
  DIRECT_BUILTIN (addu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
  DIRECT_BUILTIN (adduh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dspr2),
  DIRECT_BUILTIN (adduh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dspr2),
  DIRECT_BUILTIN (append, MIPS_SI_FTYPE_SI_SI_SI, dspr2),
  DIRECT_BUILTIN (balign, MIPS_SI_FTYPE_SI_SI_SI, dspr2),
  DIRECT_BUILTIN (cmpgdu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, dspr2),
  DIRECT_BUILTIN (cmpgdu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, dspr2),
  DIRECT_BUILTIN (cmpgdu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, dspr2),
  DIRECT_BUILTIN (mul_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
  DIRECT_BUILTIN (mul_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
  DIRECT_BUILTIN (mulq_rs_w, MIPS_SI_FTYPE_SI_SI, dspr2),
  DIRECT_BUILTIN (mulq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
  DIRECT_BUILTIN (mulq_s_w, MIPS_SI_FTYPE_SI_SI, dspr2),
  DIRECT_BUILTIN (precr_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, dspr2),
  DIRECT_BUILTIN (precr_sra_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, dspr2),
  DIRECT_BUILTIN (precr_sra_r_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, dspr2),
  DIRECT_BUILTIN (prepend, MIPS_SI_FTYPE_SI_SI_SI, dspr2),
  DIRECT_BUILTIN (shra_qb, MIPS_V4QI_FTYPE_V4QI_SI, dspr2),
  DIRECT_BUILTIN (shra_r_qb, MIPS_V4QI_FTYPE_V4QI_SI, dspr2),
  DIRECT_BUILTIN (shrl_ph, MIPS_V2HI_FTYPE_V2HI_SI, dspr2),
  DIRECT_BUILTIN (subu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
  DIRECT_BUILTIN (subu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
  DIRECT_BUILTIN (subuh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dspr2),
  DIRECT_BUILTIN (subuh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dspr2),
  DIRECT_BUILTIN (addqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
  DIRECT_BUILTIN (addqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
  DIRECT_BUILTIN (addqh_w, MIPS_SI_FTYPE_SI_SI, dspr2),
  DIRECT_BUILTIN (addqh_r_w, MIPS_SI_FTYPE_SI_SI, dspr2),
  DIRECT_BUILTIN (subqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
  DIRECT_BUILTIN (subqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
  DIRECT_BUILTIN (subqh_w, MIPS_SI_FTYPE_SI_SI, dspr2),
  DIRECT_BUILTIN (subqh_r_w, MIPS_SI_FTYPE_SI_SI, dspr2),

  /* Built-in functions for the DSP ASE (32-bit only).  */
  DIRECT_BUILTIN (dpau_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, dsp_32),
  DIRECT_BUILTIN (dpau_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, dsp_32),
  DIRECT_BUILTIN (dpsu_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, dsp_32),
  DIRECT_BUILTIN (dpsu_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, dsp_32),
  DIRECT_BUILTIN (dpaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
  DIRECT_BUILTIN (dpsq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
  DIRECT_BUILTIN (mulsaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
  DIRECT_BUILTIN (dpaq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, dsp_32),
  DIRECT_BUILTIN (dpsq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, dsp_32),
  DIRECT_BUILTIN (maq_s_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
  DIRECT_BUILTIN (maq_s_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
  DIRECT_BUILTIN (maq_sa_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
  DIRECT_BUILTIN (maq_sa_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
  DIRECT_BUILTIN (extr_w, MIPS_SI_FTYPE_DI_SI, dsp_32),
  DIRECT_BUILTIN (extr_r_w, MIPS_SI_FTYPE_DI_SI, dsp_32),
  DIRECT_BUILTIN (extr_rs_w, MIPS_SI_FTYPE_DI_SI, dsp_32),
  DIRECT_BUILTIN (extr_s_h, MIPS_SI_FTYPE_DI_SI, dsp_32),
  DIRECT_BUILTIN (extp, MIPS_SI_FTYPE_DI_SI, dsp_32),
  DIRECT_BUILTIN (extpdp, MIPS_SI_FTYPE_DI_SI, dsp_32),
  DIRECT_BUILTIN (shilo, MIPS_DI_FTYPE_DI_SI, dsp_32),
  DIRECT_BUILTIN (mthlip, MIPS_DI_FTYPE_DI_SI, dsp_32),
  DIRECT_BUILTIN (madd, MIPS_DI_FTYPE_DI_SI_SI, dsp_32),
  DIRECT_BUILTIN (maddu, MIPS_DI_FTYPE_DI_USI_USI, dsp_32),
  DIRECT_BUILTIN (msub, MIPS_DI_FTYPE_DI_SI_SI, dsp_32),
  DIRECT_BUILTIN (msubu, MIPS_DI_FTYPE_DI_USI_USI, dsp_32),
  DIRECT_BUILTIN (mult, MIPS_DI_FTYPE_SI_SI, dsp_32),
  DIRECT_BUILTIN (multu, MIPS_DI_FTYPE_USI_USI, dsp_32),

  /* Built-in functions for the DSP ASE (64-bit only).  */
  DIRECT_BUILTIN (ldx, MIPS_DI_FTYPE_POINTER_SI, dsp_64),

  /* The following are for the MIPS DSP ASE REV 2 (32-bit only).  */
  DIRECT_BUILTIN (dpa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
  DIRECT_BUILTIN (dps_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
  DIRECT_BUILTIN (mulsa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
  DIRECT_BUILTIN (dpax_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
  DIRECT_BUILTIN (dpsx_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
  DIRECT_BUILTIN (dpaqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
  DIRECT_BUILTIN (dpaqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
  DIRECT_BUILTIN (dpsqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
  DIRECT_BUILTIN (dpsqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),

  /* Builtin functions for ST Microelectronics Loongson-2E/2F cores.  */
  LOONGSON_BUILTIN (packsswh, MIPS_V4HI_FTYPE_V2SI_V2SI),
  LOONGSON_BUILTIN (packsshb, MIPS_V8QI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN (packushb, MIPS_UV8QI_FTYPE_UV4HI_UV4HI),
  LOONGSON_BUILTIN_SUFFIX (paddw, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
  LOONGSON_BUILTIN_SUFFIX (paddh, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
  LOONGSON_BUILTIN_SUFFIX (paddb, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
  LOONGSON_BUILTIN_SUFFIX (paddw, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
  LOONGSON_BUILTIN_SUFFIX (paddh, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN_SUFFIX (paddb, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
  LOONGSON_BUILTIN_SUFFIX (paddd, u, MIPS_UDI_FTYPE_UDI_UDI),
  LOONGSON_BUILTIN_SUFFIX (paddd, s, MIPS_DI_FTYPE_DI_DI),
  LOONGSON_BUILTIN (paddsh, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN (paddsb, MIPS_V8QI_FTYPE_V8QI_V8QI),
  LOONGSON_BUILTIN (paddush, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
  LOONGSON_BUILTIN (paddusb, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
  LOONGSON_BUILTIN_ALIAS (pandn_d, pandn_ud, MIPS_UDI_FTYPE_UDI_UDI),
  LOONGSON_BUILTIN_ALIAS (pandn_w, pandn_uw, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
  LOONGSON_BUILTIN_ALIAS (pandn_h, pandn_uh, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
  LOONGSON_BUILTIN_ALIAS (pandn_b, pandn_ub, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
  LOONGSON_BUILTIN_ALIAS (pandn_d, pandn_sd, MIPS_DI_FTYPE_DI_DI),
  LOONGSON_BUILTIN_ALIAS (pandn_w, pandn_sw, MIPS_V2SI_FTYPE_V2SI_V2SI),
  LOONGSON_BUILTIN_ALIAS (pandn_h, pandn_sh, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN_ALIAS (pandn_b, pandn_sb, MIPS_V8QI_FTYPE_V8QI_V8QI),
  LOONGSON_BUILTIN (pavgh, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
  LOONGSON_BUILTIN (pavgb, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
  LOONGSON_BUILTIN_SUFFIX (pcmpeqw, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
  LOONGSON_BUILTIN_SUFFIX (pcmpeqh, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
  LOONGSON_BUILTIN_SUFFIX (pcmpeqb, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
  LOONGSON_BUILTIN_SUFFIX (pcmpeqw, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
  LOONGSON_BUILTIN_SUFFIX (pcmpeqh, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN_SUFFIX (pcmpeqb, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
  LOONGSON_BUILTIN_SUFFIX (pcmpgtw, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
  LOONGSON_BUILTIN_SUFFIX (pcmpgth, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
  LOONGSON_BUILTIN_SUFFIX (pcmpgtb, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
  LOONGSON_BUILTIN_SUFFIX (pcmpgtw, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
  LOONGSON_BUILTIN_SUFFIX (pcmpgth, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN_SUFFIX (pcmpgtb, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
  LOONGSON_BUILTIN_SUFFIX (pextrh, u, MIPS_UV4HI_FTYPE_UV4HI_USI),
  LOONGSON_BUILTIN_SUFFIX (pextrh, s, MIPS_V4HI_FTYPE_V4HI_USI),
  LOONGSON_BUILTIN_SUFFIX (pinsrh_0, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
  LOONGSON_BUILTIN_SUFFIX (pinsrh_1, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
  LOONGSON_BUILTIN_SUFFIX (pinsrh_2, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
  LOONGSON_BUILTIN_SUFFIX (pinsrh_3, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
  LOONGSON_BUILTIN_SUFFIX (pinsrh_0, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN_SUFFIX (pinsrh_1, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN_SUFFIX (pinsrh_2, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN_SUFFIX (pinsrh_3, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN (pmaddhw, MIPS_V2SI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN (pmaxsh, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN (pmaxub, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
  LOONGSON_BUILTIN (pminsh, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN (pminub, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
  LOONGSON_BUILTIN_SUFFIX (pmovmskb, u, MIPS_UV8QI_FTYPE_UV8QI),
  LOONGSON_BUILTIN_SUFFIX (pmovmskb, s, MIPS_V8QI_FTYPE_V8QI),
  LOONGSON_BUILTIN (pmulhuh, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
  LOONGSON_BUILTIN (pmulhh, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN (pmullh, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN (pmuluw, MIPS_UDI_FTYPE_UV2SI_UV2SI),
  LOONGSON_BUILTIN (pasubub, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
  LOONGSON_BUILTIN (biadd, MIPS_UV4HI_FTYPE_UV8QI),
  LOONGSON_BUILTIN (psadbh, MIPS_UV4HI_FTYPE_UV8QI_UV8QI),
  LOONGSON_BUILTIN_SUFFIX (pshufh, u, MIPS_UV4HI_FTYPE_UV4HI_UQI),
  LOONGSON_BUILTIN_SUFFIX (pshufh, s, MIPS_V4HI_FTYPE_V4HI_UQI),
  LOONGSON_BUILTIN_SUFFIX (psllh, u, MIPS_UV4HI_FTYPE_UV4HI_UQI),
  LOONGSON_BUILTIN_SUFFIX (psllh, s, MIPS_V4HI_FTYPE_V4HI_UQI),
  LOONGSON_BUILTIN_SUFFIX (psllw, u, MIPS_UV2SI_FTYPE_UV2SI_UQI),
  LOONGSON_BUILTIN_SUFFIX (psllw, s, MIPS_V2SI_FTYPE_V2SI_UQI),
  LOONGSON_BUILTIN_SUFFIX (psrah, u, MIPS_UV4HI_FTYPE_UV4HI_UQI),
  LOONGSON_BUILTIN_SUFFIX (psrah, s, MIPS_V4HI_FTYPE_V4HI_UQI),
  LOONGSON_BUILTIN_SUFFIX (psraw, u, MIPS_UV2SI_FTYPE_UV2SI_UQI),
  LOONGSON_BUILTIN_SUFFIX (psraw, s, MIPS_V2SI_FTYPE_V2SI_UQI),
  LOONGSON_BUILTIN_SUFFIX (psrlh, u, MIPS_UV4HI_FTYPE_UV4HI_UQI),
  LOONGSON_BUILTIN_SUFFIX (psrlh, s, MIPS_V4HI_FTYPE_V4HI_UQI),
  LOONGSON_BUILTIN_SUFFIX (psrlw, u, MIPS_UV2SI_FTYPE_UV2SI_UQI),
  LOONGSON_BUILTIN_SUFFIX (psrlw, s, MIPS_V2SI_FTYPE_V2SI_UQI),
  LOONGSON_BUILTIN_SUFFIX (psubw, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
  LOONGSON_BUILTIN_SUFFIX (psubh, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
  LOONGSON_BUILTIN_SUFFIX (psubb, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
  LOONGSON_BUILTIN_SUFFIX (psubw, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
  LOONGSON_BUILTIN_SUFFIX (psubh, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN_SUFFIX (psubb, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
  LOONGSON_BUILTIN_SUFFIX (psubd, u, MIPS_UDI_FTYPE_UDI_UDI),
  LOONGSON_BUILTIN_SUFFIX (psubd, s, MIPS_DI_FTYPE_DI_DI),
  LOONGSON_BUILTIN (psubsh, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN (psubsb, MIPS_V8QI_FTYPE_V8QI_V8QI),
  LOONGSON_BUILTIN (psubush, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
  LOONGSON_BUILTIN (psubusb, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
  LOONGSON_BUILTIN_SUFFIX (punpckhbh, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
  LOONGSON_BUILTIN_SUFFIX (punpckhhw, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
  LOONGSON_BUILTIN_SUFFIX (punpckhwd, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
  LOONGSON_BUILTIN_SUFFIX (punpckhbh, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
  LOONGSON_BUILTIN_SUFFIX (punpckhhw, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN_SUFFIX (punpckhwd, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
  LOONGSON_BUILTIN_SUFFIX (punpcklbh, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
  LOONGSON_BUILTIN_SUFFIX (punpcklhw, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
  LOONGSON_BUILTIN_SUFFIX (punpcklwd, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
  LOONGSON_BUILTIN_SUFFIX (punpcklbh, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
  LOONGSON_BUILTIN_SUFFIX (punpcklhw, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN_SUFFIX (punpcklwd, s, MIPS_V2SI_FTYPE_V2SI_V2SI),

  /* Sundry other built-in functions.  */
  DIRECT_NO_TARGET_BUILTIN (cache, MIPS_VOID_FTYPE_SI_CVPOINTER, cache)
};
13417
 
13418
/* Index I is the function declaration for mips_builtins[I], or null if the
   function isn't defined on this target.  GTY(()) registers the array
   with the garbage collector so the trees stay live.  */
static GTY(()) tree mips_builtin_decls[ARRAY_SIZE (mips_builtins)];
13421
 
13422
/* MODE is a vector mode whose elements have type TYPE.  Return the type
13423
   of the vector itself.  */
13424
 
13425
static tree
13426
mips_builtin_vector_type (tree type, enum machine_mode mode)
13427
{
13428
  static tree types[2 * (int) MAX_MACHINE_MODE];
13429
  int mode_index;
13430
 
13431
  mode_index = (int) mode;
13432
 
13433
  if (TREE_CODE (type) == INTEGER_TYPE && TYPE_UNSIGNED (type))
13434
    mode_index += MAX_MACHINE_MODE;
13435
 
13436
  if (types[mode_index] == NULL_TREE)
13437
    types[mode_index] = build_vector_type_for_mode (type, mode);
13438
  return types[mode_index];
13439
}
13440
 
13441
/* Return a type for 'const volatile void *'.  */
13442
 
13443
static tree
13444
mips_build_cvpointer_type (void)
13445
{
13446
  static tree cache;
13447
 
13448
  if (cache == NULL_TREE)
13449
    cache = build_pointer_type (build_qualified_type
13450
                                (void_type_node,
13451
                                 TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE));
13452
  return cache;
13453
}
13454
 
13455
/* Source-level argument types.  */
#define MIPS_ATYPE_VOID void_type_node
#define MIPS_ATYPE_INT integer_type_node
#define MIPS_ATYPE_POINTER ptr_type_node
#define MIPS_ATYPE_CVPOINTER mips_build_cvpointer_type ()

/* Standard mode-based argument types.  */
#define MIPS_ATYPE_UQI unsigned_intQI_type_node
#define MIPS_ATYPE_SI intSI_type_node
#define MIPS_ATYPE_USI unsigned_intSI_type_node
#define MIPS_ATYPE_DI intDI_type_node
#define MIPS_ATYPE_UDI unsigned_intDI_type_node
#define MIPS_ATYPE_SF float_type_node
#define MIPS_ATYPE_DF double_type_node

/* Vector argument types.  */
#define MIPS_ATYPE_V2SF mips_builtin_vector_type (float_type_node, V2SFmode)
#define MIPS_ATYPE_V2HI mips_builtin_vector_type (intHI_type_node, V2HImode)
#define MIPS_ATYPE_V2SI mips_builtin_vector_type (intSI_type_node, V2SImode)
#define MIPS_ATYPE_V4QI mips_builtin_vector_type (intQI_type_node, V4QImode)
#define MIPS_ATYPE_V4HI mips_builtin_vector_type (intHI_type_node, V4HImode)
#define MIPS_ATYPE_V8QI mips_builtin_vector_type (intQI_type_node, V8QImode)
#define MIPS_ATYPE_UV2SI                                        \
  mips_builtin_vector_type (unsigned_intSI_type_node, V2SImode)
#define MIPS_ATYPE_UV4HI                                        \
  mips_builtin_vector_type (unsigned_intHI_type_node, V4HImode)
#define MIPS_ATYPE_UV8QI                                        \
  mips_builtin_vector_type (unsigned_intQI_type_node, V8QImode)

/* MIPS_FTYPE_ATYPESN takes N MIPS_FTYPES-like type codes and lists
   their associated MIPS_ATYPEs.  The first code is always the return
   type; mips_build_function_type pastes these lists straight into
   build_function_type_list calls.  */
#define MIPS_FTYPE_ATYPES1(A, B) \
  MIPS_ATYPE_##A, MIPS_ATYPE_##B

#define MIPS_FTYPE_ATYPES2(A, B, C) \
  MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C

#define MIPS_FTYPE_ATYPES3(A, B, C, D) \
  MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C, MIPS_ATYPE_##D

#define MIPS_FTYPE_ATYPES4(A, B, C, D, E) \
  MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C, MIPS_ATYPE_##D, \
  MIPS_ATYPE_##E
13498
 
13499
/* Return the function type associated with function prototype TYPE.  */

static tree
mips_build_function_type (enum mips_function_type type)
{
  /* Cache of previously-built function types, indexed by TYPE.  */
  static tree types[(int) MIPS_MAX_FTYPE_MAX];

  if (types[(int) type] == NULL_TREE)
    switch (type)
      {
/* Each DEF_MIPS_FTYPE in mips-ftypes.def expands to a case that builds
   the corresponding function type from its MIPS_ATYPE argument list.  */
#define DEF_MIPS_FTYPE(NUM, ARGS)                                       \
  case MIPS_FTYPE_NAME##NUM ARGS:                                       \
    types[(int) type]                                                   \
      = build_function_type_list (MIPS_FTYPE_ATYPES##NUM ARGS,          \
                                  NULL_TREE);                           \
    break;
#include "config/mips/mips-ftypes.def"
#undef DEF_MIPS_FTYPE
      default:
        gcc_unreachable ();
      }

  return types[(int) type];
}
13523
 
13524
/* Implement TARGET_INIT_BUILTINS.  */
13525
 
13526
static void
13527
mips_init_builtins (void)
13528
{
13529
  const struct mips_builtin_description *d;
13530
  unsigned int i;
13531
 
13532
  /* Iterate through all of the bdesc arrays, initializing all of the
13533
     builtin functions.  */
13534
  for (i = 0; i < ARRAY_SIZE (mips_builtins); i++)
13535
    {
13536
      d = &mips_builtins[i];
13537
      if (d->avail ())
13538
        mips_builtin_decls[i]
13539
          = add_builtin_function (d->name,
13540
                                  mips_build_function_type (d->function_type),
13541
                                  i, BUILT_IN_MD, NULL, NULL);
13542
    }
13543
}
13544
 
13545
/* Implement TARGET_BUILTIN_DECL.  */
13546
 
13547
static tree
13548
mips_builtin_decl (unsigned int code, bool initialize_p ATTRIBUTE_UNUSED)
13549
{
13550
  if (code >= ARRAY_SIZE (mips_builtins))
13551
    return error_mark_node;
13552
  return mips_builtin_decls[code];
13553
}
13554
 
13555
/* Take argument ARGNO from EXP's argument list and convert it into
13556
   an expand operand.  Store the operand in *OP.  */
13557
 
13558
static void
13559
mips_prepare_builtin_arg (struct expand_operand *op, tree exp,
13560
                          unsigned int argno)
13561
{
13562
  tree arg;
13563
  rtx value;
13564
 
13565
  arg = CALL_EXPR_ARG (exp, argno);
13566
  value = expand_normal (arg);
13567
  create_input_operand (op, value, TYPE_MODE (TREE_TYPE (arg)));
13568
}
13569
 
13570
/* Expand instruction ICODE as part of a built-in function sequence.
13571
   Use the first NOPS elements of OPS as the instruction's operands.
13572
   HAS_TARGET_P is true if operand 0 is a target; it is false if the
13573
   instruction has no target.
13574
 
13575
   Return the target rtx if HAS_TARGET_P, otherwise return const0_rtx.  */
13576
 
13577
static rtx
13578
mips_expand_builtin_insn (enum insn_code icode, unsigned int nops,
13579
                          struct expand_operand *ops, bool has_target_p)
13580
{
13581
  if (!maybe_expand_insn (icode, nops, ops))
13582
    {
13583
      error ("invalid argument to built-in function");
13584
      return has_target_p ? gen_reg_rtx (ops[0].mode) : const0_rtx;
13585
    }
13586
  return has_target_p ? ops[0].value : const0_rtx;
13587
}
13588
 
13589
/* Expand a floating-point comparison for built-in function call EXP.
13590
   The first NARGS arguments are the values to be compared.  ICODE is
13591
   the .md pattern that does the comparison and COND is the condition
13592
   that is being tested.  Return an rtx for the result.  */
13593
 
13594
static rtx
13595
mips_expand_builtin_compare_1 (enum insn_code icode,
13596
                               enum mips_fp_condition cond,
13597
                               tree exp, int nargs)
13598
{
13599
  struct expand_operand ops[MAX_RECOG_OPERANDS];
13600
  int opno, argno;
13601
 
13602
  /* The instruction should have a target operand, an operand for each
13603
     argument, and an operand for COND.  */
13604
  gcc_assert (nargs + 2 == insn_data[(int) icode].n_generator_args);
13605
 
13606
  opno = 0;
13607
  create_output_operand (&ops[opno++], NULL_RTX,
13608
                         insn_data[(int) icode].operand[0].mode);
13609
  for (argno = 0; argno < nargs; argno++)
13610
    mips_prepare_builtin_arg (&ops[opno++], exp, argno);
13611
  create_integer_operand (&ops[opno++], (int) cond);
13612
  return mips_expand_builtin_insn (icode, opno, ops, true);
13613
}
13614
 
13615
/* Expand a MIPS_BUILTIN_DIRECT or MIPS_BUILTIN_DIRECT_NO_TARGET function;
13616
   HAS_TARGET_P says which.  EXP is the CALL_EXPR that calls the function
13617
   and ICODE is the code of the associated .md pattern.  TARGET, if nonnull,
13618
   suggests a good place to put the result.  */
13619
 
13620
static rtx
13621
mips_expand_builtin_direct (enum insn_code icode, rtx target, tree exp,
13622
                            bool has_target_p)
13623
{
13624
  struct expand_operand ops[MAX_RECOG_OPERANDS];
13625
  int opno, argno;
13626
 
13627
  /* Map any target to operand 0.  */
13628
  opno = 0;
13629
  if (has_target_p)
13630
    create_output_operand (&ops[opno++], target, TYPE_MODE (TREE_TYPE (exp)));
13631
 
13632
  /* Map the arguments to the other operands.  */
13633
  gcc_assert (opno + call_expr_nargs (exp)
13634
              == insn_data[icode].n_generator_args);
13635
  for (argno = 0; argno < call_expr_nargs (exp); argno++)
13636
    mips_prepare_builtin_arg (&ops[opno++], exp, argno);
13637
 
13638
  return mips_expand_builtin_insn (icode, opno, ops, has_target_p);
13639
}
13640
 
13641
/* Expand a __builtin_mips_movt_*_ps or __builtin_mips_movf_*_ps
   function; TYPE says which.  EXP is the CALL_EXPR that calls the
   function, ICODE is the instruction that should be used to compare
   the first two arguments, and COND is the condition it should test.
   TARGET, if nonnull, suggests a good place to put the result.  */

static rtx
mips_expand_builtin_movtf (enum mips_builtin_type type,
                           enum insn_code icode, enum mips_fp_condition cond,
                           rtx target, tree exp)
{
  struct expand_operand ops[4];
  rtx cmp_result;

  /* Compare the first two call arguments; the result selects which
     value the conditional move below copies into the target.  */
  cmp_result = mips_expand_builtin_compare_1 (icode, cond, exp, 2);
  create_output_operand (&ops[0], target, TYPE_MODE (TREE_TYPE (exp)));
  if (type == MIPS_BUILTIN_MOVT)
    {
      /* MOVT swaps the two value operands relative to MOVF, so a
	 single cond_move pattern implements both variants.  */
      mips_prepare_builtin_arg (&ops[2], exp, 2);
      mips_prepare_builtin_arg (&ops[1], exp, 3);
    }
  else
    {
      mips_prepare_builtin_arg (&ops[1], exp, 2);
      mips_prepare_builtin_arg (&ops[2], exp, 3);
    }
  create_fixed_operand (&ops[3], cmp_result);
  return mips_expand_builtin_insn (CODE_FOR_mips_cond_move_tf_ps,
                                   4, ops, true);
}
13671
 
13672
/* Move VALUE_IF_TRUE into TARGET if CONDITION is true; move VALUE_IF_FALSE
13673
   into TARGET otherwise.  Return TARGET.  */
13674
 
13675
static rtx
13676
mips_builtin_branch_and_move (rtx condition, rtx target,
13677
                              rtx value_if_true, rtx value_if_false)
13678
{
13679
  rtx true_label, done_label;
13680
 
13681
  true_label = gen_label_rtx ();
13682
  done_label = gen_label_rtx ();
13683
 
13684
  /* First assume that CONDITION is false.  */
13685
  mips_emit_move (target, value_if_false);
13686
 
13687
  /* Branch to TRUE_LABEL if CONDITION is true and DONE_LABEL otherwise.  */
13688
  emit_jump_insn (gen_condjump (condition, true_label));
13689
  emit_jump_insn (gen_jump (done_label));
13690
  emit_barrier ();
13691
 
13692
  /* Fix TARGET if CONDITION is true.  */
13693
  emit_label (true_label);
13694
  mips_emit_move (target, value_if_true);
13695
 
13696
  emit_label (done_label);
13697
  return target;
13698
}
13699
 
13700
/* Expand a comparison built-in function of type BUILTIN_TYPE.  EXP is
   the CALL_EXPR that calls the function, ICODE is the code of the
   comparison instruction, and COND is the condition it should test.
   TARGET, if nonnull, suggests a good place to put the boolean result.  */

static rtx
mips_expand_builtin_compare (enum mips_builtin_type builtin_type,
                             enum insn_code icode, enum mips_fp_condition cond,
                             rtx target, tree exp)
{
  rtx offset, condition, cmp_result;

  /* The boolean result always lives in an SImode register.  */
  if (target == 0 || GET_MODE (target) != SImode)
    target = gen_reg_rtx (SImode);
  cmp_result = mips_expand_builtin_compare_1 (icode, cond, exp,
                                              call_expr_nargs (exp));

  /* If the comparison sets more than one register, we define the result
     to be 0 if all registers are false and -1 if all registers are true.
     The value of the complete result is indeterminate otherwise.  */
  switch (builtin_type)
    {
    case MIPS_BUILTIN_CMP_ALL:
      /* True only when every element compared true (result == -1).  */
      condition = gen_rtx_NE (VOIDmode, cmp_result, constm1_rtx);
      return mips_builtin_branch_and_move (condition, target,
                                           const0_rtx, const1_rtx);

    case MIPS_BUILTIN_CMP_UPPER:
    case MIPS_BUILTIN_CMP_LOWER:
      /* Test a single condition code: offset 1 for UPPER, 0 for LOWER.  */
      offset = GEN_INT (builtin_type == MIPS_BUILTIN_CMP_UPPER);
      condition = gen_single_cc (cmp_result, offset);
      return mips_builtin_branch_and_move (condition, target,
                                           const1_rtx, const0_rtx);

    default:
      /* Remaining variants: true when any bit of the result is set.  */
      condition = gen_rtx_NE (VOIDmode, cmp_result, const0_rtx);
      return mips_builtin_branch_and_move (condition, target,
                                           const1_rtx, const0_rtx);
    }
}
13740
 
13741
/* Expand a bposge built-in function of type BUILTIN_TYPE.  TARGET,
13742
   if nonnull, suggests a good place to put the boolean result.  */
13743
 
13744
static rtx
13745
mips_expand_builtin_bposge (enum mips_builtin_type builtin_type, rtx target)
13746
{
13747
  rtx condition, cmp_result;
13748
  int cmp_value;
13749
 
13750
  if (target == 0 || GET_MODE (target) != SImode)
13751
    target = gen_reg_rtx (SImode);
13752
 
13753
  cmp_result = gen_rtx_REG (CCDSPmode, CCDSP_PO_REGNUM);
13754
 
13755
  if (builtin_type == MIPS_BUILTIN_BPOSGE32)
13756
    cmp_value = 32;
13757
  else
13758
    gcc_assert (0);
13759
 
13760
  condition = gen_rtx_GE (VOIDmode, cmp_result, GEN_INT (cmp_value));
13761
  return mips_builtin_branch_and_move (condition, target,
13762
                                       const1_rtx, const0_rtx);
13763
}
13764
 
13765
/* Implement TARGET_EXPAND_BUILTIN.  */
13766
 
13767
static rtx
13768
mips_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
13769
                     enum machine_mode mode, int ignore)
13770
{
13771
  tree fndecl;
13772
  unsigned int fcode, avail;
13773
  const struct mips_builtin_description *d;
13774
 
13775
  fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
13776
  fcode = DECL_FUNCTION_CODE (fndecl);
13777
  gcc_assert (fcode < ARRAY_SIZE (mips_builtins));
13778
  d = &mips_builtins[fcode];
13779
  avail = d->avail ();
13780
  gcc_assert (avail != 0);
13781
  if (TARGET_MIPS16)
13782
    {
13783
      error ("built-in function %qE not supported for MIPS16",
13784
             DECL_NAME (fndecl));
13785
      return ignore ? const0_rtx : CONST0_RTX (mode);
13786
    }
13787
  switch (d->builtin_type)
13788
    {
13789
    case MIPS_BUILTIN_DIRECT:
13790
      return mips_expand_builtin_direct (d->icode, target, exp, true);
13791
 
13792
    case MIPS_BUILTIN_DIRECT_NO_TARGET:
13793
      return mips_expand_builtin_direct (d->icode, target, exp, false);
13794
 
13795
    case MIPS_BUILTIN_MOVT:
13796
    case MIPS_BUILTIN_MOVF:
13797
      return mips_expand_builtin_movtf (d->builtin_type, d->icode,
13798
                                        d->cond, target, exp);
13799
 
13800
    case MIPS_BUILTIN_CMP_ANY:
13801
    case MIPS_BUILTIN_CMP_ALL:
13802
    case MIPS_BUILTIN_CMP_UPPER:
13803
    case MIPS_BUILTIN_CMP_LOWER:
13804
    case MIPS_BUILTIN_CMP_SINGLE:
13805
      return mips_expand_builtin_compare (d->builtin_type, d->icode,
13806
                                          d->cond, target, exp);
13807
 
13808
    case MIPS_BUILTIN_BPOSGE32:
13809
      return mips_expand_builtin_bposge (d->builtin_type, target);
13810
    }
13811
  gcc_unreachable ();
13812
}
13813
 
13814
/* An entry in the MIPS16 constant pool.  VALUE is the pool constant,
13815
   MODE is its mode, and LABEL is the CODE_LABEL associated with it.  */
13816
struct mips16_constant {
13817
  struct mips16_constant *next;
13818
  rtx value;
13819
  rtx label;
13820
  enum machine_mode mode;
13821
};
13822
 
13823
/* Information about an incomplete MIPS16 constant pool.  FIRST is the
13824
   first constant, HIGHEST_ADDRESS is the highest address that the first
13825
   byte of the pool can have, and INSN_ADDRESS is the current instruction
13826
   address.  */
13827
struct mips16_constant_pool {
13828
  struct mips16_constant *first;
13829
  int highest_address;
13830
  int insn_address;
13831
};
13832
 
13833
/* Add constant VALUE to POOL and return its label.  MODE is the
13834
   value's mode (used for CONST_INTs, etc.).  */
13835
 
13836
static rtx
13837
mips16_add_constant (struct mips16_constant_pool *pool,
13838
                     rtx value, enum machine_mode mode)
13839
{
13840
  struct mips16_constant **p, *c;
13841
  bool first_of_size_p;
13842
 
13843
  /* See whether the constant is already in the pool.  If so, return the
13844
     existing label, otherwise leave P pointing to the place where the
13845
     constant should be added.
13846
 
13847
     Keep the pool sorted in increasing order of mode size so that we can
13848
     reduce the number of alignments needed.  */
13849
  first_of_size_p = true;
13850
  for (p = &pool->first; *p != 0; p = &(*p)->next)
13851
    {
13852
      if (mode == (*p)->mode && rtx_equal_p (value, (*p)->value))
13853
        return (*p)->label;
13854
      if (GET_MODE_SIZE (mode) < GET_MODE_SIZE ((*p)->mode))
13855
        break;
13856
      if (GET_MODE_SIZE (mode) == GET_MODE_SIZE ((*p)->mode))
13857
        first_of_size_p = false;
13858
    }
13859
 
13860
  /* In the worst case, the constant needed by the earliest instruction
13861
     will end up at the end of the pool.  The entire pool must then be
13862
     accessible from that instruction.
13863
 
13864
     When adding the first constant, set the pool's highest address to
13865
     the address of the first out-of-range byte.  Adjust this address
13866
     downwards each time a new constant is added.  */
13867
  if (pool->first == 0)
13868
    /* For LWPC, ADDIUPC and DADDIUPC, the base PC value is the address
13869
       of the instruction with the lowest two bits clear.  The base PC
13870
       value for LDPC has the lowest three bits clear.  Assume the worst
13871
       case here; namely that the PC-relative instruction occupies the
13872
       last 2 bytes in an aligned word.  */
13873
    pool->highest_address = pool->insn_address - (UNITS_PER_WORD - 2) + 0x8000;
13874
  pool->highest_address -= GET_MODE_SIZE (mode);
13875
  if (first_of_size_p)
13876
    /* Take into account the worst possible padding due to alignment.  */
13877
    pool->highest_address -= GET_MODE_SIZE (mode) - 1;
13878
 
13879
  /* Create a new entry.  */
13880
  c = XNEW (struct mips16_constant);
13881
  c->value = value;
13882
  c->mode = mode;
13883
  c->label = gen_label_rtx ();
13884
  c->next = *p;
13885
  *p = c;
13886
 
13887
  return c->label;
13888
}
13889
 
13890
/* Output constant VALUE after instruction INSN and return the last
13891
   instruction emitted.  MODE is the mode of the constant.  */
13892
 
13893
static rtx
13894
mips16_emit_constants_1 (enum machine_mode mode, rtx value, rtx insn)
13895
{
13896
  if (SCALAR_INT_MODE_P (mode) || ALL_SCALAR_FIXED_POINT_MODE_P (mode))
13897
    {
13898
      rtx size = GEN_INT (GET_MODE_SIZE (mode));
13899
      return emit_insn_after (gen_consttable_int (value, size), insn);
13900
    }
13901
 
13902
  if (SCALAR_FLOAT_MODE_P (mode))
13903
    return emit_insn_after (gen_consttable_float (value), insn);
13904
 
13905
  if (VECTOR_MODE_P (mode))
13906
    {
13907
      int i;
13908
 
13909
      for (i = 0; i < CONST_VECTOR_NUNITS (value); i++)
13910
        insn = mips16_emit_constants_1 (GET_MODE_INNER (mode),
13911
                                        CONST_VECTOR_ELT (value, i), insn);
13912
      return insn;
13913
    }
13914
 
13915
  gcc_unreachable ();
13916
}
13917
 
13918
/* Dump out the constants in CONSTANTS after INSN.  */
13919
 
13920
static void
13921
mips16_emit_constants (struct mips16_constant *constants, rtx insn)
13922
{
13923
  struct mips16_constant *c, *next;
13924
  int align;
13925
 
13926
  align = 0;
13927
  for (c = constants; c != NULL; c = next)
13928
    {
13929
      /* If necessary, increase the alignment of PC.  */
13930
      if (align < GET_MODE_SIZE (c->mode))
13931
        {
13932
          int align_log = floor_log2 (GET_MODE_SIZE (c->mode));
13933
          insn = emit_insn_after (gen_align (GEN_INT (align_log)), insn);
13934
        }
13935
      align = GET_MODE_SIZE (c->mode);
13936
 
13937
      insn = emit_label_after (c->label, insn);
13938
      insn = mips16_emit_constants_1 (c->mode, c->value, insn);
13939
 
13940
      next = c->next;
13941
      free (c);
13942
    }
13943
 
13944
  emit_barrier_after (insn);
13945
}
13946
 
13947
/* Return the length of instruction INSN.  */
13948
 
13949
static int
13950
mips16_insn_length (rtx insn)
13951
{
13952
  if (JUMP_P (insn))
13953
    {
13954
      rtx body = PATTERN (insn);
13955
      if (GET_CODE (body) == ADDR_VEC)
13956
        return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 0);
13957
      if (GET_CODE (body) == ADDR_DIFF_VEC)
13958
        return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 1);
13959
    }
13960
  return get_attr_length (insn);
13961
}
13962
 
13963
/* If *X is a symbolic constant that refers to the constant pool, add
13964
   the constant to POOL and rewrite *X to use the constant's label.  */
13965
 
13966
static void
13967
mips16_rewrite_pool_constant (struct mips16_constant_pool *pool, rtx *x)
13968
{
13969
  rtx base, offset, label;
13970
 
13971
  split_const (*x, &base, &offset);
13972
  if (GET_CODE (base) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (base))
13973
    {
13974
      label = mips16_add_constant (pool, get_pool_constant (base),
13975
                                   get_pool_mode (base));
13976
      base = gen_rtx_LABEL_REF (Pmode, label);
13977
      *x = mips_unspec_address_offset (base, offset, SYMBOL_PC_RELATIVE);
13978
    }
13979
}
13980
 
13981
/* This structure is used to communicate with mips16_rewrite_pool_refs.
13982
   INSN is the instruction we're rewriting and POOL points to the current
13983
   constant pool.  */
13984
struct mips16_rewrite_pool_refs_info {
13985
  rtx insn;
13986
  struct mips16_constant_pool *pool;
13987
};
13988
 
13989
/* Rewrite *X so that constant pool references refer to the constant's
13990
   label instead.  DATA points to a mips16_rewrite_pool_refs_info
13991
   structure.  */
13992
 
13993
static int
13994
mips16_rewrite_pool_refs (rtx *x, void *data)
13995
{
13996
  struct mips16_rewrite_pool_refs_info *info =
13997
    (struct mips16_rewrite_pool_refs_info *) data;
13998
 
13999
  if (force_to_mem_operand (*x, Pmode))
14000
    {
14001
      rtx mem = force_const_mem (GET_MODE (*x), *x);
14002
      validate_change (info->insn, x, mem, false);
14003
    }
14004
 
14005
  if (MEM_P (*x))
14006
    {
14007
      mips16_rewrite_pool_constant (info->pool, &XEXP (*x, 0));
14008
      return -1;
14009
    }
14010
 
14011
  /* Don't rewrite the __mips16_rdwr symbol.  */
14012
  if (GET_CODE (*x) == UNSPEC && XINT (*x, 1) == UNSPEC_TLS_GET_TP)
14013
    return -1;
14014
 
14015
  if (TARGET_MIPS16_TEXT_LOADS)
14016
    mips16_rewrite_pool_constant (info->pool, x);
14017
 
14018
  return GET_CODE (*x) == CONST ? -1 : 0;
14019
}
14020
 
14021
/* Return whether CFG is used in mips_reorg.  */
14022
 
14023
static bool
14024
mips_cfg_in_reorg (void)
14025
{
14026
  return (mips_r10k_cache_barrier != R10K_CACHE_BARRIER_NONE
14027
          || TARGET_RELAX_PIC_CALLS);
14028
}
14029
 
14030
/* Build MIPS16 constant pools.  */
14031
 
14032
static void
14033
mips16_lay_out_constants (void)
14034
{
14035
  struct mips16_constant_pool pool;
14036
  struct mips16_rewrite_pool_refs_info info;
14037
  rtx insn, barrier;
14038
 
14039
  if (!TARGET_MIPS16_PCREL_LOADS)
14040
    return;
14041
 
14042
  if (mips_cfg_in_reorg ())
14043
    split_all_insns ();
14044
  else
14045
    split_all_insns_noflow ();
14046
  barrier = 0;
14047
  memset (&pool, 0, sizeof (pool));
14048
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
14049
    {
14050
      /* Rewrite constant pool references in INSN.  */
14051
      if (USEFUL_INSN_P (insn))
14052
        {
14053
          info.insn = insn;
14054
          info.pool = &pool;
14055
          for_each_rtx (&PATTERN (insn), mips16_rewrite_pool_refs, &info);
14056
        }
14057
 
14058
      pool.insn_address += mips16_insn_length (insn);
14059
 
14060
      if (pool.first != NULL)
14061
        {
14062
          /* If there are no natural barriers between the first user of
14063
             the pool and the highest acceptable address, we'll need to
14064
             create a new instruction to jump around the constant pool.
14065
             In the worst case, this instruction will be 4 bytes long.
14066
 
14067
             If it's too late to do this transformation after INSN,
14068
             do it immediately before INSN.  */
14069
          if (barrier == 0 && pool.insn_address + 4 > pool.highest_address)
14070
            {
14071
              rtx label, jump;
14072
 
14073
              label = gen_label_rtx ();
14074
 
14075
              jump = emit_jump_insn_before (gen_jump (label), insn);
14076
              JUMP_LABEL (jump) = label;
14077
              LABEL_NUSES (label) = 1;
14078
              barrier = emit_barrier_after (jump);
14079
 
14080
              emit_label_after (label, barrier);
14081
              pool.insn_address += 4;
14082
            }
14083
 
14084
          /* See whether the constant pool is now out of range of the first
14085
             user.  If so, output the constants after the previous barrier.
14086
             Note that any instructions between BARRIER and INSN (inclusive)
14087
             will use negative offsets to refer to the pool.  */
14088
          if (pool.insn_address > pool.highest_address)
14089
            {
14090
              mips16_emit_constants (pool.first, barrier);
14091
              pool.first = NULL;
14092
              barrier = 0;
14093
            }
14094
          else if (BARRIER_P (insn))
14095
            barrier = insn;
14096
        }
14097
    }
14098
  mips16_emit_constants (pool.first, get_last_insn ());
14099
}
14100
 
14101
/* Return true if it is worth r10k_simplify_address's while replacing
14102
   an address with X.  We are looking for constants, and for addresses
14103
   at a known offset from the incoming stack pointer.  */
14104
 
14105
static bool
14106
r10k_simplified_address_p (rtx x)
14107
{
14108
  if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
14109
    x = XEXP (x, 0);
14110
  return x == virtual_incoming_args_rtx || CONSTANT_P (x);
14111
}
14112
 
14113
/* X is an expression that appears in INSN.  Try to use the UD chains
14114
   to simplify it, returning the simplified form on success and the
14115
   original form otherwise.  Replace the incoming value of $sp with
14116
   virtual_incoming_args_rtx (which should never occur in X otherwise).  */
14117
 
14118
static rtx
14119
r10k_simplify_address (rtx x, rtx insn)
14120
{
14121
  rtx newx, op0, op1, set, def_insn, note;
14122
  df_ref use, def;
14123
  struct df_link *defs;
14124
 
14125
  newx = NULL_RTX;
14126
  if (UNARY_P (x))
14127
    {
14128
      op0 = r10k_simplify_address (XEXP (x, 0), insn);
14129
      if (op0 != XEXP (x, 0))
14130
        newx = simplify_gen_unary (GET_CODE (x), GET_MODE (x),
14131
                                   op0, GET_MODE (XEXP (x, 0)));
14132
    }
14133
  else if (BINARY_P (x))
14134
    {
14135
      op0 = r10k_simplify_address (XEXP (x, 0), insn);
14136
      op1 = r10k_simplify_address (XEXP (x, 1), insn);
14137
      if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
14138
        newx = simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
14139
    }
14140
  else if (GET_CODE (x) == LO_SUM)
14141
    {
14142
      /* LO_SUMs can be offset from HIGHs, if we know they won't
14143
         overflow.  See mips_classify_address for the rationale behind
14144
         the lax check.  */
14145
      op0 = r10k_simplify_address (XEXP (x, 0), insn);
14146
      if (GET_CODE (op0) == HIGH)
14147
        newx = XEXP (x, 1);
14148
    }
14149
  else if (REG_P (x))
14150
    {
14151
      /* Uses are recorded by regno_reg_rtx, not X itself.  */
14152
      use = df_find_use (insn, regno_reg_rtx[REGNO (x)]);
14153
      gcc_assert (use);
14154
      defs = DF_REF_CHAIN (use);
14155
 
14156
      /* Require a single definition.  */
14157
      if (defs && defs->next == NULL)
14158
        {
14159
          def = defs->ref;
14160
          if (DF_REF_IS_ARTIFICIAL (def))
14161
            {
14162
              /* Replace the incoming value of $sp with
14163
                 virtual_incoming_args_rtx.  */
14164
              if (x == stack_pointer_rtx
14165
                  && DF_REF_BB (def) == ENTRY_BLOCK_PTR)
14166
                newx = virtual_incoming_args_rtx;
14167
            }
14168
          else if (dominated_by_p (CDI_DOMINATORS, DF_REF_BB (use),
14169
                                   DF_REF_BB (def)))
14170
            {
14171
              /* Make sure that DEF_INSN is a single set of REG.  */
14172
              def_insn = DF_REF_INSN (def);
14173
              if (NONJUMP_INSN_P (def_insn))
14174
                {
14175
                  set = single_set (def_insn);
14176
                  if (set && rtx_equal_p (SET_DEST (set), x))
14177
                    {
14178
                      /* Prefer to use notes, since the def-use chains
14179
                         are often shorter.  */
14180
                      note = find_reg_equal_equiv_note (def_insn);
14181
                      if (note)
14182
                        newx = XEXP (note, 0);
14183
                      else
14184
                        newx = SET_SRC (set);
14185
                      newx = r10k_simplify_address (newx, def_insn);
14186
                    }
14187
                }
14188
            }
14189
        }
14190
    }
14191
  if (newx && r10k_simplified_address_p (newx))
14192
    return newx;
14193
  return x;
14194
}
14195
 
14196
/* Return true if ADDRESS is known to be an uncached address
14197
   on R10K systems.  */
14198
 
14199
static bool
14200
r10k_uncached_address_p (unsigned HOST_WIDE_INT address)
14201
{
14202
  unsigned HOST_WIDE_INT upper;
14203
 
14204
  /* Check for KSEG1.  */
14205
  if (address + 0x60000000 < 0x20000000)
14206
    return true;
14207
 
14208
  /* Check for uncached XKPHYS addresses.  */
14209
  if (Pmode == DImode)
14210
    {
14211
      upper = (address >> 40) & 0xf9ffff;
14212
      if (upper == 0x900000 || upper == 0xb80000)
14213
        return true;
14214
    }
14215
  return false;
14216
}
14217
 
14218
/* Return true if we can prove that an access to address X in instruction
14219
   INSN would be safe from R10K speculation.  This X is a general
14220
   expression; it might not be a legitimate address.  */
14221
 
14222
static bool
14223
r10k_safe_address_p (rtx x, rtx insn)
14224
{
14225
  rtx base, offset;
14226
  HOST_WIDE_INT offset_val;
14227
 
14228
  x = r10k_simplify_address (x, insn);
14229
 
14230
  /* Check for references to the stack frame.  It doesn't really matter
14231
     how much of the frame has been allocated at INSN; -mr10k-cache-barrier
14232
     allows us to assume that accesses to any part of the eventual frame
14233
     is safe from speculation at any point in the function.  */
14234
  mips_split_plus (x, &base, &offset_val);
14235
  if (base == virtual_incoming_args_rtx
14236
      && offset_val >= -cfun->machine->frame.total_size
14237
      && offset_val < cfun->machine->frame.args_size)
14238
    return true;
14239
 
14240
  /* Check for uncached addresses.  */
14241
  if (CONST_INT_P (x))
14242
    return r10k_uncached_address_p (INTVAL (x));
14243
 
14244
  /* Check for accesses to a static object.  */
14245
  split_const (x, &base, &offset);
14246
  return offset_within_block_p (base, INTVAL (offset));
14247
}
14248
 
14249
/* Return true if a MEM with MEM_EXPR EXPR and MEM_OFFSET OFFSET is
14250
   an in-range access to an automatic variable, or to an object with
14251
   a link-time-constant address.  */
14252
 
14253
static bool
14254
r10k_safe_mem_expr_p (tree expr, HOST_WIDE_INT offset)
14255
{
14256
  if (offset < 0 || offset >= int_size_in_bytes (TREE_TYPE (expr)))
14257
    return false;
14258
 
14259
  while (TREE_CODE (expr) == COMPONENT_REF)
14260
    {
14261
      expr = TREE_OPERAND (expr, 0);
14262
      if (expr == NULL_TREE)
14263
        return false;
14264
    }
14265
 
14266
  return DECL_P (expr);
14267
}
14268
 
14269
/* A for_each_rtx callback for which DATA points to the instruction
14270
   containing *X.  Stop the search if we find a MEM that is not safe
14271
   from R10K speculation.  */
14272
 
14273
static int
14274
r10k_needs_protection_p_1 (rtx *loc, void *data)
14275
{
14276
  rtx mem;
14277
 
14278
  mem = *loc;
14279
  if (!MEM_P (mem))
14280
    return 0;
14281
 
14282
  if (MEM_EXPR (mem)
14283
      && MEM_OFFSET_KNOWN_P (mem)
14284
      && r10k_safe_mem_expr_p (MEM_EXPR (mem), MEM_OFFSET (mem)))
14285
    return -1;
14286
 
14287
  if (r10k_safe_address_p (XEXP (mem, 0), (rtx) data))
14288
    return -1;
14289
 
14290
  return 1;
14291
}
14292
 
14293
/* A note_stores callback for which DATA points to an instruction pointer.
14294
   If *DATA is nonnull, make it null if it X contains a MEM that is not
14295
   safe from R10K speculation.  */
14296
 
14297
static void
14298
r10k_needs_protection_p_store (rtx x, const_rtx pat ATTRIBUTE_UNUSED,
14299
                               void *data)
14300
{
14301
  rtx *insn_ptr;
14302
 
14303
  insn_ptr = (rtx *) data;
14304
  if (*insn_ptr && for_each_rtx (&x, r10k_needs_protection_p_1, *insn_ptr))
14305
    *insn_ptr = NULL_RTX;
14306
}
14307
 
14308
/* A for_each_rtx callback that iterates over the pattern of a CALL_INSN.
14309
   Return nonzero if the call is not to a declared function.  */
14310
 
14311
static int
14312
r10k_needs_protection_p_call (rtx *loc, void *data ATTRIBUTE_UNUSED)
14313
{
14314
  rtx x;
14315
 
14316
  x = *loc;
14317
  if (!MEM_P (x))
14318
    return 0;
14319
 
14320
  x = XEXP (x, 0);
14321
  if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_DECL (x))
14322
    return -1;
14323
 
14324
  return 1;
14325
}
14326
 
14327
/* Return true if instruction INSN needs to be protected by an R10K
14328
   cache barrier.  */
14329
 
14330
static bool
14331
r10k_needs_protection_p (rtx insn)
14332
{
14333
  if (CALL_P (insn))
14334
    return for_each_rtx (&PATTERN (insn), r10k_needs_protection_p_call, NULL);
14335
 
14336
  if (mips_r10k_cache_barrier == R10K_CACHE_BARRIER_STORE)
14337
    {
14338
      note_stores (PATTERN (insn), r10k_needs_protection_p_store, &insn);
14339
      return insn == NULL_RTX;
14340
    }
14341
 
14342
  return for_each_rtx (&PATTERN (insn), r10k_needs_protection_p_1, insn);
14343
}
14344
 
14345
/* Return true if BB is only reached by blocks in PROTECTED_BBS and if every
14346
   edge is unconditional.  */
14347
 
14348
static bool
14349
r10k_protected_bb_p (basic_block bb, sbitmap protected_bbs)
14350
{
14351
  edge_iterator ei;
14352
  edge e;
14353
 
14354
  FOR_EACH_EDGE (e, ei, bb->preds)
14355
    if (!single_succ_p (e->src)
14356
        || !TEST_BIT (protected_bbs, e->src->index)
14357
        || (e->flags & EDGE_COMPLEX) != 0)
14358
      return false;
14359
  return true;
14360
}
14361
 
14362
/* Implement -mr10k-cache-barrier= for the current function.  */
14363
 
14364
static void
14365
r10k_insert_cache_barriers (void)
14366
{
14367
  int *rev_post_order;
14368
  unsigned int i, n;
14369
  basic_block bb;
14370
  sbitmap protected_bbs;
14371
  rtx insn, end, unprotected_region;
14372
 
14373
  if (TARGET_MIPS16)
14374
    {
14375
      sorry ("%qs does not support MIPS16 code", "-mr10k-cache-barrier");
14376
      return;
14377
    }
14378
 
14379
  /* Calculate dominators.  */
14380
  calculate_dominance_info (CDI_DOMINATORS);
14381
 
14382
  /* Bit X of PROTECTED_BBS is set if the last operation in basic block
14383
     X is protected by a cache barrier.  */
14384
  protected_bbs = sbitmap_alloc (last_basic_block);
14385
  sbitmap_zero (protected_bbs);
14386
 
14387
  /* Iterate over the basic blocks in reverse post-order.  */
14388
  rev_post_order = XNEWVEC (int, last_basic_block);
14389
  n = pre_and_rev_post_order_compute (NULL, rev_post_order, false);
14390
  for (i = 0; i < n; i++)
14391
    {
14392
      bb = BASIC_BLOCK (rev_post_order[i]);
14393
 
14394
      /* If this block is only reached by unconditional edges, and if the
14395
         source of every edge is protected, the beginning of the block is
14396
         also protected.  */
14397
      if (r10k_protected_bb_p (bb, protected_bbs))
14398
        unprotected_region = NULL_RTX;
14399
      else
14400
        unprotected_region = pc_rtx;
14401
      end = NEXT_INSN (BB_END (bb));
14402
 
14403
      /* UNPROTECTED_REGION is:
14404
 
14405
         - null if we are processing a protected region,
14406
         - pc_rtx if we are processing an unprotected region but have
14407
           not yet found the first instruction in it
14408
         - the first instruction in an unprotected region otherwise.  */
14409
      for (insn = BB_HEAD (bb); insn != end; insn = NEXT_INSN (insn))
14410
        {
14411
          if (unprotected_region && USEFUL_INSN_P (insn))
14412
            {
14413
              if (recog_memoized (insn) == CODE_FOR_mips_cache)
14414
                /* This CACHE instruction protects the following code.  */
14415
                unprotected_region = NULL_RTX;
14416
              else
14417
                {
14418
                  /* See if INSN is the first instruction in this
14419
                     unprotected region.  */
14420
                  if (unprotected_region == pc_rtx)
14421
                    unprotected_region = insn;
14422
 
14423
                  /* See if INSN needs to be protected.  If so,
14424
                     we must insert a cache barrier somewhere between
14425
                     PREV_INSN (UNPROTECTED_REGION) and INSN.  It isn't
14426
                     clear which position is better performance-wise,
14427
                     but as a tie-breaker, we assume that it is better
14428
                     to allow delay slots to be back-filled where
14429
                     possible, and that it is better not to insert
14430
                     barriers in the middle of already-scheduled code.
14431
                     We therefore insert the barrier at the beginning
14432
                     of the region.  */
14433
                  if (r10k_needs_protection_p (insn))
14434
                    {
14435
                      emit_insn_before (gen_r10k_cache_barrier (),
14436
                                        unprotected_region);
14437
                      unprotected_region = NULL_RTX;
14438
                    }
14439
                }
14440
            }
14441
 
14442
          if (CALL_P (insn))
14443
            /* The called function is not required to protect the exit path.
14444
               The code that follows a call is therefore unprotected.  */
14445
            unprotected_region = pc_rtx;
14446
        }
14447
 
14448
      /* Record whether the end of this block is protected.  */
14449
      if (unprotected_region == NULL_RTX)
14450
        SET_BIT (protected_bbs, bb->index);
14451
    }
14452
  XDELETEVEC (rev_post_order);
14453
 
14454
  sbitmap_free (protected_bbs);
14455
 
14456
  free_dominance_info (CDI_DOMINATORS);
14457
}
14458
 
14459
/* If INSN is a call, return the underlying CALL expr.  Return NULL_RTX
14460
   otherwise.  If INSN has two call rtx, then store the second one in
14461
   SECOND_CALL.  */
14462
 
14463
static rtx
14464
mips_call_expr_from_insn (rtx insn, rtx *second_call)
14465
{
14466
  rtx x;
14467
  rtx x2;
14468
 
14469
  if (!CALL_P (insn))
14470
    return NULL_RTX;
14471
 
14472
  x = PATTERN (insn);
14473
  if (GET_CODE (x) == PARALLEL)
14474
    {
14475
      /* Calls returning complex values have two CALL rtx.  Look for the second
14476
         one here, and return it via the SECOND_CALL arg.  */
14477
      x2 = XVECEXP (x, 0, 1);
14478
      if (GET_CODE (x2) == SET)
14479
        x2 = XEXP (x2, 1);
14480
      if (GET_CODE (x2) == CALL)
14481
        *second_call = x2;
14482
 
14483
      x = XVECEXP (x, 0, 0);
14484
    }
14485
  if (GET_CODE (x) == SET)
14486
    x = XEXP (x, 1);
14487
  gcc_assert (GET_CODE (x) == CALL);
14488
 
14489
  return x;
14490
}
14491
 
14492
/* REG is set in DEF.  See if the definition is one of the ways we load a
14493
   register with a symbol address for a mips_use_pic_fn_addr_reg_p call.
14494
   If it is, return the symbol reference of the function, otherwise return
14495
   NULL_RTX.
14496
 
14497
   If RECURSE_P is true, use mips_find_pic_call_symbol to interpret
14498
   the values of source registers, otherwise treat such registers as
14499
   having an unknown value.  */
14500
 
14501
static rtx
14502
mips_pic_call_symbol_from_set (df_ref def, rtx reg, bool recurse_p)
14503
{
14504
  rtx def_insn, set;
14505
 
14506
  if (DF_REF_IS_ARTIFICIAL (def))
14507
    return NULL_RTX;
14508
 
14509
  def_insn = DF_REF_INSN (def);
14510
  set = single_set (def_insn);
14511
  if (set && rtx_equal_p (SET_DEST (set), reg))
14512
    {
14513
      rtx note, src, symbol;
14514
 
14515
      /* First, look at REG_EQUAL/EQUIV notes.  */
14516
      note = find_reg_equal_equiv_note (def_insn);
14517
      if (note && GET_CODE (XEXP (note, 0)) == SYMBOL_REF)
14518
        return XEXP (note, 0);
14519
 
14520
      /* For %call16 references we don't have REG_EQUAL.  */
14521
      src = SET_SRC (set);
14522
      symbol = mips_strip_unspec_call (src);
14523
      if (symbol)
14524
        {
14525
          gcc_assert (GET_CODE (symbol) == SYMBOL_REF);
14526
          return symbol;
14527
        }
14528
 
14529
      /* Follow at most one simple register copy.  Such copies are
14530
         interesting in cases like:
14531
 
14532
             for (...)
14533
               {
14534
                 locally_binding_fn (...);
14535
               }
14536
 
14537
         and:
14538
 
14539
             locally_binding_fn (...);
14540
             ...
14541
             locally_binding_fn (...);
14542
 
14543
         where the load of locally_binding_fn can legitimately be
14544
         hoisted or shared.  However, we do not expect to see complex
14545
         chains of copies, so a full worklist solution to the problem
14546
         would probably be overkill.  */
14547
      if (recurse_p && REG_P (src))
14548
        return mips_find_pic_call_symbol (def_insn, src, false);
14549
    }
14550
 
14551
  return NULL_RTX;
14552
}
14553
 
14554
/* Find the definition of the use of REG in INSN.  See if the definition
14555
   is one of the ways we load a register with a symbol address for a
14556
   mips_use_pic_fn_addr_reg_p call.  If it is return the symbol reference
14557
   of the function, otherwise return NULL_RTX.  RECURSE_P is as for
14558
   mips_pic_call_symbol_from_set.  */
14559
 
14560
static rtx
14561
mips_find_pic_call_symbol (rtx insn, rtx reg, bool recurse_p)
14562
{
14563
  df_ref use;
14564
  struct df_link *defs;
14565
  rtx symbol;
14566
 
14567
  use = df_find_use (insn, regno_reg_rtx[REGNO (reg)]);
14568
  if (!use)
14569
    return NULL_RTX;
14570
  defs = DF_REF_CHAIN (use);
14571
  if (!defs)
14572
    return NULL_RTX;
14573
  symbol = mips_pic_call_symbol_from_set (defs->ref, reg, recurse_p);
14574
  if (!symbol)
14575
    return NULL_RTX;
14576
 
14577
  /* If we have more than one definition, they need to be identical.  */
14578
  for (defs = defs->next; defs; defs = defs->next)
14579
    {
14580
      rtx other;
14581
 
14582
      other = mips_pic_call_symbol_from_set (defs->ref, reg, recurse_p);
14583
      if (!rtx_equal_p (symbol, other))
14584
        return NULL_RTX;
14585
    }
14586
 
14587
  return symbol;
14588
}
14589
 
14590
/* Replace the args_size operand of the call expression CALL with the
   call-attribute UNSPEC and fill in SYMBOL as the function symbol.  */

static void
mips_annotate_pic_call_expr (rtx call, rtx symbol)
{
  rtx args_size;

  /* Wrap the original args_size operand and SYMBOL in an UNSPEC_CALL_ATTR,
     keeping the mode of the original operand.  mips_get_pic_call_symbol
     unpacks this again when the call is output.  */
  args_size = XEXP (call, 1);
  XEXP (call, 1) = gen_rtx_UNSPEC (GET_MODE (args_size),
                                   gen_rtvec (2, args_size, symbol),
                                   UNSPEC_CALL_ATTR);
}
14603
 
14604
/* OPERANDS[ARGS_SIZE_OPNO] is the arg_size operand of a CALL expression.  See
   if instead of the arg_size argument it contains the call attributes.  If
   yes return true along with setting OPERANDS[ARGS_SIZE_OPNO] to the function
   symbol from the call attributes.  Also return false if ARGS_SIZE_OPNO is
   -1.  */

bool
mips_get_pic_call_symbol (rtx *operands, int args_size_opno)
{
  rtx args_size, symbol;

  if (!TARGET_RELAX_PIC_CALLS || args_size_opno == -1)
    return false;

  /* mips_annotate_pic_call_expr replaces the args_size operand with an
     UNSPEC_CALL_ATTR; anything else means the call was not annotated.  */
  args_size = operands[args_size_opno];
  if (GET_CODE (args_size) != UNSPEC)
    return false;
  gcc_assert (XINT (args_size, 1) == UNSPEC_CALL_ATTR);

  /* Element 1 of the UNSPEC vector is the function symbol.  */
  symbol = XVECEXP (args_size, 0, 1);
  gcc_assert (GET_CODE (symbol) == SYMBOL_REF);

  operands[args_size_opno] = symbol;
  return true;
}
14629
 
14630
/* Use DF to annotate PIC indirect calls with the function symbol they
   dispatch to.  */

static void
mips_annotate_pic_calls (void)
{
  basic_block bb;
  rtx insn;

  FOR_EACH_BB (bb)
    FOR_BB_INSNS (bb, insn)
    {
      rtx call, reg, symbol, second_call;

      second_call = 0;
      call = mips_call_expr_from_insn (insn, &second_call);
      if (!call)
        continue;
      /* Only indirect calls (through a register) are of interest.  */
      gcc_assert (MEM_P (XEXP (call, 0)));
      reg = XEXP (XEXP (call, 0), 0);
      if (!REG_P (reg))
        continue;

      symbol = mips_find_pic_call_symbol (insn, reg, true);
      if (symbol)
        {
          mips_annotate_pic_call_expr (call, symbol);
          /* A call_value_multiple pattern has two call expressions;
             annotate both with the same symbol.  */
          if (second_call)
            mips_annotate_pic_call_expr (second_call, symbol);
        }
    }
}
14662
 
14663
/* A temporary variable used by for_each_rtx callbacks, etc.  Holds the
   instruction currently being simulated, for callbacks that cannot take
   it as an explicit argument.  */
static rtx mips_sim_insn;
14665
 
14666
/* A structure representing the state of the processor pipeline.
   Used by the mips_sim_* family of functions.  */
struct mips_sim {
  /* The maximum number of instructions that can be issued in a cycle.
     (Caches mips_issue_rate.)  */
  unsigned int issue_rate;

  /* The current simulation time, in cycles since the last reset.  */
  unsigned int time;

  /* How many more instructions can be issued in the current cycle.  */
  unsigned int insns_left;

  /* LAST_SET[X].INSN is the last instruction to set register X.
     LAST_SET[X].TIME is the time at which that instruction was issued.
     INSN is null if no instruction has yet set register X.  */
  struct {
    rtx insn;
    unsigned int time;
  } last_set[FIRST_PSEUDO_REGISTER];

  /* The pipeline's current DFA state.  */
  state_t dfa_state;
};
14690
 
14691
/* Reset STATE to the initial simulation state.  ISSUE_RATE and DFA_STATE
   are preserved; everything else (time, issue slots, last-set records
   and the DFA contents) is cleared.  */

static void
mips_sim_reset (struct mips_sim *state)
{
  state->time = 0;
  state->insns_left = state->issue_rate;
  memset (&state->last_set, 0, sizeof (state->last_set));
  state_reset (state->dfa_state);
}
14701
 
14702
/* Initialize STATE before its first use.  DFA_STATE points to an
   allocated but uninitialized DFA state.  */

static void
mips_sim_init (struct mips_sim *state, state_t dfa_state)
{
  /* ISSUE_RATE and DFA_STATE must be set before mips_sim_reset,
     which reads both.  */
  state->issue_rate = mips_issue_rate ();
  state->dfa_state = dfa_state;
  mips_sim_reset (state);
}
14712
 
14713
/* Advance STATE by one clock cycle.  This replenishes the issue slots
   and advances the DFA (a null insn advances it by one cycle).  */

static void
mips_sim_next_cycle (struct mips_sim *state)
{
  state->time++;
  state->insns_left = state->issue_rate;
  state_transition (state->dfa_state, 0);
}
14722
 
14723
/* Advance simulation state STATE until instruction INSN can read
   register REG.  */

static void
mips_sim_wait_reg (struct mips_sim *state, rtx insn, rtx reg)
{
  unsigned int regno, end_regno;

  /* REG may be a multi-word hard register; wait for every constituent
     register to become available.  */
  end_regno = END_REGNO (reg);
  for (regno = REGNO (reg); regno < end_regno; regno++)
    if (state->last_set[regno].insn != 0)
      {
        unsigned int t;

        /* T is the earliest time at which the value set by the last
           writer of REGNO can be read by INSN.  */
        t = (state->last_set[regno].time
             + insn_latency (state->last_set[regno].insn, insn));
        while (state->time < t)
          mips_sim_next_cycle (state);
    }
}
14743
 
14744
/* A for_each_rtx callback.  If *X is a register, advance simulation state
14745
   DATA until mips_sim_insn can read the register's value.  */
14746
 
14747
static int
14748
mips_sim_wait_regs_2 (rtx *x, void *data)
14749
{
14750
  if (REG_P (*x))
14751
    mips_sim_wait_reg ((struct mips_sim *) data, mips_sim_insn, *x);
14752
  return 0;
14753
}
14754
 
14755
/* Call mips_sim_wait_regs_2 (R, DATA) for each register R mentioned in *X.
   Used as a note_uses callback, hence the (rtx *, void *) signature.  */

static void
mips_sim_wait_regs_1 (rtx *x, void *data)
{
  for_each_rtx (x, mips_sim_wait_regs_2, data);
}
14762
 
14763
/* Advance simulation state STATE until all of INSN's register
   dependencies are satisfied.  */

static void
mips_sim_wait_regs (struct mips_sim *state, rtx insn)
{
  /* Communicate INSN to the for_each_rtx callback through the
     file-scope mips_sim_insn.  */
  mips_sim_insn = insn;
  note_uses (&PATTERN (insn), mips_sim_wait_regs_1, state);
}
14772
 
14773
/* Advance simulation state STATE until the units required by
   instruction INSN are available.  */

static void
mips_sim_wait_units (struct mips_sim *state, rtx insn)
{
  state_t tmp_state;

  /* Probe the DFA on a scratch copy of the state: a nonnegative
     state_transition result means INSN cannot issue this cycle.
     The real state is only advanced by mips_sim_next_cycle.  */
  tmp_state = alloca (state_size ());
  while (state->insns_left == 0
         || (memcpy (tmp_state, state->dfa_state, state_size ()),
             state_transition (tmp_state, insn) >= 0))
    mips_sim_next_cycle (state);
}
14787
 
14788
/* Advance simulation state STATE until INSN is ready to issue:
   first wait for its register dependencies, then for its units.  */

static void
mips_sim_wait_insn (struct mips_sim *state, rtx insn)
{
  mips_sim_wait_regs (state, insn);
  mips_sim_wait_units (state, insn);
}
14796
 
14797
/* mips_sim_insn has just set X.  Update the LAST_SET array
14798
   in simulation state DATA.  */
14799
 
14800
static void
14801
mips_sim_record_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
14802
{
14803
  struct mips_sim *state;
14804
 
14805
  state = (struct mips_sim *) data;
14806
  if (REG_P (x))
14807
    {
14808
      unsigned int regno, end_regno;
14809
 
14810
      end_regno = END_REGNO (x);
14811
      for (regno = REGNO (x); regno < end_regno; regno++)
14812
        {
14813
          state->last_set[regno].insn = mips_sim_insn;
14814
          state->last_set[regno].time = state->time;
14815
        }
14816
    }
14817
}
14818
 
14819
/* Issue instruction INSN in scheduler state STATE.  Assume that INSN
   can issue immediately (i.e., that mips_sim_wait_insn has already
   been called).  */

static void
mips_sim_issue_insn (struct mips_sim *state, rtx insn)
{
  state_transition (state->dfa_state, insn);
  state->insns_left--;

  /* Record every register INSN sets, so that later instructions can
     wait for the results.  */
  mips_sim_insn = insn;
  note_stores (PATTERN (insn), mips_sim_record_set, state);
}
14832
 
14833
/* Simulate issuing a NOP in state STATE.  A NOP consumes an issue slot
   but needs no units and sets no registers.  */

static void
mips_sim_issue_nop (struct mips_sim *state)
{
  if (state->insns_left == 0)
    mips_sim_next_cycle (state);
  state->insns_left--;
}
14842
 
14843
/* Update simulation state STATE so that it's ready to accept the instruction
   after INSN.  INSN should be part of the main rtl chain, not a member of a
   SEQUENCE.  */

static void
mips_sim_finish_insn (struct mips_sim *state, rtx insn)
{
  /* If INSN is a jump with an implicit delay slot, simulate a nop.  */
  if (JUMP_P (insn))
    mips_sim_issue_nop (state);

  switch (GET_CODE (SEQ_BEGIN (insn)))
    {
    case CODE_LABEL:
    case CALL_INSN:
      /* We can't predict the processor state after a call or label.  */
      mips_sim_reset (state);
      break;

    case JUMP_INSN:
      /* The delay slots of branch likely instructions are only executed
         when the branch is taken.  Therefore, if the caller has simulated
         the delay slot instruction, STATE does not really reflect the state
         of the pipeline for the instruction after the delay slot.  Also,
         branch likely instructions tend to incur a penalty when not taken,
         so there will probably be an extra delay between the branch and
         the instruction after the delay slot.  */
      if (INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (insn)))
        mips_sim_reset (state);
      break;

    default:
      break;
    }
}
14878
 
14879
/* The VR4130 pipeline issues aligned pairs of instructions together,
   but it stalls the second instruction if it depends on the first.
   In order to cut down the amount of logic required, this dependence
   check is not based on a full instruction decode.  Instead, any non-SPECIAL
   instruction is assumed to modify the register specified by bits 20-16
   (which is usually the "rt" field).

   In BEQ, BEQL, BNE and BNEL instructions, the rt field is actually an
   input, so we can end up with a false dependence between the branch
   and its delay slot.  If this situation occurs in instruction INSN,
   try to avoid it by swapping rs and rt.  */

static void
vr4130_avoid_branch_rt_conflict (rtx insn)
{
  rtx first, second;

  /* Only a branch-with-delay-slot SEQUENCE is of interest: FIRST is
     the branch, SECOND the delay slot instruction.  */
  first = SEQ_BEGIN (insn);
  second = SEQ_END (insn);
  if (JUMP_P (first)
      && NONJUMP_INSN_P (second)
      && GET_CODE (PATTERN (first)) == SET
      && GET_CODE (SET_DEST (PATTERN (first))) == PC
      && GET_CODE (SET_SRC (PATTERN (first))) == IF_THEN_ELSE)
    {
      /* Check for the right kind of condition.  EQ and NE are the only
         commutative comparisons, so the swap below is safe.  */
      rtx cond = XEXP (SET_SRC (PATTERN (first)), 0);
      if ((GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
          && REG_P (XEXP (cond, 0))
          && REG_P (XEXP (cond, 1))
          && reg_referenced_p (XEXP (cond, 1), PATTERN (second))
          && !reg_referenced_p (XEXP (cond, 0), PATTERN (second)))
        {
          /* SECOND mentions the rt register but not the rs register.  */
          rtx tmp = XEXP (cond, 0);
          XEXP (cond, 0) = XEXP (cond, 1);
          XEXP (cond, 1) = tmp;
        }
    }
}
14919
 
14920
/* Implement -mvr4130-align.  Go through each basic block and simulate the
   processor pipeline.  If we find that a pair of instructions could execute
   in parallel, and the first of those instructions is not 8-byte aligned,
   insert a nop to make it aligned.  */

static void
vr4130_align_insns (void)
{
  struct mips_sim state;
  rtx insn, subinsn, last, last2, next;
  bool aligned_p;

  dfa_start ();

  /* LAST is the last instruction before INSN to have a nonzero length.
     LAST2 is the last such instruction before LAST.  */
  last = 0;
  last2 = 0;

  /* ALIGNED_P is true if INSN is known to be at an aligned address.  */
  aligned_p = true;

  mips_sim_init (&state, alloca (state_size ()));
  for (insn = get_insns (); insn != 0; insn = next)
    {
      unsigned int length;

      next = NEXT_INSN (insn);

      /* See the comment above vr4130_avoid_branch_rt_conflict for details.
         This isn't really related to the alignment pass, but we do it on
         the fly to avoid a separate instruction walk.  */
      vr4130_avoid_branch_rt_conflict (insn);

      if (USEFUL_INSN_P (insn))
        FOR_EACH_SUBINSN (subinsn, insn)
          {
            mips_sim_wait_insn (&state, subinsn);

            /* If we want this instruction to issue in parallel with the
               previous one, make sure that the previous instruction is
               aligned.  There are several reasons why this isn't worthwhile
               when the second instruction is a call:

                  - Calls are less likely to be performance critical,
                  - There's a good chance that the delay slot can execute
                    in parallel with the call.
                  - The return address would then be unaligned.

               In general, if we're going to insert a nop between instructions
               X and Y, it's better to insert it immediately after X.  That
               way, if the nop makes Y aligned, it will also align any labels
               between X and Y.  */
            if (state.insns_left != state.issue_rate
                && !CALL_P (subinsn))
              {
                if (subinsn == SEQ_BEGIN (insn) && aligned_p)
                  {
                    /* SUBINSN is the first instruction in INSN and INSN is
                       aligned.  We want to align the previous instruction
                       instead, so insert a nop between LAST2 and LAST.

                       Note that LAST could be either a single instruction
                       or a branch with a delay slot.  In the latter case,
                       LAST, like INSN, is already aligned, but the delay
                       slot must have some extra delay that stops it from
                       issuing at the same time as the branch.  We therefore
                       insert a nop before the branch in order to align its
                       delay slot.  */
                    emit_insn_after (gen_nop (), last2);
                    aligned_p = false;
                  }
                else if (subinsn != SEQ_BEGIN (insn) && !aligned_p)
                  {
                    /* SUBINSN is the delay slot of INSN, but INSN is
                       currently unaligned.  Insert a nop between
                       LAST and INSN to align it.  */
                    emit_insn_after (gen_nop (), last);
                    aligned_p = true;
                  }
              }
            mips_sim_issue_insn (&state, subinsn);
          }
      mips_sim_finish_insn (&state, insn);

      /* Update LAST, LAST2 and ALIGNED_P for the next instruction.  */
      length = get_attr_length (insn);
      if (length > 0)
        {
          /* If the instruction is an asm statement or multi-instruction
             mips.md pattern, the length is only an estimate.  Insert an
             8 byte alignment after it so that the following instructions
             can be handled correctly.  */
          if (NONJUMP_INSN_P (SEQ_BEGIN (insn))
              && (recog_memoized (insn) < 0 || length >= 8))
            {
              next = emit_insn_after (gen_align (GEN_INT (3)), insn);
              next = NEXT_INSN (next);
              mips_sim_next_cycle (&state);
              aligned_p = true;
            }
          else if (length & 4)
            aligned_p = !aligned_p;
          last2 = last;
          last = insn;
        }

      /* See whether INSN is an aligned label.  */
      if (LABEL_P (insn) && label_to_alignment (insn) >= 3)
        aligned_p = true;
    }
  dfa_finish ();
}
15033
 
15034
/* This structure records that the current function has a LO_SUM
   involving SYMBOL_REF or LABEL_REF BASE and that MAX_OFFSET is
   the largest offset applied to BASE by all such LO_SUMs.  */
struct mips_lo_sum_offset {
  /* The SYMBOL_REF or LABEL_REF.  */
  rtx base;
  /* The maximum offset seen (the MAX_OFFSET described above).  */
  HOST_WIDE_INT offset;
};
15041
 
15042
/* Return a hash value for SYMBOL_REF or LABEL_REF BASE.  */

static hashval_t
mips_hash_base (rtx base)
{
  /* do_not_record_p is required by hash_rtx but its value is unused:
     symbol and label refs are always hashable.  */
  int do_not_record_p;

  return hash_rtx (base, GET_MODE (base), &do_not_record_p, NULL, false);
}
15051
 
15052
/* Hash-table callbacks for mips_lo_sum_offsets.  */
15053
 
15054
static hashval_t
15055
mips_lo_sum_offset_hash (const void *entry)
15056
{
15057
  return mips_hash_base (((const struct mips_lo_sum_offset *) entry)->base);
15058
}
15059
 
15060
static int
15061
mips_lo_sum_offset_eq (const void *entry, const void *value)
15062
{
15063
  return rtx_equal_p (((const struct mips_lo_sum_offset *) entry)->base,
15064
                      (const_rtx) value);
15065
}
15066
 
15067
/* Look up symbolic constant X in HTAB, which is a hash table of
   mips_lo_sum_offsets.  If OPTION is NO_INSERT, return true if X can be
   paired with a recorded LO_SUM, otherwise record X in the table.  */

static bool
mips_lo_sum_offset_lookup (htab_t htab, rtx x, enum insert_option option)
{
  rtx base, offset;
  void **slot;
  struct mips_lo_sum_offset *entry;

  /* Split X into a base and offset.  */
  split_const (x, &base, &offset);
  if (UNSPEC_ADDRESS_P (base))
    base = UNSPEC_ADDRESS (base);

  /* Look up the base in the hash table.  With NO_INSERT, SLOT is null
     when BASE has not been recorded.  */
  slot = htab_find_slot_with_hash (htab, base, mips_hash_base (base), option);
  if (slot == NULL)
    return false;

  entry = (struct mips_lo_sum_offset *) *slot;
  if (option == INSERT)
    {
      if (entry == NULL)
        {
          /* First LO_SUM for this base: record it with its offset.
             Freed by the table's free callback (see htab_create).  */
          entry = XNEW (struct mips_lo_sum_offset);
          entry->base = base;
          entry->offset = INTVAL (offset);
          *slot = entry;
        }
      else
        {
          /* Keep only the maximum offset seen for this base.  */
          if (INTVAL (offset) > entry->offset)
            entry->offset = INTVAL (offset);
        }
    }
  return INTVAL (offset) <= entry->offset;
}
15106
 
15107
/* A for_each_rtx callback for which DATA is a mips_lo_sum_offset hash table.
15108
   Record every LO_SUM in *LOC.  */
15109
 
15110
static int
15111
mips_record_lo_sum (rtx *loc, void *data)
15112
{
15113
  if (GET_CODE (*loc) == LO_SUM)
15114
    mips_lo_sum_offset_lookup ((htab_t) data, XEXP (*loc, 1), INSERT);
15115
  return 0;
15116
}
15117
 
15118
/* Return true if INSN is a SET of an orphaned high-part relocation.
   HTAB is a hash table of mips_lo_sum_offsets that describes all the
   LO_SUMs in the current function.  */

static bool
mips_orphaned_high_part_p (htab_t htab, rtx insn)
{
  enum mips_symbol_type type;
  rtx x, set;

  set = single_set (insn);
  if (set)
    {
      /* Check for %his.  A HIGH is orphaned if no recorded LO_SUM
         can be paired with it.  */
      x = SET_SRC (set);
      if (GET_CODE (x) == HIGH
          && absolute_symbolic_operand (XEXP (x, 0), VOIDmode))
        return !mips_lo_sum_offset_lookup (htab, XEXP (x, 0), NO_INSERT);

      /* Check for local %gots (and %got_pages, which is redundant but OK).  */
      if (GET_CODE (x) == UNSPEC
          && XINT (x, 1) == UNSPEC_LOAD_GOT
          && mips_symbolic_constant_p (XVECEXP (x, 0, 1),
                                       SYMBOL_CONTEXT_LEA, &type)
          && type == SYMBOL_GOTOFF_PAGE)
        return !mips_lo_sum_offset_lookup (htab, XVECEXP (x, 0, 1), NO_INSERT);
    }
  return false;
}
15147
 
15148
/* Subroutine of mips_reorg_process_insns.  If there is a hazard between
   INSN and a previous instruction, avoid it by inserting nops after
   instruction AFTER.

   *DELAYED_REG and *HILO_DELAY describe the hazards that apply at
   this point.  If *DELAYED_REG is non-null, INSN must wait a cycle
   before using the value of that register.  *HILO_DELAY counts the
   number of instructions since the last hilo hazard (that is,
   the number of instructions since the last MFLO or MFHI).

   After inserting nops for INSN, update *DELAYED_REG and *HILO_DELAY
   for the next instruction.

   LO_REG is an rtx for the LO register, used in dependence checking.  */

static void
mips_avoid_hazard (rtx after, rtx insn, int *hilo_delay,
                   rtx *delayed_reg, rtx lo_reg)
{
  rtx pattern, set;
  int nops, ninsns;

  pattern = PATTERN (insn);

  /* Do not put the whole function in .set noreorder if it contains
     an asm statement.  We don't know whether there will be hazards
     between the asm statement and the gcc-generated code.  */
  if (GET_CODE (pattern) == ASM_INPUT || asm_noperands (pattern) >= 0)
    cfun->machine->all_noreorder_p = false;

  /* Ignore zero-length instructions (barriers and the like).  */
  ninsns = get_attr_length (insn) / 4;
  if (ninsns == 0)
    return;

  /* Work out how many nops are needed.  Note that we only care about
     registers that are explicitly mentioned in the instruction's pattern.
     It doesn't matter that calls use the argument registers or that they
     clobber hi and lo.  */
  if (*hilo_delay < 2 && reg_set_p (lo_reg, pattern))
    nops = 2 - *hilo_delay;
  else if (*delayed_reg != 0 && reg_referenced_p (*delayed_reg, pattern))
    nops = 1;
  else
    nops = 0;

  /* Insert the nops between this instruction and the previous one.
     Each new nop takes us further from the last hilo hazard.  */
  *hilo_delay += nops;
  while (nops-- > 0)
    emit_insn_after (gen_hazard_nop (), after);

  /* Set up the state for the next instruction.  */
  *hilo_delay += ninsns;
  *delayed_reg = 0;
  if (INSN_CODE (insn) >= 0)
    switch (get_attr_hazard (insn))
      {
      case HAZARD_NONE:
        break;

      case HAZARD_HILO:
        /* INSN reads HI or LO, so the hazard window restarts here.  */
        *hilo_delay = 0;
        break;

      case HAZARD_DELAY:
        /* The register set by INSN cannot be read by the very next
           instruction; remember it.  */
        set = single_set (insn);
        gcc_assert (set);
        *delayed_reg = SET_DEST (set);
        break;
      }
}
15220
 
15221
/* Go through the instruction stream and insert nops where necessary.
   Also delete any high-part relocations whose partnering low parts
   are now all dead.  See if the whole function can then be put into
   .set noreorder and .set nomacro.  */

static void
mips_reorg_process_insns (void)
{
  rtx insn, last_insn, subinsn, next_insn, lo_reg, delayed_reg;
  int hilo_delay;
  htab_t htab;

  /* Force all instructions to be split into their final form.  */
  split_all_insns_noflow ();

  /* Recalculate instruction lengths without taking nops into account.  */
  cfun->machine->ignore_hazard_length_p = true;
  shorten_branches (get_insns ());

  cfun->machine->all_noreorder_p = true;

  /* We don't track MIPS16 PC-relative offsets closely enough to make
     a good job of "set .noreorder" code in MIPS16 mode.  */
  if (TARGET_MIPS16)
    cfun->machine->all_noreorder_p = false;

  /* Code that doesn't use explicit relocs can't be ".set nomacro".  */
  if (!TARGET_EXPLICIT_RELOCS)
    cfun->machine->all_noreorder_p = false;

  /* Profiled functions can't be all noreorder because the profiler
     support uses assembler macros.  */
  if (crtl->profile)
    cfun->machine->all_noreorder_p = false;

  /* Code compiled with -mfix-vr4120 or -mfix-24k can't be all noreorder
     because we rely on the assembler to work around some errata.  */
  if (TARGET_FIX_VR4120 || TARGET_FIX_24K)
    cfun->machine->all_noreorder_p = false;

  /* The same is true for -mfix-vr4130 if we might generate MFLO or
     MFHI instructions.  Note that we avoid using MFLO and MFHI if
     the VR4130 MACC and DMACC instructions are available instead;
     see the *mfhilo_{si,di}_macc patterns.  */
  if (TARGET_FIX_VR4130 && !ISA_HAS_MACCHI)
    cfun->machine->all_noreorder_p = false;

  /* The table owns its entries: "free" releases each mips_lo_sum_offset
     when the table is deleted below.  */
  htab = htab_create (37, mips_lo_sum_offset_hash,
                      mips_lo_sum_offset_eq, free);

  /* Make a first pass over the instructions, recording all the LO_SUMs.  */
  for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
    FOR_EACH_SUBINSN (subinsn, insn)
      if (USEFUL_INSN_P (subinsn))
        for_each_rtx (&PATTERN (subinsn), mips_record_lo_sum, htab);

  last_insn = 0;
  hilo_delay = 2;
  delayed_reg = 0;
  lo_reg = gen_rtx_REG (SImode, LO_REGNUM);

  /* Make a second pass over the instructions.  Delete orphaned
     high-part relocations or turn them into NOPs.  Avoid hazards
     by inserting NOPs.  */
  for (insn = get_insns (); insn != 0; insn = next_insn)
    {
      next_insn = NEXT_INSN (insn);
      if (USEFUL_INSN_P (insn))
        {
          if (GET_CODE (PATTERN (insn)) == SEQUENCE)
            {
              /* If we find an orphaned high-part relocation in a delay
                 slot, it's easier to turn that instruction into a NOP than
                 to delete it.  The delay slot will be a NOP either way.  */
              FOR_EACH_SUBINSN (subinsn, insn)
                if (INSN_P (subinsn))
                  {
                    if (mips_orphaned_high_part_p (htab, subinsn))
                      {
                        PATTERN (subinsn) = gen_nop ();
                        INSN_CODE (subinsn) = CODE_FOR_nop;
                      }
                    mips_avoid_hazard (last_insn, subinsn, &hilo_delay,
                                       &delayed_reg, lo_reg);
                  }
              last_insn = insn;
            }
          else
            {
              /* INSN is a single instruction.  Delete it if it's an
                 orphaned high-part relocation.  */
              if (mips_orphaned_high_part_p (htab, insn))
                delete_insn (insn);
              /* Also delete cache barriers if the last instruction
                 was an annulled branch.  INSN will not be speculatively
                 executed.  */
              else if (recog_memoized (insn) == CODE_FOR_r10k_cache_barrier
                       && last_insn
                       && JUMP_P (SEQ_BEGIN (last_insn))
                       && INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (last_insn)))
                delete_insn (insn);
              else
                {
                  mips_avoid_hazard (last_insn, insn, &hilo_delay,
                                     &delayed_reg, lo_reg);
                  last_insn = insn;
                }
            }
        }
    }

  htab_delete (htab);
}
15334
 
15335
/* If we are using a GOT, but have not decided to use a global pointer yet,
   see whether we need one to implement long branches.  Convert the ghost
   global-pointer instructions into real ones if so.  Return true if any
   conversion happened.  */

static bool
mips_expand_ghost_gp_insns (void)
{
  rtx insn;
  int normal_length;

  /* Quick exit if we already know that we will or won't need a
     global pointer.  */
  if (!TARGET_USE_GOT
      || cfun->machine->global_pointer == INVALID_REGNUM
      || mips_must_initialize_gp_p ())
    return false;

  shorten_branches (get_insns ());

  /* Look for a branch that is longer than normal.  The normal length for
     non-MIPS16 branches is 8, because the length includes the delay slot.
     It is 4 for MIPS16, because MIPS16 branches are extended instructions,
     but they have no delay slot.  */
  normal_length = (TARGET_MIPS16 ? 4 : 8);
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if (JUMP_P (insn)
        && USEFUL_INSN_P (insn)
        && get_attr_length (insn) > normal_length)
      break;

  if (insn == NULL_RTX)
    return false;

  /* We've now established that we need $gp.  */
  cfun->machine->must_initialize_gp_p = true;
  split_all_insns_noflow ();

  return true;
}
15374
 
15375
/* Subroutine of mips_reorg to manage passes that require DF.  */

static void
mips_df_reorg (void)
{
  /* Create def-use chains.  */
  df_set_flags (DF_EQ_NOTES);
  df_chain_add_problem (DF_UD_CHAIN);
  df_analyze ();

  if (TARGET_RELAX_PIC_CALLS)
    mips_annotate_pic_calls ();

  if (mips_r10k_cache_barrier != R10K_CACHE_BARRIER_NONE)
    r10k_insert_cache_barriers ();

  df_finish_pass (false);
}
15393
 
15394
/* Implement TARGET_MACHINE_DEPENDENT_REORG.  */

static void
mips_reorg (void)
{
  /* Restore the BLOCK_FOR_INSN pointers, which are needed by DF.  Also during
     insn splitting in mips16_lay_out_constants, DF insn info is only kept up
     to date if the CFG is available.  */
  if (mips_cfg_in_reorg ())
    compute_bb_for_insn ();
  mips16_lay_out_constants ();
  if (mips_cfg_in_reorg ())
    {
      mips_df_reorg ();
      free_bb_for_insn ();
    }

  if (optimize > 0 && flag_delayed_branch)
    dbr_schedule (get_insns ());
  mips_reorg_process_insns ();
  if (!TARGET_MIPS16
      && TARGET_EXPLICIT_RELOCS
      && TUNE_MIPS4130
      && TARGET_VR4130_ALIGN)
    vr4130_align_insns ();
  if (mips_expand_ghost_gp_insns ())
    /* The expansion could invalidate some of the VR4130 alignment
       optimizations, but this should be an extremely rare case anyhow.  */
    mips_reorg_process_insns ();
}
15424
 
15425
/* Implement TARGET_ASM_OUTPUT_MI_THUNK.  Generate rtl rather than asm text
   in order to avoid duplicating too much logic from elsewhere.

   Emit into FILE the code of a thunk that adds DELTA to the "this"
   pointer, optionally adds *(*this + VCALL_OFFSET) as well, and then
   tail-calls FUNCTION.  THUNK_FNDECL is unused.  */

static void
mips_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
                      HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
                      tree function)
{
  rtx this_rtx, temp1, temp2, insn, fnaddr;
  bool use_sibcall_p;

  /* Pretend to be a post-reload pass while generating rtl.  */
  reload_completed = 1;

  /* Mark the end of the (empty) prologue.  */
  emit_note (NOTE_INSN_PROLOGUE_END);

  /* Determine if we can use a sibcall to call FUNCTION directly.  */
  fnaddr = XEXP (DECL_RTL (function), 0);
  use_sibcall_p = (mips_function_ok_for_sibcall (function, NULL)
                   && const_call_insn_operand (fnaddr, Pmode));

  /* Determine if we need to load FNADDR from the GOT.  */
  if (!use_sibcall_p
      && (mips_got_symbol_type_p
          (mips_classify_symbol (fnaddr, SYMBOL_CONTEXT_LEA))))
    {
      /* Pick a global pointer.  Use a call-clobbered register if
         TARGET_CALL_SAVED_GP.  */
      cfun->machine->global_pointer
        = TARGET_CALL_SAVED_GP ? 15 : GLOBAL_POINTER_REGNUM;
      cfun->machine->must_initialize_gp_p = true;
      SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);

      /* Set up the global pointer for n32 or n64 abicalls.  */
      mips_emit_loadgp ();
    }

  /* We need two temporary registers in some cases.  */
  temp1 = gen_rtx_REG (Pmode, 2);
  temp2 = gen_rtx_REG (Pmode, 3);

  /* Find out which register contains the "this" pointer.  If the
     function returns an aggregate in memory, the hidden return-slot
     pointer occupies the first argument register and "this" moves to
     the second.  */
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
  else
    this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST);

  /* Add DELTA to THIS_RTX.  */
  if (delta != 0)
    {
      rtx offset = GEN_INT (delta);
      /* Out-of-range immediates must go through a register.  */
      if (!SMALL_OPERAND (delta))
        {
          mips_emit_move (temp1, offset);
          offset = temp1;
        }
      emit_insn (gen_add3_insn (this_rtx, this_rtx, offset));
    }

  /* If needed, add *(*THIS_RTX + VCALL_OFFSET) to THIS_RTX.  */
  if (vcall_offset != 0)
    {
      rtx addr;

      /* Set TEMP1 to *THIS_RTX.  */
      mips_emit_move (temp1, gen_rtx_MEM (Pmode, this_rtx));

      /* Set ADDR to a legitimate address for *THIS_RTX + VCALL_OFFSET.  */
      addr = mips_add_offset (temp2, temp1, vcall_offset);

      /* Load the offset and add it to THIS_RTX.  */
      mips_emit_move (temp1, gen_rtx_MEM (Pmode, addr));
      emit_insn (gen_add3_insn (this_rtx, this_rtx, temp1));
    }

  /* Jump to the target function.  Use a sibcall if direct jumps are
     allowed, otherwise load the address into a register first.  */
  if (use_sibcall_p)
    {
      insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
      SIBLING_CALL_P (insn) = 1;
    }
  else
    {
      /* This is messy.  GAS treats "la $25,foo" as part of a call
         sequence and may allow a global "foo" to be lazily bound.
         The general move patterns therefore reject this combination.

         In this context, lazy binding would actually be OK
         for TARGET_CALL_CLOBBERED_GP, but it's still wrong for
         TARGET_CALL_SAVED_GP; see mips_load_call_address.
         We must therefore load the address via a temporary
         register if mips_dangerous_for_la25_p.

         If we jump to the temporary register rather than $25,
         the assembler can use the move insn to fill the jump's
         delay slot.

         We can use the same technique for MIPS16 code, where $25
         is not a valid JR register.  */
      if (TARGET_USE_PIC_FN_ADDR_REG
          && !TARGET_MIPS16
          && !mips_dangerous_for_la25_p (fnaddr))
        temp1 = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
      mips_load_call_address (MIPS_CALL_SIBCALL, temp1, fnaddr);

      /* ABIs that use $25 as the call address register still expect it
         to hold the callee's address at the jump.  */
      if (TARGET_USE_PIC_FN_ADDR_REG
          && REGNO (temp1) != PIC_FUNCTION_ADDR_REGNUM)
        mips_emit_move (gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM), temp1);
      emit_jump_insn (gen_indirect_jump (temp1));
    }

  /* Run just enough of rest_of_compilation.  This sequence was
     "borrowed" from alpha.c.  */
  insn = get_insns ();
  insn_locators_alloc ();
  split_all_insns_noflow ();
  mips16_lay_out_constants ();
  shorten_branches (insn);
  final_start_function (insn, file, 1);
  final (insn, file, 1);
  final_end_function ();

  /* Clean up the vars set above.  Note that final_end_function resets
     the global pointer for us.  */
  reload_completed = 0;
}
15553
 
15554
/* The last argument passed to mips_set_mips16_mode, or negative if the
15555
   function hasn't been called yet.  */
15556
static int was_mips16_p = -1;
15557
 
15558
/* Set up the target-dependent global state so that it matches the
   current function's ISA mode.  MIPS16_P is nonzero to select MIPS16
   mode, zero to select normal mode.  Mode switches are cached via
   WAS_MIPS16_P and the saved target-globals below, so repeated calls
   with the same argument are cheap.  */

static void
mips_set_mips16_mode (int mips16_p)
{
  /* Nothing to do if we are already in the requested mode.  */
  if (mips16_p == was_mips16_p)
    return;

  /* Restore base settings of various flags.  */
  target_flags = mips_base_target_flags;
  flag_schedule_insns = mips_base_schedule_insns;
  flag_reorder_blocks_and_partition = mips_base_reorder_blocks_and_partition;
  flag_move_loop_invariants = mips_base_move_loop_invariants;
  align_loops = mips_base_align_loops;
  align_jumps = mips_base_align_jumps;
  align_functions = mips_base_align_functions;

  if (mips16_p)
    {
      /* Switch to MIPS16 mode.  */
      target_flags |= MASK_MIPS16;

      /* Don't run the scheduler before reload, since it tends to
         increase register pressure.  */
      flag_schedule_insns = 0;

      /* Don't do hot/cold partitioning.  mips16_lay_out_constants expects
         the whole function to be in a single section.  */
      flag_reorder_blocks_and_partition = 0;

      /* Don't move loop invariants, because it tends to increase
         register pressure.  It also introduces an extra move in cases
         where the constant is the first operand in a two-operand binary
         instruction, or when it forms a register argument to a function
         call.  */
      flag_move_loop_invariants = 0;

      target_flags |= MASK_EXPLICIT_RELOCS;

      /* Experiments suggest we get the best overall section-anchor
         results from using the range of an unextended LW or SW.  Code
         that makes heavy use of byte or short accesses can do better
         with ranges of 0...31 and 0...63 respectively, but most code is
         sensitive to the range of LW and SW instead.  */
      targetm.min_anchor_offset = 0;
      targetm.max_anchor_offset = 127;

      targetm.const_anchor = 0;

      /* MIPS16 has no BAL instruction.  */
      target_flags &= ~MASK_RELAX_PIC_CALLS;

      /* The R4000 errata don't apply to any known MIPS16 cores.
         It's simpler to make the R4000 fixes and MIPS16 mode
         mutually exclusive.  */
      target_flags &= ~MASK_FIX_R4000;

      if (flag_pic && !TARGET_OLDABI)
        sorry ("MIPS16 PIC for ABIs other than o32 and o64");

      if (TARGET_XGOT)
        sorry ("MIPS16 -mxgot code");

      if (TARGET_HARD_FLOAT_ABI && !TARGET_OLDABI)
        sorry ("hard-float MIPS16 code for ABIs other than o32 and o64");
    }
  else
    {
      /* Switch to normal (non-MIPS16) mode.  */
      target_flags &= ~MASK_MIPS16;

      /* Provide default values for align_* for 64-bit targets.  */
      if (TARGET_64BIT)
        {
          if (align_loops == 0)
            align_loops = 8;
          if (align_jumps == 0)
            align_jumps = 8;
          if (align_functions == 0)
            align_functions = 8;
        }

      targetm.min_anchor_offset = -32768;
      targetm.max_anchor_offset = 32767;

      targetm.const_anchor = 0x8000;
    }

  /* (Re)initialize MIPS target internals for new ISA.  */
  mips_init_relocs ();

  if (mips16_p)
    {
      /* Save the MIPS16 target-globals the first time round so that
         subsequent switches into MIPS16 mode can simply restore them.  */
      if (!mips16_globals)
        mips16_globals = save_target_globals ();
      else
        restore_target_globals (mips16_globals);
    }
  else
    restore_target_globals (&default_target_globals);

  was_mips16_p = mips16_p;
}
15662
 
15663
/* Implement TARGET_SET_CURRENT_FUNCTION.  Decide whether the current
   function should use the MIPS16 ISA and switch modes accordingly.  */

static void
mips_set_current_function (tree fndecl)
{
  /* Delegate the decision to mips_use_mips16_mode_p; the mode switch
     itself is a no-op if FNDECL's mode matches the current one.  */
  mips_set_mips16_mode (mips_use_mips16_mode_p (fndecl));
}
15671
 
15672
/* Allocate a chunk of memory for per-function machine-dependent data.
   Used as the init_machine_status hook; the allocation is GC-managed
   and zero-initialized, so every field starts out 0/false.  */

static struct machine_function *
mips_init_machine_status (void)
{
  return ggc_alloc_cleared_machine_function ();
}
15679
 
15680
/* Return the processor associated with the given ISA level, or null
15681
   if the ISA isn't valid.  */
15682
 
15683
static const struct mips_cpu_info *
15684
mips_cpu_info_from_isa (int isa)
15685
{
15686
  unsigned int i;
15687
 
15688
  for (i = 0; i < ARRAY_SIZE (mips_cpu_info_table); i++)
15689
    if (mips_cpu_info_table[i].isa == isa)
15690
      return mips_cpu_info_table + i;
15691
 
15692
  return NULL;
15693
}
15694
 
15695
/* Return a mips_cpu_info entry determined by an option valued
15696
   OPT.  */
15697
 
15698
static const struct mips_cpu_info *
15699
mips_cpu_info_from_opt (int opt)
15700
{
15701
  switch (opt)
15702
    {
15703
    case MIPS_ARCH_OPTION_FROM_ABI:
15704
      /* 'from-abi' selects the most compatible architecture for the
15705
         given ABI: MIPS I for 32-bit ABIs and MIPS III for 64-bit
15706
         ABIs.  For the EABIs, we have to decide whether we're using
15707
         the 32-bit or 64-bit version.  */
15708
      return mips_cpu_info_from_isa (ABI_NEEDS_32BIT_REGS ? 1
15709
                                     : ABI_NEEDS_64BIT_REGS ? 3
15710
                                     : (TARGET_64BIT ? 3 : 1));
15711
 
15712
    case MIPS_ARCH_OPTION_NATIVE:
15713
      gcc_unreachable ();
15714
 
15715
    default:
15716
      return &mips_cpu_info_table[opt];
15717
    }
15718
}
15719
 
15720
/* Return a default mips_cpu_info entry, given that no -march= option
   was explicitly specified.  The choice is driven by how the compiler
   was configured: a named default CPU, a default ISA, or 'from-abi'.  */

static const struct mips_cpu_info *
mips_default_arch (void)
{
#if defined (MIPS_CPU_STRING_DEFAULT)
  /* The configuration named a default CPU; look it up in the table.  */
  unsigned int i;
  for (i = 0; i < ARRAY_SIZE (mips_cpu_info_table); i++)
    if (strcmp (mips_cpu_info_table[i].name, MIPS_CPU_STRING_DEFAULT) == 0)
      return mips_cpu_info_table + i;
  /* A configured default CPU that is missing from the table indicates
     a broken build configuration.  */
  gcc_unreachable ();
#elif defined (MIPS_ISA_DEFAULT)
  return mips_cpu_info_from_isa (MIPS_ISA_DEFAULT);
#else
  /* 'from-abi' makes a good default: you get whatever the ABI
     requires.  */
  return mips_cpu_info_from_opt (MIPS_ARCH_OPTION_FROM_ABI);
#endif
}
15740
 
15741
/* Set up globals to generate code for the ISA or processor
15742
   described by INFO.  */
15743
 
15744
static void
15745
mips_set_architecture (const struct mips_cpu_info *info)
15746
{
15747
  if (info != 0)
15748
    {
15749
      mips_arch_info = info;
15750
      mips_arch = info->cpu;
15751
      mips_isa = info->isa;
15752
    }
15753
}
15754
 
15755
/* Likewise for tuning.  */
15756
 
15757
static void
15758
mips_set_tune (const struct mips_cpu_info *info)
15759
{
15760
  if (info != 0)
15761
    {
15762
      mips_tune_info = info;
15763
      mips_tune = info->cpu;
15764
    }
15765
}
15766
 
15767
/* Implement TARGET_OPTION_OVERRIDE.  Validate the command-line option
   combination, infer defaults for anything the user left unset, and
   initialize the per-architecture tables used by the rest of the
   backend.  Runs once, before any function is compiled.  */

static void
mips_option_override (void)
{
  int i, start, regno, mode;

  if (global_options_set.x_mips_isa_option)
    mips_isa_option_info = &mips_cpu_info_table[mips_isa_option];

  /* Process flags as though we were generating non-MIPS16 code.  */
  mips_base_mips16 = TARGET_MIPS16;
  target_flags &= ~MASK_MIPS16;

#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif

  /* -mno-float overrides -mhard-float and -msoft-float.  */
  if (TARGET_NO_FLOAT)
    {
      target_flags |= MASK_SOFT_FLOAT_ABI;
      target_flags_explicit |= MASK_SOFT_FLOAT_ABI;
    }

  if (TARGET_FLIP_MIPS16)
    TARGET_INTERLINK_MIPS16 = 1;

  /* Set the small data limit.  */
  mips_small_data_threshold = (global_options_set.x_g_switch_value
                               ? g_switch_value
                               : MIPS_DEFAULT_GVALUE);

  /* The following code determines the architecture and register size.
     Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()).
     The GAS and GCC code should be kept in sync as much as possible.  */

  if (global_options_set.x_mips_arch_option)
    mips_set_architecture (mips_cpu_info_from_opt (mips_arch_option));

  if (mips_isa_option_info != 0)
    {
      if (mips_arch_info == 0)
        mips_set_architecture (mips_isa_option_info);
      else if (mips_arch_info->isa != mips_isa_option_info->isa)
        error ("%<-%s%> conflicts with the other architecture options, "
               "which specify a %s processor",
               mips_isa_option_info->name,
               mips_cpu_info_from_isa (mips_arch_info->isa)->name);
    }

  if (mips_arch_info == 0)
    mips_set_architecture (mips_default_arch ());

  if (ABI_NEEDS_64BIT_REGS && !ISA_HAS_64BIT_REGS)
    error ("%<-march=%s%> is not compatible with the selected ABI",
           mips_arch_info->name);

  /* Optimize for mips_arch, unless -mtune selects a different processor.  */
  if (global_options_set.x_mips_tune_option)
    mips_set_tune (mips_cpu_info_from_opt (mips_tune_option));

  if (mips_tune_info == 0)
    mips_set_tune (mips_arch_info);

  if ((target_flags_explicit & MASK_64BIT) != 0)
    {
      /* The user specified the size of the integer registers.  Make sure
         it agrees with the ABI and ISA.  */
      if (TARGET_64BIT && !ISA_HAS_64BIT_REGS)
        error ("%<-mgp64%> used with a 32-bit processor");
      else if (!TARGET_64BIT && ABI_NEEDS_64BIT_REGS)
        error ("%<-mgp32%> used with a 64-bit ABI");
      else if (TARGET_64BIT && ABI_NEEDS_32BIT_REGS)
        error ("%<-mgp64%> used with a 32-bit ABI");
    }
  else
    {
      /* Infer the integer register size from the ABI and processor.
         Restrict ourselves to 32-bit registers if that's all the
         processor has, or if the ABI cannot handle 64-bit registers.  */
      if (ABI_NEEDS_32BIT_REGS || !ISA_HAS_64BIT_REGS)
        target_flags &= ~MASK_64BIT;
      else
        target_flags |= MASK_64BIT;
    }

  if ((target_flags_explicit & MASK_FLOAT64) != 0)
    {
      if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64)
        error ("unsupported combination: %s", "-mfp64 -msingle-float");
      else if (TARGET_64BIT && TARGET_DOUBLE_FLOAT && !TARGET_FLOAT64)
        error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
      else if (!TARGET_64BIT && TARGET_FLOAT64)
        {
          if (!ISA_HAS_MXHC1)
            error ("%<-mgp32%> and %<-mfp64%> can only be combined if"
                   " the target supports the mfhc1 and mthc1 instructions");
          else if (mips_abi != ABI_32)
            error ("%<-mgp32%> and %<-mfp64%> can only be combined when using"
                   " the o32 ABI");
        }
    }
  else
    {
      /* -msingle-float selects 32-bit float registers.  Otherwise the
         float registers should be the same size as the integer ones.  */
      if (TARGET_64BIT && TARGET_DOUBLE_FLOAT)
        target_flags |= MASK_FLOAT64;
      else
        target_flags &= ~MASK_FLOAT64;
    }

  /* End of code shared with GAS.  */

  /* If a -mlong* option was given, check that it matches the ABI,
     otherwise infer the -mlong* setting from the other options.  */
  if ((target_flags_explicit & MASK_LONG64) != 0)
    {
      if (TARGET_LONG64)
        {
          if (mips_abi == ABI_N32)
            error ("%qs is incompatible with %qs", "-mabi=n32", "-mlong64");
          else if (mips_abi == ABI_32)
            error ("%qs is incompatible with %qs", "-mabi=32", "-mlong64");
          else if (mips_abi == ABI_O64 && TARGET_ABICALLS)
            /* We have traditionally allowed non-abicalls code to use
               an LP64 form of o64.  However, it would take a bit more
               effort to support the combination of 32-bit GOT entries
               and 64-bit pointers, so we treat the abicalls case as
               an error.  */
            error ("the combination of %qs and %qs is incompatible with %qs",
                   "-mabi=o64", "-mabicalls", "-mlong64");
        }
      else
        {
          if (mips_abi == ABI_64)
            error ("%qs is incompatible with %qs", "-mabi=64", "-mlong32");
        }
    }
  else
    {
      if ((mips_abi == ABI_EABI && TARGET_64BIT) || mips_abi == ABI_64)
        target_flags |= MASK_LONG64;
      else
        target_flags &= ~MASK_LONG64;
    }

  if (!TARGET_OLDABI)
    flag_pcc_struct_return = 0;

  /* Decide which rtx_costs structure to use.  */
  if (optimize_size)
    mips_cost = &mips_rtx_cost_optimize_size;
  else
    mips_cost = &mips_rtx_cost_data[mips_tune];

  /* If the user hasn't specified a branch cost, use the processor's
     default.  */
  if (mips_branch_cost == 0)
    mips_branch_cost = mips_cost->branch_cost;

  /* If neither -mbranch-likely nor -mno-branch-likely was given
     on the command line, set MASK_BRANCHLIKELY based on the target
     architecture and tuning flags.  Annulled delay slots are a
     size win, so we only consider the processor-specific tuning
     for !optimize_size.  */
  if ((target_flags_explicit & MASK_BRANCHLIKELY) == 0)
    {
      if (ISA_HAS_BRANCHLIKELY
          && (optimize_size
              || (mips_tune_info->tune_flags & PTF_AVOID_BRANCHLIKELY) == 0))
        target_flags |= MASK_BRANCHLIKELY;
      else
        target_flags &= ~MASK_BRANCHLIKELY;
    }
  else if (TARGET_BRANCHLIKELY && !ISA_HAS_BRANCHLIKELY)
    warning (0, "the %qs architecture does not support branch-likely"
             " instructions", mips_arch_info->name);

  /* The effect of -mabicalls isn't defined for the EABI.  */
  if (mips_abi == ABI_EABI && TARGET_ABICALLS)
    {
      error ("unsupported combination: %s", "-mabicalls -mabi=eabi");
      target_flags &= ~MASK_ABICALLS;
    }

  if (TARGET_ABICALLS_PIC2)
    /* We need to set flag_pic for executables as well as DSOs
       because we may reference symbols that are not defined in
       the final executable.  (MIPS does not use things like
       copy relocs, for example.)

       There is a body of code that uses __PIC__ to distinguish
       between -mabicalls and -mno-abicalls code.  The non-__PIC__
       variant is usually appropriate for TARGET_ABICALLS_PIC0, as
       long as any indirect jumps use $25.  */
    flag_pic = 1;

  /* -mvr4130-align is a "speed over size" optimization: it usually produces
     faster code, but at the expense of more nops.  Enable it at -O3 and
     above.  */
  if (optimize > 2 && (target_flags_explicit & MASK_VR4130_ALIGN) == 0)
    target_flags |= MASK_VR4130_ALIGN;

  /* Prefer a call to memcpy over inline code when optimizing for size,
     though see MOVE_RATIO in mips.h.  */
  if (optimize_size && (target_flags_explicit & MASK_MEMCPY) == 0)
    target_flags |= MASK_MEMCPY;

  /* If we have a nonzero small-data limit, check that the -mgpopt
     setting is consistent with the other target flags.  */
  if (mips_small_data_threshold > 0)
    {
      if (!TARGET_GPOPT)
        {
          if (!TARGET_EXPLICIT_RELOCS)
            error ("%<-mno-gpopt%> needs %<-mexplicit-relocs%>");

          TARGET_LOCAL_SDATA = false;
          TARGET_EXTERN_SDATA = false;
        }
      else
        {
          if (TARGET_VXWORKS_RTP)
            warning (0, "cannot use small-data accesses for %qs", "-mrtp");

          if (TARGET_ABICALLS)
            warning (0, "cannot use small-data accesses for %qs",
                     "-mabicalls");
        }
    }

#ifdef MIPS_TFMODE_FORMAT
  REAL_MODE_FORMAT (TFmode) = &MIPS_TFMODE_FORMAT;
#endif

  /* Make sure that the user didn't turn off paired single support when
     MIPS-3D support is requested.  */
  if (TARGET_MIPS3D
      && (target_flags_explicit & MASK_PAIRED_SINGLE_FLOAT)
      && !TARGET_PAIRED_SINGLE_FLOAT)
    error ("%<-mips3d%> requires %<-mpaired-single%>");

  /* If TARGET_MIPS3D, enable MASK_PAIRED_SINGLE_FLOAT.  */
  if (TARGET_MIPS3D)
    target_flags |= MASK_PAIRED_SINGLE_FLOAT;

  /* Make sure that when TARGET_PAIRED_SINGLE_FLOAT is true, TARGET_FLOAT64
     and TARGET_HARD_FLOAT_ABI are both true.  */
  if (TARGET_PAIRED_SINGLE_FLOAT && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT_ABI))
    error ("%qs must be used with %qs",
           TARGET_MIPS3D ? "-mips3d" : "-mpaired-single",
           TARGET_HARD_FLOAT_ABI ? "-mfp64" : "-mhard-float");

  /* Make sure that the ISA supports TARGET_PAIRED_SINGLE_FLOAT when it is
     enabled.  */
  if (TARGET_PAIRED_SINGLE_FLOAT && !ISA_HAS_PAIRED_SINGLE)
    warning (0, "the %qs architecture does not support paired-single"
             " instructions", mips_arch_info->name);

  if (mips_r10k_cache_barrier != R10K_CACHE_BARRIER_NONE
      && !TARGET_CACHE_BUILTIN)
    {
      error ("%qs requires a target that provides the %qs instruction",
             "-mr10k-cache-barrier", "cache");
      mips_r10k_cache_barrier = R10K_CACHE_BARRIER_NONE;
    }

  /* If TARGET_DSPR2, enable MASK_DSP.  */
  if (TARGET_DSPR2)
    target_flags |= MASK_DSP;

  /* .eh_frame addresses should be the same width as a C pointer.
     Most MIPS ABIs support only one pointer size, so the assembler
     will usually know exactly how big an .eh_frame address is.

     Unfortunately, this is not true of the 64-bit EABI.  The ABI was
     originally defined to use 64-bit pointers (i.e. it is LP64), and
     this is still the default mode.  However, we also support an n32-like
     ILP32 mode, which is selected by -mlong32.  The problem is that the
     assembler has traditionally not had an -mlong option, so it has
     traditionally not known whether we're using the ILP32 or LP64 form.

     As it happens, gas versions up to and including 2.19 use _32-bit_
     addresses for EABI64 .cfi_* directives.  This is wrong for the
     default LP64 mode, so we can't use the directives by default.
     Moreover, since gas's current behavior is at odds with gcc's
     default behavior, it seems unwise to rely on future versions
     of gas behaving the same way.  We therefore avoid using .cfi
     directives for -mlong32 as well.  */
  if (mips_abi == ABI_EABI && TARGET_64BIT)
    flag_dwarf2_cfi_asm = 0;

  /* .cfi_* directives generate a read-only section, so fall back on
     manual .eh_frame creation if we need the section to be writable.  */
  if (TARGET_WRITABLE_EH_FRAME)
    flag_dwarf2_cfi_asm = 0;

  mips_init_print_operand_punct ();

  /* Set up array to map GCC register number to debug register number.
     Ignore the special purpose register numbers.  */

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      mips_dbx_regno[i] = INVALID_REGNUM;
      if (GP_REG_P (i) || FP_REG_P (i) || ALL_COP_REG_P (i))
        mips_dwarf_regno[i] = i;
      else
        mips_dwarf_regno[i] = INVALID_REGNUM;
    }

  start = GP_DBX_FIRST - GP_REG_FIRST;
  for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
    mips_dbx_regno[i] = i + start;

  start = FP_DBX_FIRST - FP_REG_FIRST;
  for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
    mips_dbx_regno[i] = i + start;

  /* Accumulator debug registers use big-endian ordering.  */
  mips_dbx_regno[HI_REGNUM] = MD_DBX_FIRST + 0;
  mips_dbx_regno[LO_REGNUM] = MD_DBX_FIRST + 1;
  mips_dwarf_regno[HI_REGNUM] = MD_REG_FIRST + 0;
  mips_dwarf_regno[LO_REGNUM] = MD_REG_FIRST + 1;
  for (i = DSP_ACC_REG_FIRST; i <= DSP_ACC_REG_LAST; i += 2)
    {
      mips_dwarf_regno[i + TARGET_LITTLE_ENDIAN] = i;
      mips_dwarf_regno[i + TARGET_BIG_ENDIAN] = i + 1;
    }

  /* Set up mips_hard_regno_mode_ok.  */
  for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
    for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
      mips_hard_regno_mode_ok[mode][regno]
        = mips_hard_regno_mode_ok_p (regno, (enum machine_mode) mode);

  /* Function to allocate machine-dependent function status.  */
  init_machine_status = &mips_init_machine_status;

  /* Default to working around R4000 errata only if the processor
     was selected explicitly.  */
  if ((target_flags_explicit & MASK_FIX_R4000) == 0
      && strcmp (mips_arch_info->name, "r4000") == 0)
    target_flags |= MASK_FIX_R4000;

  /* Default to working around R4400 errata only if the processor
     was selected explicitly.  */
  if ((target_flags_explicit & MASK_FIX_R4400) == 0
      && strcmp (mips_arch_info->name, "r4400") == 0)
    target_flags |= MASK_FIX_R4400;

  /* Default to working around R10000 errata only if the processor
     was selected explicitly.  */
  if ((target_flags_explicit & MASK_FIX_R10000) == 0
      && strcmp (mips_arch_info->name, "r10000") == 0)
    target_flags |= MASK_FIX_R10000;

  /* Make sure that branch-likely instructions are available when using
     -mfix-r10000.  The instructions are not available if either:

        1. -mno-branch-likely was passed.
        2. The selected ISA does not support branch-likely and
           the command line does not include -mbranch-likely.  */
  if (TARGET_FIX_R10000
      && ((target_flags_explicit & MASK_BRANCHLIKELY) == 0
          ? !ISA_HAS_BRANCHLIKELY
          : !TARGET_BRANCHLIKELY))
    sorry ("%qs requires branch-likely instructions", "-mfix-r10000");

  if (TARGET_SYNCI && !ISA_HAS_SYNCI)
    {
      warning (0, "the %qs architecture does not support the synci "
               "instruction", mips_arch_info->name);
      target_flags &= ~MASK_SYNCI;
    }

  /* Only optimize PIC indirect calls if they are actually required.  */
  if (!TARGET_USE_GOT || !TARGET_EXPLICIT_RELOCS)
    target_flags &= ~MASK_RELAX_PIC_CALLS;

  /* Save base state of options, so that mips_set_mips16_mode can
     restore it when switching between MIPS16 and non-MIPS16 mode.  */
  mips_base_target_flags = target_flags;
  mips_base_schedule_insns = flag_schedule_insns;
  mips_base_reorder_blocks_and_partition = flag_reorder_blocks_and_partition;
  mips_base_move_loop_invariants = flag_move_loop_invariants;
  mips_base_align_loops = align_loops;
  mips_base_align_jumps = align_jumps;
  mips_base_align_functions = align_functions;

  /* Now select the ISA mode.

     Do all CPP-sensitive stuff in non-MIPS16 mode; we'll switch to
     MIPS16 mode afterwards if need be.  */
  mips_set_mips16_mode (false);
}
16164
 
16165
/* Swap the register information for registers I and I + 1, which
16166
   currently have the wrong endianness.  Note that the registers'
16167
   fixedness and call-clobberedness might have been set on the
16168
   command line.  */
16169
 
16170
static void
16171
mips_swap_registers (unsigned int i)
16172
{
16173
  int tmpi;
16174
  const char *tmps;
16175
 
16176
#define SWAP_INT(X, Y) (tmpi = (X), (X) = (Y), (Y) = tmpi)
16177
#define SWAP_STRING(X, Y) (tmps = (X), (X) = (Y), (Y) = tmps)
16178
 
16179
  SWAP_INT (fixed_regs[i], fixed_regs[i + 1]);
16180
  SWAP_INT (call_used_regs[i], call_used_regs[i + 1]);
16181
  SWAP_INT (call_really_used_regs[i], call_really_used_regs[i + 1]);
16182
  SWAP_STRING (reg_names[i], reg_names[i + 1]);
16183
 
16184
#undef SWAP_STRING
16185
#undef SWAP_INT
16186
}
16187
 
16188
/* Implement TARGET_CONDITIONAL_REGISTER_USAGE.  Adjust the register
   tables for the selected ISA, float ABI, MIPS16 mode and calling
   convention.  */

static void
mips_conditional_register_usage (void)
{

  if (ISA_HAS_DSP)
    {
      /* These DSP control register fields are global.  */
      global_regs[CCDSP_PO_REGNUM] = 1;
      global_regs[CCDSP_SC_REGNUM] = 1;
    }
  else
    /* Without the DSP ASE, the accumulator registers do not exist, so
       remove them from the set of accessible registers.  */
    AND_COMPL_HARD_REG_SET (accessible_reg_set,
                            reg_class_contents[(int) DSP_ACC_REGS]);

  if (!TARGET_HARD_FLOAT)
    {
      /* Soft float: neither the FPRs nor the FP condition codes are
         available.  */
      AND_COMPL_HARD_REG_SET (accessible_reg_set,
                              reg_class_contents[(int) FP_REGS]);
      AND_COMPL_HARD_REG_SET (accessible_reg_set,
                              reg_class_contents[(int) ST_REGS]);
    }
  else if (!ISA_HAS_8CC)
    {
      /* We only have a single condition-code register.  We implement
         this by fixing all the condition-code registers and generating
         RTL that refers directly to ST_REG_FIRST.  */
      AND_COMPL_HARD_REG_SET (accessible_reg_set,
                              reg_class_contents[(int) ST_REGS]);
      SET_HARD_REG_BIT (accessible_reg_set, FPSW_REGNUM);
      fixed_regs[FPSW_REGNUM] = call_used_regs[FPSW_REGNUM] = 1;
    }
  if (TARGET_MIPS16)
    {
      /* In MIPS16 mode, we permit the $t temporary registers to be used
         for reload.  We prohibit the unused $s registers, since they
         are call-saved, and saving them via a MIPS16 register would
         probably waste more time than just reloading the value.  */
      fixed_regs[18] = call_used_regs[18] = 1;
      fixed_regs[19] = call_used_regs[19] = 1;
      fixed_regs[20] = call_used_regs[20] = 1;
      fixed_regs[21] = call_used_regs[21] = 1;
      fixed_regs[22] = call_used_regs[22] = 1;
      fixed_regs[23] = call_used_regs[23] = 1;
      fixed_regs[26] = call_used_regs[26] = 1;
      fixed_regs[27] = call_used_regs[27] = 1;
      fixed_regs[30] = call_used_regs[30] = 1;

      /* Do not allow HI and LO to be treated as register operands.
         There are no MTHI or MTLO instructions (or any real need
         for them) and one-way registers cannot easily be reloaded.  */
      AND_COMPL_HARD_REG_SET (operand_reg_set,
                              reg_class_contents[(int) MD_REGS]);
    }
  /* $f20-$f23 are call-clobbered for n64.  */
  if (mips_abi == ABI_64)
    {
      int regno;
      for (regno = FP_REG_FIRST + 20; regno < FP_REG_FIRST + 24; regno++)
        call_really_used_regs[regno] = call_used_regs[regno] = 1;
    }
  /* Odd registers in the range $f21-$f31 (inclusive) are call-clobbered
     for n32.  */
  if (mips_abi == ABI_N32)
    {
      int regno;
      for (regno = FP_REG_FIRST + 21; regno <= FP_REG_FIRST + 31; regno+=2)
        call_really_used_regs[regno] = call_used_regs[regno] = 1;
    }
  /* Make sure that double-register accumulator values are correctly
     ordered for the current endianness.  */
  if (TARGET_LITTLE_ENDIAN)
    {
      unsigned int regno;

      mips_swap_registers (MD_REG_FIRST);
      for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno += 2)
        mips_swap_registers (regno);
    }
}
16269
 
16270
/* When generating MIPS16 code, we want to allocate $24 (T_REG) before
16271
   other registers for instructions for which it is possible.  This
16272
   encourages the compiler to use CMP in cases where an XOR would
16273
   require some register shuffling.  */
16274
 
16275
void
16276
mips_order_regs_for_local_alloc (void)
16277
{
16278
  int i;
16279
 
16280
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
16281
    reg_alloc_order[i] = i;
16282
 
16283
  if (TARGET_MIPS16)
16284
    {
16285
      /* It really doesn't matter where we put register 0, since it is
16286
         a fixed register anyhow.  */
16287
      reg_alloc_order[0] = 24;
16288
      reg_alloc_order[24] = 0;
16289
    }
16290
}
16291
 
16292
/* Implement EH_USES.  */
16293
 
16294
bool
16295
mips_eh_uses (unsigned int regno)
16296
{
16297
  if (reload_completed && !TARGET_ABSOLUTE_JUMPS)
16298
    {
16299
      /* We need to force certain registers to be live in order to handle
16300
         PIC long branches correctly.  See mips_must_initialize_gp_p for
16301
         details.  */
16302
      if (mips_cfun_has_cprestore_slot_p ())
16303
        {
16304
          if (regno == CPRESTORE_SLOT_REGNUM)
16305
            return true;
16306
        }
16307
      else
16308
        {
16309
          if (cfun->machine->global_pointer == regno)
16310
            return true;
16311
        }
16312
    }
16313
 
16314
  return false;
16315
}
16316
 
16317
/* Implement EPILOGUE_USES.  */
16318
 
16319
bool
16320
mips_epilogue_uses (unsigned int regno)
16321
{
16322
  /* Say that the epilogue uses the return address register.  Note that
16323
     in the case of sibcalls, the values "used by the epilogue" are
16324
     considered live at the start of the called function.  */
16325
  if (regno == RETURN_ADDR_REGNUM)
16326
    return true;
16327
 
16328
  /* If using a GOT, say that the epilogue also uses GOT_VERSION_REGNUM.
16329
     See the comment above load_call<mode> for details.  */
16330
  if (TARGET_USE_GOT && (regno) == GOT_VERSION_REGNUM)
16331
    return true;
16332
 
16333
  /* An interrupt handler must preserve some registers that are
16334
     ordinarily call-clobbered.  */
16335
  if (cfun->machine->interrupt_handler_p
16336
      && mips_interrupt_extra_call_saved_reg_p (regno))
16337
    return true;
16338
 
16339
  return false;
16340
}
16341
 
16342
/* A for_each_rtx callback.  Stop the search if *X is an AT register.  */
16343
 
16344
static int
16345
mips_at_reg_p (rtx *x, void *data ATTRIBUTE_UNUSED)
16346
{
16347
  return REG_P (*x) && REGNO (*x) == AT_REGNUM;
16348
}
16349
 
16350
/* Return true if INSN needs to be wrapped in ".set noat".
16351
   INSN has NOPERANDS operands, stored in OPVEC.  */
16352
 
16353
static bool
16354
mips_need_noat_wrapper_p (rtx insn, rtx *opvec, int noperands)
16355
{
16356
  int i;
16357
 
16358
  if (recog_memoized (insn) >= 0)
16359
    for (i = 0; i < noperands; i++)
16360
      if (for_each_rtx (&opvec[i], mips_at_reg_p, NULL))
16361
        return true;
16362
  return false;
16363
}
16364
 
16365
/* Implement FINAL_PRESCAN_INSN.  If INSN explicitly refers to $at,
   open a ".set noat" region before it is output; the matching pop is
   done in mips_final_postscan_insn.  */

void
mips_final_prescan_insn (rtx insn, rtx *opvec, int noperands)
{
  if (mips_need_noat_wrapper_p (insn, opvec, noperands))
    mips_push_asm_switch (&mips_noat);
}
16373
 
16374
/* Implement TARGET_ASM_FINAL_POSTSCAN_INSN.  Close the ".set noat"
   region opened by mips_final_prescan_insn for insns that refer to
   $at.  */

static void
mips_final_postscan_insn (FILE *file ATTRIBUTE_UNUSED, rtx insn,
                          rtx *opvec, int noperands)
{
  if (mips_need_noat_wrapper_p (insn, opvec, noperands))
    mips_pop_asm_switch (&mips_noat);
}
16383
 
16384
/* Return the function that is used to expand the <u>mulsidi3 pattern.
   EXT_CODE is the code of the extension used (SIGN_EXTEND or
   ZERO_EXTEND).  Return NULL if widening multiplication shouldn't be
   used.  */

mulsidi3_gen_fn
mips_mulsidi3_gen_fn (enum rtx_code ext_code)
{
  bool signed_p;

  signed_p = ext_code == SIGN_EXTEND;
  if (TARGET_64BIT)
    {
      /* Don't use widening multiplication with MULT when we have DMUL.  Even
         with the extension of its input operands DMUL is faster.  Note that
         the extension is not needed for signed multiplication.  In order to
         ensure that we always remove the redundant sign-extension in this
         case we still expand mulsidi3 for DMUL.  */
      if (ISA_HAS_DMUL3)
        return signed_p ? gen_mulsidi3_64bit_dmul : NULL;
      if (TARGET_MIPS16)
        return (signed_p
                ? gen_mulsidi3_64bit_mips16
                : gen_umulsidi3_64bit_mips16);
      /* NOTE(review): with -mfix-r4000 no 64-bit widening pattern is
         offered at all.  */
      if (TARGET_FIX_R4000)
        return NULL;
      return signed_p ? gen_mulsidi3_64bit : gen_umulsidi3_64bit;
    }
  else
    {
      if (TARGET_MIPS16)
        return (signed_p
                ? gen_mulsidi3_32bit_mips16
                : gen_umulsidi3_32bit_mips16);
      /* Use the dedicated R4000 patterns unless the DSP ASE is
         available.  */
      if (TARGET_FIX_R4000 && !ISA_HAS_DSP)
        return signed_p ? gen_mulsidi3_32bit_r4000 : gen_umulsidi3_32bit_r4000;
      return signed_p ? gen_mulsidi3_32bit : gen_umulsidi3_32bit;
    }
}
16422
 
16423
/* Return the size in bytes of the trampoline code, padded to
16424
   TRAMPOLINE_ALIGNMENT bits.  The static chain pointer and target
16425
   function address immediately follow.  */
16426
 
16427
int
16428
mips_trampoline_code_size (void)
16429
{
16430
  if (TARGET_USE_PIC_FN_ADDR_REG)
16431
    return 4 * 4;
16432
  else if (ptr_mode == DImode)
16433
    return 8 * 4;
16434
  else if (ISA_HAS_LOAD_DELAY)
16435
    return 6 * 4;
16436
  else
16437
    return 4 * 4;
16438
}
16439
 
16440
/* Implement TARGET_TRAMPOLINE_INIT.  Fill in the trampoline template
   M_TRAMP with code that loads the static chain register with
   CHAIN_VALUE and jumps to FNDECL, then flush the instruction cache
   over the code part.  */

static void
mips_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
{
  rtx addr, end_addr, high, low, opcode, mem;
  rtx trampoline[8];
  unsigned int i, j;
  HOST_WIDE_INT end_addr_offset, static_chain_offset, target_function_offset;

  /* Work out the offsets of the pointers from the start of the
     trampoline code.  */
  end_addr_offset = mips_trampoline_code_size ();
  static_chain_offset = end_addr_offset;
  target_function_offset = static_chain_offset + GET_MODE_SIZE (ptr_mode);

  /* Get pointers to the beginning and end of the code block.  */
  addr = force_reg (Pmode, XEXP (m_tramp, 0));
  end_addr = mips_force_binary (Pmode, PLUS, addr, GEN_INT (end_addr_offset));

#define OP(X) gen_int_mode (X, SImode)

  /* Build up the code in TRAMPOLINE.  */
  i = 0;
  if (TARGET_USE_PIC_FN_ADDR_REG)
    {
      /* $25 contains the address of the trampoline.  Emit code of the form:

             l[wd]    $1, target_function_offset($25)
             l[wd]    $static_chain, static_chain_offset($25)
             jr       $1
             move     $25,$1.  */
      trampoline[i++] = OP (MIPS_LOAD_PTR (AT_REGNUM,
                                           target_function_offset,
                                           PIC_FUNCTION_ADDR_REGNUM));
      trampoline[i++] = OP (MIPS_LOAD_PTR (STATIC_CHAIN_REGNUM,
                                           static_chain_offset,
                                           PIC_FUNCTION_ADDR_REGNUM));
      trampoline[i++] = OP (MIPS_JR (AT_REGNUM));
      trampoline[i++] = OP (MIPS_MOVE (PIC_FUNCTION_ADDR_REGNUM, AT_REGNUM));
    }
  else if (ptr_mode == DImode)
    {
      /* It's too cumbersome to create the full 64-bit address, so let's
         instead use:

             move    $1, $31
             bal     1f
             nop
         1:  l[wd]   $25, target_function_offset - 12($31)
             l[wd]   $static_chain, static_chain_offset - 12($31)
             jr      $25
             move    $31, $1

        where 12 is the offset of "1:" from the start of the code block.  */
      trampoline[i++] = OP (MIPS_MOVE (AT_REGNUM, RETURN_ADDR_REGNUM));
      trampoline[i++] = OP (MIPS_BAL (1));
      trampoline[i++] = OP (MIPS_NOP);
      trampoline[i++] = OP (MIPS_LOAD_PTR (PIC_FUNCTION_ADDR_REGNUM,
                                           target_function_offset - 12,
                                           RETURN_ADDR_REGNUM));
      trampoline[i++] = OP (MIPS_LOAD_PTR (STATIC_CHAIN_REGNUM,
                                           static_chain_offset - 12,
                                           RETURN_ADDR_REGNUM));
      trampoline[i++] = OP (MIPS_JR (PIC_FUNCTION_ADDR_REGNUM));
      trampoline[i++] = OP (MIPS_MOVE (RETURN_ADDR_REGNUM, AT_REGNUM));
    }
  else
    {
      /* If the target has load delays, emit:

             lui     $1, %hi(end_addr)
             lw      $25, %lo(end_addr + ...)($1)
             lw      $static_chain, %lo(end_addr + ...)($1)
             jr      $25
             nop

         Otherwise emit:

             lui     $1, %hi(end_addr)
             lw      $25, %lo(end_addr + ...)($1)
             jr      $25
             lw      $static_chain, %lo(end_addr + ...)($1).  */

      /* Split END_ADDR into %hi and %lo values.  Trampolines are aligned
         to 64 bits, so the %lo value will have the bottom 3 bits clear.  */
      high = expand_simple_binop (SImode, PLUS, end_addr, GEN_INT (0x8000),
                                  NULL, false, OPTAB_WIDEN);
      high = expand_simple_binop (SImode, LSHIFTRT, high, GEN_INT (16),
                                  NULL, false, OPTAB_WIDEN);
      low = convert_to_mode (SImode, gen_lowpart (HImode, end_addr), true);

      /* Emit the LUI.  The %hi part is ORed into the opcode at run time.  */
      opcode = OP (MIPS_LUI (AT_REGNUM, 0));
      trampoline[i++] = expand_simple_binop (SImode, IOR, opcode, high,
                                             NULL, false, OPTAB_WIDEN);

      /* Emit the load of the target function.  */
      opcode = OP (MIPS_LOAD_PTR (PIC_FUNCTION_ADDR_REGNUM,
                                  target_function_offset - end_addr_offset,
                                  AT_REGNUM));
      trampoline[i++] = expand_simple_binop (SImode, IOR, opcode, low,
                                             NULL, false, OPTAB_WIDEN);

      /* Emit the JR here, if we can.  */
      if (!ISA_HAS_LOAD_DELAY)
        trampoline[i++] = OP (MIPS_JR (PIC_FUNCTION_ADDR_REGNUM));

      /* Emit the load of the static chain register.  */
      opcode = OP (MIPS_LOAD_PTR (STATIC_CHAIN_REGNUM,
                                  static_chain_offset - end_addr_offset,
                                  AT_REGNUM));
      trampoline[i++] = expand_simple_binop (SImode, IOR, opcode, low,
                                             NULL, false, OPTAB_WIDEN);

      /* Emit the JR, if we couldn't above.  */
      if (ISA_HAS_LOAD_DELAY)
        {
          trampoline[i++] = OP (MIPS_JR (PIC_FUNCTION_ADDR_REGNUM));
          trampoline[i++] = OP (MIPS_NOP);
        }
    }

#undef OP

  /* Copy the trampoline code.  Leave any padding uninitialized.  */
  for (j = 0; j < i; j++)
    {
      mem = adjust_address (m_tramp, SImode, j * GET_MODE_SIZE (SImode));
      mips_emit_move (mem, trampoline[j]);
    }

  /* Set up the static chain pointer field.  */
  mem = adjust_address (m_tramp, ptr_mode, static_chain_offset);
  mips_emit_move (mem, chain_value);

  /* Set up the target function field.  */
  mem = adjust_address (m_tramp, ptr_mode, target_function_offset);
  mips_emit_move (mem, XEXP (DECL_RTL (fndecl), 0));

  /* Flush the code part of the trampoline.  */
  emit_insn (gen_add3_insn (end_addr, addr, GEN_INT (TRAMPOLINE_SIZE)));
  emit_insn (gen_clear_cache (addr, end_addr));
}
16584
 
16585
/* Implement FUNCTION_PROFILER.  Emit the assembly that calls _mcount
   at the start of each profiled function, writing it to FILE.  Not
   supported for MIPS16.  */

void mips_function_profiler (FILE *file)
{
  if (TARGET_MIPS16)
    sorry ("mips16 function profiling");
  if (TARGET_LONG_CALLS)
    {
      /* For TARGET_LONG_CALLS use $3 for the address of _mcount.  */
      if (Pmode == DImode)
        fprintf (file, "\tdla\t%s,_mcount\n", reg_names[3]);
      else
        fprintf (file, "\tla\t%s,_mcount\n", reg_names[3]);
    }
  mips_push_asm_switch (&mips_noat);
  /* Preserve the caller's return address in $at across the call.  */
  fprintf (file, "\tmove\t%s,%s\t\t# save current return address\n",
           reg_names[AT_REGNUM], reg_names[RETURN_ADDR_REGNUM]);
  /* _mcount treats $2 as the static chain register.  */
  if (cfun->static_chain_decl != NULL)
    fprintf (file, "\tmove\t%s,%s\n", reg_names[2],
             reg_names[STATIC_CHAIN_REGNUM]);
  if (TARGET_MCOUNT_RA_ADDRESS)
    {
      /* If TARGET_MCOUNT_RA_ADDRESS load $12 with the address of the
         ra save location.  */
      if (cfun->machine->frame.ra_fp_offset == 0)
        /* ra not saved, pass zero.  */
        fprintf (file, "\tmove\t%s,%s\n", reg_names[12], reg_names[0]);
      else
        fprintf (file, "\t%s\t%s," HOST_WIDE_INT_PRINT_DEC "(%s)\n",
                 Pmode == DImode ? "dla" : "la", reg_names[12],
                 cfun->machine->frame.ra_fp_offset,
                 reg_names[STACK_POINTER_REGNUM]);
    }
  /* The o32/o64 _mcount expects the caller to have reserved stack
     space for its two argument words.  */
  if (!TARGET_NEWABI)
    fprintf (file,
             "\t%s\t%s,%s,%d\t\t# _mcount pops 2 words from  stack\n",
             TARGET_64BIT ? "dsubu" : "subu",
             reg_names[STACK_POINTER_REGNUM],
             reg_names[STACK_POINTER_REGNUM],
             Pmode == DImode ? 16 : 8);

  if (TARGET_LONG_CALLS)
    fprintf (file, "\tjalr\t%s\n", reg_names[3]);
  else
    fprintf (file, "\tjal\t_mcount\n");
  mips_pop_asm_switch (&mips_noat);
  /* _mcount treats $2 as the static chain register.  */
  if (cfun->static_chain_decl != NULL)
    fprintf (file, "\tmove\t%s,%s\n", reg_names[STATIC_CHAIN_REGNUM],
             reg_names[2]);
}
16637
 
16638
/* Implement TARGET_SHIFT_TRUNCATION_MASK.  We want to keep the default
16639
   behaviour of TARGET_SHIFT_TRUNCATION_MASK for non-vector modes even
16640
   when TARGET_LOONGSON_VECTORS is true.  */
16641
 
16642
static unsigned HOST_WIDE_INT
16643
mips_shift_truncation_mask (enum machine_mode mode)
16644
{
16645
  if (TARGET_LOONGSON_VECTORS && VECTOR_MODE_P (mode))
16646
    return 0;
16647
 
16648
  return GET_MODE_BITSIZE (mode) - 1;
16649
}
16650
 
16651
/* Implement TARGET_PREPARE_PCH_SAVE.  Switch to non-MIPS16 mode and
   clear mips16_globals before the precompiled header is written.  */

static void
mips_prepare_pch_save (void)
{
  /* We are called in a context where the current MIPS16 vs. non-MIPS16
     setting should be irrelevant.  The question then is: which setting
     makes most sense at load time?

     The PCH is loaded before the first token is read.  We should never
     have switched into MIPS16 mode by that point, and thus should not
     have populated mips16_globals.  Nor can we load the entire contents
     of mips16_globals from the PCH file, because mips16_globals contains
     a combination of GGC and non-GGC data.

     There is therefore no point in trying save the GGC part of
     mips16_globals to the PCH file, or to preserve MIPS16ness across
     the PCH save and load.  The loading compiler would not have access
     to the non-GGC parts of mips16_globals (either from the PCH file,
     or from a copy that the loading compiler generated itself) and would
     have to call target_reinit anyway.

     It therefore seems best to switch back to non-MIPS16 mode at
     save time, and to ensure that mips16_globals remains null after
     a PCH load.  */
  mips_set_mips16_mode (false);
  mips16_globals = 0;
}
16679
 
16680
/* Generate or test for an insn that supports a constant permutation.  */

#define MAX_VECT_LEN 8

/* Describes one constant vector permutation to expand or to test.  */
struct expand_vec_perm_d
{
  /* Destination and the two source operands.  */
  rtx target, op0, op1;
  /* The selector: PERM[i] indexes into the concatenation of OP0 and
     OP1.  */
  unsigned char perm[MAX_VECT_LEN];
  /* Vector mode of TARGET.  */
  enum machine_mode vmode;
  /* Number of elements in VMODE.  */
  unsigned char nelt;
  /* True if OP0 and OP1 are the same vector.  */
  bool one_vector_p;
  /* True if we should only test whether the permutation is supported,
     without emitting code.  */
  bool testing_p;
};
16693
 
16694
/* Construct (set target (vec_select op0 (parallel perm))) and
16695
   return true if that's a valid instruction in the active ISA.  */
16696
 
16697
static bool
16698
mips_expand_vselect (rtx target, rtx op0,
16699
                     const unsigned char *perm, unsigned nelt)
16700
{
16701
  rtx rperm[MAX_VECT_LEN], x;
16702
  unsigned i;
16703
 
16704
  for (i = 0; i < nelt; ++i)
16705
    rperm[i] = GEN_INT (perm[i]);
16706
 
16707
  x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nelt, rperm));
16708
  x = gen_rtx_VEC_SELECT (GET_MODE (target), op0, x);
16709
  x = gen_rtx_SET (VOIDmode, target, x);
16710
 
16711
  x = emit_insn (x);
16712
  if (recog_memoized (x) < 0)
16713
    {
16714
      remove_insn (x);
16715
      return false;
16716
    }
16717
  return true;
16718
}
16719
 
16720
/* Similar, but generate a vec_concat from op0 and op1 as well.  */
16721
 
16722
static bool
16723
mips_expand_vselect_vconcat (rtx target, rtx op0, rtx op1,
16724
                             const unsigned char *perm, unsigned nelt)
16725
{
16726
  enum machine_mode v2mode;
16727
  rtx x;
16728
 
16729
  v2mode = GET_MODE_2XWIDER_MODE (GET_MODE (op0));
16730
  x = gen_rtx_VEC_CONCAT (v2mode, op0, op1);
16731
  return mips_expand_vselect (target, x, perm, nelt);
16732
}
16733
 
16734
/* Recognize patterns for even-odd extraction.  Return true if D is an
   even- or odd-element extraction that can be done with Loongson
   punpck interleaves, emitting the code unless D->testing_p.  */

static bool
mips_expand_vpc_loongson_even_odd (struct expand_vec_perm_d *d)
{
  unsigned i, odd, nelt = d->nelt;
  rtx t0, t1, t2, t3;

  if (!(TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS))
    return false;
  /* Even-odd for V2SI/V2SFmode is matched by interleave directly.  */
  if (nelt < 4)
    return false;

  /* The selector must be 0,2,4,... (even) or 1,3,5,... (odd).  */
  odd = d->perm[0];
  if (odd > 1)
    return false;
  for (i = 1; i < nelt; ++i)
    if (d->perm[i] != i * 2 + odd)
      return false;

  if (d->testing_p)
    return true;

  /* We need 2*log2(N)-1 operations to achieve odd/even with interleave. */
  t0 = gen_reg_rtx (d->vmode);
  t1 = gen_reg_rtx (d->vmode);
  switch (d->vmode)
    {
    case V4HImode:
      emit_insn (gen_loongson_punpckhhw (t0, d->op0, d->op1));
      emit_insn (gen_loongson_punpcklhw (t1, d->op0, d->op1));
      if (odd)
        emit_insn (gen_loongson_punpckhhw (d->target, t1, t0));
      else
        emit_insn (gen_loongson_punpcklhw (d->target, t1, t0));
      break;

    case V8QImode:
      /* Eight elements need one more interleave level than four.  */
      t2 = gen_reg_rtx (d->vmode);
      t3 = gen_reg_rtx (d->vmode);
      emit_insn (gen_loongson_punpckhbh (t0, d->op0, d->op1));
      emit_insn (gen_loongson_punpcklbh (t1, d->op0, d->op1));
      emit_insn (gen_loongson_punpckhbh (t2, t1, t0));
      emit_insn (gen_loongson_punpcklbh (t3, t1, t0));
      if (odd)
        emit_insn (gen_loongson_punpckhbh (d->target, t3, t2));
      else
        emit_insn (gen_loongson_punpcklbh (d->target, t3, t2));
      break;

    default:
      gcc_unreachable ();
    }
  return true;
}
16790
 
16791
/* Recognize patterns for the Loongson PSHUFH instruction.  Return true
   if D is a V4HI permutation that PSHUFH can implement, emitting the
   code unless D->testing_p.  */

static bool
mips_expand_vpc_loongson_pshufh (struct expand_vec_perm_d *d)
{
  unsigned i, mask;
  rtx rmask;

  if (!(TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS))
    return false;
  if (d->vmode != V4HImode)
    return false;
  if (d->testing_p)
    return true;

  /* Convert the selector into the packed 8-bit form for pshufh.  */
  /* Recall that loongson is little-endian only.  No big-endian
     adjustment required.  */
  for (i = mask = 0; i < 4; i++)
    mask |= (d->perm[i] & 3) << (i * 2);
  rmask = force_reg (SImode, GEN_INT (mask));

  if (d->one_vector_p)
    emit_insn (gen_loongson_pshufh (d->target, d->op0, rmask));
  else
    {
      rtx t0, t1, x, merge, rmerge[4];

      /* Two-vector case: shuffle both inputs with the same selector,
         then merge them with a per-element mask chosen by bit 2 of
         each selector entry (which vector the element comes from).  */
      t0 = gen_reg_rtx (V4HImode);
      t1 = gen_reg_rtx (V4HImode);
      emit_insn (gen_loongson_pshufh (t1, d->op1, rmask));
      emit_insn (gen_loongson_pshufh (t0, d->op0, rmask));

      for (i = 0; i < 4; ++i)
        rmerge[i] = (d->perm[i] & 4 ? constm1_rtx : const0_rtx);
      merge = gen_rtx_CONST_VECTOR (V4HImode, gen_rtvec_v (4, rmerge));
      merge = force_reg (V4HImode, merge);

      /* target = (merge & t1) | (~merge & t0).  */
      x = gen_rtx_AND (V4HImode, merge, t1);
      emit_insn (gen_rtx_SET (VOIDmode, t1, x));

      x = gen_rtx_NOT (V4HImode, merge);
      x = gen_rtx_AND (V4HImode, x, t0);
      emit_insn (gen_rtx_SET (VOIDmode, t0, x));

      x = gen_rtx_IOR (V4HImode, t0, t1);
      emit_insn (gen_rtx_SET (VOIDmode, d->target, x));
    }

  return true;
}
16842
 
16843
/* Recognize broadcast patterns for the Loongson.  Return true if D
   replicates one V8QI element into all lanes, emitting the code unless
   D->testing_p.  */

static bool
mips_expand_vpc_loongson_bcast (struct expand_vec_perm_d *d)
{
  unsigned i, elt;
  rtx t0, t1;

  if (!(TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS))
    return false;
  /* Note that we've already matched V2SI via punpck and V4HI via pshufh.  */
  if (d->vmode != V8QImode)
    return false;
  if (!d->one_vector_p)
    return false;

  /* All selector entries must name the same element.  */
  elt = d->perm[0];
  for (i = 1; i < 8; ++i)
    if (d->perm[i] != elt)
      return false;

  if (d->testing_p)
    return true;

  /* With one interleave we put two of the desired element adjacent.  */
  t0 = gen_reg_rtx (V8QImode);
  if (elt < 4)
    emit_insn (gen_loongson_punpcklbh (t0, d->op0, d->op0));
  else
    emit_insn (gen_loongson_punpckhbh (t0, d->op0, d->op0));

  /* Shuffle that one HImode element into all locations.  */
  elt &= 3;
  elt *= 0x55;
  t1 = gen_reg_rtx (V4HImode);
  emit_insn (gen_loongson_pshufh (t1, gen_lowpart (V4HImode, t0),
                                  force_reg (SImode, GEN_INT (elt))));

  emit_move_insn (d->target, gen_lowpart (V8QImode, t1));
  return true;
}
16884
 
16885
/* Try each strategy in turn to expand the constant permutation D.
   Return true on success.  */

static bool
mips_expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
{
  unsigned int i, nelt = d->nelt;
  unsigned char perm2[MAX_VECT_LEN];

  if (d->one_vector_p)
    {
      /* Try interleave with alternating operands.  */
      memcpy (perm2, d->perm, sizeof(perm2));
      for (i = 1; i < nelt; i += 2)
        perm2[i] += nelt;
      if (mips_expand_vselect_vconcat (d->target, d->op0, d->op1, perm2, nelt))
        return true;
    }
  else
    {
      /* Try a direct vec_select from the concatenation of both
         operands.  */
      if (mips_expand_vselect_vconcat (d->target, d->op0, d->op1,
                                       d->perm, nelt))
        return true;

      /* Try again with swapped operands.  */
      for (i = 0; i < nelt; ++i)
        perm2[i] = (d->perm[i] + nelt) & (2 * nelt - 1);
      if (mips_expand_vselect_vconcat (d->target, d->op1, d->op0, perm2, nelt))
        return true;
    }

  /* Fall back to the Loongson-specific recognizers.  */
  if (mips_expand_vpc_loongson_even_odd (d))
    return true;
  if (mips_expand_vpc_loongson_pshufh (d))
    return true;
  if (mips_expand_vpc_loongson_bcast (d))
    return true;
  return false;
}
16921
 
16922
/* Expand a vec_perm_const pattern.  */
16923
 
16924
bool
16925
mips_expand_vec_perm_const (rtx operands[4])
16926
{
16927
  struct expand_vec_perm_d d;
16928
  int i, nelt, which;
16929
  unsigned char orig_perm[MAX_VECT_LEN];
16930
  rtx sel;
16931
  bool ok;
16932
 
16933
  d.target = operands[0];
16934
  d.op0 = operands[1];
16935
  d.op1 = operands[2];
16936
  sel = operands[3];
16937
 
16938
  d.vmode = GET_MODE (d.target);
16939
  gcc_assert (VECTOR_MODE_P (d.vmode));
16940
  d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
16941
  d.testing_p = false;
16942
 
16943
  for (i = which = 0; i < nelt; ++i)
16944
    {
16945
      rtx e = XVECEXP (sel, 0, i);
16946
      int ei = INTVAL (e) & (2 * nelt - 1);
16947
      which |= (ei < nelt ? 1 : 2);
16948
      orig_perm[i] = ei;
16949
    }
16950
  memcpy (d.perm, orig_perm, MAX_VECT_LEN);
16951
 
16952
  switch (which)
16953
    {
16954
    default:
16955
      gcc_unreachable();
16956
 
16957
    case 3:
16958
      d.one_vector_p = false;
16959
      if (!rtx_equal_p (d.op0, d.op1))
16960
        break;
16961
      /* FALLTHRU */
16962
 
16963
    case 2:
16964
      for (i = 0; i < nelt; ++i)
16965
        d.perm[i] &= nelt - 1;
16966
      d.op0 = d.op1;
16967
      d.one_vector_p = true;
16968
      break;
16969
 
16970
    case 1:
16971
      d.op1 = d.op0;
16972
      d.one_vector_p = true;
16973
      break;
16974
    }
16975
 
16976
  ok = mips_expand_vec_perm_const_1 (&d);
16977
 
16978
  /* If we were given a two-vector permutation which just happened to
16979
     have both input vectors equal, we folded this into a one-vector
16980
     permutation.  There are several loongson patterns that are matched
16981
     via direct vec_select+vec_concat expansion, but we do not have
16982
     support in mips_expand_vec_perm_const_1 to guess the adjustment
16983
     that should be made for a single operand.  Just try again with
16984
     the original permutation.  */
16985
  if (!ok && which == 3)
16986
    {
16987
      d.op0 = operands[1];
16988
      d.op1 = operands[2];
16989
      d.one_vector_p = false;
16990
      memcpy (d.perm, orig_perm, MAX_VECT_LEN);
16991
      ok = mips_expand_vec_perm_const_1 (&d);
16992
    }
16993
 
16994
  return ok;
16995
}
16996
 
16997
/* Implement TARGET_VECTORIZE_VEC_PERM_CONST_OK.  Return true if the
   permutation SEL on mode VMODE is supported, without emitting any
   code.  */

static bool
mips_vectorize_vec_perm_const_ok (enum machine_mode vmode,
                                  const unsigned char *sel)
{
  struct expand_vec_perm_d d;
  unsigned int i, nelt, which;
  bool ret;

  d.vmode = vmode;
  d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
  d.testing_p = true;
  memcpy (d.perm, sel, nelt);

  /* Categorize the set of elements in the selector.  */
  for (i = which = 0; i < nelt; ++i)
    {
      unsigned char e = d.perm[i];
      gcc_assert (e < 2 * nelt);
      which |= (e < nelt ? 1 : 2);
    }

  /* For all elements from second vector, fold the elements to first.  */
  if (which == 2)
    for (i = 0; i < nelt; ++i)
      d.perm[i] -= nelt;

  /* Check whether the mask can be applied to the vector type.  */
  d.one_vector_p = (which != 3);

  /* Fake operands; raw registers past LAST_VIRTUAL_REGISTER are enough
     for testing_p expansion.  */
  d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1);
  d.op1 = d.op0 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 2);
  if (!d.one_vector_p)
    d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3);

  /* Discard any insns a non-testing path might emit.  */
  start_sequence ();
  ret = mips_expand_vec_perm_const_1 (&d);
  end_sequence ();

  return ret;
}
17039
 
17040
/* Expand an integral vector unpack operation.  OPERANDS[1] is the
   source; HIGH_P selects the high or low half; UNSIGNED_P selects
   zero- versus sign-extension.  The widened result is stored in
   OPERANDS[0].  */

void
mips_expand_vec_unpack (rtx operands[2], bool unsigned_p, bool high_p)
{
  enum machine_mode imode = GET_MODE (operands[1]);
  rtx (*unpack) (rtx, rtx, rtx);
  rtx (*cmpgt) (rtx, rtx, rtx);
  rtx tmp, dest, zero;

  switch (imode)
    {
    case V8QImode:
      if (high_p)
        unpack = gen_loongson_punpckhbh;
      else
        unpack = gen_loongson_punpcklbh;
      cmpgt = gen_loongson_pcmpgtb;
      break;
    case V4HImode:
      if (high_p)
        unpack = gen_loongson_punpckhhw;
      else
        unpack = gen_loongson_punpcklhw;
      cmpgt = gen_loongson_pcmpgth;
      break;
    default:
      gcc_unreachable ();
    }

  /* Widen by interleaving with zero (unsigned) or with the all-ones
     sign mask produced by 0 > x (signed).  */
  zero = force_reg (imode, CONST0_RTX (imode));
  if (unsigned_p)
    tmp = zero;
  else
    {
      tmp = gen_reg_rtx (imode);
      emit_insn (cmpgt (tmp, zero, operands[1]));
    }

  dest = gen_reg_rtx (imode);
  emit_insn (unpack (dest, operands[1], tmp));

  emit_move_insn (operands[0], gen_lowpart (GET_MODE (operands[0]), dest));
}
17084
 
17085
/* A subroutine of mips_expand_vec_init, match constant vector elements.  */
17086
 
17087
static inline bool
17088
mips_constant_elt_p (rtx x)
17089
{
17090
  return CONST_INT_P (x) || GET_CODE (x) == CONST_DOUBLE;
17091
}
17092
 
17093
/* A subroutine of mips_expand_vec_init, expand via broadcast.  */
17094
 
17095
static void
17096
mips_expand_vi_broadcast (enum machine_mode vmode, rtx target, rtx elt)
17097
{
17098
  struct expand_vec_perm_d d;
17099
  rtx t1;
17100
  bool ok;
17101
 
17102
  if (elt != const0_rtx)
17103
    elt = force_reg (GET_MODE_INNER (vmode), elt);
17104
  if (REG_P (elt))
17105
    elt = gen_lowpart (DImode, elt);
17106
 
17107
  t1 = gen_reg_rtx (vmode);
17108
  switch (vmode)
17109
    {
17110
    case V8QImode:
17111
      emit_insn (gen_loongson_vec_init1_v8qi (t1, elt));
17112
      break;
17113
    case V4HImode:
17114
      emit_insn (gen_loongson_vec_init1_v4hi (t1, elt));
17115
      break;
17116
    default:
17117
      gcc_unreachable ();
17118
    }
17119
 
17120
  memset (&d, 0, sizeof (d));
17121
  d.target = target;
17122
  d.op0 = t1;
17123
  d.op1 = t1;
17124
  d.vmode = vmode;
17125
  d.nelt = GET_MODE_NUNITS (vmode);
17126
  d.one_vector_p = true;
17127
 
17128
  ok = mips_expand_vec_perm_const_1 (&d);
17129
  gcc_assert (ok);
17130
}
17131
 
17132
/* A subroutine of mips_expand_vec_init, replacing all of the non-constant
17133
   elements of VALS with zeros, copy the constant vector to TARGET.  */
17134
 
17135
static void
17136
mips_expand_vi_constant (enum machine_mode vmode, unsigned nelt,
17137
                         rtx target, rtx vals)
17138
{
17139
  rtvec vec = shallow_copy_rtvec (XVEC (vals, 0));
17140
  unsigned i;
17141
 
17142
  for (i = 0; i < nelt; ++i)
17143
    {
17144
      if (!mips_constant_elt_p (RTVEC_ELT (vec, i)))
17145
        RTVEC_ELT (vec, i) = const0_rtx;
17146
    }
17147
 
17148
  emit_move_insn (target, gen_rtx_CONST_VECTOR (vmode, vec));
17149
}
17150
 
17151
 
17152
/* A subroutine of mips_expand_vec_init, expand via pinsrh.  */
17153
 
17154
static void
17155
mips_expand_vi_loongson_one_pinsrh (rtx target, rtx vals, unsigned one_var)
17156
{
17157
  mips_expand_vi_constant (V4HImode, 4, target, vals);
17158
 
17159
  emit_insn (gen_vec_setv4hi (target, target, XVECEXP (vals, 0, one_var),
17160
                              GEN_INT (one_var)));
17161
}
17162
 
17163
/* A subroutine of mips_expand_vec_init, expand anything via memory.  */
17164
 
17165
static void
17166
mips_expand_vi_general (enum machine_mode vmode, enum machine_mode imode,
17167
                        unsigned nelt, unsigned nvar, rtx target, rtx vals)
17168
{
17169
  rtx mem = assign_stack_temp (vmode, GET_MODE_SIZE (vmode), 0);
17170
  unsigned int i, isize = GET_MODE_SIZE (imode);
17171
 
17172
  if (nvar < nelt)
17173
    mips_expand_vi_constant (vmode, nelt, mem, vals);
17174
 
17175
  for (i = 0; i < nelt; ++i)
17176
    {
17177
      rtx x = XVECEXP (vals, 0, i);
17178
      if (!mips_constant_elt_p (x))
17179
        emit_move_insn (adjust_address (mem, imode, i * isize), x);
17180
    }
17181
 
17182
  emit_move_insn (target, mem);
17183
}
17184
 
17185
/* Expand a vector initialization.  */
17186
 
17187
void
17188
mips_expand_vector_init (rtx target, rtx vals)
17189
{
17190
  enum machine_mode vmode = GET_MODE (target);
17191
  enum machine_mode imode = GET_MODE_INNER (vmode);
17192
  unsigned i, nelt = GET_MODE_NUNITS (vmode);
17193
  unsigned nvar = 0, one_var = -1u;
17194
  bool all_same = true;
17195
  rtx x;
17196
 
17197
  for (i = 0; i < nelt; ++i)
17198
    {
17199
      x = XVECEXP (vals, 0, i);
17200
      if (!mips_constant_elt_p (x))
17201
        nvar++, one_var = i;
17202
      if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
17203
        all_same = false;
17204
    }
17205
 
17206
  /* Load constants from the pool, or whatever's handy.  */
17207
  if (nvar == 0)
17208
    {
17209
      emit_move_insn (target, gen_rtx_CONST_VECTOR (vmode, XVEC (vals, 0)));
17210
      return;
17211
    }
17212
 
17213
  /* For two-part initialization, always use CONCAT.  */
17214
  if (nelt == 2)
17215
    {
17216
      rtx op0 = force_reg (imode, XVECEXP (vals, 0, 0));
17217
      rtx op1 = force_reg (imode, XVECEXP (vals, 0, 1));
17218
      x = gen_rtx_VEC_CONCAT (vmode, op0, op1);
17219
      emit_insn (gen_rtx_SET (VOIDmode, target, x));
17220
      return;
17221
    }
17222
 
17223
  /* Loongson is the only cpu with vectors with more elements.  */
17224
  gcc_assert (TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS);
17225
 
17226
  /* If all values are identical, broadcast the value.  */
17227
  if (all_same)
17228
    {
17229
      mips_expand_vi_broadcast (vmode, target, XVECEXP (vals, 0, 0));
17230
      return;
17231
    }
17232
 
17233
  /* If we've only got one non-variable V4HImode, use PINSRH.  */
17234
  if (nvar == 1 && vmode == V4HImode)
17235
    {
17236
      mips_expand_vi_loongson_one_pinsrh (target, vals, one_var);
17237
      return;
17238
    }
17239
 
17240
  mips_expand_vi_general (vmode, imode, nelt, nvar, target, vals);
17241
}
17242
 
17243
/* Expand a vector reduction.  */
17244
 
17245
void
mips_expand_vec_reduc (rtx target, rtx in, rtx (*gen)(rtx, rtx, rtx))
{
  enum machine_mode vmode = GET_MODE (in);
  unsigned char perm2[2];
  rtx last, next, fold, x;
  bool ok;

  /* LAST holds the vector of partial results still to be reduced; FOLD
     holds a shuffled copy pairing each element with a partner, so that
     one application of GEN combines the pairs.  */
  last = in;
  fold = gen_reg_rtx (vmode);
  switch (vmode)
    {
    case V2SFmode:
      /* Use PUL/PLU to produce { L, H } op { H, L }.
         By reversing the pair order, rather than a pure interleave high,
         we avoid erroneous exceptional conditions that we might otherwise
         produce from the computation of H op H.  */
      perm2[0] = 1;
      perm2[1] = 2;
      ok = mips_expand_vselect_vconcat (fold, last, last, perm2, 2);
      gcc_assert (ok);
      break;

    case V2SImode:
      /* Use interleave to produce { H, L } op { H, H }.  */
      emit_insn (gen_loongson_punpckhwd (fold, last, last));
      break;

    case V4HImode:
      /* Perform the first reduction with interleave,
         and subsequent reductions with shifts.  */
      emit_insn (gen_loongson_punpckhwd_hi (fold, last, last));

      /* First round: combine interleaved halves.  */
      next = gen_reg_rtx (vmode);
      emit_insn (gen (next, last, fold));
      last = next;

      /* Second round: shift right 16 bits to line up the remaining
         pair of partial results.  */
      fold = gen_reg_rtx (vmode);
      x = force_reg (SImode, GEN_INT (16));
      emit_insn (gen_vec_shr_v4hi (fold, last, x));
      break;

    case V8QImode:
      /* Same scheme as V4HImode, but 8 elements need one extra
         shift-and-combine round.  */
      emit_insn (gen_loongson_punpckhwd_qi (fold, last, last));

      next = gen_reg_rtx (vmode);
      emit_insn (gen (next, last, fold));
      last = next;

      fold = gen_reg_rtx (vmode);
      x = force_reg (SImode, GEN_INT (16));
      emit_insn (gen_vec_shr_v8qi (fold, last, x));

      next = gen_reg_rtx (vmode);
      emit_insn (gen (next, last, fold));
      last = next;

      fold = gen_reg_rtx (vmode);
      x = force_reg (SImode, GEN_INT (8));
      emit_insn (gen_vec_shr_v8qi (fold, last, x));
      break;

    default:
      gcc_unreachable ();
    }

  /* Final combination; the reduction ends up in the low element of
     TARGET (the remaining elements are don't-cares).  */
  emit_insn (gen (target, last, fold));
}
17313
 
17314
/* Expand a vector minimum/maximum.  */
17315
 
17316
void
17317
mips_expand_vec_minmax (rtx target, rtx op0, rtx op1,
17318
                        rtx (*cmp) (rtx, rtx, rtx), bool min_p)
17319
{
17320
  enum machine_mode vmode = GET_MODE (target);
17321
  rtx tc, t0, t1, x;
17322
 
17323
  tc = gen_reg_rtx (vmode);
17324
  t0 = gen_reg_rtx (vmode);
17325
  t1 = gen_reg_rtx (vmode);
17326
 
17327
  /* op0 > op1 */
17328
  emit_insn (cmp (tc, op0, op1));
17329
 
17330
  x = gen_rtx_AND (vmode, tc, (min_p ? op1 : op0));
17331
  emit_insn (gen_rtx_SET (VOIDmode, t0, x));
17332
 
17333
  x = gen_rtx_NOT (vmode, tc);
17334
  x = gen_rtx_AND (vmode, x, (min_p ? op0 : op1));
17335
  emit_insn (gen_rtx_SET (VOIDmode, t1, x));
17336
 
17337
  x = gen_rtx_IOR (vmode, t0, t1);
17338
  emit_insn (gen_rtx_SET (VOIDmode, target, x));
17339
}
17340
 
17341
/* Initialize the GCC target structure.  */

/* Assembler directives for aligned integer data.  */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE mips_option_override

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS mips_legitimize_address

/* Assembly-output hooks.  */
#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE mips_output_function_epilogue
#undef TARGET_ASM_SELECT_RTX_SECTION
#define TARGET_ASM_SELECT_RTX_SECTION mips_select_rtx_section
#undef TARGET_ASM_FUNCTION_RODATA_SECTION
#define TARGET_ASM_FUNCTION_RODATA_SECTION mips_function_rodata_section

/* Instruction-scheduler hooks.  */
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT mips_sched_init
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER mips_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 mips_sched_reorder2
#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE mips_variable_issue
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST mips_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE mips_issue_rate
#undef TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN
#define TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN mips_init_dfa_post_cycle_insn
#undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE
#define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE mips_dfa_post_advance_cycle
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  mips_multipass_dfa_lookahead
#undef TARGET_SMALL_REGISTER_CLASSES_FOR_MODE_P
#define TARGET_SMALL_REGISTER_CLASSES_FOR_MODE_P \
  mips_small_register_classes_for_mode_p

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall

/* Attribute handling.  */
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES mips_insert_attributes
#undef TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES mips_merge_decl_attributes
#undef TARGET_SET_CURRENT_FUNCTION
#define TARGET_SET_CURRENT_FUNCTION mips_set_current_function

/* Cost models used by the optimizers.  */
#undef TARGET_VALID_POINTER_MODE
#define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST mips_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST mips_memory_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS mips_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST mips_address_cost

#undef TARGET_IN_SMALL_DATA_P
#define TARGET_IN_SMALL_DATA_P mips_in_small_data_p

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG mips_reorg

#undef  TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS mips_preferred_reload_class

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START mips_file_start
#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
#undef TARGET_ASM_CODE_END
#define TARGET_ASM_CODE_END mips_code_end

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS mips_init_libfuncs

/* Varargs / va_list support.  */
#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START mips_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr

#undef  TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

/* Function return values.  */
#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE mips_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE mips_libcall_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P mips_function_value_regno_p
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY mips_return_in_memory
#undef TARGET_RETURN_IN_MSB
#define TARGET_RETURN_IN_MSB mips_return_in_msb

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

/* Operand printing for asm output.  */
#undef TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND mips_print_operand
#undef TARGET_PRINT_OPERAND_ADDRESS
#define TARGET_PRINT_OPERAND_ADDRESS mips_print_operand_address
#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P mips_print_operand_punct_valid_p

/* Argument passing.  */
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING mips_strict_argument_naming
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE mips_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES mips_callee_copies
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES mips_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG mips_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE mips_function_arg_advance
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY mips_function_arg_boundary

#undef TARGET_MODE_REP_EXTENDED
#define TARGET_MODE_REP_EXTENDED mips_mode_rep_extended

/* Vector and scalar mode support, for the vectorizer.  */
#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P mips_vector_mode_supported_p

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P mips_scalar_mode_supported_p

#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE mips_preferred_simd_mode

/* Machine-specific builtins.  */
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS mips_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL mips_builtin_decl
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN mips_expand_builtin

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM mips_cannot_force_const_mem

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P mips_legitimate_constant_p

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO mips_encode_section_info

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE mips_attribute_table
/* All our function attributes are related to how out-of-line copies should
   be compiled or called.  They don't in themselves prevent inlining.  */
#undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
#define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY mips_extra_live_on_entry

#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P mips_use_blocks_for_constant_p
#undef TARGET_USE_ANCHORS_FOR_SYMBOL_P
#define TARGET_USE_ANCHORS_FOR_SYMBOL_P mips_use_anchors_for_symbol_p

#undef  TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES mips_comp_type_attributes

/* DWARF debug-info hooks; DTPREL support depends on the assembler.  */
#ifdef HAVE_AS_DTPRELWORD
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL mips_output_dwarf_dtprel
#endif
#undef TARGET_DWARF_REGISTER_SPAN
#define TARGET_DWARF_REGISTER_SPAN mips_dwarf_register_span

#undef TARGET_ASM_FINAL_POSTSCAN_INSN
#define TARGET_ASM_FINAL_POSTSCAN_INSN mips_final_postscan_insn

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P     mips_legitimate_address_p

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED mips_frame_pointer_required

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE mips_can_eliminate

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE mips_conditional_register_usage

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT mips_trampoline_init

#undef TARGET_ASM_OUTPUT_SOURCE_FILENAME
#define TARGET_ASM_OUTPUT_SOURCE_FILENAME mips_output_filename

#undef TARGET_SHIFT_TRUNCATION_MASK
#define TARGET_SHIFT_TRUNCATION_MASK mips_shift_truncation_mask

#undef TARGET_PREPARE_PCH_SAVE
#define TARGET_PREPARE_PCH_SAVE mips_prepare_pch_save

#undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
#define TARGET_VECTORIZE_VEC_PERM_CONST_OK mips_vectorize_vec_perm_const_ok

/* The target-hook vector itself, built from the macros above.  */
struct gcc_target targetm = TARGET_INITIALIZER;

/* Garbage-collector roots generated from this file.  */
#include "gt-mips.h"

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.