/* Subroutines used for MIPS code generation.
   Copyright (C) 1989, 1990, 1991, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
   Free Software Foundation, Inc.
   Contributed by A. Lichnewsky, lich@inria.inria.fr.
   Changes by Michael Meissner, meissner@osf.org.
   64-bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
   Brendan Eich, brendan@microunity.com.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include <signal.h>
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "recog.h"
#include "toplev.h"
#include "output.h"
#include "tree.h"
#include "function.h"
#include "expr.h"
#include "optabs.h"
#include "libfuncs.h"
#include "flags.h"
#include "reload.h"
#include "tm_p.h"
#include "ggc.h"
#include "gstab.h"
#include "hashtab.h"
#include "debug.h"
#include "target.h"
#include "target-def.h"
#include "integrate.h"
#include "langhooks.h"
#include "cfglayout.h"
#include "sched-int.h"
#include "gimple.h"
#include "bitmap.h"
#include "diagnostic.h"

/* True if X is an UNSPEC wrapper around a SYMBOL_REF or LABEL_REF.  */
#define UNSPEC_ADDRESS_P(X) \
  (GET_CODE (X) == UNSPEC \
   && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST \
   && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)

/* Extract the symbol or label from UNSPEC wrapper X.  */
#define UNSPEC_ADDRESS(X) \
  XVECEXP (X, 0, 0)

/* Extract the symbol type from UNSPEC wrapper X.  */
#define UNSPEC_ADDRESS_TYPE(X) \
  ((enum mips_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))

/* The maximum distance between the top of the stack frame and the
   value $sp has when we save and restore registers.

   The value for normal-mode code must be a SMALL_OPERAND and must
   preserve the maximum stack alignment.  We therefore use a value
   of 0x7ff0 in this case.

   MIPS16e SAVE and RESTORE instructions can adjust the stack pointer by
   up to 0x7f8 bytes and can usually save or restore all the registers
   that we need to save or restore.  (Note that we can only use these
   instructions for o32, for which the stack alignment is 8 bytes.)

   We use a maximum gap of 0x100 or 0x400 for MIPS16 code when SAVE and
   RESTORE are not available.  We can then use unextended instructions
   to save and restore registers, and to allocate and deallocate the top
   part of the frame.  */
#define MIPS_MAX_FIRST_STACK_STEP \
  (!TARGET_MIPS16 ? 0x7ff0 \
   : GENERATE_MIPS16E_SAVE_RESTORE ? 0x7f8 \
   : TARGET_64BIT ? 0x100 : 0x400)

/* True if INSN is a mips.md pattern or asm statement.  */
#define USEFUL_INSN_P(INSN) \
  (NONDEBUG_INSN_P (INSN) \
   && GET_CODE (PATTERN (INSN)) != USE \
   && GET_CODE (PATTERN (INSN)) != CLOBBER \
   && GET_CODE (PATTERN (INSN)) != ADDR_VEC \
   && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)

/* If INSN is a delayed branch sequence, return the first instruction
   in the sequence, otherwise return INSN itself.  */
#define SEQ_BEGIN(INSN) \
  (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
   ? XVECEXP (PATTERN (INSN), 0, 0) \
   : (INSN))

/* Likewise for the last instruction in a delayed branch sequence.  */
#define SEQ_END(INSN) \
  (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
   ? XVECEXP (PATTERN (INSN), 0, XVECLEN (PATTERN (INSN), 0) - 1) \
   : (INSN))

/* Execute the following loop body with SUBINSN set to each instruction
   between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive.  */
#define FOR_EACH_SUBINSN(SUBINSN, INSN) \
  for ((SUBINSN) = SEQ_BEGIN (INSN); \
       (SUBINSN) != NEXT_INSN (SEQ_END (INSN)); \
       (SUBINSN) = NEXT_INSN (SUBINSN))

/* True if bit BIT is set in VALUE.  */
#define BITSET_P(VALUE, BIT) (((VALUE) & (1 << (BIT))) != 0)

/* Return the opcode for a ptr_mode load of the form:

       l[wd]    DEST, OFFSET(BASE).  */
#define MIPS_LOAD_PTR(DEST, OFFSET, BASE) \
  (((ptr_mode == DImode ? 0x37 : 0x23) << 26) \
   | ((BASE) << 21) \
   | ((DEST) << 16) \
   | (OFFSET))

/* Return the opcode to move register SRC into register DEST.  */
#define MIPS_MOVE(DEST, SRC) \
  ((TARGET_64BIT ? 0x2d : 0x21) \
   | ((DEST) << 11) \
   | ((SRC) << 21))

/* Return the opcode for:

       lui      DEST, VALUE.  */
#define MIPS_LUI(DEST, VALUE) \
  ((0xf << 26) | ((DEST) << 16) | (VALUE))

/* Return the opcode to jump to register DEST.  */
#define MIPS_JR(DEST) \
  (((DEST) << 21) | 0x8)

/* Return the opcode for:

       bal     . + (1 + OFFSET) * 4.  */
#define MIPS_BAL(OFFSET) \
  ((0x1 << 26) | (0x11 << 16) | (OFFSET))

/* Return the usual opcode for a nop.  */
#define MIPS_NOP 0
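
/* Worked example, added for clarity: with the encodings above,
   MIPS_LUI (28, 0x1234) evaluates to 0x3c1c1234, the machine code for
   "lui $28, 0x1234", and MIPS_JR (31) evaluates to 0x03e00008, the
   machine code for "jr $31".  */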

/* Classifies an address.

   ADDRESS_REG
       A natural register + offset address.  The register satisfies
       mips_valid_base_register_p and the offset is a const_arith_operand.

   ADDRESS_LO_SUM
       A LO_SUM rtx.  The first operand is a valid base register and
       the second operand is a symbolic address.

   ADDRESS_CONST_INT
       A signed 16-bit constant address.

   ADDRESS_SYMBOLIC:
       A constant symbolic address.  */
enum mips_address_type {
  ADDRESS_REG,
  ADDRESS_LO_SUM,
  ADDRESS_CONST_INT,
  ADDRESS_SYMBOLIC
};

/* Enumerates the setting of the -mr10k-cache-barrier option.  */
enum mips_r10k_cache_barrier_setting {
  R10K_CACHE_BARRIER_NONE,
  R10K_CACHE_BARRIER_STORE,
  R10K_CACHE_BARRIER_LOAD_STORE
};

/* Macros to create an enumeration identifier for a function prototype.  */
#define MIPS_FTYPE_NAME1(A, B) MIPS_##A##_FTYPE_##B
#define MIPS_FTYPE_NAME2(A, B, C) MIPS_##A##_FTYPE_##B##_##C
#define MIPS_FTYPE_NAME3(A, B, C, D) MIPS_##A##_FTYPE_##B##_##C##_##D
#define MIPS_FTYPE_NAME4(A, B, C, D, E) MIPS_##A##_FTYPE_##B##_##C##_##D##_##E
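
/* Worked example, added for clarity: MIPS_FTYPE_NAME2 (SI, SI, SI)
   pastes its arguments into the identifier MIPS_SI_FTYPE_SI_SI, which
   is how the DEF_MIPS_FTYPE entries included below become enumerators
   of mips_function_type.  */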

/* Classifies the prototype of a built-in function.  */
enum mips_function_type {
#define DEF_MIPS_FTYPE(NARGS, LIST) MIPS_FTYPE_NAME##NARGS LIST,
#include "config/mips/mips-ftypes.def"
#undef DEF_MIPS_FTYPE
  MIPS_MAX_FTYPE_MAX
};

/* Specifies how a built-in function should be converted into rtl.  */
enum mips_builtin_type {
  /* The function corresponds directly to an .md pattern.  The return
     value is mapped to operand 0 and the arguments are mapped to
     operands 1 and above.  */
  MIPS_BUILTIN_DIRECT,

  /* The function corresponds directly to an .md pattern.  There is no return
     value and the arguments are mapped to operands 0 and above.  */
  MIPS_BUILTIN_DIRECT_NO_TARGET,

  /* The function corresponds to a comparison instruction followed by
     a mips_cond_move_tf_ps pattern.  The first two arguments are the
     values to compare and the second two arguments are the vector
     operands for the movt.ps or movf.ps instruction (in assembly order).  */
  MIPS_BUILTIN_MOVF,
  MIPS_BUILTIN_MOVT,

  /* The function corresponds to a V2SF comparison instruction.  Operand 0
     of this instruction is the result of the comparison, which has mode
     CCV2 or CCV4.  The function arguments are mapped to operands 1 and
     above.  The function's return value is an SImode boolean that is
     true under the following conditions:

     MIPS_BUILTIN_CMP_ANY: one of the registers is true
     MIPS_BUILTIN_CMP_ALL: all of the registers are true
     MIPS_BUILTIN_CMP_LOWER: the first register is true
     MIPS_BUILTIN_CMP_UPPER: the second register is true.  */
  MIPS_BUILTIN_CMP_ANY,
  MIPS_BUILTIN_CMP_ALL,
  MIPS_BUILTIN_CMP_UPPER,
  MIPS_BUILTIN_CMP_LOWER,

  /* As above, but the instruction only sets a single $fcc register.  */
  MIPS_BUILTIN_CMP_SINGLE,

  /* For generating bposge32 branch instructions in MIPS32 DSP ASE.  */
  MIPS_BUILTIN_BPOSGE32
};

/* Invoke MACRO (COND) for each C.cond.fmt condition.  */
#define MIPS_FP_CONDITIONS(MACRO) \
  MACRO (f), \
  MACRO (un), \
  MACRO (eq), \
  MACRO (ueq), \
  MACRO (olt), \
  MACRO (ult), \
  MACRO (ole), \
  MACRO (ule), \
  MACRO (sf), \
  MACRO (ngle), \
  MACRO (seq), \
  MACRO (ngl), \
  MACRO (lt), \
  MACRO (nge), \
  MACRO (le), \
  MACRO (ngt)

/* Enumerates the codes above as MIPS_FP_COND_<X>.  */
#define DECLARE_MIPS_COND(X) MIPS_FP_COND_ ## X
enum mips_fp_condition {
  MIPS_FP_CONDITIONS (DECLARE_MIPS_COND)
};

/* Index X provides the string representation of MIPS_FP_COND_<X>.  */
#define STRINGIFY(X) #X
static const char *const mips_fp_conditions[] = {
  MIPS_FP_CONDITIONS (STRINGIFY)
};

/* Information about a function's frame layout.  */
struct GTY(()) mips_frame_info {
  /* The size of the frame in bytes.  */
  HOST_WIDE_INT total_size;

  /* The number of bytes allocated to variables.  */
  HOST_WIDE_INT var_size;

  /* The number of bytes allocated to outgoing function arguments.  */
  HOST_WIDE_INT args_size;

  /* The number of bytes allocated to the .cprestore slot, or 0 if there
     is no such slot.  */
  HOST_WIDE_INT cprestore_size;

  /* Bit X is set if the function saves or restores GPR X.  */
  unsigned int mask;

  /* Likewise FPR X.  */
  unsigned int fmask;

  /* Likewise doubleword accumulator X ($acX).  */
  unsigned int acc_mask;

  /* The number of GPRs, FPRs, doubleword accumulators and COP0
     registers saved.  */
  unsigned int num_gp;
  unsigned int num_fp;
  unsigned int num_acc;
  unsigned int num_cop0_regs;

  /* The offset of the topmost GPR, FPR, accumulator and COP0-register
     save slots from the top of the frame, or zero if no such slots are
     needed.  */
  HOST_WIDE_INT gp_save_offset;
  HOST_WIDE_INT fp_save_offset;
  HOST_WIDE_INT acc_save_offset;
  HOST_WIDE_INT cop0_save_offset;

  /* Likewise, but giving offsets from the bottom of the frame.  */
  HOST_WIDE_INT gp_sp_offset;
  HOST_WIDE_INT fp_sp_offset;
  HOST_WIDE_INT acc_sp_offset;
  HOST_WIDE_INT cop0_sp_offset;

  /* Similar, but the value passed to _mcount.  */
  HOST_WIDE_INT ra_fp_offset;

  /* The offset of arg_pointer_rtx from the bottom of the frame.  */
  HOST_WIDE_INT arg_pointer_offset;

  /* The offset of hard_frame_pointer_rtx from the bottom of the frame.  */
  HOST_WIDE_INT hard_frame_pointer_offset;
};

struct GTY(()) machine_function {
  /* The register returned by mips16_gp_pseudo_reg; see there for details.  */
  rtx mips16_gp_pseudo_rtx;

  /* The number of extra stack bytes taken up by register varargs.
     This area is allocated by the callee at the very top of the frame.  */
  int varargs_size;

  /* The current frame information, calculated by mips_compute_frame_info.  */
  struct mips_frame_info frame;

  /* The register to use as the function's global pointer, or INVALID_REGNUM
     if the function doesn't need one.  */
  unsigned int global_pointer;

  /* How many instructions it takes to load a label into $AT, or 0 if
     this property hasn't yet been calculated.  */
  unsigned int load_label_length;

  /* True if mips_adjust_insn_length should ignore an instruction's
     hazard attribute.  */
  bool ignore_hazard_length_p;

  /* True if the whole function is suitable for .set noreorder and
     .set nomacro.  */
  bool all_noreorder_p;

  /* True if the function has "inflexible" and "flexible" references
     to the global pointer.  See mips_cfun_has_inflexible_gp_ref_p
     and mips_cfun_has_flexible_gp_ref_p for details.  */
  bool has_inflexible_gp_insn_p;
  bool has_flexible_gp_insn_p;

  /* True if the function's prologue must load the global pointer
     value into pic_offset_table_rtx and store the same value in
     the function's cprestore slot (if any).  Even if this value
     is currently false, we may decide to set it to true later;
     see mips_must_initialize_gp_p () for details.  */
  bool must_initialize_gp_p;

  /* True if the current function must restore $gp after any potential
     clobber.  This value is only meaningful during the first post-epilogue
     split_insns pass; see mips_must_initialize_gp_p () for details.  */
  bool must_restore_gp_when_clobbered_p;

  /* True if we have emitted an instruction to initialize
     mips16_gp_pseudo_rtx.  */
  bool initialized_mips16_gp_pseudo_p;

  /* True if this is an interrupt handler.  */
  bool interrupt_handler_p;

  /* True if this is an interrupt handler that uses shadow registers.  */
  bool use_shadow_register_set_p;

  /* True if this is an interrupt handler that should keep interrupts
     masked.  */
  bool keep_interrupts_masked_p;

  /* True if this is an interrupt handler that should use DERET
     instead of ERET.  */
  bool use_debug_exception_return_p;
};

/* Information about a single argument.  */
struct mips_arg_info {
  /* True if the argument is passed in a floating-point register, or
     would have been if we hadn't run out of registers.  */
  bool fpr_p;

  /* The number of words passed in registers, rounded up.  */
  unsigned int reg_words;

  /* For EABI, the offset of the first register from GP_ARG_FIRST or
     FP_ARG_FIRST.  For other ABIs, the offset of the first register from
     the start of the ABI's argument structure (see the CUMULATIVE_ARGS
     comment for details).

     The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
     on the stack.  */
  unsigned int reg_offset;

  /* The number of words that must be passed on the stack, rounded up.  */
  unsigned int stack_words;

  /* The offset from the start of the stack overflow area of the argument's
     first stack word.  Only meaningful when STACK_WORDS is nonzero.  */
  unsigned int stack_offset;
};

/* Information about an address described by mips_address_type.

   ADDRESS_CONST_INT
       No fields are used.

   ADDRESS_REG
       REG is the base register and OFFSET is the constant offset.

   ADDRESS_LO_SUM
       REG and OFFSET are the operands to the LO_SUM and SYMBOL_TYPE
       is the type of symbol it references.

   ADDRESS_SYMBOLIC
       SYMBOL_TYPE is the type of symbol that the address references.  */
struct mips_address_info {
  enum mips_address_type type;
  rtx reg;
  rtx offset;
  enum mips_symbol_type symbol_type;
};
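
/* Worked example, added for clarity: a stack slot address such as
   (plus (reg $sp) (const_int 64)) is described here with
   type == ADDRESS_REG, reg == $sp and offset == (const_int 64),
   while a LO_SUM address also records the mips_symbol_type of its
   symbolic operand in SYMBOL_TYPE.  */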

/* One stage in a constant building sequence.  These sequences have
   the form:

       A = VALUE[0]
       A = A CODE[1] VALUE[1]
       A = A CODE[2] VALUE[2]
       ...

   where A is an accumulator, each CODE[i] is a binary rtl operation
   and each VALUE[i] is a constant integer.  CODE[0] is undefined.  */
struct mips_integer_op {
  enum rtx_code code;
  unsigned HOST_WIDE_INT value;
};

/* The largest number of operations needed to load an integer constant.
   The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI.
   When the lowest bit is clear, we can try, but reject a sequence with
   an extra SLL at the end.  */
#define MIPS_MAX_INTEGER_OPS 7
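
/* Worked example, added for clarity: the 32-bit constant 0x12345678
   can be described by a two-entry sequence

       A = 0x12340000            (VALUE[0], loadable with LUI)
       A = A IOR 0x5678          (CODE[1] = IOR, VALUE[1] = 0x5678)

   which is well within MIPS_MAX_INTEGER_OPS.  */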

/* Information about a MIPS16e SAVE or RESTORE instruction.  */
struct mips16e_save_restore_info {
  /* The number of argument registers saved by a SAVE instruction.
     0 <= nargs <= 4.  */
  unsigned int nargs;

  /* Bit X is set if the instruction saves or restores GPR X.  */
  unsigned int mask;

  /* The total number of bytes to allocate.  */
  HOST_WIDE_INT size;
};

/* Global variables for machine-dependent things.  */

/* The -G setting, or the configuration's default small-data limit if
   no -G option is given.  */
static unsigned int mips_small_data_threshold;

/* The number of file directives written by mips_output_filename.  */
int num_source_filenames;

/* The name that appeared in the last .file directive written by
   mips_output_filename, or "" if mips_output_filename hasn't
   written anything yet.  */
const char *current_function_file = "";

/* A label counter used by PUT_SDB_BLOCK_START and PUT_SDB_BLOCK_END.  */
int sdb_label_count;

/* Arrays that map GCC register numbers to debugger register numbers.  */
int mips_dbx_regno[FIRST_PSEUDO_REGISTER];
int mips_dwarf_regno[FIRST_PSEUDO_REGISTER];

/* The nesting depth of the PRINT_OPERAND '%(', '%<' and '%[' constructs.  */
struct mips_asm_switch mips_noreorder = { "reorder", 0 };
struct mips_asm_switch mips_nomacro = { "macro", 0 };
struct mips_asm_switch mips_noat = { "at", 0 };

/* True if we're writing out a branch-likely instruction rather than a
   normal branch.  */
static bool mips_branch_likely;

/* The current instruction-set architecture.  */
enum processor_type mips_arch;
const struct mips_cpu_info *mips_arch_info;

/* The processor that we should tune the code for.  */
enum processor_type mips_tune;
const struct mips_cpu_info *mips_tune_info;

/* The ISA level associated with mips_arch.  */
int mips_isa;

/* The architecture selected by -mipsN, or null if -mipsN wasn't used.  */
static const struct mips_cpu_info *mips_isa_option_info;

/* Which ABI to use.  */
int mips_abi = MIPS_ABI_DEFAULT;

/* Which cost information to use.  */
const struct mips_rtx_cost_data *mips_cost;

/* The ambient target flags, excluding MASK_MIPS16.  */
static int mips_base_target_flags;

/* True if MIPS16 is the default mode.  */
bool mips_base_mips16;

/* The ambient values of other global variables.  */
static int mips_base_schedule_insns; /* flag_schedule_insns */
static int mips_base_reorder_blocks_and_partition; /* flag_reorder... */
static int mips_base_move_loop_invariants; /* flag_move_loop_invariants */
static int mips_base_align_loops; /* align_loops */
static int mips_base_align_jumps; /* align_jumps */
static int mips_base_align_functions; /* align_functions */

/* The -mcode-readable setting.  */
enum mips_code_readable_setting mips_code_readable = CODE_READABLE_YES;

/* The -mr10k-cache-barrier setting.  */
static enum mips_r10k_cache_barrier_setting mips_r10k_cache_barrier;

/* Index [M][R] is true if register R is allowed to hold a value of mode M.  */
bool mips_hard_regno_mode_ok[(int) MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];

/* Index C is true if character C is a valid PRINT_OPERAND punctuation
   character.  */
bool mips_print_operand_punct[256];

static GTY (()) int mips_output_filename_first_time = 1;

/* mips_split_p[X] is true if symbols of type X can be split by
   mips_split_symbol.  */
bool mips_split_p[NUM_SYMBOL_TYPES];

/* mips_split_hi_p[X] is true if the high parts of symbols of type X
   can be split by mips_split_symbol.  */
bool mips_split_hi_p[NUM_SYMBOL_TYPES];

/* mips_lo_relocs[X] is the relocation to use when a symbol of type X
   appears in a LO_SUM.  It can be null if such LO_SUMs aren't valid or
   if they are matched by a special .md file pattern.  */
static const char *mips_lo_relocs[NUM_SYMBOL_TYPES];

/* Likewise for HIGHs.  */
static const char *mips_hi_relocs[NUM_SYMBOL_TYPES];

/* Index R is the smallest register class that contains register R.  */
const enum reg_class mips_regno_to_class[FIRST_PSEUDO_REGISTER] = {
  LEA_REGS,     LEA_REGS,     M16_REGS,     V1_REG,
  M16_REGS,     M16_REGS,     M16_REGS,     M16_REGS,
  LEA_REGS,     LEA_REGS,     LEA_REGS,     LEA_REGS,
  LEA_REGS,     LEA_REGS,     LEA_REGS,     LEA_REGS,
  M16_REGS,     M16_REGS,     LEA_REGS,     LEA_REGS,
  LEA_REGS,     LEA_REGS,     LEA_REGS,     LEA_REGS,
  T_REG,        PIC_FN_ADDR_REG, LEA_REGS,  LEA_REGS,
  LEA_REGS,     LEA_REGS,     LEA_REGS,     LEA_REGS,
  FP_REGS,      FP_REGS,      FP_REGS,      FP_REGS,
  FP_REGS,      FP_REGS,      FP_REGS,      FP_REGS,
  FP_REGS,      FP_REGS,      FP_REGS,      FP_REGS,
  FP_REGS,      FP_REGS,      FP_REGS,      FP_REGS,
  FP_REGS,      FP_REGS,      FP_REGS,      FP_REGS,
  FP_REGS,      FP_REGS,      FP_REGS,      FP_REGS,
  FP_REGS,      FP_REGS,      FP_REGS,      FP_REGS,
  FP_REGS,      FP_REGS,      FP_REGS,      FP_REGS,
  MD0_REG,      MD1_REG,      NO_REGS,      ST_REGS,
  ST_REGS,      ST_REGS,      ST_REGS,      ST_REGS,
  ST_REGS,      ST_REGS,      ST_REGS,      NO_REGS,
  NO_REGS,      FRAME_REGS,   FRAME_REGS,   NO_REGS,
  COP0_REGS,    COP0_REGS,    COP0_REGS,    COP0_REGS,
  COP0_REGS,    COP0_REGS,    COP0_REGS,    COP0_REGS,
  COP0_REGS,    COP0_REGS,    COP0_REGS,    COP0_REGS,
  COP0_REGS,    COP0_REGS,    COP0_REGS,    COP0_REGS,
  COP0_REGS,    COP0_REGS,    COP0_REGS,    COP0_REGS,
  COP0_REGS,    COP0_REGS,    COP0_REGS,    COP0_REGS,
  COP0_REGS,    COP0_REGS,    COP0_REGS,    COP0_REGS,
  COP0_REGS,    COP0_REGS,    COP0_REGS,    COP0_REGS,
  COP2_REGS,    COP2_REGS,    COP2_REGS,    COP2_REGS,
  COP2_REGS,    COP2_REGS,    COP2_REGS,    COP2_REGS,
  COP2_REGS,    COP2_REGS,    COP2_REGS,    COP2_REGS,
  COP2_REGS,    COP2_REGS,    COP2_REGS,    COP2_REGS,
  COP2_REGS,    COP2_REGS,    COP2_REGS,    COP2_REGS,
  COP2_REGS,    COP2_REGS,    COP2_REGS,    COP2_REGS,
  COP2_REGS,    COP2_REGS,    COP2_REGS,    COP2_REGS,
  COP2_REGS,    COP2_REGS,    COP2_REGS,    COP2_REGS,
  COP3_REGS,    COP3_REGS,    COP3_REGS,    COP3_REGS,
  COP3_REGS,    COP3_REGS,    COP3_REGS,    COP3_REGS,
  COP3_REGS,    COP3_REGS,    COP3_REGS,    COP3_REGS,
  COP3_REGS,    COP3_REGS,    COP3_REGS,    COP3_REGS,
  COP3_REGS,    COP3_REGS,    COP3_REGS,    COP3_REGS,
  COP3_REGS,    COP3_REGS,    COP3_REGS,    COP3_REGS,
  COP3_REGS,    COP3_REGS,    COP3_REGS,    COP3_REGS,
  COP3_REGS,    COP3_REGS,    COP3_REGS,    COP3_REGS,
  DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS,
  DSP_ACC_REGS, DSP_ACC_REGS, ALL_REGS,     ALL_REGS,
  ALL_REGS,     ALL_REGS,     ALL_REGS,     ALL_REGS
};

/* The value of TARGET_ATTRIBUTE_TABLE.  */
static const struct attribute_spec mips_attribute_table[] = {
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  { "long_call",   0, 0, false, true,  true,  NULL },
  { "far",         0, 0, false, true,  true,  NULL },
  { "near",        0, 0, false, true,  true,  NULL },
  /* We would really like to treat "mips16" and "nomips16" as type
     attributes, but GCC doesn't provide the hooks we need to support
     the right conversion rules.  As declaration attributes, they affect
     code generation but don't carry other semantics.  */
  { "mips16",      0, 0, true,  false, false, NULL },
  { "nomips16",    0, 0, true,  false, false, NULL },
  /* Allow functions to be specified as interrupt handlers */
  { "interrupt",   0, 0, false, true,  true, NULL },
  { "use_shadow_register_set",    0, 0, false, true,  true, NULL },
  { "keep_interrupts_masked",     0, 0, false, true,  true, NULL },
  { "use_debug_exception_return", 0, 0, false, true,  true, NULL },
  { NULL,          0, 0, false, false, false, NULL }
};

/* A table describing all the processors GCC knows about.  Names are
   matched in the order listed.  The first mention of an ISA level is
   taken as the canonical name for that ISA.

   To ease comparison, please keep this table in the same order
   as GAS's mips_cpu_info_table.  Please also make sure that
   MIPS_ISA_LEVEL_SPEC and MIPS_ARCH_FLOAT_SPEC handle all -march
   options correctly.  */
static const struct mips_cpu_info mips_cpu_info_table[] = {
  /* Entries for generic ISAs.  */
  { "mips1", PROCESSOR_R3000, 1, 0 },
  { "mips2", PROCESSOR_R6000, 2, 0 },
  { "mips3", PROCESSOR_R4000, 3, 0 },
  { "mips4", PROCESSOR_R8000, 4, 0 },
  /* Prefer not to use branch-likely instructions for generic MIPS32rX
     and MIPS64rX code.  The instructions were officially deprecated
     in revisions 2 and earlier, but revision 3 is likely to downgrade
     that to a recommendation to avoid the instructions in code that
     isn't tuned to a specific processor.  */
  { "mips32", PROCESSOR_4KC, 32, PTF_AVOID_BRANCHLIKELY },
  { "mips32r2", PROCESSOR_M4K, 33, PTF_AVOID_BRANCHLIKELY },
  { "mips64", PROCESSOR_5KC, 64, PTF_AVOID_BRANCHLIKELY },
  /* ??? For now just tune the generic MIPS64r2 for 5KC as well.  */
  { "mips64r2", PROCESSOR_5KC, 65, PTF_AVOID_BRANCHLIKELY },

  /* MIPS I processors.  */
  { "r3000", PROCESSOR_R3000, 1, 0 },
  { "r2000", PROCESSOR_R3000, 1, 0 },
  { "r3900", PROCESSOR_R3900, 1, 0 },

  /* MIPS II processors.  */
  { "r6000", PROCESSOR_R6000, 2, 0 },

  /* MIPS III processors.  */
  { "r4000", PROCESSOR_R4000, 3, 0 },
  { "vr4100", PROCESSOR_R4100, 3, 0 },
  { "vr4111", PROCESSOR_R4111, 3, 0 },
  { "vr4120", PROCESSOR_R4120, 3, 0 },
  { "vr4130", PROCESSOR_R4130, 3, 0 },
  { "vr4300", PROCESSOR_R4300, 3, 0 },
  { "r4400", PROCESSOR_R4000, 3, 0 },
  { "r4600", PROCESSOR_R4600, 3, 0 },
  { "orion", PROCESSOR_R4600, 3, 0 },
  { "r4650", PROCESSOR_R4650, 3, 0 },
  /* ST Loongson 2E/2F processors.  */
  { "loongson2e", PROCESSOR_LOONGSON_2E, 3, PTF_AVOID_BRANCHLIKELY },
  { "loongson2f", PROCESSOR_LOONGSON_2F, 3, PTF_AVOID_BRANCHLIKELY },

  /* MIPS IV processors. */
  { "r8000", PROCESSOR_R8000, 4, 0 },
  { "r10000", PROCESSOR_R10000, 4, 0 },
  { "r12000", PROCESSOR_R10000, 4, 0 },
  { "r14000", PROCESSOR_R10000, 4, 0 },
  { "r16000", PROCESSOR_R10000, 4, 0 },
  { "vr5000", PROCESSOR_R5000, 4, 0 },
  { "vr5400", PROCESSOR_R5400, 4, 0 },
  { "vr5500", PROCESSOR_R5500, 4, PTF_AVOID_BRANCHLIKELY },
  { "rm7000", PROCESSOR_R7000, 4, 0 },
  { "rm9000", PROCESSOR_R9000, 4, 0 },

  /* MIPS32 processors.  */
  { "4kc", PROCESSOR_4KC, 32, 0 },
  { "4km", PROCESSOR_4KC, 32, 0 },
  { "4kp", PROCESSOR_4KP, 32, 0 },
  { "4ksc", PROCESSOR_4KC, 32, 0 },

  /* MIPS32 Release 2 processors.  */
  { "m4k", PROCESSOR_M4K, 33, 0 },
  { "4kec", PROCESSOR_4KC, 33, 0 },
  { "4kem", PROCESSOR_4KC, 33, 0 },
  { "4kep", PROCESSOR_4KP, 33, 0 },
  { "4ksd", PROCESSOR_4KC, 33, 0 },

  { "24kc", PROCESSOR_24KC, 33, 0 },
  { "24kf2_1", PROCESSOR_24KF2_1, 33, 0 },
  { "24kf", PROCESSOR_24KF2_1, 33, 0 },
  { "24kf1_1", PROCESSOR_24KF1_1, 33, 0 },
  { "24kfx", PROCESSOR_24KF1_1, 33, 0 },
  { "24kx", PROCESSOR_24KF1_1, 33, 0 },

  { "24kec", PROCESSOR_24KC, 33, 0 }, /* 24K with DSP.  */
  { "24kef2_1", PROCESSOR_24KF2_1, 33, 0 },
  { "24kef", PROCESSOR_24KF2_1, 33, 0 },
  { "24kef1_1", PROCESSOR_24KF1_1, 33, 0 },
  { "24kefx", PROCESSOR_24KF1_1, 33, 0 },
  { "24kex", PROCESSOR_24KF1_1, 33, 0 },

  { "34kc", PROCESSOR_24KC, 33, 0 }, /* 34K with MT/DSP.  */
  { "34kf2_1", PROCESSOR_24KF2_1, 33, 0 },
  { "34kf", PROCESSOR_24KF2_1, 33, 0 },
  { "34kf1_1", PROCESSOR_24KF1_1, 33, 0 },
  { "34kfx", PROCESSOR_24KF1_1, 33, 0 },
  { "34kx", PROCESSOR_24KF1_1, 33, 0 },

  { "74kc", PROCESSOR_74KC, 33, 0 }, /* 74K with DSPr2.  */
  { "74kf2_1", PROCESSOR_74KF2_1, 33, 0 },
  { "74kf", PROCESSOR_74KF2_1, 33, 0 },
  { "74kf1_1", PROCESSOR_74KF1_1, 33, 0 },
  { "74kfx", PROCESSOR_74KF1_1, 33, 0 },
  { "74kx", PROCESSOR_74KF1_1, 33, 0 },
  { "74kf3_2", PROCESSOR_74KF3_2, 33, 0 },

  { "1004kc", PROCESSOR_24KC, 33, 0 }, /* 1004K with MT/DSP.  */
  { "1004kf2_1", PROCESSOR_24KF2_1, 33, 0 },
  { "1004kf", PROCESSOR_24KF2_1, 33, 0 },
  { "1004kf1_1", PROCESSOR_24KF1_1, 33, 0 },

  /* MIPS64 processors.  */
  { "5kc", PROCESSOR_5KC, 64, 0 },
  { "5kf", PROCESSOR_5KF, 64, 0 },
  { "20kc", PROCESSOR_20KC, 64, PTF_AVOID_BRANCHLIKELY },
  { "sb1", PROCESSOR_SB1, 64, PTF_AVOID_BRANCHLIKELY },
  { "sb1a", PROCESSOR_SB1A, 64, PTF_AVOID_BRANCHLIKELY },
  { "sr71000", PROCESSOR_SR71000, 64, PTF_AVOID_BRANCHLIKELY },
  { "xlr", PROCESSOR_XLR, 64, 0 },

  /* MIPS64 Release 2 processors.  */
  { "octeon", PROCESSOR_OCTEON, 65, PTF_AVOID_BRANCHLIKELY }
};

/* Default costs.  If these are used for a processor we should look
   up the actual costs.  */
#define DEFAULT_COSTS COSTS_N_INSNS (6),  /* fp_add */       \
                      COSTS_N_INSNS (7),  /* fp_mult_sf */   \
                      COSTS_N_INSNS (8),  /* fp_mult_df */   \
                      COSTS_N_INSNS (23), /* fp_div_sf */    \
                      COSTS_N_INSNS (36), /* fp_div_df */    \
                      COSTS_N_INSNS (10), /* int_mult_si */  \
                      COSTS_N_INSNS (10), /* int_mult_di */  \
                      COSTS_N_INSNS (69), /* int_div_si */   \
                      COSTS_N_INSNS (69), /* int_div_di */   \
                                       2, /* branch_cost */  \
                                       4  /* memory_latency */

/* Floating-point costs for processors without an FPU.  Just assume that
   all floating-point libcalls are very expensive.  */
#define SOFT_FP_COSTS COSTS_N_INSNS (256), /* fp_add */       \
                      COSTS_N_INSNS (256), /* fp_mult_sf */   \
                      COSTS_N_INSNS (256), /* fp_mult_df */   \
                      COSTS_N_INSNS (256), /* fp_div_sf */    \
                      COSTS_N_INSNS (256)  /* fp_div_df */

/* Costs to use when optimizing for size.  */
static const struct mips_rtx_cost_data mips_rtx_cost_optimize_size = {
  COSTS_N_INSNS (1), /* fp_add */
  COSTS_N_INSNS (1), /* fp_mult_sf */
  COSTS_N_INSNS (1), /* fp_mult_df */
  COSTS_N_INSNS (1), /* fp_div_sf */
  COSTS_N_INSNS (1), /* fp_div_df */
  COSTS_N_INSNS (1), /* int_mult_si */
  COSTS_N_INSNS (1), /* int_mult_di */
  COSTS_N_INSNS (1), /* int_div_si */
  COSTS_N_INSNS (1), /* int_div_di */
  2,                 /* branch_cost */
  4                  /* memory_latency */
};

/* Costs to use when optimizing for speed, indexed by processor.  */
static const struct mips_rtx_cost_data mips_rtx_cost_data[PROCESSOR_MAX] = {
  { /* R3000 */
    COSTS_N_INSNS (2),  /* fp_add */
    COSTS_N_INSNS (4),  /* fp_mult_sf */
    COSTS_N_INSNS (5),  /* fp_mult_df */
    COSTS_N_INSNS (12), /* fp_div_sf */
    COSTS_N_INSNS (19), /* fp_div_df */
    COSTS_N_INSNS (12), /* int_mult_si */
    COSTS_N_INSNS (12), /* int_mult_di */
    COSTS_N_INSNS (35), /* int_div_si */
    COSTS_N_INSNS (35), /* int_div_di */
    1,                  /* branch_cost */
    4                   /* memory_latency */
  },
  { /* 4KC */
    SOFT_FP_COSTS,
    COSTS_N_INSNS (6),  /* int_mult_si */
    COSTS_N_INSNS (6),  /* int_mult_di */
    COSTS_N_INSNS (36), /* int_div_si */
    COSTS_N_INSNS (36), /* int_div_di */
    1,                  /* branch_cost */
    4                   /* memory_latency */
  },
  { /* 4KP */
    SOFT_FP_COSTS,
    COSTS_N_INSNS (36), /* int_mult_si */
    COSTS_N_INSNS (36), /* int_mult_di */
    COSTS_N_INSNS (37), /* int_div_si */
    COSTS_N_INSNS (37), /* int_div_di */
    1,                  /* branch_cost */
    4                   /* memory_latency */
  },
  { /* 5KC */
    SOFT_FP_COSTS,
    COSTS_N_INSNS (4),  /* int_mult_si */
    COSTS_N_INSNS (11), /* int_mult_di */
    COSTS_N_INSNS (36), /* int_div_si */
    COSTS_N_INSNS (68), /* int_div_di */
    1,                  /* branch_cost */
    4                   /* memory_latency */
  },
  { /* 5KF */
    COSTS_N_INSNS (4),  /* fp_add */
    COSTS_N_INSNS (4),  /* fp_mult_sf */
    COSTS_N_INSNS (5),  /* fp_mult_df */
    COSTS_N_INSNS (17), /* fp_div_sf */
    COSTS_N_INSNS (32), /* fp_div_df */
    COSTS_N_INSNS (4),  /* int_mult_si */
    COSTS_N_INSNS (11), /* int_mult_di */
    COSTS_N_INSNS (36), /* int_div_si */
    COSTS_N_INSNS (68), /* int_div_di */
    1,                  /* branch_cost */
    4                   /* memory_latency */
  },
  { /* 20KC */
    COSTS_N_INSNS (4),  /* fp_add */
    COSTS_N_INSNS (4),  /* fp_mult_sf */
    COSTS_N_INSNS (5),  /* fp_mult_df */
    COSTS_N_INSNS (17), /* fp_div_sf */
    COSTS_N_INSNS (32), /* fp_div_df */
    COSTS_N_INSNS (4),  /* int_mult_si */
    COSTS_N_INSNS (7),  /* int_mult_di */
    COSTS_N_INSNS (42), /* int_div_si */
    COSTS_N_INSNS (72), /* int_div_di */
    1,                  /* branch_cost */
    4                   /* memory_latency */
  },
  { /* 24KC */
    SOFT_FP_COSTS,
    COSTS_N_INSNS (5),  /* int_mult_si */
    COSTS_N_INSNS (5),  /* int_mult_di */
    COSTS_N_INSNS (41), /* int_div_si */
    COSTS_N_INSNS (41), /* int_div_di */
    1,                  /* branch_cost */
    4                   /* memory_latency */
  },
  { /* 24KF2_1 */
    COSTS_N_INSNS (8),  /* fp_add */
    COSTS_N_INSNS (8),  /* fp_mult_sf */
    COSTS_N_INSNS (10), /* fp_mult_df */
    COSTS_N_INSNS (34), /* fp_div_sf */
    COSTS_N_INSNS (64), /* fp_div_df */
    COSTS_N_INSNS (5),  /* int_mult_si */
    COSTS_N_INSNS (5),  /* int_mult_di */
    COSTS_N_INSNS (41), /* int_div_si */
    COSTS_N_INSNS (41), /* int_div_di */
    1,                  /* branch_cost */
    4                   /* memory_latency */
  },
  { /* 24KF1_1 */
    COSTS_N_INSNS (4),  /* fp_add */
    COSTS_N_INSNS (4),  /* fp_mult_sf */
    COSTS_N_INSNS (5),  /* fp_mult_df */
    COSTS_N_INSNS (17), /* fp_div_sf */
    COSTS_N_INSNS (32), /* fp_div_df */
    COSTS_N_INSNS (5),  /* int_mult_si */
    COSTS_N_INSNS (5),  /* int_mult_di */
    COSTS_N_INSNS (41), /* int_div_si */
    COSTS_N_INSNS (41), /* int_div_di */
    1,                  /* branch_cost */
    4                   /* memory_latency */
  },
  { /* 74KC */
    SOFT_FP_COSTS,
    COSTS_N_INSNS (5),  /* int_mult_si */
    COSTS_N_INSNS (5),  /* int_mult_di */
    COSTS_N_INSNS (41), /* int_div_si */
    COSTS_N_INSNS (41), /* int_div_di */
    1,                  /* branch_cost */
    4                   /* memory_latency */
  },
  { /* 74KF2_1 */
    COSTS_N_INSNS (8),  /* fp_add */
    COSTS_N_INSNS (8),  /* fp_mult_sf */
    COSTS_N_INSNS (10), /* fp_mult_df */
    COSTS_N_INSNS (34), /* fp_div_sf */
    COSTS_N_INSNS (64), /* fp_div_df */
    COSTS_N_INSNS (5),  /* int_mult_si */
    COSTS_N_INSNS (5),  /* int_mult_di */
    COSTS_N_INSNS (41), /* int_div_si */
    COSTS_N_INSNS (41), /* int_div_di */
    1,                  /* branch_cost */
    4                   /* memory_latency */
  },
  { /* 74KF1_1 */
    COSTS_N_INSNS (4),  /* fp_add */
    COSTS_N_INSNS (4),  /* fp_mult_sf */
    COSTS_N_INSNS (5),  /* fp_mult_df */
    COSTS_N_INSNS (17), /* fp_div_sf */
    COSTS_N_INSNS (32), /* fp_div_df */
    COSTS_N_INSNS (5),  /* int_mult_si */
    COSTS_N_INSNS (5),  /* int_mult_di */
    COSTS_N_INSNS (41), /* int_div_si */
    COSTS_N_INSNS (41), /* int_div_di */
    1,                  /* branch_cost */
    4                   /* memory_latency */
  },
  { /* 74KF3_2 */
    COSTS_N_INSNS (6),  /* fp_add */
    COSTS_N_INSNS (6),  /* fp_mult_sf */
    COSTS_N_INSNS (7),  /* fp_mult_df */
    COSTS_N_INSNS (25), /* fp_div_sf */
    COSTS_N_INSNS (48), /* fp_div_df */
    COSTS_N_INSNS (5),  /* int_mult_si */
    COSTS_N_INSNS (5),  /* int_mult_di */
    COSTS_N_INSNS (41), /* int_div_si */
    COSTS_N_INSNS (41), /* int_div_di */
    1,                  /* branch_cost */
    4                   /* memory_latency */
  },
  { /* Loongson-2E */
    DEFAULT_COSTS
  },
  { /* Loongson-2F */
    DEFAULT_COSTS
  },
  { /* M4k */
    DEFAULT_COSTS
  },
  /* Octeon */
  {
    SOFT_FP_COSTS,
    COSTS_N_INSNS (5),  /* int_mult_si */
    COSTS_N_INSNS (5),  /* int_mult_di */
    COSTS_N_INSNS (72), /* int_div_si */
    COSTS_N_INSNS (72), /* int_div_di */
    1,                  /* branch_cost */
    4                   /* memory_latency */
  },
  { /* R3900 */
    COSTS_N_INSNS (2),  /* fp_add */
    COSTS_N_INSNS (4),  /* fp_mult_sf */
    COSTS_N_INSNS (5),  /* fp_mult_df */
    COSTS_N_INSNS (12), /* fp_div_sf */
    COSTS_N_INSNS (19), /* fp_div_df */
    COSTS_N_INSNS (2),  /* int_mult_si */
    COSTS_N_INSNS (2),  /* int_mult_di */
    COSTS_N_INSNS (35), /* int_div_si */
    COSTS_N_INSNS (35), /* int_div_di */
    1,                  /* branch_cost */
    4                   /* memory_latency */
  },
  { /* R6000 */
    COSTS_N_INSNS (3),  /* fp_add */
    COSTS_N_INSNS (5),  /* fp_mult_sf */
    COSTS_N_INSNS (6),  /* fp_mult_df */
    COSTS_N_INSNS (15), /* fp_div_sf */
    COSTS_N_INSNS (16), /* fp_div_df */
    COSTS_N_INSNS (17), /* int_mult_si */
    COSTS_N_INSNS (17), /* int_mult_di */
    COSTS_N_INSNS (38), /* int_div_si */
    COSTS_N_INSNS (38), /* int_div_di */
    2,                  /* branch_cost */
    6                   /* memory_latency */
  },
  { /* R4000 */
    COSTS_N_INSNS (6),  /* fp_add */
    COSTS_N_INSNS (7),  /* fp_mult_sf */
    COSTS_N_INSNS (8),  /* fp_mult_df */
    COSTS_N_INSNS (23), /* fp_div_sf */
    COSTS_N_INSNS (36), /* fp_div_df */
    COSTS_N_INSNS (10), /* int_mult_si */
    COSTS_N_INSNS (10), /* int_mult_di */
    COSTS_N_INSNS (69), /* int_div_si */
    COSTS_N_INSNS (69), /* int_div_di */
    2,                  /* branch_cost */
    6                   /* memory_latency */
  },
  { /* R4100 */
    DEFAULT_COSTS
  },
  { /* R4111 */
    DEFAULT_COSTS
  },
  { /* R4120 */
    DEFAULT_COSTS
  },
  { /* R4130 */
    /* The only costs that appear to be updated here are
       integer multiplication.  */
    SOFT_FP_COSTS,
    COSTS_N_INSNS (4),  /* int_mult_si */
    COSTS_N_INSNS (6),  /* int_mult_di */
    COSTS_N_INSNS (69), /* int_div_si */
    COSTS_N_INSNS (69), /* int_div_di */
    1,                  /* branch_cost */
    4                   /* memory_latency */
  },
  { /* R4300 */
    DEFAULT_COSTS
  },
  { /* R4600 */
    DEFAULT_COSTS
  },
  { /* R4650 */
    DEFAULT_COSTS
  },
  { /* R5000 */
    COSTS_N_INSNS (6),  /* fp_add */
    COSTS_N_INSNS (4),  /* fp_mult_sf */
    COSTS_N_INSNS (5),  /* fp_mult_df */
    COSTS_N_INSNS (23), /* fp_div_sf */
    COSTS_N_INSNS (36), /* fp_div_df */
    COSTS_N_INSNS (5),  /* int_mult_si */
    COSTS_N_INSNS (5),  /* int_mult_di */
    COSTS_N_INSNS (36), /* int_div_si */
    COSTS_N_INSNS (36), /* int_div_di */
    1,                  /* branch_cost */
    4                   /* memory_latency */
  },
  { /* R5400 */
    COSTS_N_INSNS (6),  /* fp_add */
    COSTS_N_INSNS (5),  /* fp_mult_sf */
    COSTS_N_INSNS (6),  /* fp_mult_df */
    COSTS_N_INSNS (30), /* fp_div_sf */
    COSTS_N_INSNS (59), /* fp_div_df */
    COSTS_N_INSNS (3),  /* int_mult_si */
    COSTS_N_INSNS (4),  /* int_mult_di */
    COSTS_N_INSNS (42), /* int_div_si */
    COSTS_N_INSNS (74), /* int_div_di */
    1,                  /* branch_cost */
    4                   /* memory_latency */
  },
  { /* R5500 */
    COSTS_N_INSNS (6),  /* fp_add */
    COSTS_N_INSNS (5),  /* fp_mult_sf */
    COSTS_N_INSNS (6),  /* fp_mult_df */
    COSTS_N_INSNS (30), /* fp_div_sf */
    COSTS_N_INSNS (59), /* fp_div_df */
    COSTS_N_INSNS (5),  /* int_mult_si */
    COSTS_N_INSNS (9),  /* int_mult_di */
    COSTS_N_INSNS (42), /* int_div_si */
    COSTS_N_INSNS (74), /* int_div_di */
    1,                  /* branch_cost */
    4                   /* memory_latency */
  },
  { /* R7000 */
    /* The only costs that are changed here are
       integer multiplication.  */
    COSTS_N_INSNS (6),  /* fp_add */
    COSTS_N_INSNS (7),  /* fp_mult_sf */
    COSTS_N_INSNS (8),  /* fp_mult_df */
    COSTS_N_INSNS (23), /* fp_div_sf */
    COSTS_N_INSNS (36), /* fp_div_df */
    COSTS_N_INSNS (5),  /* int_mult_si */
    COSTS_N_INSNS (9),  /* int_mult_di */
    COSTS_N_INSNS (69), /* int_div_si */
    COSTS_N_INSNS (69), /* int_div_di */
    1,                  /* branch_cost */
    4                   /* memory_latency */
  },
  { /* R8000 */
    DEFAULT_COSTS
  },
  { /* R9000 */
    /* The only costs that are changed here are
       integer multiplication.  */
    COSTS_N_INSNS (6),  /* fp_add */
    COSTS_N_INSNS (7),  /* fp_mult_sf */
    COSTS_N_INSNS (8),  /* fp_mult_df */
    COSTS_N_INSNS (23), /* fp_div_sf */
    COSTS_N_INSNS (36), /* fp_div_df */
    COSTS_N_INSNS (3),  /* int_mult_si */
    COSTS_N_INSNS (8),  /* int_mult_di */
    COSTS_N_INSNS (69), /* int_div_si */
    COSTS_N_INSNS (69), /* int_div_di */
    1,                  /* branch_cost */
    4                   /* memory_latency */
  },
  { /* R1x000 */
    COSTS_N_INSNS (2),  /* fp_add */
    COSTS_N_INSNS (2),  /* fp_mult_sf */
    COSTS_N_INSNS (2),  /* fp_mult_df */
    COSTS_N_INSNS (12), /* fp_div_sf */
    COSTS_N_INSNS (19), /* fp_div_df */
    COSTS_N_INSNS (5),  /* int_mult_si */
    COSTS_N_INSNS (9),  /* int_mult_di */
    COSTS_N_INSNS (34), /* int_div_si */
    COSTS_N_INSNS (66), /* int_div_di */
    1,                  /* branch_cost */
    4                   /* memory_latency */
  },
  { /* SB1 */
    /* These costs are the same as the SB-1A below.  */
    COSTS_N_INSNS (4),  /* fp_add */
    COSTS_N_INSNS (4),  /* fp_mult_sf */
    COSTS_N_INSNS (4),  /* fp_mult_df */
    COSTS_N_INSNS (24), /* fp_div_sf */
    COSTS_N_INSNS (32), /* fp_div_df */
    COSTS_N_INSNS (3),  /* int_mult_si */
    COSTS_N_INSNS (4),  /* int_mult_di */
    COSTS_N_INSNS (36), /* int_div_si */
    COSTS_N_INSNS (68), /* int_div_di */
    1,                  /* branch_cost */
    4                   /* memory_latency */
  },
  { /* SB1-A */
    /* These costs are the same as the SB-1 above.  */
    COSTS_N_INSNS (4),  /* fp_add */
    COSTS_N_INSNS (4),  /* fp_mult_sf */
    COSTS_N_INSNS (4),  /* fp_mult_df */
    COSTS_N_INSNS (24), /* fp_div_sf */
    COSTS_N_INSNS (32), /* fp_div_df */
    COSTS_N_INSNS (3),  /* int_mult_si */
    COSTS_N_INSNS (4),  /* int_mult_di */
    COSTS_N_INSNS (36), /* int_div_si */
    COSTS_N_INSNS (68), /* int_div_di */
    1,                  /* branch_cost */
    4                   /* memory_latency */
  },
  { /* SR71000 */
    DEFAULT_COSTS
  },
  { /* XLR */
    SOFT_FP_COSTS,
    COSTS_N_INSNS (8),  /* int_mult_si */
    COSTS_N_INSNS (8),  /* int_mult_di */
    COSTS_N_INSNS (72), /* int_div_si */
    COSTS_N_INSNS (72), /* int_div_di */
    1,                  /* branch_cost */
    4                   /* memory_latency */
  }
};

static rtx mips_find_pic_call_symbol (rtx, rtx);

/* This hash table keeps track of implicit "mips16" and "nomips16" attributes
   for -mflip_mips16.  It maps decl names onto a boolean mode setting.  */
struct GTY (()) mflip_mips16_entry {
  const char *name;
  bool mips16_p;
};
static GTY ((param_is (struct mflip_mips16_entry))) htab_t mflip_mips16_htab;

/* Hash table callbacks for mflip_mips16_htab.  */

static hashval_t
mflip_mips16_htab_hash (const void *entry)
{
  return htab_hash_string (((const struct mflip_mips16_entry *) entry)->name);
}

static int
mflip_mips16_htab_eq (const void *entry, const void *name)
{
  return strcmp (((const struct mflip_mips16_entry *) entry)->name,
                 (const char *) name) == 0;
}

/* True if -mflip-mips16 should next add an attribute for the default MIPS16
   mode, false if it should next add an attribute for the opposite mode.  */
static GTY(()) bool mips16_flipper;

/* DECL is a function that needs a default "mips16" or "nomips16" attribute
   for -mflip-mips16.  Return true if it should use "mips16" and false if
   it should use "nomips16".  */

static bool
mflip_mips16_use_mips16_p (tree decl)
{
  struct mflip_mips16_entry *entry;
  const char *name;
  hashval_t hash;
  void **slot;

  /* Use the opposite of the command-line setting for anonymous decls.  */
  if (!DECL_NAME (decl))
    return !mips_base_mips16;

  if (!mflip_mips16_htab)
    mflip_mips16_htab = htab_create_ggc (37, mflip_mips16_htab_hash,
                                         mflip_mips16_htab_eq, NULL);

  name = IDENTIFIER_POINTER (DECL_NAME (decl));
  hash = htab_hash_string (name);
  slot = htab_find_slot_with_hash (mflip_mips16_htab, name, hash, INSERT);
  entry = (struct mflip_mips16_entry *) *slot;
  if (!entry)
    {
      mips16_flipper = !mips16_flipper;
      entry = GGC_NEW (struct mflip_mips16_entry);
      entry->name = name;
      entry->mips16_p = mips16_flipper ? !mips_base_mips16 : mips_base_mips16;
      *slot = entry;
    }
  return entry->mips16_p;
}

/* Predicates to test for presence of "near" and "far"/"long_call"
   attributes on the given TYPE.  */

static bool
mips_near_type_p (const_tree type)
{
  return lookup_attribute ("near", TYPE_ATTRIBUTES (type)) != NULL;
}

static bool
mips_far_type_p (const_tree type)
{
  return (lookup_attribute ("long_call", TYPE_ATTRIBUTES (type)) != NULL
          || lookup_attribute ("far", TYPE_ATTRIBUTES (type)) != NULL);
}

/* Similar predicates for "mips16"/"nomips16" function attributes.  */

static bool
mips_mips16_decl_p (const_tree decl)
{
  return lookup_attribute ("mips16", DECL_ATTRIBUTES (decl)) != NULL;
}

static bool
mips_nomips16_decl_p (const_tree decl)
{
  return lookup_attribute ("nomips16", DECL_ATTRIBUTES (decl)) != NULL;
}

/* Check if the interrupt attribute is set for a function.  */

static bool
mips_interrupt_type_p (tree type)
{
  return lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type)) != NULL;
}

/* Check if the attribute to use shadow register set is set for a function.  */

static bool
mips_use_shadow_register_set_p (tree type)
{
  return lookup_attribute ("use_shadow_register_set",
                           TYPE_ATTRIBUTES (type)) != NULL;
}

/* Check if the attribute to keep interrupts masked is set for a function.  */

static bool
mips_keep_interrupts_masked_p (tree type)
{
  return lookup_attribute ("keep_interrupts_masked",
                           TYPE_ATTRIBUTES (type)) != NULL;
}

/* Check if the attribute to use debug exception return is set for
   a function.  */

static bool
mips_use_debug_exception_return_p (tree type)
{
  return lookup_attribute ("use_debug_exception_return",
                           TYPE_ATTRIBUTES (type)) != NULL;
}

/* Return true if function DECL is a MIPS16 function.  Return the ambient
   setting if DECL is null.  */

static bool
mips_use_mips16_mode_p (tree decl)
{
  if (decl)
    {
      /* Nested functions must use the same frame pointer as their
         parent and must therefore use the same ISA mode.  */
      tree parent = decl_function_context (decl);
      if (parent)
        decl = parent;
      if (mips_mips16_decl_p (decl))
        return true;
      if (mips_nomips16_decl_p (decl))
        return false;
    }
  return mips_base_mips16;
}

/* Implement TARGET_COMP_TYPE_ATTRIBUTES.  */

static int
mips_comp_type_attributes (const_tree type1, const_tree type2)
{
  /* Disallow mixed near/far attributes.  */
  if (mips_far_type_p (type1) && mips_near_type_p (type2))
    return 0;
  if (mips_near_type_p (type1) && mips_far_type_p (type2))
    return 0;
  return 1;
}

/* Implement TARGET_INSERT_ATTRIBUTES.  */

static void
mips_insert_attributes (tree decl, tree *attributes)
{
  const char *name;
  bool mips16_p, nomips16_p;

  /* Check for "mips16" and "nomips16" attributes.  */
  mips16_p = lookup_attribute ("mips16", *attributes) != NULL;
  nomips16_p = lookup_attribute ("nomips16", *attributes) != NULL;
  if (TREE_CODE (decl) != FUNCTION_DECL)
    {
      if (mips16_p)
        error ("%qs attribute only applies to functions", "mips16");
      if (nomips16_p)
        error ("%qs attribute only applies to functions", "nomips16");
    }
  else
    {
      mips16_p |= mips_mips16_decl_p (decl);
      nomips16_p |= mips_nomips16_decl_p (decl);
      if (mips16_p || nomips16_p)
        {
          /* DECL cannot be simultaneously "mips16" and "nomips16".  */
          if (mips16_p && nomips16_p)
            error ("%qE cannot have both %<mips16%> and "
                   "%<nomips16%> attributes",
                   DECL_NAME (decl));
        }
      else if (TARGET_FLIP_MIPS16 && !DECL_ARTIFICIAL (decl))
        {
          /* Implement -mflip-mips16.  If DECL has neither a "nomips16" nor a
             "mips16" attribute, arbitrarily pick one.  We must pick the same
             setting for duplicate declarations of a function.  */
          name = mflip_mips16_use_mips16_p (decl) ? "mips16" : "nomips16";
          *attributes = tree_cons (get_identifier (name), NULL, *attributes);
        }
    }
}

/* Implement TARGET_MERGE_DECL_ATTRIBUTES.  */

static tree
mips_merge_decl_attributes (tree olddecl, tree newdecl)
{
  /* The decls' "mips16" and "nomips16" attributes must match exactly.  */
  if (mips_mips16_decl_p (olddecl) != mips_mips16_decl_p (newdecl))
    error ("%qE redeclared with conflicting %qs attributes",
           DECL_NAME (newdecl), "mips16");
  if (mips_nomips16_decl_p (olddecl) != mips_nomips16_decl_p (newdecl))
    error ("%qE redeclared with conflicting %qs attributes",
           DECL_NAME (newdecl), "nomips16");

  return merge_attributes (DECL_ATTRIBUTES (olddecl),
                           DECL_ATTRIBUTES (newdecl));
}

/* If X is a PLUS of a CONST_INT, return the two terms in *BASE_PTR
   and *OFFSET_PTR.  Return X in *BASE_PTR and 0 in *OFFSET_PTR otherwise.  */

static void
mips_split_plus (rtx x, rtx *base_ptr, HOST_WIDE_INT *offset_ptr)
{
  if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
    {
      *base_ptr = XEXP (x, 0);
      *offset_ptr = INTVAL (XEXP (x, 1));
    }
  else
    {
      *base_ptr = x;
      *offset_ptr = 0;
    }
}

static unsigned int mips_build_integer (struct mips_integer_op *,
|
1406 |
|
|
unsigned HOST_WIDE_INT);
|
1407 |
|
|
|
1408 |
|
|
/* A subroutine of mips_build_integer, with the same interface.
|
1409 |
|
|
Assume that the final action in the sequence should be a left shift. */
|
1410 |
|
|
|
1411 |
|
|
static unsigned int
|
1412 |
|
|
mips_build_shift (struct mips_integer_op *codes, HOST_WIDE_INT value)
|
1413 |
|
|
{
|
1414 |
|
|
unsigned int i, shift;
|
1415 |
|
|
|
1416 |
|
|
/* Shift VALUE right until its lowest bit is set. Shift arithmetically
|
1417 |
|
|
since signed numbers are easier to load than unsigned ones. */
|
1418 |
|
|
shift = 0;
|
1419 |
|
|
while ((value & 1) == 0)
|
1420 |
|
|
value /= 2, shift++;
|
1421 |
|
|
|
1422 |
|
|
i = mips_build_integer (codes, value);
|
1423 |
|
|
codes[i].code = ASHIFT;
|
1424 |
|
|
codes[i].value = shift;
|
1425 |
|
|
return i + 1;
|
1426 |
|
|
}
|
1427 |
|
|
|
1428 |
|
|
/* As for mips_build_shift, but assume that the final action will be
|
1429 |
|
|
an IOR or PLUS operation. */
|
1430 |
|
|
|
1431 |
|
|
static unsigned int
|
1432 |
|
|
mips_build_lower (struct mips_integer_op *codes, unsigned HOST_WIDE_INT value)
|
1433 |
|
|
{
|
1434 |
|
|
unsigned HOST_WIDE_INT high;
|
1435 |
|
|
unsigned int i;
|
1436 |
|
|
|
1437 |
|
|
high = value & ~(unsigned HOST_WIDE_INT) 0xffff;
|
1438 |
|
|
if (!LUI_OPERAND (high) && (value & 0x18000) == 0x18000)
|
1439 |
|
|
{
|
1440 |
|
|
/* The constant is too complex to load with a simple LUI/ORI pair,
|
1441 |
|
|
so we want to give the recursive call as many trailing zeros as
|
1442 |
|
|
possible. In this case, we know bit 16 is set and that the
|
1443 |
|
|
low 16 bits form a negative number. If we subtract that number
|
1444 |
|
|
from VALUE, we will clear at least the lowest 17 bits, maybe more. */
|
1445 |
|
|
i = mips_build_integer (codes, CONST_HIGH_PART (value));
|
1446 |
|
|
codes[i].code = PLUS;
|
1447 |
|
|
codes[i].value = CONST_LOW_PART (value);
|
1448 |
|
|
}
|
1449 |
|
|
else
|
1450 |
|
|
{
|
1451 |
|
|
/* Either this is a simple LUI/ORI pair, or clearing the lowest 16
|
1452 |
|
|
bits gives a value with at least 17 trailing zeros. */
|
1453 |
|
|
i = mips_build_integer (codes, high);
|
1454 |
|
|
codes[i].code = IOR;
|
1455 |
|
|
codes[i].value = value & 0xffff;
|
1456 |
|
|
}
|
1457 |
|
|
return i + 1;
|
1458 |
|
|
}
|
1459 |
|
|
|
1460 |
|
|
/* Fill CODES with a sequence of rtl operations to load VALUE.
|
1461 |
|
|
Return the number of operations needed. */
|
1462 |
|
|
|
1463 |
|
|
static unsigned int
|
1464 |
|
|
mips_build_integer (struct mips_integer_op *codes,
|
1465 |
|
|
unsigned HOST_WIDE_INT value)
|
1466 |
|
|
{
|
1467 |
|
|
if (SMALL_OPERAND (value)
|
1468 |
|
|
|| SMALL_OPERAND_UNSIGNED (value)
|
1469 |
|
|
|| LUI_OPERAND (value))
|
1470 |
|
|
{
|
1471 |
|
|
/* The value can be loaded with a single instruction. */
|
1472 |
|
|
codes[0].code = UNKNOWN;
|
1473 |
|
|
codes[0].value = value;
|
1474 |
|
|
return 1;
|
1475 |
|
|
}
|
1476 |
|
|
else if ((value & 1) != 0 || LUI_OPERAND (CONST_HIGH_PART (value)))
|
1477 |
|
|
{
|
1478 |
|
|
/* Either the constant is a simple LUI/ORI combination or its
|
1479 |
|
|
lowest bit is set. We don't want to shift in this case. */
|
1480 |
|
|
return mips_build_lower (codes, value);
|
1481 |
|
|
}
|
1482 |
|
|
else if ((value & 0xffff) == 0)
|
1483 |
|
|
{
|
1484 |
|
|
/* The constant will need at least three actions. The lowest
|
1485 |
|
|
16 bits are clear, so the final action will be a shift. */
|
1486 |
|
|
return mips_build_shift (codes, value);
|
1487 |
|
|
}
|
1488 |
|
|
else
|
1489 |
|
|
{
|
1490 |
|
|
/* The final action could be a shift, add or inclusive OR.
|
1491 |
|
|
Rather than use a complex condition to select the best
|
1492 |
|
|
approach, try both mips_build_shift and mips_build_lower
|
1493 |
|
|
and pick the one that gives the shortest sequence.
|
1494 |
|
|
Note that this case is only used once per constant. */
|
1495 |
|
|
struct mips_integer_op alt_codes[MIPS_MAX_INTEGER_OPS];
|
1496 |
|
|
unsigned int cost, alt_cost;
|
1497 |
|
|
|
1498 |
|
|
cost = mips_build_shift (codes, value);
|
1499 |
|
|
alt_cost = mips_build_lower (alt_codes, value);
|
1500 |
|
|
if (alt_cost < cost)
|
1501 |
|
|
{
|
1502 |
|
|
memcpy (codes, alt_codes, alt_cost * sizeof (codes[0]));
|
1503 |
|
|
cost = alt_cost;
|
1504 |
|
|
}
|
1505 |
|
|
return cost;
|
1506 |
|
|
}
|
1507 |
|
|
}
|
1508 |
|
|
|
1509 |
|
|
/* Return true if symbols of type TYPE require a GOT access. */
|
1510 |
|
|
|
1511 |
|
|
static bool
|
1512 |
|
|
mips_got_symbol_type_p (enum mips_symbol_type type)
|
1513 |
|
|
{
|
1514 |
|
|
switch (type)
|
1515 |
|
|
{
|
1516 |
|
|
case SYMBOL_GOT_PAGE_OFST:
|
1517 |
|
|
case SYMBOL_GOT_DISP:
|
1518 |
|
|
return true;
|
1519 |
|
|
|
1520 |
|
|
default:
|
1521 |
|
|
return false;
|
1522 |
|
|
}
|
1523 |
|
|
}
|
1524 |
|
|
|
1525 |
|
|
/* Return true if X is a thread-local symbol. */
|
1526 |
|
|
|
1527 |
|
|
static bool
|
1528 |
|
|
mips_tls_symbol_p (rtx x)
|
1529 |
|
|
{
|
1530 |
|
|
return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
|
1531 |
|
|
}
|
1532 |
|
|
|
1533 |
|
|
/* Return true if SYMBOL_REF X is associated with a global symbol
|
1534 |
|
|
(in the STB_GLOBAL sense). */
|
1535 |
|
|
|
1536 |
|
|
static bool
|
1537 |
|
|
mips_global_symbol_p (const_rtx x)
|
1538 |
|
|
{
|
1539 |
|
|
const_tree decl = SYMBOL_REF_DECL (x);
|
1540 |
|
|
|
1541 |
|
|
if (!decl)
|
1542 |
|
|
return !SYMBOL_REF_LOCAL_P (x) || SYMBOL_REF_EXTERNAL_P (x);
|
1543 |
|
|
|
1544 |
|
|
/* Weakref symbols are not TREE_PUBLIC, but their targets are global
|
1545 |
|
|
or weak symbols. Relocations in the object file will be against
|
1546 |
|
|
the target symbol, so it's that symbol's binding that matters here. */
|
1547 |
|
|
return DECL_P (decl) && (TREE_PUBLIC (decl) || DECL_WEAK (decl));
|
1548 |
|
|
}
|
1549 |
|
|
|
1550 |
|
|
/* Return true if function X is a libgcc MIPS16 stub function. */
|
1551 |
|
|
|
1552 |
|
|
static bool
|
1553 |
|
|
mips16_stub_function_p (const_rtx x)
|
1554 |
|
|
{
|
1555 |
|
|
return (GET_CODE (x) == SYMBOL_REF
|
1556 |
|
|
&& strncmp (XSTR (x, 0), "__mips16_", 9) == 0);
|
1557 |
|
|
}
|
1558 |
|
|
|
1559 |
|
|
/* Return true if function X is a locally-defined and locally-binding
|
1560 |
|
|
MIPS16 function. */
|
1561 |
|
|
|
1562 |
|
|
static bool
|
1563 |
|
|
mips16_local_function_p (const_rtx x)
|
1564 |
|
|
{
|
1565 |
|
|
return (GET_CODE (x) == SYMBOL_REF
|
1566 |
|
|
&& SYMBOL_REF_LOCAL_P (x)
|
1567 |
|
|
&& !SYMBOL_REF_EXTERNAL_P (x)
|
1568 |
|
|
&& mips_use_mips16_mode_p (SYMBOL_REF_DECL (x)));
|
1569 |
|
|
}
|
1570 |
|
|
|
1571 |
|
|
/* Return true if SYMBOL_REF X binds locally. */
|
1572 |
|
|
|
1573 |
|
|
static bool
|
1574 |
|
|
mips_symbol_binds_local_p (const_rtx x)
|
1575 |
|
|
{
|
1576 |
|
|
return (SYMBOL_REF_DECL (x)
|
1577 |
|
|
? targetm.binds_local_p (SYMBOL_REF_DECL (x))
|
1578 |
|
|
: SYMBOL_REF_LOCAL_P (x));
|
1579 |
|
|
}
|
1580 |
|
|
|
1581 |
|
|
/* Return true if rtx constants of mode MODE should be put into a small
|
1582 |
|
|
data section. */
|
1583 |
|
|
|
1584 |
|
|
static bool
|
1585 |
|
|
mips_rtx_constant_in_small_data_p (enum machine_mode mode)
|
1586 |
|
|
{
|
1587 |
|
|
return (!TARGET_EMBEDDED_DATA
|
1588 |
|
|
&& TARGET_LOCAL_SDATA
|
1589 |
|
|
&& GET_MODE_SIZE (mode) <= mips_small_data_threshold);
|
1590 |
|
|
}
|
1591 |
|
|
|
1592 |
|
|
/* Return true if X should not be moved directly into register $25.
|
1593 |
|
|
We need this because many versions of GAS will treat "la $25,foo" as
|
1594 |
|
|
part of a call sequence and so allow a global "foo" to be lazily bound. */
|
1595 |
|
|
|
1596 |
|
|
bool
|
1597 |
|
|
mips_dangerous_for_la25_p (rtx x)
|
1598 |
|
|
{
|
1599 |
|
|
return (!TARGET_EXPLICIT_RELOCS
|
1600 |
|
|
&& TARGET_USE_GOT
|
1601 |
|
|
&& GET_CODE (x) == SYMBOL_REF
|
1602 |
|
|
&& mips_global_symbol_p (x));
|
1603 |
|
|
}
|
1604 |
|
|
|
1605 |
|
|
/* Return true if calls to X might need $25 to be valid on entry. */
|
1606 |
|
|
|
1607 |
|
|
bool
|
1608 |
|
|
mips_use_pic_fn_addr_reg_p (const_rtx x)
|
1609 |
|
|
{
|
1610 |
|
|
if (!TARGET_USE_PIC_FN_ADDR_REG)
|
1611 |
|
|
return false;
|
1612 |
|
|
|
1613 |
|
|
/* MIPS16 stub functions are guaranteed not to use $25. */
|
1614 |
|
|
if (mips16_stub_function_p (x))
|
1615 |
|
|
return false;
|
1616 |
|
|
|
1617 |
|
|
if (GET_CODE (x) == SYMBOL_REF)
|
1618 |
|
|
{
|
1619 |
|
|
/* If PLTs and copy relocations are available, the static linker
|
1620 |
|
|
will make sure that $25 is valid on entry to the target function. */
|
1621 |
|
|
if (TARGET_ABICALLS_PIC0)
|
1622 |
|
|
return false;
|
1623 |
|
|
|
1624 |
|
|
/* Locally-defined functions use absolute accesses to set up
|
1625 |
|
|
the global pointer. */
|
1626 |
|
|
if (TARGET_ABSOLUTE_ABICALLS
|
1627 |
|
|
&& mips_symbol_binds_local_p (x)
|
1628 |
|
|
&& !SYMBOL_REF_EXTERNAL_P (x))
|
1629 |
|
|
return false;
|
1630 |
|
|
}
|
1631 |
|
|
|
1632 |
|
|
return true;
|
1633 |
|
|
}
|
1634 |
|
|
|
1635 |
|
|
/* Return the method that should be used to access SYMBOL_REF or
|
1636 |
|
|
LABEL_REF X in context CONTEXT. */
|
1637 |
|
|
|
1638 |
|
|
static enum mips_symbol_type
|
1639 |
|
|
mips_classify_symbol (const_rtx x, enum mips_symbol_context context)
|
1640 |
|
|
{
|
1641 |
|
|
if (TARGET_RTP_PIC)
|
1642 |
|
|
return SYMBOL_GOT_DISP;
|
1643 |
|
|
|
1644 |
|
|
if (GET_CODE (x) == LABEL_REF)
|
1645 |
|
|
{
|
1646 |
|
|
/* LABEL_REFs are used for jump tables as well as text labels.
|
1647 |
|
|
Only return SYMBOL_PC_RELATIVE if we know the label is in
|
1648 |
|
|
the text section. */
|
1649 |
|
|
if (TARGET_MIPS16_SHORT_JUMP_TABLES)
|
1650 |
|
|
return SYMBOL_PC_RELATIVE;
|
1651 |
|
|
|
1652 |
|
|
if (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS)
|
1653 |
|
|
return SYMBOL_GOT_PAGE_OFST;
|
1654 |
|
|
|
1655 |
|
|
return SYMBOL_ABSOLUTE;
|
1656 |
|
|
}
|
1657 |
|
|
|
1658 |
|
|
gcc_assert (GET_CODE (x) == SYMBOL_REF);
|
1659 |
|
|
|
1660 |
|
|
if (SYMBOL_REF_TLS_MODEL (x))
|
1661 |
|
|
return SYMBOL_TLS;
|
1662 |
|
|
|
1663 |
|
|
if (CONSTANT_POOL_ADDRESS_P (x))
|
1664 |
|
|
{
|
1665 |
|
|
if (TARGET_MIPS16_TEXT_LOADS)
|
1666 |
|
|
return SYMBOL_PC_RELATIVE;
|
1667 |
|
|
|
1668 |
|
|
if (TARGET_MIPS16_PCREL_LOADS && context == SYMBOL_CONTEXT_MEM)
|
1669 |
|
|
return SYMBOL_PC_RELATIVE;
|
1670 |
|
|
|
1671 |
|
|
if (mips_rtx_constant_in_small_data_p (get_pool_mode (x)))
|
1672 |
|
|
return SYMBOL_GP_RELATIVE;
|
1673 |
|
|
}
|
1674 |
|
|
|
1675 |
|
|
/* Do not use small-data accesses for weak symbols; they may end up
|
1676 |
|
|
being zero. */
|
1677 |
|
|
if (TARGET_GPOPT && SYMBOL_REF_SMALL_P (x) && !SYMBOL_REF_WEAK (x))
|
1678 |
|
|
return SYMBOL_GP_RELATIVE;
|
1679 |
|
|
|
1680 |
|
|
/* Don't use GOT accesses for locally-binding symbols when -mno-shared
|
1681 |
|
|
is in effect. */
|
1682 |
|
|
if (TARGET_ABICALLS_PIC2
|
1683 |
|
|
&& !(TARGET_ABSOLUTE_ABICALLS && mips_symbol_binds_local_p (x)))
|
1684 |
|
|
{
|
1685 |
|
|
/* There are three cases to consider:
|
1686 |
|
|
|
1687 |
|
|
- o32 PIC (either with or without explicit relocs)
|
1688 |
|
|
- n32/n64 PIC without explicit relocs
|
1689 |
|
|
- n32/n64 PIC with explicit relocs
|
1690 |
|
|
|
1691 |
|
|
In the first case, both local and global accesses will use an
|
1692 |
|
|
R_MIPS_GOT16 relocation. We must correctly predict which of
|
1693 |
|
|
the two semantics (local or global) the assembler and linker
|
1694 |
|
|
will apply. The choice depends on the symbol's binding rather
|
1695 |
|
|
than its visibility.
|
1696 |
|
|
|
1697 |
|
|
In the second case, the assembler will not use R_MIPS_GOT16
|
1698 |
|
|
relocations, but it chooses between local and global accesses
|
1699 |
|
|
in the same way as for o32 PIC.
|
1700 |
|
|
|
1701 |
|
|
In the third case we have more freedom since both forms of
|
1702 |
|
|
access will work for any kind of symbol. However, there seems
|
1703 |
|
|
little point in doing things differently. */
|
1704 |
|
|
if (mips_global_symbol_p (x))
|
1705 |
|
|
return SYMBOL_GOT_DISP;
|
1706 |
|
|
|
1707 |
|
|
return SYMBOL_GOT_PAGE_OFST;
|
1708 |
|
|
}
|
1709 |
|
|
|
1710 |
|
|
if (TARGET_MIPS16_PCREL_LOADS && context != SYMBOL_CONTEXT_CALL)
|
1711 |
|
|
return SYMBOL_FORCE_TO_MEM;
|
1712 |
|
|
|
1713 |
|
|
return SYMBOL_ABSOLUTE;
|
1714 |
|
|
}
|
1715 |
|
|
|
1716 |
|
|
/* Classify the base of symbolic expression X, given that X appears in
|
1717 |
|
|
context CONTEXT. */
|
1718 |
|
|
|
1719 |
|
|
static enum mips_symbol_type
|
1720 |
|
|
mips_classify_symbolic_expression (rtx x, enum mips_symbol_context context)
|
1721 |
|
|
{
|
1722 |
|
|
rtx offset;
|
1723 |
|
|
|
1724 |
|
|
split_const (x, &x, &offset);
|
1725 |
|
|
if (UNSPEC_ADDRESS_P (x))
|
1726 |
|
|
return UNSPEC_ADDRESS_TYPE (x);
|
1727 |
|
|
|
1728 |
|
|
return mips_classify_symbol (x, context);
|
1729 |
|
|
}
|
1730 |
|
|
|
1731 |
|
|
/* Return true if OFFSET is within the range [0, ALIGN), where ALIGN
|
1732 |
|
|
is the alignment in bytes of SYMBOL_REF X. */
|
1733 |
|
|
|
1734 |
|
|
static bool
|
1735 |
|
|
mips_offset_within_alignment_p (rtx x, HOST_WIDE_INT offset)
|
1736 |
|
|
{
|
1737 |
|
|
HOST_WIDE_INT align;
|
1738 |
|
|
|
1739 |
|
|
align = SYMBOL_REF_DECL (x) ? DECL_ALIGN_UNIT (SYMBOL_REF_DECL (x)) : 1;
|
1740 |
|
|
return IN_RANGE (offset, 0, align - 1);
|
1741 |
|
|
}
|
1742 |
|
|
|
1743 |
|
|
/* Return true if X is a symbolic constant that can be used in context
|
1744 |
|
|
CONTEXT. If it is, store the type of the symbol in *SYMBOL_TYPE. */
|
1745 |
|
|
|
1746 |
|
|
bool
|
1747 |
|
|
mips_symbolic_constant_p (rtx x, enum mips_symbol_context context,
|
1748 |
|
|
enum mips_symbol_type *symbol_type)
|
1749 |
|
|
{
|
1750 |
|
|
rtx offset;
|
1751 |
|
|
|
1752 |
|
|
split_const (x, &x, &offset);
|
1753 |
|
|
if (UNSPEC_ADDRESS_P (x))
|
1754 |
|
|
{
|
1755 |
|
|
*symbol_type = UNSPEC_ADDRESS_TYPE (x);
|
1756 |
|
|
x = UNSPEC_ADDRESS (x);
|
1757 |
|
|
}
|
1758 |
|
|
else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
|
1759 |
|
|
{
|
1760 |
|
|
*symbol_type = mips_classify_symbol (x, context);
|
1761 |
|
|
if (*symbol_type == SYMBOL_TLS)
|
1762 |
|
|
return false;
|
1763 |
|
|
}
|
1764 |
|
|
else
|
1765 |
|
|
return false;
|
1766 |
|
|
|
1767 |
|
|
if (offset == const0_rtx)
|
1768 |
|
|
return true;
|
1769 |
|
|
|
1770 |
|
|
/* Check whether a nonzero offset is valid for the underlying
|
1771 |
|
|
relocations. */
|
1772 |
|
|
switch (*symbol_type)
|
1773 |
|
|
{
|
1774 |
|
|
case SYMBOL_ABSOLUTE:
|
1775 |
|
|
case SYMBOL_FORCE_TO_MEM:
|
1776 |
|
|
case SYMBOL_32_HIGH:
|
1777 |
|
|
case SYMBOL_64_HIGH:
|
1778 |
|
|
case SYMBOL_64_MID:
|
1779 |
|
|
case SYMBOL_64_LOW:
|
1780 |
|
|
/* If the target has 64-bit pointers and the object file only
|
1781 |
|
|
supports 32-bit symbols, the values of those symbols will be
|
1782 |
|
|
sign-extended. In this case we can't allow an arbitrary offset
|
1783 |
|
|
in case the 32-bit value X + OFFSET has a different sign from X. */
|
1784 |
|
|
if (Pmode == DImode && !ABI_HAS_64BIT_SYMBOLS)
|
1785 |
|
|
return offset_within_block_p (x, INTVAL (offset));
|
1786 |
|
|
|
1787 |
|
|
/* In other cases the relocations can handle any offset. */
|
1788 |
|
|
return true;
|
1789 |
|
|
|
1790 |
|
|
case SYMBOL_PC_RELATIVE:
|
1791 |
|
|
/* Allow constant pool references to be converted to LABEL+CONSTANT.
|
1792 |
|
|
In this case, we no longer have access to the underlying constant,
|
1793 |
|
|
but the original symbol-based access was known to be valid. */
|
1794 |
|
|
if (GET_CODE (x) == LABEL_REF)
|
1795 |
|
|
return true;
|
1796 |
|
|
|
1797 |
|
|
/* Fall through. */
|
1798 |
|
|
|
1799 |
|
|
case SYMBOL_GP_RELATIVE:
|
1800 |
|
|
/* Make sure that the offset refers to something within the
|
1801 |
|
|
same object block. This should guarantee that the final
|
1802 |
|
|
PC- or GP-relative offset is within the 16-bit limit. */
|
1803 |
|
|
return offset_within_block_p (x, INTVAL (offset));
|
1804 |
|
|
|
1805 |
|
|
case SYMBOL_GOT_PAGE_OFST:
|
1806 |
|
|
case SYMBOL_GOTOFF_PAGE:
|
1807 |
|
|
/* If the symbol is global, the GOT entry will contain the symbol's
|
1808 |
|
|
address, and we will apply a 16-bit offset after loading it.
|
1809 |
|
|
If the symbol is local, the linker should provide enough local
|
1810 |
|
|
GOT entries for a 16-bit offset, but larger offsets may lead
|
1811 |
|
|
to GOT overflow. */
|
1812 |
|
|
return SMALL_INT (offset);
|
1813 |
|
|
|
1814 |
|
|
case SYMBOL_TPREL:
|
1815 |
|
|
case SYMBOL_DTPREL:
|
1816 |
|
|
/* There is no carry between the HI and LO REL relocations, so the
|
1817 |
|
|
offset is only valid if we know it won't lead to such a carry. */
|
1818 |
|
|
return mips_offset_within_alignment_p (x, INTVAL (offset));
|
1819 |
|
|
|
1820 |
|
|
case SYMBOL_GOT_DISP:
|
1821 |
|
|
case SYMBOL_GOTOFF_DISP:
|
1822 |
|
|
case SYMBOL_GOTOFF_CALL:
|
1823 |
|
|
case SYMBOL_GOTOFF_LOADGP:
|
1824 |
|
|
case SYMBOL_TLSGD:
|
1825 |
|
|
case SYMBOL_TLSLDM:
|
1826 |
|
|
case SYMBOL_GOTTPREL:
|
1827 |
|
|
case SYMBOL_TLS:
|
1828 |
|
|
case SYMBOL_HALF:
|
1829 |
|
|
return false;
|
1830 |
|
|
}
|
1831 |
|
|
gcc_unreachable ();
|
1832 |
|
|
}
|
1833 |
|
|
|
1834 |
|
|
/* Like mips_symbol_insns, but treat extended MIPS16 instructions as a
|
1835 |
|
|
single instruction. We rely on the fact that, in the worst case,
|
1836 |
|
|
all instructions involved in a MIPS16 address calculation are usually
|
1837 |
|
|
extended ones. */
|
1838 |
|
|
|
1839 |
|
|
static int
|
1840 |
|
|
mips_symbol_insns_1 (enum mips_symbol_type type, enum machine_mode mode)
|
1841 |
|
|
{
|
1842 |
|
|
switch (type)
|
1843 |
|
|
{
|
1844 |
|
|
case SYMBOL_ABSOLUTE:
|
1845 |
|
|
/* When using 64-bit symbols, we need 5 preparatory instructions,
|
1846 |
|
|
such as:
|
1847 |
|
|
|
1848 |
|
|
lui $at,%highest(symbol)
|
1849 |
|
|
daddiu $at,$at,%higher(symbol)
|
1850 |
|
|
dsll $at,$at,16
|
1851 |
|
|
daddiu $at,$at,%hi(symbol)
|
1852 |
|
|
dsll $at,$at,16
|
1853 |
|
|
|
1854 |
|
|
The final address is then $at + %lo(symbol). With 32-bit
|
1855 |
|
|
symbols we just need a preparatory LUI for normal mode and
|
1856 |
|
|
a preparatory LI and SLL for MIPS16. */
|
1857 |
|
|
return ABI_HAS_64BIT_SYMBOLS ? 6 : TARGET_MIPS16 ? 3 : 2;
|
1858 |
|
|
|
1859 |
|
|
case SYMBOL_GP_RELATIVE:
|
1860 |
|
|
/* Treat GP-relative accesses as taking a single instruction on
|
1861 |
|
|
MIPS16 too; the copy of $gp can often be shared. */
|
1862 |
|
|
return 1;
|
1863 |
|
|
|
1864 |
|
|
case SYMBOL_PC_RELATIVE:
|
1865 |
|
|
/* PC-relative constants can be only be used with ADDIUPC,
|
1866 |
|
|
DADDIUPC, LWPC and LDPC. */
|
1867 |
|
|
if (mode == MAX_MACHINE_MODE
|
1868 |
|
|
|| GET_MODE_SIZE (mode) == 4
|
1869 |
|
|
|| GET_MODE_SIZE (mode) == 8)
|
1870 |
|
|
return 1;
|
1871 |
|
|
|
1872 |
|
|
/* The constant must be loaded using ADDIUPC or DADDIUPC first. */
|
1873 |
|
|
return 0;
|
1874 |
|
|
|
1875 |
|
|
case SYMBOL_FORCE_TO_MEM:
|
1876 |
|
|
/* LEAs will be converted into constant-pool references by
|
1877 |
|
|
mips_reorg. */
|
1878 |
|
|
if (mode == MAX_MACHINE_MODE)
|
1879 |
|
|
return 1;
|
1880 |
|
|
|
1881 |
|
|
/* The constant must be loaded and then dereferenced. */
|
1882 |
|
|
return 0;
|
1883 |
|
|
|
1884 |
|
|
case SYMBOL_GOT_DISP:
|
1885 |
|
|
/* The constant will have to be loaded from the GOT before it
|
1886 |
|
|
is used in an address. */
|
1887 |
|
|
if (mode != MAX_MACHINE_MODE)
|
1888 |
|
|
return 0;
|
1889 |
|
|
|
1890 |
|
|
/* Fall through. */
|
1891 |
|
|
|
1892 |
|
|
case SYMBOL_GOT_PAGE_OFST:
|
1893 |
|
|
/* Unless -funit-at-a-time is in effect, we can't be sure whether the
|
1894 |
|
|
local/global classification is accurate. The worst cases are:
|
1895 |
|
|
|
1896 |
|
|
(1) For local symbols when generating o32 or o64 code. The assembler
|
1897 |
|
|
will use:
|
1898 |
|
|
|
1899 |
|
|
lw $at,%got(symbol)
|
1900 |
|
|
nop
|
1901 |
|
|
|
1902 |
|
|
...and the final address will be $at + %lo(symbol).
|
1903 |
|
|
|
1904 |
|
|
(2) For global symbols when -mxgot. The assembler will use:
|
1905 |
|
|
|
1906 |
|
|
lui $at,%got_hi(symbol)
|
1907 |
|
|
(d)addu $at,$at,$gp
|
1908 |
|
|
|
1909 |
|
|
...and the final address will be $at + %got_lo(symbol). */
|
1910 |
|
|
return 3;
|
1911 |
|
|
|
1912 |
|
|
case SYMBOL_GOTOFF_PAGE:
|
1913 |
|
|
case SYMBOL_GOTOFF_DISP:
|
1914 |
|
|
case SYMBOL_GOTOFF_CALL:
|
1915 |
|
|
case SYMBOL_GOTOFF_LOADGP:
|
1916 |
|
|
case SYMBOL_32_HIGH:
|
1917 |
|
|
case SYMBOL_64_HIGH:
|
1918 |
|
|
case SYMBOL_64_MID:
|
1919 |
|
|
case SYMBOL_64_LOW:
|
1920 |
|
|
case SYMBOL_TLSGD:
|
1921 |
|
|
case SYMBOL_TLSLDM:
|
1922 |
|
|
case SYMBOL_DTPREL:
|
1923 |
|
|
case SYMBOL_GOTTPREL:
|
1924 |
|
|
case SYMBOL_TPREL:
|
1925 |
|
|
case SYMBOL_HALF:
|
1926 |
|
|
/* A 16-bit constant formed by a single relocation, or a 32-bit
|
1927 |
|
|
constant formed from a high 16-bit relocation and a low 16-bit
|
1928 |
|
|
relocation. Use mips_split_p to determine which. 32-bit
|
1929 |
|
|
constants need an "lui; addiu" sequence for normal mode and
|
1930 |
|
|
an "li; sll; addiu" sequence for MIPS16 mode. */
|
1931 |
|
|
return !mips_split_p[type] ? 1 : TARGET_MIPS16 ? 3 : 2;
|
1932 |
|
|
|
1933 |
|
|
case SYMBOL_TLS:
|
1934 |
|
|
/* We don't treat a bare TLS symbol as a constant. */
|
1935 |
|
|
return 0;
|
1936 |
|
|
}
|
1937 |
|
|
gcc_unreachable ();
|
1938 |
|
|
}
|
1939 |
|
|
|
1940 |
|
|
/* If MODE is MAX_MACHINE_MODE, return the number of instructions needed
|
1941 |
|
|
to load symbols of type TYPE into a register. Return 0 if the given
|
1942 |
|
|
type of symbol cannot be used as an immediate operand.
|
1943 |
|
|
|
1944 |
|
|
Otherwise, return the number of instructions needed to load or store
|
1945 |
|
|
values of mode MODE to or from addresses of type TYPE. Return 0 if
|
1946 |
|
|
the given type of symbol is not valid in addresses.
|
1947 |
|
|
|
1948 |
|
|
In both cases, treat extended MIPS16 instructions as two instructions. */
|
1949 |
|
|
|
1950 |
|
|
static int
|
1951 |
|
|
mips_symbol_insns (enum mips_symbol_type type, enum machine_mode mode)
|
1952 |
|
|
{
|
1953 |
|
|
return mips_symbol_insns_1 (type, mode) * (TARGET_MIPS16 ? 2 : 1);
|
1954 |
|
|
}
|
1955 |
|
|
|
1956 |
|
|
/* A for_each_rtx callback. Stop the search if *X references a
|
1957 |
|
|
thread-local symbol. */
|
1958 |
|
|
|
1959 |
|
|
static int
|
1960 |
|
|
mips_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
|
1961 |
|
|
{
|
1962 |
|
|
return mips_tls_symbol_p (*x);
|
1963 |
|
|
}
|
1964 |
|
|
|
1965 |
|
|
/* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
|
1966 |
|
|
|
1967 |
|
|
static bool
|
1968 |
|
|
mips_cannot_force_const_mem (rtx x)
|
1969 |
|
|
{
|
1970 |
|
|
enum mips_symbol_type type;
|
1971 |
|
|
rtx base, offset;
|
1972 |
|
|
|
1973 |
|
|
/* There is no assembler syntax for expressing an address-sized
|
1974 |
|
|
high part. */
|
1975 |
|
|
if (GET_CODE (x) == HIGH)
|
1976 |
|
|
return true;
|
1977 |
|
|
|
1978 |
|
|
/* As an optimization, reject constants that mips_legitimize_move
|
1979 |
|
|
can expand inline.
|
1980 |
|
|
|
1981 |
|
|
Suppose we have a multi-instruction sequence that loads constant C
|
1982 |
|
|
into register R. If R does not get allocated a hard register, and
|
1983 |
|
|
R is used in an operand that allows both registers and memory
|
1984 |
|
|
references, reload will consider forcing C into memory and using
|
1985 |
|
|
one of the instruction's memory alternatives. Returning false
|
1986 |
|
|
here will force it to use an input reload instead. */
|
1987 |
|
|
if (CONST_INT_P (x) && LEGITIMATE_CONSTANT_P (x))
|
1988 |
|
|
return true;
|
1989 |
|
|
|
1990 |
|
|
split_const (x, &base, &offset);
|
1991 |
|
|
if (mips_symbolic_constant_p (base, SYMBOL_CONTEXT_LEA, &type)
|
1992 |
|
|
&& type != SYMBOL_FORCE_TO_MEM)
|
1993 |
|
|
{
|
1994 |
|
|
/* The same optimization as for CONST_INT. */
|
1995 |
|
|
if (SMALL_INT (offset) && mips_symbol_insns (type, MAX_MACHINE_MODE) > 0)
|
1996 |
|
|
return true;
|
1997 |
|
|
|
1998 |
|
|
/* If MIPS16 constant pools live in the text section, they should
|
1999 |
|
|
not refer to anything that might need run-time relocation. */
|
2000 |
|
|
if (TARGET_MIPS16_PCREL_LOADS && mips_got_symbol_type_p (type))
|
2001 |
|
|
return true;
|
2002 |
|
|
}
|
2003 |
|
|
|
2004 |
|
|
/* TLS symbols must be computed by mips_legitimize_move. */
|
2005 |
|
|
if (for_each_rtx (&x, &mips_tls_symbol_ref_1, NULL))
|
2006 |
|
|
return true;
|
2007 |
|
|
|
2008 |
|
|
return false;
|
2009 |
|
|
}
|
2010 |
|
|
|
2011 |
|
|
/* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. We can't use blocks for
|
2012 |
|
|
constants when we're using a per-function constant pool. */
|
2013 |
|
|
|
2014 |
|
|
static bool
|
2015 |
|
|
mips_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
|
2016 |
|
|
const_rtx x ATTRIBUTE_UNUSED)
|
2017 |
|
|
{
|
2018 |
|
|
return !TARGET_MIPS16_PCREL_LOADS;
|
2019 |
|
|
}
|
2020 |
|
|
|
2021 |
|
|
/* Return true if register REGNO is a valid base register for mode MODE.
|
2022 |
|
|
STRICT_P is true if REG_OK_STRICT is in effect. */
|
2023 |
|
|
|
2024 |
|
|
int
|
2025 |
|
|
mips_regno_mode_ok_for_base_p (int regno, enum machine_mode mode,
|
2026 |
|
|
bool strict_p)
|
2027 |
|
|
{
|
2028 |
|
|
if (!HARD_REGISTER_NUM_P (regno))
|
2029 |
|
|
{
|
2030 |
|
|
if (!strict_p)
|
2031 |
|
|
return true;
|
2032 |
|
|
regno = reg_renumber[regno];
|
2033 |
|
|
}
|
2034 |
|
|
|
2035 |
|
|
/* These fake registers will be eliminated to either the stack or
|
2036 |
|
|
hard frame pointer, both of which are usually valid base registers.
|
2037 |
|
|
Reload deals with the cases where the eliminated form isn't valid. */
|
2038 |
|
|
if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
|
2039 |
|
|
return true;
|
2040 |
|
|
|
2041 |
|
|
/* In MIPS16 mode, the stack pointer can only address word and doubleword
|
2042 |
|
|
values, nothing smaller. There are two problems here:
|
2043 |
|
|
|
2044 |
|
|
(a) Instantiating virtual registers can introduce new uses of the
|
2045 |
|
|
stack pointer. If these virtual registers are valid addresses,
|
2046 |
|
|
the stack pointer should be too.
|
2047 |
|
|
|
2048 |
|
|
(b) Most uses of the stack pointer are not made explicit until
|
2049 |
|
|
FRAME_POINTER_REGNUM and ARG_POINTER_REGNUM have been eliminated.
|
2050 |
|
|
We don't know until that stage whether we'll be eliminating to the
|
2051 |
|
|
stack pointer (which needs the restriction) or the hard frame
|
2052 |
|
|
pointer (which doesn't).
|
2053 |
|
|
|
2054 |
|
|
All in all, it seems more consistent to only enforce this restriction
|
2055 |
|
|
during and after reload. */
|
2056 |
|
|
if (TARGET_MIPS16 && regno == STACK_POINTER_REGNUM)
|
2057 |
|
|
return !strict_p || GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
|
2058 |
|
|
|
2059 |
|
|
return TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
|
2060 |
|
|
}
|
2061 |
|
|
|
2062 |
|
|
/* Return true if X is a valid base register for mode MODE.
|
2063 |
|
|
STRICT_P is true if REG_OK_STRICT is in effect. */
|
2064 |
|
|
|
2065 |
|
|
static bool
|
2066 |
|
|
mips_valid_base_register_p (rtx x, enum machine_mode mode, bool strict_p)
|
2067 |
|
|
{
|
2068 |
|
|
if (!strict_p && GET_CODE (x) == SUBREG)
|
2069 |
|
|
x = SUBREG_REG (x);
|
2070 |
|
|
|
2071 |
|
|
return (REG_P (x)
|
2072 |
|
|
&& mips_regno_mode_ok_for_base_p (REGNO (x), mode, strict_p));
|
2073 |
|
|
}
|
2074 |
|
|
|
2075 |
|
|
/* Return true if, for every base register BASE_REG, (plus BASE_REG X)
|
2076 |
|
|
can address a value of mode MODE. */
|
2077 |
|
|
|
2078 |
|
|
static bool
|
2079 |
|
|
mips_valid_offset_p (rtx x, enum machine_mode mode)
|
2080 |
|
|
{
|
2081 |
|
|
/* Check that X is a signed 16-bit number. */
|
2082 |
|
|
if (!const_arith_operand (x, Pmode))
|
2083 |
|
|
return false;
|
2084 |
|
|
|
2085 |
|
|
/* We may need to split multiword moves, so make sure that every word
|
2086 |
|
|
is accessible. */
|
2087 |
|
|
if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
|
2088 |
|
|
&& !SMALL_OPERAND (INTVAL (x) + GET_MODE_SIZE (mode) - UNITS_PER_WORD))
|
2089 |
|
|
return false;
|
2090 |
|
|
|
2091 |
|
|
return true;
|
2092 |
|
|
}
|
2093 |
|
|
|
2094 |
|
|
/* Return true if a LO_SUM can address a value of mode MODE when the
|
2095 |
|
|
LO_SUM symbol has type SYMBOL_TYPE. */
|
2096 |
|
|
|
2097 |
|
|
static bool
|
2098 |
|
|
mips_valid_lo_sum_p (enum mips_symbol_type symbol_type, enum machine_mode mode)
|
2099 |
|
|
{
|
2100 |
|
|
/* Check that symbols of type SYMBOL_TYPE can be used to access values
|
2101 |
|
|
of mode MODE. */
|
2102 |
|
|
if (mips_symbol_insns (symbol_type, mode) == 0)
|
2103 |
|
|
return false;
|
2104 |
|
|
|
2105 |
|
|
/* Check that there is a known low-part relocation. */
|
2106 |
|
|
if (mips_lo_relocs[symbol_type] == NULL)
|
2107 |
|
|
return false;
|
2108 |
|
|
|
2109 |
|
|
/* We may need to split multiword moves, so make sure that each word
|
2110 |
|
|
can be accessed without inducing a carry. This is mainly needed
|
2111 |
|
|
for o64, which has historically only guaranteed 64-bit alignment
|
2112 |
|
|
for 128-bit types. */
|
2113 |
|
|
if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
|
2114 |
|
|
&& GET_MODE_BITSIZE (mode) > GET_MODE_ALIGNMENT (mode))
|
2115 |
|
|
return false;
|
2116 |
|
|
|
2117 |
|
|
return true;
|
2118 |
|
|
}
|
2119 |
|
|
|
2120 |
|
|
/* Return true if X is a valid address for machine mode MODE. If it is,
|
2121 |
|
|
fill in INFO appropriately. STRICT_P is true if REG_OK_STRICT is in
|
2122 |
|
|
effect. */
|
2123 |
|
|
|
2124 |
|
|
static bool
|
2125 |
|
|
mips_classify_address (struct mips_address_info *info, rtx x,
|
2126 |
|
|
enum machine_mode mode, bool strict_p)
|
2127 |
|
|
{
|
2128 |
|
|
switch (GET_CODE (x))
|
2129 |
|
|
{
|
2130 |
|
|
case REG:
|
2131 |
|
|
case SUBREG:
|
2132 |
|
|
info->type = ADDRESS_REG;
|
2133 |
|
|
info->reg = x;
|
2134 |
|
|
info->offset = const0_rtx;
|
2135 |
|
|
return mips_valid_base_register_p (info->reg, mode, strict_p);
|
2136 |
|
|
|
2137 |
|
|
case PLUS:
|
2138 |
|
|
info->type = ADDRESS_REG;
|
2139 |
|
|
info->reg = XEXP (x, 0);
|
2140 |
|
|
info->offset = XEXP (x, 1);
|
2141 |
|
|
return (mips_valid_base_register_p (info->reg, mode, strict_p)
|
2142 |
|
|
&& mips_valid_offset_p (info->offset, mode));
|
2143 |
|
|
|
2144 |
|
|
case LO_SUM:
|
2145 |
|
|
info->type = ADDRESS_LO_SUM;
|
2146 |
|
|
info->reg = XEXP (x, 0);
|
2147 |
|
|
info->offset = XEXP (x, 1);
|
2148 |
|
|
/* We have to trust the creator of the LO_SUM to do something vaguely
|
2149 |
|
|
sane. Target-independent code that creates a LO_SUM should also
|
2150 |
|
|
create and verify the matching HIGH. Target-independent code that
|
2151 |
|
|
adds an offset to a LO_SUM must prove that the offset will not
|
2152 |
|
|
induce a carry. Failure to do either of these things would be
|
2153 |
|
|
a bug, and we are not required to check for it here. The MIPS
|
2154 |
|
|
backend itself should only create LO_SUMs for valid symbolic
|
2155 |
|
|
constants, with the high part being either a HIGH or a copy
|
2156 |
|
|
of _gp. */
|
2157 |
|
|
info->symbol_type
|
2158 |
|
|
= mips_classify_symbolic_expression (info->offset, SYMBOL_CONTEXT_MEM);
|
2159 |
|
|
return (mips_valid_base_register_p (info->reg, mode, strict_p)
|
2160 |
|
|
&& mips_valid_lo_sum_p (info->symbol_type, mode));
|
2161 |
|
|
|
2162 |
|
|
case CONST_INT:
|
2163 |
|
|
/* Small-integer addresses don't occur very often, but they
|
2164 |
|
|
are legitimate if $0 is a valid base register. */
|
2165 |
|
|
info->type = ADDRESS_CONST_INT;
|
2166 |
|
|
return !TARGET_MIPS16 && SMALL_INT (x);
|
2167 |
|
|
|
2168 |
|
|
case CONST:
|
2169 |
|
|
case LABEL_REF:
|
2170 |
|
|
case SYMBOL_REF:
|
2171 |
|
|
info->type = ADDRESS_SYMBOLIC;
|
2172 |
|
|
return (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_MEM,
|
2173 |
|
|
&info->symbol_type)
|
2174 |
|
|
&& mips_symbol_insns (info->symbol_type, mode) > 0
|
2175 |
|
|
&& !mips_split_p[info->symbol_type]);
|
2176 |
|
|
|
2177 |
|
|
default:
|
2178 |
|
|
return false;
|
2179 |
|
|
}
|
2180 |
|
|
}
|
2181 |
|
|
|
2182 |
|
|
/* Implement TARGET_LEGITIMATE_ADDRESS_P. */
|
2183 |
|
|
|
2184 |
|
|
static bool
|
2185 |
|
|
mips_legitimate_address_p (enum machine_mode mode, rtx x, bool strict_p)
|
2186 |
|
|
{
|
2187 |
|
|
struct mips_address_info addr;
|
2188 |
|
|
|
2189 |
|
|
return mips_classify_address (&addr, x, mode, strict_p);
|
2190 |
|
|
}
|
2191 |
|
|
|
2192 |
|
|
/* Return true if X is a legitimate $sp-based address for mode MDOE. */
|
2193 |
|
|
|
2194 |
|
|
bool
|
2195 |
|
|
mips_stack_address_p (rtx x, enum machine_mode mode)
|
2196 |
|
|
{
|
2197 |
|
|
struct mips_address_info addr;
|
2198 |
|
|
|
2199 |
|
|
return (mips_classify_address (&addr, x, mode, false)
|
2200 |
|
|
&& addr.type == ADDRESS_REG
|
2201 |
|
|
&& addr.reg == stack_pointer_rtx);
|
2202 |
|
|
}
|
2203 |
|
|
|
2204 |
|
|
/* Return true if ADDR matches the pattern for the LWXS load scaled indexed
|
2205 |
|
|
address instruction. Note that such addresses are not considered
|
2206 |
|
|
legitimate in the TARGET_LEGITIMATE_ADDRESS_P sense, because their use
|
2207 |
|
|
is so restricted. */
|
2208 |
|
|
|
2209 |
|
|
static bool
|
2210 |
|
|
mips_lwxs_address_p (rtx addr)
|
2211 |
|
|
{
|
2212 |
|
|
if (ISA_HAS_LWXS
|
2213 |
|
|
&& GET_CODE (addr) == PLUS
|
2214 |
|
|
&& REG_P (XEXP (addr, 1)))
|
2215 |
|
|
{
|
2216 |
|
|
rtx offset = XEXP (addr, 0);
|
2217 |
|
|
if (GET_CODE (offset) == MULT
|
2218 |
|
|
&& REG_P (XEXP (offset, 0))
|
2219 |
|
|
&& CONST_INT_P (XEXP (offset, 1))
|
2220 |
|
|
&& INTVAL (XEXP (offset, 1)) == 4)
|
2221 |
|
|
return true;
|
2222 |
|
|
}
|
2223 |
|
|
return false;
|
2224 |
|
|
}
|
2225 |
|
|
|
2226 |
|
|
/* Return true if a value at OFFSET bytes from base register BASE can be
|
2227 |
|
|
accessed using an unextended MIPS16 instruction. MODE is the mode of
|
2228 |
|
|
the value.
|
2229 |
|
|
|
2230 |
|
|
Usually the offset in an unextended instruction is a 5-bit field.
|
2231 |
|
|
The offset is unsigned and shifted left once for LH and SH, twice
|
2232 |
|
|
for LW and SW, and so on. An exception is LWSP and SWSP, which have
|
2233 |
|
|
an 8-bit immediate field that's shifted left twice. */
|
2234 |
|
|
|
2235 |
|
|
static bool
|
2236 |
|
|
mips16_unextended_reference_p (enum machine_mode mode, rtx base,
|
2237 |
|
|
unsigned HOST_WIDE_INT offset)
|
2238 |
|
|
{
|
2239 |
|
|
if (offset % GET_MODE_SIZE (mode) == 0)
|
2240 |
|
|
{
|
2241 |
|
|
if (GET_MODE_SIZE (mode) == 4 && base == stack_pointer_rtx)
|
2242 |
|
|
return offset < 256U * GET_MODE_SIZE (mode);
|
2243 |
|
|
return offset < 32U * GET_MODE_SIZE (mode);
|
2244 |
|
|
}
|
2245 |
|
|
return false;
|
2246 |
|
|
}
|
2247 |
|
|
|
2248 |
|
|
/* Return the number of instructions needed to load or store a value
|
2249 |
|
|
of mode MODE at address X. Return 0 if X isn't valid for MODE.
|
2250 |
|
|
Assume that multiword moves may need to be split into word moves
|
2251 |
|
|
if MIGHT_SPLIT_P, otherwise assume that a single load or store is
|
2252 |
|
|
enough.
|
2253 |
|
|
|
2254 |
|
|
For MIPS16 code, count extended instructions as two instructions. */
|
2255 |
|
|
|
2256 |
|
|
int
|
2257 |
|
|
mips_address_insns (rtx x, enum machine_mode mode, bool might_split_p)
|
2258 |
|
|
{
|
2259 |
|
|
struct mips_address_info addr;
|
2260 |
|
|
int factor;
|
2261 |
|
|
|
2262 |
|
|
/* BLKmode is used for single unaligned loads and stores and should
|
2263 |
|
|
not count as a multiword mode. (GET_MODE_SIZE (BLKmode) is pretty
|
2264 |
|
|
meaningless, so we have to single it out as a special case one way
|
2265 |
|
|
or the other.) */
|
2266 |
|
|
if (mode != BLKmode && might_split_p)
|
2267 |
|
|
factor = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
|
2268 |
|
|
else
|
2269 |
|
|
factor = 1;
|
2270 |
|
|
|
2271 |
|
|
if (mips_classify_address (&addr, x, mode, false))
|
2272 |
|
|
switch (addr.type)
|
2273 |
|
|
{
|
2274 |
|
|
case ADDRESS_REG:
|
2275 |
|
|
if (TARGET_MIPS16
|
2276 |
|
|
&& !mips16_unextended_reference_p (mode, addr.reg,
|
2277 |
|
|
UINTVAL (addr.offset)))
|
2278 |
|
|
return factor * 2;
|
2279 |
|
|
return factor;
|
2280 |
|
|
|
2281 |
|
|
case ADDRESS_LO_SUM:
|
2282 |
|
|
return TARGET_MIPS16 ? factor * 2 : factor;
|
2283 |
|
|
|
2284 |
|
|
case ADDRESS_CONST_INT:
|
2285 |
|
|
return factor;
|
2286 |
|
|
|
2287 |
|
|
case ADDRESS_SYMBOLIC:
|
2288 |
|
|
return factor * mips_symbol_insns (addr.symbol_type, mode);
|
2289 |
|
|
}
|
2290 |
|
|
return 0;
|
2291 |
|
|
}
|
2292 |
|
|
|
2293 |
|
|
/* Return the number of instructions needed to load constant X.
|
2294 |
|
|
Return 0 if X isn't a valid constant. */
|
2295 |
|
|
|
2296 |
|
|
int
|
2297 |
|
|
mips_const_insns (rtx x)
|
2298 |
|
|
{
|
2299 |
|
|
struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
|
2300 |
|
|
enum mips_symbol_type symbol_type;
|
2301 |
|
|
rtx offset;
|
2302 |
|
|
|
2303 |
|
|
switch (GET_CODE (x))
|
2304 |
|
|
{
|
2305 |
|
|
case HIGH:
|
2306 |
|
|
if (!mips_symbolic_constant_p (XEXP (x, 0), SYMBOL_CONTEXT_LEA,
|
2307 |
|
|
&symbol_type)
|
2308 |
|
|
|| !mips_split_p[symbol_type])
|
2309 |
|
|
return 0;
|
2310 |
|
|
|
2311 |
|
|
/* This is simply an LUI for normal mode. It is an extended
|
2312 |
|
|
LI followed by an extended SLL for MIPS16. */
|
2313 |
|
|
return TARGET_MIPS16 ? 4 : 1;
|
2314 |
|
|
|
2315 |
|
|
case CONST_INT:
|
2316 |
|
|
if (TARGET_MIPS16)
|
2317 |
|
|
/* Unsigned 8-bit constants can be loaded using an unextended
|
2318 |
|
|
LI instruction. Unsigned 16-bit constants can be loaded
|
2319 |
|
|
using an extended LI. Negative constants must be loaded
|
2320 |
|
|
using LI and then negated. */
|
2321 |
|
|
return (IN_RANGE (INTVAL (x), 0, 255) ? 1
|
2322 |
|
|
: SMALL_OPERAND_UNSIGNED (INTVAL (x)) ? 2
|
2323 |
|
|
: IN_RANGE (-INTVAL (x), 0, 255) ? 2
|
2324 |
|
|
: SMALL_OPERAND_UNSIGNED (-INTVAL (x)) ? 3
|
2325 |
|
|
: 0);
|
2326 |
|
|
|
2327 |
|
|
return mips_build_integer (codes, INTVAL (x));
|
2328 |
|
|
|
2329 |
|
|
case CONST_DOUBLE:
|
2330 |
|
|
case CONST_VECTOR:
|
2331 |
|
|
/* Allow zeros for normal mode, where we can use $0. */
|
2332 |
|
|
return !TARGET_MIPS16 && x == CONST0_RTX (GET_MODE (x)) ? 1 : 0;
|
2333 |
|
|
|
2334 |
|
|
case CONST:
|
2335 |
|
|
if (CONST_GP_P (x))
|
2336 |
|
|
return 1;
|
2337 |
|
|
|
2338 |
|
|
/* See if we can refer to X directly. */
|
2339 |
|
|
if (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_LEA, &symbol_type))
|
2340 |
|
|
return mips_symbol_insns (symbol_type, MAX_MACHINE_MODE);
|
2341 |
|
|
|
2342 |
|
|
/* Otherwise try splitting the constant into a base and offset.
|
2343 |
|
|
If the offset is a 16-bit value, we can load the base address
|
2344 |
|
|
into a register and then use (D)ADDIU to add in the offset.
|
2345 |
|
|
If the offset is larger, we can load the base and offset
|
2346 |
|
|
into separate registers and add them together with (D)ADDU.
|
2347 |
|
|
However, the latter is only possible before reload; during
|
2348 |
|
|
and after reload, we must have the option of forcing the
|
2349 |
|
|
constant into the pool instead. */
|
2350 |
|
|
split_const (x, &x, &offset);
|
2351 |
|
|
if (offset != 0)
|
2352 |
|
|
{
|
2353 |
|
|
int n = mips_const_insns (x);
|
2354 |
|
|
if (n != 0)
|
2355 |
|
|
{
|
2356 |
|
|
if (SMALL_INT (offset))
|
2357 |
|
|
return n + 1;
|
2358 |
|
|
else if (!targetm.cannot_force_const_mem (x))
|
2359 |
|
|
return n + 1 + mips_build_integer (codes, INTVAL (offset));
|
2360 |
|
|
}
|
2361 |
|
|
}
|
2362 |
|
|
return 0;
|
2363 |
|
|
|
2364 |
|
|
case SYMBOL_REF:
|
2365 |
|
|
case LABEL_REF:
|
2366 |
|
|
return mips_symbol_insns (mips_classify_symbol (x, SYMBOL_CONTEXT_LEA),
|
2367 |
|
|
MAX_MACHINE_MODE);
|
2368 |
|
|
|
2369 |
|
|
default:
|
2370 |
|
|
return 0;
|
2371 |
|
|
}
|
2372 |
|
|
}
|
2373 |
|
|
|
2374 |
|
|
/* X is a doubleword constant that can be handled by splitting it into
|
2375 |
|
|
two words and loading each word separately. Return the number of
|
2376 |
|
|
instructions required to do this. */
|
2377 |
|
|
|
2378 |
|
|
int
|
2379 |
|
|
mips_split_const_insns (rtx x)
|
2380 |
|
|
{
|
2381 |
|
|
unsigned int low, high;
|
2382 |
|
|
|
2383 |
|
|
low = mips_const_insns (mips_subword (x, false));
|
2384 |
|
|
high = mips_const_insns (mips_subword (x, true));
|
2385 |
|
|
gcc_assert (low > 0 && high > 0);
|
2386 |
|
|
return low + high;
|
2387 |
|
|
}
|
2388 |
|
|
|
2389 |
|
|
/* Return the number of instructions needed to implement INSN,
|
2390 |
|
|
given that it loads from or stores to MEM. Count extended
|
2391 |
|
|
MIPS16 instructions as two instructions. */
|
2392 |
|
|
|
2393 |
|
|
int
|
2394 |
|
|
mips_load_store_insns (rtx mem, rtx insn)
|
2395 |
|
|
{
|
2396 |
|
|
enum machine_mode mode;
|
2397 |
|
|
bool might_split_p;
|
2398 |
|
|
rtx set;
|
2399 |
|
|
|
2400 |
|
|
gcc_assert (MEM_P (mem));
|
2401 |
|
|
mode = GET_MODE (mem);
|
2402 |
|
|
|
2403 |
|
|
/* Try to prove that INSN does not need to be split. */
|
2404 |
|
|
might_split_p = true;
|
2405 |
|
|
if (GET_MODE_BITSIZE (mode) == 64)
|
2406 |
|
|
{
|
2407 |
|
|
set = single_set (insn);
|
2408 |
|
|
if (set && !mips_split_64bit_move_p (SET_DEST (set), SET_SRC (set)))
|
2409 |
|
|
might_split_p = false;
|
2410 |
|
|
}
|
2411 |
|
|
|
2412 |
|
|
return mips_address_insns (XEXP (mem, 0), mode, might_split_p);
|
2413 |
|
|
}
|
2414 |
|
|
|
2415 |
|
|
/* Return the number of instructions needed for an integer division. */
|
2416 |
|
|
|
2417 |
|
|
int
|
2418 |
|
|
mips_idiv_insns (void)
|
2419 |
|
|
{
|
2420 |
|
|
int count;
|
2421 |
|
|
|
2422 |
|
|
count = 1;
|
2423 |
|
|
if (TARGET_CHECK_ZERO_DIV)
|
2424 |
|
|
{
|
2425 |
|
|
if (GENERATE_DIVIDE_TRAPS)
|
2426 |
|
|
count++;
|
2427 |
|
|
else
|
2428 |
|
|
count += 2;
|
2429 |
|
|
}
|
2430 |
|
|
|
2431 |
|
|
if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
|
2432 |
|
|
count++;
|
2433 |
|
|
return count;
|
2434 |
|
|
}
|
2435 |
|
|
|
2436 |
|
|
/* Emit a move from SRC to DEST. Assume that the move expanders can
|
2437 |
|
|
handle all moves if !can_create_pseudo_p (). The distinction is
|
2438 |
|
|
important because, unlike emit_move_insn, the move expanders know
|
2439 |
|
|
how to force Pmode objects into the constant pool even when the
|
2440 |
|
|
constant pool address is not itself legitimate. */
|
2441 |
|
|
|
2442 |
|
|
rtx
|
2443 |
|
|
mips_emit_move (rtx dest, rtx src)
|
2444 |
|
|
{
|
2445 |
|
|
return (can_create_pseudo_p ()
|
2446 |
|
|
? emit_move_insn (dest, src)
|
2447 |
|
|
: emit_move_insn_1 (dest, src));
|
2448 |
|
|
}
|
2449 |
|
|
|
2450 |
|
|
/* Emit an instruction of the form (set TARGET (CODE OP0)). */
|
2451 |
|
|
|
2452 |
|
|
static void
|
2453 |
|
|
mips_emit_unary (enum rtx_code code, rtx target, rtx op0)
|
2454 |
|
|
{
|
2455 |
|
|
emit_insn (gen_rtx_SET (VOIDmode, target,
|
2456 |
|
|
gen_rtx_fmt_e (code, GET_MODE (op0), op0)));
|
2457 |
|
|
}
|
2458 |
|
|
|
2459 |
|
|
/* Compute (CODE OP0) and store the result in a new register of mode MODE.
|
2460 |
|
|
Return that new register. */
|
2461 |
|
|
|
2462 |
|
|
static rtx
|
2463 |
|
|
mips_force_unary (enum machine_mode mode, enum rtx_code code, rtx op0)
|
2464 |
|
|
{
|
2465 |
|
|
rtx reg;
|
2466 |
|
|
|
2467 |
|
|
reg = gen_reg_rtx (mode);
|
2468 |
|
|
mips_emit_unary (code, reg, op0);
|
2469 |
|
|
return reg;
|
2470 |
|
|
}
|
2471 |
|
|
|
2472 |
|
|
/* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
|
2473 |
|
|
|
2474 |
|
|
static void
|
2475 |
|
|
mips_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
|
2476 |
|
|
{
|
2477 |
|
|
emit_insn (gen_rtx_SET (VOIDmode, target,
|
2478 |
|
|
gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
|
2479 |
|
|
}
|
2480 |
|
|
|
2481 |
|
|
/* Compute (CODE OP0 OP1) and store the result in a new register
|
2482 |
|
|
of mode MODE. Return that new register. */
|
2483 |
|
|
|
2484 |
|
|
static rtx
|
2485 |
|
|
mips_force_binary (enum machine_mode mode, enum rtx_code code, rtx op0, rtx op1)
|
2486 |
|
|
{
|
2487 |
|
|
rtx reg;
|
2488 |
|
|
|
2489 |
|
|
reg = gen_reg_rtx (mode);
|
2490 |
|
|
mips_emit_binary (code, reg, op0, op1);
|
2491 |
|
|
return reg;
|
2492 |
|
|
}
|
2493 |
|
|
|
2494 |
|
|
/* Copy VALUE to a register and return that register. If new pseudos
|
2495 |
|
|
are allowed, copy it into a new register, otherwise use DEST. */
|
2496 |
|
|
|
2497 |
|
|
static rtx
|
2498 |
|
|
mips_force_temporary (rtx dest, rtx value)
|
2499 |
|
|
{
|
2500 |
|
|
if (can_create_pseudo_p ())
|
2501 |
|
|
return force_reg (Pmode, value);
|
2502 |
|
|
else
|
2503 |
|
|
{
|
2504 |
|
|
mips_emit_move (dest, value);
|
2505 |
|
|
return dest;
|
2506 |
|
|
}
|
2507 |
|
|
}
|
2508 |
|
|
|
2509 |
|
|
/* Emit a call sequence with call pattern PATTERN and return the call
|
2510 |
|
|
instruction itself (which is not necessarily the last instruction
|
2511 |
|
|
emitted). ORIG_ADDR is the original, unlegitimized address,
|
2512 |
|
|
ADDR is the legitimized form, and LAZY_P is true if the call
|
2513 |
|
|
address is lazily-bound. */
|
2514 |
|
|
|
2515 |
|
|
static rtx
|
2516 |
|
|
mips_emit_call_insn (rtx pattern, rtx orig_addr, rtx addr, bool lazy_p)
|
2517 |
|
|
{
|
2518 |
|
|
rtx insn, reg;
|
2519 |
|
|
|
2520 |
|
|
insn = emit_call_insn (pattern);
|
2521 |
|
|
|
2522 |
|
|
if (TARGET_MIPS16 && mips_use_pic_fn_addr_reg_p (orig_addr))
|
2523 |
|
|
{
|
2524 |
|
|
/* MIPS16 JALRs only take MIPS16 registers. If the target
|
2525 |
|
|
function requires $25 to be valid on entry, we must copy it
|
2526 |
|
|
there separately. The move instruction can be put in the
|
2527 |
|
|
call's delay slot. */
|
2528 |
|
|
reg = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
|
2529 |
|
|
emit_insn_before (gen_move_insn (reg, addr), insn);
|
2530 |
|
|
use_reg (&CALL_INSN_FUNCTION_USAGE (insn), reg);
|
2531 |
|
|
}
|
2532 |
|
|
|
2533 |
|
|
if (lazy_p)
|
2534 |
|
|
/* Lazy-binding stubs require $gp to be valid on entry. */
|
2535 |
|
|
use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
|
2536 |
|
|
|
2537 |
|
|
if (TARGET_USE_GOT)
|
2538 |
|
|
{
|
2539 |
|
|
/* See the comment above load_call<mode> for details. */
|
2540 |
|
|
use_reg (&CALL_INSN_FUNCTION_USAGE (insn),
|
2541 |
|
|
gen_rtx_REG (Pmode, GOT_VERSION_REGNUM));
|
2542 |
|
|
emit_insn (gen_update_got_version ());
|
2543 |
|
|
}
|
2544 |
|
|
return insn;
|
2545 |
|
|
}
|
2546 |
|
|
|
2547 |
|
|
/* Wrap symbol or label BASE in an UNSPEC address of type SYMBOL_TYPE,
|
2548 |
|
|
then add CONST_INT OFFSET to the result. */
|
2549 |
|
|
|
2550 |
|
|
static rtx
|
2551 |
|
|
mips_unspec_address_offset (rtx base, rtx offset,
|
2552 |
|
|
enum mips_symbol_type symbol_type)
|
2553 |
|
|
{
|
2554 |
|
|
base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
|
2555 |
|
|
UNSPEC_ADDRESS_FIRST + symbol_type);
|
2556 |
|
|
if (offset != const0_rtx)
|
2557 |
|
|
base = gen_rtx_PLUS (Pmode, base, offset);
|
2558 |
|
|
return gen_rtx_CONST (Pmode, base);
|
2559 |
|
|
}
|
2560 |
|
|
|
2561 |
|
|
/* Return an UNSPEC address with underlying address ADDRESS and symbol
|
2562 |
|
|
type SYMBOL_TYPE. */
|
2563 |
|
|
|
2564 |
|
|
rtx
|
2565 |
|
|
mips_unspec_address (rtx address, enum mips_symbol_type symbol_type)
|
2566 |
|
|
{
|
2567 |
|
|
rtx base, offset;
|
2568 |
|
|
|
2569 |
|
|
split_const (address, &base, &offset);
|
2570 |
|
|
return mips_unspec_address_offset (base, offset, symbol_type);
|
2571 |
|
|
}
|
2572 |
|
|
|
2573 |
|
|
/* If OP is an UNSPEC address, return the address to which it refers,
|
2574 |
|
|
otherwise return OP itself. */
|
2575 |
|
|
|
2576 |
|
|
static rtx
|
2577 |
|
|
mips_strip_unspec_address (rtx op)
|
2578 |
|
|
{
|
2579 |
|
|
rtx base, offset;
|
2580 |
|
|
|
2581 |
|
|
split_const (op, &base, &offset);
|
2582 |
|
|
if (UNSPEC_ADDRESS_P (base))
|
2583 |
|
|
op = plus_constant (UNSPEC_ADDRESS (base), INTVAL (offset));
|
2584 |
|
|
return op;
|
2585 |
|
|
}
|
2586 |
|
|
|
2587 |
|
|
/* If mips_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
|
2588 |
|
|
high part to BASE and return the result. Just return BASE otherwise.
|
2589 |
|
|
TEMP is as for mips_force_temporary.
|
2590 |
|
|
|
2591 |
|
|
The returned expression can be used as the first operand to a LO_SUM. */
|
2592 |
|
|
|
2593 |
|
|
static rtx
|
2594 |
|
|
mips_unspec_offset_high (rtx temp, rtx base, rtx addr,
|
2595 |
|
|
enum mips_symbol_type symbol_type)
|
2596 |
|
|
{
|
2597 |
|
|
if (mips_split_p[symbol_type])
|
2598 |
|
|
{
|
2599 |
|
|
addr = gen_rtx_HIGH (Pmode, mips_unspec_address (addr, symbol_type));
|
2600 |
|
|
addr = mips_force_temporary (temp, addr);
|
2601 |
|
|
base = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, addr, base));
|
2602 |
|
|
}
|
2603 |
|
|
return base;
|
2604 |
|
|
}
|
2605 |
|
|
|
2606 |
|
|
/* Return an instruction that copies $gp into register REG. We want
|
2607 |
|
|
GCC to treat the register's value as constant, so that its value
|
2608 |
|
|
can be rematerialized on demand. */
|
2609 |
|
|
|
2610 |
|
|
static rtx
|
2611 |
|
|
gen_load_const_gp (rtx reg)
|
2612 |
|
|
{
|
2613 |
|
|
return (Pmode == SImode
|
2614 |
|
|
? gen_load_const_gp_si (reg)
|
2615 |
|
|
: gen_load_const_gp_di (reg));
|
2616 |
|
|
}
|
2617 |
|
|
|
2618 |
|
|
/* Return a pseudo register that contains the value of $gp throughout
|
2619 |
|
|
the current function. Such registers are needed by MIPS16 functions,
|
2620 |
|
|
for which $gp itself is not a valid base register or addition operand. */
|
2621 |
|
|
|
2622 |
|
|
static rtx
|
2623 |
|
|
mips16_gp_pseudo_reg (void)
|
2624 |
|
|
{
|
2625 |
|
|
if (cfun->machine->mips16_gp_pseudo_rtx == NULL_RTX)
|
2626 |
|
|
cfun->machine->mips16_gp_pseudo_rtx = gen_reg_rtx (Pmode);
|
2627 |
|
|
|
2628 |
|
|
/* Don't emit an instruction to initialize the pseudo register if
|
2629 |
|
|
we are being called from the tree optimizers' cost-calculation
|
2630 |
|
|
routines. */
|
2631 |
|
|
if (!cfun->machine->initialized_mips16_gp_pseudo_p
|
2632 |
|
|
&& (current_ir_type () != IR_GIMPLE || currently_expanding_to_rtl))
|
2633 |
|
|
{
|
2634 |
|
|
rtx insn, scan;
|
2635 |
|
|
|
2636 |
|
|
push_topmost_sequence ();
|
2637 |
|
|
|
2638 |
|
|
scan = get_insns ();
|
2639 |
|
|
while (NEXT_INSN (scan) && !INSN_P (NEXT_INSN (scan)))
|
2640 |
|
|
scan = NEXT_INSN (scan);
|
2641 |
|
|
|
2642 |
|
|
insn = gen_load_const_gp (cfun->machine->mips16_gp_pseudo_rtx);
|
2643 |
|
|
emit_insn_after (insn, scan);
|
2644 |
|
|
|
2645 |
|
|
pop_topmost_sequence ();
|
2646 |
|
|
|
2647 |
|
|
cfun->machine->initialized_mips16_gp_pseudo_p = true;
|
2648 |
|
|
}
|
2649 |
|
|
|
2650 |
|
|
return cfun->machine->mips16_gp_pseudo_rtx;
|
2651 |
|
|
}
|
2652 |
|
|
|
2653 |
|
|
/* Return a base register that holds pic_offset_table_rtx.
|
2654 |
|
|
TEMP, if nonnull, is a scratch Pmode base register. */
|
2655 |
|
|
|
2656 |
|
|
rtx
|
2657 |
|
|
mips_pic_base_register (rtx temp)
|
2658 |
|
|
{
|
2659 |
|
|
if (!TARGET_MIPS16)
|
2660 |
|
|
return pic_offset_table_rtx;
|
2661 |
|
|
|
2662 |
|
|
if (can_create_pseudo_p ())
|
2663 |
|
|
return mips16_gp_pseudo_reg ();
|
2664 |
|
|
|
2665 |
|
|
if (TARGET_USE_GOT)
|
2666 |
|
|
/* The first post-reload split exposes all references to $gp
|
2667 |
|
|
(both uses and definitions). All references must remain
|
2668 |
|
|
explicit after that point.
|
2669 |
|
|
|
2670 |
|
|
It is safe to introduce uses of $gp at any time, so for
|
2671 |
|
|
simplicity, we do that before the split too. */
|
2672 |
|
|
mips_emit_move (temp, pic_offset_table_rtx);
|
2673 |
|
|
else
|
2674 |
|
|
emit_insn (gen_load_const_gp (temp));
|
2675 |
|
|
return temp;
|
2676 |
|
|
}
|
2677 |
|
|
|
2678 |
|
|
/* Return the RHS of a load_call<mode> insn. */
|
2679 |
|
|
|
2680 |
|
|
static rtx
|
2681 |
|
|
mips_unspec_call (rtx reg, rtx symbol)
|
2682 |
|
|
{
|
2683 |
|
|
rtvec vec;
|
2684 |
|
|
|
2685 |
|
|
vec = gen_rtvec (3, reg, symbol, gen_rtx_REG (SImode, GOT_VERSION_REGNUM));
|
2686 |
|
|
return gen_rtx_UNSPEC (Pmode, vec, UNSPEC_LOAD_CALL);
|
2687 |
|
|
}
|
2688 |
|
|
|
2689 |
|
|
/* If SRC is the RHS of a load_call<mode> insn, return the underlying symbol
|
2690 |
|
|
reference. Return NULL_RTX otherwise. */
|
2691 |
|
|
|
2692 |
|
|
static rtx
|
2693 |
|
|
mips_strip_unspec_call (rtx src)
|
2694 |
|
|
{
|
2695 |
|
|
if (GET_CODE (src) == UNSPEC && XINT (src, 1) == UNSPEC_LOAD_CALL)
|
2696 |
|
|
return mips_strip_unspec_address (XVECEXP (src, 0, 1));
|
2697 |
|
|
return NULL_RTX;
|
2698 |
|
|
}
|
2699 |
|
|
|
2700 |
|
|
/* Create and return a GOT reference of type TYPE for address ADDR.
|
2701 |
|
|
TEMP, if nonnull, is a scratch Pmode base register. */
|
2702 |
|
|
|
2703 |
|
|
rtx
|
2704 |
|
|
mips_got_load (rtx temp, rtx addr, enum mips_symbol_type type)
|
2705 |
|
|
{
|
2706 |
|
|
rtx base, high, lo_sum_symbol;
|
2707 |
|
|
|
2708 |
|
|
base = mips_pic_base_register (temp);
|
2709 |
|
|
|
2710 |
|
|
/* If we used the temporary register to load $gp, we can't use
|
2711 |
|
|
it for the high part as well. */
|
2712 |
|
|
if (temp != NULL && reg_overlap_mentioned_p (base, temp))
|
2713 |
|
|
temp = NULL;
|
2714 |
|
|
|
2715 |
|
|
high = mips_unspec_offset_high (temp, base, addr, type);
|
2716 |
|
|
lo_sum_symbol = mips_unspec_address (addr, type);
|
2717 |
|
|
|
2718 |
|
|
if (type == SYMBOL_GOTOFF_CALL)
|
2719 |
|
|
return mips_unspec_call (high, lo_sum_symbol);
|
2720 |
|
|
else
|
2721 |
|
|
return (Pmode == SImode
|
2722 |
|
|
? gen_unspec_gotsi (high, lo_sum_symbol)
|
2723 |
|
|
: gen_unspec_gotdi (high, lo_sum_symbol));
|
2724 |
|
|
}
|
2725 |
|
|
|
/* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise
   it appears in a MEM of that mode.  Return true if ADDR is a legitimate
   constant in that context and can be split into high and low parts.
   If so, and if LOW_OUT is nonnull, emit the high part and store the
   low part in *LOW_OUT.  Leave *LOW_OUT unchanged otherwise.

   TEMP is as for mips_force_temporary and is used to load the high
   part into a register.

   When MODE is MAX_MACHINE_MODE, the low part is guaranteed to be
   a legitimate SET_SRC for an .md pattern, otherwise the low part
   is guaranteed to be a legitimate address for mode MODE.  */
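/* For example, when mips_split_p[SYMBOL_ABSOLUTE] is true, a reference to
   the absolute address "sym" is typically split so that the high part
   becomes "lui $at,%hi(sym)" and the returned low part is the LO_SUM
   "addiu $at,$at,%lo(sym)" (LEA context) or the address "%lo(sym)($at)"
   (MEM context).  This is only an illustration; the exact sequence
   depends on the symbol type and target options.  */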
2738 |
|
|
|
2739 |
|
|
bool
|
2740 |
|
|
mips_split_symbol (rtx temp, rtx addr, enum machine_mode mode, rtx *low_out)
|
2741 |
|
|
{
|
2742 |
|
|
enum mips_symbol_context context;
|
2743 |
|
|
enum mips_symbol_type symbol_type;
|
2744 |
|
|
rtx high;
|
2745 |
|
|
|
2746 |
|
|
context = (mode == MAX_MACHINE_MODE
|
2747 |
|
|
? SYMBOL_CONTEXT_LEA
|
2748 |
|
|
: SYMBOL_CONTEXT_MEM);
|
2749 |
|
|
if (GET_CODE (addr) == HIGH && context == SYMBOL_CONTEXT_LEA)
|
2750 |
|
|
{
|
2751 |
|
|
addr = XEXP (addr, 0);
|
2752 |
|
|
if (mips_symbolic_constant_p (addr, context, &symbol_type)
|
2753 |
|
|
&& mips_symbol_insns (symbol_type, mode) > 0
|
2754 |
|
|
&& mips_split_hi_p[symbol_type])
|
2755 |
|
|
{
|
2756 |
|
|
if (low_out)
|
2757 |
|
|
switch (symbol_type)
|
2758 |
|
|
{
|
2759 |
|
|
case SYMBOL_GOT_PAGE_OFST:
|
2760 |
|
|
/* The high part of a page/ofst pair is loaded from the GOT. */
|
2761 |
|
|
*low_out = mips_got_load (temp, addr, SYMBOL_GOTOFF_PAGE);
|
2762 |
|
|
break;
|
2763 |
|
|
|
2764 |
|
|
default:
|
2765 |
|
|
gcc_unreachable ();
|
2766 |
|
|
}
|
2767 |
|
|
return true;
|
2768 |
|
|
}
|
2769 |
|
|
}
|
2770 |
|
|
else
|
2771 |
|
|
{
|
2772 |
|
|
if (mips_symbolic_constant_p (addr, context, &symbol_type)
|
2773 |
|
|
&& mips_symbol_insns (symbol_type, mode) > 0
|
2774 |
|
|
&& mips_split_p[symbol_type])
|
2775 |
|
|
{
|
2776 |
|
|
if (low_out)
|
2777 |
|
|
switch (symbol_type)
|
2778 |
|
|
{
|
2779 |
|
|
case SYMBOL_GOT_DISP:
|
2780 |
|
|
/* SYMBOL_GOT_DISP symbols are loaded from the GOT. */
|
2781 |
|
|
*low_out = mips_got_load (temp, addr, SYMBOL_GOTOFF_DISP);
|
2782 |
|
|
break;
|
2783 |
|
|
|
2784 |
|
|
case SYMBOL_GP_RELATIVE:
|
2785 |
|
|
high = mips_pic_base_register (temp);
|
2786 |
|
|
*low_out = gen_rtx_LO_SUM (Pmode, high, addr);
|
2787 |
|
|
break;
|
2788 |
|
|
|
2789 |
|
|
default:
|
2790 |
|
|
high = gen_rtx_HIGH (Pmode, copy_rtx (addr));
|
2791 |
|
|
high = mips_force_temporary (temp, high);
|
2792 |
|
|
*low_out = gen_rtx_LO_SUM (Pmode, high, addr);
|
2793 |
|
|
break;
|
2794 |
|
|
}
|
2795 |
|
|
return true;
|
2796 |
|
|
}
|
2797 |
|
|
}
|
2798 |
|
|
return false;
|
2799 |
|
|
}
|
2800 |
|
|
|
/* Return a legitimate address for REG + OFFSET.  TEMP is as for
   mips_force_temporary; it is only needed when OFFSET is not a
   SMALL_OPERAND.  */

static rtx
mips_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
{
  if (!SMALL_OPERAND (offset))
    {
      rtx high;

      if (TARGET_MIPS16)
        {
          /* Load the full offset into a register so that we can use
             an unextended instruction for the address itself.  */
          high = GEN_INT (offset);
          offset = 0;
        }
      else
        {
          /* Leave OFFSET as a 16-bit offset and put the excess in HIGH.
             The addition inside the macro CONST_HIGH_PART may cause an
             overflow, so we need to force a sign-extension check.  */
          high = gen_int_mode (CONST_HIGH_PART (offset), Pmode);
          offset = CONST_LOW_PART (offset);
        }
      high = mips_force_temporary (temp, high);
      reg = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
    }
  return plus_constant (reg, offset);
}

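/* A worked example of the non-MIPS16 case above, assuming the usual
   definitions of CONST_HIGH_PART and CONST_LOW_PART (round to the
   nearest multiple of 0x10000, then take the signed remainder):

        offset 0x12345678 -> high 0x12340000, low  0x5678
        offset 0x1234abcd -> high 0x12350000, low -0x5433

   so the address is built with LUI/ADDU plus a signed 16-bit offset.  */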
/* The __tls_get_addr symbol.  */
static GTY(()) rtx mips_tls_symbol;

/* Return an instruction sequence that calls __tls_get_addr.  SYM is
   the TLS symbol we are referencing and TYPE is the symbol type to use
   (either global dynamic or local dynamic).  V0 is an RTX for the
   return value location.  */

static rtx
mips_call_tls_get_addr (rtx sym, enum mips_symbol_type type, rtx v0)
{
  rtx insn, loc, a0;

  a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST);

  if (!mips_tls_symbol)
    mips_tls_symbol = init_one_libfunc ("__tls_get_addr");

  loc = mips_unspec_address (sym, type);

  start_sequence ();

  emit_insn (gen_rtx_SET (Pmode, a0,
                          gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, loc)));
  insn = mips_expand_call (MIPS_CALL_NORMAL, v0, mips_tls_symbol,
                           const0_rtx, NULL_RTX, false);
  RTL_CONST_CALL_P (insn) = 1;
  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
  insn = get_insns ();

  end_sequence ();

  return insn;
}

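/* For a global-dynamic reference to "x", the sequence built above roughly
   corresponds to setting up the argument register with
   "addiu $4,$28,%tlsgd(x)" and then calling __tls_get_addr, with the
   result coming back in V0.  This is only an illustration: %tlsldm is
   used instead for local-dynamic references, and the call itself is
   emitted by mips_expand_call and may take other forms (such as
   $25-based PIC calls).  */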
/* Return a pseudo register that contains the current thread pointer.  */

static rtx
mips_get_tp (void)
{
  rtx tp;

  tp = gen_reg_rtx (Pmode);
  if (Pmode == DImode)
    emit_insn (gen_tls_get_tp_di (tp));
  else
    emit_insn (gen_tls_get_tp_si (tp));
  return tp;
}
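/* On current Linux targets the tls_get_tp_<mode> patterns typically
   expand to "rdhwr $3,$29", reading the thread pointer from hardware
   register 29; this is only an illustration, the details live in
   mips.md.  */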
2882 |
|
|
|
2883 |
|
|
/* Generate the code to access LOC, a thread-local SYMBOL_REF, and return
|
2884 |
|
|
its address. The return value will be both a valid address and a valid
|
2885 |
|
|
SET_SRC (either a REG or a LO_SUM). */
|
2886 |
|
|
|
2887 |
|
|
static rtx
|
2888 |
|
|
mips_legitimize_tls_address (rtx loc)
|
2889 |
|
|
{
|
2890 |
|
|
rtx dest, insn, v0, tp, tmp1, tmp2, eqv;
|
2891 |
|
|
enum tls_model model;
|
2892 |
|
|
|
2893 |
|
|
if (TARGET_MIPS16)
|
2894 |
|
|
{
|
2895 |
|
|
sorry ("MIPS16 TLS");
|
2896 |
|
|
return gen_reg_rtx (Pmode);
|
2897 |
|
|
}
|
2898 |
|
|
|
2899 |
|
|
model = SYMBOL_REF_TLS_MODEL (loc);
|
  /* Only TARGET_ABICALLS code can have more than one module; other
     code must be static and should not use a GOT.  All TLS models
     reduce to local exec in this situation.  */
2903 |
|
|
if (!TARGET_ABICALLS)
|
2904 |
|
|
model = TLS_MODEL_LOCAL_EXEC;
|
2905 |
|
|
|
2906 |
|
|
switch (model)
|
2907 |
|
|
{
|
2908 |
|
|
case TLS_MODEL_GLOBAL_DYNAMIC:
|
2909 |
|
|
v0 = gen_rtx_REG (Pmode, GP_RETURN);
|
2910 |
|
|
insn = mips_call_tls_get_addr (loc, SYMBOL_TLSGD, v0);
|
2911 |
|
|
dest = gen_reg_rtx (Pmode);
|
2912 |
|
|
emit_libcall_block (insn, dest, v0, loc);
|
2913 |
|
|
break;
|
2914 |
|
|
|
2915 |
|
|
case TLS_MODEL_LOCAL_DYNAMIC:
|
2916 |
|
|
v0 = gen_rtx_REG (Pmode, GP_RETURN);
|
2917 |
|
|
insn = mips_call_tls_get_addr (loc, SYMBOL_TLSLDM, v0);
|
2918 |
|
|
tmp1 = gen_reg_rtx (Pmode);
|
2919 |
|
|
|
2920 |
|
|
/* Attach a unique REG_EQUIV, to allow the RTL optimizers to
|
2921 |
|
|
share the LDM result with other LD model accesses. */
|
2922 |
|
|
eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
|
2923 |
|
|
UNSPEC_TLS_LDM);
|
2924 |
|
|
emit_libcall_block (insn, tmp1, v0, eqv);
|
2925 |
|
|
|
2926 |
|
|
tmp2 = mips_unspec_offset_high (NULL, tmp1, loc, SYMBOL_DTPREL);
|
2927 |
|
|
dest = gen_rtx_LO_SUM (Pmode, tmp2,
|
2928 |
|
|
mips_unspec_address (loc, SYMBOL_DTPREL));
|
2929 |
|
|
break;
|
2930 |
|
|
|
2931 |
|
|
case TLS_MODEL_INITIAL_EXEC:
|
2932 |
|
|
tp = mips_get_tp ();
|
2933 |
|
|
tmp1 = gen_reg_rtx (Pmode);
|
2934 |
|
|
tmp2 = mips_unspec_address (loc, SYMBOL_GOTTPREL);
|
2935 |
|
|
if (Pmode == DImode)
|
2936 |
|
|
emit_insn (gen_load_gotdi (tmp1, pic_offset_table_rtx, tmp2));
|
2937 |
|
|
else
|
2938 |
|
|
emit_insn (gen_load_gotsi (tmp1, pic_offset_table_rtx, tmp2));
|
2939 |
|
|
dest = gen_reg_rtx (Pmode);
|
2940 |
|
|
emit_insn (gen_add3_insn (dest, tmp1, tp));
|
2941 |
|
|
break;
|
2942 |
|
|
|
2943 |
|
|
case TLS_MODEL_LOCAL_EXEC:
|
2944 |
|
|
tp = mips_get_tp ();
|
2945 |
|
|
tmp1 = mips_unspec_offset_high (NULL, tp, loc, SYMBOL_TPREL);
|
2946 |
|
|
dest = gen_rtx_LO_SUM (Pmode, tmp1,
|
2947 |
|
|
mips_unspec_address (loc, SYMBOL_TPREL));
|
2948 |
|
|
break;
|
2949 |
|
|
|
2950 |
|
|
default:
|
2951 |
|
|
gcc_unreachable ();
|
2952 |
|
|
}
|
2953 |
|
|
return dest;
|
2954 |
|
|
}
|
2955 |
|
|
|
/* If X is not a valid address for mode MODE, force it into a register.  */

static rtx
mips_force_address (rtx x, enum machine_mode mode)
{
  if (!mips_legitimate_address_p (mode, x, false))
    x = force_reg (Pmode, x);
  return x;
}

/* This function is used to implement LEGITIMIZE_ADDRESS.  If X can
   be legitimized in a way that the generic machinery might not expect,
   return a new address, otherwise return NULL.  MODE is the mode of
   the memory being accessed.  */

static rtx
mips_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
                         enum machine_mode mode)
{
  rtx base, addr;
  HOST_WIDE_INT offset;

  if (mips_tls_symbol_p (x))
    return mips_legitimize_tls_address (x);

  /* See if the address can split into a high part and a LO_SUM.  */
  if (mips_split_symbol (NULL, x, mode, &addr))
    return mips_force_address (addr, mode);

  /* Handle BASE + OFFSET using mips_add_offset.  */
  mips_split_plus (x, &base, &offset);
  if (offset != 0)
    {
      if (!mips_valid_base_register_p (base, mode, false))
        base = copy_to_mode_reg (Pmode, base);
      addr = mips_add_offset (NULL, base, offset);
      return mips_force_address (addr, mode);
    }

  return x;
}
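/* For example, accessing a word at BASE + 0x12345678 cannot use a single
   16-bit displacement, so the code above rebuilds the address roughly as

        lui     $at,0x1234
        addu    $at,$at,BASE
        lw      DEST,0x5678($at)

   (register choice and scheduling are of course up to later passes).  */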
2997 |
|
|
|
2998 |
|
|
/* Load VALUE into DEST. TEMP is as for mips_force_temporary. */
|
2999 |
|
|
|
3000 |
|
|
void
|
3001 |
|
|
mips_move_integer (rtx temp, rtx dest, unsigned HOST_WIDE_INT value)
|
3002 |
|
|
{
|
3003 |
|
|
struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
|
3004 |
|
|
enum machine_mode mode;
|
3005 |
|
|
unsigned int i, num_ops;
|
3006 |
|
|
rtx x;
|
3007 |
|
|
|
3008 |
|
|
mode = GET_MODE (dest);
|
3009 |
|
|
num_ops = mips_build_integer (codes, value);
|
3010 |
|
|
|
3011 |
|
|
/* Apply each binary operation to X. Invariant: X is a legitimate
|
3012 |
|
|
source operand for a SET pattern. */
|
3013 |
|
|
x = GEN_INT (codes[0].value);
|
3014 |
|
|
for (i = 1; i < num_ops; i++)
|
3015 |
|
|
{
|
3016 |
|
|
if (!can_create_pseudo_p ())
|
3017 |
|
|
{
|
3018 |
|
|
emit_insn (gen_rtx_SET (VOIDmode, temp, x));
|
3019 |
|
|
x = temp;
|
3020 |
|
|
}
|
3021 |
|
|
else
|
3022 |
|
|
x = force_reg (mode, x);
|
3023 |
|
|
x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
|
3024 |
|
|
}
|
3025 |
|
|
|
3026 |
|
|
emit_insn (gen_rtx_SET (VOIDmode, dest, x));
|
3027 |
|
|
}
|
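/* As an example, mips_build_integer splits 0x12345678 into
   "lui DEST,0x1234" followed by "ori DEST,DEST,0x5678", and the loop
   above simply replays those operations one SET at a time.  */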
3028 |
|
|
|
3029 |
|
|
/* Subroutine of mips_legitimize_move. Move constant SRC into register
|
3030 |
|
|
DEST given that SRC satisfies immediate_operand but doesn't satisfy
|
3031 |
|
|
move_operand. */
|
3032 |
|
|
|
3033 |
|
|
static void
|
3034 |
|
|
mips_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
|
3035 |
|
|
{
|
3036 |
|
|
rtx base, offset;
|
3037 |
|
|
|
3038 |
|
|
/* Split moves of big integers into smaller pieces. */
|
3039 |
|
|
if (splittable_const_int_operand (src, mode))
|
3040 |
|
|
{
|
3041 |
|
|
mips_move_integer (dest, dest, INTVAL (src));
|
3042 |
|
|
return;
|
3043 |
|
|
}
|
3044 |
|
|
|
3045 |
|
|
/* Split moves of symbolic constants into high/low pairs. */
|
3046 |
|
|
if (mips_split_symbol (dest, src, MAX_MACHINE_MODE, &src))
|
3047 |
|
|
{
|
3048 |
|
|
emit_insn (gen_rtx_SET (VOIDmode, dest, src));
|
3049 |
|
|
return;
|
3050 |
|
|
}
|
3051 |
|
|
|
3052 |
|
|
/* Generate the appropriate access sequences for TLS symbols. */
|
3053 |
|
|
if (mips_tls_symbol_p (src))
|
3054 |
|
|
{
|
3055 |
|
|
mips_emit_move (dest, mips_legitimize_tls_address (src));
|
3056 |
|
|
return;
|
3057 |
|
|
}
|
3058 |
|
|
|
3059 |
|
|
/* If we have (const (plus symbol offset)), and that expression cannot
|
3060 |
|
|
be forced into memory, load the symbol first and add in the offset.
|
3061 |
|
|
In non-MIPS16 mode, prefer to do this even if the constant _can_ be
|
3062 |
|
|
forced into memory, as it usually produces better code. */
|
3063 |
|
|
split_const (src, &base, &offset);
|
3064 |
|
|
if (offset != const0_rtx
|
3065 |
|
|
&& (targetm.cannot_force_const_mem (src)
|
3066 |
|
|
|| (!TARGET_MIPS16 && can_create_pseudo_p ())))
|
3067 |
|
|
{
|
3068 |
|
|
base = mips_force_temporary (dest, base);
|
3069 |
|
|
mips_emit_move (dest, mips_add_offset (NULL, base, INTVAL (offset)));
|
3070 |
|
|
return;
|
3071 |
|
|
}
|
3072 |
|
|
|
3073 |
|
|
src = force_const_mem (mode, src);
|
3074 |
|
|
|
3075 |
|
|
/* When using explicit relocs, constant pool references are sometimes
|
3076 |
|
|
not legitimate addresses. */
|
3077 |
|
|
mips_split_symbol (dest, XEXP (src, 0), mode, &XEXP (src, 0));
|
3078 |
|
|
mips_emit_move (dest, src);
|
3079 |
|
|
}
|
3080 |
|
|
|
3081 |
|
|
/* If (set DEST SRC) is not a valid move instruction, emit an equivalent
|
3082 |
|
|
sequence that is valid. */
|
3083 |
|
|
|
3084 |
|
|
bool
|
3085 |
|
|
mips_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
|
3086 |
|
|
{
|
3087 |
|
|
if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
|
3088 |
|
|
{
|
3089 |
|
|
mips_emit_move (dest, force_reg (mode, src));
|
3090 |
|
|
return true;
|
3091 |
|
|
}
|
3092 |
|
|
|
3093 |
|
|
/* We need to deal with constants that would be legitimate
|
3094 |
|
|
immediate_operands but aren't legitimate move_operands. */
|
3095 |
|
|
if (CONSTANT_P (src) && !move_operand (src, mode))
|
3096 |
|
|
{
|
3097 |
|
|
mips_legitimize_const_move (mode, dest, src);
|
3098 |
|
|
set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
|
3099 |
|
|
return true;
|
3100 |
|
|
}
|
3101 |
|
|
return false;
|
3102 |
|
|
}
|
3103 |
|
|
|
3104 |
|
|
/* Return true if value X in context CONTEXT is a small-data address
|
3105 |
|
|
that can be rewritten as a LO_SUM. */
|
3106 |
|
|
|
3107 |
|
|
static bool
|
3108 |
|
|
mips_rewrite_small_data_p (rtx x, enum mips_symbol_context context)
|
3109 |
|
|
{
|
3110 |
|
|
enum mips_symbol_type symbol_type;
|
3111 |
|
|
|
3112 |
|
|
return (mips_lo_relocs[SYMBOL_GP_RELATIVE]
|
3113 |
|
|
&& !mips_split_p[SYMBOL_GP_RELATIVE]
|
3114 |
|
|
&& mips_symbolic_constant_p (x, context, &symbol_type)
|
3115 |
|
|
&& symbol_type == SYMBOL_GP_RELATIVE);
|
3116 |
|
|
}
|
3117 |
|
|
|
3118 |
|
|
/* A for_each_rtx callback for mips_small_data_pattern_p. DATA is the
|
3119 |
|
|
containing MEM, or null if none. */
|
3120 |
|
|
|
3121 |
|
|
static int
|
3122 |
|
|
mips_small_data_pattern_1 (rtx *loc, void *data)
|
3123 |
|
|
{
|
3124 |
|
|
enum mips_symbol_context context;
|
3125 |
|
|
|
3126 |
|
|
if (GET_CODE (*loc) == LO_SUM)
|
3127 |
|
|
return -1;
|
3128 |
|
|
|
3129 |
|
|
if (MEM_P (*loc))
|
3130 |
|
|
{
|
3131 |
|
|
if (for_each_rtx (&XEXP (*loc, 0), mips_small_data_pattern_1, *loc))
|
3132 |
|
|
return 1;
|
3133 |
|
|
return -1;
|
3134 |
|
|
}
|
3135 |
|
|
|
3136 |
|
|
context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
|
3137 |
|
|
return mips_rewrite_small_data_p (*loc, context);
|
3138 |
|
|
}
|
3139 |
|
|
|
3140 |
|
|
/* Return true if OP refers to small data symbols directly, not through
|
3141 |
|
|
a LO_SUM. */
|
3142 |
|
|
|
3143 |
|
|
bool
|
3144 |
|
|
mips_small_data_pattern_p (rtx op)
|
3145 |
|
|
{
|
3146 |
|
|
return for_each_rtx (&op, mips_small_data_pattern_1, NULL);
|
3147 |
|
|
}
|
3148 |
|
|
|
3149 |
|
|
/* A for_each_rtx callback, used by mips_rewrite_small_data.
|
3150 |
|
|
DATA is the containing MEM, or null if none. */
|
3151 |
|
|
|
3152 |
|
|
static int
|
3153 |
|
|
mips_rewrite_small_data_1 (rtx *loc, void *data)
|
3154 |
|
|
{
|
3155 |
|
|
enum mips_symbol_context context;
|
3156 |
|
|
|
3157 |
|
|
if (MEM_P (*loc))
|
3158 |
|
|
{
|
3159 |
|
|
for_each_rtx (&XEXP (*loc, 0), mips_rewrite_small_data_1, *loc);
|
3160 |
|
|
return -1;
|
3161 |
|
|
}
|
3162 |
|
|
|
3163 |
|
|
context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
|
3164 |
|
|
if (mips_rewrite_small_data_p (*loc, context))
|
3165 |
|
|
*loc = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, *loc);
|
3166 |
|
|
|
3167 |
|
|
if (GET_CODE (*loc) == LO_SUM)
|
3168 |
|
|
return -1;
|
3169 |
|
|
|
3170 |
|
|
return 0;
|
3171 |
|
|
}
|
3172 |
|
|
|
3173 |
|
|
/* Rewrite instruction pattern PATTERN so that it refers to small data
|
3174 |
|
|
using explicit relocations. */
|
3175 |
|
|
|
3176 |
|
|
rtx
|
3177 |
|
|
mips_rewrite_small_data (rtx pattern)
|
3178 |
|
|
{
|
3179 |
|
|
pattern = copy_insn (pattern);
|
3180 |
|
|
for_each_rtx (&pattern, mips_rewrite_small_data_1, NULL);
|
3181 |
|
|
return pattern;
|
3182 |
|
|
}
|
3183 |
|
|
|
3184 |
|
|
/* We need a lot of little routines to check the range of MIPS16 immediate
|
3185 |
|
|
operands. */
|
3186 |
|
|
|
3187 |
|
|
static int
|
3188 |
|
|
m16_check_op (rtx op, int low, int high, int mask)
|
3189 |
|
|
{
|
3190 |
|
|
return (CONST_INT_P (op)
|
3191 |
|
|
&& IN_RANGE (INTVAL (op), low, high)
|
3192 |
|
|
&& (INTVAL (op) & mask) == 0);
|
3193 |
|
|
}
|
3194 |
|
|
|
3195 |
|
|
int
|
3196 |
|
|
m16_uimm3_b (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
|
3197 |
|
|
{
|
3198 |
|
|
return m16_check_op (op, 0x1, 0x8, 0);
|
3199 |
|
|
}
|
3200 |
|
|
|
3201 |
|
|
int
|
3202 |
|
|
m16_simm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
|
3203 |
|
|
{
|
3204 |
|
|
return m16_check_op (op, -0x8, 0x7, 0);
|
3205 |
|
|
}
|
3206 |
|
|
|
3207 |
|
|
int
|
3208 |
|
|
m16_nsimm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
|
3209 |
|
|
{
|
3210 |
|
|
return m16_check_op (op, -0x7, 0x8, 0);
|
3211 |
|
|
}
|
3212 |
|
|
|
3213 |
|
|
int
|
3214 |
|
|
m16_simm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
|
3215 |
|
|
{
|
3216 |
|
|
return m16_check_op (op, -0x10, 0xf, 0);
|
3217 |
|
|
}
|
3218 |
|
|
|
3219 |
|
|
int
|
3220 |
|
|
m16_nsimm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
|
3221 |
|
|
{
|
3222 |
|
|
return m16_check_op (op, -0xf, 0x10, 0);
|
3223 |
|
|
}
|
3224 |
|
|
|
3225 |
|
|
int
|
3226 |
|
|
m16_uimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
|
3227 |
|
|
{
|
3228 |
|
|
return m16_check_op (op, -0x10 << 2, 0xf << 2, 3);
|
3229 |
|
|
}
|
3230 |
|
|
|
3231 |
|
|
int
|
3232 |
|
|
m16_nuimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
|
3233 |
|
|
{
|
3234 |
|
|
return m16_check_op (op, -0xf << 2, 0x10 << 2, 3);
|
3235 |
|
|
}
|
3236 |
|
|
|
3237 |
|
|
int
|
3238 |
|
|
m16_simm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
|
3239 |
|
|
{
|
3240 |
|
|
return m16_check_op (op, -0x80, 0x7f, 0);
|
3241 |
|
|
}
|
3242 |
|
|
|
3243 |
|
|
int
|
3244 |
|
|
m16_nsimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
|
3245 |
|
|
{
|
3246 |
|
|
return m16_check_op (op, -0x7f, 0x80, 0);
|
3247 |
|
|
}
|
3248 |
|
|
|
3249 |
|
|
int
|
3250 |
|
|
m16_uimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
|
3251 |
|
|
{
|
3252 |
|
|
return m16_check_op (op, 0x0, 0xff, 0);
|
3253 |
|
|
}
|
3254 |
|
|
|
3255 |
|
|
int
|
3256 |
|
|
m16_nuimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
|
3257 |
|
|
{
|
3258 |
|
|
return m16_check_op (op, -0xff, 0x0, 0);
|
3259 |
|
|
}
|
3260 |
|
|
|
3261 |
|
|
int
|
3262 |
|
|
m16_uimm8_m1_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
|
3263 |
|
|
{
|
3264 |
|
|
return m16_check_op (op, -0x1, 0xfe, 0);
|
3265 |
|
|
}
|
3266 |
|
|
|
3267 |
|
|
int
|
3268 |
|
|
m16_uimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
|
3269 |
|
|
{
|
3270 |
|
|
return m16_check_op (op, 0x0, 0xff << 2, 3);
|
3271 |
|
|
}
|
3272 |
|
|
|
3273 |
|
|
int
|
3274 |
|
|
m16_nuimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
|
3275 |
|
|
{
|
3276 |
|
|
return m16_check_op (op, -0xff << 2, 0x0, 3);
|
3277 |
|
|
}
|
3278 |
|
|
|
3279 |
|
|
int
|
3280 |
|
|
m16_simm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
|
3281 |
|
|
{
|
3282 |
|
|
return m16_check_op (op, -0x80 << 3, 0x7f << 3, 7);
|
3283 |
|
|
}
|
3284 |
|
|
|
3285 |
|
|
int
|
3286 |
|
|
m16_nsimm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
|
3287 |
|
|
{
|
3288 |
|
|
return m16_check_op (op, -0x7f << 3, 0x80 << 3, 7);
|
3289 |
|
|
}
|
3290 |
|
|
|
3291 |
|
|
/* The cost of loading values from the constant pool. It should be
|
3292 |
|
|
larger than the cost of any constant we want to synthesize inline. */
|
3293 |
|
|
#define CONSTANT_POOL_COST COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 8)
|
3294 |
|
|
|
3295 |
|
|
/* Return the cost of X when used as an operand to the MIPS16 instruction
|
3296 |
|
|
that implements CODE. Return -1 if there is no such instruction, or if
|
3297 |
|
|
X is not a valid immediate operand for it. */
|
3298 |
|
|
|
3299 |
|
|
static int
|
3300 |
|
|
mips16_constant_cost (int code, HOST_WIDE_INT x)
|
3301 |
|
|
{
|
3302 |
|
|
switch (code)
|
3303 |
|
|
{
|
3304 |
|
|
case ASHIFT:
|
3305 |
|
|
case ASHIFTRT:
|
3306 |
|
|
case LSHIFTRT:
|
3307 |
|
|
/* Shifts by between 1 and 8 bits (inclusive) are unextended,
|
3308 |
|
|
other shifts are extended. The shift patterns truncate the shift
|
3309 |
|
|
count to the right size, so there are no out-of-range values. */
|
3310 |
|
|
if (IN_RANGE (x, 1, 8))
|
3311 |
|
|
return 0;
|
3312 |
|
|
return COSTS_N_INSNS (1);
|
3313 |
|
|
|
3314 |
|
|
case PLUS:
|
3315 |
|
|
if (IN_RANGE (x, -128, 127))
|
3316 |
|
|
return 0;
|
3317 |
|
|
if (SMALL_OPERAND (x))
|
3318 |
|
|
return COSTS_N_INSNS (1);
|
3319 |
|
|
return -1;
|
3320 |
|
|
|
3321 |
|
|
case LEU:
|
3322 |
|
|
/* Like LE, but reject the always-true case. */
|
3323 |
|
|
if (x == -1)
|
3324 |
|
|
return -1;
|
3325 |
|
|
case LE:
|
3326 |
|
|
/* We add 1 to the immediate and use SLT. */
|
3327 |
|
|
x += 1;
|
3328 |
|
|
case XOR:
|
3329 |
|
|
/* We can use CMPI for an xor with an unsigned 16-bit X. */
|
3330 |
|
|
case LT:
|
3331 |
|
|
case LTU:
|
3332 |
|
|
if (IN_RANGE (x, 0, 255))
|
3333 |
|
|
return 0;
|
3334 |
|
|
if (SMALL_OPERAND_UNSIGNED (x))
|
3335 |
|
|
return COSTS_N_INSNS (1);
|
3336 |
|
|
return -1;
|
3337 |
|
|
|
3338 |
|
|
case EQ:
|
3339 |
|
|
case NE:
|
3340 |
|
|
/* Equality comparisons with 0 are cheap. */
|
3341 |
|
|
if (x == 0)
|
3342 |
|
|
return 0;
|
3343 |
|
|
return -1;
|
3344 |
|
|
|
3345 |
|
|
default:
|
3346 |
|
|
return -1;
|
3347 |
|
|
}
|
3348 |
|
|
}
|
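/* For instance, in the PLUS case above an addition of 100 fits the
   unextended 8-bit form and is costed as 0, an addition of 1000 needs
   an extended (32-bit) MIPS16 instruction and is costed as one insn,
   and anything outside the signed 16-bit range returns -1 so that the
   constant is forced into a register.  */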
3349 |
|
|
|
3350 |
|
|
/* Return true if there is a non-MIPS16 instruction that implements CODE
|
3351 |
|
|
and if that instruction accepts X as an immediate operand. */
|
3352 |
|
|
|
3353 |
|
|
static int
|
3354 |
|
|
mips_immediate_operand_p (int code, HOST_WIDE_INT x)
|
3355 |
|
|
{
|
3356 |
|
|
switch (code)
|
3357 |
|
|
{
|
3358 |
|
|
case ASHIFT:
|
3359 |
|
|
case ASHIFTRT:
|
3360 |
|
|
case LSHIFTRT:
|
3361 |
|
|
/* All shift counts are truncated to a valid constant. */
|
3362 |
|
|
return true;
|
3363 |
|
|
|
3364 |
|
|
case ROTATE:
|
3365 |
|
|
case ROTATERT:
|
3366 |
|
|
/* Likewise rotates, if the target supports rotates at all. */
|
3367 |
|
|
return ISA_HAS_ROR;
|
3368 |
|
|
|
3369 |
|
|
case AND:
|
3370 |
|
|
case IOR:
|
3371 |
|
|
case XOR:
|
3372 |
|
|
/* These instructions take 16-bit unsigned immediates. */
|
3373 |
|
|
return SMALL_OPERAND_UNSIGNED (x);
|
3374 |
|
|
|
3375 |
|
|
case PLUS:
|
3376 |
|
|
case LT:
|
3377 |
|
|
case LTU:
|
3378 |
|
|
/* These instructions take 16-bit signed immediates. */
|
3379 |
|
|
return SMALL_OPERAND (x);
|
3380 |
|
|
|
3381 |
|
|
case EQ:
|
3382 |
|
|
case NE:
|
3383 |
|
|
case GT:
|
3384 |
|
|
case GTU:
|
3385 |
|
|
/* The "immediate" forms of these instructions are really
|
3386 |
|
|
implemented as comparisons with register 0. */
|
3387 |
|
|
return x == 0;
|
3388 |
|
|
|
3389 |
|
|
case GE:
|
3390 |
|
|
case GEU:
|
3391 |
|
|
/* Likewise, meaning that the only valid immediate operand is 1. */
|
3392 |
|
|
return x == 1;
|
3393 |
|
|
|
3394 |
|
|
case LE:
|
3395 |
|
|
/* We add 1 to the immediate and use SLT. */
|
3396 |
|
|
return SMALL_OPERAND (x + 1);
|
3397 |
|
|
|
3398 |
|
|
case LEU:
|
3399 |
|
|
/* Likewise SLTU, but reject the always-true case. */
|
3400 |
|
|
return SMALL_OPERAND (x + 1) && x + 1 != 0;
|
3401 |
|
|
|
3402 |
|
|
case SIGN_EXTRACT:
|
3403 |
|
|
case ZERO_EXTRACT:
|
3404 |
|
|
/* The bit position and size are immediate operands. */
|
3405 |
|
|
return ISA_HAS_EXT_INS;
|
3406 |
|
|
|
3407 |
|
|
default:
|
3408 |
|
|
/* By default assume that $0 can be used for 0. */
|
3409 |
|
|
return x == 0;
|
3410 |
|
|
}
|
3411 |
|
|
}
|
3412 |
|
|
|
/* Return the cost of binary operation X, given that the instruction
   sequence for a word-sized or smaller operation has cost SINGLE_COST
   and that the sequence of a double-word operation has cost DOUBLE_COST.
   If SPEED is true, optimize for speed otherwise optimize for size.  */

static int
mips_binary_cost (rtx x, int single_cost, int double_cost, bool speed)
{
  int cost;

  if (GET_MODE_SIZE (GET_MODE (x)) == UNITS_PER_WORD * 2)
    cost = double_cost;
  else
    cost = single_cost;
  return (cost
          + rtx_cost (XEXP (x, 0), SET, speed)
          + rtx_cost (XEXP (x, 1), GET_CODE (x), speed));
}

/* Return the cost of floating-point multiplications of mode MODE.  */

static int
mips_fp_mult_cost (enum machine_mode mode)
{
  return mode == DFmode ? mips_cost->fp_mult_df : mips_cost->fp_mult_sf;
}

/* Return the cost of floating-point divisions of mode MODE.  */

static int
mips_fp_div_cost (enum machine_mode mode)
{
  return mode == DFmode ? mips_cost->fp_div_df : mips_cost->fp_div_sf;
}
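/* For example, a DImode IOR on a 32-bit target is a double-word
   operation and is therefore costed as two single-word ORs plus the
   cost of its two operands, whereas the same IOR in SImode is costed
   as a single instruction.  */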
3447 |
|
|
|
3448 |
|
|
/* Return the cost of sign-extending OP to mode MODE, not including the
|
3449 |
|
|
cost of OP itself. */
|
3450 |
|
|
|
3451 |
|
|
static int
|
3452 |
|
|
mips_sign_extend_cost (enum machine_mode mode, rtx op)
|
3453 |
|
|
{
|
3454 |
|
|
if (MEM_P (op))
|
3455 |
|
|
/* Extended loads are as cheap as unextended ones. */
|
3456 |
|
|
return 0;
|
3457 |
|
|
|
3458 |
|
|
if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
|
3459 |
|
|
/* A sign extension from SImode to DImode in 64-bit mode is free. */
|
3460 |
|
|
return 0;
|
3461 |
|
|
|
3462 |
|
|
if (ISA_HAS_SEB_SEH || GENERATE_MIPS16E)
|
3463 |
|
|
/* We can use SEB or SEH. */
|
3464 |
|
|
return COSTS_N_INSNS (1);
|
3465 |
|
|
|
3466 |
|
|
/* We need to use a shift left and a shift right. */
|
3467 |
|
|
return COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 2);
|
3468 |
|
|
}
|
3469 |
|
|
|
3470 |
|
|
/* Return the cost of zero-extending OP to mode MODE, not including the
|
3471 |
|
|
cost of OP itself. */
|
3472 |
|
|
|
3473 |
|
|
static int
|
3474 |
|
|
mips_zero_extend_cost (enum machine_mode mode, rtx op)
|
3475 |
|
|
{
|
3476 |
|
|
if (MEM_P (op))
|
3477 |
|
|
/* Extended loads are as cheap as unextended ones. */
|
3478 |
|
|
return 0;
|
3479 |
|
|
|
3480 |
|
|
if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
|
3481 |
|
|
/* We need a shift left by 32 bits and a shift right by 32 bits. */
|
3482 |
|
|
return COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 2);
|
3483 |
|
|
|
3484 |
|
|
if (GENERATE_MIPS16E)
|
3485 |
|
|
/* We can use ZEB or ZEH. */
|
3486 |
|
|
return COSTS_N_INSNS (1);
|
3487 |
|
|
|
3488 |
|
|
if (TARGET_MIPS16)
|
3489 |
|
|
/* We need to load 0xff or 0xffff into a register and use AND. */
|
3490 |
|
|
return COSTS_N_INSNS (GET_MODE (op) == QImode ? 2 : 3);
|
3491 |
|
|
|
3492 |
|
|
/* We can use ANDI. */
|
3493 |
|
|
return COSTS_N_INSNS (1);
|
3494 |
|
|
}
|
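/* So, for instance, zero-extending a QImode register value costs one
   ANDI on a normal MIPS target, two instructions on MIPS16 (load 0xff,
   then AND, unless the MIPS16e ZEB/ZEH instructions are available),
   and nothing at all when the source is a MEM, because the load itself
   can be done with LBU.  */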
3495 |
|
|
|
3496 |
|
|
/* Implement TARGET_RTX_COSTS. */
|
3497 |
|
|
|
3498 |
|
|
static bool
|
3499 |
|
|
mips_rtx_costs (rtx x, int code, int outer_code, int *total, bool speed)
|
3500 |
|
|
{
|
3501 |
|
|
enum machine_mode mode = GET_MODE (x);
|
3502 |
|
|
bool float_mode_p = FLOAT_MODE_P (mode);
|
3503 |
|
|
int cost;
|
3504 |
|
|
rtx addr;
|
3505 |
|
|
|
3506 |
|
|
/* The cost of a COMPARE is hard to define for MIPS. COMPAREs don't
|
3507 |
|
|
appear in the instruction stream, and the cost of a comparison is
|
3508 |
|
|
really the cost of the branch or scc condition. At the time of
|
3509 |
|
|
writing, GCC only uses an explicit outer COMPARE code when optabs
|
3510 |
|
|
is testing whether a constant is expensive enough to force into a
|
3511 |
|
|
register. We want optabs to pass such constants through the MIPS
|
3512 |
|
|
expanders instead, so make all constants very cheap here. */
|
3513 |
|
|
if (outer_code == COMPARE)
|
3514 |
|
|
{
|
3515 |
|
|
gcc_assert (CONSTANT_P (x));
|
3516 |
|
|
*total = 0;
|
3517 |
|
|
return true;
|
3518 |
|
|
}
|
3519 |
|
|
|
3520 |
|
|
switch (code)
|
3521 |
|
|
{
|
3522 |
|
|
case CONST_INT:
|
3523 |
|
|
/* Treat *clear_upper32-style ANDs as having zero cost in the
|
3524 |
|
|
second operand. The cost is entirely in the first operand.
|
3525 |
|
|
|
3526 |
|
|
??? This is needed because we would otherwise try to CSE
|
3527 |
|
|
the constant operand. Although that's the right thing for
|
3528 |
|
|
instructions that continue to be a register operation throughout
|
3529 |
|
|
compilation, it is disastrous for instructions that could
|
3530 |
|
|
later be converted into a memory operation. */
|
3531 |
|
|
if (TARGET_64BIT
|
3532 |
|
|
&& outer_code == AND
|
3533 |
|
|
&& UINTVAL (x) == 0xffffffff)
|
3534 |
|
|
{
|
3535 |
|
|
*total = 0;
|
3536 |
|
|
return true;
|
3537 |
|
|
}
|
3538 |
|
|
|
3539 |
|
|
if (TARGET_MIPS16)
|
3540 |
|
|
{
|
3541 |
|
|
cost = mips16_constant_cost (outer_code, INTVAL (x));
|
3542 |
|
|
if (cost >= 0)
|
3543 |
|
|
{
|
3544 |
|
|
*total = cost;
|
3545 |
|
|
return true;
|
3546 |
|
|
}
|
3547 |
|
|
}
|
3548 |
|
|
else
|
3549 |
|
|
{
|
3550 |
|
|
/* When not optimizing for size, we care more about the cost
|
3551 |
|
|
of hot code, and hot code is often in a loop. If a constant
|
3552 |
|
|
operand needs to be forced into a register, we will often be
|
3553 |
|
|
able to hoist the constant load out of the loop, so the load
|
3554 |
|
|
should not contribute to the cost. */
|
3555 |
|
|
if (speed || mips_immediate_operand_p (outer_code, INTVAL (x)))
|
3556 |
|
|
{
|
3557 |
|
|
*total = 0;
|
3558 |
|
|
return true;
|
3559 |
|
|
}
|
3560 |
|
|
}
|
3561 |
|
|
/* Fall through. */
|
3562 |
|
|
|
3563 |
|
|
case CONST:
|
3564 |
|
|
case SYMBOL_REF:
|
3565 |
|
|
case LABEL_REF:
|
3566 |
|
|
case CONST_DOUBLE:
|
3567 |
|
|
if (force_to_mem_operand (x, VOIDmode))
|
3568 |
|
|
{
|
3569 |
|
|
*total = COSTS_N_INSNS (1);
|
3570 |
|
|
return true;
|
3571 |
|
|
}
|
3572 |
|
|
cost = mips_const_insns (x);
|
3573 |
|
|
if (cost > 0)
|
3574 |
|
|
{
|
3575 |
|
|
/* If the constant is likely to be stored in a GPR, SETs of
|
3576 |
|
|
single-insn constants are as cheap as register sets; we
|
3577 |
|
|
never want to CSE them.
|
3578 |
|
|
|
3579 |
|
|
Don't reduce the cost of storing a floating-point zero in
|
3580 |
|
|
FPRs. If we have a zero in an FPR for other reasons, we
|
3581 |
|
|
can get better cfg-cleanup and delayed-branch results by
|
3582 |
|
|
using it consistently, rather than using $0 sometimes and
|
3583 |
|
|
an FPR at other times. Also, moves between floating-point
|
3584 |
|
|
registers are sometimes cheaper than (D)MTC1 $0. */
|
3585 |
|
|
if (cost == 1
|
3586 |
|
|
&& outer_code == SET
|
3587 |
|
|
&& !(float_mode_p && TARGET_HARD_FLOAT))
|
3588 |
|
|
cost = 0;
|
3589 |
|
|
/* When non-MIPS16 code loads a constant N>1 times, we rarely
|
3590 |
|
|
want to CSE the constant itself. It is usually better to
|
3591 |
|
|
have N copies of the last operation in the sequence and one
|
3592 |
|
|
shared copy of the other operations. (Note that this is
|
3593 |
|
|
not true for MIPS16 code, where the final operation in the
|
3594 |
|
|
sequence is often an extended instruction.)
|
3595 |
|
|
|
3596 |
|
|
Also, if we have a CONST_INT, we don't know whether it is
|
3597 |
|
|
for a word or doubleword operation, so we cannot rely on
|
3598 |
|
|
the result of mips_build_integer. */
|
3599 |
|
|
else if (!TARGET_MIPS16
|
3600 |
|
|
&& (outer_code == SET || mode == VOIDmode))
|
3601 |
|
|
cost = 1;
|
3602 |
|
|
*total = COSTS_N_INSNS (cost);
|
3603 |
|
|
return true;
|
3604 |
|
|
}
|
3605 |
|
|
/* The value will need to be fetched from the constant pool. */
|
3606 |
|
|
*total = CONSTANT_POOL_COST;
|
3607 |
|
|
return true;
|
3608 |
|
|
|
3609 |
|
|
case MEM:
|
3610 |
|
|
/* If the address is legitimate, return the number of
|
3611 |
|
|
instructions it needs. */
|
3612 |
|
|
addr = XEXP (x, 0);
|
3613 |
|
|
cost = mips_address_insns (addr, mode, true);
|
3614 |
|
|
if (cost > 0)
|
3615 |
|
|
{
|
3616 |
|
|
*total = COSTS_N_INSNS (cost + 1);
|
3617 |
|
|
return true;
|
3618 |
|
|
}
|
3619 |
|
|
/* Check for a scaled indexed address. */
|
3620 |
|
|
if (mips_lwxs_address_p (addr))
|
3621 |
|
|
{
|
3622 |
|
|
*total = COSTS_N_INSNS (2);
|
3623 |
|
|
return true;
|
3624 |
|
|
}
|
3625 |
|
|
/* Otherwise use the default handling. */
|
3626 |
|
|
return false;
|
3627 |
|
|
|
3628 |
|
|
case FFS:
|
3629 |
|
|
*total = COSTS_N_INSNS (6);
|
3630 |
|
|
return false;
|
3631 |
|
|
|
3632 |
|
|
case NOT:
|
3633 |
|
|
*total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 2 : 1);
|
3634 |
|
|
return false;
|
3635 |
|
|
|
3636 |
|
|
case AND:
|
3637 |
|
|
/* Check for a *clear_upper32 pattern and treat it like a zero
|
3638 |
|
|
extension. See the pattern's comment for details. */
|
3639 |
|
|
if (TARGET_64BIT
|
3640 |
|
|
&& mode == DImode
|
3641 |
|
|
&& CONST_INT_P (XEXP (x, 1))
|
3642 |
|
|
&& UINTVAL (XEXP (x, 1)) == 0xffffffff)
|
3643 |
|
|
{
|
3644 |
|
|
*total = (mips_zero_extend_cost (mode, XEXP (x, 0))
|
3645 |
|
|
+ rtx_cost (XEXP (x, 0), SET, speed));
|
3646 |
|
|
return true;
|
3647 |
|
|
}
|
3648 |
|
|
/* Fall through. */
|
3649 |
|
|
|
3650 |
|
|
case IOR:
|
3651 |
|
|
case XOR:
|
3652 |
|
|
/* Double-word operations use two single-word operations. */
|
3653 |
|
|
*total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (2),
|
3654 |
|
|
speed);
|
3655 |
|
|
return true;
|
3656 |
|
|
|
3657 |
|
|
case ASHIFT:
|
3658 |
|
|
case ASHIFTRT:
|
3659 |
|
|
case LSHIFTRT:
|
3660 |
|
|
case ROTATE:
|
3661 |
|
|
case ROTATERT:
|
3662 |
|
|
if (CONSTANT_P (XEXP (x, 1)))
|
3663 |
|
|
*total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4),
|
3664 |
|
|
speed);
|
3665 |
|
|
else
|
3666 |
|
|
*total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (12),
|
3667 |
|
|
speed);
|
3668 |
|
|
return true;
|
3669 |
|
|
|
3670 |
|
|
case ABS:
|
3671 |
|
|
if (float_mode_p)
|
3672 |
|
|
*total = mips_cost->fp_add;
|
3673 |
|
|
else
|
3674 |
|
|
*total = COSTS_N_INSNS (4);
|
3675 |
|
|
return false;
|
3676 |
|
|
|
3677 |
|
|
case LO_SUM:
|
3678 |
|
|
/* Low-part immediates need an extended MIPS16 instruction. */
|
3679 |
|
|
*total = (COSTS_N_INSNS (TARGET_MIPS16 ? 2 : 1)
|
3680 |
|
|
+ rtx_cost (XEXP (x, 0), SET, speed));
|
3681 |
|
|
return true;
|
3682 |
|
|
|
3683 |
|
|
case LT:
|
3684 |
|
|
case LTU:
|
3685 |
|
|
case LE:
|
3686 |
|
|
case LEU:
|
3687 |
|
|
case GT:
|
3688 |
|
|
case GTU:
|
3689 |
|
|
case GE:
|
3690 |
|
|
case GEU:
|
3691 |
|
|
case EQ:
|
3692 |
|
|
case NE:
|
3693 |
|
|
case UNORDERED:
|
3694 |
|
|
case LTGT:
|
3695 |
|
|
/* Branch comparisons have VOIDmode, so use the first operand's
|
3696 |
|
|
mode instead. */
|
3697 |
|
|
mode = GET_MODE (XEXP (x, 0));
|
3698 |
|
|
if (FLOAT_MODE_P (mode))
|
3699 |
|
|
{
|
3700 |
|
|
*total = mips_cost->fp_add;
|
3701 |
|
|
return false;
|
3702 |
|
|
}
|
3703 |
|
|
*total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4),
|
3704 |
|
|
speed);
|
3705 |
|
|
return true;
|
3706 |
|
|
|
3707 |
|
|
case MINUS:
|
3708 |
|
|
if (float_mode_p
|
3709 |
|
|
&& (ISA_HAS_NMADD4_NMSUB4 (mode) || ISA_HAS_NMADD3_NMSUB3 (mode))
|
3710 |
|
|
&& TARGET_FUSED_MADD
|
3711 |
|
|
&& !HONOR_NANS (mode)
|
3712 |
|
|
&& !HONOR_SIGNED_ZEROS (mode))
|
3713 |
|
|
{
|
3714 |
|
|
/* See if we can use NMADD or NMSUB. See mips.md for the
|
3715 |
|
|
associated patterns. */
|
3716 |
|
|
rtx op0 = XEXP (x, 0);
|
3717 |
|
|
rtx op1 = XEXP (x, 1);
|
3718 |
|
|
if (GET_CODE (op0) == MULT && GET_CODE (XEXP (op0, 0)) == NEG)
|
3719 |
|
|
{
|
3720 |
|
|
*total = (mips_fp_mult_cost (mode)
|
3721 |
|
|
+ rtx_cost (XEXP (XEXP (op0, 0), 0), SET, speed)
|
3722 |
|
|
+ rtx_cost (XEXP (op0, 1), SET, speed)
|
3723 |
|
|
+ rtx_cost (op1, SET, speed));
|
3724 |
|
|
return true;
|
3725 |
|
|
}
|
3726 |
|
|
if (GET_CODE (op1) == MULT)
|
3727 |
|
|
{
|
3728 |
|
|
*total = (mips_fp_mult_cost (mode)
|
3729 |
|
|
+ rtx_cost (op0, SET, speed)
|
3730 |
|
|
+ rtx_cost (XEXP (op1, 0), SET, speed)
|
3731 |
|
|
+ rtx_cost (XEXP (op1, 1), SET, speed));
|
3732 |
|
|
return true;
|
3733 |
|
|
}
|
3734 |
|
|
}
|
3735 |
|
|
/* Fall through. */
|
3736 |
|
|
|
3737 |
|
|
case PLUS:
|
3738 |
|
|
if (float_mode_p)
|
3739 |
|
|
{
|
3740 |
|
|
/* If this is part of a MADD or MSUB, treat the PLUS as
|
3741 |
|
|
being free. */
|
3742 |
|
|
if (ISA_HAS_FP4
|
3743 |
|
|
&& TARGET_FUSED_MADD
|
3744 |
|
|
&& GET_CODE (XEXP (x, 0)) == MULT)
|
3745 |
|
|
*total = 0;
|
3746 |
|
|
else
|
3747 |
|
|
*total = mips_cost->fp_add;
|
3748 |
|
|
return false;
|
3749 |
|
|
}
|
3750 |
|
|
|
3751 |
|
|
/* Double-word operations require three single-word operations and
|
3752 |
|
|
an SLTU. The MIPS16 version then needs to move the result of
|
3753 |
|
|
the SLTU from $24 to a MIPS16 register. */
|
3754 |
|
|
*total = mips_binary_cost (x, COSTS_N_INSNS (1),
|
3755 |
|
|
COSTS_N_INSNS (TARGET_MIPS16 ? 5 : 4),
|
3756 |
|
|
speed);
|
3757 |
|
|
return true;
|
3758 |
|
|
|
3759 |
|
|
case NEG:
|
3760 |
|
|
if (float_mode_p
|
3761 |
|
|
&& (ISA_HAS_NMADD4_NMSUB4 (mode) || ISA_HAS_NMADD3_NMSUB3 (mode))
|
3762 |
|
|
&& TARGET_FUSED_MADD
|
3763 |
|
|
&& !HONOR_NANS (mode)
|
3764 |
|
|
&& HONOR_SIGNED_ZEROS (mode))
|
3765 |
|
|
{
|
3766 |
|
|
/* See if we can use NMADD or NMSUB. See mips.md for the
|
3767 |
|
|
associated patterns. */
|
3768 |
|
|
rtx op = XEXP (x, 0);
|
3769 |
|
|
if ((GET_CODE (op) == PLUS || GET_CODE (op) == MINUS)
|
3770 |
|
|
&& GET_CODE (XEXP (op, 0)) == MULT)
|
3771 |
|
|
{
|
3772 |
|
|
*total = (mips_fp_mult_cost (mode)
|
3773 |
|
|
+ rtx_cost (XEXP (XEXP (op, 0), 0), SET, speed)
|
3774 |
|
|
+ rtx_cost (XEXP (XEXP (op, 0), 1), SET, speed)
|
3775 |
|
|
+ rtx_cost (XEXP (op, 1), SET, speed));
|
3776 |
|
|
return true;
|
3777 |
|
|
}
|
3778 |
|
|
}
|
3779 |
|
|
|
3780 |
|
|
if (float_mode_p)
|
3781 |
|
|
*total = mips_cost->fp_add;
|
3782 |
|
|
else
|
3783 |
|
|
*total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 4 : 1);
|
3784 |
|
|
return false;
|
3785 |
|
|
|
3786 |
|
|
case MULT:
|
3787 |
|
|
if (float_mode_p)
|
3788 |
|
|
*total = mips_fp_mult_cost (mode);
|
3789 |
|
|
else if (mode == DImode && !TARGET_64BIT)
|
3790 |
|
|
/* Synthesized from 2 mulsi3s, 1 mulsidi3 and two additions,
|
3791 |
|
|
where the mulsidi3 always includes an MFHI and an MFLO. */
|
3792 |
|
|
*total = (speed
|
3793 |
|
|
? mips_cost->int_mult_si * 3 + 6
|
3794 |
|
|
: COSTS_N_INSNS (ISA_HAS_MUL3 ? 7 : 9));
|
3795 |
|
|
else if (!speed)
|
3796 |
|
|
*total = (ISA_HAS_MUL3 ? 1 : 2);
|
3797 |
|
|
else if (mode == DImode)
|
3798 |
|
|
*total = mips_cost->int_mult_di;
|
3799 |
|
|
else
|
3800 |
|
|
*total = mips_cost->int_mult_si;
|
3801 |
|
|
return false;
|
3802 |
|
|
|
3803 |
|
|
case DIV:
|
3804 |
|
|
/* Check for a reciprocal. */
|
3805 |
|
|
if (float_mode_p
|
3806 |
|
|
&& ISA_HAS_FP4
|
3807 |
|
|
&& flag_unsafe_math_optimizations
|
3808 |
|
|
&& XEXP (x, 0) == CONST1_RTX (mode))
|
3809 |
|
|
{
|
3810 |
|
|
if (outer_code == SQRT || GET_CODE (XEXP (x, 1)) == SQRT)
|
3811 |
|
|
/* An rsqrt<mode>a or rsqrt<mode>b pattern. Count the
|
3812 |
|
|
division as being free. */
|
3813 |
|
|
*total = rtx_cost (XEXP (x, 1), SET, speed);
|
3814 |
|
|
else
|
3815 |
|
|
*total = (mips_fp_div_cost (mode)
|
3816 |
|
|
+ rtx_cost (XEXP (x, 1), SET, speed));
|
3817 |
|
|
return true;
|
3818 |
|
|
}
|
3819 |
|
|
/* Fall through. */
|
3820 |
|
|
|
3821 |
|
|
case SQRT:
|
3822 |
|
|
case MOD:
|
3823 |
|
|
if (float_mode_p)
|
3824 |
|
|
{
|
3825 |
|
|
*total = mips_fp_div_cost (mode);
|
3826 |
|
|
return false;
|
3827 |
|
|
}
|
3828 |
|
|
/* Fall through. */
|
3829 |
|
|
|
3830 |
|
|
case UDIV:
|
3831 |
|
|
case UMOD:
|
3832 |
|
|
if (!speed)
|
3833 |
|
|
{
|
3834 |
|
|
/* It is our responsibility to make division by a power of 2
|
3835 |
|
|
as cheap as 2 register additions if we want the division
|
3836 |
|
|
expanders to be used for such operations; see the setting
|
3837 |
|
|
of sdiv_pow2_cheap in optabs.c. Using (D)DIV for MIPS16
|
3838 |
|
|
should always produce shorter code than using
|
3839 |
|
|
expand_sdiv2_pow2. */
|
3840 |
|
|
if (TARGET_MIPS16
|
3841 |
|
|
&& CONST_INT_P (XEXP (x, 1))
|
3842 |
|
|
&& exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
|
3843 |
|
|
{
|
3844 |
|
|
*total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), SET, speed);
|
3845 |
|
|
return true;
|
3846 |
|
|
}
|
3847 |
|
|
*total = COSTS_N_INSNS (mips_idiv_insns ());
|
3848 |
|
|
}
|
3849 |
|
|
else if (mode == DImode)
|
3850 |
|
|
*total = mips_cost->int_div_di;
|
3851 |
|
|
else
|
3852 |
|
|
*total = mips_cost->int_div_si;
|
3853 |
|
|
return false;
|
3854 |
|
|
|
3855 |
|
|
case SIGN_EXTEND:
|
3856 |
|
|
*total = mips_sign_extend_cost (mode, XEXP (x, 0));
|
3857 |
|
|
return false;
|
3858 |
|
|
|
3859 |
|
|
case ZERO_EXTEND:
|
3860 |
|
|
*total = mips_zero_extend_cost (mode, XEXP (x, 0));
|
3861 |
|
|
return false;
|
3862 |
|
|
|
3863 |
|
|
case FLOAT:
|
3864 |
|
|
case UNSIGNED_FLOAT:
|
3865 |
|
|
case FIX:
|
3866 |
|
|
case FLOAT_EXTEND:
|
3867 |
|
|
case FLOAT_TRUNCATE:
|
3868 |
|
|
*total = mips_cost->fp_add;
|
3869 |
|
|
return false;
|
3870 |
|
|
|
3871 |
|
|
default:
|
3872 |
|
|
return false;
|
3873 |
|
|
}
|
3874 |
|
|
}
|
3875 |
|
|
|
3876 |
|
|
/* Implement TARGET_ADDRESS_COST. */
|
3877 |
|
|
|
3878 |
|
|
static int
|
3879 |
|
|
mips_address_cost (rtx addr, bool speed ATTRIBUTE_UNUSED)
|
3880 |
|
|
{
|
3881 |
|
|
return mips_address_insns (addr, SImode, false);
|
3882 |
|
|
}
|
3883 |
|
|
|
3884 |
|
|
/* Information about a single instruction in a multi-instruction
|
3885 |
|
|
asm sequence. */
|
3886 |
|
|
struct mips_multi_member {
|
3887 |
|
|
/* True if this is a label, false if it is code. */
|
3888 |
|
|
bool is_label_p;
|
3889 |
|
|
|
3890 |
|
|
/* The output_asm_insn format of the instruction. */
|
3891 |
|
|
const char *format;
|
3892 |
|
|
|
3893 |
|
|
/* The operands to the instruction. */
|
3894 |
|
|
rtx operands[MAX_RECOG_OPERANDS];
|
3895 |
|
|
};
|
3896 |
|
|
typedef struct mips_multi_member mips_multi_member;
|
3897 |
|
|
|
3898 |
|
|
/* Vector definitions for the above. */
|
3899 |
|
|
DEF_VEC_O(mips_multi_member);
|
3900 |
|
|
DEF_VEC_ALLOC_O(mips_multi_member, heap);
|
3901 |
|
|
|
3902 |
|
|
/* The instructions that make up the current multi-insn sequence. */
|
3903 |
|
|
static VEC (mips_multi_member, heap) *mips_multi_members;
|
3904 |
|
|
|
3905 |
|
|
/* How many instructions (as opposed to labels) are in the current
|
3906 |
|
|
multi-insn sequence. */
|
3907 |
|
|
static unsigned int mips_multi_num_insns;
|
3908 |
|
|
|
3909 |
|
|
/* Start a new multi-insn sequence. */
|
3910 |
|
|
|
3911 |
|
|
static void
|
3912 |
|
|
mips_multi_start (void)
|
3913 |
|
|
{
|
3914 |
|
|
VEC_truncate (mips_multi_member, mips_multi_members, 0);
|
3915 |
|
|
mips_multi_num_insns = 0;
|
3916 |
|
|
}
|
3917 |
|
|
|
3918 |
|
|
/* Add a new, uninitialized member to the current multi-insn sequence. */
|
3919 |
|
|
|
3920 |
|
|
static struct mips_multi_member *
|
3921 |
|
|
mips_multi_add (void)
|
3922 |
|
|
{
|
3923 |
|
|
return VEC_safe_push (mips_multi_member, heap, mips_multi_members, 0);
|
3924 |
|
|
}
|
3925 |
|
|
|
3926 |
|
|
/* Add a normal insn with the given asm format to the current multi-insn
|
3927 |
|
|
sequence. The other arguments are a null-terminated list of operands. */
|
3928 |
|
|
|
3929 |
|
|
static void
|
3930 |
|
|
mips_multi_add_insn (const char *format, ...)
|
3931 |
|
|
{
|
3932 |
|
|
struct mips_multi_member *member;
|
3933 |
|
|
va_list ap;
|
3934 |
|
|
unsigned int i;
|
3935 |
|
|
rtx op;
|
3936 |
|
|
|
3937 |
|
|
member = mips_multi_add ();
|
3938 |
|
|
member->is_label_p = false;
|
3939 |
|
|
member->format = format;
|
3940 |
|
|
va_start (ap, format);
|
3941 |
|
|
i = 0;
|
3942 |
|
|
while ((op = va_arg (ap, rtx)))
|
3943 |
|
|
member->operands[i++] = op;
|
3944 |
|
|
va_end (ap);
|
3945 |
|
|
mips_multi_num_insns++;
|
3946 |
|
|
}
|
3947 |
|
|
|
3948 |
|
|
/* Add the given label definition to the current multi-insn sequence.
|
3949 |
|
|
The definition should include the colon. */
|
3950 |
|
|
|
3951 |
|
|
static void
|
3952 |
|
|
mips_multi_add_label (const char *label)
|
3953 |
|
|
{
|
3954 |
|
|
struct mips_multi_member *member;
|
3955 |
|
|
|
3956 |
|
|
member = mips_multi_add ();
|
3957 |
|
|
member->is_label_p = true;
|
3958 |
|
|
member->format = label;
|
3959 |
|
|
}
|
3960 |
|
|
|
3961 |
|
|
/* Return the index of the last member of the current multi-insn sequence. */
|
3962 |
|
|
|
3963 |
|
|
static unsigned int
|
3964 |
|
|
mips_multi_last_index (void)
|
3965 |
|
|
{
|
3966 |
|
|
return VEC_length (mips_multi_member, mips_multi_members) - 1;
|
3967 |
|
|
}
|
3968 |
|
|
|
3969 |
|
|
/* Add a copy of an existing instruction to the current multi-insn
|
3970 |
|
|
sequence. I is the index of the instruction that should be copied. */
|
3971 |
|
|
|
3972 |
|
|
static void
|
3973 |
|
|
mips_multi_copy_insn (unsigned int i)
|
3974 |
|
|
{
|
3975 |
|
|
struct mips_multi_member *member;
|
3976 |
|
|
|
3977 |
|
|
member = mips_multi_add ();
|
3978 |
|
|
memcpy (member, VEC_index (mips_multi_member, mips_multi_members, i),
|
3979 |
|
|
sizeof (*member));
|
3980 |
|
|
gcc_assert (!member->is_label_p);
|
3981 |
|
|
}
|
3982 |
|
|
|
3983 |
|
|
/* Change the operand of an existing instruction in the current
|
3984 |
|
|
multi-insn sequence. I is the index of the instruction,
|
3985 |
|
|
OP is the index of the operand, and X is the new value. */
|
3986 |
|
|
|
3987 |
|
|
static void
|
3988 |
|
|
mips_multi_set_operand (unsigned int i, unsigned int op, rtx x)
|
3989 |
|
|
{
|
3990 |
|
|
VEC_index (mips_multi_member, mips_multi_members, i)->operands[op] = x;
|
3991 |
|
|
}
|
3992 |
|
|
|
3993 |
|
|
/* Write out the asm code for the current multi-insn sequence. */
|
3994 |
|
|
|
3995 |
|
|
static void
|
3996 |
|
|
mips_multi_write (void)
|
3997 |
|
|
{
|
3998 |
|
|
struct mips_multi_member *member;
|
3999 |
|
|
unsigned int i;
|
4000 |
|
|
|
4001 |
|
|
for (i = 0;
|
4002 |
|
|
VEC_iterate (mips_multi_member, mips_multi_members, i, member);
|
4003 |
|
|
i++)
|
4004 |
|
|
if (member->is_label_p)
|
4005 |
|
|
fprintf (asm_out_file, "%s\n", member->format);
|
4006 |
|
|
else
|
4007 |
|
|
output_asm_insn (member->format, member->operands);
|
4008 |
|
|
}
|
4009 |
|
|
|
4010 |
|
|
/* Return one word of double-word value OP, taking into account the fixed
|
4011 |
|
|
endianness of certain registers. HIGH_P is true to select the high part,
|
4012 |
|
|
false to select the low part. */
|
4013 |
|
|
|
4014 |
|
|
rtx
|
4015 |
|
|
mips_subword (rtx op, bool high_p)
|
4016 |
|
|
{
|
4017 |
|
|
unsigned int byte, offset;
|
4018 |
|
|
enum machine_mode mode;
|
4019 |
|
|
|
4020 |
|
|
mode = GET_MODE (op);
|
4021 |
|
|
if (mode == VOIDmode)
|
4022 |
|
|
mode = TARGET_64BIT ? TImode : DImode;
|
4023 |
|
|
|
4024 |
|
|
if (TARGET_BIG_ENDIAN ? !high_p : high_p)
|
4025 |
|
|
byte = UNITS_PER_WORD;
|
4026 |
|
|
else
|
4027 |
|
|
byte = 0;
|
4028 |
|
|
|
4029 |
|
|
if (FP_REG_RTX_P (op))
|
4030 |
|
|
{
|
4031 |
|
|
/* Paired FPRs are always ordered little-endian. */
|
4032 |
|
|
offset = (UNITS_PER_WORD < UNITS_PER_HWFPVALUE ? high_p : byte != 0);
|
4033 |
|
|
return gen_rtx_REG (word_mode, REGNO (op) + offset);
|
4034 |
|
|
}
|
4035 |
|
|
|
4036 |
|
|
if (MEM_P (op))
|
4037 |
|
|
return mips_rewrite_small_data (adjust_address (op, word_mode, byte));
|
4038 |
|
|
|
4039 |
|
|
return simplify_gen_subreg (word_mode, op, mode, byte);
|
4040 |
|
|
}
|
4041 |
|
|
|
4042 |
|
|
/* Return true if a 64-bit move from SRC to DEST should be split into two. */
|
4043 |
|
|
|
4044 |
|
|
bool
|
4045 |
|
|
mips_split_64bit_move_p (rtx dest, rtx src)
|
4046 |
|
|
{
|
4047 |
|
|
if (TARGET_64BIT)
|
4048 |
|
|
return false;
|
4049 |
|
|
|
4050 |
|
|
/* FPR-to-FPR moves can be done in a single instruction, if they're
|
4051 |
|
|
allowed at all. */
|
4052 |
|
|
if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
|
4053 |
|
|
return false;
|
4054 |
|
|
|
4055 |
|
|
/* Check for floating-point loads and stores. */
|
4056 |
|
|
if (ISA_HAS_LDC1_SDC1)
|
4057 |
|
|
{
|
4058 |
|
|
if (FP_REG_RTX_P (dest) && MEM_P (src))
|
4059 |
|
|
return false;
|
4060 |
|
|
if (FP_REG_RTX_P (src) && MEM_P (dest))
|
4061 |
|
|
return false;
|
4062 |
|
|
}
|
4063 |
|
|
return true;
|
4064 |
|
|
}
|
4065 |
|
|
|
4066 |
|
|
/* Split a doubleword move from SRC to DEST. On 32-bit targets,
|
4067 |
|
|
this function handles 64-bit moves for which mips_split_64bit_move_p
|
4068 |
|
|
holds. For 64-bit targets, this function handles 128-bit moves. */
|
4069 |
|
|
|
4070 |
|
|
void
|
4071 |
|
|
mips_split_doubleword_move (rtx dest, rtx src)
|
4072 |
|
|
{
|
4073 |
|
|
rtx low_dest;
|
4074 |
|
|
|
4075 |
|
|
if (FP_REG_RTX_P (dest) || FP_REG_RTX_P (src))
|
4076 |
|
|
{
|
4077 |
|
|
if (!TARGET_64BIT && GET_MODE (dest) == DImode)
|
4078 |
|
|
emit_insn (gen_move_doubleword_fprdi (dest, src));
|
4079 |
|
|
else if (!TARGET_64BIT && GET_MODE (dest) == DFmode)
|
4080 |
|
|
emit_insn (gen_move_doubleword_fprdf (dest, src));
|
4081 |
|
|
else if (!TARGET_64BIT && GET_MODE (dest) == V2SFmode)
|
4082 |
|
|
emit_insn (gen_move_doubleword_fprv2sf (dest, src));
|
4083 |
|
|
else if (!TARGET_64BIT && GET_MODE (dest) == V2SImode)
|
4084 |
|
|
emit_insn (gen_move_doubleword_fprv2si (dest, src));
|
4085 |
|
|
else if (!TARGET_64BIT && GET_MODE (dest) == V4HImode)
|
4086 |
|
|
emit_insn (gen_move_doubleword_fprv4hi (dest, src));
|
4087 |
|
|
else if (!TARGET_64BIT && GET_MODE (dest) == V8QImode)
|
4088 |
|
|
emit_insn (gen_move_doubleword_fprv8qi (dest, src));
|
4089 |
|
|
else if (TARGET_64BIT && GET_MODE (dest) == TFmode)
|
4090 |
|
|
emit_insn (gen_move_doubleword_fprtf (dest, src));
|
4091 |
|
|
else
|
4092 |
|
|
gcc_unreachable ();
|
4093 |
|
|
}
|
4094 |
|
|
else if (REG_P (dest) && REGNO (dest) == MD_REG_FIRST)
|
4095 |
|
|
{
|
4096 |
|
|
low_dest = mips_subword (dest, false);
|
4097 |
|
|
mips_emit_move (low_dest, mips_subword (src, false));
|
4098 |
|
|
if (TARGET_64BIT)
|
4099 |
|
|
emit_insn (gen_mthidi_ti (dest, mips_subword (src, true), low_dest));
|
4100 |
|
|
else
|
4101 |
|
|
emit_insn (gen_mthisi_di (dest, mips_subword (src, true), low_dest));
|
4102 |
|
|
}
|
4103 |
|
|
else if (REG_P (src) && REGNO (src) == MD_REG_FIRST)
|
4104 |
|
|
{
|
4105 |
|
|
mips_emit_move (mips_subword (dest, false), mips_subword (src, false));
|
4106 |
|
|
if (TARGET_64BIT)
|
4107 |
|
|
emit_insn (gen_mfhidi_ti (mips_subword (dest, true), src));
|
4108 |
|
|
else
|
4109 |
|
|
emit_insn (gen_mfhisi_di (mips_subword (dest, true), src));
|
4110 |
|
|
}
|
4111 |
|
|
else
|
4112 |
|
|
{
|
4113 |
|
|
/* The operation can be split into two normal moves. Decide in
|
4114 |
|
|
which order to do them. */
|
4115 |
|
|
low_dest = mips_subword (dest, false);
|
4116 |
|
|
if (REG_P (low_dest)
|
4117 |
|
|
&& reg_overlap_mentioned_p (low_dest, src))
|
4118 |
|
|
{
|
4119 |
|
|
mips_emit_move (mips_subword (dest, true), mips_subword (src, true));
|
4120 |
|
|
mips_emit_move (low_dest, mips_subword (src, false));
|
4121 |
|
|
}
|
4122 |
|
|
else
|
4123 |
|
|
{
|
4124 |
|
|
mips_emit_move (low_dest, mips_subword (src, false));
|
4125 |
|
|
mips_emit_move (mips_subword (dest, true), mips_subword (src, true));
|
4126 |
|
|
}
|
4127 |
|
|
}
|
4128 |
|
|
}
|
4129 |
|
|
|
4130 |
|
|
/* Return the appropriate instructions to move SRC into DEST. Assume
|
4131 |
|
|
that SRC is operand 1 and DEST is operand 0. */
|
4132 |
|
|
|
4133 |
|
|
const char *
|
4134 |
|
|
mips_output_move (rtx dest, rtx src)
|
4135 |
|
|
{
|
4136 |
|
|
enum rtx_code dest_code, src_code;
|
4137 |
|
|
enum machine_mode mode;
|
4138 |
|
|
enum mips_symbol_type symbol_type;
|
4139 |
|
|
bool dbl_p;
|
4140 |
|
|
|
4141 |
|
|
dest_code = GET_CODE (dest);
|
4142 |
|
|
src_code = GET_CODE (src);
|
4143 |
|
|
mode = GET_MODE (dest);
|
4144 |
|
  dbl_p = (GET_MODE_SIZE (mode) == 8);

  if (dbl_p && mips_split_64bit_move_p (dest, src))
    return "#";

  if ((src_code == REG && GP_REG_P (REGNO (src)))
      || (!TARGET_MIPS16 && src == CONST0_RTX (mode)))
    {
      if (dest_code == REG)
        {
          if (GP_REG_P (REGNO (dest)))
            return "move\t%0,%z1";

          /* Moves to HI are handled by special .md insns.  */
          if (REGNO (dest) == LO_REGNUM)
            return "mtlo\t%z1";

          if (DSP_ACC_REG_P (REGNO (dest)))
            {
              static char retval[] = "mt__\t%z1,%q0";

              retval[2] = reg_names[REGNO (dest)][4];
              retval[3] = reg_names[REGNO (dest)][5];
              return retval;
            }

          if (FP_REG_P (REGNO (dest)))
            return dbl_p ? "dmtc1\t%z1,%0" : "mtc1\t%z1,%0";

          if (ALL_COP_REG_P (REGNO (dest)))
            {
              static char retval[] = "dmtc_\t%z1,%0";

              retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
              return dbl_p ? retval : retval + 1;
            }
        }
      if (dest_code == MEM)
        switch (GET_MODE_SIZE (mode))
          {
          case 1: return "sb\t%z1,%0";
          case 2: return "sh\t%z1,%0";
          case 4: return "sw\t%z1,%0";
          case 8: return "sd\t%z1,%0";
          }
    }
  if (dest_code == REG && GP_REG_P (REGNO (dest)))
    {
      if (src_code == REG)
        {
          /* Moves from HI are handled by special .md insns.  */
          if (REGNO (src) == LO_REGNUM)
            {
              /* When generating VR4120 or VR4130 code, we use MACC and
                 DMACC instead of MFLO.  This avoids both the normal
                 MIPS III HI/LO hazards and the errata related to
                 -mfix-vr4130.  */
              if (ISA_HAS_MACCHI)
                return dbl_p ? "dmacc\t%0,%.,%." : "macc\t%0,%.,%.";
              return "mflo\t%0";
            }

          if (DSP_ACC_REG_P (REGNO (src)))
            {
              static char retval[] = "mf__\t%0,%q1";

              retval[2] = reg_names[REGNO (src)][4];
              retval[3] = reg_names[REGNO (src)][5];
              return retval;
            }

          if (FP_REG_P (REGNO (src)))
            return dbl_p ? "dmfc1\t%0,%1" : "mfc1\t%0,%1";

          if (ALL_COP_REG_P (REGNO (src)))
            {
              static char retval[] = "dmfc_\t%0,%1";

              retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
              return dbl_p ? retval : retval + 1;
            }

          if (ST_REG_P (REGNO (src)) && ISA_HAS_8CC)
            return "lui\t%0,0x3f80\n\tmovf\t%0,%.,%1";
        }

      if (src_code == MEM)
        switch (GET_MODE_SIZE (mode))
          {
          case 1: return "lbu\t%0,%1";
          case 2: return "lhu\t%0,%1";
          case 4: return "lw\t%0,%1";
          case 8: return "ld\t%0,%1";
          }

      if (src_code == CONST_INT)
        {
          /* Don't use the X format for the operand itself, because that
             will give out-of-range numbers for 64-bit hosts and 32-bit
             targets.  */
          if (!TARGET_MIPS16)
            return "li\t%0,%1\t\t\t# %X1";

          if (SMALL_OPERAND_UNSIGNED (INTVAL (src)))
            return "li\t%0,%1";

          if (SMALL_OPERAND_UNSIGNED (-INTVAL (src)))
            return "#";
        }

      if (src_code == HIGH)
        return TARGET_MIPS16 ? "#" : "lui\t%0,%h1";

      if (CONST_GP_P (src))
        return "move\t%0,%1";

      if (mips_symbolic_constant_p (src, SYMBOL_CONTEXT_LEA, &symbol_type)
          && mips_lo_relocs[symbol_type] != 0)
        {
          /* A signed 16-bit constant formed by applying a relocation
             operator to a symbolic address.  */
          gcc_assert (!mips_split_p[symbol_type]);
          return "li\t%0,%R1";
        }

      if (symbolic_operand (src, VOIDmode))
        {
          gcc_assert (TARGET_MIPS16
                      ? TARGET_MIPS16_TEXT_LOADS
                      : !TARGET_EXPLICIT_RELOCS);
          return dbl_p ? "dla\t%0,%1" : "la\t%0,%1";
        }
    }
  if (src_code == REG && FP_REG_P (REGNO (src)))
    {
      if (dest_code == REG && FP_REG_P (REGNO (dest)))
        {
          if (GET_MODE (dest) == V2SFmode)
            return "mov.ps\t%0,%1";
          else
            return dbl_p ? "mov.d\t%0,%1" : "mov.s\t%0,%1";
        }

      if (dest_code == MEM)
        return dbl_p ? "sdc1\t%1,%0" : "swc1\t%1,%0";
    }
  if (dest_code == REG && FP_REG_P (REGNO (dest)))
    {
      if (src_code == MEM)
        return dbl_p ? "ldc1\t%0,%1" : "lwc1\t%0,%1";
    }
  if (dest_code == REG && ALL_COP_REG_P (REGNO (dest)) && src_code == MEM)
    {
      static char retval[] = "l_c_\t%0,%1";

      retval[1] = (dbl_p ? 'd' : 'w');
      retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
      return retval;
    }
  if (dest_code == MEM && src_code == REG && ALL_COP_REG_P (REGNO (src)))
    {
      static char retval[] = "s_c_\t%1,%0";

      retval[1] = (dbl_p ? 'd' : 'w');
      retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
      return retval;
    }
  gcc_unreachable ();
}

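/* Worked example (editorial note, not from the original source): given the
   SImode move

       (set (reg:SI 2 $v0) (mem:SI (reg:SI 4 $a0)))

   dest_code == REG, src_code == MEM and GET_MODE_SIZE (mode) == 4, so the
   code above selects the "lw\t%0,%1" template, which prints as something
   like "lw $v0,0($a0)" once the operands are substituted.  The register
   choice is illustrative only; the real operands come from the insn being
   output.  */
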
/* Return true if CMP1 is a suitable second operand for integer ordering
   test CODE.  See also the *sCC patterns in mips.md.  */

static bool
mips_int_order_operand_ok_p (enum rtx_code code, rtx cmp1)
{
  switch (code)
    {
    case GT:
    case GTU:
      return reg_or_0_operand (cmp1, VOIDmode);

    case GE:
    case GEU:
      return !TARGET_MIPS16 && cmp1 == const1_rtx;

    case LT:
    case LTU:
      return arith_operand (cmp1, VOIDmode);

    case LE:
      return sle_operand (cmp1, VOIDmode);

    case LEU:
      return sleu_operand (cmp1, VOIDmode);

    default:
      gcc_unreachable ();
    }
}

/* Return true if *CMP1 (of mode MODE) is a valid second operand for
   integer ordering test *CODE, or if an equivalent combination can
   be formed by adjusting *CODE and *CMP1.  When returning true, update
   *CODE and *CMP1 with the chosen code and operand, otherwise leave
   them alone.  */

static bool
mips_canonicalize_int_order_test (enum rtx_code *code, rtx *cmp1,
                                  enum machine_mode mode)
{
  HOST_WIDE_INT plus_one;

  if (mips_int_order_operand_ok_p (*code, *cmp1))
    return true;

  if (CONST_INT_P (*cmp1))
    switch (*code)
      {
      case LE:
        plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
        if (INTVAL (*cmp1) < plus_one)
          {
            *code = LT;
            *cmp1 = force_reg (mode, GEN_INT (plus_one));
            return true;
          }
        break;

      case LEU:
        plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
        if (plus_one != 0)
          {
            *code = LTU;
            *cmp1 = force_reg (mode, GEN_INT (plus_one));
            return true;
          }
        break;

      default:
        break;
      }
  return false;
}

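/* Worked example (editorial note, not from the original source): MIPS has
   SLT/SLTU but no "set on less than or equal", so a test such as
   "x <= 100" is canonicalized above into "x < 101", with 101 forced into
   a register.  A test like "x <= 0x7fffffff" in SImode is left alone,
   because adding 1 would wrap to the most negative value; the caller then
   has to fall back on the inverted test.  The constants are illustrative
   only.  */
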
/* Compare CMP0 and CMP1 using ordering test CODE and store the result
   in TARGET.  CMP0 and TARGET are register_operands.  If INVERT_PTR
   is nonnull, it's OK to set TARGET to the inverse of the result and
   flip *INVERT_PTR instead.  */

static void
mips_emit_int_order_test (enum rtx_code code, bool *invert_ptr,
                          rtx target, rtx cmp0, rtx cmp1)
{
  enum machine_mode mode;

  /* First see if there is a MIPS instruction that can do this operation.
     If not, try doing the same for the inverse operation.  If that also
     fails, force CMP1 into a register and try again.  */
  mode = GET_MODE (cmp0);
  if (mips_canonicalize_int_order_test (&code, &cmp1, mode))
    mips_emit_binary (code, target, cmp0, cmp1);
  else
    {
      enum rtx_code inv_code = reverse_condition (code);
      if (!mips_canonicalize_int_order_test (&inv_code, &cmp1, mode))
        {
          cmp1 = force_reg (mode, cmp1);
          mips_emit_int_order_test (code, invert_ptr, target, cmp0, cmp1);
        }
      else if (invert_ptr == 0)
        {
          rtx inv_target;

          inv_target = mips_force_binary (GET_MODE (target),
                                          inv_code, cmp0, cmp1);
          mips_emit_binary (XOR, target, inv_target, const1_rtx);
        }
      else
        {
          *invert_ptr = !*invert_ptr;
          mips_emit_binary (inv_code, target, cmp0, cmp1);
        }
    }
}

/* Return a register that is zero iff CMP0 and CMP1 are equal.
   The register will have the same mode as CMP0.  */

static rtx
mips_zero_if_equal (rtx cmp0, rtx cmp1)
{
  if (cmp1 == const0_rtx)
    return cmp0;

  if (uns_arith_operand (cmp1, VOIDmode))
    return expand_binop (GET_MODE (cmp0), xor_optab,
                         cmp0, cmp1, 0, 0, OPTAB_DIRECT);

  return expand_binop (GET_MODE (cmp0), sub_optab,
                       cmp0, cmp1, 0, 0, OPTAB_DIRECT);
}

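/* Worked example (editorial note, not from the original source): for a
   test such as "x == 10", mips_zero_if_equal returns the value of
   "x ^ 10" (an XORI, since 10 fits an unsigned 16-bit immediate), and the
   caller branches on that value being zero.  When the second operand does
   not qualify for the XOR form it falls back on a subtraction, which is
   likewise zero exactly when the operands are equal.  The constant is
   illustrative only.  */
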
/* Convert *CODE into a code that can be used in a floating-point
   scc instruction (C.cond.fmt).  Return true if the values of
   the condition code registers will be inverted, with 0 indicating
   that the condition holds.  */

static bool
mips_reversed_fp_cond (enum rtx_code *code)
{
  switch (*code)
    {
    case NE:
    case LTGT:
    case ORDERED:
      *code = reverse_condition_maybe_unordered (*code);
      return true;

    default:
      return false;
    }
}

/* Convert a comparison into something that can be used in a branch or
   conditional move.  On entry, *OP0 and *OP1 are the values being
   compared and *CODE is the code used to compare them.

   Update *CODE, *OP0 and *OP1 so that they describe the final comparison.
   If NEED_EQ_NE_P, then only EQ or NE comparisons against zero are possible,
   otherwise any standard branch condition can be used.  The standard branch
   conditions are:

   - EQ or NE between two registers.
   - any comparison between a register and zero.  */

static void
mips_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1, bool need_eq_ne_p)
{
  rtx cmp_op0 = *op0;
  rtx cmp_op1 = *op1;

  if (GET_MODE_CLASS (GET_MODE (*op0)) == MODE_INT)
    {
      if (!need_eq_ne_p && *op1 == const0_rtx)
        ;
      else if (*code == EQ || *code == NE)
        {
          if (need_eq_ne_p)
            {
              *op0 = mips_zero_if_equal (cmp_op0, cmp_op1);
              *op1 = const0_rtx;
            }
          else
            *op1 = force_reg (GET_MODE (cmp_op0), cmp_op1);
        }
      else
        {
          /* The comparison needs a separate scc instruction.  Store the
             result of the scc in *OP0 and compare it against zero.  */
          bool invert = false;
          *op0 = gen_reg_rtx (GET_MODE (cmp_op0));
          mips_emit_int_order_test (*code, &invert, *op0, cmp_op0, cmp_op1);
          *code = (invert ? EQ : NE);
          *op1 = const0_rtx;
        }
    }
  else if (ALL_FIXED_POINT_MODE_P (GET_MODE (cmp_op0)))
    {
      *op0 = gen_rtx_REG (CCDSPmode, CCDSP_CC_REGNUM);
      mips_emit_binary (*code, *op0, cmp_op0, cmp_op1);
      *code = NE;
      *op1 = const0_rtx;
    }
  else
    {
      enum rtx_code cmp_code;

      /* Floating-point tests use a separate C.cond.fmt comparison to
         set a condition code register.  The branch or conditional move
         will then compare that register against zero.

         Set CMP_CODE to the code of the comparison instruction and
         *CODE to the code that the branch or move should use.  */
      cmp_code = *code;
      *code = mips_reversed_fp_cond (&cmp_code) ? EQ : NE;
      *op0 = (ISA_HAS_8CC
              ? gen_reg_rtx (CCmode)
              : gen_rtx_REG (CCmode, FPSW_REGNUM));
      *op1 = const0_rtx;
      mips_emit_binary (cmp_code, *op0, cmp_op0, cmp_op1);
    }
}

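/* Worked example (editorial note, not from the original source): the FPU
   has no C.ne.fmt condition, so a branch on "a != b" in DFmode is emitted
   as "c.eq.d $fcc0,$f12,$f14" followed by a branch that fires when the
   condition register is 0 (bc1f).  mips_reversed_fp_cond reports the
   inversion, and mips_emit_compare therefore rewrites *CODE from NE to EQ
   against the condition register.  The register names are illustrative
   only.  */
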
/* Try performing the comparison in OPERANDS[1], whose arms are OPERANDS[2]
   and OPERANDS[3].  Store the result in OPERANDS[0].

   On 64-bit targets, the mode of the comparison and target will always be
   SImode, thus possibly narrower than that of the comparison's operands.  */

void
mips_expand_scc (rtx operands[])
{
  rtx target = operands[0];
  enum rtx_code code = GET_CODE (operands[1]);
  rtx op0 = operands[2];
  rtx op1 = operands[3];

  gcc_assert (GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT);

  if (code == EQ || code == NE)
    {
      if (ISA_HAS_SEQ_SNE
          && reg_imm10_operand (op1, GET_MODE (op1)))
        mips_emit_binary (code, target, op0, op1);
      else
        {
          rtx zie = mips_zero_if_equal (op0, op1);
          mips_emit_binary (code, target, zie, const0_rtx);
        }
    }
  else
    mips_emit_int_order_test (code, 0, target, op0, op1);
}

/* Compare OPERANDS[1] with OPERANDS[2] using comparison code
   CODE and jump to OPERANDS[3] if the condition holds.  */

void
mips_expand_conditional_branch (rtx *operands)
{
  enum rtx_code code = GET_CODE (operands[0]);
  rtx op0 = operands[1];
  rtx op1 = operands[2];
  rtx condition;

  mips_emit_compare (&code, &op0, &op1, TARGET_MIPS16);
  condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
  emit_jump_insn (gen_condjump (condition, operands[3]));
}

/* Implement:

   (set temp (COND:CCV2 CMP_OP0 CMP_OP1))
   (set DEST (unspec [TRUE_SRC FALSE_SRC temp] UNSPEC_MOVE_TF_PS))  */

void
mips_expand_vcondv2sf (rtx dest, rtx true_src, rtx false_src,
                       enum rtx_code cond, rtx cmp_op0, rtx cmp_op1)
{
  rtx cmp_result;
  bool reversed_p;

  reversed_p = mips_reversed_fp_cond (&cond);
  cmp_result = gen_reg_rtx (CCV2mode);
  emit_insn (gen_scc_ps (cmp_result,
                         gen_rtx_fmt_ee (cond, VOIDmode, cmp_op0, cmp_op1)));
  if (reversed_p)
    emit_insn (gen_mips_cond_move_tf_ps (dest, false_src, true_src,
                                         cmp_result));
  else
    emit_insn (gen_mips_cond_move_tf_ps (dest, true_src, false_src,
                                         cmp_result));
}

/* Perform the comparison in OPERANDS[1].  Move OPERANDS[2] into OPERANDS[0]
   if the condition holds, otherwise move OPERANDS[3] into OPERANDS[0].  */

void
mips_expand_conditional_move (rtx *operands)
{
  rtx cond;
  enum rtx_code code = GET_CODE (operands[1]);
  rtx op0 = XEXP (operands[1], 0);
  rtx op1 = XEXP (operands[1], 1);

  mips_emit_compare (&code, &op0, &op1, true);
  cond = gen_rtx_fmt_ee (code, GET_MODE (op0), op0, op1);
  emit_insn (gen_rtx_SET (VOIDmode, operands[0],
                          gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]), cond,
                                                operands[2], operands[3])));
}

/* Perform the comparison in COMPARISON, then trap if the condition holds.  */

void
mips_expand_conditional_trap (rtx comparison)
{
  rtx op0, op1;
  enum machine_mode mode;
  enum rtx_code code;

  /* MIPS conditional trap instructions don't have GT or LE flavors,
     so we must swap the operands and convert to LT and GE respectively.  */
  code = GET_CODE (comparison);
  switch (code)
    {
    case GT:
    case LE:
    case GTU:
    case LEU:
      code = swap_condition (code);
      op0 = XEXP (comparison, 1);
      op1 = XEXP (comparison, 0);
      break;

    default:
      op0 = XEXP (comparison, 0);
      op1 = XEXP (comparison, 1);
      break;
    }

  mode = GET_MODE (XEXP (comparison, 0));
  op0 = force_reg (mode, op0);
  if (!arith_operand (op1, mode))
    op1 = force_reg (mode, op1);

  emit_insn (gen_rtx_TRAP_IF (VOIDmode,
                              gen_rtx_fmt_ee (code, mode, op0, op1),
                              const0_rtx));
}

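/* Worked example (editorial note, not from the original source): a request
   to trap when "a > b" cannot use a "tgt" instruction, because that flavor
   does not exist.  The code above swaps the operands and emits the
   equivalent "trap if b < a", i.e. a TLT with the operands reversed.  The
   operand names are illustrative only.  */
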
/* Initialize *CUM for a call to a function of type FNTYPE.  */

void
mips_init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype)
{
  memset (cum, 0, sizeof (*cum));
  cum->prototype = (fntype && prototype_p (fntype));
  cum->gp_reg_found = (cum->prototype && stdarg_p (fntype));
}

/* Fill INFO with information about a single argument.  CUM is the
   cumulative state for earlier arguments.  MODE is the mode of this
   argument and TYPE is its type (if known).  NAMED is true if this
   is a named (fixed) argument rather than a variable one.  */

static void
mips_get_arg_info (struct mips_arg_info *info, const CUMULATIVE_ARGS *cum,
                   enum machine_mode mode, tree type, int named)
{
  bool doubleword_aligned_p;
  unsigned int num_bytes, num_words, max_regs;

  /* Work out the size of the argument.  */
  num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
  num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;

  /* Decide whether it should go in a floating-point register, assuming
     one is free.  Later code checks for availability.

     The checks against UNITS_PER_FPVALUE handle the soft-float and
     single-float cases.  */
  switch (mips_abi)
    {
    case ABI_EABI:
      /* The EABI conventions have traditionally been defined in terms
         of TYPE_MODE, regardless of the actual type.  */
      info->fpr_p = ((GET_MODE_CLASS (mode) == MODE_FLOAT
                      || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
                     && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
      break;

    case ABI_32:
    case ABI_O64:
      /* Only leading floating-point scalars are passed in
         floating-point registers.  We also handle vector floats the same
         way, which is OK because they are not covered by the standard ABI.  */
      info->fpr_p = (!cum->gp_reg_found
                     && cum->arg_number < 2
                     && (type == 0
                         || SCALAR_FLOAT_TYPE_P (type)
                         || VECTOR_FLOAT_TYPE_P (type))
                     && (GET_MODE_CLASS (mode) == MODE_FLOAT
                         || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
                     && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
      break;

    case ABI_N32:
    case ABI_64:
      /* Scalar, complex and vector floating-point types are passed in
         floating-point registers, as long as this is a named rather
         than a variable argument.  */
      info->fpr_p = (named
                     && (type == 0 || FLOAT_TYPE_P (type))
                     && (GET_MODE_CLASS (mode) == MODE_FLOAT
                         || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
                         || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
                     && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);

      /* ??? According to the ABI documentation, the real and imaginary
         parts of complex floats should be passed in individual registers.
         The real and imaginary parts of stack arguments are supposed
         to be contiguous and there should be an extra word of padding
         at the end.

         This has two problems.  First, it makes it impossible to use a
         single "void *" va_list type, since register and stack arguments
         are passed differently.  (At the time of writing, MIPSpro cannot
         handle complex float varargs correctly.)  Second, it's unclear
         what should happen when there is only one register free.

         For now, we assume that named complex floats should go into FPRs
         if there are two FPRs free, otherwise they should be passed in the
         same way as a struct containing two floats.  */
      if (info->fpr_p
          && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
          && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
        {
          if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
            info->fpr_p = false;
          else
            num_words = 2;
        }
      break;

    default:
      gcc_unreachable ();
    }

  /* See whether the argument has doubleword alignment.  */
  doubleword_aligned_p = FUNCTION_ARG_BOUNDARY (mode, type) > BITS_PER_WORD;

  /* Set REG_OFFSET to the register count we're interested in.
     The EABI allocates the floating-point registers separately,
     but the other ABIs allocate them like integer registers.  */
  info->reg_offset = (mips_abi == ABI_EABI && info->fpr_p
                      ? cum->num_fprs
                      : cum->num_gprs);

  /* Advance to an even register if the argument is doubleword-aligned.  */
  if (doubleword_aligned_p)
    info->reg_offset += info->reg_offset & 1;

  /* Work out the offset of a stack argument.  */
  info->stack_offset = cum->stack_words;
  if (doubleword_aligned_p)
    info->stack_offset += info->stack_offset & 1;

  max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;

  /* Partition the argument between registers and stack.  */
  info->reg_words = MIN (num_words, max_regs);
  info->stack_words = num_words - info->reg_words;
}

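/* Worked example (editorial note, not from the original source): under the
   o32 ABI with hard float, "double f (double a, double b)" passes A in
   $f12 and B in $f14, since both are leading floating-point scalars.  For
   "double g (int a, double b)", A occupies $4 and sets gp_reg_found, so B
   is no longer eligible for an FPR; being doubleword-aligned it skips $5
   and is passed in the $6/$7 pair.  The prototypes are illustrative
   only.  */
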
/* INFO describes a register argument that has the normal format for the
   argument's mode.  Return the register it uses, assuming that FPRs are
   available if HARD_FLOAT_P.  */

static unsigned int
mips_arg_regno (const struct mips_arg_info *info, bool hard_float_p)
{
  if (!info->fpr_p || !hard_float_p)
    return GP_ARG_FIRST + info->reg_offset;
  else if (mips_abi == ABI_32 && TARGET_DOUBLE_FLOAT && info->reg_offset > 0)
    /* In o32, the second argument is always passed in $f14
       for TARGET_DOUBLE_FLOAT, regardless of whether the
       first argument was a word or doubleword.  */
    return FP_ARG_FIRST + 2;
  else
    return FP_ARG_FIRST + info->reg_offset;
}

/* Implement TARGET_STRICT_ARGUMENT_NAMING.  */

static bool
mips_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
{
  return !TARGET_OLDABI;
}

/* Implement FUNCTION_ARG.  */

rtx
mips_function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
                   tree type, int named)
{
  struct mips_arg_info info;

  /* We will be called with a mode of VOIDmode after the last argument
     has been seen.  Whatever we return will be passed to the call expander.
     If we need a MIPS16 fp_code, return a REG with the code stored as
     the mode.  */
  if (mode == VOIDmode)
    {
      if (TARGET_MIPS16 && cum->fp_code != 0)
        return gen_rtx_REG ((enum machine_mode) cum->fp_code, 0);
      else
        return NULL;
    }

  mips_get_arg_info (&info, cum, mode, type, named);

  /* Return straight away if the whole argument is passed on the stack.  */
  if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
    return NULL;

  /* The n32 and n64 ABIs say that if any 64-bit chunk of the structure
     contains a double in its entirety, then that 64-bit chunk is passed
     in a floating-point register.  */
  if (TARGET_NEWABI
      && TARGET_HARD_FLOAT
      && named
      && type != 0
      && TREE_CODE (type) == RECORD_TYPE
      && TYPE_SIZE_UNIT (type)
      && host_integerp (TYPE_SIZE_UNIT (type), 1))
    {
      tree field;

      /* First check to see if there is any such field.  */
      for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
        if (TREE_CODE (field) == FIELD_DECL
            && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field))
            && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
            && host_integerp (bit_position (field), 0)
            && int_bit_position (field) % BITS_PER_WORD == 0)
          break;

      if (field != 0)
        {
          /* Now handle the special case by returning a PARALLEL
             indicating where each 64-bit chunk goes.  INFO.REG_WORDS
             chunks are passed in registers.  */
          unsigned int i;
          HOST_WIDE_INT bitpos;
          rtx ret;

          /* assign_parms checks the mode of ENTRY_PARM, so we must
             use the actual mode here.  */
          ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));

          bitpos = 0;
          field = TYPE_FIELDS (type);
          for (i = 0; i < info.reg_words; i++)
            {
              rtx reg;

              for (; field; field = TREE_CHAIN (field))
                if (TREE_CODE (field) == FIELD_DECL
                    && int_bit_position (field) >= bitpos)
                  break;

              if (field
                  && int_bit_position (field) == bitpos
                  && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field))
                  && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD)
                reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
              else
                reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);

              XVECEXP (ret, 0, i)
                = gen_rtx_EXPR_LIST (VOIDmode, reg,
                                     GEN_INT (bitpos / BITS_PER_UNIT));

              bitpos += BITS_PER_WORD;
            }
          return ret;
        }
    }

  /* Handle the n32/n64 conventions for passing complex floating-point
     arguments in FPR pairs.  The real part goes in the lower register
     and the imaginary part goes in the upper register.  */
  if (TARGET_NEWABI
      && info.fpr_p
      && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
    {
      rtx real, imag;
      enum machine_mode inner;
      unsigned int regno;

      inner = GET_MODE_INNER (mode);
      regno = FP_ARG_FIRST + info.reg_offset;
      if (info.reg_words * UNITS_PER_WORD == GET_MODE_SIZE (inner))
        {
          /* Real part in registers, imaginary part on stack.  */
          gcc_assert (info.stack_words == info.reg_words);
          return gen_rtx_REG (inner, regno);
        }
      else
        {
          gcc_assert (info.stack_words == 0);
          real = gen_rtx_EXPR_LIST (VOIDmode,
                                    gen_rtx_REG (inner, regno),
                                    const0_rtx);
          imag = gen_rtx_EXPR_LIST (VOIDmode,
                                    gen_rtx_REG (inner,
                                                 regno + info.reg_words / 2),
                                    GEN_INT (GET_MODE_SIZE (inner)));
          return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
        }
    }

  return gen_rtx_REG (mode, mips_arg_regno (&info, TARGET_HARD_FLOAT));
}

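/* Worked example (editorial note, not from the original source): under n64
   with hard float, passing "struct s { double d; long i; }" by value as
   the first named argument yields a two-element PARALLEL: the 64-bit chunk
   holding D goes in $f12 (DFmode) at byte offset 0 and the chunk holding I
   goes in $5 (DImode) at byte offset 8, since the structure starts at
   register offset 0 and GP_ARG_FIRST is $4.  The layout is illustrative
   only.  */
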
/* Implement FUNCTION_ARG_ADVANCE.  */

void
mips_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
                           tree type, int named)
{
  struct mips_arg_info info;

  mips_get_arg_info (&info, cum, mode, type, named);

  if (!info.fpr_p)
    cum->gp_reg_found = true;

  /* See the comment above the CUMULATIVE_ARGS structure in mips.h for
     an explanation of what this code does.  It assumes that we're using
     either the o32 or the o64 ABI, both of which pass at most 2 arguments
     in FPRs.  */
  if (cum->arg_number < 2 && info.fpr_p)
    cum->fp_code += (mode == SFmode ? 1 : 2) << (cum->arg_number * 2);

  /* Advance the register count.  This has the effect of setting
     num_gprs to MAX_ARGS_IN_REGISTERS if a doubleword-aligned
     argument required us to skip the final GPR and pass the whole
     argument on the stack.  */
  if (mips_abi != ABI_EABI || !info.fpr_p)
    cum->num_gprs = info.reg_offset + info.reg_words;
  else if (info.reg_words > 0)
    cum->num_fprs += MAX_FPRS_PER_FMT;

  /* Advance the stack word count.  */
  if (info.stack_words > 0)
    cum->stack_words = info.stack_offset + info.stack_words;

  cum->arg_number++;
}

/* Implement TARGET_ARG_PARTIAL_BYTES.  */

static int
mips_arg_partial_bytes (CUMULATIVE_ARGS *cum,
                        enum machine_mode mode, tree type, bool named)
{
  struct mips_arg_info info;

  mips_get_arg_info (&info, cum, mode, type, named);
  return info.stack_words > 0 ? info.reg_words * UNITS_PER_WORD : 0;
}

/* Implement FUNCTION_ARG_BOUNDARY.  Every parameter gets at least
   PARM_BOUNDARY bits of alignment, but will be given anything up
   to STACK_BOUNDARY bits if the type requires it.  */

int
mips_function_arg_boundary (enum machine_mode mode, tree type)
{
  unsigned int alignment;

  alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
  if (alignment < PARM_BOUNDARY)
    alignment = PARM_BOUNDARY;
  if (alignment > STACK_BOUNDARY)
    alignment = STACK_BOUNDARY;
  return alignment;
}

/* Return true if FUNCTION_ARG_PADDING (MODE, TYPE) should return
   upward rather than downward.  In other words, return true if the
   first byte of the stack slot has useful data, false if the last
   byte does.  */

bool
mips_pad_arg_upward (enum machine_mode mode, const_tree type)
{
  /* On little-endian targets, the first byte of every stack argument
     is passed in the first byte of the stack slot.  */
  if (!BYTES_BIG_ENDIAN)
    return true;

  /* Otherwise, integral types are padded downward: the last byte of a
     stack argument is passed in the last byte of the stack slot.  */
  if (type != 0
      ? (INTEGRAL_TYPE_P (type)
         || POINTER_TYPE_P (type)
         || FIXED_POINT_TYPE_P (type))
      : (SCALAR_INT_MODE_P (mode)
         || ALL_SCALAR_FIXED_POINT_MODE_P (mode)))
    return false;

  /* Big-endian o64 pads floating-point arguments downward.  */
  if (mips_abi == ABI_O64)
    if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
      return false;

  /* Other types are padded upward for o32, o64, n32 and n64.  */
  if (mips_abi != ABI_EABI)
    return true;

  /* Arguments smaller than a stack slot are padded downward.  */
  if (mode != BLKmode)
    return GET_MODE_BITSIZE (mode) >= PARM_BOUNDARY;
  else
    return int_size_in_bytes (type) >= (PARM_BOUNDARY / BITS_PER_UNIT);
}

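/* Worked example (editorial note, not from the original source): on a
   big-endian n64 target, a 32-bit int passed on the stack is padded
   downward, so its four bytes occupy the high-addressed half of the
   8-byte slot; on the equivalent little-endian configuration it occupies
   the low-addressed half.  The 8-byte slot size reflects n64, whose
   PARM_BOUNDARY is 64 bits; other ABIs differ.  */
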
/* Likewise BLOCK_REG_PADDING (MODE, TYPE, ...).  Return !BYTES_BIG_ENDIAN
   if the least significant byte of the register has useful data.  Return
   the opposite if the most significant byte does.  */

bool
mips_pad_reg_upward (enum machine_mode mode, tree type)
{
  /* No shifting is required for floating-point arguments.  */
  if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
    return !BYTES_BIG_ENDIAN;

  /* Otherwise, apply the same padding to register arguments as we do
     to stack arguments.  */
  return mips_pad_arg_upward (mode, type);
}

/* Return nonzero when an argument must be passed by reference.  */

static bool
mips_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
                        enum machine_mode mode, const_tree type,
                        bool named ATTRIBUTE_UNUSED)
{
  if (mips_abi == ABI_EABI)
    {
      int size;

      /* ??? How should SCmode be handled?  */
      if (mode == DImode || mode == DFmode
          || mode == DQmode || mode == UDQmode
          || mode == DAmode || mode == UDAmode)
        return 0;

      size = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
      return size == -1 || size > UNITS_PER_WORD;
    }
  else
    {
      /* If we have a variable-sized parameter, we have no choice.  */
      return targetm.calls.must_pass_in_stack (mode, type);
    }
}

/* Implement TARGET_CALLEE_COPIES.  */

static bool
mips_callee_copies (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
                    enum machine_mode mode ATTRIBUTE_UNUSED,
                    const_tree type ATTRIBUTE_UNUSED, bool named)
{
  return mips_abi == ABI_EABI && named;
}

/* See whether VALTYPE is a record whose fields should be returned in
   floating-point registers.  If so, return the number of fields and
   list them in FIELDS (which should have two elements).  Return 0
   otherwise.

   For n32 & n64, a structure with one or two fields is returned in
   floating-point registers as long as every field has a floating-point
   type.  */

static int
mips_fpr_return_fields (const_tree valtype, tree *fields)
{
  tree field;
  int i;

  if (!TARGET_NEWABI)
    return 0;

  if (TREE_CODE (valtype) != RECORD_TYPE)
    return 0;

  i = 0;
  for (field = TYPE_FIELDS (valtype); field != 0; field = TREE_CHAIN (field))
    {
      if (TREE_CODE (field) != FIELD_DECL)
        continue;

      if (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (field)))
        return 0;

      if (i == 2)
        return 0;

      fields[i++] = field;
    }
  return i;
}

/* Implement TARGET_RETURN_IN_MSB.  For n32 & n64, we should return
   a value in the most significant part of $2/$3 if:

   - the target is big-endian;

   - the value has a structure or union type (we generalize this to
     cover aggregates from other languages too); and

   - the structure is not returned in floating-point registers.  */

static bool
mips_return_in_msb (const_tree valtype)
{
  tree fields[2];

  return (TARGET_NEWABI
          && TARGET_BIG_ENDIAN
          && AGGREGATE_TYPE_P (valtype)
          && mips_fpr_return_fields (valtype, fields) == 0);
}

/* Return true if the function return value MODE will get returned in a
   floating-point register.  */

static bool
mips_return_mode_in_fpr_p (enum machine_mode mode)
{
  return ((GET_MODE_CLASS (mode) == MODE_FLOAT
           || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT
           || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
          && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_HWFPVALUE);
}

/* Return the representation of an FPR return register when the
   value being returned in FP_RETURN has mode VALUE_MODE and the
   return type itself has mode TYPE_MODE.  On NewABI targets,
   the two modes may be different for structures like:

       struct __attribute__((packed)) foo { float f; }

   where we return the SFmode value of "f" in FP_RETURN, but where
   the structure itself has mode BLKmode.  */

static rtx
mips_return_fpr_single (enum machine_mode type_mode,
                        enum machine_mode value_mode)
{
  rtx x;

  x = gen_rtx_REG (value_mode, FP_RETURN);
  if (type_mode != value_mode)
    {
      x = gen_rtx_EXPR_LIST (VOIDmode, x, const0_rtx);
      x = gen_rtx_PARALLEL (type_mode, gen_rtvec (1, x));
    }
  return x;
}

/* Return a composite value in a pair of floating-point registers.
   MODE1 and OFFSET1 are the mode and byte offset for the first value,
   likewise MODE2 and OFFSET2 for the second.  MODE is the mode of the
   complete value.

   For n32 & n64, $f0 always holds the first value and $f2 the second.
   Otherwise the values are packed together as closely as possible.  */

static rtx
mips_return_fpr_pair (enum machine_mode mode,
                      enum machine_mode mode1, HOST_WIDE_INT offset1,
                      enum machine_mode mode2, HOST_WIDE_INT offset2)
{
  int inc;

  inc = (TARGET_NEWABI ? 2 : MAX_FPRS_PER_FMT);
  return gen_rtx_PARALLEL
    (mode,
     gen_rtvec (2,
                gen_rtx_EXPR_LIST (VOIDmode,
                                   gen_rtx_REG (mode1, FP_RETURN),
                                   GEN_INT (offset1)),
                gen_rtx_EXPR_LIST (VOIDmode,
                                   gen_rtx_REG (mode2, FP_RETURN + inc),
                                   GEN_INT (offset2))));
}

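/* Worked example (editorial note, not from the original source): a
   function returning "double _Complex" under n32 or n64 hands back a
   PARALLEL built by mips_return_fpr_pair with the real part in $f0 at
   byte offset 0 and the imaginary part in $f2 at byte offset 8.  The
   same helper also covers two-field floating-point structures found by
   mips_fpr_return_fields.  The register numbers follow FP_RETURN and are
   illustrative only.  */
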
/* Implement FUNCTION_VALUE and LIBCALL_VALUE.  For normal calls,
   VALTYPE is the return type and MODE is VOIDmode.  For libcalls,
   VALTYPE is null and MODE is the mode of the return value.  */

rtx
mips_function_value (const_tree valtype, const_tree func, enum machine_mode mode)
{
  if (valtype)
    {
      tree fields[2];
      int unsigned_p;

      mode = TYPE_MODE (valtype);
      unsigned_p = TYPE_UNSIGNED (valtype);

      /* Since TARGET_PROMOTE_FUNCTION_MODE unconditionally promotes
         return values, promote the mode here too.  */
      mode = promote_function_mode (valtype, mode, &unsigned_p, func, 1);

      /* Handle structures whose fields are returned in $f0/$f2.  */
      switch (mips_fpr_return_fields (valtype, fields))
        {
        case 1:
          return mips_return_fpr_single (mode,
                                         TYPE_MODE (TREE_TYPE (fields[0])));

        case 2:
          return mips_return_fpr_pair (mode,
                                       TYPE_MODE (TREE_TYPE (fields[0])),
                                       int_byte_position (fields[0]),
                                       TYPE_MODE (TREE_TYPE (fields[1])),
                                       int_byte_position (fields[1]));
        }

      /* If a value is passed in the most significant part of a register, see
         whether we have to round the mode up to a whole number of words.  */
      if (mips_return_in_msb (valtype))
        {
          HOST_WIDE_INT size = int_size_in_bytes (valtype);
          if (size % UNITS_PER_WORD != 0)
            {
              size += UNITS_PER_WORD - size % UNITS_PER_WORD;
              mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
            }
        }

      /* For EABI, the class of return register depends entirely on MODE.
         For example, "struct { some_type x; }" and "union { some_type x; }"
         are returned in the same way as a bare "some_type" would be.
         Other ABIs only use FPRs for scalar, complex or vector types.  */
      if (mips_abi != ABI_EABI && !FLOAT_TYPE_P (valtype))
        return gen_rtx_REG (mode, GP_RETURN);
    }

  if (!TARGET_MIPS16)
    {
      /* Handle long doubles for n32 & n64.  */
      if (mode == TFmode)
        return mips_return_fpr_pair (mode,
                                     DImode, 0,
                                     DImode, GET_MODE_SIZE (mode) / 2);

      if (mips_return_mode_in_fpr_p (mode))
        {
          if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
            return mips_return_fpr_pair (mode,
                                         GET_MODE_INNER (mode), 0,
                                         GET_MODE_INNER (mode),
                                         GET_MODE_SIZE (mode) / 2);
          else
            return gen_rtx_REG (mode, FP_RETURN);
        }
    }

  return gen_rtx_REG (mode, GP_RETURN);
}

/* Implement TARGET_RETURN_IN_MEMORY.  Under the o32 and o64 ABIs,
   all BLKmode objects are returned in memory.  Under the n32, n64
   and embedded ABIs, small structures are returned in a register.
   Objects with varying size must still be returned in memory, of
   course.  */

static bool
mips_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
{
  return (TARGET_OLDABI
          ? TYPE_MODE (type) == BLKmode
          : !IN_RANGE (int_size_in_bytes (type), 0, 2 * UNITS_PER_WORD));
}

/* Implement TARGET_SETUP_INCOMING_VARARGS.  */

static void
mips_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
                             tree type, int *pretend_size ATTRIBUTE_UNUSED,
                             int no_rtl)
{
  CUMULATIVE_ARGS local_cum;
  int gp_saved, fp_saved;

  /* The caller has advanced CUM up to, but not beyond, the last named
     argument.  Advance a local copy of CUM past the last "real" named
     argument, to find out how many registers are left over.  */
  local_cum = *cum;
  FUNCTION_ARG_ADVANCE (local_cum, mode, type, true);

  /* Find out how many registers we need to save.  */
  gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
  fp_saved = (EABI_FLOAT_VARARGS_P
              ? MAX_ARGS_IN_REGISTERS - local_cum.num_fprs
              : 0);

  if (!no_rtl)
    {
      if (gp_saved > 0)
        {
          rtx ptr, mem;

          ptr = plus_constant (virtual_incoming_args_rtx,
                               REG_PARM_STACK_SPACE (cfun->decl)
                               - gp_saved * UNITS_PER_WORD);
          mem = gen_frame_mem (BLKmode, ptr);
          set_mem_alias_set (mem, get_varargs_alias_set ());

          move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
                               mem, gp_saved);
        }
      if (fp_saved > 0)
        {
          /* We can't use move_block_from_reg, because it will use
             the wrong mode.  */
          enum machine_mode mode;
          int off, i;

          /* Set OFF to the offset from virtual_incoming_args_rtx of
             the first float register.  The FP save area lies below
             the integer one, and is aligned to UNITS_PER_FPVALUE bytes.  */
          off = (-gp_saved * UNITS_PER_WORD) & -UNITS_PER_FPVALUE;
          off -= fp_saved * UNITS_PER_FPREG;

          mode = TARGET_SINGLE_FLOAT ? SFmode : DFmode;

          for (i = local_cum.num_fprs; i < MAX_ARGS_IN_REGISTERS;
               i += MAX_FPRS_PER_FMT)
            {
              rtx ptr, mem;

              ptr = plus_constant (virtual_incoming_args_rtx, off);
              mem = gen_frame_mem (mode, ptr);
              set_mem_alias_set (mem, get_varargs_alias_set ());
              mips_emit_move (mem, gen_rtx_REG (mode, FP_ARG_FIRST + i));
              off += UNITS_PER_HWFPVALUE;
            }
        }
    }
  if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
    cfun->machine->varargs_size = (gp_saved * UNITS_PER_WORD
                                   + fp_saved * UNITS_PER_FPREG);
}

/* Implement TARGET_BUILTIN_VA_LIST.  */

static tree
mips_build_builtin_va_list (void)
{
  if (EABI_FLOAT_VARARGS_P)
    {
      /* We keep three pointers and two offsets.

         Two pointers are to the overflow area, which starts at the CFA.
         One of these is constant, for addressing into the GPR save area
         below it.  The other is advanced up the stack through the
         overflow region.

         The third pointer is to the bottom of the GPR save area.
         Since the FPR save area is just below it, we can address
         FPR slots off this pointer.

         We also keep two one-byte offsets, which are to be subtracted
         from the constant pointers to yield addresses in the GPR and
         FPR save areas.  These are downcounted as float or non-float
         arguments are used, and when they get to zero, the argument
         must be obtained from the overflow region.  */
      tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff, f_res, record;
      tree array, index;

      record = lang_hooks.types.make_type (RECORD_TYPE);

      f_ovfl = build_decl (BUILTINS_LOCATION,
                           FIELD_DECL, get_identifier ("__overflow_argptr"),
                           ptr_type_node);
      f_gtop = build_decl (BUILTINS_LOCATION,
                           FIELD_DECL, get_identifier ("__gpr_top"),
                           ptr_type_node);
      f_ftop = build_decl (BUILTINS_LOCATION,
                           FIELD_DECL, get_identifier ("__fpr_top"),
                           ptr_type_node);
      f_goff = build_decl (BUILTINS_LOCATION,
                           FIELD_DECL, get_identifier ("__gpr_offset"),
                           unsigned_char_type_node);
      f_foff = build_decl (BUILTINS_LOCATION,
                           FIELD_DECL, get_identifier ("__fpr_offset"),
                           unsigned_char_type_node);
      /* Explicitly pad to the size of a pointer, so that -Wpadded won't
         warn on every user file.  */
      index = build_int_cst (NULL_TREE, GET_MODE_SIZE (ptr_mode) - 2 - 1);
      array = build_array_type (unsigned_char_type_node,
                                build_index_type (index));
      f_res = build_decl (BUILTINS_LOCATION,
                          FIELD_DECL, get_identifier ("__reserved"), array);

      DECL_FIELD_CONTEXT (f_ovfl) = record;
      DECL_FIELD_CONTEXT (f_gtop) = record;
      DECL_FIELD_CONTEXT (f_ftop) = record;
      DECL_FIELD_CONTEXT (f_goff) = record;
      DECL_FIELD_CONTEXT (f_foff) = record;
      DECL_FIELD_CONTEXT (f_res) = record;

      TYPE_FIELDS (record) = f_ovfl;
      TREE_CHAIN (f_ovfl) = f_gtop;
      TREE_CHAIN (f_gtop) = f_ftop;
      TREE_CHAIN (f_ftop) = f_goff;
      TREE_CHAIN (f_goff) = f_foff;
      TREE_CHAIN (f_foff) = f_res;

      layout_type (record);
      return record;
    }
  else if (TARGET_IRIX && TARGET_IRIX6)
    /* On IRIX 6, this type is 'char *'.  */
    return build_pointer_type (char_type_node);
  else
    /* Otherwise, we use 'void *'.  */
    return ptr_type_node;
}

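/* Editorial note, not from the original source: the EABI record built
   above corresponds roughly to the following C declaration, with
   __reserved padding the structure out to a multiple of the pointer
   size:

       struct __mips_eabi_va_list {
         void *__overflow_argptr;    // next stack (overflow) argument
         void *__gpr_top;            // top of the GPR save area
         void *__fpr_top;            // top of the FPR save area
         unsigned char __gpr_offset;
         unsigned char __fpr_offset;
         unsigned char __reserved[sizeof (void *) - 2];
       };

   The struct tag is invented for illustration; the real record type is
   anonymous.  */
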
/* Implement TARGET_EXPAND_BUILTIN_VA_START.  */

static void
mips_va_start (tree valist, rtx nextarg)
{
  if (EABI_FLOAT_VARARGS_P)
    {
      const CUMULATIVE_ARGS *cum;
      tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
      tree ovfl, gtop, ftop, goff, foff;
      tree t;
      int gpr_save_area_size;
      int fpr_save_area_size;
      int fpr_offset;

      cum = &crtl->args.info;
      gpr_save_area_size
        = (MAX_ARGS_IN_REGISTERS - cum->num_gprs) * UNITS_PER_WORD;
      fpr_save_area_size
        = (MAX_ARGS_IN_REGISTERS - cum->num_fprs) * UNITS_PER_FPREG;

      f_ovfl = TYPE_FIELDS (va_list_type_node);
      f_gtop = TREE_CHAIN (f_ovfl);
      f_ftop = TREE_CHAIN (f_gtop);
      f_goff = TREE_CHAIN (f_ftop);
      f_foff = TREE_CHAIN (f_goff);

      ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
                     NULL_TREE);
      gtop = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
                     NULL_TREE);
      ftop = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
                     NULL_TREE);
      goff = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
                     NULL_TREE);
      foff = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
                     NULL_TREE);

      /* Emit code to initialize OVFL, which points to the next varargs
         stack argument.  CUM->STACK_WORDS gives the number of stack
         words used by named arguments.  */
      t = make_tree (TREE_TYPE (ovfl), virtual_incoming_args_rtx);
      if (cum->stack_words > 0)
        t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), t,
                    size_int (cum->stack_words * UNITS_PER_WORD));
      t = build2 (MODIFY_EXPR, TREE_TYPE (ovfl), ovfl, t);
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

      /* Emit code to initialize GTOP, the top of the GPR save area.  */
      t = make_tree (TREE_TYPE (gtop), virtual_incoming_args_rtx);
      t = build2 (MODIFY_EXPR, TREE_TYPE (gtop), gtop, t);
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

      /* Emit code to initialize FTOP, the top of the FPR save area.
         This address is gpr_save_area_bytes below GTOP, rounded
         down to the next fp-aligned boundary.  */
      t = make_tree (TREE_TYPE (ftop), virtual_incoming_args_rtx);
      fpr_offset = gpr_save_area_size + UNITS_PER_FPVALUE - 1;
      fpr_offset &= -UNITS_PER_FPVALUE;
      if (fpr_offset)
        t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ftop), t,
                    size_int (-fpr_offset));
      t = build2 (MODIFY_EXPR, TREE_TYPE (ftop), ftop, t);
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

      /* Emit code to initialize GOFF, the offset from GTOP of the
         next GPR argument.  */
      t = build2 (MODIFY_EXPR, TREE_TYPE (goff), goff,
                  build_int_cst (TREE_TYPE (goff), gpr_save_area_size));
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

      /* Likewise emit code to initialize FOFF, the offset from FTOP
         of the next FPR argument.  */
      t = build2 (MODIFY_EXPR, TREE_TYPE (foff), foff,
                  build_int_cst (TREE_TYPE (foff), fpr_save_area_size));
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }
  else
    {
      nextarg = plus_constant (nextarg, -cfun->machine->varargs_size);
      std_expand_builtin_va_start (valist, nextarg);
    }
}

/* Implement TARGET_GIMPLIFY_VA_ARG_EXPR.  */

static tree
mips_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
                           gimple_seq *post_p)
{
  tree addr;
  bool indirect_p;

  indirect_p = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
  if (indirect_p)
    type = build_pointer_type (type);

  if (!EABI_FLOAT_VARARGS_P)
    addr = std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
  else
    {
      tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
      tree ovfl, top, off, align;
      HOST_WIDE_INT size, rsize, osize;
      tree t, u;

      f_ovfl = TYPE_FIELDS (va_list_type_node);
      f_gtop = TREE_CHAIN (f_ovfl);
      f_ftop = TREE_CHAIN (f_gtop);
      f_goff = TREE_CHAIN (f_ftop);
      f_foff = TREE_CHAIN (f_goff);

      /* Let:

         TOP be the top of the GPR or FPR save area;
         OFF be the offset from TOP of the next register;
         ADDR_RTX be the address of the argument;
         SIZE be the number of bytes in the argument type;
         RSIZE be the number of bytes used to store the argument
           when it's in the register save area; and
         OSIZE be the number of bytes used to store it when it's
           in the stack overflow area.

         The code we want is:

         1: off &= -rsize;        // round down
         2: if (off != 0)
         3:   {
         4:     addr_rtx = top - off + (BYTES_BIG_ENDIAN ? RSIZE - SIZE : 0);
         5:     off -= rsize;
         6:   }
         7: else
         8:   {
         9:     ovfl = ((intptr_t) ovfl + osize - 1) & -osize;
         10:    addr_rtx = ovfl + (BYTES_BIG_ENDIAN ? OSIZE - SIZE : 0);
         11:    ovfl += osize;
         12:  }

         [1] and [9] can sometimes be optimized away.  */

      ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
                     NULL_TREE);
      size = int_size_in_bytes (type);

      if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT
          && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FPVALUE)
        {
          top = build3 (COMPONENT_REF, TREE_TYPE (f_ftop),
                        unshare_expr (valist), f_ftop, NULL_TREE);
          off = build3 (COMPONENT_REF, TREE_TYPE (f_foff),
                        unshare_expr (valist), f_foff, NULL_TREE);

          /* When va_start saves FPR arguments to the stack, each slot
             takes up UNITS_PER_HWFPVALUE bytes, regardless of the
             argument's precision.  */
          rsize = UNITS_PER_HWFPVALUE;

          /* Overflow arguments are padded to UNITS_PER_WORD bytes
             (= PARM_BOUNDARY bits).  This can be different from RSIZE
             in two cases:

             (1) On 32-bit targets when TYPE is a structure such as:

                 struct s { float f; };

                 Such structures are passed in paired FPRs, so RSIZE
                 will be 8 bytes.  However, the structure only takes
                 up 4 bytes of memory, so OSIZE will only be 4.

             (2) In combinations such as -mgp64 -msingle-float
                 -fshort-double.  Doubles passed in registers will then take
                 up 4 (UNITS_PER_HWFPVALUE) bytes, but those passed on the
                 stack take up UNITS_PER_WORD bytes.  */
          osize = MAX (GET_MODE_SIZE (TYPE_MODE (type)), UNITS_PER_WORD);
        }
      else
        {
          top = build3 (COMPONENT_REF, TREE_TYPE (f_gtop),
                        unshare_expr (valist), f_gtop, NULL_TREE);
          off = build3 (COMPONENT_REF, TREE_TYPE (f_goff),
                        unshare_expr (valist), f_goff, NULL_TREE);
          rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
          if (rsize > UNITS_PER_WORD)
            {
              /* [1] Emit code for: off &= -rsize.  */
              t = build2 (BIT_AND_EXPR, TREE_TYPE (off), unshare_expr (off),
                          build_int_cst (TREE_TYPE (off), -rsize));
              gimplify_assign (unshare_expr (off), t, pre_p);
            }
          osize = rsize;
        }

      /* [2] Emit code to branch if off == 0.  */
      t = build2 (NE_EXPR, boolean_type_node, off,
                  build_int_cst (TREE_TYPE (off), 0));
      addr = build3 (COND_EXPR, ptr_type_node, t, NULL_TREE, NULL_TREE);

      /* [5] Emit code for: off -= rsize.  We do this as a form of
         post-decrement not available to C.  */
      t = fold_convert (TREE_TYPE (off), build_int_cst (NULL_TREE, rsize));
      t = build2 (POSTDECREMENT_EXPR, TREE_TYPE (off), off, t);

      /* [4] Emit code for:
         addr_rtx = top - off + (BYTES_BIG_ENDIAN ? RSIZE - SIZE : 0).  */
      t = fold_convert (sizetype, t);
      t = fold_build1 (NEGATE_EXPR, sizetype, t);
      t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (top), top, t);
      if (BYTES_BIG_ENDIAN && rsize > size)
        {
          u = size_int (rsize - size);
          t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
        }
      COND_EXPR_THEN (addr) = t;

      if (osize > UNITS_PER_WORD)
        {
          /* [9] Emit: ovfl = ((intptr_t) ovfl + osize - 1) & -osize.  */
          u = size_int (osize - 1);
          t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl),
                      unshare_expr (ovfl), u);
          t = fold_convert (sizetype, t);
          u = size_int (-osize);
          t = build2 (BIT_AND_EXPR, sizetype, t, u);
          t = fold_convert (TREE_TYPE (ovfl), t);
          align = build2 (MODIFY_EXPR, TREE_TYPE (ovfl),
                          unshare_expr (ovfl), t);
        }
      else
        align = NULL;

      /* [10, 11] Emit code for:
         addr_rtx = ovfl + (BYTES_BIG_ENDIAN ? OSIZE - SIZE : 0)
         ovfl += osize.  */
      u = fold_convert (TREE_TYPE (ovfl), build_int_cst (NULL_TREE, osize));
      t = build2 (POSTINCREMENT_EXPR, TREE_TYPE (ovfl), ovfl, u);
      if (BYTES_BIG_ENDIAN && osize > size)
        {
          u = size_int (osize - size);
          t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
        }

      /* String [9] and [10, 11] together.  */
      if (align)
        t = build2 (COMPOUND_EXPR, TREE_TYPE (t), align, t);
      COND_EXPR_ELSE (addr) = t;

      addr = fold_convert (build_pointer_type (type), addr);
      addr = build_va_arg_indirect_ref (addr);
    }

  if (indirect_p)
    addr = build_va_arg_indirect_ref (addr);

  return addr;
}

/* Start a definition of function NAME.  MIPS16_P indicates whether the
   function contains MIPS16 code.  */

static void
mips_start_function_definition (const char *name, bool mips16_p)
{
  if (mips16_p)
    fprintf (asm_out_file, "\t.set\tmips16\n");
  else
    fprintf (asm_out_file, "\t.set\tnomips16\n");
|
5726 |
|
|
|
5727 |
|
|
if (!flag_inhibit_size_directive)
|
5728 |
|
|
{
|
5729 |
|
|
fputs ("\t.ent\t", asm_out_file);
|
5730 |
|
|
assemble_name (asm_out_file, name);
|
5731 |
|
|
fputs ("\n", asm_out_file);
|
5732 |
|
|
}
|
5733 |
|
|
|
5734 |
|
|
ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, name, "function");
|
5735 |
|
|
|
5736 |
|
|
/* Start the definition proper. */
|
5737 |
|
|
assemble_name (asm_out_file, name);
|
5738 |
|
|
fputs (":\n", asm_out_file);
|
5739 |
|
|
}
|
5740 |
|
|
|
5741 |
|
|
/* End a function definition started by mips_start_function_definition. */
|
5742 |
|
|
|
5743 |
|
|
static void
|
5744 |
|
|
mips_end_function_definition (const char *name)
|
5745 |
|
|
{
|
5746 |
|
|
if (!flag_inhibit_size_directive)
|
5747 |
|
|
{
|
5748 |
|
|
fputs ("\t.end\t", asm_out_file);
|
5749 |
|
|
assemble_name (asm_out_file, name);
|
5750 |
|
|
fputs ("\n", asm_out_file);
|
5751 |
|
|
}
|
5752 |
|
|
}
|
5753 |
|
|
|
/* Return true if calls to X can use R_MIPS_CALL* relocations.  */

static bool
mips_ok_for_lazy_binding_p (rtx x)
{
  return (TARGET_USE_GOT
	  && GET_CODE (x) == SYMBOL_REF
	  && !SYMBOL_REF_BIND_NOW_P (x)
	  && !mips_symbol_binds_local_p (x));
}
5764 |
|
|
|
5765 |
|
|
/* Load function address ADDR into register DEST. TYPE is as for
|
5766 |
|
|
mips_expand_call. Return true if we used an explicit lazy-binding
|
5767 |
|
|
sequence. */
|
5768 |
|
|
|
5769 |
|
|
static bool
|
5770 |
|
|
mips_load_call_address (enum mips_call_type type, rtx dest, rtx addr)
|
5771 |
|
|
{
|
5772 |
|
|
/* If we're generating PIC, and this call is to a global function,
|
5773 |
|
|
try to allow its address to be resolved lazily. This isn't
|
5774 |
|
|
possible for sibcalls when $gp is call-saved because the value
|
5775 |
|
|
of $gp on entry to the stub would be our caller's gp, not ours. */
|
5776 |
|
|
if (TARGET_EXPLICIT_RELOCS
|
5777 |
|
|
&& !(type == MIPS_CALL_SIBCALL && TARGET_CALL_SAVED_GP)
|
5778 |
|
|
&& mips_ok_for_lazy_binding_p (addr))
|
5779 |
|
|
{
|
5780 |
|
|
addr = mips_got_load (dest, addr, SYMBOL_GOTOFF_CALL);
|
5781 |
|
|
emit_insn (gen_rtx_SET (VOIDmode, dest, addr));
|
5782 |
|
|
return true;
|
5783 |
|
|
}
|
5784 |
|
|
else
|
5785 |
|
|
{
|
5786 |
|
|
mips_emit_move (dest, addr);
|
5787 |
|
|
return false;
|
5788 |
|
|
}
|
5789 |
|
|
}
|
5790 |
|
|
|
5791 |
|
|
/* Each locally-defined hard-float MIPS16 function has a local symbol
|
5792 |
|
|
associated with it. This hash table maps the function symbol (FUNC)
|
5793 |
|
|
to the local symbol (LOCAL). */
|
5794 |
|
|
struct GTY(()) mips16_local_alias {
|
5795 |
|
|
rtx func;
|
5796 |
|
|
rtx local;
|
5797 |
|
|
};
|
5798 |
|
|
static GTY ((param_is (struct mips16_local_alias))) htab_t mips16_local_aliases;
|
5799 |
|
|
|
5800 |
|
|
/* Hash table callbacks for mips16_local_aliases. */
|
5801 |
|
|
|
5802 |
|
|
static hashval_t
|
5803 |
|
|
mips16_local_aliases_hash (const void *entry)
|
5804 |
|
|
{
|
5805 |
|
|
const struct mips16_local_alias *alias;
|
5806 |
|
|
|
5807 |
|
|
alias = (const struct mips16_local_alias *) entry;
|
5808 |
|
|
return htab_hash_string (XSTR (alias->func, 0));
|
5809 |
|
|
}
|
5810 |
|
|
|
5811 |
|
|
static int
|
5812 |
|
|
mips16_local_aliases_eq (const void *entry1, const void *entry2)
|
5813 |
|
|
{
|
5814 |
|
|
const struct mips16_local_alias *alias1, *alias2;
|
5815 |
|
|
|
5816 |
|
|
alias1 = (const struct mips16_local_alias *) entry1;
|
5817 |
|
|
alias2 = (const struct mips16_local_alias *) entry2;
|
5818 |
|
|
return rtx_equal_p (alias1->func, alias2->func);
|
5819 |
|
|
}
|
5820 |
|
|
|
5821 |
|
|
/* FUNC is the symbol for a locally-defined hard-float MIPS16 function.
|
5822 |
|
|
Return a local alias for it, creating a new one if necessary. */
|
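/* For example (illustrative, not in the original source), a function "foo"
   gets the alias symbol "__fn_local_foo", mirroring the "__fn_stub_foo"
   name used for its non-MIPS16 stub.  */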
5823 |
|
|
|
5824 |
|
|
static rtx
|
5825 |
|
|
mips16_local_alias (rtx func)
|
5826 |
|
|
{
|
5827 |
|
|
struct mips16_local_alias *alias, tmp_alias;
|
5828 |
|
|
void **slot;
|
5829 |
|
|
|
5830 |
|
|
/* Create the hash table if this is the first call. */
|
5831 |
|
|
if (mips16_local_aliases == NULL)
|
5832 |
|
|
mips16_local_aliases = htab_create_ggc (37, mips16_local_aliases_hash,
|
5833 |
|
|
mips16_local_aliases_eq, NULL);
|
5834 |
|
|
|
5835 |
|
|
/* Look up the function symbol, creating a new entry if need be. */
|
5836 |
|
|
tmp_alias.func = func;
|
5837 |
|
|
slot = htab_find_slot (mips16_local_aliases, &tmp_alias, INSERT);
|
5838 |
|
|
gcc_assert (slot != NULL);
|
5839 |
|
|
|
5840 |
|
|
alias = (struct mips16_local_alias *) *slot;
|
5841 |
|
|
if (alias == NULL)
|
5842 |
|
|
{
|
5843 |
|
|
const char *func_name, *local_name;
|
5844 |
|
|
rtx local;
|
5845 |
|
|
|
5846 |
|
|
/* Create a new SYMBOL_REF for the local symbol. The choice of
|
5847 |
|
|
__fn_local_* is based on the __fn_stub_* names that we've
|
5848 |
|
|
traditionally used for the non-MIPS16 stub. */
|
5849 |
|
|
func_name = targetm.strip_name_encoding (XSTR (func, 0));
|
5850 |
|
|
local_name = ACONCAT (("__fn_local_", func_name, NULL));
|
5851 |
|
|
local = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (local_name));
|
5852 |
|
|
SYMBOL_REF_FLAGS (local) = SYMBOL_REF_FLAGS (func) | SYMBOL_FLAG_LOCAL;
|
5853 |
|
|
|
5854 |
|
|
/* Create a new structure to represent the mapping. */
|
5855 |
|
|
alias = GGC_NEW (struct mips16_local_alias);
|
5856 |
|
|
alias->func = func;
|
5857 |
|
|
alias->local = local;
|
5858 |
|
|
*slot = alias;
|
5859 |
|
|
}
|
5860 |
|
|
return alias->local;
|
5861 |
|
|
}
|
5862 |
|
|
|
5863 |
|
|
/* A chained list of functions for which mips16_build_call_stub has already
|
5864 |
|
|
generated a stub. NAME is the name of the function and FP_RET_P is true
|
5865 |
|
|
if the function returns a value in floating-point registers. */
|
5866 |
|
|
struct mips16_stub {
|
5867 |
|
|
struct mips16_stub *next;
|
5868 |
|
|
char *name;
|
5869 |
|
|
bool fp_ret_p;
|
5870 |
|
|
};
|
5871 |
|
|
static struct mips16_stub *mips16_stubs;
|
5872 |
|
|
|
5873 |
|
|
/* Return a SYMBOL_REF for a MIPS16 function called NAME. */
|
5874 |
|
|
|
5875 |
|
|
static rtx
|
5876 |
|
|
mips16_stub_function (const char *name)
|
5877 |
|
|
{
|
5878 |
|
|
rtx x;
|
5879 |
|
|
|
5880 |
|
|
x = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
|
5881 |
|
|
SYMBOL_REF_FLAGS (x) |= (SYMBOL_FLAG_EXTERNAL | SYMBOL_FLAG_FUNCTION);
|
5882 |
|
|
return x;
|
5883 |
|
|
}
|
5884 |
|
|
|
5885 |
|
|
/* Return the two-character string that identifies floating-point
|
5886 |
|
|
return mode MODE in the name of a MIPS16 function stub. */
|
5887 |
|
|
|
5888 |
|
|
static const char *
|
5889 |
|
|
mips16_call_stub_mode_suffix (enum machine_mode mode)
|
5890 |
|
|
{
|
5891 |
|
|
if (mode == SFmode)
|
5892 |
|
|
return "sf";
|
5893 |
|
|
else if (mode == DFmode)
|
5894 |
|
|
return "df";
|
5895 |
|
|
else if (mode == SCmode)
|
5896 |
|
|
return "sc";
|
5897 |
|
|
else if (mode == DCmode)
|
5898 |
|
|
return "dc";
|
5899 |
|
|
else if (mode == V2SFmode)
|
5900 |
|
|
return "df";
|
5901 |
|
|
else
|
5902 |
|
|
gcc_unreachable ();
|
5903 |
|
|
}
|
5904 |
|
|
|
5905 |
|
|
/* Write instructions to move a 32-bit value between general register
|
5906 |
|
|
GPREG and floating-point register FPREG. DIRECTION is 't' to move
|
5907 |
|
|
from GPREG to FPREG and 'f' to move in the opposite direction. */
|
5908 |
|
|
|
5909 |
|
|
static void
|
5910 |
|
|
mips_output_32bit_xfer (char direction, unsigned int gpreg, unsigned int fpreg)
|
5911 |
|
|
{
|
5912 |
|
|
fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
|
5913 |
|
|
reg_names[gpreg], reg_names[fpreg]);
|
5914 |
|
|
}
|
5915 |
|
|
|
5916 |
|
|
/* Likewise for 64-bit values. */
|
5917 |
|
|
|
5918 |
|
|
static void
|
5919 |
|
|
mips_output_64bit_xfer (char direction, unsigned int gpreg, unsigned int fpreg)
|
5920 |
|
|
{
|
5921 |
|
|
if (TARGET_64BIT)
|
5922 |
|
|
fprintf (asm_out_file, "\tdm%cc1\t%s,%s\n", direction,
|
5923 |
|
|
reg_names[gpreg], reg_names[fpreg]);
|
5924 |
|
|
else if (TARGET_FLOAT64)
|
5925 |
|
|
{
|
5926 |
|
|
fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
|
5927 |
|
|
reg_names[gpreg + TARGET_BIG_ENDIAN], reg_names[fpreg]);
|
5928 |
|
|
fprintf (asm_out_file, "\tm%chc1\t%s,%s\n", direction,
|
5929 |
|
|
reg_names[gpreg + TARGET_LITTLE_ENDIAN], reg_names[fpreg]);
|
5930 |
|
|
}
|
5931 |
|
|
else
|
5932 |
|
|
{
|
5933 |
|
|
/* Move the least-significant word. */
|
5934 |
|
|
fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
|
5935 |
|
|
reg_names[gpreg + TARGET_BIG_ENDIAN], reg_names[fpreg]);
|
5936 |
|
|
/* ...then the most significant word. */
|
5937 |
|
|
fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
|
5938 |
|
|
reg_names[gpreg + TARGET_LITTLE_ENDIAN], reg_names[fpreg + 1]);
|
5939 |
|
|
}
|
5940 |
|
|
}
|
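/* Illustrative example, not in the original source: on a 32-bit,
   little-endian target without 64-bit FPRs, and assuming the usual MIPS
   register names, mips_output_64bit_xfer ('f', 4, FP_REG_FIRST + 12)
   would emit "mfc1 $4,$f12" followed by "mfc1 $5,$f13".  */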
5941 |
|
|
|
5942 |
|
|
/* Write out code to move floating-point arguments into or out of
|
5943 |
|
|
general registers. FP_CODE is the code describing which arguments
|
5944 |
|
|
are present (see the comment above the definition of CUMULATIVE_ARGS
|
5945 |
|
|
in mips.h). DIRECTION is as for mips_output_32bit_xfer. */
|
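/* Illustrative note, not in the original source: assuming FP_CODE packs
   two bits per leading floating-point argument (1 for float, 2 for double,
   first argument in the least-significant pair), FP_CODE = 9 describes a
   (float, double) argument list and the loop below emits one 32-bit
   transfer followed by one 64-bit transfer.  */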
5946 |
|
|
|
5947 |
|
|
static void
|
5948 |
|
|
mips_output_args_xfer (int fp_code, char direction)
|
5949 |
|
|
{
|
5950 |
|
|
unsigned int gparg, fparg, f;
|
5951 |
|
|
CUMULATIVE_ARGS cum;
|
5952 |
|
|
|
5953 |
|
|
/* This code only works for o32 and o64. */
|
5954 |
|
|
gcc_assert (TARGET_OLDABI);
|
5955 |
|
|
|
5956 |
|
|
mips_init_cumulative_args (&cum, NULL);
|
5957 |
|
|
|
5958 |
|
|
for (f = (unsigned int) fp_code; f != 0; f >>= 2)
|
5959 |
|
|
{
|
5960 |
|
|
enum machine_mode mode;
|
5961 |
|
|
struct mips_arg_info info;
|
5962 |
|
|
|
5963 |
|
|
if ((f & 3) == 1)
|
5964 |
|
|
mode = SFmode;
|
5965 |
|
|
else if ((f & 3) == 2)
|
5966 |
|
|
mode = DFmode;
|
5967 |
|
|
else
|
5968 |
|
|
gcc_unreachable ();
|
5969 |
|
|
|
5970 |
|
|
mips_get_arg_info (&info, &cum, mode, NULL, true);
|
5971 |
|
|
gparg = mips_arg_regno (&info, false);
|
5972 |
|
|
fparg = mips_arg_regno (&info, true);
|
5973 |
|
|
|
5974 |
|
|
if (mode == SFmode)
|
5975 |
|
|
mips_output_32bit_xfer (direction, gparg, fparg);
|
5976 |
|
|
else
|
5977 |
|
|
mips_output_64bit_xfer (direction, gparg, fparg);
|
5978 |
|
|
|
5979 |
|
|
mips_function_arg_advance (&cum, mode, NULL, true);
|
5980 |
|
|
}
|
5981 |
|
|
}
|
5982 |
|
|
|
5983 |
|
|
/* Write a MIPS16 stub for the current function. This stub is used
|
5984 |
|
|
for functions which take arguments in the floating-point registers.
|
5985 |
|
|
It is normal-mode code that moves the floating-point arguments
|
5986 |
|
|
into the general registers and then jumps to the MIPS16 code. */
|
5987 |
|
|
|
5988 |
|
|
static void
|
5989 |
|
|
mips16_build_function_stub (void)
|
5990 |
|
|
{
|
5991 |
|
|
const char *fnname, *alias_name, *separator;
|
5992 |
|
|
char *secname, *stubname;
|
5993 |
|
|
tree stubdecl;
|
5994 |
|
|
unsigned int f;
|
5995 |
|
|
rtx symbol, alias;
|
5996 |
|
|
|
5997 |
|
|
/* Create the name of the stub, and its unique section. */
|
5998 |
|
|
symbol = XEXP (DECL_RTL (current_function_decl), 0);
|
5999 |
|
|
alias = mips16_local_alias (symbol);
|
6000 |
|
|
|
6001 |
|
|
fnname = targetm.strip_name_encoding (XSTR (symbol, 0));
|
6002 |
|
|
alias_name = targetm.strip_name_encoding (XSTR (alias, 0));
|
6003 |
|
|
secname = ACONCAT ((".mips16.fn.", fnname, NULL));
|
6004 |
|
|
stubname = ACONCAT (("__fn_stub_", fnname, NULL));
|
6005 |
|
|
|
6006 |
|
|
/* Build a decl for the stub. */
|
6007 |
|
|
stubdecl = build_decl (BUILTINS_LOCATION,
|
6008 |
|
|
FUNCTION_DECL, get_identifier (stubname),
|
6009 |
|
|
build_function_type (void_type_node, NULL_TREE));
|
6010 |
|
|
DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
|
6011 |
|
|
DECL_RESULT (stubdecl) = build_decl (BUILTINS_LOCATION,
|
6012 |
|
|
RESULT_DECL, NULL_TREE, void_type_node);
|
6013 |
|
|
|
6014 |
|
|
/* Output a comment. */
|
6015 |
|
|
fprintf (asm_out_file, "\t# Stub function for %s (",
|
6016 |
|
|
current_function_name ());
|
6017 |
|
|
separator = "";
|
6018 |
|
|
for (f = (unsigned int) crtl->args.info.fp_code; f != 0; f >>= 2)
|
6019 |
|
|
{
|
6020 |
|
|
fprintf (asm_out_file, "%s%s", separator,
|
6021 |
|
|
(f & 3) == 1 ? "float" : "double");
|
6022 |
|
|
separator = ", ";
|
6023 |
|
|
}
|
6024 |
|
|
fprintf (asm_out_file, ")\n");
|
6025 |
|
|
|
6026 |
|
|
/* Start the function definition. */
|
6027 |
|
|
assemble_start_function (stubdecl, stubname);
|
6028 |
|
|
mips_start_function_definition (stubname, false);
|
6029 |
|
|
|
6030 |
|
|
/* If generating pic2 code, either set up the global pointer or
|
6031 |
|
|
switch to pic0. */
|
6032 |
|
|
if (TARGET_ABICALLS_PIC2)
|
6033 |
|
|
{
|
6034 |
|
|
if (TARGET_ABSOLUTE_ABICALLS)
|
6035 |
|
|
fprintf (asm_out_file, "\t.option\tpic0\n");
|
6036 |
|
|
else
|
6037 |
|
|
{
|
6038 |
|
|
output_asm_insn ("%(.cpload\t%^%)", NULL);
|
6039 |
|
|
/* Emit an R_MIPS_NONE relocation to tell the linker what the
|
6040 |
|
|
target function is. Use a local GOT access when loading the
|
6041 |
|
|
symbol, to cut down on the number of unnecessary GOT entries
|
6042 |
|
|
for stubs that aren't needed. */
|
6043 |
|
|
output_asm_insn (".reloc\t0,R_MIPS_NONE,%0", &symbol);
|
6044 |
|
|
symbol = alias;
|
6045 |
|
|
}
|
6046 |
|
|
}
|
6047 |
|
|
|
6048 |
|
|
/* Load the address of the MIPS16 function into $25. Do this first so
|
6049 |
|
|
that targets with coprocessor interlocks can use an MFC1 to fill the
|
6050 |
|
|
delay slot. */
|
6051 |
|
|
output_asm_insn ("la\t%^,%0", &symbol);
|
6052 |
|
|
|
6053 |
|
|
/* Move the arguments from floating-point registers to general registers. */
|
6054 |
|
|
mips_output_args_xfer (crtl->args.info.fp_code, 'f');
|
6055 |
|
|
|
6056 |
|
|
/* Jump to the MIPS16 function. */
|
6057 |
|
|
output_asm_insn ("jr\t%^", NULL);
|
6058 |
|
|
|
6059 |
|
|
if (TARGET_ABICALLS_PIC2 && TARGET_ABSOLUTE_ABICALLS)
|
6060 |
|
|
fprintf (asm_out_file, "\t.option\tpic2\n");
|
6061 |
|
|
|
6062 |
|
|
mips_end_function_definition (stubname);
|
6063 |
|
|
|
6064 |
|
|
/* If the linker needs to create a dynamic symbol for the target
|
6065 |
|
|
function, it will associate the symbol with the stub (which,
|
6066 |
|
|
unlike the target function, follows the proper calling conventions).
|
6067 |
|
|
It is therefore useful to have a local alias for the target function,
|
6068 |
|
|
so that it can still be identified as MIPS16 code. As an optimization,
|
6069 |
|
|
this symbol can also be used for indirect MIPS16 references from
|
6070 |
|
|
within this file. */
|
6071 |
|
|
ASM_OUTPUT_DEF (asm_out_file, alias_name, fnname);
|
6072 |
|
|
|
6073 |
|
|
switch_to_section (function_section (current_function_decl));
|
6074 |
|
|
}
|
6075 |
|
|
|
6076 |
|
|
/* The current function is a MIPS16 function that returns a value in an FPR.
|
6077 |
|
|
Copy the return value from its soft-float to its hard-float location.
|
6078 |
|
|
libgcc2 has special non-MIPS16 helper functions for each case. */
|
6079 |
|
|
|
6080 |
|
|
static void
|
6081 |
|
|
mips16_copy_fpr_return_value (void)
|
6082 |
|
|
{
|
6083 |
|
|
rtx fn, insn, retval;
|
6084 |
|
|
tree return_type;
|
6085 |
|
|
enum machine_mode return_mode;
|
6086 |
|
|
const char *name;
|
6087 |
|
|
|
6088 |
|
|
return_type = DECL_RESULT (current_function_decl);
|
6089 |
|
|
return_mode = DECL_MODE (return_type);
|
6090 |
|
|
|
6091 |
|
|
name = ACONCAT (("__mips16_ret_",
|
6092 |
|
|
mips16_call_stub_mode_suffix (return_mode),
|
6093 |
|
|
NULL));
|
6094 |
|
|
fn = mips16_stub_function (name);
|
6095 |
|
|
|
6096 |
|
|
/* The function takes arguments in $2 (and possibly $3), so calls
|
6097 |
|
|
to it cannot be lazily bound. */
|
6098 |
|
|
SYMBOL_REF_FLAGS (fn) |= SYMBOL_FLAG_BIND_NOW;
|
6099 |
|
|
|
6100 |
|
|
/* Model the call as something that takes the GPR return value as
|
6101 |
|
|
argument and returns an "updated" value. */
|
6102 |
|
|
retval = gen_rtx_REG (return_mode, GP_RETURN);
|
6103 |
|
|
insn = mips_expand_call (MIPS_CALL_EPILOGUE, retval, fn,
|
6104 |
|
|
const0_rtx, NULL_RTX, false);
|
6105 |
|
|
use_reg (&CALL_INSN_FUNCTION_USAGE (insn), retval);
|
6106 |
|
|
}
|
6107 |
|
|
|
6108 |
|
|
/* Consider building a stub for a MIPS16 call to function *FN_PTR.
|
6109 |
|
|
RETVAL is the location of the return value, or null if this is
|
6110 |
|
|
a "call" rather than a "call_value". ARGS_SIZE is the size of the
|
6111 |
|
|
arguments and FP_CODE is the code built by mips_function_arg;
|
6112 |
|
|
see the comment before the fp_code field in CUMULATIVE_ARGS for details.
|
6113 |
|
|
|
6114 |
|
|
There are three alternatives:
|
6115 |
|
|
|
6116 |
|
|
- If a stub was needed, emit the call and return the call insn itself.
|
6117 |
|
|
|
6118 |
|
|
- If we can avoid using a stub by redirecting the call, set *FN_PTR
|
6119 |
|
|
to the new target and return null.
|
6120 |
|
|
|
6121 |
|
|
- If *FN_PTR doesn't need a stub, return null and leave *FN_PTR
|
6122 |
|
|
unmodified.
|
6123 |
|
|
|
6124 |
|
|
A stub is needed for calls to functions that, in normal mode,
|
6125 |
|
|
receive arguments in FPRs or return values in FPRs. The stub
|
6126 |
|
|
copies the arguments from their soft-float positions to their
|
6127 |
|
|
hard-float positions, calls the real function, then copies the
|
6128 |
|
|
return value from its hard-float position to its soft-float
|
6129 |
|
|
position.
|
6130 |
|
|
|
6131 |
|
|
We can emit a JAL to *FN_PTR even when *FN_PTR might need a stub.
|
6132 |
|
|
If *FN_PTR turns out to be to a non-MIPS16 function, the linker
|
6133 |
|
|
automatically redirects the JAL to the stub, otherwise the JAL
|
6134 |
|
|
continues to call FN directly. */
|
6135 |
|
|
|
6136 |
|
|
static rtx
|
6137 |
|
|
mips16_build_call_stub (rtx retval, rtx *fn_ptr, rtx args_size, int fp_code)
|
6138 |
|
|
{
|
6139 |
|
|
const char *fnname;
|
6140 |
|
|
bool fp_ret_p;
|
6141 |
|
|
struct mips16_stub *l;
|
6142 |
|
|
rtx insn, fn;
|
6143 |
|
|
|
6144 |
|
|
/* We don't need to do anything if we aren't in MIPS16 mode, or if
|
6145 |
|
|
we were invoked with the -msoft-float option. */
|
6146 |
|
|
if (!TARGET_MIPS16 || TARGET_SOFT_FLOAT_ABI)
|
6147 |
|
|
return NULL_RTX;
|
6148 |
|
|
|
6149 |
|
|
/* Figure out whether the value might come back in a floating-point
|
6150 |
|
|
register. */
|
6151 |
|
|
fp_ret_p = retval && mips_return_mode_in_fpr_p (GET_MODE (retval));
|
6152 |
|
|
|
6153 |
|
|
/* We don't need to do anything if there were no floating-point
|
6154 |
|
|
arguments and the value will not be returned in a floating-point
|
6155 |
|
|
register. */
|
6156 |
|
|
if (fp_code == 0 && !fp_ret_p)
|
6157 |
|
|
return NULL_RTX;
|
6158 |
|
|
|
6159 |
|
|
/* We don't need to do anything if this is a call to a special
|
6160 |
|
|
MIPS16 support function. */
|
6161 |
|
|
fn = *fn_ptr;
|
6162 |
|
|
if (mips16_stub_function_p (fn))
|
6163 |
|
|
return NULL_RTX;
|
6164 |
|
|
|
6165 |
|
|
/* This code will only work for the o32 and o64 ABIs.  The other ABIs
|
6166 |
|
|
require more sophisticated support. */
|
6167 |
|
|
gcc_assert (TARGET_OLDABI);
|
6168 |
|
|
|
6169 |
|
|
/* If we're calling via a function pointer, use one of the magic
|
6170 |
|
|
libgcc.a stubs provided for each (FP_CODE, FP_RET_P) combination.
|
6171 |
|
|
Each stub expects the function address to arrive in register $2. */
|
6172 |
|
|
if (GET_CODE (fn) != SYMBOL_REF
|
6173 |
|
|
|| !call_insn_operand (fn, VOIDmode))
|
6174 |
|
|
{
|
6175 |
|
|
char buf[30];
|
6176 |
|
|
rtx stub_fn, insn, addr;
|
6177 |
|
|
bool lazy_p;
|
6178 |
|
|
|
6179 |
|
|
/* If this is a locally-defined and locally-binding function,
|
6180 |
|
|
avoid the stub by calling the local alias directly. */
|
6181 |
|
|
if (mips16_local_function_p (fn))
|
6182 |
|
|
{
|
6183 |
|
|
*fn_ptr = mips16_local_alias (fn);
|
6184 |
|
|
return NULL_RTX;
|
6185 |
|
|
}
|
6186 |
|
|
|
6187 |
|
|
/* Create a SYMBOL_REF for the libgcc.a function. */
|
6188 |
|
|
if (fp_ret_p)
|
6189 |
|
|
sprintf (buf, "__mips16_call_stub_%s_%d",
|
6190 |
|
|
mips16_call_stub_mode_suffix (GET_MODE (retval)),
|
6191 |
|
|
fp_code);
|
6192 |
|
|
else
|
6193 |
|
|
sprintf (buf, "__mips16_call_stub_%d", fp_code);
|
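      /* Illustrative note, not in the original source: a call through a
	 pointer of type "double (*) (float, double)" has fp_code 9 under
	 the encoding sketched above mips_output_args_xfer, and so would
	 use the libgcc stub "__mips16_call_stub_df_9".  */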
6194 |
|
|
stub_fn = mips16_stub_function (buf);
|
6195 |
|
|
|
6196 |
|
|
/* The function uses $2 as an argument, so calls to it
|
6197 |
|
|
cannot be lazily bound. */
|
6198 |
|
|
SYMBOL_REF_FLAGS (stub_fn) |= SYMBOL_FLAG_BIND_NOW;
|
6199 |
|
|
|
6200 |
|
|
/* Load the target function into $2. */
|
6201 |
|
|
addr = gen_rtx_REG (Pmode, GP_REG_FIRST + 2);
|
6202 |
|
|
lazy_p = mips_load_call_address (MIPS_CALL_NORMAL, addr, fn);
|
6203 |
|
|
|
6204 |
|
|
/* Emit the call. */
|
6205 |
|
|
insn = mips_expand_call (MIPS_CALL_NORMAL, retval, stub_fn,
|
6206 |
|
|
args_size, NULL_RTX, lazy_p);
|
6207 |
|
|
|
6208 |
|
|
/* Tell GCC that this call does indeed use the value of $2. */
|
6209 |
|
|
use_reg (&CALL_INSN_FUNCTION_USAGE (insn), addr);
|
6210 |
|
|
|
6211 |
|
|
/* If we are handling a floating-point return value, we need to
|
6212 |
|
|
save $18 in the function prologue. Putting a note on the
|
6213 |
|
|
call will mean that df_regs_ever_live_p ($18) will be true if the
|
6214 |
|
|
call is not eliminated, and we can check that in the prologue
|
6215 |
|
|
code. */
|
6216 |
|
|
if (fp_ret_p)
|
6217 |
|
|
CALL_INSN_FUNCTION_USAGE (insn) =
|
6218 |
|
|
gen_rtx_EXPR_LIST (VOIDmode,
|
6219 |
|
|
gen_rtx_CLOBBER (VOIDmode,
|
6220 |
|
|
gen_rtx_REG (word_mode, 18)),
|
6221 |
|
|
CALL_INSN_FUNCTION_USAGE (insn));
|
6222 |
|
|
|
6223 |
|
|
return insn;
|
6224 |
|
|
}
|
6225 |
|
|
|
6226 |
|
|
/* We know the function we are going to call. If we have already
|
6227 |
|
|
built a stub, we don't need to do anything further. */
|
6228 |
|
|
fnname = targetm.strip_name_encoding (XSTR (fn, 0));
|
6229 |
|
|
for (l = mips16_stubs; l != NULL; l = l->next)
|
6230 |
|
|
if (strcmp (l->name, fnname) == 0)
|
6231 |
|
|
break;
|
6232 |
|
|
|
6233 |
|
|
if (l == NULL)
|
6234 |
|
|
{
|
6235 |
|
|
const char *separator;
|
6236 |
|
|
char *secname, *stubname;
|
6237 |
|
|
tree stubid, stubdecl;
|
6238 |
|
|
unsigned int f;
|
6239 |
|
|
|
6240 |
|
|
/* If the function does not return in FPRs, the special stub
|
6241 |
|
|
section is named
|
6242 |
|
|
.mips16.call.FNNAME
|
6243 |
|
|
|
6244 |
|
|
If the function does return in FPRs, the stub section is named
|
6245 |
|
|
.mips16.call.fp.FNNAME
|
6246 |
|
|
|
6247 |
|
|
Build a decl for the stub. */
|
6248 |
|
|
secname = ACONCAT ((".mips16.call.", fp_ret_p ? "fp." : "",
|
6249 |
|
|
fnname, NULL));
|
6250 |
|
|
stubname = ACONCAT (("__call_stub_", fp_ret_p ? "fp_" : "",
|
6251 |
|
|
fnname, NULL));
|
6252 |
|
|
stubid = get_identifier (stubname);
|
6253 |
|
|
stubdecl = build_decl (BUILTINS_LOCATION,
|
6254 |
|
|
FUNCTION_DECL, stubid,
|
6255 |
|
|
build_function_type (void_type_node, NULL_TREE));
|
6256 |
|
|
DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
|
6257 |
|
|
DECL_RESULT (stubdecl) = build_decl (BUILTINS_LOCATION,
|
6258 |
|
|
RESULT_DECL, NULL_TREE,
|
6259 |
|
|
void_type_node);
|
6260 |
|
|
|
6261 |
|
|
/* Output a comment. */
|
6262 |
|
|
fprintf (asm_out_file, "\t# Stub function to call %s%s (",
|
6263 |
|
|
(fp_ret_p
|
6264 |
|
|
? (GET_MODE (retval) == SFmode ? "float " : "double ")
|
6265 |
|
|
: ""),
|
6266 |
|
|
fnname);
|
6267 |
|
|
separator = "";
|
6268 |
|
|
for (f = (unsigned int) fp_code; f != 0; f >>= 2)
|
6269 |
|
|
{
|
6270 |
|
|
fprintf (asm_out_file, "%s%s", separator,
|
6271 |
|
|
(f & 3) == 1 ? "float" : "double");
|
6272 |
|
|
separator = ", ";
|
6273 |
|
|
}
|
6274 |
|
|
fprintf (asm_out_file, ")\n");
|
6275 |
|
|
|
6276 |
|
|
/* Start the function definition. */
|
6277 |
|
|
assemble_start_function (stubdecl, stubname);
|
6278 |
|
|
mips_start_function_definition (stubname, false);
|
6279 |
|
|
|
6280 |
|
|
if (!fp_ret_p)
|
6281 |
|
|
{
|
6282 |
|
|
/* Load the address of the MIPS16 function into $25. Do this
|
6283 |
|
|
first so that targets with coprocessor interlocks can use
|
6284 |
|
|
an MFC1 to fill the delay slot. */
|
6285 |
|
|
if (TARGET_EXPLICIT_RELOCS)
|
6286 |
|
|
{
|
6287 |
|
|
output_asm_insn ("lui\t%^,%%hi(%0)", &fn);
|
6288 |
|
|
output_asm_insn ("addiu\t%^,%^,%%lo(%0)", &fn);
|
6289 |
|
|
}
|
6290 |
|
|
else
|
6291 |
|
|
output_asm_insn ("la\t%^,%0", &fn);
|
6292 |
|
|
}
|
6293 |
|
|
|
6294 |
|
|
/* Move the arguments from general registers to floating-point
|
6295 |
|
|
registers. */
|
6296 |
|
|
mips_output_args_xfer (fp_code, 't');
|
6297 |
|
|
|
6298 |
|
|
if (!fp_ret_p)
|
6299 |
|
|
{
|
6300 |
|
|
/* Jump to the previously-loaded address. */
|
6301 |
|
|
output_asm_insn ("jr\t%^", NULL);
|
6302 |
|
|
}
|
6303 |
|
|
else
|
6304 |
|
|
{
|
6305 |
|
|
/* Save the return address in $18 and call the non-MIPS16 function.
|
6306 |
|
|
The stub's caller knows that $18 might be clobbered, even though
|
6307 |
|
|
$18 is usually a call-saved register. */
|
6308 |
|
|
fprintf (asm_out_file, "\tmove\t%s,%s\n",
|
6309 |
|
|
reg_names[GP_REG_FIRST + 18], reg_names[RETURN_ADDR_REGNUM]);
|
6310 |
|
|
output_asm_insn (MIPS_CALL ("jal", &fn, 0, -1), &fn);
|
6311 |
|
|
|
6312 |
|
|
/* Move the result from floating-point registers to
|
6313 |
|
|
general registers. */
|
6314 |
|
|
switch (GET_MODE (retval))
|
6315 |
|
|
{
|
6316 |
|
|
case SCmode:
|
6317 |
|
|
mips_output_32bit_xfer ('f', GP_RETURN + 1,
|
6318 |
|
|
FP_REG_FIRST + MAX_FPRS_PER_FMT);
|
6319 |
|
|
/* Fall through.  */
|
6320 |
|
|
case SFmode:
|
6321 |
|
|
mips_output_32bit_xfer ('f', GP_RETURN, FP_REG_FIRST);
|
6322 |
|
|
if (GET_MODE (retval) == SCmode && TARGET_64BIT)
|
6323 |
|
|
{
|
6324 |
|
|
/* On 64-bit targets, complex floats are returned in
|
6325 |
|
|
a single GPR, such that "sd" on a suitably-aligned
|
6326 |
|
|
target would store the value correctly. */
|
6327 |
|
|
fprintf (asm_out_file, "\tdsll\t%s,%s,32\n",
|
6328 |
|
|
reg_names[GP_RETURN + TARGET_LITTLE_ENDIAN],
|
6329 |
|
|
reg_names[GP_RETURN + TARGET_LITTLE_ENDIAN]);
|
6330 |
|
|
fprintf (asm_out_file, "\tor\t%s,%s,%s\n",
|
6331 |
|
|
reg_names[GP_RETURN],
|
6332 |
|
|
reg_names[GP_RETURN],
|
6333 |
|
|
reg_names[GP_RETURN + 1]);
|
6334 |
|
|
}
|
6335 |
|
|
break;
|
6336 |
|
|
|
6337 |
|
|
case DCmode:
|
6338 |
|
|
mips_output_64bit_xfer ('f', GP_RETURN + (8 / UNITS_PER_WORD),
|
6339 |
|
|
FP_REG_FIRST + MAX_FPRS_PER_FMT);
|
6340 |
|
|
/* Fall through.  */
|
6341 |
|
|
case DFmode:
|
6342 |
|
|
case V2SFmode:
|
6343 |
|
|
mips_output_64bit_xfer ('f', GP_RETURN, FP_REG_FIRST);
|
6344 |
|
|
break;
|
6345 |
|
|
|
6346 |
|
|
default:
|
6347 |
|
|
gcc_unreachable ();
|
6348 |
|
|
}
|
6349 |
|
|
fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 18]);
|
6350 |
|
|
}
|
6351 |
|
|
|
6352 |
|
|
#ifdef ASM_DECLARE_FUNCTION_SIZE
|
6353 |
|
|
ASM_DECLARE_FUNCTION_SIZE (asm_out_file, stubname, stubdecl);
|
6354 |
|
|
#endif
|
6355 |
|
|
|
6356 |
|
|
mips_end_function_definition (stubname);
|
6357 |
|
|
|
6358 |
|
|
/* Record this stub. */
|
6359 |
|
|
l = XNEW (struct mips16_stub);
|
6360 |
|
|
l->name = xstrdup (fnname);
|
6361 |
|
|
l->fp_ret_p = fp_ret_p;
|
6362 |
|
|
l->next = mips16_stubs;
|
6363 |
|
|
mips16_stubs = l;
|
6364 |
|
|
}
|
6365 |
|
|
|
6366 |
|
|
/* If we expect a floating-point return value, but we've built a
|
6367 |
|
|
stub which does not expect one, then we're in trouble. We can't
|
6368 |
|
|
use the existing stub, because it won't handle the floating-point
|
6369 |
|
|
value. We can't build a new stub, because the linker won't know
|
6370 |
|
|
which stub to use for the various calls in this object file.
|
6371 |
|
|
Fortunately, this case is illegal, since it means that a function
|
6372 |
|
|
was declared in two different ways in a single compilation. */
|
6373 |
|
|
if (fp_ret_p && !l->fp_ret_p)
|
6374 |
|
|
error ("cannot handle inconsistent calls to %qs", fnname);
|
6375 |
|
|
|
6376 |
|
|
if (retval == NULL_RTX)
|
6377 |
|
|
insn = gen_call_internal_direct (fn, args_size);
|
6378 |
|
|
else
|
6379 |
|
|
insn = gen_call_value_internal_direct (retval, fn, args_size);
|
6380 |
|
|
insn = mips_emit_call_insn (insn, fn, fn, false);
|
6381 |
|
|
|
6382 |
|
|
/* If we are calling a stub which handles a floating-point return
|
6383 |
|
|
value, we need to arrange to save $18 in the prologue. We do this
|
6384 |
|
|
by marking the function call as using the register. The prologue
|
6385 |
|
|
will later see that it is used, and emit code to save it. */
|
6386 |
|
|
if (fp_ret_p)
|
6387 |
|
|
CALL_INSN_FUNCTION_USAGE (insn) =
|
6388 |
|
|
gen_rtx_EXPR_LIST (VOIDmode,
|
6389 |
|
|
gen_rtx_CLOBBER (VOIDmode,
|
6390 |
|
|
gen_rtx_REG (word_mode, 18)),
|
6391 |
|
|
CALL_INSN_FUNCTION_USAGE (insn));
|
6392 |
|
|
|
6393 |
|
|
return insn;
|
6394 |
|
|
}
|
6395 |
|
|
|
6396 |
|
|
/* Expand a call of type TYPE. RESULT is where the result will go (null
|
6397 |
|
|
for "call"s and "sibcall"s), ADDR is the address of the function,
|
6398 |
|
|
ARGS_SIZE is the size of the arguments and AUX is the value passed
|
6399 |
|
|
to us by mips_function_arg. LAZY_P is true if this call already
|
6400 |
|
|
involves a lazily-bound function address (such as when calling
|
6401 |
|
|
functions through a MIPS16 hard-float stub).
|
6402 |
|
|
|
6403 |
|
|
Return the call itself. */
|
6404 |
|
|
|
6405 |
|
|
rtx
|
6406 |
|
|
mips_expand_call (enum mips_call_type type, rtx result, rtx addr,
|
6407 |
|
|
rtx args_size, rtx aux, bool lazy_p)
|
6408 |
|
|
{
|
6409 |
|
|
rtx orig_addr, pattern, insn;
|
6410 |
|
|
int fp_code;
|
6411 |
|
|
|
6412 |
|
|
fp_code = aux == 0 ? 0 : (int) GET_MODE (aux);
|
6413 |
|
|
insn = mips16_build_call_stub (result, &addr, args_size, fp_code);
|
6414 |
|
|
if (insn)
|
6415 |
|
|
{
|
6416 |
|
|
gcc_assert (!lazy_p && type == MIPS_CALL_NORMAL);
|
6417 |
|
|
return insn;
|
6418 |
|
|
}
|
6419 |
|
|
|
6420 |
|
|
orig_addr = addr;
|
6421 |
|
|
if (!call_insn_operand (addr, VOIDmode))
|
6422 |
|
|
{
|
6423 |
|
|
if (type == MIPS_CALL_EPILOGUE)
|
6424 |
|
|
addr = MIPS_EPILOGUE_TEMP (Pmode);
|
6425 |
|
|
else
|
6426 |
|
|
addr = gen_reg_rtx (Pmode);
|
6427 |
|
|
lazy_p |= mips_load_call_address (type, addr, orig_addr);
|
6428 |
|
|
}
|
6429 |
|
|
|
6430 |
|
|
if (result == 0)
|
6431 |
|
|
{
|
6432 |
|
|
rtx (*fn) (rtx, rtx);
|
6433 |
|
|
|
6434 |
|
|
if (type == MIPS_CALL_SIBCALL)
|
6435 |
|
|
fn = gen_sibcall_internal;
|
6436 |
|
|
else
|
6437 |
|
|
fn = gen_call_internal;
|
6438 |
|
|
|
6439 |
|
|
pattern = fn (addr, args_size);
|
6440 |
|
|
}
|
6441 |
|
|
else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
|
6442 |
|
|
{
|
6443 |
|
|
/* Handle return values created by mips_return_fpr_pair. */
|
6444 |
|
|
rtx (*fn) (rtx, rtx, rtx, rtx);
|
6445 |
|
|
rtx reg1, reg2;
|
6446 |
|
|
|
6447 |
|
|
if (type == MIPS_CALL_SIBCALL)
|
6448 |
|
|
fn = gen_sibcall_value_multiple_internal;
|
6449 |
|
|
else
|
6450 |
|
|
fn = gen_call_value_multiple_internal;
|
6451 |
|
|
|
6452 |
|
|
reg1 = XEXP (XVECEXP (result, 0, 0), 0);
|
6453 |
|
|
reg2 = XEXP (XVECEXP (result, 0, 1), 0);
|
6454 |
|
|
pattern = fn (reg1, addr, args_size, reg2);
|
6455 |
|
|
}
|
6456 |
|
|
else
|
6457 |
|
|
{
|
6458 |
|
|
rtx (*fn) (rtx, rtx, rtx);
|
6459 |
|
|
|
6460 |
|
|
if (type == MIPS_CALL_SIBCALL)
|
6461 |
|
|
fn = gen_sibcall_value_internal;
|
6462 |
|
|
else
|
6463 |
|
|
fn = gen_call_value_internal;
|
6464 |
|
|
|
6465 |
|
|
/* Handle return values created by mips_return_fpr_single. */
|
6466 |
|
|
if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 1)
|
6467 |
|
|
result = XEXP (XVECEXP (result, 0, 0), 0);
|
6468 |
|
|
pattern = fn (result, addr, args_size);
|
6469 |
|
|
}
|
6470 |
|
|
|
6471 |
|
|
return mips_emit_call_insn (pattern, orig_addr, addr, lazy_p);
|
6472 |
|
|
}
|
6473 |
|
|
|
6474 |
|
|
/* Split call instruction INSN into a $gp-clobbering call and
|
6475 |
|
|
(where necessary) an instruction to restore $gp from its save slot.
|
6476 |
|
|
CALL_PATTERN is the pattern of the new call. */
|
6477 |
|
|
|
6478 |
|
|
void
|
6479 |
|
|
mips_split_call (rtx insn, rtx call_pattern)
|
6480 |
|
|
{
|
6481 |
|
|
rtx new_insn;
|
6482 |
|
|
|
6483 |
|
|
new_insn = emit_call_insn (call_pattern);
|
6484 |
|
|
CALL_INSN_FUNCTION_USAGE (new_insn)
|
6485 |
|
|
= copy_rtx (CALL_INSN_FUNCTION_USAGE (insn));
|
6486 |
|
|
if (!find_reg_note (insn, REG_NORETURN, 0))
|
6487 |
|
|
/* Pick a temporary register that is suitable for both MIPS16 and
|
6488 |
|
|
non-MIPS16 code. $4 and $5 are used for returning complex double
|
6489 |
|
|
values in soft-float code, so $6 is the first suitable candidate. */
|
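    /* (Illustrative note, not in the original source: GP_ARG_FIRST is $4,
       so GP_ARG_FIRST + 2 below is register $6.)  */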
6490 |
|
|
mips_restore_gp_from_cprestore_slot (gen_rtx_REG (Pmode, GP_ARG_FIRST + 2));
|
6491 |
|
|
}
|
6492 |
|
|
|
6493 |
|
|
/* Implement TARGET_FUNCTION_OK_FOR_SIBCALL. */
|
6494 |
|
|
|
6495 |
|
|
static bool
|
6496 |
|
|
mips_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
|
6497 |
|
|
{
|
6498 |
|
|
if (!TARGET_SIBCALLS)
|
6499 |
|
|
return false;
|
6500 |
|
|
|
6501 |
|
|
/* Interrupt handlers need special epilogue code and therefore can't
|
6502 |
|
|
use sibcalls. */
|
6503 |
|
|
if (mips_interrupt_type_p (TREE_TYPE (current_function_decl)))
|
6504 |
|
|
return false;
|
6505 |
|
|
|
6506 |
|
|
/* We can't do a sibcall if the called function is a MIPS16 function
|
6507 |
|
|
because there is no direct "jx" instruction equivalent to "jalx" to
|
6508 |
|
|
switch the ISA mode. We only care about cases where the sibling
|
6509 |
|
|
and normal calls would both be direct. */
|
6510 |
|
|
if (decl
|
6511 |
|
|
&& mips_use_mips16_mode_p (decl)
|
6512 |
|
|
&& const_call_insn_operand (XEXP (DECL_RTL (decl), 0), VOIDmode))
|
6513 |
|
|
return false;
|
6514 |
|
|
|
6515 |
|
|
/* When -minterlink-mips16 is in effect, assume that non-locally-binding
|
6516 |
|
|
functions could be MIPS16 ones unless an attribute explicitly tells
|
6517 |
|
|
us otherwise. */
|
6518 |
|
|
if (TARGET_INTERLINK_MIPS16
|
6519 |
|
|
&& decl
|
6520 |
|
|
&& (DECL_EXTERNAL (decl) || !targetm.binds_local_p (decl))
|
6521 |
|
|
&& !mips_nomips16_decl_p (decl)
|
6522 |
|
|
&& const_call_insn_operand (XEXP (DECL_RTL (decl), 0), VOIDmode))
|
6523 |
|
|
return false;
|
6524 |
|
|
|
6525 |
|
|
/* Otherwise OK. */
|
6526 |
|
|
return true;
|
6527 |
|
|
}
|
6528 |
|
|
|
6529 |
|
|
/* Emit code to move general operand SRC into condition-code
|
6530 |
|
|
register DEST given that SCRATCH is a scratch TFmode FPR.
|
6531 |
|
|
The sequence is:
|
6532 |
|
|
|
6533 |
|
|
FP1 = SRC
|
6534 |
|
|
FP2 = 0.0f
|
6535 |
|
|
DEST = FP2 < FP1
|
6536 |
|
|
|
6537 |
|
|
where FP1 and FP2 are single-precision FPRs taken from SCRATCH. */
|
6538 |
|
|
|
6539 |
|
|
void
|
6540 |
|
|
mips_expand_fcc_reload (rtx dest, rtx src, rtx scratch)
|
6541 |
|
|
{
|
6542 |
|
|
rtx fp1, fp2;
|
6543 |
|
|
|
6544 |
|
|
/* Change the source to SFmode. */
|
6545 |
|
|
if (MEM_P (src))
|
6546 |
|
|
src = adjust_address (src, SFmode, 0);
|
6547 |
|
|
else if (REG_P (src) || GET_CODE (src) == SUBREG)
|
6548 |
|
|
src = gen_rtx_REG (SFmode, true_regnum (src));
|
6549 |
|
|
|
6550 |
|
|
fp1 = gen_rtx_REG (SFmode, REGNO (scratch));
|
6551 |
|
|
fp2 = gen_rtx_REG (SFmode, REGNO (scratch) + MAX_FPRS_PER_FMT);
|
6552 |
|
|
|
6553 |
|
|
mips_emit_move (copy_rtx (fp1), src);
|
6554 |
|
|
mips_emit_move (copy_rtx (fp2), CONST0_RTX (SFmode));
|
6555 |
|
|
emit_insn (gen_slt_sf (dest, fp2, fp1));
|
6556 |
|
|
}
|
6557 |
|
|
|
6558 |
|
|
/* Emit straight-line code to move LENGTH bytes from SRC to DEST.
|
6559 |
|
|
Assume that the areas do not overlap. */
|
6560 |
|
|
|
6561 |
|
|
static void
|
6562 |
|
|
mips_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
|
6563 |
|
|
{
|
6564 |
|
|
HOST_WIDE_INT offset, delta;
|
6565 |
|
|
unsigned HOST_WIDE_INT bits;
|
6566 |
|
|
int i;
|
6567 |
|
|
enum machine_mode mode;
|
6568 |
|
|
rtx *regs;
|
6569 |
|
|
|
6570 |
|
|
/* Work out how many bits to move at a time. If both operands have
|
6571 |
|
|
half-word alignment, it is usually better to move in half words.
|
6572 |
|
|
For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr
|
6573 |
|
|
and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr.
|
6574 |
|
|
Otherwise move word-sized chunks. */
|
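  /* Illustrative note, not in the original source: with 32-bit words and
     both operands only half-word aligned, BITS is 16, so an 11-byte copy
     uses five half-word load/store pairs and leaves one byte for the
     move_by_pieces call at the end of this function.  */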
6575 |
|
|
if (MEM_ALIGN (src) == BITS_PER_WORD / 2
|
6576 |
|
|
&& MEM_ALIGN (dest) == BITS_PER_WORD / 2)
|
6577 |
|
|
bits = BITS_PER_WORD / 2;
|
6578 |
|
|
else
|
6579 |
|
|
bits = BITS_PER_WORD;
|
6580 |
|
|
|
6581 |
|
|
mode = mode_for_size (bits, MODE_INT, 0);
|
6582 |
|
|
delta = bits / BITS_PER_UNIT;
|
6583 |
|
|
|
6584 |
|
|
/* Allocate a buffer for the temporary registers. */
|
6585 |
|
|
regs = XALLOCAVEC (rtx, length / delta);
|
6586 |
|
|
|
6587 |
|
|
/* Load as many BITS-sized chunks as possible. Use a normal load if
|
6588 |
|
|
the source has enough alignment, otherwise use left/right pairs. */
|
6589 |
|
|
for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
|
6590 |
|
|
{
|
6591 |
|
|
regs[i] = gen_reg_rtx (mode);
|
6592 |
|
|
if (MEM_ALIGN (src) >= bits)
|
6593 |
|
|
mips_emit_move (regs[i], adjust_address (src, mode, offset));
|
6594 |
|
|
else
|
6595 |
|
|
{
|
6596 |
|
|
rtx part = adjust_address (src, BLKmode, offset);
|
6597 |
|
|
if (!mips_expand_ext_as_unaligned_load (regs[i], part, bits, 0))
|
6598 |
|
|
gcc_unreachable ();
|
6599 |
|
|
}
|
6600 |
|
|
}
|
6601 |
|
|
|
6602 |
|
|
/* Copy the chunks to the destination. */
|
6603 |
|
|
for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
|
6604 |
|
|
if (MEM_ALIGN (dest) >= bits)
|
6605 |
|
|
mips_emit_move (adjust_address (dest, mode, offset), regs[i]);
|
6606 |
|
|
else
|
6607 |
|
|
{
|
6608 |
|
|
rtx part = adjust_address (dest, BLKmode, offset);
|
6609 |
|
|
if (!mips_expand_ins_as_unaligned_store (part, regs[i], bits, 0))
|
6610 |
|
|
gcc_unreachable ();
|
6611 |
|
|
}
|
6612 |
|
|
|
6613 |
|
|
/* Mop up any left-over bytes. */
|
6614 |
|
|
if (offset < length)
|
6615 |
|
|
{
|
6616 |
|
|
src = adjust_address (src, BLKmode, offset);
|
6617 |
|
|
dest = adjust_address (dest, BLKmode, offset);
|
6618 |
|
|
move_by_pieces (dest, src, length - offset,
|
6619 |
|
|
MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
|
6620 |
|
|
}
|
6621 |
|
|
}
|
6622 |
|
|
|
6623 |
|
|
/* Helper function for doing a loop-based block operation on memory
|
6624 |
|
|
reference MEM. Each iteration of the loop will operate on LENGTH
|
6625 |
|
|
bytes of MEM.
|
6626 |
|
|
|
6627 |
|
|
Create a new base register for use within the loop and point it to
|
6628 |
|
|
the start of MEM. Create a new memory reference that uses this
|
6629 |
|
|
register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
|
6630 |
|
|
|
6631 |
|
|
static void
|
6632 |
|
|
mips_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
|
6633 |
|
|
rtx *loop_reg, rtx *loop_mem)
|
6634 |
|
|
{
|
6635 |
|
|
*loop_reg = copy_addr_to_reg (XEXP (mem, 0));
|
6636 |
|
|
|
6637 |
|
|
/* Although the new mem does not refer to a known location,
|
6638 |
|
|
it does keep up to LENGTH bytes of alignment. */
|
6639 |
|
|
*loop_mem = change_address (mem, BLKmode, *loop_reg);
|
6640 |
|
|
set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
|
6641 |
|
|
}
|
6642 |
|
|
|
6643 |
|
|
/* Move LENGTH bytes from SRC to DEST using a loop that moves BYTES_PER_ITER
|
6644 |
|
|
bytes at a time. LENGTH must be at least BYTES_PER_ITER. Assume that
|
6645 |
|
|
the memory regions do not overlap. */
|
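/* Worked example, not in the original source: for LENGTH = 100 and
   BYTES_PER_ITER = 16, LEFTOVER is 4, so the loop copies 96 bytes in six
   iterations and mips_block_move_straight then copies the final 4 bytes.  */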
6646 |
|
|
|
6647 |
|
|
static void
|
6648 |
|
|
mips_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length,
|
6649 |
|
|
HOST_WIDE_INT bytes_per_iter)
|
6650 |
|
|
{
|
6651 |
|
|
rtx label, src_reg, dest_reg, final_src, test;
|
6652 |
|
|
HOST_WIDE_INT leftover;
|
6653 |
|
|
|
6654 |
|
|
leftover = length % bytes_per_iter;
|
6655 |
|
|
length -= leftover;
|
6656 |
|
|
|
6657 |
|
|
/* Create registers and memory references for use within the loop. */
|
6658 |
|
|
mips_adjust_block_mem (src, bytes_per_iter, &src_reg, &src);
|
6659 |
|
|
mips_adjust_block_mem (dest, bytes_per_iter, &dest_reg, &dest);
|
6660 |
|
|
|
6661 |
|
|
/* Calculate the value that SRC_REG should have after the last iteration
|
6662 |
|
|
of the loop. */
|
6663 |
|
|
final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
|
6664 |
|
|
0, 0, OPTAB_WIDEN);
|
6665 |
|
|
|
6666 |
|
|
/* Emit the start of the loop. */
|
6667 |
|
|
label = gen_label_rtx ();
|
6668 |
|
|
emit_label (label);
|
6669 |
|
|
|
6670 |
|
|
/* Emit the loop body. */
|
6671 |
|
|
mips_block_move_straight (dest, src, bytes_per_iter);
|
6672 |
|
|
|
6673 |
|
|
/* Move on to the next block. */
|
6674 |
|
|
mips_emit_move (src_reg, plus_constant (src_reg, bytes_per_iter));
|
6675 |
|
|
mips_emit_move (dest_reg, plus_constant (dest_reg, bytes_per_iter));
|
6676 |
|
|
|
6677 |
|
|
/* Emit the loop condition. */
|
6678 |
|
|
test = gen_rtx_NE (VOIDmode, src_reg, final_src);
|
6679 |
|
|
if (Pmode == DImode)
|
6680 |
|
|
emit_jump_insn (gen_cbranchdi4 (test, src_reg, final_src, label));
|
6681 |
|
|
else
|
6682 |
|
|
emit_jump_insn (gen_cbranchsi4 (test, src_reg, final_src, label));
|
6683 |
|
|
|
6684 |
|
|
/* Mop up any left-over bytes. */
|
6685 |
|
|
if (leftover)
|
6686 |
|
|
mips_block_move_straight (dest, src, leftover);
|
6687 |
|
|
}
|
6688 |
|
|
|
6689 |
|
|
/* Expand a movmemsi instruction, which copies LENGTH bytes from
|
6690 |
|
|
memory reference SRC to memory reference DEST. */
|
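/* Note, not in the original source: constant lengths of at most
   MIPS_MAX_MOVE_BYTES_STRAIGHT expand to straight-line code, larger
   constant lengths use the loop form when optimizing, and returning false
   leaves the copy to the generic block-move fallback.  */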
6691 |
|
|
|
6692 |
|
|
bool
|
6693 |
|
|
mips_expand_block_move (rtx dest, rtx src, rtx length)
|
6694 |
|
|
{
|
6695 |
|
|
if (CONST_INT_P (length))
|
6696 |
|
|
{
|
6697 |
|
|
if (INTVAL (length) <= MIPS_MAX_MOVE_BYTES_STRAIGHT)
|
6698 |
|
|
{
|
6699 |
|
|
mips_block_move_straight (dest, src, INTVAL (length));
|
6700 |
|
|
return true;
|
6701 |
|
|
}
|
6702 |
|
|
else if (optimize)
|
6703 |
|
|
{
|
6704 |
|
|
mips_block_move_loop (dest, src, INTVAL (length),
|
6705 |
|
|
MIPS_MAX_MOVE_BYTES_PER_LOOP_ITER);
|
6706 |
|
|
return true;
|
6707 |
|
|
}
|
6708 |
|
|
}
|
6709 |
|
|
return false;
|
6710 |
|
|
}
|
6711 |
|
|
|
6712 |
|
|
/* Expand a loop of synci insns for the address range [BEGIN, END). */
|
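/* Worked example, not in the original source: if rdhwr reports a 32-byte
   cache line, then for BEGIN = 0x1005 and END = 0x1043 the code below
   rounds BEGIN down to 0x1000, computes LENGTH = 0x43 and issues three
   synci operations, at 0x1000, 0x1020 and 0x1040.  */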
6713 |
|
|
|
6714 |
|
|
void
|
6715 |
|
|
mips_expand_synci_loop (rtx begin, rtx end)
|
6716 |
|
|
{
|
6717 |
|
|
rtx inc, label, end_label, cmp_result, mask, length;
|
6718 |
|
|
|
6719 |
|
|
/* Create end_label. */
|
6720 |
|
|
end_label = gen_label_rtx ();
|
6721 |
|
|
|
6722 |
|
|
/* Check if begin equals end. */
|
6723 |
|
|
cmp_result = gen_rtx_EQ (VOIDmode, begin, end);
|
6724 |
|
|
emit_jump_insn (gen_condjump (cmp_result, end_label));
|
6725 |
|
|
|
6726 |
|
|
/* Load INC with the cache line size (rdhwr INC,$1). */
|
6727 |
|
|
inc = gen_reg_rtx (Pmode);
|
6728 |
|
|
emit_insn (Pmode == SImode
|
6729 |
|
|
? gen_rdhwr_synci_step_si (inc)
|
6730 |
|
|
: gen_rdhwr_synci_step_di (inc));
|
6731 |
|
|
|
6732 |
|
|
/* Check if inc is 0. */
|
6733 |
|
|
cmp_result = gen_rtx_EQ (VOIDmode, inc, const0_rtx);
|
6734 |
|
|
emit_jump_insn (gen_condjump (cmp_result, end_label));
|
6735 |
|
|
|
6736 |
|
|
/* Calculate mask. */
|
6737 |
|
|
mask = mips_force_unary (Pmode, NEG, inc);
|
6738 |
|
|
|
6739 |
|
|
/* Mask out begin by mask. */
|
6740 |
|
|
begin = mips_force_binary (Pmode, AND, begin, mask);
|
6741 |
|
|
|
6742 |
|
|
/* Calculate length. */
|
6743 |
|
|
length = mips_force_binary (Pmode, MINUS, end, begin);
|
6744 |
|
|
|
6745 |
|
|
/* Loop back to here. */
|
6746 |
|
|
label = gen_label_rtx ();
|
6747 |
|
|
emit_label (label);
|
6748 |
|
|
|
6749 |
|
|
emit_insn (gen_synci (begin));
|
6750 |
|
|
|
6751 |
|
|
/* Update length. */
|
6752 |
|
|
mips_emit_binary (MINUS, length, length, inc);
|
6753 |
|
|
|
6754 |
|
|
/* Update begin. */
|
6755 |
|
|
mips_emit_binary (PLUS, begin, begin, inc);
|
6756 |
|
|
|
6757 |
|
|
/* Check if length is greater than 0. */
|
6758 |
|
|
cmp_result = gen_rtx_GT (VOIDmode, length, const0_rtx);
|
6759 |
|
|
emit_jump_insn (gen_condjump (cmp_result, label));
|
6760 |
|
|
|
6761 |
|
|
emit_label (end_label);
|
6762 |
|
|
}
|
6763 |
|
|
|
6764 |
|
|
/* Expand a QI or HI mode atomic memory operation.
|
6765 |
|
|
|
6766 |
|
|
GENERATOR contains a pointer to the gen_* function that generates
|
6767 |
|
|
the SI mode underlying atomic operation using masks that we
|
6768 |
|
|
calculate.
|
6769 |
|
|
|
6770 |
|
|
RESULT is the return register for the operation. Its value is NULL
|
6771 |
|
|
if unused.
|
6772 |
|
|
|
6773 |
|
|
MEM is the location of the atomic access.
|
6774 |
|
|
|
6775 |
|
|
OLDVAL is the first operand for the operation.
|
6776 |
|
|
|
6777 |
|
|
NEWVAL is the optional second operand for the operation. Its value
|
6778 |
|
|
is NULL if unused. */
|
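/* Worked example, not in the original source: for a HImode access whose
   address has low bits 0b10, the code below computes a shift of 16 and a
   mask of 0xffff0000 on little-endian targets; big-endian targets first
   XOR the byte offset with 2, giving a shift of 0 and a mask of 0xffff.  */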
6779 |
|
|
|
6780 |
|
|
void
|
6781 |
|
|
mips_expand_atomic_qihi (union mips_gen_fn_ptrs generator,
|
6782 |
|
|
rtx result, rtx mem, rtx oldval, rtx newval)
|
6783 |
|
|
{
|
6784 |
|
|
rtx orig_addr, memsi_addr, memsi, shift, shiftsi, unshifted_mask;
|
6785 |
|
|
rtx unshifted_mask_reg, mask, inverted_mask, si_op;
|
6786 |
|
|
rtx res = NULL;
|
6787 |
|
|
enum machine_mode mode;
|
6788 |
|
|
|
6789 |
|
|
mode = GET_MODE (mem);
|
6790 |
|
|
|
6791 |
|
|
/* Compute the address of the containing SImode value. */
|
6792 |
|
|
orig_addr = force_reg (Pmode, XEXP (mem, 0));
|
6793 |
|
|
memsi_addr = mips_force_binary (Pmode, AND, orig_addr,
|
6794 |
|
|
force_reg (Pmode, GEN_INT (-4)));
|
6795 |
|
|
|
6796 |
|
|
/* Create a memory reference for it. */
|
6797 |
|
|
memsi = gen_rtx_MEM (SImode, memsi_addr);
|
6798 |
|
|
set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
|
6799 |
|
|
MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);
|
6800 |
|
|
|
6801 |
|
|
/* Work out the byte offset of the QImode or HImode value,
|
6802 |
|
|
counting from the least significant byte. */
|
6803 |
|
|
shift = mips_force_binary (Pmode, AND, orig_addr, GEN_INT (3));
|
6804 |
|
|
if (TARGET_BIG_ENDIAN)
|
6805 |
|
|
mips_emit_binary (XOR, shift, shift, GEN_INT (mode == QImode ? 3 : 2));
|
6806 |
|
|
|
6807 |
|
|
/* Multiply by eight to convert the shift value from bytes to bits. */
|
6808 |
|
|
mips_emit_binary (ASHIFT, shift, shift, GEN_INT (3));
|
6809 |
|
|
|
6810 |
|
|
/* Make the final shift an SImode value, so that it can be used in
|
6811 |
|
|
SImode operations. */
|
6812 |
|
|
shiftsi = force_reg (SImode, gen_lowpart (SImode, shift));
|
6813 |
|
|
|
6814 |
|
|
/* Set MASK to an inclusive mask of the QImode or HImode value. */
|
6815 |
|
|
unshifted_mask = GEN_INT (GET_MODE_MASK (mode));
|
6816 |
|
|
unshifted_mask_reg = force_reg (SImode, unshifted_mask);
|
6817 |
|
|
mask = mips_force_binary (SImode, ASHIFT, unshifted_mask_reg, shiftsi);
|
6818 |
|
|
|
6819 |
|
|
/* Compute the equivalent exclusive mask. */
|
6820 |
|
|
inverted_mask = gen_reg_rtx (SImode);
|
6821 |
|
|
emit_insn (gen_rtx_SET (VOIDmode, inverted_mask,
|
6822 |
|
|
gen_rtx_NOT (SImode, mask)));
|
6823 |
|
|
|
6824 |
|
|
/* Shift the old value into place. */
|
6825 |
|
|
if (oldval != const0_rtx)
|
6826 |
|
|
{
|
6827 |
|
|
oldval = convert_modes (SImode, mode, oldval, true);
|
6828 |
|
|
oldval = force_reg (SImode, oldval);
|
6829 |
|
|
oldval = mips_force_binary (SImode, ASHIFT, oldval, shiftsi);
|
6830 |
|
|
}
|
6831 |
|
|
|
6832 |
|
|
/* Do the same for the new value. */
|
6833 |
|
|
if (newval && newval != const0_rtx)
|
6834 |
|
|
{
|
6835 |
|
|
newval = convert_modes (SImode, mode, newval, true);
|
6836 |
|
|
newval = force_reg (SImode, newval);
|
6837 |
|
|
newval = mips_force_binary (SImode, ASHIFT, newval, shiftsi);
|
6838 |
|
|
}
|
6839 |
|
|
|
6840 |
|
|
/* Do the SImode atomic access. */
|
6841 |
|
|
if (result)
|
6842 |
|
|
res = gen_reg_rtx (SImode);
|
6843 |
|
|
if (newval)
|
6844 |
|
|
si_op = generator.fn_6 (res, memsi, mask, inverted_mask, oldval, newval);
|
6845 |
|
|
else if (result)
|
6846 |
|
|
si_op = generator.fn_5 (res, memsi, mask, inverted_mask, oldval);
|
6847 |
|
|
else
|
6848 |
|
|
si_op = generator.fn_4 (memsi, mask, inverted_mask, oldval);
|
6849 |
|
|
|
6850 |
|
|
emit_insn (si_op);
|
6851 |
|
|
|
6852 |
|
|
if (result)
|
6853 |
|
|
{
|
6854 |
|
|
/* Shift and convert the result. */
|
6855 |
|
|
mips_emit_binary (AND, res, res, mask);
|
6856 |
|
|
mips_emit_binary (LSHIFTRT, res, res, shiftsi);
|
6857 |
|
|
mips_emit_move (result, gen_lowpart (GET_MODE (result), res));
|
6858 |
|
|
}
|
6859 |
|
|
}
|
6860 |
|
|
|
6861 |
|
|
/* Return true if it is possible to use left/right accesses for a
|
6862 |
|
|
bitfield of WIDTH bits starting BITPOS bits into *OP. When
|
6863 |
|
|
returning true, update *OP, *LEFT and *RIGHT as follows:
|
6864 |
|
|
|
6865 |
|
|
*OP is a BLKmode reference to the whole field.
|
6866 |
|
|
|
6867 |
|
|
*LEFT is a QImode reference to the first byte if big endian or
|
6868 |
|
|
the last byte if little endian. This address can be used in the
|
6869 |
|
|
left-side instructions (LWL, SWL, LDL, SDL).
|
6870 |
|
|
|
6871 |
|
|
*RIGHT is a QImode reference to the opposite end of the field and
|
6872 |
|
|
can be used in the partnering right-side instructions (LWR, SWR, LDR, SDR).  */
|
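/* Illustrative example, not in the original source: for a 32-bit field,
   FIRST below is byte 0 and LAST is byte 3, so big-endian targets get
   *LEFT = byte 0 and *RIGHT = byte 3, while little-endian targets get
   *LEFT = byte 3 and *RIGHT = byte 0.  */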
6873 |
|
|
|
6874 |
|
|
static bool
|
6875 |
|
|
mips_get_unaligned_mem (rtx *op, HOST_WIDE_INT width, HOST_WIDE_INT bitpos,
|
6876 |
|
|
rtx *left, rtx *right)
|
6877 |
|
|
{
|
6878 |
|
|
rtx first, last;
|
6879 |
|
|
|
6880 |
|
|
/* Check that the operand really is a MEM. Not all the extv and
|
6881 |
|
|
extzv predicates are checked. */
|
6882 |
|
|
if (!MEM_P (*op))
|
6883 |
|
|
return false;
|
6884 |
|
|
|
6885 |
|
|
/* Check that the size is valid. */
|
6886 |
|
|
if (width != 32 && (!TARGET_64BIT || width != 64))
|
6887 |
|
|
return false;
|
6888 |
|
|
|
6889 |
|
|
/* We can only access byte-aligned values. Since we are always passed
|
6890 |
|
|
a reference to the first byte of the field, it is not necessary to
|
6891 |
|
|
do anything with BITPOS after this check. */
|
6892 |
|
|
if (bitpos % BITS_PER_UNIT != 0)
|
6893 |
|
|
return false;
|
6894 |
|
|
|
6895 |
|
|
/* Reject aligned bitfields: we want to use a normal load or store
|
6896 |
|
|
instead of a left/right pair. */
|
6897 |
|
|
if (MEM_ALIGN (*op) >= width)
|
6898 |
|
|
return false;
|
6899 |
|
|
|
6900 |
|
|
/* Adjust *OP to refer to the whole field. This also has the effect
|
6901 |
|
|
of legitimizing *OP's address for BLKmode, possibly simplifying it. */
|
6902 |
|
|
*op = adjust_address (*op, BLKmode, 0);
|
6903 |
|
|
set_mem_size (*op, GEN_INT (width / BITS_PER_UNIT));
|
6904 |
|
|
|
6905 |
|
|
/* Get references to both ends of the field. We deliberately don't
|
6906 |
|
|
use the original QImode *OP for FIRST since the new BLKmode one
|
6907 |
|
|
might have a simpler address. */
|
6908 |
|
|
first = adjust_address (*op, QImode, 0);
|
6909 |
|
|
last = adjust_address (*op, QImode, width / BITS_PER_UNIT - 1);
|
6910 |
|
|
|
6911 |
|
|
/* Allocate to LEFT and RIGHT according to endianness. LEFT should
|
6912 |
|
|
correspond to the MSB and RIGHT to the LSB. */
|
6913 |
|
|
if (TARGET_BIG_ENDIAN)
|
6914 |
|
|
*left = first, *right = last;
|
6915 |
|
|
else
|
6916 |
|
|
*left = last, *right = first;
|
6917 |
|
|
|
6918 |
|
|
return true;
|
6919 |
|
|
}
|
6920 |
|
|
|
6921 |
|
|
/* Try to use left/right loads to expand an "extv" or "extzv" pattern.
|
6922 |
|
|
DEST, SRC, WIDTH and BITPOS are the operands passed to the expander;
|
6923 |
|
|
the operation is the equivalent of:
|
6924 |
|
|
|
6925 |
|
|
(set DEST (*_extract SRC WIDTH BITPOS))
|
6926 |
|
|
|
6927 |
|
|
Return true on success. */
|
6928 |
|
|
|
6929 |
|
|
bool
|
6930 |
|
|
mips_expand_ext_as_unaligned_load (rtx dest, rtx src, HOST_WIDE_INT width,
|
6931 |
|
|
HOST_WIDE_INT bitpos)
|
6932 |
|
|
{
|
6933 |
|
|
rtx left, right, temp;
|
6934 |
|
|
|
6935 |
|
|
/* If TARGET_64BIT, the destination of a 32-bit "extv" or "extzv" will
|
6936 |
|
|
be a paradoxical word_mode subreg. This is the only case in which
|
6937 |
|
|
we allow the destination to be larger than the source. */
|
6938 |
|
|
if (GET_CODE (dest) == SUBREG
|
6939 |
|
|
&& GET_MODE (dest) == DImode
|
6940 |
|
|
&& GET_MODE (SUBREG_REG (dest)) == SImode)
|
6941 |
|
|
dest = SUBREG_REG (dest);
|
6942 |
|
|
|
6943 |
|
|
/* After the above adjustment, the destination must be the same
|
6944 |
|
|
width as the source. */
|
6945 |
|
|
if (GET_MODE_BITSIZE (GET_MODE (dest)) != width)
|
6946 |
|
|
return false;
|
6947 |
|
|
|
6948 |
|
|
if (!mips_get_unaligned_mem (&src, width, bitpos, &left, &right))
|
6949 |
|
|
return false;
|
6950 |
|
|
|
6951 |
|
|
temp = gen_reg_rtx (GET_MODE (dest));
|
6952 |
|
|
if (GET_MODE (dest) == DImode)
|
6953 |
|
|
{
|
6954 |
|
|
emit_insn (gen_mov_ldl (temp, src, left));
|
6955 |
|
|
emit_insn (gen_mov_ldr (dest, copy_rtx (src), right, temp));
|
6956 |
|
|
}
|
6957 |
|
|
else
|
6958 |
|
|
{
|
6959 |
|
|
emit_insn (gen_mov_lwl (temp, src, left));
|
6960 |
|
|
emit_insn (gen_mov_lwr (dest, copy_rtx (src), right, temp));
|
6961 |
|
|
}
|
6962 |
|
|
return true;
|
6963 |
|
|
}
|
6964 |
|
|
|
6965 |
|
|
/* Try to use left/right stores to expand an "ins" pattern. DEST, WIDTH,
|
6966 |
|
|
BITPOS and SRC are the operands passed to the expander; the operation
|
6967 |
|
|
is the equivalent of:
|
6968 |
|
|
|
6969 |
|
|
(set (zero_extract DEST WIDTH BITPOS) SRC)
|
6970 |
|
|
|
6971 |
|
|
Return true on success. */
|
6972 |
|
|
|
6973 |
|
|
bool
|
6974 |
|
|
mips_expand_ins_as_unaligned_store (rtx dest, rtx src, HOST_WIDE_INT width,
|
6975 |
|
|
HOST_WIDE_INT bitpos)
|
6976 |
|
|
{
|
6977 |
|
|
rtx left, right;
|
6978 |
|
|
enum machine_mode mode;
|
6979 |
|
|
|
6980 |
|
|
if (!mips_get_unaligned_mem (&dest, width, bitpos, &left, &right))
|
6981 |
|
|
return false;
|
6982 |
|
|
|
6983 |
|
|
mode = mode_for_size (width, MODE_INT, 0);
|
6984 |
|
|
src = gen_lowpart (mode, src);
|
6985 |
|
|
if (mode == DImode)
|
6986 |
|
|
{
|
6987 |
|
|
emit_insn (gen_mov_sdl (dest, src, left));
|
6988 |
|
|
emit_insn (gen_mov_sdr (copy_rtx (dest), copy_rtx (src), right));
|
6989 |
|
|
}
|
6990 |
|
|
else
|
6991 |
|
|
{
|
6992 |
|
|
emit_insn (gen_mov_swl (dest, src, left));
|
6993 |
|
|
emit_insn (gen_mov_swr (copy_rtx (dest), copy_rtx (src), right));
|
6994 |
|
|
}
|
6995 |
|
|
return true;
|
6996 |
|
|
}
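
/* As an illustration of the two expanders above: for a 32-bit unaligned
   field at byte offset 1 of $a0 on a big-endian target, the lwl/lwr pair
   produced by mips_expand_ext_as_unaligned_load corresponds to assembly
   along the lines of

        lwl     $v0,1($a0)
        lwr     $v0,4($a0)

   and mips_expand_ins_as_unaligned_store emits the matching swl/swr pair.
   (Illustrative only; the actual registers and offsets depend on the
   operands passed to the expanders.)  */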

/* Return true if X is a MEM with the same size as MODE.  */

bool
mips_mem_fits_mode_p (enum machine_mode mode, rtx x)
{
  rtx size;

  if (!MEM_P (x))
    return false;

  size = MEM_SIZE (x);
  return size && INTVAL (size) == GET_MODE_SIZE (mode);
}

/* Return true if (zero_extract OP WIDTH BITPOS) can be used as the
   source of an "ext" instruction or the destination of an "ins"
   instruction.  OP must be a register operand and the following
   conditions must hold:

     0 <= BITPOS < GET_MODE_BITSIZE (GET_MODE (op))
     0 < WIDTH <= GET_MODE_BITSIZE (GET_MODE (op))
     0 < BITPOS + WIDTH <= GET_MODE_BITSIZE (GET_MODE (op))

   Also reject lengths equal to a word as they are better handled
   by the move patterns.  */

bool
mips_use_ins_ext_p (rtx op, HOST_WIDE_INT width, HOST_WIDE_INT bitpos)
{
  if (!ISA_HAS_EXT_INS
      || !register_operand (op, VOIDmode)
      || GET_MODE_BITSIZE (GET_MODE (op)) > BITS_PER_WORD)
    return false;

  if (!IN_RANGE (width, 1, GET_MODE_BITSIZE (GET_MODE (op)) - 1))
    return false;

  if (bitpos < 0 || bitpos + width > GET_MODE_BITSIZE (GET_MODE (op)))
    return false;

  return true;
}

/* Check if MASK and SHIFT are valid in a mask-low-and-shift-left
   operation if MAXLEN is the maximum length of consecutive bits that
   can make up MASK.  MODE is the mode of the operation.  See
   mask_low_and_shift_len for the actual definition.  */

bool
mask_low_and_shift_p (enum machine_mode mode, rtx mask, rtx shift, int maxlen)
{
  return IN_RANGE (mask_low_and_shift_len (mode, mask, shift), 1, maxlen);
}

/* Return true iff OP1 and OP2 are valid operands together for the
   *and<MODE>3 and *and<MODE>3_mips16 patterns.  For the cases to consider,
   see the table in the comment before the pattern.  */

bool
and_operands_ok (enum machine_mode mode, rtx op1, rtx op2)
{
  return (memory_operand (op1, mode)
          ? and_load_operand (op2, mode)
          : and_reg_operand (op2, mode));
}

/* The canonical form of a mask-low-and-shift-left operation is
   (and (ashift X SHIFT) MASK) where MASK has the lower SHIFT number of bits
   cleared.  Thus we need to shift MASK to the right before checking if it
   is a valid mask value.  MODE is the mode of the operation.  If true
   return the length of the mask, otherwise return -1.  */

int
mask_low_and_shift_len (enum machine_mode mode, rtx mask, rtx shift)
{
  HOST_WIDE_INT shval;

  shval = INTVAL (shift) & (GET_MODE_BITSIZE (mode) - 1);
  return exact_log2 ((UINTVAL (mask) >> shval) + 1);
}
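
/* A worked example for mask_low_and_shift_len: in SImode, MASK = 0xff00
   and SHIFT = 8 give shval = 8 and (0xff00 >> 8) + 1 = 0x100, so
   exact_log2 returns 8 -- the combination masks in the low 8 bits of the
   shifted operand.  MASK = 0xff000000 with the same shift yields
   0x00ff0001, which is not a power of two, so the result is -1 and
   mask_low_and_shift_p rejects the pair.  */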

/* Return true if -msplit-addresses is selected and should be honored.

   -msplit-addresses is a half-way house between explicit relocations
   and the traditional assembler macros.  It can split absolute 32-bit
   symbolic constants into a high/lo_sum pair but uses macros for other
   sorts of access.

   Like explicit relocation support for REL targets, it relies
   on GNU extensions in the assembler and the linker.

   Although this code should work for -O0, it has traditionally
   been treated as an optimization.  */

static bool
mips_split_addresses_p (void)
{
  return (TARGET_SPLIT_ADDRESSES
          && optimize
          && !TARGET_MIPS16
          && !flag_pic
          && !ABI_HAS_64BIT_SYMBOLS);
}

/* (Re-)Initialize mips_split_p, mips_lo_relocs and mips_hi_relocs.  */

static void
mips_init_relocs (void)
{
  memset (mips_split_p, '\0', sizeof (mips_split_p));
  memset (mips_split_hi_p, '\0', sizeof (mips_split_hi_p));
  memset (mips_hi_relocs, '\0', sizeof (mips_hi_relocs));
  memset (mips_lo_relocs, '\0', sizeof (mips_lo_relocs));

  if (ABI_HAS_64BIT_SYMBOLS)
    {
      if (TARGET_EXPLICIT_RELOCS)
        {
          mips_split_p[SYMBOL_64_HIGH] = true;
          mips_hi_relocs[SYMBOL_64_HIGH] = "%highest(";
          mips_lo_relocs[SYMBOL_64_HIGH] = "%higher(";

          mips_split_p[SYMBOL_64_MID] = true;
          mips_hi_relocs[SYMBOL_64_MID] = "%higher(";
          mips_lo_relocs[SYMBOL_64_MID] = "%hi(";

          mips_split_p[SYMBOL_64_LOW] = true;
          mips_hi_relocs[SYMBOL_64_LOW] = "%hi(";
          mips_lo_relocs[SYMBOL_64_LOW] = "%lo(";

          mips_split_p[SYMBOL_ABSOLUTE] = true;
          mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
        }
    }
  else
    {
      if (TARGET_EXPLICIT_RELOCS || mips_split_addresses_p () || TARGET_MIPS16)
        {
          mips_split_p[SYMBOL_ABSOLUTE] = true;
          mips_hi_relocs[SYMBOL_ABSOLUTE] = "%hi(";
          mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";

          mips_lo_relocs[SYMBOL_32_HIGH] = "%hi(";
        }
    }

  if (TARGET_MIPS16)
    {
      /* The high part is provided by a pseudo copy of $gp.  */
      mips_split_p[SYMBOL_GP_RELATIVE] = true;
      mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gprel(";
    }
  else if (TARGET_EXPLICIT_RELOCS)
    /* Small data constants are kept whole until after reload,
       then lowered by mips_rewrite_small_data.  */
    mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gp_rel(";

  if (TARGET_EXPLICIT_RELOCS)
    {
      mips_split_p[SYMBOL_GOT_PAGE_OFST] = true;
      if (TARGET_NEWABI)
        {
          mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got_page(";
          mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%got_ofst(";
        }
      else
        {
          mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got(";
          mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%lo(";
        }
      if (TARGET_MIPS16)
        /* Expose the use of $28 as soon as possible.  */
        mips_split_hi_p[SYMBOL_GOT_PAGE_OFST] = true;

      if (TARGET_XGOT)
        {
          /* The HIGH and LO_SUM are matched by special .md patterns.  */
          mips_split_p[SYMBOL_GOT_DISP] = true;

          mips_split_p[SYMBOL_GOTOFF_DISP] = true;
          mips_hi_relocs[SYMBOL_GOTOFF_DISP] = "%got_hi(";
          mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_lo(";

          mips_split_p[SYMBOL_GOTOFF_CALL] = true;
          mips_hi_relocs[SYMBOL_GOTOFF_CALL] = "%call_hi(";
          mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call_lo(";
        }
      else
        {
          if (TARGET_NEWABI)
            mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_disp(";
          else
            mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got(";
          mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call16(";
          if (TARGET_MIPS16)
            /* Expose the use of $28 as soon as possible.  */
            mips_split_p[SYMBOL_GOT_DISP] = true;
        }
    }

  if (TARGET_NEWABI)
    {
      mips_split_p[SYMBOL_GOTOFF_LOADGP] = true;
      mips_hi_relocs[SYMBOL_GOTOFF_LOADGP] = "%hi(%neg(%gp_rel(";
      mips_lo_relocs[SYMBOL_GOTOFF_LOADGP] = "%lo(%neg(%gp_rel(";
    }

  mips_lo_relocs[SYMBOL_TLSGD] = "%tlsgd(";
  mips_lo_relocs[SYMBOL_TLSLDM] = "%tlsldm(";

  mips_split_p[SYMBOL_DTPREL] = true;
  mips_hi_relocs[SYMBOL_DTPREL] = "%dtprel_hi(";
  mips_lo_relocs[SYMBOL_DTPREL] = "%dtprel_lo(";

  mips_lo_relocs[SYMBOL_GOTTPREL] = "%gottprel(";

  mips_split_p[SYMBOL_TPREL] = true;
  mips_hi_relocs[SYMBOL_TPREL] = "%tprel_hi(";
  mips_lo_relocs[SYMBOL_TPREL] = "%tprel_lo(";

  mips_lo_relocs[SYMBOL_HALF] = "%half(";
}
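
/* For example, with TARGET_EXPLICIT_RELOCS on a 32-bit ABI the table
   above makes mips_split_p[SYMBOL_ABSOLUTE] true with "%hi("/"%lo("
   relocations, so an absolute address is split into a high/low pair
   along the lines of

        lui     $2,%hi(foo)
        addiu   $2,$2,%lo(foo)

   instead of being left to the assembler's "la" macro.  (Illustrative
   register choice only.)  */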

/* Print symbolic operand OP, which is part of a HIGH or LO_SUM
   in context CONTEXT.  RELOCS is the array of relocations to use.  */

static void
mips_print_operand_reloc (FILE *file, rtx op, enum mips_symbol_context context,
                          const char **relocs)
{
  enum mips_symbol_type symbol_type;
  const char *p;

  symbol_type = mips_classify_symbolic_expression (op, context);
  gcc_assert (relocs[symbol_type]);

  fputs (relocs[symbol_type], file);
  output_addr_const (file, mips_strip_unspec_address (op));
  for (p = relocs[symbol_type]; *p != 0; p++)
    if (*p == '(')
      fputc (')', file);
}
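
/* For example, if RELOCS[symbol_type] is "%lo(", an operand of foo+4 is
   printed as "%lo(foo+4)"; for the composed SYMBOL_GOTOFF_LOADGP
   relocation "%lo(%neg(%gp_rel(" the loop above emits one closing
   parenthesis per opening one, giving "%lo(%neg(%gp_rel(foo)))".  */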

/* Start a new block with the given asm switch enabled.  If we need
   to print a directive, emit PREFIX before it and SUFFIX after it.  */

static void
mips_push_asm_switch_1 (struct mips_asm_switch *asm_switch,
                        const char *prefix, const char *suffix)
{
  if (asm_switch->nesting_level == 0)
    fprintf (asm_out_file, "%s.set\tno%s%s", prefix, asm_switch->name, suffix);
  asm_switch->nesting_level++;
}

/* Likewise, but end a block.  */

static void
mips_pop_asm_switch_1 (struct mips_asm_switch *asm_switch,
                       const char *prefix, const char *suffix)
{
  gcc_assert (asm_switch->nesting_level);
  asm_switch->nesting_level--;
  if (asm_switch->nesting_level == 0)
    fprintf (asm_out_file, "%s.set\t%s%s", prefix, asm_switch->name, suffix);
}

/* Wrappers around mips_push_asm_switch_1 and mips_pop_asm_switch_1
   that either print a complete line or print nothing.  */

void
mips_push_asm_switch (struct mips_asm_switch *asm_switch)
{
  mips_push_asm_switch_1 (asm_switch, "\t", "\n");
}

void
mips_pop_asm_switch (struct mips_asm_switch *asm_switch)
{
  mips_pop_asm_switch_1 (asm_switch, "\t", "\n");
}

/* Print the text for PRINT_OPERAND punctuation character CH to FILE.
   The punctuation characters are:

   '('	Start a nested ".set noreorder" block.
   ')'	End a nested ".set noreorder" block.
   '['	Start a nested ".set noat" block.
   ']'	End a nested ".set noat" block.
   '<'	Start a nested ".set nomacro" block.
   '>'	End a nested ".set nomacro" block.
   '*'	Behave like %(%< if generating a delayed-branch sequence.
   '#'	Print a nop if in a ".set noreorder" block.
   '/'	Like '#', but do nothing within a delayed-branch sequence.
   '?'	Print "l" if mips_branch_likely is true
   '~'	Print a nop if mips_branch_likely is true
   '.'	Print the name of the register with a hard-wired zero (zero or $0).
   '@'	Print the name of the assembler temporary register (at or $1).
   '^'	Print the name of the pic call-through register (t9 or $25).
   '+'	Print the name of the gp register (usually gp or $28).
   '$'	Print the name of the stack pointer register (sp or $29).

   See also mips_init_print_operand_punct.  */

static void
mips_print_operand_punctuation (FILE *file, int ch)
{
  switch (ch)
    {
    case '(':
      mips_push_asm_switch_1 (&mips_noreorder, "", "\n\t");
      break;

    case ')':
      mips_pop_asm_switch_1 (&mips_noreorder, "\n\t", "");
      break;

    case '[':
      mips_push_asm_switch_1 (&mips_noat, "", "\n\t");
      break;

    case ']':
      mips_pop_asm_switch_1 (&mips_noat, "\n\t", "");
      break;

    case '<':
      mips_push_asm_switch_1 (&mips_nomacro, "", "\n\t");
      break;

    case '>':
      mips_pop_asm_switch_1 (&mips_nomacro, "\n\t", "");
      break;

    case '*':
      if (final_sequence != 0)
        {
          mips_print_operand_punctuation (file, '(');
          mips_print_operand_punctuation (file, '<');
        }
      break;

    case '#':
      if (mips_noreorder.nesting_level > 0)
        fputs ("\n\tnop", file);
      break;

    case '/':
      /* Print an extra newline so that the delayed insn is separated
         from the following ones.  This looks neater and is consistent
         with non-nop delayed sequences.  */
      if (mips_noreorder.nesting_level > 0 && final_sequence == 0)
        fputs ("\n\tnop\n", file);
      break;

    case '?':
      if (mips_branch_likely)
        putc ('l', file);
      break;

    case '~':
      if (mips_branch_likely)
        fputs ("\n\tnop", file);
      break;

    case '.':
      fputs (reg_names[GP_REG_FIRST + 0], file);
      break;

    case '@':
      fputs (reg_names[AT_REGNUM], file);
      break;

    case '^':
      fputs (reg_names[PIC_FUNCTION_ADDR_REGNUM], file);
      break;

    case '+':
      fputs (reg_names[PIC_OFFSET_TABLE_REGNUM], file);
      break;

    case '$':
      fputs (reg_names[STACK_POINTER_REGNUM], file);
      break;

    default:
      gcc_unreachable ();
      break;
    }
}
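
/* As an example of how the punctuation characters are used: an output
   template of the general shape "%(%<...%>%)" brackets the emitted
   instructions with ".set noreorder"/".set nomacro" and the matching
   ".set macro"/".set reorder", while "%#" adds a "nop" when the sequence
   is inside a ".set noreorder" block.  (The "..." stands for whatever
   instructions the template emits; this is an illustration, not a
   pattern taken from mips.md.)  */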

/* Initialize mips_print_operand_punct.  */

static void
mips_init_print_operand_punct (void)
{
  const char *p;

  for (p = "()[]<>*#/?~.@^+$"; *p; p++)
    mips_print_operand_punct[(unsigned char) *p] = true;
}

/* PRINT_OPERAND prefix LETTER refers to the integer branch instruction
   associated with condition CODE.  Print the condition part of the
   opcode to FILE.  */

static void
mips_print_int_branch_condition (FILE *file, enum rtx_code code, int letter)
{
  switch (code)
    {
    case EQ:
    case NE:
    case GT:
    case GE:
    case LT:
    case LE:
    case GTU:
    case GEU:
    case LTU:
    case LEU:
      /* Conveniently, the MIPS names for these conditions are the same
         as their RTL equivalents.  */
      fputs (GET_RTX_NAME (code), file);
      break;

    default:
      output_operand_lossage ("'%%%c' is not a valid operand prefix", letter);
      break;
    }
}

/* Likewise floating-point branches.  */

static void
mips_print_float_branch_condition (FILE *file, enum rtx_code code, int letter)
{
  switch (code)
    {
    case EQ:
      fputs ("c1f", file);
      break;

    case NE:
      fputs ("c1t", file);
      break;

    default:
      output_operand_lossage ("'%%%c' is not a valid operand prefix", letter);
      break;
    }
}

/* Implement the PRINT_OPERAND macro.  The MIPS-specific operand codes are:

   'X'	Print CONST_INT OP in hexadecimal format.
   'x'	Print the low 16 bits of CONST_INT OP in hexadecimal format.
   'd'	Print CONST_INT OP in decimal.
   'm'	Print one less than CONST_INT OP in decimal.
   'h'	Print the high-part relocation associated with OP, after stripping
	  any outermost HIGH.
   'R'	Print the low-part relocation associated with OP.
   'C'	Print the integer branch condition for comparison OP.
   'N'	Print the inverse of the integer branch condition for comparison OP.
   'F'	Print the FPU branch condition for comparison OP.
   'W'	Print the inverse of the FPU branch condition for comparison OP.
   'T'	Print 'f' for (eq:CC ...), 't' for (ne:CC ...),
	      'z' for (eq:?I ...), 'n' for (ne:?I ...).
   't'	Like 'T', but with the EQ/NE cases reversed
   'Y'	Print mips_fp_conditions[INTVAL (OP)]
   'Z'	Print OP and a comma for ISA_HAS_8CC, otherwise print nothing.
   'q'	Print a DSP accumulator register.
   'D'	Print the second part of a double-word register or memory operand.
   'L'	Print the low-order register in a double-word register operand.
   'M'	Print high-order register in a double-word register operand.
   'z'	Print $0 if OP is zero, otherwise print OP normally.  */

void
mips_print_operand (FILE *file, rtx op, int letter)
{
  enum rtx_code code;

  if (PRINT_OPERAND_PUNCT_VALID_P (letter))
    {
      mips_print_operand_punctuation (file, letter);
      return;
    }

  gcc_assert (op);
  code = GET_CODE (op);

  switch (letter)
    {
    case 'X':
      if (CONST_INT_P (op))
        fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op));
      else
        output_operand_lossage ("invalid use of '%%%c'", letter);
      break;

    case 'x':
      if (CONST_INT_P (op))
        fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op) & 0xffff);
      else
        output_operand_lossage ("invalid use of '%%%c'", letter);
      break;

    case 'd':
      if (CONST_INT_P (op))
        fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (op));
      else
        output_operand_lossage ("invalid use of '%%%c'", letter);
      break;

    case 'm':
      if (CONST_INT_P (op))
        fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (op) - 1);
      else
        output_operand_lossage ("invalid use of '%%%c'", letter);
      break;

    case 'h':
      if (code == HIGH)
        op = XEXP (op, 0);
      mips_print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_hi_relocs);
      break;

    case 'R':
      mips_print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_lo_relocs);
      break;

    case 'C':
      mips_print_int_branch_condition (file, code, letter);
      break;

    case 'N':
      mips_print_int_branch_condition (file, reverse_condition (code), letter);
      break;

    case 'F':
      mips_print_float_branch_condition (file, code, letter);
      break;

    case 'W':
      mips_print_float_branch_condition (file, reverse_condition (code),
                                         letter);
      break;

    case 'T':
    case 't':
      {
        int truth = (code == NE) == (letter == 'T');
        fputc ("zfnt"[truth * 2 + (GET_MODE (op) == CCmode)], file);
      }
      break;

    case 'Y':
      if (code == CONST_INT && UINTVAL (op) < ARRAY_SIZE (mips_fp_conditions))
        fputs (mips_fp_conditions[UINTVAL (op)], file);
      else
        output_operand_lossage ("'%%%c' is not a valid operand prefix",
                                letter);
      break;

    case 'Z':
      if (ISA_HAS_8CC)
        {
          mips_print_operand (file, op, 0);
          fputc (',', file);
        }
      break;

    case 'q':
      if (code == REG && MD_REG_P (REGNO (op)))
        fprintf (file, "$ac0");
      else if (code == REG && DSP_ACC_REG_P (REGNO (op)))
        fprintf (file, "$ac%c", reg_names[REGNO (op)][3]);
      else
        output_operand_lossage ("invalid use of '%%%c'", letter);
      break;

    default:
      switch (code)
        {
        case REG:
          {
            unsigned int regno = REGNO (op);
            if ((letter == 'M' && TARGET_LITTLE_ENDIAN)
                || (letter == 'L' && TARGET_BIG_ENDIAN)
                || letter == 'D')
              regno++;
            else if (letter && letter != 'z' && letter != 'M' && letter != 'L')
              output_operand_lossage ("invalid use of '%%%c'", letter);
            /* We need to print $0 .. $31 for COP0 registers.  */
            if (COP0_REG_P (regno))
              fprintf (file, "$%s", &reg_names[regno][4]);
            else
              fprintf (file, "%s", reg_names[regno]);
          }
          break;

        case MEM:
          if (letter == 'D')
            output_address (plus_constant (XEXP (op, 0), 4));
          else if (letter && letter != 'z')
            output_operand_lossage ("invalid use of '%%%c'", letter);
          else
            output_address (XEXP (op, 0));
          break;

        default:
          if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
            fputs (reg_names[GP_REG_FIRST], file);
          else if (letter && letter != 'z')
            output_operand_lossage ("invalid use of '%%%c'", letter);
          else if (CONST_GP_P (op))
            fputs (reg_names[GLOBAL_POINTER_REGNUM], file);
          else
            output_addr_const (file, mips_strip_unspec_address (op));
          break;
        }
    }
}
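
/* Examples of the operand codes above: "%z1" prints the hard-wired zero
   register when operand 1 is a zero constant and the register name
   otherwise, which is how patterns fold a zero operand into $0; "%D1"
   on a memory operand prints the address of its second word (the
   address plus 4), and on a register operand prints the second register
   of the double-word pair.  */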

/* Output address operand X to FILE.  */

void
mips_print_operand_address (FILE *file, rtx x)
{
  struct mips_address_info addr;

  if (mips_classify_address (&addr, x, word_mode, true))
    switch (addr.type)
      {
      case ADDRESS_REG:
        mips_print_operand (file, addr.offset, 0);
        fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
        return;

      case ADDRESS_LO_SUM:
        mips_print_operand_reloc (file, addr.offset, SYMBOL_CONTEXT_MEM,
                                  mips_lo_relocs);
        fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
        return;

      case ADDRESS_CONST_INT:
        output_addr_const (file, x);
        fprintf (file, "(%s)", reg_names[GP_REG_FIRST]);
        return;

      case ADDRESS_SYMBOLIC:
        output_addr_const (file, mips_strip_unspec_address (x));
        return;
      }
  gcc_unreachable ();
}

/* Implement TARGET_ENCODE_SECTION_INFO.  */

static void
mips_encode_section_info (tree decl, rtx rtl, int first)
{
  default_encode_section_info (decl, rtl, first);

  if (TREE_CODE (decl) == FUNCTION_DECL)
    {
      rtx symbol = XEXP (rtl, 0);
      tree type = TREE_TYPE (decl);

      /* Encode whether the symbol is short or long.  */
      if ((TARGET_LONG_CALLS && !mips_near_type_p (type))
          || mips_far_type_p (type))
        SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_LONG_CALL;
    }
}

/* Implement TARGET_SELECT_RTX_SECTION.  */

static section *
mips_select_rtx_section (enum machine_mode mode, rtx x,
                         unsigned HOST_WIDE_INT align)
{
  /* ??? Consider using mergeable small data sections.  */
  if (mips_rtx_constant_in_small_data_p (mode))
    return get_named_section (NULL, ".sdata", 0);

  return default_elf_select_rtx_section (mode, x, align);
}

/* Implement TARGET_ASM_FUNCTION_RODATA_SECTION.

   The complication here is that, with the combination TARGET_ABICALLS
   && !TARGET_ABSOLUTE_ABICALLS && !TARGET_GPWORD, jump tables will use
   absolute addresses, and should therefore not be included in the
   read-only part of a DSO.  Handle such cases by selecting a normal
   data section instead of a read-only one.  The logic apes that in
   default_function_rodata_section.  */

static section *
mips_function_rodata_section (tree decl)
{
  if (!TARGET_ABICALLS || TARGET_ABSOLUTE_ABICALLS || TARGET_GPWORD)
    return default_function_rodata_section (decl);

  if (decl && DECL_SECTION_NAME (decl))
    {
      const char *name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
      if (DECL_ONE_ONLY (decl) && strncmp (name, ".gnu.linkonce.t.", 16) == 0)
        {
          char *rname = ASTRDUP (name);
          rname[14] = 'd';
          return get_section (rname, SECTION_LINKONCE | SECTION_WRITE, decl);
        }
      else if (flag_function_sections
               && flag_data_sections
               && strncmp (name, ".text.", 6) == 0)
        {
          char *rname = ASTRDUP (name);
          memcpy (rname + 1, "data", 4);
          return get_section (rname, SECTION_WRITE, decl);
        }
    }
  return data_section;
}

/* Implement TARGET_IN_SMALL_DATA_P.  */

static bool
mips_in_small_data_p (const_tree decl)
{
  unsigned HOST_WIDE_INT size;

  if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL)
    return false;

  /* We don't yet generate small-data references for -mabicalls
     or VxWorks RTP code.  See the related -G handling in
     mips_override_options.  */
  if (TARGET_ABICALLS || TARGET_VXWORKS_RTP)
    return false;

  if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0)
    {
      const char *name;

      /* Reject anything that isn't in a known small-data section.  */
      name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
      if (strcmp (name, ".sdata") != 0 && strcmp (name, ".sbss") != 0)
        return false;

      /* If a symbol is defined externally, the assembler will use the
         usual -G rules when deciding how to implement macros.  */
      if (mips_lo_relocs[SYMBOL_GP_RELATIVE] || !DECL_EXTERNAL (decl))
        return true;
    }
  else if (TARGET_EMBEDDED_DATA)
    {
      /* Don't put constants into the small data section: we want them
         to be in ROM rather than RAM.  */
      if (TREE_CODE (decl) != VAR_DECL)
        return false;

      if (TREE_READONLY (decl)
          && !TREE_SIDE_EFFECTS (decl)
          && (!DECL_INITIAL (decl) || TREE_CONSTANT (DECL_INITIAL (decl))))
        return false;
    }

  /* Enforce -mlocal-sdata.  */
  if (!TARGET_LOCAL_SDATA && !TREE_PUBLIC (decl))
    return false;

  /* Enforce -mextern-sdata.  */
  if (!TARGET_EXTERN_SDATA && DECL_P (decl))
    {
      if (DECL_EXTERNAL (decl))
        return false;
      if (DECL_COMMON (decl) && DECL_INITIAL (decl) == NULL)
        return false;
    }

  /* We have traditionally not treated zero-sized objects as small data,
     so this is now effectively part of the ABI.  */
  size = int_size_in_bytes (TREE_TYPE (decl));
  return size > 0 && size <= mips_small_data_threshold;
}
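
/* For instance, assuming the usual default threshold of -G 8, a
   file-scope "int buf[2]" (8 bytes) is placed in .sdata/.sbss and
   accessed through $gp, while "int buf[4]" (16 bytes) is not; the
   actual cut-off is whatever mips_small_data_threshold was set to.  */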

/* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P.  We don't want to use
   anchors for small data: the GP register acts as an anchor in that
   case.  We also don't want to use them for PC-relative accesses,
   where the PC acts as an anchor.  */

static bool
mips_use_anchors_for_symbol_p (const_rtx symbol)
{
  switch (mips_classify_symbol (symbol, SYMBOL_CONTEXT_MEM))
    {
    case SYMBOL_PC_RELATIVE:
    case SYMBOL_GP_RELATIVE:
      return false;

    default:
      return default_use_anchors_for_symbol_p (symbol);
    }
}

/* The MIPS debug format wants all automatic variables and arguments
   to be in terms of the virtual frame pointer (stack pointer before
   any adjustment in the function), while the MIPS 3.0 linker wants
   the frame pointer to be the stack pointer after the initial
   adjustment.  So, we do the adjustment here.  The arg pointer (which
   is eliminated) points to the virtual frame pointer, while the frame
   pointer (which may be eliminated) points to the stack pointer after
   the initial adjustments.  */

HOST_WIDE_INT
mips_debugger_offset (rtx addr, HOST_WIDE_INT offset)
{
  rtx offset2 = const0_rtx;
  rtx reg = eliminate_constant_term (addr, &offset2);

  if (offset == 0)
    offset = INTVAL (offset2);

  if (reg == stack_pointer_rtx
      || reg == frame_pointer_rtx
      || reg == hard_frame_pointer_rtx)
    {
      offset -= cfun->machine->frame.total_size;
      if (reg == hard_frame_pointer_rtx)
        offset += cfun->machine->frame.hard_frame_pointer_offset;
    }

  /* sdbout_parms does not want this to crash for unrecognized cases.  */
#if 0
  else if (reg != arg_pointer_rtx)
    fatal_insn ("mips_debugger_offset called with non stack/frame/arg pointer",
                addr);
#endif

  return offset;
}

/* Implement ASM_OUTPUT_EXTERNAL.  */

void
mips_output_external (FILE *file, tree decl, const char *name)
{
  default_elf_asm_output_external (file, decl, name);

  /* We output the name if and only if TREE_SYMBOL_REFERENCED is
     set in order to avoid putting out names that are never really
     used.  */
  if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
    {
      if (!TARGET_EXPLICIT_RELOCS && mips_in_small_data_p (decl))
        {
          /* When using assembler macros, emit .extern directives for
             all small-data externs so that the assembler knows how
             big they are.

             In most cases it would be safe (though pointless) to emit
             .externs for other symbols too.  One exception is when an
             object is within the -G limit but declared by the user to
             be in a section other than .sbss or .sdata.  */
          fputs ("\t.extern\t", file);
          assemble_name (file, name);
          fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC "\n",
                   int_size_in_bytes (TREE_TYPE (decl)));
        }
      else if (TARGET_IRIX
               && mips_abi == ABI_32
               && TREE_CODE (decl) == FUNCTION_DECL)
        {
          /* In IRIX 5 or IRIX 6 for the O32 ABI, we must output a
             `.global name .text' directive for every used but
             undefined function.  If we don't, the linker may perform
             an optimization (skipping over the insns that set $gp)
             when it is unsafe.  */
          fputs ("\t.globl ", file);
          assemble_name (file, name);
          fputs (" .text\n", file);
        }
    }
}

/* Implement ASM_OUTPUT_SOURCE_FILENAME.  */

void
mips_output_filename (FILE *stream, const char *name)
{
  /* If we are emitting DWARF-2, let dwarf2out handle the ".file"
     directives.  */
  if (write_symbols == DWARF2_DEBUG)
    return;
  else if (mips_output_filename_first_time)
    {
      mips_output_filename_first_time = 0;
      num_source_filenames += 1;
      current_function_file = name;
      fprintf (stream, "\t.file\t%d ", num_source_filenames);
      output_quoted_string (stream, name);
      putc ('\n', stream);
    }
  /* If we are emitting stabs, let dbxout.c handle this (except for
     the mips_output_filename_first_time case).  */
  else if (write_symbols == DBX_DEBUG)
    return;
  else if (name != current_function_file
           && strcmp (name, current_function_file) != 0)
    {
      num_source_filenames += 1;
      current_function_file = name;
      fprintf (stream, "\t.file\t%d ", num_source_filenames);
      output_quoted_string (stream, name);
      putc ('\n', stream);
    }
}

/* Implement TARGET_ASM_OUTPUT_DWARF_DTPREL.  */

static void ATTRIBUTE_UNUSED
mips_output_dwarf_dtprel (FILE *file, int size, rtx x)
{
  switch (size)
    {
    case 4:
      fputs ("\t.dtprelword\t", file);
      break;

    case 8:
      fputs ("\t.dtpreldword\t", file);
      break;

    default:
      gcc_unreachable ();
    }
  output_addr_const (file, x);
  fputs ("+0x8000", file);
}

/* Implement TARGET_DWARF_REGISTER_SPAN.  */

static rtx
mips_dwarf_register_span (rtx reg)
{
  rtx high, low;
  enum machine_mode mode;

  /* By default, GCC maps increasing register numbers to increasing
     memory locations, but paired FPRs are always little-endian,
     regardless of the prevailing endianness.  */
  mode = GET_MODE (reg);
  if (FP_REG_P (REGNO (reg))
      && TARGET_BIG_ENDIAN
      && MAX_FPRS_PER_FMT > 1
      && GET_MODE_SIZE (mode) > UNITS_PER_FPREG)
    {
      gcc_assert (GET_MODE_SIZE (mode) == UNITS_PER_HWFPVALUE);
      high = mips_subword (reg, true);
      low = mips_subword (reg, false);
      return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, high, low));
    }

  return NULL_RTX;
}

/* Implement ASM_OUTPUT_ASCII.  */

void
mips_output_ascii (FILE *stream, const char *string, size_t len)
{
  size_t i;
  int cur_pos;

  cur_pos = 17;
  fprintf (stream, "\t.ascii\t\"");
  for (i = 0; i < len; i++)
    {
      int c;

      c = (unsigned char) string[i];
      if (ISPRINT (c))
        {
          if (c == '\\' || c == '\"')
            {
              putc ('\\', stream);
              cur_pos++;
            }
          putc (c, stream);
          cur_pos++;
        }
      else
        {
          fprintf (stream, "\\%03o", c);
          cur_pos += 4;
        }

      if (cur_pos > 72 && i+1 < len)
        {
          cur_pos = 17;
          fprintf (stream, "\"\n\t.ascii\t\"");
        }
    }
  fprintf (stream, "\"\n");
}
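
/* Example: mips_output_ascii (stream, "ab\"c\n", 5) produces

        .ascii  "ab\"c\012"

   with nonprintable characters emitted as three-digit octal escapes and
   long strings split into multiple .ascii directives once a line passes
   column 72.  */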

/* Emit either a label, .comm, or .lcomm directive.  When using assembler
   macros, mark the symbol as written so that mips_asm_output_external
   won't emit an .extern for it.  STREAM is the output file, NAME is the
   name of the symbol, INIT_STRING is the string that should be written
   before the symbol and FINAL_STRING is the string that should be
   written after it.  FINAL_STRING is a printf format that consumes the
   remaining arguments.  */

void
mips_declare_object (FILE *stream, const char *name, const char *init_string,
                     const char *final_string, ...)
{
  va_list ap;

  fputs (init_string, stream);
  assemble_name (stream, name);
  va_start (ap, final_string);
  vfprintf (stream, final_string, ap);
  va_end (ap);

  if (!TARGET_EXPLICIT_RELOCS)
    {
      tree name_tree = get_identifier (name);
      TREE_ASM_WRITTEN (name_tree) = 1;
    }
}

/* Declare a common object of SIZE bytes using asm directive INIT_STRING.
   NAME is the name of the object and ALIGN is the required alignment
   in bytes.  TAKES_ALIGNMENT_P is true if the directive takes a third
   alignment argument.  */

void
mips_declare_common_object (FILE *stream, const char *name,
                            const char *init_string,
                            unsigned HOST_WIDE_INT size,
                            unsigned int align, bool takes_alignment_p)
{
  if (!takes_alignment_p)
    {
      size += (align / BITS_PER_UNIT) - 1;
      size -= size % (align / BITS_PER_UNIT);
      mips_declare_object (stream, name, init_string,
                           "," HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
    }
  else
    mips_declare_object (stream, name, init_string,
                         "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
                         size, align / BITS_PER_UNIT);
}
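
/* Example: mips_declare_common_object (stream, "foo", "\n\t.comm\t", 16,
   64, true) emits

        .comm   foo,16,8

   whereas with TAKES_ALIGNMENT_P false the size is instead rounded up to
   a multiple of the alignment and no third argument is printed.  */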

/* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON.  This is usually the same as the
   elfos.h version, but we also need to handle -muninit-const-in-rodata.  */

void
mips_output_aligned_decl_common (FILE *stream, tree decl, const char *name,
                                 unsigned HOST_WIDE_INT size,
                                 unsigned int align)
{
  /* If the target wants uninitialized const declarations in
     .rdata then don't put them in .comm.  */
  if (TARGET_EMBEDDED_DATA
      && TARGET_UNINIT_CONST_IN_RODATA
      && TREE_CODE (decl) == VAR_DECL
      && TREE_READONLY (decl)
      && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node))
    {
      if (TREE_PUBLIC (decl) && DECL_NAME (decl))
        targetm.asm_out.globalize_label (stream, name);

      switch_to_section (readonly_data_section);
      ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
      mips_declare_object (stream, name, "",
                           ":\n\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
                           size);
    }
  else
    mips_declare_common_object (stream, name, "\n\t.comm\t",
                                size, align, true);
}

#ifdef ASM_OUTPUT_SIZE_DIRECTIVE
extern int size_directive_output;

/* Implement ASM_DECLARE_OBJECT_NAME.  This is like most of the standard ELF
   definitions except that it uses mips_declare_object to emit the label.  */

void
mips_declare_object_name (FILE *stream, const char *name,
                          tree decl ATTRIBUTE_UNUSED)
{
#ifdef ASM_OUTPUT_TYPE_DIRECTIVE
  ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
#endif

  size_directive_output = 0;
  if (!flag_inhibit_size_directive && DECL_SIZE (decl))
    {
      HOST_WIDE_INT size;

      size_directive_output = 1;
      size = int_size_in_bytes (TREE_TYPE (decl));
      ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
    }

  mips_declare_object (stream, name, "", ":\n");
}

/* Implement ASM_FINISH_DECLARE_OBJECT.  This is generic ELF stuff.  */

void
mips_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end)
{
  const char *name;

  name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
  if (!flag_inhibit_size_directive
      && DECL_SIZE (decl) != 0
      && !at_end
      && top_level
      && DECL_INITIAL (decl) == error_mark_node
      && !size_directive_output)
    {
      HOST_WIDE_INT size;

      size_directive_output = 1;
      size = int_size_in_bytes (TREE_TYPE (decl));
      ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
    }
}
#endif

/* Return the FOO in the name of the ".mdebug.FOO" section associated
   with the current ABI.  */

static const char *
mips_mdebug_abi_name (void)
{
  switch (mips_abi)
    {
    case ABI_32:
      return "abi32";
    case ABI_O64:
      return "abiO64";
    case ABI_N32:
      return "abiN32";
    case ABI_64:
      return "abi64";
    case ABI_EABI:
      return TARGET_64BIT ? "eabi64" : "eabi32";
    default:
      gcc_unreachable ();
    }
}

/* Implement TARGET_ASM_FILE_START.  */

static void
mips_file_start (void)
{
  default_file_start ();

  /* Generate a special section to describe the ABI switches used to
     produce the resultant binary.  This is unnecessary on IRIX and
     causes unwanted warnings from the native linker.  */
  if (!TARGET_IRIX)
    {
      /* Record the ABI itself.  Modern versions of binutils encode
         this information in the ELF header flags, but GDB needs the
         information in order to correctly debug binaries produced by
         older binutils.  See the function mips_gdbarch_init in
         gdb/mips-tdep.c.  */
      fprintf (asm_out_file, "\t.section .mdebug.%s\n\t.previous\n",
               mips_mdebug_abi_name ());

      /* There is no ELF header flag to distinguish long32 forms of the
         EABI from long64 forms.  Emit a special section to help tools
         such as GDB.  Do the same for o64, which is sometimes used with
         -mlong64.  */
      if (mips_abi == ABI_EABI || mips_abi == ABI_O64)
        fprintf (asm_out_file, "\t.section .gcc_compiled_long%d\n"
                 "\t.previous\n", TARGET_LONG64 ? 64 : 32);

#ifdef HAVE_AS_GNU_ATTRIBUTE
      fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
               (TARGET_HARD_FLOAT_ABI
                ? (TARGET_DOUBLE_FLOAT
                   ? ((!TARGET_64BIT && TARGET_FLOAT64) ? 4 : 1) : 2) : 3));
#endif
    }

  /* If TARGET_ABICALLS, tell GAS to generate -KPIC code.  */
  if (TARGET_ABICALLS)
    {
      fprintf (asm_out_file, "\t.abicalls\n");
      if (TARGET_ABICALLS_PIC0)
        fprintf (asm_out_file, "\t.option\tpic0\n");
    }

  if (flag_verbose_asm)
    fprintf (asm_out_file, "\n%s -G value = %d, Arch = %s, ISA = %d\n",
             ASM_COMMENT_START,
             mips_small_data_threshold, mips_arch_info->name, mips_isa);
}

/* Make the last instruction frame-related and note that it performs
   the operation described by FRAME_PATTERN.  */

static void
mips_set_frame_expr (rtx frame_pattern)
{
  rtx insn;

  insn = get_last_insn ();
  RTX_FRAME_RELATED_P (insn) = 1;
  REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
                                      frame_pattern,
                                      REG_NOTES (insn));
}

/* Return a frame-related rtx that stores REG at MEM.
   REG must be a single register.  */

static rtx
mips_frame_set (rtx mem, rtx reg)
{
  rtx set;

  /* If we're saving the return address register and the DWARF return
     address column differs from the hard register number, adjust the
     note reg to refer to the former.  */
  if (REGNO (reg) == RETURN_ADDR_REGNUM
      && DWARF_FRAME_RETURN_COLUMN != RETURN_ADDR_REGNUM)
    reg = gen_rtx_REG (GET_MODE (reg), DWARF_FRAME_RETURN_COLUMN);

  set = gen_rtx_SET (VOIDmode, mem, reg);
  RTX_FRAME_RELATED_P (set) = 1;

  return set;
}

/* If a MIPS16e SAVE or RESTORE instruction saves or restores register
   mips16e_s2_s8_regs[X], it must also save the registers in indexes
   X + 1 onwards.  Likewise mips16e_a0_a3_regs.  */
static const unsigned char mips16e_s2_s8_regs[] = {
  30, 23, 22, 21, 20, 19, 18
};
static const unsigned char mips16e_a0_a3_regs[] = {
  4, 5, 6, 7
};

/* A list of the registers that can be saved by the MIPS16e SAVE instruction,
   ordered from the uppermost in memory to the lowest in memory.  */
static const unsigned char mips16e_save_restore_regs[] = {
  31, 30, 23, 22, 21, 20, 19, 18, 17, 16, 7, 6, 5, 4
};

/* Return the index of the lowest X in the range [0, SIZE) for which
   bit REGS[X] is set in MASK.  Return SIZE if there is no such X.  */

static unsigned int
mips16e_find_first_register (unsigned int mask, const unsigned char *regs,
                             unsigned int size)
{
  unsigned int i;

  for (i = 0; i < size; i++)
    if (BITSET_P (mask, regs[i]))
      break;

  return i;
}

/* *MASK_PTR is a mask of general-purpose registers and *NUM_REGS_PTR
   is the number of set bits.  If *MASK_PTR contains REGS[X] for some X
   in [0, SIZE), adjust *MASK_PTR and *NUM_REGS_PTR so that the same
   is true for all indexes (X, SIZE).  */

static void
mips16e_mask_registers (unsigned int *mask_ptr, const unsigned char *regs,
                        unsigned int size, unsigned int *num_regs_ptr)
{
  unsigned int i;

  i = mips16e_find_first_register (*mask_ptr, regs, size);
  for (i++; i < size; i++)
    if (!BITSET_P (*mask_ptr, regs[i]))
      {
        *num_regs_ptr += 1;
        *mask_ptr |= 1 << regs[i];
      }
}
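
/* A worked example: if the only bit set in *MASK_PTR from
   mips16e_s2_s8_regs is $19 (index 5 in that array), the loop above also
   sets the bit for $18 (index 6) and bumps *NUM_REGS_PTR, because a
   MIPS16e SAVE/RESTORE that includes mips16e_s2_s8_regs[X] must also
   include the registers at indexes X + 1 onwards (see the comment above
   the arrays).  */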

/* Return a simplified form of X using the register values in REG_VALUES.
   REG_VALUES[R] is the last value assigned to hard register R, or null
   if R has not been modified.

   This function is rather limited, but is good enough for our purposes.  */

static rtx
mips16e_collect_propagate_value (rtx x, rtx *reg_values)
{
  x = avoid_constant_pool_reference (x);

  if (UNARY_P (x))
    {
      rtx x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
      return simplify_gen_unary (GET_CODE (x), GET_MODE (x),
                                 x0, GET_MODE (XEXP (x, 0)));
    }

  if (ARITHMETIC_P (x))
    {
      rtx x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
      rtx x1 = mips16e_collect_propagate_value (XEXP (x, 1), reg_values);
      return simplify_gen_binary (GET_CODE (x), GET_MODE (x), x0, x1);
    }

  if (REG_P (x)
      && reg_values[REGNO (x)]
      && !rtx_unstable_p (reg_values[REGNO (x)]))
    return reg_values[REGNO (x)];

  return x;
}

/* Return true if (set DEST SRC) stores an argument register into its
   caller-allocated save slot, storing the number of that argument
   register in *REGNO_PTR if so.  REG_VALUES is as for
   mips16e_collect_propagate_value.  */

static bool
mips16e_collect_argument_save_p (rtx dest, rtx src, rtx *reg_values,
                                 unsigned int *regno_ptr)
{
  unsigned int argno, regno;
  HOST_WIDE_INT offset, required_offset;
  rtx addr, base;

  /* Check that this is a word-mode store.  */
  if (!MEM_P (dest) || !REG_P (src) || GET_MODE (dest) != word_mode)
    return false;

  /* Check that the register being saved is an unmodified argument
     register.  */
  regno = REGNO (src);
  if (!IN_RANGE (regno, GP_ARG_FIRST, GP_ARG_LAST) || reg_values[regno])
    return false;
  argno = regno - GP_ARG_FIRST;

  /* Check whether the address is an appropriate stack-pointer or
     frame-pointer access.  */
  addr = mips16e_collect_propagate_value (XEXP (dest, 0), reg_values);
  mips_split_plus (addr, &base, &offset);
  required_offset = cfun->machine->frame.total_size + argno * UNITS_PER_WORD;
  if (base == hard_frame_pointer_rtx)
    required_offset -= cfun->machine->frame.hard_frame_pointer_offset;
  else if (base != stack_pointer_rtx)
    return false;
  if (offset != required_offset)
    return false;

  *regno_ptr = regno;
  return true;
}

/* A subroutine of mips_expand_prologue, called only when generating
   MIPS16e SAVE instructions.  Search the start of the function for any
   instructions that save argument registers into their caller-allocated
   save slots.  Delete such instructions and return a value N such that
   saving [GP_ARG_FIRST, GP_ARG_FIRST + N) would make all the deleted
   instructions redundant.  */

static unsigned int
mips16e_collect_argument_saves (void)
{
  rtx reg_values[FIRST_PSEUDO_REGISTER];
  rtx insn, next, set, dest, src;
  unsigned int nargs, regno;

  push_topmost_sequence ();
  nargs = 0;
  memset (reg_values, 0, sizeof (reg_values));
  for (insn = get_insns (); insn; insn = next)
    {
      next = NEXT_INSN (insn);
      if (NOTE_P (insn) || DEBUG_INSN_P (insn))
        continue;

      if (!INSN_P (insn))
        break;

      set = PATTERN (insn);
      if (GET_CODE (set) != SET)
        break;

      dest = SET_DEST (set);
      src = SET_SRC (set);
      if (mips16e_collect_argument_save_p (dest, src, reg_values, &regno))
        {
          if (!BITSET_P (cfun->machine->frame.mask, regno))
            {
              delete_insn (insn);
              nargs = MAX (nargs, (regno - GP_ARG_FIRST) + 1);
            }
        }
      else if (REG_P (dest) && GET_MODE (dest) == word_mode)
        reg_values[REGNO (dest)]
          = mips16e_collect_propagate_value (src, reg_values);
      else
        break;
    }
  pop_topmost_sequence ();

  return nargs;
}

/* Return a move between register REGNO and memory location SP + OFFSET.
   Make the move a load if RESTORE_P, otherwise make it a frame-related
   store.  */

static rtx
mips16e_save_restore_reg (bool restore_p, HOST_WIDE_INT offset,
                          unsigned int regno)
{
  rtx reg, mem;

  mem = gen_frame_mem (SImode, plus_constant (stack_pointer_rtx, offset));
  reg = gen_rtx_REG (SImode, regno);
  return (restore_p
          ? gen_rtx_SET (VOIDmode, reg, mem)
          : mips_frame_set (mem, reg));
}

/* Return RTL for a MIPS16e SAVE or RESTORE instruction; RESTORE_P says which.
   The instruction must:

     - Allocate or deallocate SIZE bytes in total; SIZE is known
       to be nonzero.

     - Save or restore as many registers in *MASK_PTR as possible.
       The instruction saves the first registers at the top of the
       allocated area, with the other registers below it.

     - Save NARGS argument registers above the allocated area.

   (NARGS is always zero if RESTORE_P.)

   The SAVE and RESTORE instructions cannot save and restore all general
   registers, so there may be some registers left over for the caller to
   handle.  Destructively modify *MASK_PTR so that it contains the registers
   that still need to be saved or restored.  The caller can save these
   registers in the memory immediately below *OFFSET_PTR, which is a
   byte offset from the bottom of the allocated stack area.  */

static rtx
mips16e_build_save_restore (bool restore_p, unsigned int *mask_ptr,
                            HOST_WIDE_INT *offset_ptr, unsigned int nargs,
                            HOST_WIDE_INT size)
{
  rtx pattern, set;
  HOST_WIDE_INT offset, top_offset;
  unsigned int i, regno;
  int n;

  gcc_assert (cfun->machine->frame.num_fp == 0);

  /* Calculate the number of elements in the PARALLEL.  We need one element
     for the stack adjustment, one for each argument register save, and one
     for each additional register move.  */
  n = 1 + nargs;
  for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
    if (BITSET_P (*mask_ptr, mips16e_save_restore_regs[i]))
      n++;

  /* Create the final PARALLEL.  */
  pattern = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (n));
  n = 0;

  /* Add the stack pointer adjustment.  */
  set = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
                     plus_constant (stack_pointer_rtx,
                                    restore_p ? size : -size));
  RTX_FRAME_RELATED_P (set) = 1;
  XVECEXP (pattern, 0, n++) = set;

  /* Stack offsets in the PARALLEL are relative to the old stack pointer.  */
  top_offset = restore_p ? size : 0;

  /* Save the arguments.  */
  for (i = 0; i < nargs; i++)
    {
      offset = top_offset + i * UNITS_PER_WORD;
      set = mips16e_save_restore_reg (restore_p, offset, GP_ARG_FIRST + i);
      XVECEXP (pattern, 0, n++) = set;
    }

  /* Then fill in the other register moves.  */
  offset = top_offset;
  for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
    {
      regno = mips16e_save_restore_regs[i];
      if (BITSET_P (*mask_ptr, regno))
        {
          offset -= UNITS_PER_WORD;
          set = mips16e_save_restore_reg (restore_p, offset, regno);
          XVECEXP (pattern, 0, n++) = set;
          *mask_ptr &= ~(1 << regno);
        }
    }

  /* Tell the caller what offset it should use for the remaining registers.  */
  *offset_ptr = size + (offset - top_offset);

  gcc_assert (n == XVECLEN (pattern, 0));

  return pattern;
}
|
8522 |
|
|
|
8523 |
|
|
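/* Editorial sketch, not in the original sources (assumes UNITS_PER_WORD == 4):
   a save (RESTORE_P false) with SIZE == 32, NARGS == 0 and *MASK_PTR covering
   $16 and $31 produces a three-element PARALLEL, schematically:

        (set sp (plus sp -32))
        (set (mem (plus sp -4)) (reg:SI 31))
        (set (mem (plus sp -8)) (reg:SI 16))

   with all offsets relative to the incoming stack pointer.  On return,
   *MASK_PTR is empty and *OFFSET_PTR is 32 - 8 = 24; any remaining registers
   would go in memory immediately below new-$sp + 24.  */
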
/* PATTERN is a PARALLEL whose first element adds ADJUST to the stack
   pointer.  Return true if PATTERN matches the kind of instruction
   generated by mips16e_build_save_restore.  If INFO is nonnull,
   initialize it when returning true.  */

bool
mips16e_save_restore_pattern_p (rtx pattern, HOST_WIDE_INT adjust,
                                struct mips16e_save_restore_info *info)
{
  unsigned int i, nargs, mask, extra;
  HOST_WIDE_INT top_offset, save_offset, offset;
  rtx set, reg, mem, base;
  int n;

  if (!GENERATE_MIPS16E_SAVE_RESTORE)
    return false;

  /* Stack offsets in the PARALLEL are relative to the old stack pointer.  */
  top_offset = adjust > 0 ? adjust : 0;

  /* Interpret all other members of the PARALLEL.  */
  save_offset = top_offset - UNITS_PER_WORD;
  mask = 0;
  nargs = 0;
  i = 0;
  for (n = 1; n < XVECLEN (pattern, 0); n++)
    {
      /* Check that we have a SET.  */
      set = XVECEXP (pattern, 0, n);
      if (GET_CODE (set) != SET)
        return false;

      /* Check that the SET is a load (if restoring) or a store
         (if saving).  */
      mem = adjust > 0 ? SET_SRC (set) : SET_DEST (set);
      if (!MEM_P (mem))
        return false;

      /* Check that the address is the sum of the stack pointer and a
         possibly-zero constant offset.  */
      mips_split_plus (XEXP (mem, 0), &base, &offset);
      if (base != stack_pointer_rtx)
        return false;

      /* Check that SET's other operand is a register.  */
      reg = adjust > 0 ? SET_DEST (set) : SET_SRC (set);
      if (!REG_P (reg))
        return false;

      /* Check for argument saves.  */
      if (offset == top_offset + nargs * UNITS_PER_WORD
          && REGNO (reg) == GP_ARG_FIRST + nargs)
        nargs++;
      else if (offset == save_offset)
        {
          while (mips16e_save_restore_regs[i++] != REGNO (reg))
            if (i == ARRAY_SIZE (mips16e_save_restore_regs))
              return false;

          mask |= 1 << REGNO (reg);
          save_offset -= UNITS_PER_WORD;
        }
      else
        return false;
    }

  /* Check that the restrictions on register ranges are met.  */
  extra = 0;
  mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
                          ARRAY_SIZE (mips16e_s2_s8_regs), &extra);
  mips16e_mask_registers (&mask, mips16e_a0_a3_regs,
                          ARRAY_SIZE (mips16e_a0_a3_regs), &extra);
  if (extra != 0)
    return false;

  /* Make sure that the topmost argument register is not saved twice.
     The checks above ensure that the same is then true for the other
     argument registers.  */
  if (nargs > 0 && BITSET_P (mask, GP_ARG_FIRST + nargs - 1))
    return false;

  /* Pass back information, if requested.  */
  if (info)
    {
      info->nargs = nargs;
      info->mask = mask;
      info->size = (adjust > 0 ? adjust : -adjust);
    }

  return true;
}

/* Add a MIPS16e SAVE or RESTORE register-range argument to string S
   for the register range [MIN_REG, MAX_REG].  Return a pointer to
   the null terminator.  */

static char *
mips16e_add_register_range (char *s, unsigned int min_reg,
                            unsigned int max_reg)
{
  if (min_reg != max_reg)
    s += sprintf (s, ",%s-%s", reg_names[min_reg], reg_names[max_reg]);
  else
    s += sprintf (s, ",%s", reg_names[min_reg]);
  return s;
}

/* Return the assembly instruction for a MIPS16e SAVE or RESTORE instruction.
   PATTERN and ADJUST are as for mips16e_save_restore_pattern_p.  */

const char *
mips16e_output_save_restore (rtx pattern, HOST_WIDE_INT adjust)
{
  static char buffer[300];

  struct mips16e_save_restore_info info;
  unsigned int i, end;
  char *s;

  /* Parse the pattern.  */
  if (!mips16e_save_restore_pattern_p (pattern, adjust, &info))
    gcc_unreachable ();

  /* Add the mnemonic.  */
  s = strcpy (buffer, adjust > 0 ? "restore\t" : "save\t");
  s += strlen (s);

  /* Save the arguments.  */
  if (info.nargs > 1)
    s += sprintf (s, "%s-%s,", reg_names[GP_ARG_FIRST],
                  reg_names[GP_ARG_FIRST + info.nargs - 1]);
  else if (info.nargs == 1)
    s += sprintf (s, "%s,", reg_names[GP_ARG_FIRST]);

  /* Emit the amount of stack space to allocate or deallocate.  */
  s += sprintf (s, "%d", (int) info.size);

  /* Save or restore $16.  */
  if (BITSET_P (info.mask, 16))
    s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 16]);

  /* Save or restore $17.  */
  if (BITSET_P (info.mask, 17))
    s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 17]);

  /* Save or restore registers in the range $s2...$s8, which
     mips16e_s2_s8_regs lists in decreasing order.  Note that this
     is a software register range; the hardware registers are not
     numbered consecutively.  */
  end = ARRAY_SIZE (mips16e_s2_s8_regs);
  i = mips16e_find_first_register (info.mask, mips16e_s2_s8_regs, end);
  if (i < end)
    s = mips16e_add_register_range (s, mips16e_s2_s8_regs[end - 1],
                                    mips16e_s2_s8_regs[i]);

  /* Save or restore registers in the range $a0...$a3.  */
  end = ARRAY_SIZE (mips16e_a0_a3_regs);
  i = mips16e_find_first_register (info.mask, mips16e_a0_a3_regs, end);
  if (i < end)
    s = mips16e_add_register_range (s, mips16e_a0_a3_regs[i],
                                    mips16e_a0_a3_regs[end - 1]);

  /* Save or restore $31.  */
  if (BITSET_P (info.mask, RETURN_ADDR_REGNUM))
    s += sprintf (s, ",%s", reg_names[RETURN_ADDR_REGNUM]);

  return buffer;
}

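/* Editorial example, not in the original sources: for a 32-byte allocation
   with no argument saves and info.mask covering $16, $17 and $31, the code
   above builds "save\t32,$16,$17,$31" (assuming the default numeric
   reg_names spellings); the matching epilogue pattern would give
   "restore\t32,$16,$17,$31".  */
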
/* Return true if the current function returns its value in a floating-point
   register in MIPS16 mode.  */

static bool
mips16_cfun_returns_in_fpr_p (void)
{
  tree return_type = DECL_RESULT (current_function_decl);
  return (TARGET_MIPS16
          && TARGET_HARD_FLOAT_ABI
          && !aggregate_value_p (return_type, current_function_decl)
          && mips_return_mode_in_fpr_p (DECL_MODE (return_type)));
}

/* Return true if predicate PRED is true for at least one instruction.
   Cache the result in *CACHE, and assume that the result is true
   if *CACHE is already true.  */

static bool
mips_find_gp_ref (bool *cache, bool (*pred) (rtx))
{
  rtx insn;

  if (!*cache)
    {
      push_topmost_sequence ();
      for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
        if (USEFUL_INSN_P (insn) && pred (insn))
          {
            *cache = true;
            break;
          }
      pop_topmost_sequence ();
    }
  return *cache;
}

/* Return true if INSN refers to the global pointer in an "inflexible" way.
   See mips_cfun_has_inflexible_gp_ref_p for details.  */

static bool
mips_insn_has_inflexible_gp_ref_p (rtx insn)
{
  /* Uses of pic_offset_table_rtx in CALL_INSN_FUNCTION_USAGE
     indicate that the target could be a traditional MIPS
     lazily-binding stub.  */
  return find_reg_fusage (insn, USE, pic_offset_table_rtx);
}

/* Return true if the current function refers to the global pointer
   in a way that forces $28 to be valid.  This means that we can't
   change the choice of global pointer, even for NewABI code.

   One example of this (and one which needs several checks) is that
   $28 must be valid when calling traditional MIPS lazy-binding stubs.
   (This restriction does not apply to PLTs.)  */

static bool
mips_cfun_has_inflexible_gp_ref_p (void)
{
  /* If the function has a nonlocal goto, $28 must hold the correct
     global pointer for the target function.  That is, the target
     of the goto implicitly uses $28.  */
  if (crtl->has_nonlocal_goto)
    return true;

  if (TARGET_ABICALLS_PIC2)
    {
      /* Symbolic accesses implicitly use the global pointer unless
         -mexplicit-relocs is in effect.  JAL macros to symbolic addresses
         might go to traditional MIPS lazy-binding stubs.  */
      if (!TARGET_EXPLICIT_RELOCS)
        return true;

      /* FUNCTION_PROFILER includes a JAL to _mcount, which again
         can be lazily-bound.  */
      if (crtl->profile)
        return true;

      /* MIPS16 functions that return in FPRs need to call an
         external libgcc routine.  This call is only made explicit
         during mips_expand_epilogue, and it too might be lazily bound.  */
      if (mips16_cfun_returns_in_fpr_p ())
        return true;
    }

  return mips_find_gp_ref (&cfun->machine->has_inflexible_gp_insn_p,
                           mips_insn_has_inflexible_gp_ref_p);
}

/* Return true if INSN refers to the global pointer in a "flexible" way.
   See mips_cfun_has_flexible_gp_ref_p for details.  */

static bool
mips_insn_has_flexible_gp_ref_p (rtx insn)
{
  return (get_attr_got (insn) != GOT_UNSET
          || mips_small_data_pattern_p (PATTERN (insn))
          || reg_overlap_mentioned_p (pic_offset_table_rtx, PATTERN (insn)));
}

/* Return true if the current function references the global pointer,
   but those references do not inherently require the global pointer
   to be $28.  Assume !mips_cfun_has_inflexible_gp_ref_p ().  */

static bool
mips_cfun_has_flexible_gp_ref_p (void)
{
  /* Reload can sometimes introduce constant pool references
     into a function that otherwise didn't need them.  For example,
     suppose we have an instruction like:

        (set (reg:DF R1) (float:DF (reg:SI R2)))

     If R2 turns out to be a constant such as 1, the instruction may
     have a REG_EQUAL note saying that R1 == 1.0.  Reload then has
     the option of using this constant if R2 doesn't get allocated
     to a register.

     In cases like these, reload will have added the constant to the
     pool but no instruction will yet refer to it.  */
  if (TARGET_ABICALLS_PIC2 && !reload_completed && crtl->uses_const_pool)
    return true;

  return mips_find_gp_ref (&cfun->machine->has_flexible_gp_insn_p,
                           mips_insn_has_flexible_gp_ref_p);
}

/* Return the register that should be used as the global pointer
   within this function.  Return INVALID_REGNUM if the function
   doesn't need a global pointer.  */

static unsigned int
mips_global_pointer (void)
{
  unsigned int regno;

  /* $gp is always available unless we're using a GOT.  */
  if (!TARGET_USE_GOT)
    return GLOBAL_POINTER_REGNUM;

  /* If there are inflexible references to $gp, we must use the
     standard register.  */
  if (mips_cfun_has_inflexible_gp_ref_p ())
    return GLOBAL_POINTER_REGNUM;

  /* If there are no current references to $gp, then the only uses
     we can introduce later are those involved in long branches.  */
  if (TARGET_ABSOLUTE_JUMPS && !mips_cfun_has_flexible_gp_ref_p ())
    return INVALID_REGNUM;

  /* If the global pointer is call-saved, try to use a call-clobbered
     alternative.  */
  if (TARGET_CALL_SAVED_GP && current_function_is_leaf)
    for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
      if (!df_regs_ever_live_p (regno)
          && call_really_used_regs[regno]
          && !fixed_regs[regno]
          && regno != PIC_FUNCTION_ADDR_REGNUM)
        return regno;

  return GLOBAL_POINTER_REGNUM;
}

/* Return true if the current function's prologue must load the global
   pointer value into pic_offset_table_rtx and store the same value in
   the function's cprestore slot (if any).

   One problem we have to deal with is that, when emitting GOT-based
   position independent code, long-branch sequences will need to load
   the address of the branch target from the GOT.  We don't know until
   the very end of compilation whether (and where) the function needs
   long branches, so we must ensure that _any_ branch can access the
   global pointer in some form.  However, we do not want to pessimize
   the usual case in which all branches are short.

   We handle this as follows:

   (1) During reload, we set cfun->machine->global_pointer to
       INVALID_REGNUM if we _know_ that the current function
       doesn't need a global pointer.  This is only valid if
       long branches don't need the GOT.

       Otherwise, we assume that we might need a global pointer
       and pick an appropriate register.

   (2) If cfun->machine->global_pointer != INVALID_REGNUM,
       we ensure that the global pointer is available at every
       block boundary bar entry and exit.  We do this in one of two ways:

       - If the function has a cprestore slot, we ensure that this
         slot is valid at every branch.  However, as explained in
         point (6) below, there is no guarantee that pic_offset_table_rtx
         itself is valid if new uses of the global pointer are introduced
         after the first post-epilogue split.

         We guarantee that the cprestore slot is valid by loading it
         into a fake register, CPRESTORE_SLOT_REGNUM.  We then make
         this register live at every block boundary bar function entry
         and exit.  It is then invalid to move the load (and thus the
         preceding store) across a block boundary.

       - If the function has no cprestore slot, we guarantee that
         pic_offset_table_rtx itself is valid at every branch.

       See mips_eh_uses for the handling of the register liveness.

   (3) During prologue and epilogue generation, we emit "ghost"
       placeholder instructions to manipulate the global pointer.

   (4) During prologue generation, we set cfun->machine->must_initialize_gp_p
       and cfun->machine->must_restore_gp_when_clobbered_p if we already know
       that the function needs a global pointer.  (There is no need to set
       them earlier than this, and doing it as late as possible leads to
       fewer false positives.)

   (5) If cfun->machine->must_initialize_gp_p is true during a
       split_insns pass, we split the ghost instructions into real
       instructions.  These split instructions can then be optimized in
       the usual way.  Otherwise, we keep the ghost instructions intact,
       and optimize for the case where they aren't needed.  We still
       have the option of splitting them later, if we need to introduce
       new uses of the global pointer.

       For example, the scheduler ignores a ghost instruction that
       stores $28 to the stack, but it handles the split form of
       the ghost instruction as an ordinary store.

   (6) [OldABI only.]  If cfun->machine->must_restore_gp_when_clobbered_p
       is true during the first post-epilogue split_insns pass, we split
       calls and restore_gp patterns into instructions that explicitly
       load pic_offset_table_rtx from the cprestore slot.  Otherwise,
       we split these patterns into instructions that _don't_ load from
       the cprestore slot.

       If cfun->machine->must_restore_gp_when_clobbered_p is true at the
       time of the split, then any instructions that exist at that time
       can make free use of pic_offset_table_rtx.  However, if we want
       to introduce new uses of the global pointer after the split,
       we must explicitly load the value from the cprestore slot, since
       pic_offset_table_rtx itself might not be valid at a given point
       in the function.

       The idea is that we want to be able to delete redundant
       loads from the cprestore slot in the usual case where no
       long branches are needed.

   (7) If cfun->machine->must_initialize_gp_p is still false at the end
       of md_reorg, we decide whether the global pointer is needed for
       long branches.  If so, we set cfun->machine->must_initialize_gp_p
       to true and split the ghost instructions into real instructions
       at that stage.

   Note that the ghost instructions must have a zero length for three reasons:

   - Giving the length of the underlying $gp sequence might cause
     us to use long branches in cases where they aren't really needed.

   - They would perturb things like alignment calculations.

   - More importantly, the hazard detection in md_reorg relies on
     empty instructions having a zero length.

   If we find a long branch and split the ghost instructions at the
   end of md_reorg, the split could introduce more long branches.
   That isn't a problem though, because we still do the split before
   the final shorten_branches pass.

   This is extremely ugly, but it seems like the best compromise between
   correctness and efficiency.  */

bool
mips_must_initialize_gp_p (void)
{
  return cfun->machine->must_initialize_gp_p;
}

/* Return true if REGNO is a register that is ordinarily call-clobbered
   but must nevertheless be preserved by an interrupt handler.  */

static bool
mips_interrupt_extra_call_saved_reg_p (unsigned int regno)
{
  if (MD_REG_P (regno))
    return true;

  if (TARGET_DSP && DSP_ACC_REG_P (regno))
    return true;

  if (GP_REG_P (regno) && !cfun->machine->use_shadow_register_set_p)
    {
      /* $0 is hard-wired.  */
      if (regno == GP_REG_FIRST)
        return false;

      /* The interrupt handler can treat kernel registers as
         scratch registers.  */
      if (KERNEL_REG_P (regno))
        return false;

      /* The function will return the stack pointer to its original value
         anyway.  */
      if (regno == STACK_POINTER_REGNUM)
        return false;

      /* Otherwise, return true for registers that aren't ordinarily
         call-clobbered.  */
      return call_really_used_regs[regno];
    }

  return false;
}

/* Return true if the current function should treat register REGNO
   as call-saved.  */

static bool
mips_cfun_call_saved_reg_p (unsigned int regno)
{
  /* Interrupt handlers need to save extra registers.  */
  if (cfun->machine->interrupt_handler_p
      && mips_interrupt_extra_call_saved_reg_p (regno))
    return true;

  /* call_insns preserve $28 unless they explicitly say otherwise,
     so call_really_used_regs[] treats $28 as call-saved.  However,
     we want the ABI property rather than the default call_insn
     property here.  */
  return (regno == GLOBAL_POINTER_REGNUM
          ? TARGET_CALL_SAVED_GP
          : !call_really_used_regs[regno]);
}

/* Return true if the function body might clobber register REGNO.
   We know that REGNO is call-saved.  */

static bool
mips_cfun_might_clobber_call_saved_reg_p (unsigned int regno)
{
  /* Some functions should be treated as clobbering all call-saved
     registers.  */
  if (crtl->saves_all_registers)
    return true;

  /* DF handles cases where a register is explicitly referenced in
     the rtl.  Incoming values are passed in call-clobbered registers,
     so we can assume that any live call-saved register is set within
     the function.  */
  if (df_regs_ever_live_p (regno))
    return true;

  /* Check for registers that are clobbered by FUNCTION_PROFILER.
     These clobbers are not explicit in the rtl.  */
  if (crtl->profile && MIPS_SAVE_REG_FOR_PROFILING_P (regno))
    return true;

  /* If we're using a call-saved global pointer, the function's
     prologue will need to set it up.  */
  if (cfun->machine->global_pointer == regno)
    return true;

  /* The function's prologue will need to set the frame pointer if
     frame_pointer_needed.  */
  if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
    return true;

  /* If a MIPS16 function returns a value in FPRs, its epilogue
     will need to call an external libgcc routine.  This yet-to-be
     generated call_insn will clobber $31.  */
  if (regno == RETURN_ADDR_REGNUM && mips16_cfun_returns_in_fpr_p ())
    return true;

  /* If REGNO is ordinarily call-clobbered, we must assume that any
     called function could modify it.  */
  if (cfun->machine->interrupt_handler_p
      && !current_function_is_leaf
      && mips_interrupt_extra_call_saved_reg_p (regno))
    return true;

  return false;
}

/* Return true if the current function must save register REGNO.  */

static bool
mips_save_reg_p (unsigned int regno)
{
  if (mips_cfun_call_saved_reg_p (regno))
    {
      if (mips_cfun_might_clobber_call_saved_reg_p (regno))
        return true;

      /* Save both registers in an FPR pair if either one is used.  This is
         needed for the case when MIN_FPRS_PER_FMT == 1, which allows the odd
         register to be used without the even register.  */
      if (FP_REG_P (regno)
          && MAX_FPRS_PER_FMT == 2
          && mips_cfun_might_clobber_call_saved_reg_p (regno + 1))
        return true;
    }

  /* We need to save the incoming return address if __builtin_eh_return
     is being used to set a different return address.  */
  if (regno == RETURN_ADDR_REGNUM && crtl->calls_eh_return)
    return true;

  return false;
}

/* Populate the current function's mips_frame_info structure.

   MIPS stack frames look like:

        +-------------------------------+
        |                               |
        |  incoming stack arguments     |
        |                               |
        +-------------------------------+
        |                               |
        |  caller-allocated save area   |
      A |  for register arguments       |
        |                               |
        +-------------------------------+ <-- incoming stack pointer
        |                               |
        |  callee-allocated save area   |
      B |  for arguments that are       |
        |  split between registers and  |
        |  the stack                    |
        |                               |
        +-------------------------------+ <-- arg_pointer_rtx
        |                               |
      C |  callee-allocated save area   |
        |  for register varargs         |
        |                               |
        +-------------------------------+ <-- frame_pointer_rtx
        |                               |       + cop0_sp_offset
        |  COP0 reg save area           |       + UNITS_PER_WORD
        |                               |
        +-------------------------------+ <-- frame_pointer_rtx + acc_sp_offset
        |                               |       + UNITS_PER_WORD
        |  accumulator save area        |
        |                               |
        +-------------------------------+ <-- stack_pointer_rtx + fp_sp_offset
        |                               |       + UNITS_PER_HWFPVALUE
        |  FPR save area                |
        |                               |
        +-------------------------------+ <-- stack_pointer_rtx + gp_sp_offset
        |                               |       + UNITS_PER_WORD
        |  GPR save area                |
        |                               |
        +-------------------------------+ <-- frame_pointer_rtx with
        |                               | \     -fstack-protector
        |  local variables              |  | var_size
        |                               | /
        +-------------------------------+
        |                               | \
        |  $gp save area                |  | cprestore_size
        |                               | /
      P +-------------------------------+ <-- hard_frame_pointer_rtx for
        |                               | \     MIPS16 code
        |  outgoing stack arguments     |  |
        |                               |  |
        +-------------------------------+  | args_size
        |                               |  |
        |  caller-allocated save area   |  |
        |  for register arguments       |  |
        |                               | /
        +-------------------------------+ <-- stack_pointer_rtx
                                              frame_pointer_rtx without
                                                -fstack-protector
                                              hard_frame_pointer_rtx for
                                                non-MIPS16 code.

   At least two of A, B and C will be empty.

   Dynamic stack allocations such as alloca insert data at point P.
   They decrease stack_pointer_rtx but leave frame_pointer_rtx and
   hard_frame_pointer_rtx unchanged.  */

static void
mips_compute_frame_info (void)
{
  struct mips_frame_info *frame;
  HOST_WIDE_INT offset, size;
  unsigned int regno, i;

  /* Set this function's interrupt properties.  */
  if (mips_interrupt_type_p (TREE_TYPE (current_function_decl)))
    {
      if (!ISA_MIPS32R2)
        error ("the %<interrupt%> attribute requires a MIPS32r2 processor");
      else if (TARGET_HARD_FLOAT)
        error ("the %<interrupt%> attribute requires %<-msoft-float%>");
      else if (TARGET_MIPS16)
        error ("interrupt handlers cannot be MIPS16 functions");
      else
        {
          cfun->machine->interrupt_handler_p = true;
          cfun->machine->use_shadow_register_set_p =
            mips_use_shadow_register_set_p (TREE_TYPE (current_function_decl));
          cfun->machine->keep_interrupts_masked_p =
            mips_keep_interrupts_masked_p (TREE_TYPE (current_function_decl));
          cfun->machine->use_debug_exception_return_p =
            mips_use_debug_exception_return_p (TREE_TYPE
                                               (current_function_decl));
        }
    }

  frame = &cfun->machine->frame;
  memset (frame, 0, sizeof (*frame));
  size = get_frame_size ();

  cfun->machine->global_pointer = mips_global_pointer ();

  /* The first two blocks contain the outgoing argument area and the $gp save
     slot.  This area isn't needed in leaf functions, but if the
     target-independent frame size is nonzero, we have already committed to
     allocating these in STARTING_FRAME_OFFSET for !FRAME_GROWS_DOWNWARD.  */
  if ((size == 0 || FRAME_GROWS_DOWNWARD) && current_function_is_leaf)
    {
      /* The MIPS 3.0 linker does not like functions that dynamically
         allocate the stack and have 0 for STACK_DYNAMIC_OFFSET, since it
         looks like we are trying to create a second frame pointer to the
         function, so allocate some stack space to make it happy.  */
      if (cfun->calls_alloca)
        frame->args_size = REG_PARM_STACK_SPACE (cfun->decl);
      else
        frame->args_size = 0;
      frame->cprestore_size = 0;
    }
  else
    {
      frame->args_size = crtl->outgoing_args_size;
      frame->cprestore_size = MIPS_GP_SAVE_AREA_SIZE;
    }
  offset = frame->args_size + frame->cprestore_size;

  /* Move above the local variables.  */
  frame->var_size = MIPS_STACK_ALIGN (size);
  offset += frame->var_size;

  /* Find out which GPRs we need to save.  */
  for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
    if (mips_save_reg_p (regno))
      {
        frame->num_gp++;
        frame->mask |= 1 << (regno - GP_REG_FIRST);
      }

  /* If this function calls eh_return, we must also save and restore the
     EH data registers.  */
  if (crtl->calls_eh_return)
    for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; i++)
      {
        frame->num_gp++;
        frame->mask |= 1 << (EH_RETURN_DATA_REGNO (i) - GP_REG_FIRST);
      }

  /* The MIPS16e SAVE and RESTORE instructions have two ranges of registers:
     $a3-$a0 and $s2-$s8.  If we save one register in the range, we must
     save all later registers too.  */
  if (GENERATE_MIPS16E_SAVE_RESTORE)
    {
      mips16e_mask_registers (&frame->mask, mips16e_s2_s8_regs,
                              ARRAY_SIZE (mips16e_s2_s8_regs), &frame->num_gp);
      mips16e_mask_registers (&frame->mask, mips16e_a0_a3_regs,
                              ARRAY_SIZE (mips16e_a0_a3_regs), &frame->num_gp);
    }

  /* Move above the GPR save area.  */
  if (frame->num_gp > 0)
    {
      offset += MIPS_STACK_ALIGN (frame->num_gp * UNITS_PER_WORD);
      frame->gp_sp_offset = offset - UNITS_PER_WORD;
    }

  /* Find out which FPRs we need to save.  This loop must iterate over
     the same space as its companion in mips_for_each_saved_gpr_and_fpr.  */
  if (TARGET_HARD_FLOAT)
    for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno += MAX_FPRS_PER_FMT)
      if (mips_save_reg_p (regno))
        {
          frame->num_fp += MAX_FPRS_PER_FMT;
          frame->fmask |= ~(~0 << MAX_FPRS_PER_FMT) << (regno - FP_REG_FIRST);
        }

  /* Move above the FPR save area.  */
  if (frame->num_fp > 0)
    {
      offset += MIPS_STACK_ALIGN (frame->num_fp * UNITS_PER_FPREG);
      frame->fp_sp_offset = offset - UNITS_PER_HWFPVALUE;
    }

  /* Add in space for the interrupt context information.  */
  if (cfun->machine->interrupt_handler_p)
    {
      /* Check HI/LO.  */
      if (mips_save_reg_p (LO_REGNUM) || mips_save_reg_p (HI_REGNUM))
        {
          frame->num_acc++;
          frame->acc_mask |= (1 << 0);
        }

      /* Check accumulators 1, 2, 3.  */
      for (i = DSP_ACC_REG_FIRST; i <= DSP_ACC_REG_LAST; i += 2)
        if (mips_save_reg_p (i) || mips_save_reg_p (i + 1))
          {
            frame->num_acc++;
            frame->acc_mask |= 1 << (((i - DSP_ACC_REG_FIRST) / 2) + 1);
          }

      /* All interrupt context functions need space to preserve STATUS.  */
      frame->num_cop0_regs++;

      /* If we don't keep interrupts masked, we need to save EPC.  */
      if (!cfun->machine->keep_interrupts_masked_p)
        frame->num_cop0_regs++;
    }

  /* Move above the accumulator save area.  */
  if (frame->num_acc > 0)
    {
      /* Each accumulator needs 2 words.  */
      offset += frame->num_acc * 2 * UNITS_PER_WORD;
      frame->acc_sp_offset = offset - UNITS_PER_WORD;
    }

  /* Move above the COP0 register save area.  */
  if (frame->num_cop0_regs > 0)
    {
      offset += frame->num_cop0_regs * UNITS_PER_WORD;
      frame->cop0_sp_offset = offset - UNITS_PER_WORD;
    }

  /* Move above the callee-allocated varargs save area.  */
  offset += MIPS_STACK_ALIGN (cfun->machine->varargs_size);
  frame->arg_pointer_offset = offset;

  /* Move above the callee-allocated area for pretend stack arguments.  */
  offset += crtl->args.pretend_args_size;
  frame->total_size = offset;

  /* Work out the offsets of the save areas from the top of the frame.  */
  if (frame->gp_sp_offset > 0)
    frame->gp_save_offset = frame->gp_sp_offset - offset;
  if (frame->fp_sp_offset > 0)
    frame->fp_save_offset = frame->fp_sp_offset - offset;
  if (frame->acc_sp_offset > 0)
    frame->acc_save_offset = frame->acc_sp_offset - offset;
  if (frame->num_cop0_regs > 0)
    frame->cop0_save_offset = frame->cop0_sp_offset - offset;

  /* MIPS16 code offsets the frame pointer by the size of the outgoing
     arguments.  This tends to increase the chances of using unextended
     instructions for local variables and incoming arguments.  */
  if (TARGET_MIPS16)
    frame->hard_frame_pointer_offset = frame->args_size;
}

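/* Editorial worked example, not in the original sources (assumes
   UNITS_PER_WORD == 4 and o32's 8-byte stack alignment): a non-leaf
   function with 16 bytes of outgoing arguments, an 8-byte cprestore area,
   no locals and saves of $16, $17 and $31 accumulates offset = 16 + 8 = 24,
   a GPR save area of MIPS_STACK_ALIGN (3 * 4) == 16 bytes, and ends up with
   gp_sp_offset == 36, total_size == 40 and gp_save_offset == -4.  */
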
/* Return the style of GP load sequence that is being used for the
   current function.  */

enum mips_loadgp_style
mips_current_loadgp_style (void)
{
  if (!TARGET_USE_GOT || cfun->machine->global_pointer == INVALID_REGNUM)
    return LOADGP_NONE;

  if (TARGET_RTP_PIC)
    return LOADGP_RTP;

  if (TARGET_ABSOLUTE_ABICALLS)
    return LOADGP_ABSOLUTE;

  return TARGET_NEWABI ? LOADGP_NEWABI : LOADGP_OLDABI;
}

/* Implement TARGET_FRAME_POINTER_REQUIRED.  */

static bool
mips_frame_pointer_required (void)
{
  /* If the function contains dynamic stack allocations, we need to
     use the frame pointer to access the static parts of the frame.  */
  if (cfun->calls_alloca)
    return true;

  /* In MIPS16 mode, we need a frame pointer for a large frame; otherwise,
     reload may be unable to compute the address of a local variable,
     since there is no way to add a large constant to the stack pointer
     without using a second temporary register.  */
  if (TARGET_MIPS16)
    {
      mips_compute_frame_info ();
      if (!SMALL_OPERAND (cfun->machine->frame.total_size))
        return true;
    }

  return false;
}

/* Make sure that we're not trying to eliminate to the wrong hard frame
   pointer.  */

static bool
mips_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
{
  return (to == HARD_FRAME_POINTER_REGNUM || to == STACK_POINTER_REGNUM);
}

/* Implement INITIAL_ELIMINATION_OFFSET.  FROM is either the frame pointer
   or argument pointer.  TO is either the stack pointer or hard frame
   pointer.  */

HOST_WIDE_INT
mips_initial_elimination_offset (int from, int to)
{
  HOST_WIDE_INT offset;

  mips_compute_frame_info ();

  /* Set OFFSET to the offset from the end-of-prologue stack pointer.  */
  switch (from)
    {
    case FRAME_POINTER_REGNUM:
      if (FRAME_GROWS_DOWNWARD)
        offset = (cfun->machine->frame.args_size
                  + cfun->machine->frame.cprestore_size
                  + cfun->machine->frame.var_size);
      else
        offset = 0;
      break;

    case ARG_POINTER_REGNUM:
      offset = cfun->machine->frame.arg_pointer_offset;
      break;

    default:
      gcc_unreachable ();
    }

  if (to == HARD_FRAME_POINTER_REGNUM)
    offset -= cfun->machine->frame.hard_frame_pointer_offset;

  return offset;
}

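/* Editorial note, not in the original sources: for example, eliminating
   ARG_POINTER_REGNUM to STACK_POINTER_REGNUM simply returns
   frame.arg_pointer_offset, while eliminating it to
   HARD_FRAME_POINTER_REGNUM additionally subtracts
   frame.hard_frame_pointer_offset, which mips_compute_frame_info sets to
   frame.args_size for MIPS16 code and leaves at 0 otherwise.  */
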
/* Implement TARGET_EXTRA_LIVE_ON_ENTRY.  */

static void
mips_extra_live_on_entry (bitmap regs)
{
  if (TARGET_USE_GOT)
    {
      /* PIC_FUNCTION_ADDR_REGNUM is live if we need it to set up
         the global pointer.  */
      if (!TARGET_ABSOLUTE_ABICALLS)
        bitmap_set_bit (regs, PIC_FUNCTION_ADDR_REGNUM);

      /* The prologue may set MIPS16_PIC_TEMP_REGNUM to the value of
         the global pointer.  */
      if (TARGET_MIPS16)
        bitmap_set_bit (regs, MIPS16_PIC_TEMP_REGNUM);

      /* See the comment above load_call<mode> for details.  */
      bitmap_set_bit (regs, GOT_VERSION_REGNUM);
    }
}

/* Implement RETURN_ADDR_RTX.  We do not support moving back to a
   previous frame.  */

rtx
mips_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
{
  if (count != 0)
    return const0_rtx;

  return get_hard_reg_initial_val (Pmode, RETURN_ADDR_REGNUM);
}

/* Emit code to change the current function's return address to
   ADDRESS.  SCRATCH is available as a scratch register, if needed.
   ADDRESS and SCRATCH are both word-mode GPRs.  */

void
mips_set_return_address (rtx address, rtx scratch)
{
  rtx slot_address;

  gcc_assert (BITSET_P (cfun->machine->frame.mask, RETURN_ADDR_REGNUM));
  slot_address = mips_add_offset (scratch, stack_pointer_rtx,
                                  cfun->machine->frame.gp_sp_offset);
  mips_emit_move (gen_frame_mem (GET_MODE (address), slot_address), address);
}

/* Return true if the current function has a cprestore slot.  */

bool
mips_cfun_has_cprestore_slot_p (void)
{
  return (cfun->machine->global_pointer != INVALID_REGNUM
          && cfun->machine->frame.cprestore_size > 0);
}

/* Fill *BASE and *OFFSET such that *BASE + *OFFSET refers to the
   cprestore slot.  LOAD_P is true if the caller wants to load from
   the cprestore slot; it is false if the caller wants to store to
   the slot.  */

static void
mips_get_cprestore_base_and_offset (rtx *base, HOST_WIDE_INT *offset,
                                    bool load_p)
{
  const struct mips_frame_info *frame;

  frame = &cfun->machine->frame;
  /* .cprestore always uses the stack pointer instead of the frame pointer.
     We have a free choice for direct stores for non-MIPS16 functions,
     and for MIPS16 functions whose cprestore slot is in range of the
     stack pointer.  Using the stack pointer would sometimes give more
     (early) scheduling freedom, but using the frame pointer would
     sometimes give more (late) scheduling freedom.  It's hard to
     predict which applies to a given function, so let's keep things
     simple.

     Loads must always use the frame pointer in functions that call
     alloca, and there's little benefit to using the stack pointer
     otherwise.  */
  if (frame_pointer_needed && !(TARGET_CPRESTORE_DIRECTIVE && !load_p))
    {
      *base = hard_frame_pointer_rtx;
      *offset = frame->args_size - frame->hard_frame_pointer_offset;
    }
  else
    {
      *base = stack_pointer_rtx;
      *offset = frame->args_size;
    }
}

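/* Editorial note, not in the original sources: for a non-MIPS16 function
   that does not need a frame pointer, both the load and the store address
   therefore come out as stack_pointer_rtx + frame.args_size, i.e. the slot
   immediately above the outgoing argument area in the frame diagram above
   mips_compute_frame_info.  */
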
/* Return true if X is the load or store address of the cprestore slot;
   LOAD_P says which.  */

bool
mips_cprestore_address_p (rtx x, bool load_p)
{
  rtx given_base, required_base;
  HOST_WIDE_INT given_offset, required_offset;

  mips_split_plus (x, &given_base, &given_offset);
  mips_get_cprestore_base_and_offset (&required_base, &required_offset, load_p);
  return given_base == required_base && given_offset == required_offset;
}

/* Return a MEM rtx for the cprestore slot.  LOAD_P is true if we are
   going to load from it, false if we are going to store to it.
   Use TEMP as a temporary register if need be.  */

static rtx
mips_cprestore_slot (rtx temp, bool load_p)
{
  rtx base;
  HOST_WIDE_INT offset;

  mips_get_cprestore_base_and_offset (&base, &offset, load_p);
  return gen_frame_mem (Pmode, mips_add_offset (temp, base, offset));
}

/* Emit instructions to save global pointer value GP into cprestore
   slot MEM.  OFFSET is the offset that MEM applies to the base register.

   MEM may not be a legitimate address.  If it isn't, TEMP is a
   temporary register that can be used, otherwise it is a SCRATCH.  */

void
mips_save_gp_to_cprestore_slot (rtx mem, rtx offset, rtx gp, rtx temp)
{
  if (TARGET_CPRESTORE_DIRECTIVE)
    {
      gcc_assert (gp == pic_offset_table_rtx);
      emit_insn (gen_cprestore (mem, offset));
    }
  else
    mips_emit_move (mips_cprestore_slot (temp, false), gp);
}

/* Restore $gp from its save slot, using TEMP as a temporary base register
   if need be.  This function is for o32 and o64 abicalls only.

   See mips_must_initialize_gp_p for details about how we manage the
   global pointer.  */

void
mips_restore_gp_from_cprestore_slot (rtx temp)
{
  gcc_assert (TARGET_ABICALLS && TARGET_OLDABI && epilogue_completed);

  if (!cfun->machine->must_restore_gp_when_clobbered_p)
    {
      emit_note (NOTE_INSN_DELETED);
      return;
    }

  if (TARGET_MIPS16)
    {
      mips_emit_move (temp, mips_cprestore_slot (temp, true));
      mips_emit_move (pic_offset_table_rtx, temp);
    }
  else
    mips_emit_move (pic_offset_table_rtx, mips_cprestore_slot (temp, true));
  if (!TARGET_EXPLICIT_RELOCS)
    emit_insn (gen_blockage ());
}

/* A function to save or store a register.  The first argument is the
   register and the second is the stack slot.  */
typedef void (*mips_save_restore_fn) (rtx, rtx);

/* Use FN to save or restore register REGNO.  MODE is the register's
   mode and OFFSET is the offset of its save slot from the current
   stack pointer.  */

static void
mips_save_restore_reg (enum machine_mode mode, int regno,
                       HOST_WIDE_INT offset, mips_save_restore_fn fn)
{
  rtx mem;

  mem = gen_frame_mem (mode, plus_constant (stack_pointer_rtx, offset));
  fn (gen_rtx_REG (mode, regno), mem);
}

/* Call FN for each accumulator that is saved by the current function.
   SP_OFFSET is the offset of the current stack pointer from the start
   of the frame.  */

static void
mips_for_each_saved_acc (HOST_WIDE_INT sp_offset, mips_save_restore_fn fn)
{
  HOST_WIDE_INT offset;
  int regno;

  offset = cfun->machine->frame.acc_sp_offset - sp_offset;
  if (BITSET_P (cfun->machine->frame.acc_mask, 0))
    {
      mips_save_restore_reg (word_mode, LO_REGNUM, offset, fn);
      offset -= UNITS_PER_WORD;
      mips_save_restore_reg (word_mode, HI_REGNUM, offset, fn);
      offset -= UNITS_PER_WORD;
    }

  for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno++)
    if (BITSET_P (cfun->machine->frame.acc_mask,
                  ((regno - DSP_ACC_REG_FIRST) / 2) + 1))
      {
        mips_save_restore_reg (word_mode, regno, offset, fn);
        offset -= UNITS_PER_WORD;
      }
}

/* Call FN for each register that is saved by the current function.
   SP_OFFSET is the offset of the current stack pointer from the start
   of the frame.  */

static void
mips_for_each_saved_gpr_and_fpr (HOST_WIDE_INT sp_offset,
                                 mips_save_restore_fn fn)
{
  enum machine_mode fpr_mode;
  HOST_WIDE_INT offset;
  int regno;

  /* Save registers starting from high to low.  The debuggers prefer the
     return register to be stored at func+4, and it also allows us to avoid
     a nop in the epilogue if at least one register is reloaded in addition
     to the return address.  */
  offset = cfun->machine->frame.gp_sp_offset - sp_offset;
  for (regno = GP_REG_LAST; regno >= GP_REG_FIRST; regno--)
    if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
      {
        /* Record the ra offset for use by mips_function_profiler.  */
        if (regno == RETURN_ADDR_REGNUM)
          cfun->machine->frame.ra_fp_offset = offset + sp_offset;
        mips_save_restore_reg (word_mode, regno, offset, fn);
        offset -= UNITS_PER_WORD;
      }

  /* This loop must iterate over the same space as its companion in
     mips_compute_frame_info.  */
  offset = cfun->machine->frame.fp_sp_offset - sp_offset;
  fpr_mode = (TARGET_SINGLE_FLOAT ? SFmode : DFmode);
  for (regno = FP_REG_LAST - MAX_FPRS_PER_FMT + 1;
       regno >= FP_REG_FIRST;
       regno -= MAX_FPRS_PER_FMT)
    if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
      {
        mips_save_restore_reg (fpr_mode, regno, offset, fn);
        offset -= GET_MODE_SIZE (fpr_mode);
      }
}

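/* Editorial example, not in the original sources: with frame.mask covering
   $16, $17 and $31, the GPR loop above invokes FN for $31 at the highest
   save slot (gp_sp_offset - sp_offset), then for $17 one word lower, then
   for $16 another word lower, matching the top-down layout assumed by
   mips_compute_frame_info.  */
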
/* Return true if a move between register REGNO and its save slot (MEM)
   can be done in a single move.  LOAD_P is true if we are loading
   from the slot, false if we are storing to it.  */

static bool
mips_direct_save_slot_move_p (unsigned int regno, rtx mem, bool load_p)
{
  /* There is a specific MIPS16 instruction for saving $31 to the stack.  */
  if (TARGET_MIPS16 && !load_p && regno == RETURN_ADDR_REGNUM)
    return false;

  return mips_secondary_reload_class (REGNO_REG_CLASS (regno),
                                      GET_MODE (mem), mem, load_p) == NO_REGS;
}

/* Emit a move from SRC to DEST, given that one of them is a register
   save slot and that the other is a register.  TEMP is a temporary
   GPR of the same mode that is available if need be.  */

void
mips_emit_save_slot_move (rtx dest, rtx src, rtx temp)
{
  unsigned int regno;
  rtx mem;

  if (REG_P (src))
    {
      regno = REGNO (src);
      mem = dest;
    }
  else
    {
      regno = REGNO (dest);
      mem = src;
    }

  if (regno == cfun->machine->global_pointer && !mips_must_initialize_gp_p ())
    {
      /* We don't yet know whether we'll need this instruction or not.
         Postpone the decision by emitting a ghost move.  This move
         is specifically not frame-related; only the split version is.  */
      if (TARGET_64BIT)
        emit_insn (gen_move_gpdi (dest, src));
      else
        emit_insn (gen_move_gpsi (dest, src));
      return;
    }

  if (regno == HI_REGNUM)
    {
      if (REG_P (dest))
        {
          mips_emit_move (temp, src);
          if (TARGET_64BIT)
            emit_insn (gen_mthisi_di (gen_rtx_REG (TImode, MD_REG_FIRST),
                                      temp, gen_rtx_REG (DImode, LO_REGNUM)));
          else
            emit_insn (gen_mthisi_di (gen_rtx_REG (DImode, MD_REG_FIRST),
                                      temp, gen_rtx_REG (SImode, LO_REGNUM)));
        }
      else
        {
          if (TARGET_64BIT)
            emit_insn (gen_mfhidi_ti (temp,
                                      gen_rtx_REG (TImode, MD_REG_FIRST)));
          else
            emit_insn (gen_mfhisi_di (temp,
                                      gen_rtx_REG (DImode, MD_REG_FIRST)));
          mips_emit_move (dest, temp);
        }
    }
  else if (mips_direct_save_slot_move_p (regno, mem, mem == src))
    mips_emit_move (dest, src);
  else
    {
      gcc_assert (!reg_overlap_mentioned_p (dest, temp));
      mips_emit_move (temp, src);
      mips_emit_move (dest, temp);
    }
  if (MEM_P (dest))
    mips_set_frame_expr (mips_frame_set (dest, src));
}

/* If we're generating n32 or n64 abicalls, and the current function
   does not use $28 as its global pointer, emit a cplocal directive.
   Use pic_offset_table_rtx as the argument to the directive.  */

static void
mips_output_cplocal (void)
{
  if (!TARGET_EXPLICIT_RELOCS
      && mips_must_initialize_gp_p ()
      && cfun->machine->global_pointer != GLOBAL_POINTER_REGNUM)
    output_asm_insn (".cplocal %+", 0);
}

/* Implement TARGET_OUTPUT_FUNCTION_PROLOGUE. */
|
9790 |
|
|
|
9791 |
|
|
static void
|
9792 |
|
|
mips_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
|
9793 |
|
|
{
|
9794 |
|
|
const char *fnname;
|
9795 |
|
|
|
9796 |
|
|
#ifdef SDB_DEBUGGING_INFO
|
9797 |
|
|
if (debug_info_level != DINFO_LEVEL_TERSE && write_symbols == SDB_DEBUG)
|
9798 |
|
|
SDB_OUTPUT_SOURCE_LINE (file, DECL_SOURCE_LINE (current_function_decl));
|
9799 |
|
|
#endif
|
9800 |
|
|
|
9801 |
|
|
/* In MIPS16 mode, we may need to generate a non-MIPS16 stub to handle
|
9802 |
|
|
floating-point arguments. */
|
9803 |
|
|
if (TARGET_MIPS16
|
9804 |
|
|
&& TARGET_HARD_FLOAT_ABI
|
9805 |
|
|
&& crtl->args.info.fp_code != 0)
|
9806 |
|
|
mips16_build_function_stub ();
|
9807 |
|
|
|
9808 |
|
|
/* Get the function name the same way that toplev.c does before calling
|
9809 |
|
|
assemble_start_function. This is needed so that the name used here
|
9810 |
|
|
exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
|
9811 |
|
|
fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
|
9812 |
|
|
mips_start_function_definition (fnname, TARGET_MIPS16);
|
9813 |
|
|
|
9814 |
|
|
/* Stop mips_file_end from treating this function as external. */
|
9815 |
|
|
if (TARGET_IRIX && mips_abi == ABI_32)
|
9816 |
|
|
TREE_ASM_WRITTEN (DECL_NAME (cfun->decl)) = 1;
|
9817 |
|
|
|
9818 |
|
|
/* Output MIPS-specific frame information. */
|
9819 |
|
|
if (!flag_inhibit_size_directive)
|
9820 |
|
|
{
|
9821 |
|
|
const struct mips_frame_info *frame;
|
9822 |
|
|
|
9823 |
|
|
frame = &cfun->machine->frame;
|
9824 |
|
|
|
9825 |
|
|
/* .frame FRAMEREG, FRAMESIZE, RETREG. */
|
9826 |
|
|
fprintf (file,
|
9827 |
|
|
"\t.frame\t%s," HOST_WIDE_INT_PRINT_DEC ",%s\t\t"
|
9828 |
|
|
"# vars= " HOST_WIDE_INT_PRINT_DEC
|
9829 |
|
|
", regs= %d/%d"
|
9830 |
|
|
", args= " HOST_WIDE_INT_PRINT_DEC
|
9831 |
|
|
", gp= " HOST_WIDE_INT_PRINT_DEC "\n",
|
9832 |
|
|
reg_names[frame_pointer_needed
|
9833 |
|
|
? HARD_FRAME_POINTER_REGNUM
|
9834 |
|
|
: STACK_POINTER_REGNUM],
|
9835 |
|
|
(frame_pointer_needed
|
9836 |
|
|
? frame->total_size - frame->hard_frame_pointer_offset
|
9837 |
|
|
: frame->total_size),
|
9838 |
|
|
reg_names[RETURN_ADDR_REGNUM],
|
9839 |
|
|
frame->var_size,
|
9840 |
|
|
frame->num_gp, frame->num_fp,
|
9841 |
|
|
frame->args_size,
|
9842 |
|
|
frame->cprestore_size);
|
9843 |
|
|
|
9844 |
|
|
/* .mask MASK, OFFSET. */
|
9845 |
|
|
fprintf (file, "\t.mask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
|
9846 |
|
|
frame->mask, frame->gp_save_offset);
|
9847 |
|
|
|
9848 |
|
|
/* .fmask MASK, OFFSET. */
|
9849 |
|
|
fprintf (file, "\t.fmask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
|
9850 |
|
|
frame->fmask, frame->fp_save_offset);
|
9851 |
|
|
}
|
9852 |
|
|
|
9853 |
|
|
/* Handle the initialization of $gp for SVR4 PIC, if applicable.
|
9854 |
|
|
Also emit the ".set noreorder; .set nomacro" sequence for functions
|
9855 |
|
|
that need it. */
|
9856 |
|
|
if (mips_must_initialize_gp_p ()
|
9857 |
|
|
&& mips_current_loadgp_style () == LOADGP_OLDABI)
|
9858 |
|
|
{
|
9859 |
|
|
if (TARGET_MIPS16)
|
9860 |
|
|
{
|
9861 |
|
|
/* This is a fixed-form sequence. The position of the
|
9862 |
|
|
first two instructions is important because of the
|
9863 |
|
|
way _gp_disp is defined. */
|
9864 |
|
|
output_asm_insn ("li\t$2,%%hi(_gp_disp)", 0);
|
9865 |
|
|
output_asm_insn ("addiu\t$3,$pc,%%lo(_gp_disp)", 0);
|
9866 |
|
|
output_asm_insn ("sll\t$2,16", 0);
|
9867 |
|
|
output_asm_insn ("addu\t$2,$3", 0);
|
9868 |
|
|
}
|
9869 |
|
|
else
|
9870 |
|
|
{
|
9871 |
|
|
/* .cpload must be in a .set noreorder but not a
|
9872 |
|
|
.set nomacro block. */
|
9873 |
|
|
mips_push_asm_switch (&mips_noreorder);
|
9874 |
|
|
output_asm_insn (".cpload\t%^", 0);
|
9875 |
|
|
if (!cfun->machine->all_noreorder_p)
|
9876 |
|
|
mips_pop_asm_switch (&mips_noreorder);
|
9877 |
|
|
else
|
9878 |
|
|
mips_push_asm_switch (&mips_nomacro);
|
9879 |
|
|
}
|
9880 |
|
|
}
|
9881 |
|
|
else if (cfun->machine->all_noreorder_p)
|
9882 |
|
|
{
|
9883 |
|
|
mips_push_asm_switch (&mips_noreorder);
|
9884 |
|
|
mips_push_asm_switch (&mips_nomacro);
|
9885 |
|
|
}
|
9886 |
|
|
|
9887 |
|
|
/* Tell the assembler which register we're using as the global
|
9888 |
|
|
pointer. This is needed for thunks, since they can use either
|
9889 |
|
|
explicit relocs or assembler macros. */
|
9890 |
|
|
mips_output_cplocal ();
|
9891 |
|
|
}
|
9892 |
|
|
|
9893 |
|
|
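
/* For illustration only: for a small frame, the directives emitted above
   might look like the following (the values are hypothetical and not
   produced verbatim by this file):

        .frame  $sp,32,$31              # vars= 8, regs= 1/0, args= 16, gp= 0
        .mask   0x80000000,-4
        .fmask  0x00000000,0

   .mask records which GPRs were saved ($31 here) and the offset of the
   GPR save area; .fmask does the same for FPRs.  */
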
/* Implement TARGET_OUTPUT_FUNCTION_EPILOGUE.  */

static void
mips_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
                               HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  const char *fnname;

  /* Reinstate the normal $gp.  */
  SET_REGNO (pic_offset_table_rtx, GLOBAL_POINTER_REGNUM);
  mips_output_cplocal ();

  if (cfun->machine->all_noreorder_p)
    {
      mips_pop_asm_switch (&mips_nomacro);
      mips_pop_asm_switch (&mips_noreorder);
    }

  /* Get the function name the same way that toplev.c does before calling
     assemble_start_function.  This is needed so that the name used here
     exactly matches the name used in ASM_DECLARE_FUNCTION_NAME.  */
  fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
  mips_end_function_definition (fnname);
}

/* Save register REG to MEM.  Make the instruction frame-related.  */

static void
mips_save_reg (rtx reg, rtx mem)
{
  if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
    {
      rtx x1, x2;

      if (mips_split_64bit_move_p (mem, reg))
        mips_split_doubleword_move (mem, reg);
      else
        mips_emit_move (mem, reg);

      x1 = mips_frame_set (mips_subword (mem, false),
                           mips_subword (reg, false));
      x2 = mips_frame_set (mips_subword (mem, true),
                           mips_subword (reg, true));
      mips_set_frame_expr (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x1, x2)));
    }
  else
    mips_emit_save_slot_move (mem, reg, MIPS_PROLOGUE_TEMP (GET_MODE (reg)));
}

/* The __gnu_local_gp symbol.  */

static GTY(()) rtx mips_gnu_local_gp;

/* If we're generating n32 or n64 abicalls, emit instructions
   to set up the global pointer.  */

static void
mips_emit_loadgp (void)
{
  rtx addr, offset, incoming_address, base, index, pic_reg;

  pic_reg = TARGET_MIPS16 ? MIPS16_PIC_TEMP : pic_offset_table_rtx;
  switch (mips_current_loadgp_style ())
    {
    case LOADGP_ABSOLUTE:
      if (mips_gnu_local_gp == NULL)
        {
          mips_gnu_local_gp = gen_rtx_SYMBOL_REF (Pmode, "__gnu_local_gp");
          SYMBOL_REF_FLAGS (mips_gnu_local_gp) |= SYMBOL_FLAG_LOCAL;
        }
      emit_insn (Pmode == SImode
                 ? gen_loadgp_absolute_si (pic_reg, mips_gnu_local_gp)
                 : gen_loadgp_absolute_di (pic_reg, mips_gnu_local_gp));
      break;

    case LOADGP_OLDABI:
      /* Added by mips_output_function_prologue.  */
      break;

    case LOADGP_NEWABI:
      addr = XEXP (DECL_RTL (current_function_decl), 0);
      offset = mips_unspec_address (addr, SYMBOL_GOTOFF_LOADGP);
      incoming_address = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
      emit_insn (Pmode == SImode
                 ? gen_loadgp_newabi_si (pic_reg, offset, incoming_address)
                 : gen_loadgp_newabi_di (pic_reg, offset, incoming_address));
      break;

    case LOADGP_RTP:
      base = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_BASE));
      index = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_INDEX));
      emit_insn (Pmode == SImode
                 ? gen_loadgp_rtp_si (pic_reg, base, index)
                 : gen_loadgp_rtp_di (pic_reg, base, index));
      break;

    default:
      return;
    }

  if (TARGET_MIPS16)
    emit_insn (gen_copygp_mips16 (pic_offset_table_rtx, pic_reg));

  /* Emit a blockage if there are implicit uses of the GP register.
     This includes profiled functions, because FUNCTION_PROFILE uses
     a jal macro.  */
  if (!TARGET_EXPLICIT_RELOCS || crtl->profile)
    emit_insn (gen_loadgp_blockage ());
}
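
/* For illustration only: the LOADGP_ABSOLUTE case above typically ends up
   as something like

        lui     $gp,%hi(__gnu_local_gp)
        addiu   $gp,$gp,%lo(__gnu_local_gp)

   (with daddiu when Pmode is DImode); the exact sequence is determined by
   the loadgp_absolute_* patterns in mips.md, not by this function.  */
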
/* A for_each_rtx callback.  Stop the search if *X is a kernel register.  */

static int
mips_kernel_reg_p (rtx *x, void *data ATTRIBUTE_UNUSED)
{
  return REG_P (*x) && KERNEL_REG_P (REGNO (*x));
}

/* Expand the "prologue" pattern.  */

void
mips_expand_prologue (void)
{
  const struct mips_frame_info *frame;
  HOST_WIDE_INT size;
  unsigned int nargs;
  rtx insn;

  if (cfun->machine->global_pointer != INVALID_REGNUM)
    {
      /* Check whether an insn uses pic_offset_table_rtx, either explicitly
         or implicitly.  If so, we can commit to using a global pointer
         straight away, otherwise we need to defer the decision.  */
      if (mips_cfun_has_inflexible_gp_ref_p ()
          || mips_cfun_has_flexible_gp_ref_p ())
        {
          cfun->machine->must_initialize_gp_p = true;
          cfun->machine->must_restore_gp_when_clobbered_p = true;
        }

      SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
    }

  frame = &cfun->machine->frame;
  size = frame->total_size;

  /* Save the registers.  Allocate up to MIPS_MAX_FIRST_STACK_STEP
     bytes beforehand; this is enough to cover the register save area
     without going out of range.  */
  if (((frame->mask | frame->fmask | frame->acc_mask) != 0)
      || frame->num_cop0_regs > 0)
    {
      HOST_WIDE_INT step1;

      step1 = MIN (size, MIPS_MAX_FIRST_STACK_STEP);
      if (GENERATE_MIPS16E_SAVE_RESTORE)
        {
          HOST_WIDE_INT offset;
          unsigned int mask, regno;

          /* Try to merge argument stores into the save instruction.  */
          nargs = mips16e_collect_argument_saves ();

          /* Build the save instruction.  */
          mask = frame->mask;
          insn = mips16e_build_save_restore (false, &mask, &offset,
                                             nargs, step1);
          RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
          size -= step1;

          /* Check if we need to save other registers.  */
          for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
            if (BITSET_P (mask, regno - GP_REG_FIRST))
              {
                offset -= UNITS_PER_WORD;
                mips_save_restore_reg (word_mode, regno,
                                       offset, mips_save_reg);
              }
        }
      else
        {
          if (cfun->machine->interrupt_handler_p)
            {
              HOST_WIDE_INT offset;
              rtx mem;

              /* If this interrupt is using a shadow register set, we need to
                 get the stack pointer from the previous register set.  */
              if (cfun->machine->use_shadow_register_set_p)
                emit_insn (gen_mips_rdpgpr (stack_pointer_rtx,
                                            stack_pointer_rtx));

              if (!cfun->machine->keep_interrupts_masked_p)
                {
                  /* Move from COP0 Cause to K0.  */
                  emit_insn (gen_cop0_move (gen_rtx_REG (SImode, K0_REG_NUM),
                                            gen_rtx_REG (SImode,
                                                         COP0_CAUSE_REG_NUM)));
                  /* Move from COP0 EPC to K1.  */
                  emit_insn (gen_cop0_move (gen_rtx_REG (SImode, K1_REG_NUM),
                                            gen_rtx_REG (SImode,
                                                         COP0_EPC_REG_NUM)));
                }

              /* Allocate the first part of the frame.  */
              insn = gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
                                    GEN_INT (-step1));
              RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
              size -= step1;

              /* Start at the uppermost location for saving.  */
              offset = frame->cop0_sp_offset - size;
              if (!cfun->machine->keep_interrupts_masked_p)
                {
                  /* Push EPC into its stack slot.  */
                  mem = gen_frame_mem (word_mode,
                                       plus_constant (stack_pointer_rtx,
                                                      offset));
                  mips_emit_move (mem, gen_rtx_REG (word_mode, K1_REG_NUM));
                  offset -= UNITS_PER_WORD;
                }

              /* Move from COP0 Status to K1.  */
              emit_insn (gen_cop0_move (gen_rtx_REG (SImode, K1_REG_NUM),
                                        gen_rtx_REG (SImode,
                                                     COP0_STATUS_REG_NUM)));

              /* Right justify the RIPL in k0.  */
              if (!cfun->machine->keep_interrupts_masked_p)
                emit_insn (gen_lshrsi3 (gen_rtx_REG (SImode, K0_REG_NUM),
                                        gen_rtx_REG (SImode, K0_REG_NUM),
                                        GEN_INT (CAUSE_IPL)));

              /* Push Status into its stack slot.  */
              mem = gen_frame_mem (word_mode,
                                   plus_constant (stack_pointer_rtx, offset));
              mips_emit_move (mem, gen_rtx_REG (word_mode, K1_REG_NUM));
              offset -= UNITS_PER_WORD;

              /* Insert the RIPL into our copy of SR (k1) as the new IPL.  */
              if (!cfun->machine->keep_interrupts_masked_p)
                emit_insn (gen_insvsi (gen_rtx_REG (SImode, K1_REG_NUM),
                                       GEN_INT (6),
                                       GEN_INT (SR_IPL),
                                       gen_rtx_REG (SImode, K0_REG_NUM)));

              if (!cfun->machine->keep_interrupts_masked_p)
                /* Enable interrupts by clearing the KSU ERL and EXL bits.
                   IE is already the correct value, so we don't have to do
                   anything explicit.  */
                emit_insn (gen_insvsi (gen_rtx_REG (SImode, K1_REG_NUM),
                                       GEN_INT (4),
                                       GEN_INT (SR_EXL),
                                       gen_rtx_REG (SImode, GP_REG_FIRST)));
              else
                /* Disable interrupts by clearing the KSU, ERL, EXL,
                   and IE bits.  */
                emit_insn (gen_insvsi (gen_rtx_REG (SImode, K1_REG_NUM),
                                       GEN_INT (5),
                                       GEN_INT (SR_IE),
                                       gen_rtx_REG (SImode, GP_REG_FIRST)));
            }
          else
            {
              insn = gen_add3_insn (stack_pointer_rtx,
                                    stack_pointer_rtx,
                                    GEN_INT (-step1));
              RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
              size -= step1;
            }
          mips_for_each_saved_acc (size, mips_save_reg);
          mips_for_each_saved_gpr_and_fpr (size, mips_save_reg);
        }
    }

  /* Allocate the rest of the frame.  */
  if (size > 0)
    {
      if (SMALL_OPERAND (-size))
        RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
                                                       stack_pointer_rtx,
                                                       GEN_INT (-size)))) = 1;
      else
        {
          mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (size));
          if (TARGET_MIPS16)
            {
              /* There are no instructions to add or subtract registers
                 from the stack pointer, so use the frame pointer as a
                 temporary.  We should always be using a frame pointer
                 in this case anyway.  */
              gcc_assert (frame_pointer_needed);
              mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
              emit_insn (gen_sub3_insn (hard_frame_pointer_rtx,
                                        hard_frame_pointer_rtx,
                                        MIPS_PROLOGUE_TEMP (Pmode)));
              mips_emit_move (stack_pointer_rtx, hard_frame_pointer_rtx);
            }
          else
            emit_insn (gen_sub3_insn (stack_pointer_rtx,
                                      stack_pointer_rtx,
                                      MIPS_PROLOGUE_TEMP (Pmode)));

          /* Describe the combined effect of the previous instructions.  */
          mips_set_frame_expr
            (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
                          plus_constant (stack_pointer_rtx, -size)));
        }
    }

  /* Set up the frame pointer, if we're using one.  */
  if (frame_pointer_needed)
    {
      HOST_WIDE_INT offset;

      offset = frame->hard_frame_pointer_offset;
      if (offset == 0)
        {
          insn = mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
          RTX_FRAME_RELATED_P (insn) = 1;
        }
      else if (SMALL_OPERAND (offset))
        {
          insn = gen_add3_insn (hard_frame_pointer_rtx,
                                stack_pointer_rtx, GEN_INT (offset));
          RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
        }
      else
        {
          mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (offset));
          mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
          emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
                                    hard_frame_pointer_rtx,
                                    MIPS_PROLOGUE_TEMP (Pmode)));
          mips_set_frame_expr
            (gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
                          plus_constant (stack_pointer_rtx, offset)));
        }
    }

  mips_emit_loadgp ();

  /* Initialize the $gp save slot.  */
  if (mips_cfun_has_cprestore_slot_p ())
    {
      rtx base, mem, gp, temp;
      HOST_WIDE_INT offset;

      mips_get_cprestore_base_and_offset (&base, &offset, false);
      mem = gen_frame_mem (Pmode, plus_constant (base, offset));
      gp = TARGET_MIPS16 ? MIPS16_PIC_TEMP : pic_offset_table_rtx;
      temp = (SMALL_OPERAND (offset)
              ? gen_rtx_SCRATCH (Pmode)
              : MIPS_PROLOGUE_TEMP (Pmode));
      emit_insn (gen_potential_cprestore (mem, GEN_INT (offset), gp, temp));

      mips_get_cprestore_base_and_offset (&base, &offset, true);
      mem = gen_frame_mem (Pmode, plus_constant (base, offset));
      emit_insn (gen_use_cprestore (mem));
    }

  /* We need to search back to the last use of K0 or K1.  */
  if (cfun->machine->interrupt_handler_p)
    {
      for (insn = get_last_insn (); insn != NULL_RTX; insn = PREV_INSN (insn))
        if (INSN_P (insn)
            && for_each_rtx (&PATTERN (insn), mips_kernel_reg_p, NULL))
          break;
      /* Emit a move from K1 to COP0 Status after insn.  */
      gcc_assert (insn != NULL_RTX);
      emit_insn_after (gen_cop0_move (gen_rtx_REG (SImode, COP0_STATUS_REG_NUM),
                                      gen_rtx_REG (SImode, K1_REG_NUM)),
                       insn);
    }

  /* If we are profiling, make sure no instructions are scheduled before
     the call to mcount.  */
  if (crtl->profile)
    emit_insn (gen_blockage ());
}
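
/* For illustration only: for a small non-PIC frame, the RTL emitted above
   typically assembles to something like

        addiu   $sp,$sp,-32
        sw      $31,28($sp)

   i.e. a single stack adjustment of at most MIPS_MAX_FIRST_STACK_STEP
   bytes followed by the register saves; larger frames add a second
   adjustment as described in the code.  */
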
/* Emit instructions to restore register REG from slot MEM.  */

static void
mips_restore_reg (rtx reg, rtx mem)
{
  /* There's no MIPS16 instruction to load $31 directly.  Load into
     $7 instead and adjust the return insn appropriately.  */
  if (TARGET_MIPS16 && REGNO (reg) == RETURN_ADDR_REGNUM)
    reg = gen_rtx_REG (GET_MODE (reg), GP_REG_FIRST + 7);

  mips_emit_save_slot_move (reg, mem, MIPS_EPILOGUE_TEMP (GET_MODE (reg)));
}

/* Emit any instructions needed before a return.  */

void
mips_expand_before_return (void)
{
  /* When using a call-clobbered gp, we start out with unified call
     insns that include instructions to restore the gp.  We then split
     these unified calls after reload.  These split calls explicitly
     clobber gp, so there is no need to define
     PIC_OFFSET_TABLE_REG_CALL_CLOBBERED.

     For consistency, we should also insert an explicit clobber of $28
     before return insns, so that the post-reload optimizers know that
     the register is not live on exit.  */
  if (TARGET_CALL_CLOBBERED_GP)
    emit_clobber (pic_offset_table_rtx);
}

/* Expand an "epilogue" or "sibcall_epilogue" pattern; SIBCALL_P
   says which.  */

void
mips_expand_epilogue (bool sibcall_p)
{
  const struct mips_frame_info *frame;
  HOST_WIDE_INT step1, step2;
  rtx base, target, insn;

  if (!sibcall_p && mips_can_use_return_insn ())
    {
      emit_jump_insn (gen_return ());
      return;
    }

  /* In MIPS16 mode, if the return value should go into a floating-point
     register, we need to call a helper routine to copy it over.  */
  if (mips16_cfun_returns_in_fpr_p ())
    mips16_copy_fpr_return_value ();

  /* Split the frame into two.  STEP1 is the amount of stack we should
     deallocate before restoring the registers.  STEP2 is the amount we
     should deallocate afterwards.

     Start off by assuming that no registers need to be restored.  */
  frame = &cfun->machine->frame;
  step1 = frame->total_size;
  step2 = 0;

  /* Work out which register holds the frame address.  */
  if (!frame_pointer_needed)
    base = stack_pointer_rtx;
  else
    {
      base = hard_frame_pointer_rtx;
      step1 -= frame->hard_frame_pointer_offset;
    }

  /* If we need to restore registers, deallocate as much stack as
     possible in the second step without going out of range.  */
  if ((frame->mask | frame->fmask | frame->acc_mask) != 0
      || frame->num_cop0_regs > 0)
    {
      step2 = MIN (step1, MIPS_MAX_FIRST_STACK_STEP);
      step1 -= step2;
    }

  /* Set TARGET to BASE + STEP1.  */
  target = base;
  if (step1 > 0)
    {
      rtx adjust;

      /* Get an rtx for STEP1 that we can add to BASE.  */
      adjust = GEN_INT (step1);
      if (!SMALL_OPERAND (step1))
        {
          mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), adjust);
          adjust = MIPS_EPILOGUE_TEMP (Pmode);
        }

      /* Normal mode code can copy the result straight into $sp.  */
      if (!TARGET_MIPS16)
        target = stack_pointer_rtx;

      emit_insn (gen_add3_insn (target, base, adjust));
    }

  /* Copy TARGET into the stack pointer.  */
  if (target != stack_pointer_rtx)
    mips_emit_move (stack_pointer_rtx, target);

  /* If we're using addressing macros, $gp is implicitly used by all
     SYMBOL_REFs.  We must emit a blockage insn before restoring $gp
     from the stack.  */
  if (TARGET_CALL_SAVED_GP && !TARGET_EXPLICIT_RELOCS)
    emit_insn (gen_blockage ());

  if (GENERATE_MIPS16E_SAVE_RESTORE && frame->mask != 0)
    {
      unsigned int regno, mask;
      HOST_WIDE_INT offset;
      rtx restore;

      /* Generate the restore instruction.  */
      mask = frame->mask;
      restore = mips16e_build_save_restore (true, &mask, &offset, 0, step2);

      /* Restore any other registers manually.  */
      for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
        if (BITSET_P (mask, regno - GP_REG_FIRST))
          {
            offset -= UNITS_PER_WORD;
            mips_save_restore_reg (word_mode, regno, offset, mips_restore_reg);
          }

      /* Restore the remaining registers and deallocate the final bit
         of the frame.  */
      emit_insn (restore);
    }
  else
    {
      /* Restore the registers.  */
      mips_for_each_saved_acc (frame->total_size - step2, mips_restore_reg);
      mips_for_each_saved_gpr_and_fpr (frame->total_size - step2,
                                       mips_restore_reg);

      if (cfun->machine->interrupt_handler_p)
        {
          HOST_WIDE_INT offset;
          rtx mem;

          offset = frame->cop0_sp_offset - (frame->total_size - step2);
          if (!cfun->machine->keep_interrupts_masked_p)
            {
              /* Restore the original EPC.  */
              mem = gen_frame_mem (word_mode,
                                   plus_constant (stack_pointer_rtx, offset));
              mips_emit_move (gen_rtx_REG (word_mode, K0_REG_NUM), mem);
              offset -= UNITS_PER_WORD;

              /* Move to COP0 EPC.  */
              emit_insn (gen_cop0_move (gen_rtx_REG (SImode, COP0_EPC_REG_NUM),
                                        gen_rtx_REG (SImode, K0_REG_NUM)));
            }

          /* Restore the original Status.  */
          mem = gen_frame_mem (word_mode,
                               plus_constant (stack_pointer_rtx, offset));
          mips_emit_move (gen_rtx_REG (word_mode, K0_REG_NUM), mem);
          offset -= UNITS_PER_WORD;

          /* If we don't use a shadow register set, we need to update SP.  */
          if (!cfun->machine->use_shadow_register_set_p && step2 > 0)
            emit_insn (gen_add3_insn (stack_pointer_rtx,
                                      stack_pointer_rtx,
                                      GEN_INT (step2)));

          /* Move to COP0 Status.  */
          emit_insn (gen_cop0_move (gen_rtx_REG (SImode, COP0_STATUS_REG_NUM),
                                    gen_rtx_REG (SImode, K0_REG_NUM)));
        }
      else
        {
          /* Deallocate the final bit of the frame.  */
          if (step2 > 0)
            emit_insn (gen_add3_insn (stack_pointer_rtx,
                                      stack_pointer_rtx,
                                      GEN_INT (step2)));
        }
    }

  /* Add in the __builtin_eh_return stack adjustment.  We need to
     use a temporary in MIPS16 code.  */
  if (crtl->calls_eh_return)
    {
      if (TARGET_MIPS16)
        {
          mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), stack_pointer_rtx);
          emit_insn (gen_add3_insn (MIPS_EPILOGUE_TEMP (Pmode),
                                    MIPS_EPILOGUE_TEMP (Pmode),
                                    EH_RETURN_STACKADJ_RTX));
          mips_emit_move (stack_pointer_rtx, MIPS_EPILOGUE_TEMP (Pmode));
        }
      else
        emit_insn (gen_add3_insn (stack_pointer_rtx,
                                  stack_pointer_rtx,
                                  EH_RETURN_STACKADJ_RTX));
    }

  if (!sibcall_p)
    {
      mips_expand_before_return ();
      if (cfun->machine->interrupt_handler_p)
        {
          /* Interrupt handlers generate eret or deret.  */
          if (cfun->machine->use_debug_exception_return_p)
            emit_jump_insn (gen_mips_deret ());
          else
            emit_jump_insn (gen_mips_eret ());
        }
      else
        {
          unsigned int regno;

          /* When generating MIPS16 code, the normal
             mips_for_each_saved_gpr_and_fpr path will restore the return
             address into $7 rather than $31.  */
          if (TARGET_MIPS16
              && !GENERATE_MIPS16E_SAVE_RESTORE
              && BITSET_P (frame->mask, RETURN_ADDR_REGNUM))
            regno = GP_REG_FIRST + 7;
          else
            regno = RETURN_ADDR_REGNUM;
          emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode, regno)));
        }
    }

  /* Search from the beginning to the first use of K0 or K1.  */
  if (cfun->machine->interrupt_handler_p
      && !cfun->machine->keep_interrupts_masked_p)
    {
      for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn))
        if (INSN_P (insn)
            && for_each_rtx (&PATTERN (insn), mips_kernel_reg_p, NULL))
          break;
      gcc_assert (insn != NULL_RTX);
      /* Insert disable interrupts before the first use of K0 or K1.  */
      emit_insn_before (gen_mips_di (), insn);
      emit_insn_before (gen_mips_ehb (), insn);
    }
}
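
/* For illustration only: the matching epilogue for the small frame shown
   after mips_expand_prologue would typically assemble to something like

        lw      $31,28($sp)
        addiu   $sp,$sp,32
        jr      $31

   with interrupt handlers instead finishing with eret or deret as
   emitted above.  */
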
/* Return nonzero if this function is known to have a null epilogue.
   This allows the optimizer to omit jumps to jumps if no stack
   was created.  */

bool
mips_can_use_return_insn (void)
{
  /* Interrupt handlers need to go through the epilogue.  */
  if (cfun->machine->interrupt_handler_p)
    return false;

  if (!reload_completed)
    return false;

  if (crtl->profile)
    return false;

  /* In MIPS16 mode, a function that returns a floating-point value
     needs to arrange to copy the return value into the floating-point
     registers.  */
  if (mips16_cfun_returns_in_fpr_p ())
    return false;

  return cfun->machine->frame.total_size == 0;
}

/* Return true if register REGNO can store a value of mode MODE.
   The result of this function is cached in mips_hard_regno_mode_ok.  */

static bool
mips_hard_regno_mode_ok_p (unsigned int regno, enum machine_mode mode)
{
  unsigned int size;
  enum mode_class mclass;

  if (mode == CCV2mode)
    return (ISA_HAS_8CC
            && ST_REG_P (regno)
            && (regno - ST_REG_FIRST) % 2 == 0);

  if (mode == CCV4mode)
    return (ISA_HAS_8CC
            && ST_REG_P (regno)
            && (regno - ST_REG_FIRST) % 4 == 0);

  if (mode == CCmode)
    {
      if (!ISA_HAS_8CC)
        return regno == FPSW_REGNUM;

      return (ST_REG_P (regno)
              || GP_REG_P (regno)
              || FP_REG_P (regno));
    }

  size = GET_MODE_SIZE (mode);
  mclass = GET_MODE_CLASS (mode);

  if (GP_REG_P (regno))
    return ((regno - GP_REG_FIRST) & 1) == 0 || size <= UNITS_PER_WORD;

  if (FP_REG_P (regno)
      && (((regno - FP_REG_FIRST) % MAX_FPRS_PER_FMT) == 0
          || (MIN_FPRS_PER_FMT == 1 && size <= UNITS_PER_FPREG)))
    {
      /* Allow TFmode for CCmode reloads.  */
      if (mode == TFmode && ISA_HAS_8CC)
        return true;

      /* Allow 64-bit vector modes for Loongson-2E/2F.  */
      if (TARGET_LOONGSON_VECTORS
          && (mode == V2SImode
              || mode == V4HImode
              || mode == V8QImode
              || mode == DImode))
        return true;

      if (mclass == MODE_FLOAT
          || mclass == MODE_COMPLEX_FLOAT
          || mclass == MODE_VECTOR_FLOAT)
        return size <= UNITS_PER_FPVALUE;

      /* Allow integer modes that fit into a single register.  We need
         to put integers into FPRs when using instructions like CVT
         and TRUNC.  There's no point allowing sizes smaller than a word,
         because the FPU has no appropriate load/store instructions.  */
      if (mclass == MODE_INT)
        return size >= MIN_UNITS_PER_WORD && size <= UNITS_PER_FPREG;
    }

  if (ACC_REG_P (regno)
      && (INTEGRAL_MODE_P (mode) || ALL_FIXED_POINT_MODE_P (mode)))
    {
      if (MD_REG_P (regno))
        {
          /* After a multiplication or division, clobbering HI makes
             the value of LO unpredictable, and vice versa.  This means
             that, for all interesting cases, HI and LO are effectively
             a single register.

             We model this by requiring that any value that uses HI
             also uses LO.  */
          if (size <= UNITS_PER_WORD * 2)
            return regno == (size <= UNITS_PER_WORD ? LO_REGNUM : MD_REG_FIRST);
        }
      else
        {
          /* DSP accumulators do not have the same restrictions as
             HI and LO, so we can treat them as normal doubleword
             registers.  */
          if (size <= UNITS_PER_WORD)
            return true;

          if (size <= UNITS_PER_WORD * 2
              && ((regno - DSP_ACC_REG_FIRST) & 1) == 0)
            return true;
        }
    }

  if (ALL_COP_REG_P (regno))
    return mclass == MODE_INT && size <= UNITS_PER_WORD;

  if (regno == GOT_VERSION_REGNUM)
    return mode == SImode;

  return false;
}
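
/* For illustration only, a worked example of the HI/LO rule above on a
   32-bit target: an SImode value may live in LO (LO_REGNUM), while a
   DImode value (two words) must occupy the HI/LO pair and therefore must
   start at MD_REG_FIRST; any other placement makes
   mips_hard_regno_mode_ok_p return false.  */
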
/* Implement HARD_REGNO_NREGS.  */

unsigned int
mips_hard_regno_nregs (int regno, enum machine_mode mode)
{
  if (ST_REG_P (regno))
    /* The size of FP status registers is always 4, because they only hold
       CCmode values, and CCmode is always considered to be 4 bytes wide.  */
    return (GET_MODE_SIZE (mode) + 3) / 4;

  if (FP_REG_P (regno))
    return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG;

  /* All other registers are word-sized.  */
  return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
}
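
/* For illustration only, a worked example of the formula above: with
   32-bit FPRs (UNITS_PER_FPREG == 4) a DFmode value needs
   (8 + 4 - 1) / 4 == 2 registers, whereas with 64-bit FPRs
   (UNITS_PER_FPREG == 8) it needs (8 + 8 - 1) / 8 == 1.  */
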
/* Implement CLASS_MAX_NREGS, taking the maximum of the cases
   in mips_hard_regno_nregs.  */

int
mips_class_max_nregs (enum reg_class rclass, enum machine_mode mode)
{
  int size;
  HARD_REG_SET left;

  size = 0x8000;
  COPY_HARD_REG_SET (left, reg_class_contents[(int) rclass]);
  if (hard_reg_set_intersect_p (left, reg_class_contents[(int) ST_REGS]))
    {
      size = MIN (size, 4);
      AND_COMPL_HARD_REG_SET (left, reg_class_contents[(int) ST_REGS]);
    }
  if (hard_reg_set_intersect_p (left, reg_class_contents[(int) FP_REGS]))
    {
      size = MIN (size, UNITS_PER_FPREG);
      AND_COMPL_HARD_REG_SET (left, reg_class_contents[(int) FP_REGS]);
    }
  if (!hard_reg_set_empty_p (left))
    size = MIN (size, UNITS_PER_WORD);
  return (GET_MODE_SIZE (mode) + size - 1) / size;
}
/* Implement CANNOT_CHANGE_MODE_CLASS.  */

bool
mips_cannot_change_mode_class (enum machine_mode from ATTRIBUTE_UNUSED,
                               enum machine_mode to ATTRIBUTE_UNUSED,
                               enum reg_class rclass)
{
  /* There are several problems with changing the modes of values
     in floating-point registers:

     - When a multi-word value is stored in paired floating-point
       registers, the first register always holds the low word.
       We therefore can't allow FPRs to change between single-word
       and multi-word modes on big-endian targets.

     - GCC assumes that each word of a multiword register can be accessed
       individually using SUBREGs.  This is not true for floating-point
       registers if they are bigger than a word.

     - Loading a 32-bit value into a 64-bit floating-point register
       will not sign-extend the value, despite what LOAD_EXTEND_OP says.
       We can't allow FPRs to change from SImode to a wider mode on
       64-bit targets.

     - If the FPU has already interpreted a value in one format, we must
       not ask it to treat the value as having a different format.

     We therefore disallow all mode changes involving FPRs.  */
  return reg_classes_intersect_p (FP_REGS, rclass);
}

/* Return true if moves in mode MODE can use the FPU's mov.fmt instruction.  */

static bool
mips_mode_ok_for_mov_fmt_p (enum machine_mode mode)
{
  switch (mode)
    {
    case SFmode:
      return TARGET_HARD_FLOAT;

    case DFmode:
      return TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT;

    case V2SFmode:
      return TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT;

    default:
      return false;
    }
}

/* Implement MODES_TIEABLE_P.  */

bool
mips_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
{
  /* FPRs allow no mode punning, so it's not worth tying modes if we'd
     prefer to put one of them in FPRs.  */
  return (mode1 == mode2
          || (!mips_mode_ok_for_mov_fmt_p (mode1)
              && !mips_mode_ok_for_mov_fmt_p (mode2)));
}

/* Implement PREFERRED_RELOAD_CLASS.  */

enum reg_class
mips_preferred_reload_class (rtx x, enum reg_class rclass)
{
  if (mips_dangerous_for_la25_p (x) && reg_class_subset_p (LEA_REGS, rclass))
    return LEA_REGS;

  if (reg_class_subset_p (FP_REGS, rclass)
      && mips_mode_ok_for_mov_fmt_p (GET_MODE (x)))
    return FP_REGS;

  if (reg_class_subset_p (GR_REGS, rclass))
    rclass = GR_REGS;

  if (TARGET_MIPS16 && reg_class_subset_p (M16_REGS, rclass))
    rclass = M16_REGS;

  return rclass;
}

/* RCLASS is a class involved in a REGISTER_MOVE_COST calculation.
   Return a "canonical" class to represent it in later calculations.  */

static enum reg_class
mips_canonicalize_move_class (enum reg_class rclass)
{
  /* All moves involving accumulator registers have the same cost.  */
  if (reg_class_subset_p (rclass, ACC_REGS))
    rclass = ACC_REGS;

  /* Likewise promote subclasses of general registers to the most
     interesting containing class.  */
  if (TARGET_MIPS16 && reg_class_subset_p (rclass, M16_REGS))
    rclass = M16_REGS;
  else if (reg_class_subset_p (rclass, GENERAL_REGS))
    rclass = GENERAL_REGS;

  return rclass;
}

/* Return the cost of moving a value of mode MODE from a register of
   class FROM to a GPR.  Return 0 for classes that are unions of other
   classes handled by this function.  */

static int
mips_move_to_gpr_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
                       enum reg_class from)
{
  switch (from)
    {
    case GENERAL_REGS:
      /* A MIPS16 MOVE instruction, or a non-MIPS16 MOVE macro.  */
      return 2;

    case ACC_REGS:
      /* MFLO and MFHI.  */
      return 6;

    case FP_REGS:
      /* MFC1, etc.  */
      return 4;

    case ST_REGS:
      /* LUI followed by MOVF.  */
      return 4;

    case COP0_REGS:
    case COP2_REGS:
    case COP3_REGS:
      /* This choice of value is historical.  */
      return 5;

    default:
      return 0;
    }
}

/* Return the cost of moving a value of mode MODE from a GPR to a
   register of class TO.  Return 0 for classes that are unions of
   other classes handled by this function.  */

static int
mips_move_from_gpr_cost (enum machine_mode mode, enum reg_class to)
{
  switch (to)
    {
    case GENERAL_REGS:
      /* A MIPS16 MOVE instruction, or a non-MIPS16 MOVE macro.  */
      return 2;

    case ACC_REGS:
      /* MTLO and MTHI.  */
      return 6;

    case FP_REGS:
      /* MTC1, etc.  */
      return 4;

    case ST_REGS:
      /* A secondary reload through an FPR scratch.  */
      return (mips_register_move_cost (mode, GENERAL_REGS, FP_REGS)
              + mips_register_move_cost (mode, FP_REGS, ST_REGS));

    case COP0_REGS:
    case COP2_REGS:
    case COP3_REGS:
      /* This choice of value is historical.  */
      return 5;

    default:
      return 0;
    }
}

/* Implement REGISTER_MOVE_COST.  Return 0 for classes that are the
   maximum of the move costs for subclasses; regclass will work out
   the maximum for us.  */

int
mips_register_move_cost (enum machine_mode mode,
                         enum reg_class from, enum reg_class to)
{
  enum reg_class dregs;
  int cost1, cost2;

  from = mips_canonicalize_move_class (from);
  to = mips_canonicalize_move_class (to);

  /* Handle moves that can be done without using general-purpose registers.  */
  if (from == FP_REGS)
    {
      if (to == FP_REGS && mips_mode_ok_for_mov_fmt_p (mode))
        /* MOV.FMT.  */
        return 4;
      if (to == ST_REGS)
        /* The sequence generated by mips_expand_fcc_reload.  */
        return 8;
    }

  /* Handle cases in which only one class deviates from the ideal.  */
  dregs = TARGET_MIPS16 ? M16_REGS : GENERAL_REGS;
  if (from == dregs)
    return mips_move_from_gpr_cost (mode, to);
  if (to == dregs)
    return mips_move_to_gpr_cost (mode, from);

  /* Handle cases that require a GPR temporary.  */
  cost1 = mips_move_to_gpr_cost (mode, from);
  if (cost1 != 0)
    {
      cost2 = mips_move_from_gpr_cost (mode, to);
      if (cost2 != 0)
        return cost1 + cost2;
    }

  return 0;
}
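
/* For illustration only, a worked example of the cost composition above:
   a non-MIPS16 move from ACC_REGS to FP_REGS has no direct path, so its
   cost is mips_move_to_gpr_cost (ACC_REGS) + mips_move_from_gpr_cost
   (FP_REGS) == 6 + 4 == 10.  */
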
/* Implement TARGET_IRA_COVER_CLASSES.  */

static const enum reg_class *
mips_ira_cover_classes (void)
{
  static const enum reg_class acc_classes[] = {
    GR_AND_ACC_REGS, FP_REGS, COP0_REGS, COP2_REGS, COP3_REGS,
    ST_REGS, LIM_REG_CLASSES
  };
  static const enum reg_class no_acc_classes[] = {
    GR_REGS, FP_REGS, COP0_REGS, COP2_REGS, COP3_REGS,
    ST_REGS, LIM_REG_CLASSES
  };

  /* Don't allow the register allocators to use LO and HI in MIPS16 mode,
     which has no MTLO or MTHI instructions.  Also, using GR_AND_ACC_REGS
     as a cover class only works well when we keep per-register costs.
     Using it when not optimizing can cause us to think accumulators
     have the same cost as GPRs in cases where GPRs are actually much
     cheaper.  */
  return TARGET_MIPS16 || !optimize ? no_acc_classes : acc_classes;
}

/* Return the register class required for a secondary register when
   copying between one of the registers in RCLASS and value X, which
   has mode MODE.  X is the source of the move if IN_P, otherwise it
   is the destination.  Return NO_REGS if no secondary register is
   needed.  */

enum reg_class
mips_secondary_reload_class (enum reg_class rclass,
                             enum machine_mode mode, rtx x, bool in_p)
{
  int regno;

  /* If X is a constant that cannot be loaded into $25, it must be loaded
     into some other GPR.  No other register class allows a direct move.  */
  if (mips_dangerous_for_la25_p (x))
    return reg_class_subset_p (rclass, LEA_REGS) ? NO_REGS : LEA_REGS;

  regno = true_regnum (x);
  if (TARGET_MIPS16)
    {
      /* In MIPS16 mode, every move must involve a member of M16_REGS.  */
      if (!reg_class_subset_p (rclass, M16_REGS) && !M16_REG_P (regno))
        return M16_REGS;

      return NO_REGS;
    }

  /* Copying from accumulator registers to anywhere other than a general
     register requires a temporary general register.  */
  if (reg_class_subset_p (rclass, ACC_REGS))
    return GP_REG_P (regno) ? NO_REGS : GR_REGS;
  if (ACC_REG_P (regno))
    return reg_class_subset_p (rclass, GR_REGS) ? NO_REGS : GR_REGS;

  /* We can only copy a value to a condition code register from a
     floating-point register, and even then we require a scratch
     floating-point register.  We can only copy a value out of a
     condition-code register into a general register.  */
  if (reg_class_subset_p (rclass, ST_REGS))
    {
      if (in_p)
        return FP_REGS;
      return GP_REG_P (regno) ? NO_REGS : GR_REGS;
    }
  if (ST_REG_P (regno))
    {
      if (!in_p)
        return FP_REGS;
      return reg_class_subset_p (rclass, GR_REGS) ? NO_REGS : GR_REGS;
    }

  if (reg_class_subset_p (rclass, FP_REGS))
    {
      if (MEM_P (x)
          && (GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8))
        /* In this case we can use lwc1, swc1, ldc1 or sdc1.  We'll use
           pairs of lwc1s and swc1s if ldc1 and sdc1 are not supported.  */
        return NO_REGS;

      if (GP_REG_P (regno) || x == CONST0_RTX (mode))
        /* In this case we can use mtc1, mfc1, dmtc1 or dmfc1.  */
        return NO_REGS;

      if (CONSTANT_P (x) && !targetm.cannot_force_const_mem (x))
        /* We can force the constant to memory and use lwc1
           and ldc1.  As above, we will use pairs of lwc1s if
           ldc1 is not supported.  */
        return NO_REGS;

      if (FP_REG_P (regno) && mips_mode_ok_for_mov_fmt_p (mode))
        /* In this case we can use mov.fmt.  */
        return NO_REGS;

      /* Otherwise, we need to reload through an integer register.  */
      return GR_REGS;
    }
  if (FP_REG_P (regno))
    return reg_class_subset_p (rclass, GR_REGS) ? NO_REGS : GR_REGS;

  return NO_REGS;
}
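
/* For illustration only, two examples of the rules above: copying a value
   into an ST (condition-code) register is only possible from an FPR, so
   the function reports FP_REGS as the secondary reload class for that
   direction; copying between an FPR and a 4- or 8-byte memory slot needs
   no secondary register at all, because lwc1/swc1 (and ldc1/sdc1 or lwc1
   pairs) can be used directly.  */
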
/* Implement TARGET_MODE_REP_EXTENDED.  */

static int
mips_mode_rep_extended (enum machine_mode mode, enum machine_mode mode_rep)
{
  /* On 64-bit targets, SImode register values are sign-extended to DImode.  */
  if (TARGET_64BIT && mode == SImode && mode_rep == DImode)
    return SIGN_EXTEND;

  return UNKNOWN;
}

/* Implement TARGET_VALID_POINTER_MODE.  */

static bool
mips_valid_pointer_mode (enum machine_mode mode)
{
  return mode == SImode || (TARGET_64BIT && mode == DImode);
}

/* Implement TARGET_VECTOR_MODE_SUPPORTED_P.  */

static bool
mips_vector_mode_supported_p (enum machine_mode mode)
{
  switch (mode)
    {
    case V2SFmode:
      return TARGET_PAIRED_SINGLE_FLOAT;

    case V2HImode:
    case V4QImode:
    case V2HQmode:
    case V2UHQmode:
    case V2HAmode:
    case V2UHAmode:
    case V4QQmode:
    case V4UQQmode:
      return TARGET_DSP;

    case V2SImode:
    case V4HImode:
    case V8QImode:
      return TARGET_LOONGSON_VECTORS;

    default:
      return false;
    }
}

/* Implement TARGET_SCALAR_MODE_SUPPORTED_P.  */

static bool
mips_scalar_mode_supported_p (enum machine_mode mode)
{
  if (ALL_FIXED_POINT_MODE_P (mode)
      && GET_MODE_PRECISION (mode) <= 2 * BITS_PER_WORD)
    return true;

  return default_scalar_mode_supported_p (mode);
}

/* Implement TARGET_INIT_LIBFUNCS.  */

#include "config/gofast.h"

static void
mips_init_libfuncs (void)
{
  if (TARGET_FIX_VR4120)
    {
      /* Register the special divsi3 and modsi3 functions needed to work
         around VR4120 division errata.  */
      set_optab_libfunc (sdiv_optab, SImode, "__vr4120_divsi3");
      set_optab_libfunc (smod_optab, SImode, "__vr4120_modsi3");
    }

  if (TARGET_MIPS16 && TARGET_HARD_FLOAT_ABI)
    {
      /* Register the MIPS16 -mhard-float stubs.  */
      set_optab_libfunc (add_optab, SFmode, "__mips16_addsf3");
      set_optab_libfunc (sub_optab, SFmode, "__mips16_subsf3");
      set_optab_libfunc (smul_optab, SFmode, "__mips16_mulsf3");
      set_optab_libfunc (sdiv_optab, SFmode, "__mips16_divsf3");

      set_optab_libfunc (eq_optab, SFmode, "__mips16_eqsf2");
      set_optab_libfunc (ne_optab, SFmode, "__mips16_nesf2");
      set_optab_libfunc (gt_optab, SFmode, "__mips16_gtsf2");
      set_optab_libfunc (ge_optab, SFmode, "__mips16_gesf2");
      set_optab_libfunc (lt_optab, SFmode, "__mips16_ltsf2");
      set_optab_libfunc (le_optab, SFmode, "__mips16_lesf2");
      set_optab_libfunc (unord_optab, SFmode, "__mips16_unordsf2");

      set_conv_libfunc (sfix_optab, SImode, SFmode, "__mips16_fix_truncsfsi");
      set_conv_libfunc (sfloat_optab, SFmode, SImode, "__mips16_floatsisf");
      set_conv_libfunc (ufloat_optab, SFmode, SImode, "__mips16_floatunsisf");

      if (TARGET_DOUBLE_FLOAT)
        {
          set_optab_libfunc (add_optab, DFmode, "__mips16_adddf3");
          set_optab_libfunc (sub_optab, DFmode, "__mips16_subdf3");
          set_optab_libfunc (smul_optab, DFmode, "__mips16_muldf3");
          set_optab_libfunc (sdiv_optab, DFmode, "__mips16_divdf3");

          set_optab_libfunc (eq_optab, DFmode, "__mips16_eqdf2");
          set_optab_libfunc (ne_optab, DFmode, "__mips16_nedf2");
          set_optab_libfunc (gt_optab, DFmode, "__mips16_gtdf2");
          set_optab_libfunc (ge_optab, DFmode, "__mips16_gedf2");
          set_optab_libfunc (lt_optab, DFmode, "__mips16_ltdf2");
          set_optab_libfunc (le_optab, DFmode, "__mips16_ledf2");
          set_optab_libfunc (unord_optab, DFmode, "__mips16_unorddf2");

          set_conv_libfunc (sext_optab, DFmode, SFmode,
                            "__mips16_extendsfdf2");
          set_conv_libfunc (trunc_optab, SFmode, DFmode,
                            "__mips16_truncdfsf2");
          set_conv_libfunc (sfix_optab, SImode, DFmode,
                            "__mips16_fix_truncdfsi");
          set_conv_libfunc (sfloat_optab, DFmode, SImode,
                            "__mips16_floatsidf");
          set_conv_libfunc (ufloat_optab, DFmode, SImode,
                            "__mips16_floatunsidf");
        }
    }
  else
    /* Register the gofast functions if selected using --enable-gofast.  */
    gofast_maybe_init_libfuncs ();

  /* The MIPS16 ISA does not have an encoding for "sync", so we rely
     on an external non-MIPS16 routine to implement __sync_synchronize.  */
  if (TARGET_MIPS16)
    synchronize_libfunc = init_one_libfunc ("__sync_synchronize");
}


/* Build up a multi-insn sequence that loads label TARGET into $AT.  */

static void
mips_process_load_label (rtx target)
{
  rtx base, gp, intop;
  HOST_WIDE_INT offset;

  mips_multi_start ();
  switch (mips_abi)
    {
    case ABI_N32:
      mips_multi_add_insn ("lw\t%@,%%got_page(%0)(%+)", target, 0);
      mips_multi_add_insn ("addiu\t%@,%@,%%got_ofst(%0)", target, 0);
      break;

    case ABI_64:
      mips_multi_add_insn ("ld\t%@,%%got_page(%0)(%+)", target, 0);
      mips_multi_add_insn ("daddiu\t%@,%@,%%got_ofst(%0)", target, 0);
      break;

    default:
      gp = pic_offset_table_rtx;
      if (mips_cfun_has_cprestore_slot_p ())
        {
          gp = gen_rtx_REG (Pmode, AT_REGNUM);
          mips_get_cprestore_base_and_offset (&base, &offset, true);
          if (!SMALL_OPERAND (offset))
            {
              intop = GEN_INT (CONST_HIGH_PART (offset));
              mips_multi_add_insn ("lui\t%0,%1", gp, intop, 0);
              mips_multi_add_insn ("addu\t%0,%0,%1", gp, base, 0);

              base = gp;
              offset = CONST_LOW_PART (offset);
            }
          intop = GEN_INT (offset);
          if (ISA_HAS_LOAD_DELAY)
            mips_multi_add_insn ("lw\t%0,%1(%2)%#", gp, intop, base, 0);
          else
            mips_multi_add_insn ("lw\t%0,%1(%2)", gp, intop, base, 0);
        }
      if (ISA_HAS_LOAD_DELAY)
        mips_multi_add_insn ("lw\t%@,%%got(%0)(%1)%#", target, gp, 0);
      else
        mips_multi_add_insn ("lw\t%@,%%got(%0)(%1)", target, gp, 0);
      mips_multi_add_insn ("addiu\t%@,%@,%%lo(%0)", target, 0);
      break;
    }
}
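
/* Illustrative sketch only (not emitted verbatim by the compiler):
   with the ABI_N32 templates above, loading the address of a label L1
   into $1 produces a two-instruction sequence along the lines of

       lw      $1,%got_page(L1)(<gp>)
       addiu   $1,$1,%got_ofst(L1)

   where <gp> stands for whatever register the %+ directive prints.
   The o32 "default" case instead reloads $gp from the cprestore slot,
   when one exists, and then uses %got/%lo relocations.  */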

/* Return the number of instructions needed to load a label into $AT.  */

static unsigned int
mips_load_label_length (void)
{
  if (cfun->machine->load_label_length == 0)
    {
      mips_process_load_label (pc_rtx);
      cfun->machine->load_label_length = mips_multi_num_insns;
    }
  return cfun->machine->load_label_length;
}

/* Emit an asm sequence to start a noat block and load the address
   of a label into $1.  */

void
mips_output_load_label (rtx target)
{
  mips_push_asm_switch (&mips_noat);
  if (TARGET_EXPLICIT_RELOCS)
    {
      mips_process_load_label (target);
      mips_multi_write ();
    }
  else
    {
      if (Pmode == DImode)
        output_asm_insn ("dla\t%@,%0", &target);
      else
        output_asm_insn ("la\t%@,%0", &target);
    }
}

/* Return the length of INSN.  LENGTH is the initial length computed by
   attributes in the machine-description file.  */

int
mips_adjust_insn_length (rtx insn, int length)
{
  /* mips.md uses MAX_PIC_BRANCH_LENGTH as a placeholder for the length
     of a PIC long-branch sequence.  Substitute the correct value.  */
  if (length == MAX_PIC_BRANCH_LENGTH
      && INSN_CODE (insn) >= 0
      && get_attr_type (insn) == TYPE_BRANCH)
    {
      /* Add the branch-over instruction and its delay slot, if this
         is a conditional branch.  */
      length = simplejump_p (insn) ? 0 : 8;

      /* Load the label into $AT and jump to it.  Ignore the delay
         slot of the jump.  */
      length += mips_load_label_length () + 4;
    }

  /* An unconditional jump has an unfilled delay slot if it is not part
     of a sequence.  A conditional jump normally has a delay slot, but
     does not on MIPS16.  */
  if (CALL_P (insn) || (TARGET_MIPS16 ? simplejump_p (insn) : JUMP_P (insn)))
    length += 4;

  /* See how many nops might be needed to avoid hardware hazards.  */
  if (!cfun->machine->ignore_hazard_length_p && INSN_CODE (insn) >= 0)
    switch (get_attr_hazard (insn))
      {
      case HAZARD_NONE:
        break;

      case HAZARD_DELAY:
        length += 4;
        break;

      case HAZARD_HILO:
        length += 8;
        break;
      }

  /* In order to make it easier to share MIPS16 and non-MIPS16 patterns,
     the .md file length attributes are 4-based for both modes.
     Adjust the MIPS16 ones here.  */
  if (TARGET_MIPS16)
    length /= 2;

  return length;
}

/* Return the assembly code for INSN, which has the operands given by
   OPERANDS, and which branches to OPERANDS[0] if some condition is true.
   BRANCH_IF_TRUE is the asm template that should be used if OPERANDS[0]
   is in range of a direct branch.  BRANCH_IF_FALSE is an inverted
   version of BRANCH_IF_TRUE.  */

const char *
mips_output_conditional_branch (rtx insn, rtx *operands,
                                const char *branch_if_true,
                                const char *branch_if_false)
{
  unsigned int length;
  rtx taken, not_taken;

  gcc_assert (LABEL_P (operands[0]));

  length = get_attr_length (insn);
  if (length <= 8)
    {
      /* Just a simple conditional branch.  */
      mips_branch_likely = (final_sequence && INSN_ANNULLED_BRANCH_P (insn));
      return branch_if_true;
    }

  /* Generate a reversed branch around a direct jump.  This fallback does
     not use branch-likely instructions.  */
  mips_branch_likely = false;
  not_taken = gen_label_rtx ();
  taken = operands[0];

  /* Generate the reversed branch to NOT_TAKEN.  */
  operands[0] = not_taken;
  output_asm_insn (branch_if_false, operands);

  /* If INSN has a delay slot, we must provide delay slots for both the
     branch to NOT_TAKEN and the conditional jump.  We must also ensure
     that INSN's delay slot is executed in the appropriate cases.  */
  if (final_sequence)
    {
      /* This first delay slot will always be executed, so use INSN's
         delay slot if it is not annulled.  */
      if (!INSN_ANNULLED_BRANCH_P (insn))
        {
          final_scan_insn (XVECEXP (final_sequence, 0, 1),
                           asm_out_file, optimize, 1, NULL);
          INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
        }
      else
        output_asm_insn ("nop", 0);
      fprintf (asm_out_file, "\n");
    }

  /* Output the unconditional branch to TAKEN.  */
  if (TARGET_ABSOLUTE_JUMPS)
    output_asm_insn (MIPS_ABSOLUTE_JUMP ("j\t%0%/"), &taken);
  else
    {
      mips_output_load_label (taken);
      output_asm_insn ("jr\t%@%]%/", 0);
    }

  /* Now deal with its delay slot; see above.  */
  if (final_sequence)
    {
      /* This delay slot will only be executed if the branch is taken.
         Use INSN's delay slot if it is annulled.  */
      if (INSN_ANNULLED_BRANCH_P (insn))
        {
          final_scan_insn (XVECEXP (final_sequence, 0, 1),
                           asm_out_file, optimize, 1, NULL);
          INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
        }
      else
        output_asm_insn ("nop", 0);
      fprintf (asm_out_file, "\n");
    }

  /* Output NOT_TAKEN.  */
  targetm.asm_out.internal_label (asm_out_file, "L",
                                  CODE_LABEL_NUMBER (not_taken));
  return "";
}
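
/* Rough sketch of the fallback produced above for an out-of-range
   conditional branch (for illustration only; label names are
   placeholders and the exact delay-slot contents depend on INSN):

       <branch_if_false>  ...,.Lnot    # reversed branch to NOT_TAKEN
        <INSN's delay slot or a nop>
       j       .Ltaken                 # or: load .Ltaken into $1; jr $1
        <annulled delay slot or a nop>
   .Lnot:                              # NOT_TAKEN
   */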

/* Return the assembly code for INSN, which branches to OPERANDS[0]
   if some ordering condition is true.  The condition is given by
   OPERANDS[1] if !INVERTED_P, otherwise it is the inverse of
   OPERANDS[1].  OPERANDS[2] is the comparison's first operand;
   its second is always zero.  */

const char *
mips_output_order_conditional_branch (rtx insn, rtx *operands, bool inverted_p)
{
  const char *branch[2];

  /* Make BRANCH[1] branch to OPERANDS[0] when the condition is true.
     Make BRANCH[0] branch on the inverse condition.  */
  switch (GET_CODE (operands[1]))
    {
    /* These cases are equivalent to comparisons against zero.  */
    case LEU:
      inverted_p = !inverted_p;
      /* Fall through.  */
    case GTU:
      branch[!inverted_p] = MIPS_BRANCH ("bne", "%2,%.,%0");
      branch[inverted_p] = MIPS_BRANCH ("beq", "%2,%.,%0");
      break;

    /* These cases are always true or always false.  */
    case LTU:
      inverted_p = !inverted_p;
      /* Fall through.  */
    case GEU:
      branch[!inverted_p] = MIPS_BRANCH ("beq", "%.,%.,%0");
      branch[inverted_p] = MIPS_BRANCH ("bne", "%.,%.,%0");
      break;

    default:
      branch[!inverted_p] = MIPS_BRANCH ("b%C1z", "%2,%0");
      branch[inverted_p] = MIPS_BRANCH ("b%N1z", "%2,%0");
      break;
    }
  return mips_output_conditional_branch (insn, operands, branch[1], branch[0]);
}
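
/* Informally: an unsigned "greater than zero" test (GTU) is just
   "not equal to zero", so the first pair of templates reduces it to
   bne/beq against $0; LTU and GEU against zero are constant, which is
   why they collapse to a beq/bne of $0 with itself; everything else
   falls through to the b<cond>z forms via the %C1/%N1 directives.  */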

/* Start a block of code that needs access to the LL, SC and SYNC
   instructions.  */

static void
mips_start_ll_sc_sync_block (void)
{
  if (!ISA_HAS_LL_SC)
    {
      output_asm_insn (".set\tpush", 0);
      output_asm_insn (".set\tmips2", 0);
    }
}

/* End a block started by mips_start_ll_sc_sync_block.  */

static void
mips_end_ll_sc_sync_block (void)
{
  if (!ISA_HAS_LL_SC)
    output_asm_insn (".set\tpop", 0);
}

/* Output and/or return the asm template for a sync instruction.  */

const char *
mips_output_sync (void)
{
  mips_start_ll_sc_sync_block ();
  output_asm_insn ("sync", 0);
  mips_end_ll_sc_sync_block ();
  return "";
}

/* Return the asm template associated with sync_insn1 value TYPE.
   IS_64BIT_P is true if we want a 64-bit rather than 32-bit operation.  */

static const char *
mips_sync_insn1_template (enum attr_sync_insn1 type, bool is_64bit_p)
{
  switch (type)
    {
    case SYNC_INSN1_MOVE:
      return "move\t%0,%z2";
    case SYNC_INSN1_LI:
      return "li\t%0,%2";
    case SYNC_INSN1_ADDU:
      return is_64bit_p ? "daddu\t%0,%1,%z2" : "addu\t%0,%1,%z2";
    case SYNC_INSN1_ADDIU:
      return is_64bit_p ? "daddiu\t%0,%1,%2" : "addiu\t%0,%1,%2";
    case SYNC_INSN1_SUBU:
      return is_64bit_p ? "dsubu\t%0,%1,%z2" : "subu\t%0,%1,%z2";
    case SYNC_INSN1_AND:
      return "and\t%0,%1,%z2";
    case SYNC_INSN1_ANDI:
      return "andi\t%0,%1,%2";
    case SYNC_INSN1_OR:
      return "or\t%0,%1,%z2";
    case SYNC_INSN1_ORI:
      return "ori\t%0,%1,%2";
    case SYNC_INSN1_XOR:
      return "xor\t%0,%1,%z2";
    case SYNC_INSN1_XORI:
      return "xori\t%0,%1,%2";
    }
  gcc_unreachable ();
}

/* Return the asm template associated with sync_insn2 value TYPE.  */

static const char *
mips_sync_insn2_template (enum attr_sync_insn2 type)
{
  switch (type)
    {
    case SYNC_INSN2_NOP:
      gcc_unreachable ();
    case SYNC_INSN2_AND:
      return "and\t%0,%1,%z2";
    case SYNC_INSN2_XOR:
      return "xor\t%0,%1,%z2";
    case SYNC_INSN2_NOT:
      return "nor\t%0,%1,%.";
    }
  gcc_unreachable ();
}

/* OPERANDS are the operands to a sync loop instruction and INDEX is
   the value of one of the sync_* attributes.  Return the operand
   referred to by the attribute, or DEFAULT_VALUE if the insn doesn't
   have the associated attribute.  */

static rtx
mips_get_sync_operand (rtx *operands, int index, rtx default_value)
{
  if (index > 0)
    default_value = operands[index - 1];
  return default_value;
}

/* INSN is a sync loop with operands OPERANDS.  Build up a multi-insn
   sequence for it.  */

static void
mips_process_sync_loop (rtx insn, rtx *operands)
{
  rtx at, mem, oldval, newval, inclusive_mask, exclusive_mask;
  rtx required_oldval, insn1_op2, tmp1, tmp2, tmp3;
  unsigned int tmp3_insn;
  enum attr_sync_insn1 insn1;
  enum attr_sync_insn2 insn2;
  bool is_64bit_p;

  /* Read an operand from the sync_WHAT attribute and store it in
     variable WHAT.  DEFAULT is the default value if no attribute
     is specified.  */
#define READ_OPERAND(WHAT, DEFAULT) \
  WHAT = mips_get_sync_operand (operands, (int) get_attr_sync_##WHAT (insn), \
                                DEFAULT)

  /* Read the memory.  */
  READ_OPERAND (mem, 0);
  gcc_assert (mem);
  is_64bit_p = (GET_MODE_BITSIZE (GET_MODE (mem)) == 64);

  /* Read the other attributes.  */
  at = gen_rtx_REG (GET_MODE (mem), AT_REGNUM);
  READ_OPERAND (oldval, at);
  READ_OPERAND (newval, at);
  READ_OPERAND (inclusive_mask, 0);
  READ_OPERAND (exclusive_mask, 0);
  READ_OPERAND (required_oldval, 0);
  READ_OPERAND (insn1_op2, 0);
  insn1 = get_attr_sync_insn1 (insn);
  insn2 = get_attr_sync_insn2 (insn);

  mips_multi_start ();

  /* Output the release side of the memory barrier.  */
  if (get_attr_sync_release_barrier (insn) == SYNC_RELEASE_BARRIER_YES)
    {
      if (required_oldval == 0 && TARGET_OCTEON)
        {
          /* Octeon doesn't reorder reads, so a full barrier can be
             created by using SYNCW to order writes combined with the
             write from the following SC.  When the SC successfully
             completes, we know that all preceding writes are also
             committed to the coherent memory system.  It is possible
             for a single SYNCW to fail, but a pair of them will never
             fail, so we use two.  */
          mips_multi_add_insn ("syncw", NULL);
          mips_multi_add_insn ("syncw", NULL);
        }
      else
        mips_multi_add_insn ("sync", NULL);
    }

  /* Output the branch-back label.  */
  mips_multi_add_label ("1:");

  /* OLDVAL = *MEM.  */
  mips_multi_add_insn (is_64bit_p ? "lld\t%0,%1" : "ll\t%0,%1",
                       oldval, mem, NULL);

  /* if ((OLDVAL & INCLUSIVE_MASK) != REQUIRED_OLDVAL) goto 2.  */
  if (required_oldval)
    {
      if (inclusive_mask == 0)
        tmp1 = oldval;
      else
        {
          gcc_assert (oldval != at);
          mips_multi_add_insn ("and\t%0,%1,%2",
                               at, oldval, inclusive_mask, NULL);
          tmp1 = at;
        }
      mips_multi_add_insn ("bne\t%0,%z1,2f", tmp1, required_oldval, NULL);
    }

  /* $TMP1 = OLDVAL & EXCLUSIVE_MASK.  */
  if (exclusive_mask == 0)
    tmp1 = const0_rtx;
  else
    {
      gcc_assert (oldval != at);
      mips_multi_add_insn ("and\t%0,%1,%z2",
                           at, oldval, exclusive_mask, NULL);
      tmp1 = at;
    }

  /* $TMP2 = INSN1 (OLDVAL, INSN1_OP2).

     We can ignore moves if $TMP4 != INSN1_OP2, since we'll still emit
     at least one instruction in that case.  */
  if (insn1 == SYNC_INSN1_MOVE
      && (tmp1 != const0_rtx || insn2 != SYNC_INSN2_NOP))
    tmp2 = insn1_op2;
  else
    {
      mips_multi_add_insn (mips_sync_insn1_template (insn1, is_64bit_p),
                           newval, oldval, insn1_op2, NULL);
      tmp2 = newval;
    }

  /* $TMP3 = INSN2 ($TMP2, INCLUSIVE_MASK).  */
  if (insn2 == SYNC_INSN2_NOP)
    tmp3 = tmp2;
  else
    {
      mips_multi_add_insn (mips_sync_insn2_template (insn2),
                           newval, tmp2, inclusive_mask, NULL);
      tmp3 = newval;
    }
  tmp3_insn = mips_multi_last_index ();

  /* $AT = $TMP1 | $TMP3.  */
  if (tmp1 == const0_rtx || tmp3 == const0_rtx)
    {
      mips_multi_set_operand (tmp3_insn, 0, at);
      tmp3 = at;
    }
  else
    {
      gcc_assert (tmp1 != tmp3);
      mips_multi_add_insn ("or\t%0,%1,%2", at, tmp1, tmp3, NULL);
    }

  /* if (!commit (*MEM = $AT)) goto 1.

     This will sometimes be a delayed branch; see the write code below
     for details.  */
  mips_multi_add_insn (is_64bit_p ? "scd\t%0,%1" : "sc\t%0,%1", at, mem, NULL);
  mips_multi_add_insn ("beq%?\t%0,%.,1b", at, NULL);

  /* if (INSN1 != MOVE && INSN1 != LI) NEWVAL = $TMP3 [delay slot].  */
  if (insn1 != SYNC_INSN1_MOVE && insn1 != SYNC_INSN1_LI && tmp3 != newval)
    {
      mips_multi_copy_insn (tmp3_insn);
      mips_multi_set_operand (mips_multi_last_index (), 0, newval);
    }
  else
    mips_multi_add_insn ("nop", NULL);

  /* Output the acquire side of the memory barrier.  */
  if (TARGET_SYNC_AFTER_SC)
    mips_multi_add_insn ("sync", NULL);

  /* Output the exit label, if needed.  */
  if (required_oldval)
    mips_multi_add_label ("2:");

#undef READ_OPERAND
}
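
/* Informal sketch of the kind of sequence the code above builds for a
   simple full-word atomic operation with both barriers (register names
   are illustrative only, and a branch-likely form may replace beq when
   working around the R10000 errata):

       sync                            # release barrier
   1:  ll      $t,0($base)             # OLDVAL = *MEM
       addu    $1,$t,$arg              # $AT = INSN1 (OLDVAL, INSN1_OP2)
       sc      $1,0($base)             # if (!commit (*MEM = $AT)) ...
       beq     $1,$0,1b                # ... goto 1
        addu   $res,$t,$arg            # delay slot: NEWVAL = $TMP3
       sync                            # acquire barrier (TARGET_SYNC_AFTER_SC)

   Subword and compare-and-swap variants add the mask operations and
   the early "bne ...,2f" exit handled above.  */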

/* Output and/or return the asm template for sync loop INSN, which has
   the operands given by OPERANDS.  */

const char *
mips_output_sync_loop (rtx insn, rtx *operands)
{
  mips_process_sync_loop (insn, operands);

  /* Use branch-likely instructions to work around the LL/SC R10000
     errata.  */
  mips_branch_likely = TARGET_FIX_R10000;

  mips_push_asm_switch (&mips_noreorder);
  mips_push_asm_switch (&mips_nomacro);
  mips_push_asm_switch (&mips_noat);
  mips_start_ll_sc_sync_block ();

  mips_multi_write ();

  mips_end_ll_sc_sync_block ();
  mips_pop_asm_switch (&mips_noat);
  mips_pop_asm_switch (&mips_nomacro);
  mips_pop_asm_switch (&mips_noreorder);

  return "";
}

/* Return the number of individual instructions in sync loop INSN,
   which has the operands given by OPERANDS.  */

unsigned int
mips_sync_loop_insns (rtx insn, rtx *operands)
{
  mips_process_sync_loop (insn, operands);
  return mips_multi_num_insns;
}

/* Return the assembly code for DIV or DDIV instruction DIVISION, which has
   the operands given by OPERANDS.  Add in a divide-by-zero check if needed.

   When working around R4000 and R4400 errata, we need to make sure that
   the division is not immediately followed by a shift[1][2].  We also
   need to stop the division from being put into a branch delay slot[3].
   The easiest way to avoid both problems is to add a nop after the
   division.  When a divide-by-zero check is needed, this nop can be
   used to fill the branch delay slot.

   [1] If a double-word or a variable shift executes immediately
       after starting an integer division, the shift may give an
       incorrect result.  See quotations of errata #16 and #28 from
       "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
       in mips.md for details.

   [2] A similar bug to [1] exists for all revisions of the
       R4000 and the R4400 when run in an MC configuration.
       From "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0":

       "19. In this following sequence:

                ddiv    (or ddivu or div or divu)
                dsll32  (or dsrl32, dsra32)

            if an MPT stall occurs, while the divide is slipping the cpu
            pipeline, then the following double shift would end up with an
            incorrect result.

            Workaround: The compiler needs to avoid generating any
            sequence with divide followed by extended double shift."

       This erratum is also present in "MIPS R4400MC Errata, Processor
       Revision 1.0" and "MIPS R4400MC Errata, Processor Revision 2.0
       & 3.0" as errata #10 and #4, respectively.

   [3] From "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
       (also valid for MIPS R4000MC processors):

       "52. R4000SC: This bug does not apply for the R4000PC.

            There are two flavors of this bug:

            1) If the instruction just after divide takes an RF exception
               (tlb-refill, tlb-invalid) and gets an instruction cache
               miss (both primary and secondary) and the line which is
               currently in secondary cache at this index had the first
               data word, where the bits 5..2 are set, then R4000 would
               get a wrong result for the div.

            ##1
                nop
                div     r8, r9
                -------------------            # end-of page. -tlb-refill
                nop
            ##2
                nop
                div     r8, r9
                -------------------            # end-of page. -tlb-invalid
                nop

            2) If the divide is in the taken branch delay slot, where the
               target takes RF exception and gets an I-cache miss for the
               exception vector or where I-cache miss occurs for the
               target address, under the above mentioned scenarios, the
               div would get wrong results.

            ##1
                j       r2              # to next page mapped or unmapped
                div     r8,r9           # this bug would be there as long
                                        # as there is an ICache miss and
                nop                     # the "data pattern" is present

            ##2
                beq     r0, r0, NextPage        # to Next page
                div     r8,r9
                nop

            This bug is present for div, divu, ddiv, and ddivu
            instructions.

            Workaround: For item 1), OS could make sure that the next page
            after the divide instruction is also mapped.  For item 2), the
            compiler could make sure that the divide instruction is not in
            the branch delay slot."

   These processors have PRId values of 0x00004220 and 0x00004300 for
   the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400.  */

const char *
mips_output_division (const char *division, rtx *operands)
{
  const char *s;

  s = division;
  if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
    {
      output_asm_insn (s, operands);
      s = "nop";
    }
  if (TARGET_CHECK_ZERO_DIV)
    {
      if (TARGET_MIPS16)
        {
          output_asm_insn (s, operands);
          s = "bnez\t%2,1f\n\tbreak\t7\n1:";
        }
      else if (GENERATE_DIVIDE_TRAPS)
        {
          output_asm_insn (s, operands);
          s = "teq\t%2,%.,7";
        }
      else
        {
          output_asm_insn ("%(bne\t%2,%.,1f", operands);
          output_asm_insn (s, operands);
          s = "break\t7%)\n1:";
        }
    }
  return s;
}
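
/* By way of illustration (operand numbering and register choices
   assumed, not taken from a real compilation): with trap-based checks
   enabled, a 32-bit division ends up being emitted roughly as

       div     $0,$4,$5
       teq     $5,$0,7

   while with -mno-check-zero-division only the division itself
   remains, plus a trailing nop when the R4000/R4400 workarounds are
   active.  */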

/* Return true if IN_INSN is a multiply-add or multiply-subtract
   instruction and if OUT_INSN assigns to the accumulator operand.  */

bool
mips_linked_madd_p (rtx out_insn, rtx in_insn)
{
  rtx x;

  x = single_set (in_insn);
  if (x == 0)
    return false;

  x = SET_SRC (x);

  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == MULT
      && reg_set_p (XEXP (x, 1), out_insn))
    return true;

  if (GET_CODE (x) == MINUS
      && GET_CODE (XEXP (x, 1)) == MULT
      && reg_set_p (XEXP (x, 0), out_insn))
    return true;

  return false;
}

/* True if the dependency between OUT_INSN and IN_INSN is on the store
   data rather than the address.  We need this because the cprestore
   pattern is type "store", but is defined using an UNSPEC_VOLATILE,
   which causes the default routine to abort.  We just return false
   for that case.  */

bool
mips_store_data_bypass_p (rtx out_insn, rtx in_insn)
{
  if (GET_CODE (PATTERN (in_insn)) == UNSPEC_VOLATILE)
    return false;

  return !store_data_bypass_p (out_insn, in_insn);
}

/* Variables and flags used in scheduler hooks when tuning for
   Loongson 2E/2F.  */
static struct
{
  /* Variables to support Loongson 2E/2F round-robin [F]ALU1/2 dispatch
     strategy.  */

  /* If true, then next ALU1/2 instruction will go to ALU1.  */
  bool alu1_turn_p;

  /* If true, then next FALU1/2 instruction will go to FALU1.  */
  bool falu1_turn_p;

  /* Codes to query if [f]alu{1,2}_core units are subscribed or not.  */
  int alu1_core_unit_code;
  int alu2_core_unit_code;
  int falu1_core_unit_code;
  int falu2_core_unit_code;

  /* True if current cycle has a multi instruction.
     This flag is used in mips_ls2_dfa_post_advance_cycle.  */
  bool cycle_has_multi_p;

  /* Instructions to subscribe ls2_[f]alu{1,2}_turn_enabled units.
     These are used in mips_ls2_dfa_post_advance_cycle to initialize
     DFA state.
     E.g., when alu1_turn_enabled_insn is issued it makes the next
     ALU1/2 instruction go to ALU1.  */
  rtx alu1_turn_enabled_insn;
  rtx alu2_turn_enabled_insn;
  rtx falu1_turn_enabled_insn;
  rtx falu2_turn_enabled_insn;
} mips_ls2;

/* Implement TARGET_SCHED_ADJUST_COST.  We assume that anti and output
   dependencies have no cost, except on the 20Kc where output-dependence
   is treated like input-dependence.  */

static int
mips_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
                  rtx dep ATTRIBUTE_UNUSED, int cost)
{
  if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
      && TUNE_20KC)
    return cost;
  if (REG_NOTE_KIND (link) != 0)
    return 0;
  return cost;
}

/* Return the number of instructions that can be issued per cycle.  */

static int
mips_issue_rate (void)
{
  switch (mips_tune)
    {
    case PROCESSOR_74KC:
    case PROCESSOR_74KF2_1:
    case PROCESSOR_74KF1_1:
    case PROCESSOR_74KF3_2:
      /* The 74k is not strictly a quad-issue cpu, but can be seen as one
         by the scheduler.  It can issue 1 ALU, 1 AGEN and 2 FPU insns,
         but in reality only a maximum of 3 insns can be issued as
         floating-point loads and stores also require a slot in the
         AGEN pipe.  */
    case PROCESSOR_R10000:
      /* All R10K Processors are quad-issue (being the first MIPS
         processors to support this feature).  */
      return 4;

    case PROCESSOR_20KC:
    case PROCESSOR_R4130:
    case PROCESSOR_R5400:
    case PROCESSOR_R5500:
    case PROCESSOR_R7000:
    case PROCESSOR_R9000:
    case PROCESSOR_OCTEON:
      return 2;

    case PROCESSOR_SB1:
    case PROCESSOR_SB1A:
      /* This is actually 4, but we get better performance if we claim 3.
         This is partly because of unwanted speculative code motion with the
         larger number, and partly because in most common cases we can't
         reach the theoretical max of 4.  */
      return 3;

    case PROCESSOR_LOONGSON_2E:
    case PROCESSOR_LOONGSON_2F:
      return 4;

    default:
      return 1;
    }
}

/* Implement TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN hook for Loongson2.  */

static void
mips_ls2_init_dfa_post_cycle_insn (void)
{
  start_sequence ();
  emit_insn (gen_ls2_alu1_turn_enabled_insn ());
  mips_ls2.alu1_turn_enabled_insn = get_insns ();
  end_sequence ();

  start_sequence ();
  emit_insn (gen_ls2_alu2_turn_enabled_insn ());
  mips_ls2.alu2_turn_enabled_insn = get_insns ();
  end_sequence ();

  start_sequence ();
  emit_insn (gen_ls2_falu1_turn_enabled_insn ());
  mips_ls2.falu1_turn_enabled_insn = get_insns ();
  end_sequence ();

  start_sequence ();
  emit_insn (gen_ls2_falu2_turn_enabled_insn ());
  mips_ls2.falu2_turn_enabled_insn = get_insns ();
  end_sequence ();

  mips_ls2.alu1_core_unit_code = get_cpu_unit_code ("ls2_alu1_core");
  mips_ls2.alu2_core_unit_code = get_cpu_unit_code ("ls2_alu2_core");
  mips_ls2.falu1_core_unit_code = get_cpu_unit_code ("ls2_falu1_core");
  mips_ls2.falu2_core_unit_code = get_cpu_unit_code ("ls2_falu2_core");
}

/* Implement TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN hook.
   Init data used in mips_dfa_post_advance_cycle.  */

static void
mips_init_dfa_post_cycle_insn (void)
{
  if (TUNE_LOONGSON_2EF)
    mips_ls2_init_dfa_post_cycle_insn ();
}

/* Initialize STATE when scheduling for Loongson 2E/2F.
   Support round-robin dispatch scheme by enabling only one of
   ALU1/ALU2 and one of FALU1/FALU2 units for ALU1/2 and FALU1/2 instructions
   respectively.  */

static void
mips_ls2_dfa_post_advance_cycle (state_t state)
{
  if (cpu_unit_reservation_p (state, mips_ls2.alu1_core_unit_code))
    {
      /* Though there are no non-pipelined ALU1 insns,
         we can get an instruction of type 'multi' before reload.  */
      gcc_assert (mips_ls2.cycle_has_multi_p);
      mips_ls2.alu1_turn_p = false;
    }

  mips_ls2.cycle_has_multi_p = false;

  if (cpu_unit_reservation_p (state, mips_ls2.alu2_core_unit_code))
    /* We have a non-pipelined alu instruction in the core,
       adjust round-robin counter.  */
    mips_ls2.alu1_turn_p = true;

  if (mips_ls2.alu1_turn_p)
    {
      if (state_transition (state, mips_ls2.alu1_turn_enabled_insn) >= 0)
        gcc_unreachable ();
    }
  else
    {
      if (state_transition (state, mips_ls2.alu2_turn_enabled_insn) >= 0)
        gcc_unreachable ();
    }

  if (cpu_unit_reservation_p (state, mips_ls2.falu1_core_unit_code))
    {
      /* There are no non-pipelined FALU1 insns.  */
      gcc_unreachable ();
      mips_ls2.falu1_turn_p = false;
    }

  if (cpu_unit_reservation_p (state, mips_ls2.falu2_core_unit_code))
    /* We have a non-pipelined falu instruction in the core,
       adjust round-robin counter.  */
    mips_ls2.falu1_turn_p = true;

  if (mips_ls2.falu1_turn_p)
    {
      if (state_transition (state, mips_ls2.falu1_turn_enabled_insn) >= 0)
        gcc_unreachable ();
    }
  else
    {
      if (state_transition (state, mips_ls2.falu2_turn_enabled_insn) >= 0)
        gcc_unreachable ();
    }
}

/* Implement TARGET_SCHED_DFA_POST_ADVANCE_CYCLE.
   This hook is being called at the start of each cycle.  */

static void
mips_dfa_post_advance_cycle (void)
{
  if (TUNE_LOONGSON_2EF)
    mips_ls2_dfa_post_advance_cycle (curr_state);
}

/* Implement TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD.  This should
   be as wide as the scheduling freedom in the DFA.  */

static int
mips_multipass_dfa_lookahead (void)
{
  /* Can schedule up to 4 of the 6 function units in any one cycle.  */
  if (TUNE_SB1)
    return 4;

  if (TUNE_LOONGSON_2EF)
    return 4;

  if (TUNE_OCTEON)
    return 2;

  return 0;
}

/* Remove the instruction at index LOWER from ready queue READY and
   reinsert it in front of the instruction at index HIGHER.  LOWER must
   be <= HIGHER.  */

static void
mips_promote_ready (rtx *ready, int lower, int higher)
{
  rtx new_head;
  int i;

  new_head = ready[lower];
  for (i = lower; i < higher; i++)
    ready[i] = ready[i + 1];
  ready[i] = new_head;
}
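
/* For example, if READY is {A, B, C, D} (with the highest-priority
   candidate last, as the callers here assume), then
   mips_promote_ready (ready, 1, 3) yields {A, C, D, B}: element B is
   pulled out of slot 1 and reinserted at slot 3, ahead of the entries
   it skipped over.  */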

/* If the priority of the instruction at POS2 in the ready queue READY
   is within LIMIT units of that of the instruction at POS1, swap the
   instructions if POS2 is not already less than POS1.  */

static void
mips_maybe_swap_ready (rtx *ready, int pos1, int pos2, int limit)
{
  if (pos1 < pos2
      && INSN_PRIORITY (ready[pos1]) + limit >= INSN_PRIORITY (ready[pos2]))
    {
      rtx temp;

      temp = ready[pos1];
      ready[pos1] = ready[pos2];
      ready[pos2] = temp;
    }
}

/* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
   that may clobber hi or lo.  */
static rtx mips_macc_chains_last_hilo;

/* A TUNE_MACC_CHAINS helper function.  Record that instruction INSN has
   been scheduled, updating mips_macc_chains_last_hilo appropriately.  */

static void
mips_macc_chains_record (rtx insn)
{
  if (get_attr_may_clobber_hilo (insn))
    mips_macc_chains_last_hilo = insn;
}

/* A TUNE_MACC_CHAINS helper function.  Search ready queue READY, which
   has NREADY elements, looking for a multiply-add or multiply-subtract
   instruction that is cumulative with mips_macc_chains_last_hilo.
   If there is one, promote it ahead of anything else that might
   clobber hi or lo.  */

static void
mips_macc_chains_reorder (rtx *ready, int nready)
{
  int i, j;

  if (mips_macc_chains_last_hilo != 0)
    for (i = nready - 1; i >= 0; i--)
      if (mips_linked_madd_p (mips_macc_chains_last_hilo, ready[i]))
        {
          for (j = nready - 1; j > i; j--)
            if (recog_memoized (ready[j]) >= 0
                && get_attr_may_clobber_hilo (ready[j]))
              {
                mips_promote_ready (ready, i, j);
                break;
              }
          break;
        }
}

/* The last instruction to be scheduled.  */
static rtx vr4130_last_insn;

/* A note_stores callback used by vr4130_true_reg_dependence_p.  DATA
   points to an rtx that is initially an instruction.  Nullify the rtx
   if the instruction uses the value of register X.  */

static void
vr4130_true_reg_dependence_p_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED,
                                void *data)
{
  rtx *insn_ptr;

  insn_ptr = (rtx *) data;
  if (REG_P (x)
      && *insn_ptr != 0
      && reg_referenced_p (x, PATTERN (*insn_ptr)))
    *insn_ptr = 0;
}

/* Return true if there is true register dependence between vr4130_last_insn
   and INSN.  */

static bool
vr4130_true_reg_dependence_p (rtx insn)
{
  note_stores (PATTERN (vr4130_last_insn),
               vr4130_true_reg_dependence_p_1, &insn);
  return insn == 0;
}

/* A TUNE_MIPS4130 helper function.  Given that INSN1 is at the head of
   the ready queue and that INSN2 is the instruction after it, return
   true if it is worth promoting INSN2 ahead of INSN1.  Look for cases
   in which INSN1 and INSN2 can probably issue in parallel, but for
   which (INSN2, INSN1) should be less sensitive to instruction
   alignment than (INSN1, INSN2).  See 4130.md for more details.  */

static bool
vr4130_swap_insns_p (rtx insn1, rtx insn2)
{
  sd_iterator_def sd_it;
  dep_t dep;

  /* Check for the following case:

     1) there is some other instruction X with an anti dependence on INSN1;
     2) X has a higher priority than INSN2; and
     3) X is an arithmetic instruction (and thus has no unit restrictions).

     If INSN1 is the last instruction blocking X, it would be better to
     choose (INSN1, X) over (INSN2, INSN1).  */
  FOR_EACH_DEP (insn1, SD_LIST_FORW, sd_it, dep)
    if (DEP_TYPE (dep) == REG_DEP_ANTI
        && INSN_PRIORITY (DEP_CON (dep)) > INSN_PRIORITY (insn2)
        && recog_memoized (DEP_CON (dep)) >= 0
        && get_attr_vr4130_class (DEP_CON (dep)) == VR4130_CLASS_ALU)
      return false;

  if (vr4130_last_insn != 0
      && recog_memoized (insn1) >= 0
      && recog_memoized (insn2) >= 0)
    {
      /* See whether INSN1 and INSN2 use different execution units,
         or if they are both ALU-type instructions.  If so, they can
         probably execute in parallel.  */
      enum attr_vr4130_class class1 = get_attr_vr4130_class (insn1);
      enum attr_vr4130_class class2 = get_attr_vr4130_class (insn2);
      if (class1 != class2 || class1 == VR4130_CLASS_ALU)
        {
          /* If only one of the instructions has a dependence on
             vr4130_last_insn, prefer to schedule the other one first.  */
          bool dep1_p = vr4130_true_reg_dependence_p (insn1);
          bool dep2_p = vr4130_true_reg_dependence_p (insn2);
          if (dep1_p != dep2_p)
            return dep1_p;

          /* Prefer to schedule INSN2 ahead of INSN1 if vr4130_last_insn
             is not an ALU-type instruction and if INSN1 uses the same
             execution unit.  (Note that if this condition holds, we already
             know that INSN2 uses a different execution unit.)  */
          if (class1 != VR4130_CLASS_ALU
              && recog_memoized (vr4130_last_insn) >= 0
              && class1 == get_attr_vr4130_class (vr4130_last_insn))
            return true;
        }
    }
  return false;
}

/* A TUNE_MIPS4130 helper function.  (READY, NREADY) describes a ready
   queue with at least two instructions.  Swap the first two if
   vr4130_swap_insns_p says that it could be worthwhile.  */

static void
vr4130_reorder (rtx *ready, int nready)
{
  if (vr4130_swap_insns_p (ready[nready - 1], ready[nready - 2]))
    mips_promote_ready (ready, nready - 2, nready - 1);
}

/* Record whether last 74k AGEN instruction was a load or store.  */
static enum attr_type mips_last_74k_agen_insn = TYPE_UNKNOWN;

/* Initialize mips_last_74k_agen_insn from INSN.  A null argument
   resets to TYPE_UNKNOWN state.  */

static void
mips_74k_agen_init (rtx insn)
{
  if (!insn || CALL_P (insn) || JUMP_P (insn))
    mips_last_74k_agen_insn = TYPE_UNKNOWN;
  else
    {
      enum attr_type type = get_attr_type (insn);
      if (type == TYPE_LOAD || type == TYPE_STORE)
        mips_last_74k_agen_insn = type;
    }
}

/* A TUNE_74K helper function.  The 74K AGEN pipeline likes multiple
   loads to be grouped together, and multiple stores to be grouped
   together.  Swap things around in the ready queue to make this happen.  */

static void
mips_74k_agen_reorder (rtx *ready, int nready)
{
  int i;
  int store_pos, load_pos;

  store_pos = -1;
  load_pos = -1;

  for (i = nready - 1; i >= 0; i--)
    {
      rtx insn = ready[i];
      if (USEFUL_INSN_P (insn))
        switch (get_attr_type (insn))
          {
          case TYPE_STORE:
            if (store_pos == -1)
              store_pos = i;
            break;

          case TYPE_LOAD:
            if (load_pos == -1)
              load_pos = i;
            break;

          default:
            break;
          }
    }

  if (load_pos == -1 || store_pos == -1)
    return;

  switch (mips_last_74k_agen_insn)
    {
    case TYPE_UNKNOWN:
      /* Prefer to schedule loads since they have a higher latency.  */
    case TYPE_LOAD:
      /* Swap loads to the front of the queue.  */
      mips_maybe_swap_ready (ready, load_pos, store_pos, 4);
      break;
    case TYPE_STORE:
      /* Swap stores to the front of the queue.  */
      mips_maybe_swap_ready (ready, store_pos, load_pos, 4);
      break;
    default:
      break;
    }
}

/* Implement TARGET_SCHED_INIT.  */

static void
mips_sched_init (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
                 int max_ready ATTRIBUTE_UNUSED)
{
  mips_macc_chains_last_hilo = 0;
  vr4130_last_insn = 0;
  mips_74k_agen_init (NULL_RTX);

  /* When scheduling for Loongson2, branch instructions go to ALU1,
     therefore a basic block is most likely to start with the
     round-robin counter pointing to ALU2.  */
  mips_ls2.alu1_turn_p = false;
  mips_ls2.falu1_turn_p = true;
}

/* Implement TARGET_SCHED_REORDER and TARGET_SCHED_REORDER2.  */

static int
mips_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
                    rtx *ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED)
{
  if (!reload_completed
      && TUNE_MACC_CHAINS
      && *nreadyp > 0)
    mips_macc_chains_reorder (ready, *nreadyp);

  if (reload_completed
      && TUNE_MIPS4130
      && !TARGET_VR4130_ALIGN
      && *nreadyp > 1)
    vr4130_reorder (ready, *nreadyp);

  if (TUNE_74K)
    mips_74k_agen_reorder (ready, *nreadyp);

  return mips_issue_rate ();
}

/* Update round-robin counters for ALU1/2 and FALU1/2.  */

static void
mips_ls2_variable_issue (rtx insn)
{
  if (mips_ls2.alu1_turn_p)
    {
      if (cpu_unit_reservation_p (curr_state, mips_ls2.alu1_core_unit_code))
        mips_ls2.alu1_turn_p = false;
    }
  else
    {
      if (cpu_unit_reservation_p (curr_state, mips_ls2.alu2_core_unit_code))
        mips_ls2.alu1_turn_p = true;
    }

  if (mips_ls2.falu1_turn_p)
    {
      if (cpu_unit_reservation_p (curr_state, mips_ls2.falu1_core_unit_code))
        mips_ls2.falu1_turn_p = false;
    }
  else
    {
      if (cpu_unit_reservation_p (curr_state, mips_ls2.falu2_core_unit_code))
        mips_ls2.falu1_turn_p = true;
    }

  if (recog_memoized (insn) >= 0)
    mips_ls2.cycle_has_multi_p |= (get_attr_type (insn) == TYPE_MULTI);
}

/* Implement TARGET_SCHED_VARIABLE_ISSUE.  */

static int
mips_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
                     rtx insn, int more)
{
  /* Ignore USEs and CLOBBERs; don't count them against the issue rate.  */
  if (USEFUL_INSN_P (insn))
    {
      if (get_attr_type (insn) != TYPE_GHOST)
        more--;
      if (!reload_completed && TUNE_MACC_CHAINS)
        mips_macc_chains_record (insn);
      vr4130_last_insn = insn;
      if (TUNE_74K)
        mips_74k_agen_init (insn);
      else if (TUNE_LOONGSON_2EF)
        mips_ls2_variable_issue (insn);
    }

  /* Instructions of type 'multi' should all be split before
     the second scheduling pass.  */
  gcc_assert (!reload_completed
              || recog_memoized (insn) < 0
              || get_attr_type (insn) != TYPE_MULTI);

  return more;
}

/* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY),
   return the first operand of the associated PREF or PREFX insn.  */

rtx
mips_prefetch_cookie (rtx write, rtx locality)
{
  /* store_streamed / load_streamed.  */
  if (INTVAL (locality) <= 0)
    return GEN_INT (INTVAL (write) + 4);

  /* store / load.  */
  if (INTVAL (locality) <= 2)
    return write;

  /* store_retained / load_retained.  */
  return GEN_INT (INTVAL (write) + 6);
}
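
/* Summarizing the cases above: the cookie is WRITE + 4 when
   LOCALITY <= 0, WRITE itself for LOCALITY 1 or 2, and WRITE + 6
   otherwise.  With WRITE being 0 for loads and 1 for stores, these
   correspond to the PREF hints load/store (0/1),
   load_streamed/store_streamed (4/5) and
   load_retained/store_retained (6/7).  */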

/* Flags that indicate when a built-in function is available.

   BUILTIN_AVAIL_NON_MIPS16
       The function is available on the current target, but only
       in non-MIPS16 mode.  */
#define BUILTIN_AVAIL_NON_MIPS16 1

/* Declare an availability predicate for built-in functions that
   require non-MIPS16 mode and also require COND to be true.
   NAME is the main part of the predicate's name.  */
#define AVAIL_NON_MIPS16(NAME, COND) \
  static unsigned int \
  mips_builtin_avail_##NAME (void) \
  { \
    return (COND) ? BUILTIN_AVAIL_NON_MIPS16 : 0; \
  }
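
/* For instance, the "AVAIL_NON_MIPS16 (dsp, TARGET_DSP)" invocation
   below expands to a predicate equivalent to:

     static unsigned int
     mips_builtin_avail_dsp (void)
     {
       return (TARGET_DSP) ? BUILTIN_AVAIL_NON_MIPS16 : 0;
     }
   */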

/* This structure describes a single built-in function.  */
struct mips_builtin_description {
  /* The code of the main .md file instruction.  See mips_builtin_type
     for more information.  */
  enum insn_code icode;

  /* The floating-point comparison code to use with ICODE, if any.  */
  enum mips_fp_condition cond;

  /* The name of the built-in function.  */
  const char *name;

  /* Specifies how the function should be expanded.  */
  enum mips_builtin_type builtin_type;

  /* The function's prototype.  */
  enum mips_function_type function_type;

  /* Whether the function is available.  */
  unsigned int (*avail) (void);
};

AVAIL_NON_MIPS16 (paired_single, TARGET_PAIRED_SINGLE_FLOAT)
AVAIL_NON_MIPS16 (sb1_paired_single, TARGET_SB1 && TARGET_PAIRED_SINGLE_FLOAT)
AVAIL_NON_MIPS16 (mips3d, TARGET_MIPS3D)
AVAIL_NON_MIPS16 (dsp, TARGET_DSP)
AVAIL_NON_MIPS16 (dspr2, TARGET_DSPR2)
AVAIL_NON_MIPS16 (dsp_32, !TARGET_64BIT && TARGET_DSP)
AVAIL_NON_MIPS16 (dspr2_32, !TARGET_64BIT && TARGET_DSPR2)
AVAIL_NON_MIPS16 (loongson, TARGET_LOONGSON_VECTORS)
AVAIL_NON_MIPS16 (cache, TARGET_CACHE_BUILTIN)

/* Construct a mips_builtin_description from the given arguments.

   INSN is the name of the associated instruction pattern, without the
   leading CODE_FOR_mips_.

   COND is the floating-point condition code associated with the
   function.  It can be 'f' if the field is not applicable.

   NAME is the name of the function itself, without the leading
   "__builtin_mips_".

   BUILTIN_TYPE and FUNCTION_TYPE are mips_builtin_description fields.

   AVAIL is the name of the availability predicate, without the leading
   mips_builtin_avail_.  */
#define MIPS_BUILTIN(INSN, COND, NAME, BUILTIN_TYPE, \
                     FUNCTION_TYPE, AVAIL) \
  { CODE_FOR_mips_ ## INSN, MIPS_FP_COND_ ## COND, \
    "__builtin_mips_" NAME, BUILTIN_TYPE, FUNCTION_TYPE, \
    mips_builtin_avail_ ## AVAIL }

/* Define __builtin_mips_<INSN>, which is a MIPS_BUILTIN_DIRECT function
   mapped to instruction CODE_FOR_mips_<INSN>.  FUNCTION_TYPE and AVAIL
   are as for MIPS_BUILTIN.  */
#define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, AVAIL) \
  MIPS_BUILTIN (INSN, f, #INSN, MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, AVAIL)
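
/* Purely as an illustration of the two macros above: an entry such as
   DIRECT_BUILTIN (foo, FTYPE, dsp), where "foo" and FTYPE are
   placeholders rather than real pattern or type names, would expand to

     { CODE_FOR_mips_foo, MIPS_FP_COND_f, "__builtin_mips_foo",
       MIPS_BUILTIN_DIRECT, FTYPE, mips_builtin_avail_dsp }
   */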

/* Define __builtin_mips_<INSN>_<COND>_{s,d} functions, both of which
   are subject to mips_builtin_avail_<AVAIL>.  */
#define CMP_SCALAR_BUILTINS(INSN, COND, AVAIL) \
  MIPS_BUILTIN (INSN ## _cond_s, COND, #INSN "_" #COND "_s", \
                MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_SF_SF, AVAIL), \
  MIPS_BUILTIN (INSN ## _cond_d, COND, #INSN "_" #COND "_d", \
                MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_DF_DF, AVAIL)

/* Define __builtin_mips_{any,all,upper,lower}_<INSN>_<COND>_ps.
   The lower and upper forms are subject to mips_builtin_avail_<AVAIL>
   while the any and all forms are subject to mips_builtin_avail_mips3d.  */
#define CMP_PS_BUILTINS(INSN, COND, AVAIL) \
  MIPS_BUILTIN (INSN ## _cond_ps, COND, "any_" #INSN "_" #COND "_ps", \
                MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF, \
                mips3d), \
  MIPS_BUILTIN (INSN ## _cond_ps, COND, "all_" #INSN "_" #COND "_ps", \
                MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF, \
                mips3d), \
  MIPS_BUILTIN (INSN ## _cond_ps, COND, "lower_" #INSN "_" #COND "_ps", \
                MIPS_BUILTIN_CMP_LOWER, MIPS_INT_FTYPE_V2SF_V2SF, \
                AVAIL), \
  MIPS_BUILTIN (INSN ## _cond_ps, COND, "upper_" #INSN "_" #COND "_ps", \
                MIPS_BUILTIN_CMP_UPPER, MIPS_INT_FTYPE_V2SF_V2SF, \
                AVAIL)

/* Define __builtin_mips_{any,all}_<INSN>_<COND>_4s.  The functions
   are subject to mips_builtin_avail_mips3d.  */
#define CMP_4S_BUILTINS(INSN, COND) \
  MIPS_BUILTIN (INSN ## _cond_4s, COND, "any_" #INSN "_" #COND "_4s", \
                MIPS_BUILTIN_CMP_ANY, \
                MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, mips3d), \
  MIPS_BUILTIN (INSN ## _cond_4s, COND, "all_" #INSN "_" #COND "_4s", \
                MIPS_BUILTIN_CMP_ALL, \
                MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, mips3d)

/* Define __builtin_mips_mov{t,f}_<INSN>_<COND>_ps.  The comparison
   instruction requires mips_builtin_avail_<AVAIL>.  */
#define MOVTF_BUILTINS(INSN, COND, AVAIL) \
  MIPS_BUILTIN (INSN ## _cond_ps, COND, "movt_" #INSN "_" #COND "_ps", \
                MIPS_BUILTIN_MOVT, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
                AVAIL), \
  MIPS_BUILTIN (INSN ## _cond_ps, COND, "movf_" #INSN "_" #COND "_ps", \
                MIPS_BUILTIN_MOVF, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
                AVAIL)

/* Define all the built-in functions related to C.cond.fmt condition COND.  */
#define CMP_BUILTINS(COND) \
  MOVTF_BUILTINS (c, COND, paired_single), \
  MOVTF_BUILTINS (cabs, COND, mips3d), \
  CMP_SCALAR_BUILTINS (cabs, COND, mips3d), \
  CMP_PS_BUILTINS (c, COND, paired_single), \
  CMP_PS_BUILTINS (cabs, COND, mips3d), \
  CMP_4S_BUILTINS (c, COND), \
  CMP_4S_BUILTINS (cabs, COND)
|
12589 |
|
|
|
12590 |
|
|
/* Define __builtin_mips_<INSN>, which is a MIPS_BUILTIN_DIRECT_NO_TARGET
|
12591 |
|
|
function mapped to instruction CODE_FOR_mips_<INSN>, FUNCTION_TYPE
|
12592 |
|
|
and AVAIL are as for MIPS_BUILTIN. */
|
12593 |
|
|
#define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, AVAIL) \
|
12594 |
|
|
MIPS_BUILTIN (INSN, f, #INSN, MIPS_BUILTIN_DIRECT_NO_TARGET, \
|
12595 |
|
|
FUNCTION_TYPE, AVAIL)
|
12596 |
|
|
|
12597 |
|
|
/* Define __builtin_mips_bposge<VALUE>. <VALUE> is 32 for the MIPS32 DSP
|
12598 |
|
|
branch instruction. AVAIL is as for MIPS_BUILTIN. */
|
12599 |
|
|
#define BPOSGE_BUILTIN(VALUE, AVAIL) \
|
12600 |
|
|
MIPS_BUILTIN (bposge, f, "bposge" #VALUE, \
|
12601 |
|
|
MIPS_BUILTIN_BPOSGE ## VALUE, MIPS_SI_FTYPE_VOID, AVAIL)
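
/* For illustration (added comment): BPOSGE_BUILTIN (32, dsp), as used in the
   table below, defines "__builtin_mips_bposge32" with builtin type
   MIPS_BUILTIN_BPOSGE32 and function type MIPS_SI_FTYPE_VOID, guarded by
   mips_builtin_avail_dsp.  */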

/* Define a Loongson MIPS_BUILTIN_DIRECT function __builtin_loongson_<FN_NAME>
   for instruction CODE_FOR_loongson_<INSN>.  FUNCTION_TYPE is a
   builtin_description field.  */
#define LOONGSON_BUILTIN_ALIAS(INSN, FN_NAME, FUNCTION_TYPE) \
  { CODE_FOR_loongson_ ## INSN, MIPS_FP_COND_f, \
    "__builtin_loongson_" #FN_NAME, MIPS_BUILTIN_DIRECT, \
    FUNCTION_TYPE, mips_builtin_avail_loongson }

/* Define a Loongson MIPS_BUILTIN_DIRECT function __builtin_loongson_<INSN>
   for instruction CODE_FOR_loongson_<INSN>.  FUNCTION_TYPE is a
   builtin_description field.  */
#define LOONGSON_BUILTIN(INSN, FUNCTION_TYPE) \
  LOONGSON_BUILTIN_ALIAS (INSN, INSN, FUNCTION_TYPE)

/* Like LOONGSON_BUILTIN, but add _<SUFFIX> to the end of the function name.
   We use functions of this form when the same insn can be usefully applied
   to more than one datatype.  */
#define LOONGSON_BUILTIN_SUFFIX(INSN, SUFFIX, FUNCTION_TYPE) \
  LOONGSON_BUILTIN_ALIAS (INSN, INSN ## _ ## SUFFIX, FUNCTION_TYPE)
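
/* Illustration (added comment): LOONGSON_BUILTIN_SUFFIX (paddw, u, ...) thus
   defines "__builtin_loongson_paddw_u" for CODE_FOR_loongson_paddw, while
   LOONGSON_BUILTIN_SUFFIX (paddw, s, ...) defines "__builtin_loongson_paddw_s"
   for the same instruction code.  */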

#define CODE_FOR_mips_sqrt_ps CODE_FOR_sqrtv2sf2
#define CODE_FOR_mips_addq_ph CODE_FOR_addv2hi3
#define CODE_FOR_mips_addu_qb CODE_FOR_addv4qi3
#define CODE_FOR_mips_subq_ph CODE_FOR_subv2hi3
#define CODE_FOR_mips_subu_qb CODE_FOR_subv4qi3
#define CODE_FOR_mips_mul_ph CODE_FOR_mulv2hi3

#define CODE_FOR_loongson_packsswh CODE_FOR_vec_pack_ssat_v2si
#define CODE_FOR_loongson_packsshb CODE_FOR_vec_pack_ssat_v4hi
#define CODE_FOR_loongson_packushb CODE_FOR_vec_pack_usat_v4hi
#define CODE_FOR_loongson_paddw CODE_FOR_addv2si3
#define CODE_FOR_loongson_paddh CODE_FOR_addv4hi3
#define CODE_FOR_loongson_paddb CODE_FOR_addv8qi3
#define CODE_FOR_loongson_paddsh CODE_FOR_ssaddv4hi3
#define CODE_FOR_loongson_paddsb CODE_FOR_ssaddv8qi3
#define CODE_FOR_loongson_paddush CODE_FOR_usaddv4hi3
#define CODE_FOR_loongson_paddusb CODE_FOR_usaddv8qi3
#define CODE_FOR_loongson_pmaxsh CODE_FOR_smaxv4hi3
#define CODE_FOR_loongson_pmaxub CODE_FOR_umaxv8qi3
#define CODE_FOR_loongson_pminsh CODE_FOR_sminv4hi3
#define CODE_FOR_loongson_pminub CODE_FOR_uminv8qi3
#define CODE_FOR_loongson_pmulhuh CODE_FOR_umulv4hi3_highpart
#define CODE_FOR_loongson_pmulhh CODE_FOR_smulv4hi3_highpart
#define CODE_FOR_loongson_psubw CODE_FOR_subv2si3
#define CODE_FOR_loongson_psubh CODE_FOR_subv4hi3
#define CODE_FOR_loongson_psubb CODE_FOR_subv8qi3
#define CODE_FOR_loongson_psubsh CODE_FOR_sssubv4hi3
#define CODE_FOR_loongson_psubsb CODE_FOR_sssubv8qi3
#define CODE_FOR_loongson_psubush CODE_FOR_ussubv4hi3
#define CODE_FOR_loongson_psubusb CODE_FOR_ussubv8qi3
#define CODE_FOR_loongson_punpckhbh CODE_FOR_vec_interleave_highv8qi
#define CODE_FOR_loongson_punpckhhw CODE_FOR_vec_interleave_highv4hi
#define CODE_FOR_loongson_punpckhwd CODE_FOR_vec_interleave_highv2si
#define CODE_FOR_loongson_punpcklbh CODE_FOR_vec_interleave_lowv8qi
#define CODE_FOR_loongson_punpcklhw CODE_FOR_vec_interleave_lowv4hi
#define CODE_FOR_loongson_punpcklwd CODE_FOR_vec_interleave_lowv2si

static const struct mips_builtin_description mips_builtins[] = {
  DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single),
  DIRECT_BUILTIN (pul_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single),
  DIRECT_BUILTIN (plu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single),
  DIRECT_BUILTIN (puu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, paired_single),
  DIRECT_BUILTIN (cvt_ps_s, MIPS_V2SF_FTYPE_SF_SF, paired_single),
  DIRECT_BUILTIN (cvt_s_pl, MIPS_SF_FTYPE_V2SF, paired_single),
  DIRECT_BUILTIN (cvt_s_pu, MIPS_SF_FTYPE_V2SF, paired_single),
  DIRECT_BUILTIN (abs_ps, MIPS_V2SF_FTYPE_V2SF, paired_single),

  DIRECT_BUILTIN (alnv_ps, MIPS_V2SF_FTYPE_V2SF_V2SF_INT, paired_single),
  DIRECT_BUILTIN (addr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, mips3d),
  DIRECT_BUILTIN (mulr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, mips3d),
  DIRECT_BUILTIN (cvt_pw_ps, MIPS_V2SF_FTYPE_V2SF, mips3d),
  DIRECT_BUILTIN (cvt_ps_pw, MIPS_V2SF_FTYPE_V2SF, mips3d),

  DIRECT_BUILTIN (recip1_s, MIPS_SF_FTYPE_SF, mips3d),
  DIRECT_BUILTIN (recip1_d, MIPS_DF_FTYPE_DF, mips3d),
  DIRECT_BUILTIN (recip1_ps, MIPS_V2SF_FTYPE_V2SF, mips3d),
  DIRECT_BUILTIN (recip2_s, MIPS_SF_FTYPE_SF_SF, mips3d),
  DIRECT_BUILTIN (recip2_d, MIPS_DF_FTYPE_DF_DF, mips3d),
  DIRECT_BUILTIN (recip2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, mips3d),

  DIRECT_BUILTIN (rsqrt1_s, MIPS_SF_FTYPE_SF, mips3d),
  DIRECT_BUILTIN (rsqrt1_d, MIPS_DF_FTYPE_DF, mips3d),
  DIRECT_BUILTIN (rsqrt1_ps, MIPS_V2SF_FTYPE_V2SF, mips3d),
  DIRECT_BUILTIN (rsqrt2_s, MIPS_SF_FTYPE_SF_SF, mips3d),
  DIRECT_BUILTIN (rsqrt2_d, MIPS_DF_FTYPE_DF_DF, mips3d),
  DIRECT_BUILTIN (rsqrt2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, mips3d),

  MIPS_FP_CONDITIONS (CMP_BUILTINS),

  /* Built-in functions for the SB-1 processor.  */
  DIRECT_BUILTIN (sqrt_ps, MIPS_V2SF_FTYPE_V2SF, sb1_paired_single),

  /* Built-in functions for the DSP ASE (32-bit and 64-bit).  */
  DIRECT_BUILTIN (addq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
  DIRECT_BUILTIN (addq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
  DIRECT_BUILTIN (addq_s_w, MIPS_SI_FTYPE_SI_SI, dsp),
  DIRECT_BUILTIN (addu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
  DIRECT_BUILTIN (addu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
  DIRECT_BUILTIN (subq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
  DIRECT_BUILTIN (subq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
  DIRECT_BUILTIN (subq_s_w, MIPS_SI_FTYPE_SI_SI, dsp),
  DIRECT_BUILTIN (subu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
  DIRECT_BUILTIN (subu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
  DIRECT_BUILTIN (addsc, MIPS_SI_FTYPE_SI_SI, dsp),
  DIRECT_BUILTIN (addwc, MIPS_SI_FTYPE_SI_SI, dsp),
  DIRECT_BUILTIN (modsub, MIPS_SI_FTYPE_SI_SI, dsp),
  DIRECT_BUILTIN (raddu_w_qb, MIPS_SI_FTYPE_V4QI, dsp),
  DIRECT_BUILTIN (absq_s_ph, MIPS_V2HI_FTYPE_V2HI, dsp),
  DIRECT_BUILTIN (absq_s_w, MIPS_SI_FTYPE_SI, dsp),
  DIRECT_BUILTIN (precrq_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, dsp),
  DIRECT_BUILTIN (precrq_ph_w, MIPS_V2HI_FTYPE_SI_SI, dsp),
  DIRECT_BUILTIN (precrq_rs_ph_w, MIPS_V2HI_FTYPE_SI_SI, dsp),
  DIRECT_BUILTIN (precrqu_s_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, dsp),
  DIRECT_BUILTIN (preceq_w_phl, MIPS_SI_FTYPE_V2HI, dsp),
  DIRECT_BUILTIN (preceq_w_phr, MIPS_SI_FTYPE_V2HI, dsp),
  DIRECT_BUILTIN (precequ_ph_qbl, MIPS_V2HI_FTYPE_V4QI, dsp),
  DIRECT_BUILTIN (precequ_ph_qbr, MIPS_V2HI_FTYPE_V4QI, dsp),
  DIRECT_BUILTIN (precequ_ph_qbla, MIPS_V2HI_FTYPE_V4QI, dsp),
  DIRECT_BUILTIN (precequ_ph_qbra, MIPS_V2HI_FTYPE_V4QI, dsp),
  DIRECT_BUILTIN (preceu_ph_qbl, MIPS_V2HI_FTYPE_V4QI, dsp),
  DIRECT_BUILTIN (preceu_ph_qbr, MIPS_V2HI_FTYPE_V4QI, dsp),
  DIRECT_BUILTIN (preceu_ph_qbla, MIPS_V2HI_FTYPE_V4QI, dsp),
  DIRECT_BUILTIN (preceu_ph_qbra, MIPS_V2HI_FTYPE_V4QI, dsp),
  DIRECT_BUILTIN (shll_qb, MIPS_V4QI_FTYPE_V4QI_SI, dsp),
  DIRECT_BUILTIN (shll_ph, MIPS_V2HI_FTYPE_V2HI_SI, dsp),
  DIRECT_BUILTIN (shll_s_ph, MIPS_V2HI_FTYPE_V2HI_SI, dsp),
  DIRECT_BUILTIN (shll_s_w, MIPS_SI_FTYPE_SI_SI, dsp),
  DIRECT_BUILTIN (shrl_qb, MIPS_V4QI_FTYPE_V4QI_SI, dsp),
  DIRECT_BUILTIN (shra_ph, MIPS_V2HI_FTYPE_V2HI_SI, dsp),
  DIRECT_BUILTIN (shra_r_ph, MIPS_V2HI_FTYPE_V2HI_SI, dsp),
  DIRECT_BUILTIN (shra_r_w, MIPS_SI_FTYPE_SI_SI, dsp),
  DIRECT_BUILTIN (muleu_s_ph_qbl, MIPS_V2HI_FTYPE_V4QI_V2HI, dsp),
  DIRECT_BUILTIN (muleu_s_ph_qbr, MIPS_V2HI_FTYPE_V4QI_V2HI, dsp),
  DIRECT_BUILTIN (mulq_rs_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
  DIRECT_BUILTIN (muleq_s_w_phl, MIPS_SI_FTYPE_V2HI_V2HI, dsp),
  DIRECT_BUILTIN (muleq_s_w_phr, MIPS_SI_FTYPE_V2HI_V2HI, dsp),
  DIRECT_BUILTIN (bitrev, MIPS_SI_FTYPE_SI, dsp),
  DIRECT_BUILTIN (insv, MIPS_SI_FTYPE_SI_SI, dsp),
  DIRECT_BUILTIN (repl_qb, MIPS_V4QI_FTYPE_SI, dsp),
  DIRECT_BUILTIN (repl_ph, MIPS_V2HI_FTYPE_SI, dsp),
  DIRECT_NO_TARGET_BUILTIN (cmpu_eq_qb, MIPS_VOID_FTYPE_V4QI_V4QI, dsp),
  DIRECT_NO_TARGET_BUILTIN (cmpu_lt_qb, MIPS_VOID_FTYPE_V4QI_V4QI, dsp),
  DIRECT_NO_TARGET_BUILTIN (cmpu_le_qb, MIPS_VOID_FTYPE_V4QI_V4QI, dsp),
  DIRECT_BUILTIN (cmpgu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, dsp),
  DIRECT_BUILTIN (cmpgu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, dsp),
  DIRECT_BUILTIN (cmpgu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, dsp),
  DIRECT_NO_TARGET_BUILTIN (cmp_eq_ph, MIPS_VOID_FTYPE_V2HI_V2HI, dsp),
  DIRECT_NO_TARGET_BUILTIN (cmp_lt_ph, MIPS_VOID_FTYPE_V2HI_V2HI, dsp),
  DIRECT_NO_TARGET_BUILTIN (cmp_le_ph, MIPS_VOID_FTYPE_V2HI_V2HI, dsp),
  DIRECT_BUILTIN (pick_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dsp),
  DIRECT_BUILTIN (pick_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
  DIRECT_BUILTIN (packrl_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dsp),
  DIRECT_NO_TARGET_BUILTIN (wrdsp, MIPS_VOID_FTYPE_SI_SI, dsp),
  DIRECT_BUILTIN (rddsp, MIPS_SI_FTYPE_SI, dsp),
  DIRECT_BUILTIN (lbux, MIPS_SI_FTYPE_POINTER_SI, dsp),
  DIRECT_BUILTIN (lhx, MIPS_SI_FTYPE_POINTER_SI, dsp),
  DIRECT_BUILTIN (lwx, MIPS_SI_FTYPE_POINTER_SI, dsp),
  BPOSGE_BUILTIN (32, dsp),

  /* The following are for the MIPS DSP ASE REV 2 (32-bit and 64-bit).  */
  DIRECT_BUILTIN (absq_s_qb, MIPS_V4QI_FTYPE_V4QI, dspr2),
  DIRECT_BUILTIN (addu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
  DIRECT_BUILTIN (addu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
  DIRECT_BUILTIN (adduh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dspr2),
  DIRECT_BUILTIN (adduh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dspr2),
  DIRECT_BUILTIN (append, MIPS_SI_FTYPE_SI_SI_SI, dspr2),
  DIRECT_BUILTIN (balign, MIPS_SI_FTYPE_SI_SI_SI, dspr2),
  DIRECT_BUILTIN (cmpgdu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, dspr2),
  DIRECT_BUILTIN (cmpgdu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, dspr2),
  DIRECT_BUILTIN (cmpgdu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, dspr2),
  DIRECT_BUILTIN (mul_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
  DIRECT_BUILTIN (mul_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
  DIRECT_BUILTIN (mulq_rs_w, MIPS_SI_FTYPE_SI_SI, dspr2),
  DIRECT_BUILTIN (mulq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
  DIRECT_BUILTIN (mulq_s_w, MIPS_SI_FTYPE_SI_SI, dspr2),
  DIRECT_BUILTIN (precr_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, dspr2),
  DIRECT_BUILTIN (precr_sra_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, dspr2),
  DIRECT_BUILTIN (precr_sra_r_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, dspr2),
  DIRECT_BUILTIN (prepend, MIPS_SI_FTYPE_SI_SI_SI, dspr2),
  DIRECT_BUILTIN (shra_qb, MIPS_V4QI_FTYPE_V4QI_SI, dspr2),
  DIRECT_BUILTIN (shra_r_qb, MIPS_V4QI_FTYPE_V4QI_SI, dspr2),
  DIRECT_BUILTIN (shrl_ph, MIPS_V2HI_FTYPE_V2HI_SI, dspr2),
  DIRECT_BUILTIN (subu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
  DIRECT_BUILTIN (subu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
  DIRECT_BUILTIN (subuh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dspr2),
  DIRECT_BUILTIN (subuh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, dspr2),
  DIRECT_BUILTIN (addqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
  DIRECT_BUILTIN (addqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
  DIRECT_BUILTIN (addqh_w, MIPS_SI_FTYPE_SI_SI, dspr2),
  DIRECT_BUILTIN (addqh_r_w, MIPS_SI_FTYPE_SI_SI, dspr2),
  DIRECT_BUILTIN (subqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
  DIRECT_BUILTIN (subqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, dspr2),
  DIRECT_BUILTIN (subqh_w, MIPS_SI_FTYPE_SI_SI, dspr2),
  DIRECT_BUILTIN (subqh_r_w, MIPS_SI_FTYPE_SI_SI, dspr2),

  /* Built-in functions for the DSP ASE (32-bit only).  */
  DIRECT_BUILTIN (dpau_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, dsp_32),
  DIRECT_BUILTIN (dpau_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, dsp_32),
  DIRECT_BUILTIN (dpsu_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, dsp_32),
  DIRECT_BUILTIN (dpsu_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, dsp_32),
  DIRECT_BUILTIN (dpaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
  DIRECT_BUILTIN (dpsq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
  DIRECT_BUILTIN (mulsaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
  DIRECT_BUILTIN (dpaq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, dsp_32),
  DIRECT_BUILTIN (dpsq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, dsp_32),
  DIRECT_BUILTIN (maq_s_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
  DIRECT_BUILTIN (maq_s_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
  DIRECT_BUILTIN (maq_sa_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
  DIRECT_BUILTIN (maq_sa_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, dsp_32),
  DIRECT_BUILTIN (extr_w, MIPS_SI_FTYPE_DI_SI, dsp_32),
  DIRECT_BUILTIN (extr_r_w, MIPS_SI_FTYPE_DI_SI, dsp_32),
  DIRECT_BUILTIN (extr_rs_w, MIPS_SI_FTYPE_DI_SI, dsp_32),
  DIRECT_BUILTIN (extr_s_h, MIPS_SI_FTYPE_DI_SI, dsp_32),
  DIRECT_BUILTIN (extp, MIPS_SI_FTYPE_DI_SI, dsp_32),
  DIRECT_BUILTIN (extpdp, MIPS_SI_FTYPE_DI_SI, dsp_32),
  DIRECT_BUILTIN (shilo, MIPS_DI_FTYPE_DI_SI, dsp_32),
  DIRECT_BUILTIN (mthlip, MIPS_DI_FTYPE_DI_SI, dsp_32),

  /* The following are for the MIPS DSP ASE REV 2 (32-bit only).  */
  DIRECT_BUILTIN (dpa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
  DIRECT_BUILTIN (dps_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
  DIRECT_BUILTIN (madd, MIPS_DI_FTYPE_DI_SI_SI, dspr2_32),
  DIRECT_BUILTIN (maddu, MIPS_DI_FTYPE_DI_USI_USI, dspr2_32),
  DIRECT_BUILTIN (msub, MIPS_DI_FTYPE_DI_SI_SI, dspr2_32),
  DIRECT_BUILTIN (msubu, MIPS_DI_FTYPE_DI_USI_USI, dspr2_32),
  DIRECT_BUILTIN (mulsa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
  DIRECT_BUILTIN (mult, MIPS_DI_FTYPE_SI_SI, dspr2_32),
  DIRECT_BUILTIN (multu, MIPS_DI_FTYPE_USI_USI, dspr2_32),
  DIRECT_BUILTIN (dpax_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
  DIRECT_BUILTIN (dpsx_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
  DIRECT_BUILTIN (dpaqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
  DIRECT_BUILTIN (dpaqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
  DIRECT_BUILTIN (dpsqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),
  DIRECT_BUILTIN (dpsqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, dspr2_32),

  /* Builtin functions for ST Microelectronics Loongson-2E/2F cores.  */
  LOONGSON_BUILTIN (packsswh, MIPS_V4HI_FTYPE_V2SI_V2SI),
  LOONGSON_BUILTIN (packsshb, MIPS_V8QI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN (packushb, MIPS_UV8QI_FTYPE_UV4HI_UV4HI),
  LOONGSON_BUILTIN_SUFFIX (paddw, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
  LOONGSON_BUILTIN_SUFFIX (paddh, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
  LOONGSON_BUILTIN_SUFFIX (paddb, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
  LOONGSON_BUILTIN_SUFFIX (paddw, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
  LOONGSON_BUILTIN_SUFFIX (paddh, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN_SUFFIX (paddb, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
  LOONGSON_BUILTIN_SUFFIX (paddd, u, MIPS_UDI_FTYPE_UDI_UDI),
  LOONGSON_BUILTIN_SUFFIX (paddd, s, MIPS_DI_FTYPE_DI_DI),
  LOONGSON_BUILTIN (paddsh, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN (paddsb, MIPS_V8QI_FTYPE_V8QI_V8QI),
  LOONGSON_BUILTIN (paddush, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
  LOONGSON_BUILTIN (paddusb, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
  LOONGSON_BUILTIN_ALIAS (pandn_d, pandn_ud, MIPS_UDI_FTYPE_UDI_UDI),
  LOONGSON_BUILTIN_ALIAS (pandn_w, pandn_uw, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
  LOONGSON_BUILTIN_ALIAS (pandn_h, pandn_uh, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
  LOONGSON_BUILTIN_ALIAS (pandn_b, pandn_ub, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
  LOONGSON_BUILTIN_ALIAS (pandn_d, pandn_sd, MIPS_DI_FTYPE_DI_DI),
  LOONGSON_BUILTIN_ALIAS (pandn_w, pandn_sw, MIPS_V2SI_FTYPE_V2SI_V2SI),
  LOONGSON_BUILTIN_ALIAS (pandn_h, pandn_sh, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN_ALIAS (pandn_b, pandn_sb, MIPS_V8QI_FTYPE_V8QI_V8QI),
  LOONGSON_BUILTIN (pavgh, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
  LOONGSON_BUILTIN (pavgb, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
  LOONGSON_BUILTIN_SUFFIX (pcmpeqw, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
  LOONGSON_BUILTIN_SUFFIX (pcmpeqh, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
  LOONGSON_BUILTIN_SUFFIX (pcmpeqb, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
  LOONGSON_BUILTIN_SUFFIX (pcmpeqw, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
  LOONGSON_BUILTIN_SUFFIX (pcmpeqh, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN_SUFFIX (pcmpeqb, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
  LOONGSON_BUILTIN_SUFFIX (pcmpgtw, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
  LOONGSON_BUILTIN_SUFFIX (pcmpgth, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
  LOONGSON_BUILTIN_SUFFIX (pcmpgtb, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
  LOONGSON_BUILTIN_SUFFIX (pcmpgtw, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
  LOONGSON_BUILTIN_SUFFIX (pcmpgth, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN_SUFFIX (pcmpgtb, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
  LOONGSON_BUILTIN_SUFFIX (pextrh, u, MIPS_UV4HI_FTYPE_UV4HI_USI),
  LOONGSON_BUILTIN_SUFFIX (pextrh, s, MIPS_V4HI_FTYPE_V4HI_USI),
  LOONGSON_BUILTIN_SUFFIX (pinsrh_0, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
  LOONGSON_BUILTIN_SUFFIX (pinsrh_1, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
  LOONGSON_BUILTIN_SUFFIX (pinsrh_2, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
  LOONGSON_BUILTIN_SUFFIX (pinsrh_3, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
  LOONGSON_BUILTIN_SUFFIX (pinsrh_0, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN_SUFFIX (pinsrh_1, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN_SUFFIX (pinsrh_2, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN_SUFFIX (pinsrh_3, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN (pmaddhw, MIPS_V2SI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN (pmaxsh, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN (pmaxub, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
  LOONGSON_BUILTIN (pminsh, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN (pminub, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
  LOONGSON_BUILTIN_SUFFIX (pmovmskb, u, MIPS_UV8QI_FTYPE_UV8QI),
  LOONGSON_BUILTIN_SUFFIX (pmovmskb, s, MIPS_V8QI_FTYPE_V8QI),
  LOONGSON_BUILTIN (pmulhuh, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
  LOONGSON_BUILTIN (pmulhh, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN (pmullh, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN (pmuluw, MIPS_UDI_FTYPE_UV2SI_UV2SI),
  LOONGSON_BUILTIN (pasubub, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
  LOONGSON_BUILTIN (biadd, MIPS_UV4HI_FTYPE_UV8QI),
  LOONGSON_BUILTIN (psadbh, MIPS_UV4HI_FTYPE_UV8QI_UV8QI),
  LOONGSON_BUILTIN_SUFFIX (pshufh, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI_UQI),
  LOONGSON_BUILTIN_SUFFIX (pshufh, s, MIPS_V4HI_FTYPE_V4HI_V4HI_UQI),
  LOONGSON_BUILTIN_SUFFIX (psllh, u, MIPS_UV4HI_FTYPE_UV4HI_UQI),
  LOONGSON_BUILTIN_SUFFIX (psllh, s, MIPS_V4HI_FTYPE_V4HI_UQI),
  LOONGSON_BUILTIN_SUFFIX (psllw, u, MIPS_UV2SI_FTYPE_UV2SI_UQI),
  LOONGSON_BUILTIN_SUFFIX (psllw, s, MIPS_V2SI_FTYPE_V2SI_UQI),
  LOONGSON_BUILTIN_SUFFIX (psrah, u, MIPS_UV4HI_FTYPE_UV4HI_UQI),
  LOONGSON_BUILTIN_SUFFIX (psrah, s, MIPS_V4HI_FTYPE_V4HI_UQI),
  LOONGSON_BUILTIN_SUFFIX (psraw, u, MIPS_UV2SI_FTYPE_UV2SI_UQI),
  LOONGSON_BUILTIN_SUFFIX (psraw, s, MIPS_V2SI_FTYPE_V2SI_UQI),
  LOONGSON_BUILTIN_SUFFIX (psrlh, u, MIPS_UV4HI_FTYPE_UV4HI_UQI),
  LOONGSON_BUILTIN_SUFFIX (psrlh, s, MIPS_V4HI_FTYPE_V4HI_UQI),
  LOONGSON_BUILTIN_SUFFIX (psrlw, u, MIPS_UV2SI_FTYPE_UV2SI_UQI),
  LOONGSON_BUILTIN_SUFFIX (psrlw, s, MIPS_V2SI_FTYPE_V2SI_UQI),
  LOONGSON_BUILTIN_SUFFIX (psubw, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
  LOONGSON_BUILTIN_SUFFIX (psubh, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
  LOONGSON_BUILTIN_SUFFIX (psubb, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
  LOONGSON_BUILTIN_SUFFIX (psubw, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
  LOONGSON_BUILTIN_SUFFIX (psubh, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN_SUFFIX (psubb, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
  LOONGSON_BUILTIN_SUFFIX (psubd, u, MIPS_UDI_FTYPE_UDI_UDI),
  LOONGSON_BUILTIN_SUFFIX (psubd, s, MIPS_DI_FTYPE_DI_DI),
  LOONGSON_BUILTIN (psubsh, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN (psubsb, MIPS_V8QI_FTYPE_V8QI_V8QI),
  LOONGSON_BUILTIN (psubush, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
  LOONGSON_BUILTIN (psubusb, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
  LOONGSON_BUILTIN_SUFFIX (punpckhbh, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
  LOONGSON_BUILTIN_SUFFIX (punpckhhw, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
  LOONGSON_BUILTIN_SUFFIX (punpckhwd, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
  LOONGSON_BUILTIN_SUFFIX (punpckhbh, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
  LOONGSON_BUILTIN_SUFFIX (punpckhhw, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN_SUFFIX (punpckhwd, s, MIPS_V2SI_FTYPE_V2SI_V2SI),
  LOONGSON_BUILTIN_SUFFIX (punpcklbh, u, MIPS_UV8QI_FTYPE_UV8QI_UV8QI),
  LOONGSON_BUILTIN_SUFFIX (punpcklhw, u, MIPS_UV4HI_FTYPE_UV4HI_UV4HI),
  LOONGSON_BUILTIN_SUFFIX (punpcklwd, u, MIPS_UV2SI_FTYPE_UV2SI_UV2SI),
  LOONGSON_BUILTIN_SUFFIX (punpcklbh, s, MIPS_V8QI_FTYPE_V8QI_V8QI),
  LOONGSON_BUILTIN_SUFFIX (punpcklhw, s, MIPS_V4HI_FTYPE_V4HI_V4HI),
  LOONGSON_BUILTIN_SUFFIX (punpcklwd, s, MIPS_V2SI_FTYPE_V2SI_V2SI),

  /* Sundry other built-in functions.  */
  DIRECT_NO_TARGET_BUILTIN (cache, MIPS_VOID_FTYPE_SI_CVPOINTER, cache)
};

/* MODE is a vector mode whose elements have type TYPE.  Return the type
   of the vector itself.  */

static tree
mips_builtin_vector_type (tree type, enum machine_mode mode)
{
  static tree types[2 * (int) MAX_MACHINE_MODE];
  int mode_index;

  mode_index = (int) mode;

  if (TREE_CODE (type) == INTEGER_TYPE && TYPE_UNSIGNED (type))
    mode_index += MAX_MACHINE_MODE;

  if (types[mode_index] == NULL_TREE)
    types[mode_index] = build_vector_type_for_mode (type, mode);
  return types[mode_index];
}

/* Return a type for 'const volatile void *'.  */

static tree
mips_build_cvpointer_type (void)
{
  static tree cache;

  if (cache == NULL_TREE)
    cache = build_pointer_type (build_qualified_type
                                (void_type_node,
                                 TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE));
  return cache;
}

/* Source-level argument types.  */
#define MIPS_ATYPE_VOID void_type_node
#define MIPS_ATYPE_INT integer_type_node
#define MIPS_ATYPE_POINTER ptr_type_node
#define MIPS_ATYPE_CVPOINTER mips_build_cvpointer_type ()

/* Standard mode-based argument types.  */
#define MIPS_ATYPE_UQI unsigned_intQI_type_node
#define MIPS_ATYPE_SI intSI_type_node
#define MIPS_ATYPE_USI unsigned_intSI_type_node
#define MIPS_ATYPE_DI intDI_type_node
#define MIPS_ATYPE_UDI unsigned_intDI_type_node
#define MIPS_ATYPE_SF float_type_node
#define MIPS_ATYPE_DF double_type_node

/* Vector argument types.  */
#define MIPS_ATYPE_V2SF mips_builtin_vector_type (float_type_node, V2SFmode)
#define MIPS_ATYPE_V2HI mips_builtin_vector_type (intHI_type_node, V2HImode)
#define MIPS_ATYPE_V2SI mips_builtin_vector_type (intSI_type_node, V2SImode)
#define MIPS_ATYPE_V4QI mips_builtin_vector_type (intQI_type_node, V4QImode)
#define MIPS_ATYPE_V4HI mips_builtin_vector_type (intHI_type_node, V4HImode)
#define MIPS_ATYPE_V8QI mips_builtin_vector_type (intQI_type_node, V8QImode)
#define MIPS_ATYPE_UV2SI \
  mips_builtin_vector_type (unsigned_intSI_type_node, V2SImode)
#define MIPS_ATYPE_UV4HI \
  mips_builtin_vector_type (unsigned_intHI_type_node, V4HImode)
#define MIPS_ATYPE_UV8QI \
  mips_builtin_vector_type (unsigned_intQI_type_node, V8QImode)

/* MIPS_FTYPE_ATYPESN takes N MIPS_FTYPES-like type codes and lists
   their associated MIPS_ATYPEs.  */
#define MIPS_FTYPE_ATYPES1(A, B) \
  MIPS_ATYPE_##A, MIPS_ATYPE_##B

#define MIPS_FTYPE_ATYPES2(A, B, C) \
  MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C

#define MIPS_FTYPE_ATYPES3(A, B, C, D) \
  MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C, MIPS_ATYPE_##D

#define MIPS_FTYPE_ATYPES4(A, B, C, D, E) \
  MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C, MIPS_ATYPE_##D, \
  MIPS_ATYPE_##E

/* Return the function type associated with function prototype TYPE.  */

static tree
mips_build_function_type (enum mips_function_type type)
{
  static tree types[(int) MIPS_MAX_FTYPE_MAX];

  if (types[(int) type] == NULL_TREE)
    switch (type)
      {
#define DEF_MIPS_FTYPE(NUM, ARGS) \
  case MIPS_FTYPE_NAME##NUM ARGS: \
    types[(int) type] \
      = build_function_type_list (MIPS_FTYPE_ATYPES##NUM ARGS, \
                                  NULL_TREE); \
    break;
#include "config/mips/mips-ftypes.def"
#undef DEF_MIPS_FTYPE
      default:
        gcc_unreachable ();
      }

  return types[(int) type];
}
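
/* Worked example (added for illustration; it assumes mips-ftypes.def carries
   an entry of the form DEF_MIPS_FTYPE (2, (SI, DI, SI))): that case would
   build its function type with

     build_function_type_list (intSI_type_node, intDI_type_node,
                               intSI_type_node, NULL_TREE);

   i.e. the return type comes first, followed by the argument types.  */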

/* Implement TARGET_INIT_BUILTINS.  */

static void
mips_init_builtins (void)
{
  const struct mips_builtin_description *d;
  unsigned int i;

  /* Iterate through all of the bdesc arrays, initializing all of the
     builtin functions.  */
  for (i = 0; i < ARRAY_SIZE (mips_builtins); i++)
    {
      d = &mips_builtins[i];
      if (d->avail ())
        add_builtin_function (d->name,
                              mips_build_function_type (d->function_type),
                              i, BUILT_IN_MD, NULL, NULL);
    }
}
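
/* Example usage (illustration only, not part of GCC itself): with
   -mpaired-single, user code such as

     typedef float v2sf __attribute__ ((vector_size (8)));
     v2sf f (v2sf a, v2sf b) { return __builtin_mips_pll_ps (a, b); }

   resolves to one of the entries registered above and is expanded by
   mips_expand_builtin below.  */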

/* Take argument ARGNO from EXP's argument list and convert it into a
   form suitable for input operand OPNO of instruction ICODE.  Return the
   value.  */

static rtx
mips_prepare_builtin_arg (enum insn_code icode,
                          unsigned int opno, tree exp, unsigned int argno)
{
  tree arg;
  rtx value;
  enum machine_mode mode;

  arg = CALL_EXPR_ARG (exp, argno);
  value = expand_normal (arg);
  mode = insn_data[icode].operand[opno].mode;
  if (!insn_data[icode].operand[opno].predicate (value, mode))
    {
      /* We need to get the mode from ARG for two reasons:

           - to cope with address operands, where MODE is the mode of the
             memory, rather than of VALUE itself.

           - to cope with special predicates like pmode_register_operand,
             where MODE is VOIDmode.  */
      value = copy_to_mode_reg (TYPE_MODE (TREE_TYPE (arg)), value);

      /* Check the predicate again.  */
      if (!insn_data[icode].operand[opno].predicate (value, mode))
        {
          error ("invalid argument to built-in function");
          return const0_rtx;
        }
    }

  return value;
}

/* Return an rtx suitable for output operand OP of instruction ICODE.
   If TARGET is non-null, try to use it where possible.  */

static rtx
mips_prepare_builtin_target (enum insn_code icode, unsigned int op, rtx target)
{
  enum machine_mode mode;

  mode = insn_data[icode].operand[op].mode;
  if (target == 0 || !insn_data[icode].operand[op].predicate (target, mode))
    target = gen_reg_rtx (mode);

  return target;
}

/* Expand a MIPS_BUILTIN_DIRECT or MIPS_BUILTIN_DIRECT_NO_TARGET function;
   HAS_TARGET_P says which.  EXP is the CALL_EXPR that calls the function
   and ICODE is the code of the associated .md pattern.  TARGET, if nonnull,
   suggests a good place to put the result.  */

static rtx
mips_expand_builtin_direct (enum insn_code icode, rtx target, tree exp,
                            bool has_target_p)
{
  rtx ops[MAX_RECOG_OPERANDS];
  int opno, argno;

  /* Map any target to operand 0.  */
  opno = 0;
  if (has_target_p)
    {
      target = mips_prepare_builtin_target (icode, opno, target);
      ops[opno] = target;
      opno++;
    }

  /* Map the arguments to the other operands.  The n_operands value
     for an expander includes match_dups and match_scratches as well as
     match_operands, so n_operands is only an upper bound on the number
     of arguments to the expander function.  */
  gcc_assert (opno + call_expr_nargs (exp) <= insn_data[icode].n_operands);
  for (argno = 0; argno < call_expr_nargs (exp); argno++, opno++)
    ops[opno] = mips_prepare_builtin_arg (icode, opno, exp, argno);

  switch (opno)
    {
    case 2:
      emit_insn (GEN_FCN (icode) (ops[0], ops[1]));
      break;

    case 3:
      emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2]));
      break;

    case 4:
      emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3]));
      break;

    default:
      gcc_unreachable ();
    }
  return target;
}

/* Expand a __builtin_mips_movt_*_ps or __builtin_mips_movf_*_ps
   function; TYPE says which.  EXP is the CALL_EXPR that calls the
   function, ICODE is the instruction that should be used to compare
   the first two arguments, and COND is the condition it should test.
   TARGET, if nonnull, suggests a good place to put the result.  */

static rtx
mips_expand_builtin_movtf (enum mips_builtin_type type,
                           enum insn_code icode, enum mips_fp_condition cond,
                           rtx target, tree exp)
{
  rtx cmp_result, op0, op1;

  cmp_result = mips_prepare_builtin_target (icode, 0, 0);
  op0 = mips_prepare_builtin_arg (icode, 1, exp, 0);
  op1 = mips_prepare_builtin_arg (icode, 2, exp, 1);
  emit_insn (GEN_FCN (icode) (cmp_result, op0, op1, GEN_INT (cond)));

  icode = CODE_FOR_mips_cond_move_tf_ps;
  target = mips_prepare_builtin_target (icode, 0, target);
  if (type == MIPS_BUILTIN_MOVT)
    {
      op1 = mips_prepare_builtin_arg (icode, 2, exp, 2);
      op0 = mips_prepare_builtin_arg (icode, 1, exp, 3);
    }
  else
    {
      op0 = mips_prepare_builtin_arg (icode, 1, exp, 2);
      op1 = mips_prepare_builtin_arg (icode, 2, exp, 3);
    }
  emit_insn (gen_mips_cond_move_tf_ps (target, op0, op1, cmp_result));
  return target;
}

/* Move VALUE_IF_TRUE into TARGET if CONDITION is true; move VALUE_IF_FALSE
   into TARGET otherwise.  Return TARGET.  */

static rtx
mips_builtin_branch_and_move (rtx condition, rtx target,
                              rtx value_if_true, rtx value_if_false)
{
  rtx true_label, done_label;

  true_label = gen_label_rtx ();
  done_label = gen_label_rtx ();

  /* First assume that CONDITION is false.  */
  mips_emit_move (target, value_if_false);

  /* Branch to TRUE_LABEL if CONDITION is true and DONE_LABEL otherwise.  */
  emit_jump_insn (gen_condjump (condition, true_label));
  emit_jump_insn (gen_jump (done_label));
  emit_barrier ();

  /* Fix TARGET if CONDITION is true.  */
  emit_label (true_label);
  mips_emit_move (target, value_if_true);

  emit_label (done_label);
  return target;
}
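
/* Illustrative sketch (added comment): the sequence emitted above is

       move   TARGET, VALUE_IF_FALSE
       branch to true_label if CONDITION
       jump   done_label
     true_label:
       move   TARGET, VALUE_IF_TRUE
     done_label:

   so TARGET always ends up holding exactly one of the two values.  */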

/* Expand a comparison built-in function of type BUILTIN_TYPE.  EXP is
   the CALL_EXPR that calls the function, ICODE is the code of the
   comparison instruction, and COND is the condition it should test.
   TARGET, if nonnull, suggests a good place to put the boolean result.  */

static rtx
mips_expand_builtin_compare (enum mips_builtin_type builtin_type,
                             enum insn_code icode, enum mips_fp_condition cond,
                             rtx target, tree exp)
{
  rtx offset, condition, cmp_result, args[MAX_RECOG_OPERANDS];
  int argno;

  if (target == 0 || GET_MODE (target) != SImode)
    target = gen_reg_rtx (SImode);

  /* The instruction should have a target operand, an operand for each
     argument, and an operand for COND.  */
  gcc_assert (call_expr_nargs (exp) + 2 == insn_data[icode].n_operands);

  /* Prepare the operands to the comparison.  */
  cmp_result = mips_prepare_builtin_target (icode, 0, 0);
  for (argno = 0; argno < call_expr_nargs (exp); argno++)
    args[argno] = mips_prepare_builtin_arg (icode, argno + 1, exp, argno);

  switch (insn_data[icode].n_operands)
    {
    case 4:
      emit_insn (GEN_FCN (icode) (cmp_result, args[0], args[1],
                                  GEN_INT (cond)));
      break;

    case 6:
      emit_insn (GEN_FCN (icode) (cmp_result, args[0], args[1],
                                  args[2], args[3], GEN_INT (cond)));
      break;

    default:
      gcc_unreachable ();
    }

  /* If the comparison sets more than one register, we define the result
     to be 0 if all registers are false and -1 if all registers are true.
     The value of the complete result is indeterminate otherwise.  */
  switch (builtin_type)
    {
    case MIPS_BUILTIN_CMP_ALL:
      condition = gen_rtx_NE (VOIDmode, cmp_result, constm1_rtx);
      return mips_builtin_branch_and_move (condition, target,
                                           const0_rtx, const1_rtx);

    case MIPS_BUILTIN_CMP_UPPER:
    case MIPS_BUILTIN_CMP_LOWER:
      offset = GEN_INT (builtin_type == MIPS_BUILTIN_CMP_UPPER);
      condition = gen_single_cc (cmp_result, offset);
      return mips_builtin_branch_and_move (condition, target,
                                           const1_rtx, const0_rtx);

    default:
      condition = gen_rtx_NE (VOIDmode, cmp_result, const0_rtx);
      return mips_builtin_branch_and_move (condition, target,
                                           const1_rtx, const0_rtx);
    }
}

/* Expand a bposge built-in function of type BUILTIN_TYPE.  TARGET,
   if nonnull, suggests a good place to put the boolean result.  */

static rtx
mips_expand_builtin_bposge (enum mips_builtin_type builtin_type, rtx target)
{
  rtx condition, cmp_result;
  int cmp_value;

  if (target == 0 || GET_MODE (target) != SImode)
    target = gen_reg_rtx (SImode);

  cmp_result = gen_rtx_REG (CCDSPmode, CCDSP_PO_REGNUM);

  if (builtin_type == MIPS_BUILTIN_BPOSGE32)
    cmp_value = 32;
  else
    gcc_assert (0);

  condition = gen_rtx_GE (VOIDmode, cmp_result, GEN_INT (cmp_value));
  return mips_builtin_branch_and_move (condition, target,
                                       const1_rtx, const0_rtx);
}
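
/* For illustration (added comment): __builtin_mips_bposge32 () therefore
   evaluates to 1 when the DSP POS field read through the CCDSP condition
   register above is >= 32, and to 0 otherwise.  */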

/* Implement TARGET_EXPAND_BUILTIN.  */

static rtx
mips_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
                     enum machine_mode mode, int ignore)
{
  tree fndecl;
  unsigned int fcode, avail;
  const struct mips_builtin_description *d;

  fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  fcode = DECL_FUNCTION_CODE (fndecl);
  gcc_assert (fcode < ARRAY_SIZE (mips_builtins));
  d = &mips_builtins[fcode];
  avail = d->avail ();
  gcc_assert (avail != 0);
  if (TARGET_MIPS16)
    {
      error ("built-in function %qE not supported for MIPS16",
             DECL_NAME (fndecl));
      return ignore ? const0_rtx : CONST0_RTX (mode);
    }
  switch (d->builtin_type)
    {
    case MIPS_BUILTIN_DIRECT:
      return mips_expand_builtin_direct (d->icode, target, exp, true);

    case MIPS_BUILTIN_DIRECT_NO_TARGET:
      return mips_expand_builtin_direct (d->icode, target, exp, false);

    case MIPS_BUILTIN_MOVT:
    case MIPS_BUILTIN_MOVF:
      return mips_expand_builtin_movtf (d->builtin_type, d->icode,
                                        d->cond, target, exp);

    case MIPS_BUILTIN_CMP_ANY:
    case MIPS_BUILTIN_CMP_ALL:
    case MIPS_BUILTIN_CMP_UPPER:
    case MIPS_BUILTIN_CMP_LOWER:
    case MIPS_BUILTIN_CMP_SINGLE:
      return mips_expand_builtin_compare (d->builtin_type, d->icode,
                                          d->cond, target, exp);

    case MIPS_BUILTIN_BPOSGE32:
      return mips_expand_builtin_bposge (d->builtin_type, target);
    }
  gcc_unreachable ();
}

/* An entry in the MIPS16 constant pool.  VALUE is the pool constant,
   MODE is its mode, and LABEL is the CODE_LABEL associated with it.  */
struct mips16_constant {
  struct mips16_constant *next;
  rtx value;
  rtx label;
  enum machine_mode mode;
};

/* Information about an incomplete MIPS16 constant pool.  FIRST is the
   first constant, HIGHEST_ADDRESS is the highest address that the first
   byte of the pool can have, and INSN_ADDRESS is the current instruction
   address.  */
struct mips16_constant_pool {
  struct mips16_constant *first;
  int highest_address;
  int insn_address;
};

/* Add constant VALUE to POOL and return its label.  MODE is the
   value's mode (used for CONST_INTs, etc.).  */

static rtx
mips16_add_constant (struct mips16_constant_pool *pool,
                     rtx value, enum machine_mode mode)
{
  struct mips16_constant **p, *c;
  bool first_of_size_p;

  /* See whether the constant is already in the pool.  If so, return the
     existing label, otherwise leave P pointing to the place where the
     constant should be added.

     Keep the pool sorted in increasing order of mode size so that we can
     reduce the number of alignments needed.  */
  first_of_size_p = true;
  for (p = &pool->first; *p != 0; p = &(*p)->next)
    {
      if (mode == (*p)->mode && rtx_equal_p (value, (*p)->value))
        return (*p)->label;
      if (GET_MODE_SIZE (mode) < GET_MODE_SIZE ((*p)->mode))
        break;
      if (GET_MODE_SIZE (mode) == GET_MODE_SIZE ((*p)->mode))
        first_of_size_p = false;
    }

  /* In the worst case, the constant needed by the earliest instruction
     will end up at the end of the pool.  The entire pool must then be
     accessible from that instruction.

     When adding the first constant, set the pool's highest address to
     the address of the first out-of-range byte.  Adjust this address
     downwards each time a new constant is added.  */
  if (pool->first == 0)
    /* For LWPC, ADDIUPC and DADDIUPC, the base PC value is the address
       of the instruction with the lowest two bits clear.  The base PC
       value for LDPC has the lowest three bits clear.  Assume the worst
       case here; namely that the PC-relative instruction occupies the
       last 2 bytes in an aligned word.  */
    pool->highest_address = pool->insn_address - (UNITS_PER_WORD - 2) + 0x8000;
  pool->highest_address -= GET_MODE_SIZE (mode);
  if (first_of_size_p)
    /* Take into account the worst possible padding due to alignment.  */
    pool->highest_address -= GET_MODE_SIZE (mode) - 1;

  /* Create a new entry.  */
  c = XNEW (struct mips16_constant);
  c->value = value;
  c->mode = mode;
  c->label = gen_label_rtx ();
  c->next = *p;
  *p = c;

  return c->label;
}
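
/* Worked example (added; illustrative numbers only): with UNITS_PER_WORD == 4
   and pool->insn_address == 0x100, adding the first SImode constant gives

     highest_address = 0x100 - 2 + 0x8000    (base PC assumption above)
                     - 4                     (size of the new constant)
                     - 3                     (worst-case alignment padding)
                     = 0x80f7

   and every later constant lowers highest_address further.  */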

/* Output constant VALUE after instruction INSN and return the last
   instruction emitted.  MODE is the mode of the constant.  */

static rtx
mips16_emit_constants_1 (enum machine_mode mode, rtx value, rtx insn)
{
  if (SCALAR_INT_MODE_P (mode) || ALL_SCALAR_FIXED_POINT_MODE_P (mode))
    {
      rtx size = GEN_INT (GET_MODE_SIZE (mode));
      return emit_insn_after (gen_consttable_int (value, size), insn);
    }

  if (SCALAR_FLOAT_MODE_P (mode))
    return emit_insn_after (gen_consttable_float (value), insn);

  if (VECTOR_MODE_P (mode))
    {
      int i;

      for (i = 0; i < CONST_VECTOR_NUNITS (value); i++)
        insn = mips16_emit_constants_1 (GET_MODE_INNER (mode),
                                        CONST_VECTOR_ELT (value, i), insn);
      return insn;
    }

  gcc_unreachable ();
}

/* Dump out the constants in CONSTANTS after INSN.  */

static void
mips16_emit_constants (struct mips16_constant *constants, rtx insn)
{
  struct mips16_constant *c, *next;
  int align;

  align = 0;
  for (c = constants; c != NULL; c = next)
    {
      /* If necessary, increase the alignment of PC.  */
      if (align < GET_MODE_SIZE (c->mode))
        {
          int align_log = floor_log2 (GET_MODE_SIZE (c->mode));
          insn = emit_insn_after (gen_align (GEN_INT (align_log)), insn);
        }
      align = GET_MODE_SIZE (c->mode);

      insn = emit_label_after (c->label, insn);
      insn = mips16_emit_constants_1 (c->mode, c->value, insn);

      next = c->next;
      free (c);
    }

  emit_barrier_after (insn);
}

/* Return the length of instruction INSN.  */

static int
mips16_insn_length (rtx insn)
{
  if (JUMP_P (insn))
    {
      rtx body = PATTERN (insn);
      if (GET_CODE (body) == ADDR_VEC)
        return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 0);
      if (GET_CODE (body) == ADDR_DIFF_VEC)
        return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 1);
    }
  return get_attr_length (insn);
}

/* If *X is a symbolic constant that refers to the constant pool, add
   the constant to POOL and rewrite *X to use the constant's label.  */

static void
mips16_rewrite_pool_constant (struct mips16_constant_pool *pool, rtx *x)
{
  rtx base, offset, label;

  split_const (*x, &base, &offset);
  if (GET_CODE (base) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (base))
    {
      label = mips16_add_constant (pool, get_pool_constant (base),
                                   get_pool_mode (base));
      base = gen_rtx_LABEL_REF (Pmode, label);
      *x = mips_unspec_address_offset (base, offset, SYMBOL_PC_RELATIVE);
    }
}

/* This structure is used to communicate with mips16_rewrite_pool_refs.
   INSN is the instruction we're rewriting and POOL points to the current
   constant pool.  */
struct mips16_rewrite_pool_refs_info {
  rtx insn;
  struct mips16_constant_pool *pool;
};

/* Rewrite *X so that constant pool references refer to the constant's
   label instead.  DATA points to a mips16_rewrite_pool_refs_info
   structure.  */

static int
mips16_rewrite_pool_refs (rtx *x, void *data)
{
  struct mips16_rewrite_pool_refs_info *info =
    (struct mips16_rewrite_pool_refs_info *) data;

  if (force_to_mem_operand (*x, Pmode))
    {
      rtx mem = force_const_mem (GET_MODE (*x), *x);
      validate_change (info->insn, x, mem, false);
    }

  if (MEM_P (*x))
    {
      mips16_rewrite_pool_constant (info->pool, &XEXP (*x, 0));
      return -1;
    }

  if (TARGET_MIPS16_TEXT_LOADS)
    mips16_rewrite_pool_constant (info->pool, x);

  return GET_CODE (*x) == CONST ? -1 : 0;
}

/* Return whether CFG is used in mips_reorg.  */

static bool
mips_cfg_in_reorg (void)
{
  return (mips_r10k_cache_barrier != R10K_CACHE_BARRIER_NONE
          || TARGET_RELAX_PIC_CALLS);
}

/* Build MIPS16 constant pools.  */

static void
mips16_lay_out_constants (void)
{
  struct mips16_constant_pool pool;
  struct mips16_rewrite_pool_refs_info info;
  rtx insn, barrier;

  if (!TARGET_MIPS16_PCREL_LOADS)
    return;

  if (mips_cfg_in_reorg ())
    split_all_insns ();
  else
    split_all_insns_noflow ();
  barrier = 0;
  memset (&pool, 0, sizeof (pool));
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      /* Rewrite constant pool references in INSN.  */
      if (USEFUL_INSN_P (insn))
        {
          info.insn = insn;
          info.pool = &pool;
          for_each_rtx (&PATTERN (insn), mips16_rewrite_pool_refs, &info);
        }

      pool.insn_address += mips16_insn_length (insn);

      if (pool.first != NULL)
        {
          /* If there are no natural barriers between the first user of
             the pool and the highest acceptable address, we'll need to
             create a new instruction to jump around the constant pool.
             In the worst case, this instruction will be 4 bytes long.

             If it's too late to do this transformation after INSN,
             do it immediately before INSN.  */
          if (barrier == 0 && pool.insn_address + 4 > pool.highest_address)
            {
              rtx label, jump;

              label = gen_label_rtx ();

              jump = emit_jump_insn_before (gen_jump (label), insn);
              JUMP_LABEL (jump) = label;
              LABEL_NUSES (label) = 1;
              barrier = emit_barrier_after (jump);

              emit_label_after (label, barrier);
              pool.insn_address += 4;
            }

          /* See whether the constant pool is now out of range of the first
             user.  If so, output the constants after the previous barrier.
             Note that any instructions between BARRIER and INSN (inclusive)
             will use negative offsets to refer to the pool.  */
          if (pool.insn_address > pool.highest_address)
            {
              mips16_emit_constants (pool.first, barrier);
              pool.first = NULL;
              barrier = 0;
            }
          else if (BARRIER_P (insn))
            barrier = insn;
        }
    }
  mips16_emit_constants (pool.first, get_last_insn ());
}

/* Return true if it is worth r10k_simplify_address's while replacing
   an address with X.  We are looking for constants, and for addresses
   at a known offset from the incoming stack pointer.  */

static bool
r10k_simplified_address_p (rtx x)
{
  if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
    x = XEXP (x, 0);
  return x == virtual_incoming_args_rtx || CONSTANT_P (x);
}
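
/* Examples (added; illustrative): a SYMBOL_REF and
   (plus virtual_incoming_args_rtx (const_int 16)) both satisfy this
   predicate, whereas an address based on an arbitrary pseudo register
   does not.  */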
|
13660 |
|
|
|
13661 |
|
|
/* X is an expression that appears in INSN. Try to use the UD chains
|
13662 |
|
|
to simplify it, returning the simplified form on success and the
|
13663 |
|
|
original form otherwise. Replace the incoming value of $sp with
|
13664 |
|
|
virtual_incoming_args_rtx (which should never occur in X otherwise). */
|
13665 |
|
|
|
13666 |
|
|
static rtx
|
13667 |
|
|
r10k_simplify_address (rtx x, rtx insn)
|
13668 |
|
|
{
|
13669 |
|
|
rtx newx, op0, op1, set, def_insn, note;
|
13670 |
|
|
df_ref use, def;
|
13671 |
|
|
struct df_link *defs;
|
13672 |
|
|
|
13673 |
|
|
newx = NULL_RTX;
|
13674 |
|
|
if (UNARY_P (x))
|
13675 |
|
|
{
|
13676 |
|
|
op0 = r10k_simplify_address (XEXP (x, 0), insn);
|
13677 |
|
|
if (op0 != XEXP (x, 0))
|
13678 |
|
|
newx = simplify_gen_unary (GET_CODE (x), GET_MODE (x),
|
13679 |
|
|
op0, GET_MODE (XEXP (x, 0)));
|
13680 |
|
|
}
|
13681 |
|
|
else if (BINARY_P (x))
|
13682 |
|
|
{
|
13683 |
|
|
op0 = r10k_simplify_address (XEXP (x, 0), insn);
|
13684 |
|
|
op1 = r10k_simplify_address (XEXP (x, 1), insn);
|
13685 |
|
|
if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
|
13686 |
|
|
newx = simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
|
13687 |
|
|
}
|
13688 |
|
|
else if (GET_CODE (x) == LO_SUM)
|
13689 |
|
|
{
|
13690 |
|
|
/* LO_SUMs can be offset from HIGHs, if we know they won't
|
13691 |
|
|
overflow. See mips_classify_address for the rationale behind
|
13692 |
|
|
the lax check. */
|
13693 |
|
|
op0 = r10k_simplify_address (XEXP (x, 0), insn);
|
13694 |
|
|
if (GET_CODE (op0) == HIGH)
|
13695 |
|
|
newx = XEXP (x, 1);
|
13696 |
|
|
}
|
13697 |
|
|
else if (REG_P (x))
|
13698 |
|
|
{
|
13699 |
|
|
/* Uses are recorded by regno_reg_rtx, not X itself. */
|
13700 |
|
|
use = df_find_use (insn, regno_reg_rtx[REGNO (x)]);
|
13701 |
|
|
gcc_assert (use);
|
13702 |
|
|
defs = DF_REF_CHAIN (use);
|
13703 |
|
|
|
13704 |
|
|
/* Require a single definition. */
|
13705 |
|
|
if (defs && defs->next == NULL)
|
13706 |
|
|
{
|
13707 |
|
|
def = defs->ref;
|
13708 |
|
|
if (DF_REF_IS_ARTIFICIAL (def))
|
13709 |
|
|
{
|
13710 |
|
|
/* Replace the incoming value of $sp with
|
13711 |
|
|
virtual_incoming_args_rtx. */
|
13712 |
|
|
if (x == stack_pointer_rtx
|
13713 |
|
|
&& DF_REF_BB (def) == ENTRY_BLOCK_PTR)
|
13714 |
|
|
newx = virtual_incoming_args_rtx;
|
13715 |
|
|
}
|
13716 |
|
|
else if (dominated_by_p (CDI_DOMINATORS, DF_REF_BB (use),
|
13717 |
|
|
DF_REF_BB (def)))
|
13718 |
|
|
{
|
13719 |
|
|
/* Make sure that DEF_INSN is a single set of REG. */
|
13720 |
|
|
def_insn = DF_REF_INSN (def);
|
13721 |
|
|
if (NONJUMP_INSN_P (def_insn))
|
13722 |
|
|
{
|
13723 |
|
|
set = single_set (def_insn);
|
13724 |
|
|
if (set && rtx_equal_p (SET_DEST (set), x))
|
13725 |
|
|
{
|
13726 |
|
|
/* Prefer to use notes, since the def-use chains
|
13727 |
|
|
are often shorter. */
|
13728 |
|
|
note = find_reg_equal_equiv_note (def_insn);
|
13729 |
|
|
if (note)
|
13730 |
|
|
newx = XEXP (note, 0);
|
13731 |
|
|
else
|
13732 |
|
|
newx = SET_SRC (set);
|
13733 |
|
|
newx = r10k_simplify_address (newx, def_insn);
|
13734 |
|
|
}
|
13735 |
|
|
}
|
13736 |
|
|
}
|
13737 |
|
|
}
|
13738 |
|
|
}
|
13739 |
|
|
if (newx && r10k_simplified_address_p (newx))
|
13740 |
|
|
return newx;
|
13741 |
|
|
return x;
|
13742 |
|
|
}
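
/* Illustrative note (not from the original source): given something like

     (set (reg 200) (plus (reg $sp) (const_int 16)))
     ...
     (mem (reg 200))

   where register 200 has a single reaching definition and $sp still has
   its incoming value at that definition, the walk above rewrites the
   address to (plus virtual_incoming_args_rtx (const_int 16)), which
   r10k_simplified_address_p then accepts as a frame reference.  */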

/* Return true if ADDRESS is known to be an uncached address
   on R10K systems.  */

static bool
r10k_uncached_address_p (unsigned HOST_WIDE_INT address)
{
  unsigned HOST_WIDE_INT upper;

  /* Check for KSEG1.  */
  if (address + 0x60000000 < 0x20000000)
    return true;

  /* Check for uncached XKPHYS addresses.  */
  if (Pmode == DImode)
    {
      upper = (address >> 40) & 0xf9ffff;
      if (upper == 0x900000 || upper == 0xb80000)
        return true;
    }
  return false;
}
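
/* Illustrative note (not from the original source): the KSEG1 test relies
   on unsigned wrap-around.  Adding 0x60000000 maps the (possibly
   sign-extended) KSEG1 range 0xa0000000..0xbfffffff onto 0..0x1fffffff,
   so a single unsigned comparison suffices.  The two XKPHYS values
   accepted above appear to correspond to the uncached (CCA 2) and
   uncached-accelerated (CCA 7) cache attributes.  */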

/* Return true if we can prove that an access to address X in instruction
   INSN would be safe from R10K speculation.  This X is a general
   expression; it might not be a legitimate address.  */

static bool
r10k_safe_address_p (rtx x, rtx insn)
{
  rtx base, offset;
  HOST_WIDE_INT offset_val;

  x = r10k_simplify_address (x, insn);

  /* Check for references to the stack frame.  It doesn't really matter
     how much of the frame has been allocated at INSN; -mr10k-cache-barrier
     allows us to assume that accesses to any part of the eventual frame
     are safe from speculation at any point in the function.  */
  mips_split_plus (x, &base, &offset_val);
  if (base == virtual_incoming_args_rtx
      && offset_val >= -cfun->machine->frame.total_size
      && offset_val < cfun->machine->frame.args_size)
    return true;

  /* Check for uncached addresses.  */
  if (CONST_INT_P (x))
    return r10k_uncached_address_p (INTVAL (x));

  /* Check for accesses to a static object.  */
  split_const (x, &base, &offset);
  return offset_within_block_p (base, INTVAL (offset));
}

/* Return true if a MEM with MEM_EXPR EXPR and MEM_OFFSET OFFSET is
   an in-range access to an automatic variable, or to an object with
   a link-time-constant address.  */

static bool
r10k_safe_mem_expr_p (tree expr, rtx offset)
{
  if (expr == NULL_TREE
      || offset == NULL_RTX
      || !CONST_INT_P (offset)
      || INTVAL (offset) < 0
      || INTVAL (offset) >= int_size_in_bytes (TREE_TYPE (expr)))
    return false;

  while (TREE_CODE (expr) == COMPONENT_REF)
    {
      expr = TREE_OPERAND (expr, 0);
      if (expr == NULL_TREE)
        return false;
    }

  return DECL_P (expr);
}

/* A for_each_rtx callback for which DATA points to the instruction
   containing *X.  Stop the search if we find a MEM that is not safe
   from R10K speculation.  */

static int
r10k_needs_protection_p_1 (rtx *loc, void *data)
{
  rtx mem;

  mem = *loc;
  if (!MEM_P (mem))
    return 0;

  if (r10k_safe_mem_expr_p (MEM_EXPR (mem), MEM_OFFSET (mem)))
    return -1;

  if (r10k_safe_address_p (XEXP (mem, 0), (rtx) data))
    return -1;

  return 1;
}

/* A note_stores callback for which DATA points to an instruction pointer.
   If *DATA is nonnull, make it null if X contains a MEM that is not
   safe from R10K speculation.  */

static void
r10k_needs_protection_p_store (rtx x, const_rtx pat ATTRIBUTE_UNUSED,
                               void *data)
{
  rtx *insn_ptr;

  insn_ptr = (rtx *) data;
  if (*insn_ptr && for_each_rtx (&x, r10k_needs_protection_p_1, *insn_ptr))
    *insn_ptr = NULL_RTX;
}

/* A for_each_rtx callback that iterates over the pattern of a CALL_INSN.
   Return nonzero if the call is not to a declared function.  */

static int
r10k_needs_protection_p_call (rtx *loc, void *data ATTRIBUTE_UNUSED)
{
  rtx x;

  x = *loc;
  if (!MEM_P (x))
    return 0;

  x = XEXP (x, 0);
  if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_DECL (x))
    return -1;

  return 1;
}

/* Return true if instruction INSN needs to be protected by an R10K
   cache barrier.  */

static bool
r10k_needs_protection_p (rtx insn)
{
  if (CALL_P (insn))
    return for_each_rtx (&PATTERN (insn), r10k_needs_protection_p_call, NULL);

  if (mips_r10k_cache_barrier == R10K_CACHE_BARRIER_STORE)
    {
      note_stores (PATTERN (insn), r10k_needs_protection_p_store, &insn);
      return insn == NULL_RTX;
    }

  return for_each_rtx (&PATTERN (insn), r10k_needs_protection_p_1, insn);
}
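
/* Illustrative note (not from the original source): the return values in
   the callbacks above follow the for_each_rtx convention: 0 continues the
   walk, a negative value skips the sub-rtxes of the current expression but
   keeps walking, and a positive value stops the walk and is returned by
   for_each_rtx itself.  So "safe" MEMs return -1 to prune the search,
   while an unsafe MEM returns 1 to flag the whole instruction.  */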

/* Return true if BB is only reached by blocks in PROTECTED_BBS and if every
   edge is unconditional.  */

static bool
r10k_protected_bb_p (basic_block bb, sbitmap protected_bbs)
{
  edge_iterator ei;
  edge e;

  FOR_EACH_EDGE (e, ei, bb->preds)
    if (!single_succ_p (e->src)
        || !TEST_BIT (protected_bbs, e->src->index)
        || (e->flags & EDGE_COMPLEX) != 0)
      return false;
  return true;
}

/* Implement -mr10k-cache-barrier= for the current function.  */

static void
r10k_insert_cache_barriers (void)
{
  int *rev_post_order;
  unsigned int i, n;
  basic_block bb;
  sbitmap protected_bbs;
  rtx insn, end, unprotected_region;

  if (TARGET_MIPS16)
    {
      sorry ("%qs does not support MIPS16 code", "-mr10k-cache-barrier");
      return;
    }

  /* Calculate dominators.  */
  calculate_dominance_info (CDI_DOMINATORS);

  /* Bit X of PROTECTED_BBS is set if the last operation in basic block
     X is protected by a cache barrier.  */
  protected_bbs = sbitmap_alloc (last_basic_block);
  sbitmap_zero (protected_bbs);

  /* Iterate over the basic blocks in reverse post-order.  */
  rev_post_order = XNEWVEC (int, last_basic_block);
  n = pre_and_rev_post_order_compute (NULL, rev_post_order, false);
  for (i = 0; i < n; i++)
    {
      bb = BASIC_BLOCK (rev_post_order[i]);

      /* If this block is only reached by unconditional edges, and if the
         source of every edge is protected, the beginning of the block is
         also protected.  */
      if (r10k_protected_bb_p (bb, protected_bbs))
        unprotected_region = NULL_RTX;
      else
        unprotected_region = pc_rtx;
      end = NEXT_INSN (BB_END (bb));

      /* UNPROTECTED_REGION is:

         - null if we are processing a protected region,
         - pc_rtx if we are processing an unprotected region but have
           not yet found the first instruction in it
         - the first instruction in an unprotected region otherwise.  */
      for (insn = BB_HEAD (bb); insn != end; insn = NEXT_INSN (insn))
        {
          if (unprotected_region && USEFUL_INSN_P (insn))
            {
              if (recog_memoized (insn) == CODE_FOR_mips_cache)
                /* This CACHE instruction protects the following code.  */
                unprotected_region = NULL_RTX;
              else
                {
                  /* See if INSN is the first instruction in this
                     unprotected region.  */
                  if (unprotected_region == pc_rtx)
                    unprotected_region = insn;

                  /* See if INSN needs to be protected.  If so,
                     we must insert a cache barrier somewhere between
                     PREV_INSN (UNPROTECTED_REGION) and INSN.  It isn't
                     clear which position is better performance-wise,
                     but as a tie-breaker, we assume that it is better
                     to allow delay slots to be back-filled where
                     possible, and that it is better not to insert
                     barriers in the middle of already-scheduled code.
                     We therefore insert the barrier at the beginning
                     of the region.  */
                  if (r10k_needs_protection_p (insn))
                    {
                      emit_insn_before (gen_r10k_cache_barrier (),
                                        unprotected_region);
                      unprotected_region = NULL_RTX;
                    }
                }
            }

          if (CALL_P (insn))
            /* The called function is not required to protect the exit path.
               The code that follows a call is therefore unprotected.  */
            unprotected_region = pc_rtx;
        }

      /* Record whether the end of this block is protected.  */
      if (unprotected_region == NULL_RTX)
        SET_BIT (protected_bbs, bb->index);
    }
  XDELETEVEC (rev_post_order);

  sbitmap_free (protected_bbs);

  free_dominance_info (CDI_DOMINATORS);
}
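
/* Illustrative note (not from the original source): processing the blocks
   in reverse post-order means that, back edges aside, a block's
   predecessors have already been visited when r10k_protected_bb_p queries
   PROTECTED_BBS.  A predecessor reached only through a back edge simply
   still has its bit clear, so the block is conservatively treated as
   unprotected.  */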

/* If INSN is a call, return the underlying CALL expr.  Return NULL_RTX
   otherwise.  */

static rtx
mips_call_expr_from_insn (rtx insn)
{
  rtx x;

  if (!CALL_P (insn))
    return NULL_RTX;

  x = PATTERN (insn);
  if (GET_CODE (x) == PARALLEL)
    x = XVECEXP (x, 0, 0);
  if (GET_CODE (x) == SET)
    x = XEXP (x, 1);

  gcc_assert (GET_CODE (x) == CALL);
  return x;
}

/* REG is set in DEF.  See if the definition is one of the ways we load a
   register with a symbol address for a mips_use_pic_fn_addr_reg_p call.
   If it is, return the symbol reference of the function, otherwise return
   NULL_RTX.  */

static rtx
mips_pic_call_symbol_from_set (df_ref def, rtx reg)
{
  rtx def_insn, set;

  if (DF_REF_IS_ARTIFICIAL (def))
    return NULL_RTX;

  def_insn = DF_REF_INSN (def);
  set = single_set (def_insn);
  if (set && rtx_equal_p (SET_DEST (set), reg))
    {
      rtx note, src, symbol;

      /* First, look at REG_EQUAL/EQUIV notes.  */
      note = find_reg_equal_equiv_note (def_insn);
      if (note && GET_CODE (XEXP (note, 0)) == SYMBOL_REF)
        return XEXP (note, 0);

      /* For %call16 references we don't have REG_EQUAL.  */
      src = SET_SRC (set);
      symbol = mips_strip_unspec_call (src);
      if (symbol)
        {
          gcc_assert (GET_CODE (symbol) == SYMBOL_REF);
          return symbol;
        }

      /* Follow simple register copies.  */
      if (REG_P (src))
        return mips_find_pic_call_symbol (def_insn, src);
    }

  return NULL_RTX;
}

/* Find the definition of the use of REG in INSN.  See if the definition is
   one of the ways we load a register with a symbol address for a
   mips_use_pic_fn_addr_reg_p call.  If it is, return the symbol reference
   of the function, otherwise return NULL_RTX.  */

static rtx
mips_find_pic_call_symbol (rtx insn, rtx reg)
{
  df_ref use;
  struct df_link *defs;
  rtx symbol;

  use = df_find_use (insn, regno_reg_rtx[REGNO (reg)]);
  if (!use)
    return NULL_RTX;
  defs = DF_REF_CHAIN (use);
  if (!defs)
    return NULL_RTX;
  symbol = mips_pic_call_symbol_from_set (defs->ref, reg);
  if (!symbol)
    return NULL_RTX;

  /* If we have more than one definition, they need to be identical.  */
  for (defs = defs->next; defs; defs = defs->next)
    {
      rtx other;

      other = mips_pic_call_symbol_from_set (defs->ref, reg);
      if (!rtx_equal_p (symbol, other))
        return NULL_RTX;
    }

  return symbol;
}

/* Replace the args_size operand of the call expression CALL with the
   call-attribute UNSPEC and fill in SYMBOL as the function symbol.  */

static void
mips_annotate_pic_call_expr (rtx call, rtx symbol)
{
  rtx args_size;

  args_size = XEXP (call, 1);
  XEXP (call, 1) = gen_rtx_UNSPEC (GET_MODE (args_size),
                                   gen_rtvec (2, args_size, symbol),
                                   UNSPEC_CALL_ATTR);
}

/* OPERANDS[ARGS_SIZE_OPNO] is the arg_size operand of a CALL expression.
   See if instead of the arg_size argument it contains the call attributes.
   If so, return true and set OPERANDS[ARGS_SIZE_OPNO] to the function
   symbol from the call attributes.  Also return false if ARGS_SIZE_OPNO is
   -1.  */

bool
mips_get_pic_call_symbol (rtx *operands, int args_size_opno)
{
  rtx args_size, symbol;

  if (!TARGET_RELAX_PIC_CALLS || args_size_opno == -1)
    return false;

  args_size = operands[args_size_opno];
  if (GET_CODE (args_size) != UNSPEC)
    return false;
  gcc_assert (XINT (args_size, 1) == UNSPEC_CALL_ATTR);

  symbol = XVECEXP (args_size, 0, 1);
  gcc_assert (GET_CODE (symbol) == SYMBOL_REF);

  operands[args_size_opno] = symbol;
  return true;
}

/* Use DF to annotate PIC indirect calls with the function symbol they
   dispatch to.  */

static void
mips_annotate_pic_calls (void)
{
  basic_block bb;
  rtx insn;

  FOR_EACH_BB (bb)
    FOR_BB_INSNS (bb, insn)
      {
        rtx call, reg, symbol;

        call = mips_call_expr_from_insn (insn);
        if (!call)
          continue;
        gcc_assert (MEM_P (XEXP (call, 0)));
        reg = XEXP (XEXP (call, 0), 0);
        if (!REG_P (reg))
          continue;

        symbol = mips_find_pic_call_symbol (insn, reg);
        if (symbol)
          mips_annotate_pic_call_expr (call, symbol);
      }
}
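
/* Illustrative note (not from the original source): the point of this
   annotation, as I understand it, is -mrelax-pic-calls.  Once the call
   pattern knows which function an indirect $25 call dispatches to, the
   output code can emit an R_MIPS_JALR hint for the linker, which may then
   relax the "jalr $25" into a direct branch when the callee turns out to
   be local.  */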

/* A temporary variable used by for_each_rtx callbacks, etc.  */
static rtx mips_sim_insn;

/* A structure representing the state of the processor pipeline.
   Used by the mips_sim_* family of functions.  */
struct mips_sim {
  /* The maximum number of instructions that can be issued in a cycle.
     (Caches mips_issue_rate.)  */
  unsigned int issue_rate;

  /* The current simulation time.  */
  unsigned int time;

  /* How many more instructions can be issued in the current cycle.  */
  unsigned int insns_left;

  /* LAST_SET[X].INSN is the last instruction to set register X.
     LAST_SET[X].TIME is the time at which that instruction was issued.
     INSN is null if no instruction has yet set register X.  */
  struct {
    rtx insn;
    unsigned int time;
  } last_set[FIRST_PSEUDO_REGISTER];

  /* The pipeline's current DFA state.  */
  state_t dfa_state;
};

/* Reset STATE to the initial simulation state.  */

static void
mips_sim_reset (struct mips_sim *state)
{
  state->time = 0;
  state->insns_left = state->issue_rate;
  memset (&state->last_set, 0, sizeof (state->last_set));
  state_reset (state->dfa_state);
}

/* Initialize STATE before its first use.  DFA_STATE points to an
   allocated but uninitialized DFA state.  */

static void
mips_sim_init (struct mips_sim *state, state_t dfa_state)
{
  state->issue_rate = mips_issue_rate ();
  state->dfa_state = dfa_state;
  mips_sim_reset (state);
}

/* Advance STATE by one clock cycle.  */

static void
mips_sim_next_cycle (struct mips_sim *state)
{
  state->time++;
  state->insns_left = state->issue_rate;
  state_transition (state->dfa_state, 0);
}

/* Advance simulation state STATE until instruction INSN can read
   register REG.  */

static void
mips_sim_wait_reg (struct mips_sim *state, rtx insn, rtx reg)
{
  unsigned int regno, end_regno;

  end_regno = END_REGNO (reg);
  for (regno = REGNO (reg); regno < end_regno; regno++)
    if (state->last_set[regno].insn != 0)
      {
        unsigned int t;

        t = (state->last_set[regno].time
             + insn_latency (state->last_set[regno].insn, insn));
        while (state->time < t)
          mips_sim_next_cycle (state);
      }
}

/* A for_each_rtx callback.  If *X is a register, advance simulation state
   DATA until mips_sim_insn can read the register's value.  */

static int
mips_sim_wait_regs_2 (rtx *x, void *data)
{
  if (REG_P (*x))
    mips_sim_wait_reg ((struct mips_sim *) data, mips_sim_insn, *x);
  return 0;
}

/* Call mips_sim_wait_regs_2 (R, DATA) for each register R mentioned in *X.  */

static void
mips_sim_wait_regs_1 (rtx *x, void *data)
{
  for_each_rtx (x, mips_sim_wait_regs_2, data);
}

/* Advance simulation state STATE until all of INSN's register
   dependencies are satisfied.  */

static void
mips_sim_wait_regs (struct mips_sim *state, rtx insn)
{
  mips_sim_insn = insn;
  note_uses (&PATTERN (insn), mips_sim_wait_regs_1, state);
}

/* Advance simulation state STATE until the units required by
   instruction INSN are available.  */

static void
mips_sim_wait_units (struct mips_sim *state, rtx insn)
{
  state_t tmp_state;

  tmp_state = alloca (state_size ());
  while (state->insns_left == 0
         || (memcpy (tmp_state, state->dfa_state, state_size ()),
             state_transition (tmp_state, insn) >= 0))
    mips_sim_next_cycle (state);
}

/* Advance simulation state STATE until INSN is ready to issue.  */

static void
mips_sim_wait_insn (struct mips_sim *state, rtx insn)
{
  mips_sim_wait_regs (state, insn);
  mips_sim_wait_units (state, insn);
}
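
/* Illustrative sketch (not from the original source) of how the mips_sim_*
   routines are meant to be driven, mirroring their use in
   vr4130_align_insns below:

     struct mips_sim state;
     mips_sim_init (&state, alloca (state_size ()));
     for each instruction INSN:
       mips_sim_wait_insn (&state, INSN);    stall until INSN could issue
       ... inspect state.insns_left etc. ...
       mips_sim_issue_insn (&state, INSN);   account for the issue
       mips_sim_finish_insn (&state, INSN);  delay slots, calls, labels

   The loop body is pseudo-code; only the function names are real.  */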

/* mips_sim_insn has just set X.  Update the LAST_SET array
   in simulation state DATA.  */

static void
mips_sim_record_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  struct mips_sim *state;

  state = (struct mips_sim *) data;
  if (REG_P (x))
    {
      unsigned int regno, end_regno;

      end_regno = END_REGNO (x);
      for (regno = REGNO (x); regno < end_regno; regno++)
        {
          state->last_set[regno].insn = mips_sim_insn;
          state->last_set[regno].time = state->time;
        }
    }
}

/* Issue instruction INSN in scheduler state STATE.  Assume that INSN
   can issue immediately (i.e., that mips_sim_wait_insn has already
   been called).  */

static void
mips_sim_issue_insn (struct mips_sim *state, rtx insn)
{
  state_transition (state->dfa_state, insn);
  state->insns_left--;

  mips_sim_insn = insn;
  note_stores (PATTERN (insn), mips_sim_record_set, state);
}

/* Simulate issuing a NOP in state STATE.  */

static void
mips_sim_issue_nop (struct mips_sim *state)
{
  if (state->insns_left == 0)
    mips_sim_next_cycle (state);
  state->insns_left--;
}

/* Update simulation state STATE so that it's ready to accept the instruction
   after INSN.  INSN should be part of the main rtl chain, not a member of a
   SEQUENCE.  */

static void
mips_sim_finish_insn (struct mips_sim *state, rtx insn)
{
  /* If INSN is a jump with an implicit delay slot, simulate a nop.  */
  if (JUMP_P (insn))
    mips_sim_issue_nop (state);

  switch (GET_CODE (SEQ_BEGIN (insn)))
    {
    case CODE_LABEL:
    case CALL_INSN:
      /* We can't predict the processor state after a call or label.  */
      mips_sim_reset (state);
      break;

    case JUMP_INSN:
      /* The delay slots of branch likely instructions are only executed
         when the branch is taken.  Therefore, if the caller has simulated
         the delay slot instruction, STATE does not really reflect the state
         of the pipeline for the instruction after the delay slot.  Also,
         branch likely instructions tend to incur a penalty when not taken,
         so there will probably be an extra delay between the branch and
         the instruction after the delay slot.  */
      if (INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (insn)))
        mips_sim_reset (state);
      break;

    default:
      break;
    }
}

/* The VR4130 pipeline issues aligned pairs of instructions together,
   but it stalls the second instruction if it depends on the first.
   In order to cut down the amount of logic required, this dependence
   check is not based on a full instruction decode.  Instead, any non-SPECIAL
   instruction is assumed to modify the register specified by bits 20-16
   (which is usually the "rt" field).

   In BEQ, BEQL, BNE and BNEL instructions, the rt field is actually an
   input, so we can end up with a false dependence between the branch
   and its delay slot.  If this situation occurs in instruction INSN,
   try to avoid it by swapping rs and rt.  */

static void
vr4130_avoid_branch_rt_conflict (rtx insn)
{
  rtx first, second;

  first = SEQ_BEGIN (insn);
  second = SEQ_END (insn);
  if (JUMP_P (first)
      && NONJUMP_INSN_P (second)
      && GET_CODE (PATTERN (first)) == SET
      && GET_CODE (SET_DEST (PATTERN (first))) == PC
      && GET_CODE (SET_SRC (PATTERN (first))) == IF_THEN_ELSE)
    {
      /* Check for the right kind of condition.  */
      rtx cond = XEXP (SET_SRC (PATTERN (first)), 0);
      if ((GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
          && REG_P (XEXP (cond, 0))
          && REG_P (XEXP (cond, 1))
          && reg_referenced_p (XEXP (cond, 1), PATTERN (second))
          && !reg_referenced_p (XEXP (cond, 0), PATTERN (second)))
        {
          /* SECOND mentions the rt register but not the rs register.  */
          rtx tmp = XEXP (cond, 0);
          XEXP (cond, 0) = XEXP (cond, 1);
          XEXP (cond, 1) = tmp;
        }
    }
}

/* Implement -mvr4130-align.  Go through each basic block and simulate the
   processor pipeline.  If we find that a pair of instructions could execute
   in parallel, and the first of those instructions is not 8-byte aligned,
   insert a nop to make it aligned.  */

static void
vr4130_align_insns (void)
{
  struct mips_sim state;
  rtx insn, subinsn, last, last2, next;
  bool aligned_p;

  dfa_start ();

  /* LAST is the last instruction before INSN to have a nonzero length.
     LAST2 is the last such instruction before LAST.  */
  last = 0;
  last2 = 0;

  /* ALIGNED_P is true if INSN is known to be at an aligned address.  */
  aligned_p = true;

  mips_sim_init (&state, alloca (state_size ()));
  for (insn = get_insns (); insn != 0; insn = next)
    {
      unsigned int length;

      next = NEXT_INSN (insn);

      /* See the comment above vr4130_avoid_branch_rt_conflict for details.
         This isn't really related to the alignment pass, but we do it on
         the fly to avoid a separate instruction walk.  */
      vr4130_avoid_branch_rt_conflict (insn);

      if (USEFUL_INSN_P (insn))
        FOR_EACH_SUBINSN (subinsn, insn)
          {
            mips_sim_wait_insn (&state, subinsn);

            /* If we want this instruction to issue in parallel with the
               previous one, make sure that the previous instruction is
               aligned.  There are several reasons why this isn't worthwhile
               when the second instruction is a call:

               - Calls are less likely to be performance critical,
               - There's a good chance that the delay slot can execute
                 in parallel with the call.
               - The return address would then be unaligned.

               In general, if we're going to insert a nop between instructions
               X and Y, it's better to insert it immediately after X.  That
               way, if the nop makes Y aligned, it will also align any labels
               between X and Y.  */
            if (state.insns_left != state.issue_rate
                && !CALL_P (subinsn))
              {
                if (subinsn == SEQ_BEGIN (insn) && aligned_p)
                  {
                    /* SUBINSN is the first instruction in INSN and INSN is
                       aligned.  We want to align the previous instruction
                       instead, so insert a nop between LAST2 and LAST.

                       Note that LAST could be either a single instruction
                       or a branch with a delay slot.  In the latter case,
                       LAST, like INSN, is already aligned, but the delay
                       slot must have some extra delay that stops it from
                       issuing at the same time as the branch.  We therefore
                       insert a nop before the branch in order to align its
                       delay slot.  */
                    emit_insn_after (gen_nop (), last2);
                    aligned_p = false;
                  }
                else if (subinsn != SEQ_BEGIN (insn) && !aligned_p)
                  {
                    /* SUBINSN is the delay slot of INSN, but INSN is
                       currently unaligned.  Insert a nop between
                       LAST and INSN to align it.  */
                    emit_insn_after (gen_nop (), last);
                    aligned_p = true;
                  }
              }
            mips_sim_issue_insn (&state, subinsn);
          }
      mips_sim_finish_insn (&state, insn);

      /* Update LAST, LAST2 and ALIGNED_P for the next instruction.  */
      length = get_attr_length (insn);
      if (length > 0)
        {
          /* If the instruction is an asm statement or multi-instruction
             mips.md pattern, the length is only an estimate.  Insert an
             8 byte alignment after it so that the following instructions
             can be handled correctly.  */
          if (NONJUMP_INSN_P (SEQ_BEGIN (insn))
              && (recog_memoized (insn) < 0 || length >= 8))
            {
              next = emit_insn_after (gen_align (GEN_INT (3)), insn);
              next = NEXT_INSN (next);
              mips_sim_next_cycle (&state);
              aligned_p = true;
            }
          else if (length & 4)
            aligned_p = !aligned_p;
          last2 = last;
          last = insn;
        }

      /* See whether INSN is an aligned label.  */
      if (LABEL_P (insn) && label_to_alignment (insn) >= 3)
        aligned_p = true;
    }
  dfa_finish ();
}

/* This structure records that the current function has a LO_SUM
   involving SYMBOL_REF or LABEL_REF BASE and that MAX_OFFSET is
   the largest offset applied to BASE by all such LO_SUMs.  */
struct mips_lo_sum_offset {
  rtx base;
  HOST_WIDE_INT offset;
};

/* Return a hash value for SYMBOL_REF or LABEL_REF BASE.  */

static hashval_t
mips_hash_base (rtx base)
{
  int do_not_record_p;

  return hash_rtx (base, GET_MODE (base), &do_not_record_p, NULL, false);
}

/* Hash-table callbacks for mips_lo_sum_offsets.  */

static hashval_t
mips_lo_sum_offset_hash (const void *entry)
{
  return mips_hash_base (((const struct mips_lo_sum_offset *) entry)->base);
}

static int
mips_lo_sum_offset_eq (const void *entry, const void *value)
{
  return rtx_equal_p (((const struct mips_lo_sum_offset *) entry)->base,
                      (const_rtx) value);
}

/* Look up symbolic constant X in HTAB, which is a hash table of
   mips_lo_sum_offsets.  If OPTION is NO_INSERT, return true if X can be
   paired with a recorded LO_SUM, otherwise record X in the table.  */

static bool
mips_lo_sum_offset_lookup (htab_t htab, rtx x, enum insert_option option)
{
  rtx base, offset;
  void **slot;
  struct mips_lo_sum_offset *entry;

  /* Split X into a base and offset.  */
  split_const (x, &base, &offset);
  if (UNSPEC_ADDRESS_P (base))
    base = UNSPEC_ADDRESS (base);

  /* Look up the base in the hash table.  */
  slot = htab_find_slot_with_hash (htab, base, mips_hash_base (base), option);
  if (slot == NULL)
    return false;

  entry = (struct mips_lo_sum_offset *) *slot;
  if (option == INSERT)
    {
      if (entry == NULL)
        {
          entry = XNEW (struct mips_lo_sum_offset);
          entry->base = base;
          entry->offset = INTVAL (offset);
          *slot = entry;
        }
      else
        {
          if (INTVAL (offset) > entry->offset)
            entry->offset = INTVAL (offset);
        }
    }
  return INTVAL (offset) <= entry->offset;
}
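
/* Illustrative note (not from the original source): the table keeps only
   the largest LO_SUM offset seen for each base.  So if the function
   contains %lo(foo + 8) and %lo(foo + 24), the entry for "foo" records 24;
   a later NO_INSERT lookup of foo + 16 then succeeds (16 <= 24), while a
   lookup of foo + 100 fails and the corresponding HIGH is treated as
   orphaned.  */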

/* A for_each_rtx callback for which DATA is a mips_lo_sum_offset hash table.
   Record every LO_SUM in *LOC.  */

static int
mips_record_lo_sum (rtx *loc, void *data)
{
  if (GET_CODE (*loc) == LO_SUM)
    mips_lo_sum_offset_lookup ((htab_t) data, XEXP (*loc, 1), INSERT);
  return 0;
}

/* Return true if INSN is a SET of an orphaned high-part relocation.
   HTAB is a hash table of mips_lo_sum_offsets that describes all the
   LO_SUMs in the current function.  */

static bool
mips_orphaned_high_part_p (htab_t htab, rtx insn)
{
  enum mips_symbol_type type;
  rtx x, set;

  set = single_set (insn);
  if (set)
    {
      /* Check for %his.  */
      x = SET_SRC (set);
      if (GET_CODE (x) == HIGH
          && absolute_symbolic_operand (XEXP (x, 0), VOIDmode))
        return !mips_lo_sum_offset_lookup (htab, XEXP (x, 0), NO_INSERT);

      /* Check for local %gots (and %got_pages, which is redundant but OK).  */
      if (GET_CODE (x) == UNSPEC
          && XINT (x, 1) == UNSPEC_LOAD_GOT
          && mips_symbolic_constant_p (XVECEXP (x, 0, 1),
                                       SYMBOL_CONTEXT_LEA, &type)
          && type == SYMBOL_GOTOFF_PAGE)
        return !mips_lo_sum_offset_lookup (htab, XVECEXP (x, 0, 1), NO_INSERT);
    }
  return false;
}

/* Subroutine of mips_reorg_process_insns.  If there is a hazard between
   INSN and a previous instruction, avoid it by inserting nops after
   instruction AFTER.

   *DELAYED_REG and *HILO_DELAY describe the hazards that apply at
   this point.  If *DELAYED_REG is non-null, INSN must wait a cycle
   before using the value of that register.  *HILO_DELAY counts the
   number of instructions since the last hilo hazard (that is,
   the number of instructions since the last MFLO or MFHI).

   After inserting nops for INSN, update *DELAYED_REG and *HILO_DELAY
   for the next instruction.

   LO_REG is an rtx for the LO register, used in dependence checking.  */

static void
mips_avoid_hazard (rtx after, rtx insn, int *hilo_delay,
                   rtx *delayed_reg, rtx lo_reg)
{
  rtx pattern, set;
  int nops, ninsns;

  pattern = PATTERN (insn);

  /* Do not put the whole function in .set noreorder if it contains
     an asm statement.  We don't know whether there will be hazards
     between the asm statement and the gcc-generated code.  */
  if (GET_CODE (pattern) == ASM_INPUT || asm_noperands (pattern) >= 0)
    cfun->machine->all_noreorder_p = false;

  /* Ignore zero-length instructions (barriers and the like).  */
  ninsns = get_attr_length (insn) / 4;
  if (ninsns == 0)
    return;

  /* Work out how many nops are needed.  Note that we only care about
     registers that are explicitly mentioned in the instruction's pattern.
     It doesn't matter that calls use the argument registers or that they
     clobber hi and lo.  */
  if (*hilo_delay < 2 && reg_set_p (lo_reg, pattern))
    nops = 2 - *hilo_delay;
  else if (*delayed_reg != 0 && reg_referenced_p (*delayed_reg, pattern))
    nops = 1;
  else
    nops = 0;

  /* Insert the nops between this instruction and the previous one.
     Each new nop takes us further from the last hilo hazard.  */
  *hilo_delay += nops;
  while (nops-- > 0)
    emit_insn_after (gen_hazard_nop (), after);

  /* Set up the state for the next instruction.  */
  *hilo_delay += ninsns;
  *delayed_reg = 0;
  if (INSN_CODE (insn) >= 0)
    switch (get_attr_hazard (insn))
      {
      case HAZARD_NONE:
        break;

      case HAZARD_HILO:
        *hilo_delay = 0;
        break;

      case HAZARD_DELAY:
        set = single_set (insn);
        gcc_assert (set);
        *delayed_reg = SET_DEST (set);
        break;
      }
}
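
/* Illustrative note (not from the original source): the two hazard kinds
   handled above are, as I read the mips.md "hazard" attribute, the classic
   ones for older MIPS implementations: an MFHI/MFLO must be separated by
   two instructions from the next instruction that writes HI/LO, and a
   loaded (or similarly delayed) value cannot be consumed by the very next
   instruction.  The explicit nops are only needed because the function may
   later be wrapped in ".set noreorder", which stops the assembler from
   inserting them itself.  */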

/* Go through the instruction stream and insert nops where necessary.
   Also delete any high-part relocations whose partnering low parts
   are now all dead.  See if the whole function can then be put into
   .set noreorder and .set nomacro.  */

static void
mips_reorg_process_insns (void)
{
  rtx insn, last_insn, subinsn, next_insn, lo_reg, delayed_reg;
  int hilo_delay;
  htab_t htab;

  /* Force all instructions to be split into their final form.  */
  split_all_insns_noflow ();

  /* Recalculate instruction lengths without taking nops into account.  */
  cfun->machine->ignore_hazard_length_p = true;
  shorten_branches (get_insns ());

  cfun->machine->all_noreorder_p = true;

  /* We don't track MIPS16 PC-relative offsets closely enough to make
     a good job of ".set noreorder" code in MIPS16 mode.  */
  if (TARGET_MIPS16)
    cfun->machine->all_noreorder_p = false;

  /* Code that doesn't use explicit relocs can't be ".set nomacro".  */
  if (!TARGET_EXPLICIT_RELOCS)
    cfun->machine->all_noreorder_p = false;

  /* Profiled functions can't be all noreorder because the profiler
     support uses assembler macros.  */
  if (crtl->profile)
    cfun->machine->all_noreorder_p = false;

  /* Code compiled with -mfix-vr4120 can't be all noreorder because
     we rely on the assembler to work around some errata.  */
  if (TARGET_FIX_VR4120)
    cfun->machine->all_noreorder_p = false;

  /* The same is true for -mfix-vr4130 if we might generate MFLO or
     MFHI instructions.  Note that we avoid using MFLO and MFHI if
     the VR4130 MACC and DMACC instructions are available instead;
     see the *mfhilo_{si,di}_macc patterns.  */
  if (TARGET_FIX_VR4130 && !ISA_HAS_MACCHI)
    cfun->machine->all_noreorder_p = false;

  htab = htab_create (37, mips_lo_sum_offset_hash,
                      mips_lo_sum_offset_eq, free);

  /* Make a first pass over the instructions, recording all the LO_SUMs.  */
  for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
    FOR_EACH_SUBINSN (subinsn, insn)
      if (USEFUL_INSN_P (subinsn))
        for_each_rtx (&PATTERN (subinsn), mips_record_lo_sum, htab);

  last_insn = 0;
  hilo_delay = 2;
  delayed_reg = 0;
  lo_reg = gen_rtx_REG (SImode, LO_REGNUM);

  /* Make a second pass over the instructions.  Delete orphaned
     high-part relocations or turn them into NOPs.  Avoid hazards
     by inserting NOPs.  */
  for (insn = get_insns (); insn != 0; insn = next_insn)
    {
      next_insn = NEXT_INSN (insn);
      if (USEFUL_INSN_P (insn))
        {
          if (GET_CODE (PATTERN (insn)) == SEQUENCE)
            {
              /* If we find an orphaned high-part relocation in a delay
                 slot, it's easier to turn that instruction into a NOP than
                 to delete it.  The delay slot will be a NOP either way.  */
              FOR_EACH_SUBINSN (subinsn, insn)
                if (INSN_P (subinsn))
                  {
                    if (mips_orphaned_high_part_p (htab, subinsn))
                      {
                        PATTERN (subinsn) = gen_nop ();
                        INSN_CODE (subinsn) = CODE_FOR_nop;
                      }
                    mips_avoid_hazard (last_insn, subinsn, &hilo_delay,
                                       &delayed_reg, lo_reg);
                  }
              last_insn = insn;
            }
          else
            {
              /* INSN is a single instruction.  Delete it if it's an
                 orphaned high-part relocation.  */
              if (mips_orphaned_high_part_p (htab, insn))
                delete_insn (insn);
              /* Also delete cache barriers if the last instruction
                 was an annulled branch.  INSN will not be speculatively
                 executed.  */
              else if (recog_memoized (insn) == CODE_FOR_r10k_cache_barrier
                       && last_insn
                       && INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (last_insn)))
                delete_insn (insn);
              else
                {
                  mips_avoid_hazard (last_insn, insn, &hilo_delay,
                                     &delayed_reg, lo_reg);
                  last_insn = insn;
                }
            }
        }
    }

  htab_delete (htab);
}

/* If we are using a GOT, but have not decided to use a global pointer yet,
   see whether we need one to implement long branches.  Convert the ghost
   global-pointer instructions into real ones if so.  */

static bool
mips_expand_ghost_gp_insns (void)
{
  rtx insn;
  int normal_length;

  /* Quick exit if we already know that we will or won't need a
     global pointer.  */
  if (!TARGET_USE_GOT
      || cfun->machine->global_pointer == INVALID_REGNUM
      || mips_must_initialize_gp_p ())
    return false;

  shorten_branches (get_insns ());

  /* Look for a branch that is longer than normal.  The normal length for
     non-MIPS16 branches is 8, because the length includes the delay slot.
     It is 4 for MIPS16, because MIPS16 branches are extended instructions,
     but they have no delay slot.  */
  normal_length = (TARGET_MIPS16 ? 4 : 8);
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if (JUMP_P (insn)
        && USEFUL_INSN_P (insn)
        && get_attr_length (insn) > normal_length)
      break;

  if (insn == NULL_RTX)
    return false;

  /* We've now established that we need $gp.  */
  cfun->machine->must_initialize_gp_p = true;
  split_all_insns_noflow ();

  return true;
}

/* Subroutine of mips_reorg to manage passes that require DF.  */

static void
mips_df_reorg (void)
{
  /* Create use-def chains.  */
  df_set_flags (DF_EQ_NOTES);
  df_chain_add_problem (DF_UD_CHAIN);
  df_analyze ();

  if (TARGET_RELAX_PIC_CALLS)
    mips_annotate_pic_calls ();

  if (mips_r10k_cache_barrier != R10K_CACHE_BARRIER_NONE)
    r10k_insert_cache_barriers ();

  df_finish_pass (false);
}

/* Implement TARGET_MACHINE_DEPENDENT_REORG.  */

static void
mips_reorg (void)
{
  /* Restore the BLOCK_FOR_INSN pointers, which are needed by DF.  Also during
     insn splitting in mips16_lay_out_constants, DF insn info is only kept up
     to date if the CFG is available.  */
  if (mips_cfg_in_reorg ())
    compute_bb_for_insn ();
  mips16_lay_out_constants ();
  if (mips_cfg_in_reorg ())
    {
      mips_df_reorg ();
      free_bb_for_insn ();
    }

  if (optimize > 0 && flag_delayed_branch)
    dbr_schedule (get_insns ());
  mips_reorg_process_insns ();
  if (!TARGET_MIPS16
      && TARGET_EXPLICIT_RELOCS
      && TUNE_MIPS4130
      && TARGET_VR4130_ALIGN)
    vr4130_align_insns ();
  if (mips_expand_ghost_gp_insns ())
    /* The expansion could invalidate some of the VR4130 alignment
       optimizations, but this should be an extremely rare case anyhow.  */
    mips_reorg_process_insns ();
}

/* Implement TARGET_ASM_OUTPUT_MI_THUNK.  Generate rtl rather than asm text
   in order to avoid duplicating too much logic from elsewhere.  */

static void
mips_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
                      HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
                      tree function)
{
  rtx this_rtx, temp1, temp2, insn, fnaddr;
  bool use_sibcall_p;

  /* Pretend to be a post-reload pass while generating rtl.  */
  reload_completed = 1;

  /* Mark the end of the (empty) prologue.  */
  emit_note (NOTE_INSN_PROLOGUE_END);

  /* Determine if we can use a sibcall to call FUNCTION directly.  */
  fnaddr = XEXP (DECL_RTL (function), 0);
  use_sibcall_p = (mips_function_ok_for_sibcall (function, NULL)
                   && const_call_insn_operand (fnaddr, Pmode));

  /* Determine if we need to load FNADDR from the GOT.  */
  if (!use_sibcall_p
      && (mips_got_symbol_type_p
          (mips_classify_symbol (fnaddr, SYMBOL_CONTEXT_LEA))))
    {
      /* Pick a global pointer.  Use a call-clobbered register if
         TARGET_CALL_SAVED_GP.  */
      cfun->machine->global_pointer
        = TARGET_CALL_SAVED_GP ? 15 : GLOBAL_POINTER_REGNUM;
      cfun->machine->must_initialize_gp_p = true;
      SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);

      /* Set up the global pointer for n32 or n64 abicalls.  */
      mips_emit_loadgp ();
    }

  /* We need two temporary registers in some cases.  */
  temp1 = gen_rtx_REG (Pmode, 2);
  temp2 = gen_rtx_REG (Pmode, 3);

  /* Find out which register contains the "this" pointer.  */
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
  else
    this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST);

  /* Add DELTA to THIS_RTX.  */
  if (delta != 0)
    {
      rtx offset = GEN_INT (delta);
      if (!SMALL_OPERAND (delta))
        {
          mips_emit_move (temp1, offset);
          offset = temp1;
        }
      emit_insn (gen_add3_insn (this_rtx, this_rtx, offset));
    }

  /* If needed, add *(*THIS_RTX + VCALL_OFFSET) to THIS_RTX.  */
  if (vcall_offset != 0)
    {
      rtx addr;

      /* Set TEMP1 to *THIS_RTX.  */
      mips_emit_move (temp1, gen_rtx_MEM (Pmode, this_rtx));

      /* Set ADDR to a legitimate address for *THIS_RTX + VCALL_OFFSET.  */
      addr = mips_add_offset (temp2, temp1, vcall_offset);

      /* Load the offset and add it to THIS_RTX.  */
      mips_emit_move (temp1, gen_rtx_MEM (Pmode, addr));
      emit_insn (gen_add3_insn (this_rtx, this_rtx, temp1));
    }

  /* Jump to the target function.  Use a sibcall if direct jumps are
     allowed, otherwise load the address into a register first.  */
  if (use_sibcall_p)
    {
      insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
      SIBLING_CALL_P (insn) = 1;
    }
  else
    {
      /* This is messy.  GAS treats "la $25,foo" as part of a call
         sequence and may allow a global "foo" to be lazily bound.
         The general move patterns therefore reject this combination.

         In this context, lazy binding would actually be OK
         for TARGET_CALL_CLOBBERED_GP, but it's still wrong for
         TARGET_CALL_SAVED_GP; see mips_load_call_address.
         We must therefore load the address via a temporary
         register if mips_dangerous_for_la25_p.

         If we jump to the temporary register rather than $25,
         the assembler can use the move insn to fill the jump's
         delay slot.

         We can use the same technique for MIPS16 code, where $25
         is not a valid JR register.  */
      if (TARGET_USE_PIC_FN_ADDR_REG
          && !TARGET_MIPS16
          && !mips_dangerous_for_la25_p (fnaddr))
        temp1 = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
      mips_load_call_address (MIPS_CALL_SIBCALL, temp1, fnaddr);

      if (TARGET_USE_PIC_FN_ADDR_REG
          && REGNO (temp1) != PIC_FUNCTION_ADDR_REGNUM)
        mips_emit_move (gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM), temp1);
      emit_jump_insn (gen_indirect_jump (temp1));
    }

  /* Run just enough of rest_of_compilation.  This sequence was
     "borrowed" from alpha.c.  */
  insn = get_insns ();
  insn_locators_alloc ();
  split_all_insns_noflow ();
  mips16_lay_out_constants ();
  shorten_branches (insn);
  final_start_function (insn, file, 1);
  final (insn, file, 1);
  final_end_function ();

  /* Clean up the vars set above.  Note that final_end_function resets
     the global pointer for us.  */
  reload_completed = 0;
}
|
15063 |
|
|
|
15064 |
|
|
/* The last argument passed to mips_set_mips16_mode, or negative if the
   function hasn't been called yet.

   There are two copies of this information.  One is saved and restored
   by the PCH process while the other is specific to this compiler
   invocation.  The information calculated by mips_set_mips16_mode
   is invalid unless the two variables are the same.  */
static int was_mips16_p = -1;
static GTY(()) int was_mips16_pch_p = -1;

/* Set up the target-dependent global state so that it matches the
   current function's ISA mode.  */

static void
mips_set_mips16_mode (int mips16_p)
{
  if (mips16_p == was_mips16_p
      && mips16_p == was_mips16_pch_p)
    return;

  /* Restore base settings of various flags.  */
  target_flags = mips_base_target_flags;
  flag_schedule_insns = mips_base_schedule_insns;
  flag_reorder_blocks_and_partition = mips_base_reorder_blocks_and_partition;
  flag_move_loop_invariants = mips_base_move_loop_invariants;
  align_loops = mips_base_align_loops;
  align_jumps = mips_base_align_jumps;
  align_functions = mips_base_align_functions;

  if (mips16_p)
    {
      /* Switch to MIPS16 mode.  */
      target_flags |= MASK_MIPS16;

      /* Don't run the scheduler before reload, since it tends to
         increase register pressure.  */
      flag_schedule_insns = 0;

      /* Don't do hot/cold partitioning.  mips16_lay_out_constants expects
         the whole function to be in a single section.  */
      flag_reorder_blocks_and_partition = 0;

      /* Don't move loop invariants, because it tends to increase
         register pressure.  It also introduces an extra move in cases
         where the constant is the first operand in a two-operand binary
         instruction, or when it forms a register argument to a function
         call.  */
      flag_move_loop_invariants = 0;

      target_flags |= MASK_EXPLICIT_RELOCS;

      /* Experiments suggest we get the best overall section-anchor
         results from using the range of an unextended LW or SW.  Code
         that makes heavy use of byte or short accesses can do better
         with ranges of 0...31 and 0...63 respectively, but most code is
         sensitive to the range of LW and SW instead.  */
      targetm.min_anchor_offset = 0;
      targetm.max_anchor_offset = 127;

      targetm.const_anchor = 0;

      /* MIPS16 has no BAL instruction.  */
      target_flags &= ~MASK_RELAX_PIC_CALLS;

      if (flag_pic && !TARGET_OLDABI)
        sorry ("MIPS16 PIC for ABIs other than o32 and o64");

      if (TARGET_XGOT)
        sorry ("MIPS16 -mxgot code");

      if (TARGET_HARD_FLOAT_ABI && !TARGET_OLDABI)
        sorry ("hard-float MIPS16 code for ABIs other than o32 and o64");
    }
  else
    {
      /* Switch to normal (non-MIPS16) mode.  */
      target_flags &= ~MASK_MIPS16;

      /* Provide default values for align_* for 64-bit targets.  */
      if (TARGET_64BIT)
        {
          if (align_loops == 0)
            align_loops = 8;
          if (align_jumps == 0)
            align_jumps = 8;
          if (align_functions == 0)
            align_functions = 8;
        }

      targetm.min_anchor_offset = -32768;
      targetm.max_anchor_offset = 32767;

      targetm.const_anchor = 0x8000;
    }

  /* (Re)initialize MIPS target internals for new ISA.  */
  mips_init_relocs ();

  if (was_mips16_p >= 0 || was_mips16_pch_p >= 0)
    /* Reinitialize target-dependent state.  */
    target_reinit ();

  was_mips16_p = mips16_p;
  was_mips16_pch_p = mips16_p;
}

/* Implement TARGET_SET_CURRENT_FUNCTION.  Decide whether the current
   function should use the MIPS16 ISA and switch modes accordingly.  */

static void
mips_set_current_function (tree fndecl)
{
  mips_set_mips16_mode (mips_use_mips16_mode_p (fndecl));
}

/* Allocate a chunk of memory for per-function machine-dependent data.  */

static struct machine_function *
mips_init_machine_status (void)
{
  return ((struct machine_function *)
          ggc_alloc_cleared (sizeof (struct machine_function)));
}

/* Return the processor associated with the given ISA level, or null
   if the ISA isn't valid.  */

static const struct mips_cpu_info *
mips_cpu_info_from_isa (int isa)
{
  unsigned int i;

  for (i = 0; i < ARRAY_SIZE (mips_cpu_info_table); i++)
    if (mips_cpu_info_table[i].isa == isa)
      return mips_cpu_info_table + i;

  return NULL;
}

/* Return true if GIVEN is the same as CANONICAL, or if it is CANONICAL
   with a final "000" replaced by "k".  Ignore case.

   Note: this function is shared between GCC and GAS.  */

static bool
mips_strict_matching_cpu_name_p (const char *canonical, const char *given)
{
  while (*given != 0 && TOLOWER (*given) == TOLOWER (*canonical))
    given++, canonical++;

  return ((*given == 0 && *canonical == 0)
          || (strcmp (canonical, "000") == 0 && strcasecmp (given, "k") == 0));
}

/* Return true if GIVEN matches CANONICAL, where GIVEN is a user-supplied
   CPU name.  We've traditionally allowed a lot of variation here.

   Note: this function is shared between GCC and GAS.  */

static bool
mips_matching_cpu_name_p (const char *canonical, const char *given)
{
  /* First see if the name matches exactly, or with a final "000"
     turned into "k".  */
  if (mips_strict_matching_cpu_name_p (canonical, given))
    return true;

  /* If not, try comparing based on numerical designation alone.
     See if GIVEN is an unadorned number, or 'r' followed by a number.  */
  if (TOLOWER (*given) == 'r')
    given++;
  if (!ISDIGIT (*given))
    return false;

  /* Skip over some well-known prefixes in the canonical name,
     hoping to find a number there too.  */
  if (TOLOWER (canonical[0]) == 'v' && TOLOWER (canonical[1]) == 'r')
    canonical += 2;
  else if (TOLOWER (canonical[0]) == 'r' && TOLOWER (canonical[1]) == 'm')
    canonical += 2;
  else if (TOLOWER (canonical[0]) == 'r')
    canonical += 1;

  return mips_strict_matching_cpu_name_p (canonical, given);
}

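/* For example (as the logic above implies), with the canonical name
   "r4000", the user strings "r4000", "R4000", "4000", "r4k" and "4k"
   all match, and with the canonical name "vr4300", both "4300" and
   "r4300" match, because the "vr" prefix is skipped before the
   numerical comparison.  */
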
/* Return the mips_cpu_info entry for the processor or ISA given
   by CPU_STRING.  Return null if the string isn't recognized.

   A similar function exists in GAS.  */

static const struct mips_cpu_info *
mips_parse_cpu (const char *cpu_string)
{
  unsigned int i;
  const char *s;

  /* In the past, we allowed upper-case CPU names, but it doesn't
     work well with the multilib machinery.  */
  for (s = cpu_string; *s != 0; s++)
    if (ISUPPER (*s))
      {
        warning (0, "CPU names must be lower case");
        break;
      }

  /* 'from-abi' selects the most compatible architecture for the given
     ABI: MIPS I for 32-bit ABIs and MIPS III for 64-bit ABIs.  For the
     EABIs, we have to decide whether we're using the 32-bit or 64-bit
     version.  */
  if (strcasecmp (cpu_string, "from-abi") == 0)
    return mips_cpu_info_from_isa (ABI_NEEDS_32BIT_REGS ? 1
                                   : ABI_NEEDS_64BIT_REGS ? 3
                                   : (TARGET_64BIT ? 3 : 1));

  /* 'default' has traditionally been a no-op.  Probably not very useful.  */
  if (strcasecmp (cpu_string, "default") == 0)
    return NULL;

  for (i = 0; i < ARRAY_SIZE (mips_cpu_info_table); i++)
    if (mips_matching_cpu_name_p (mips_cpu_info_table[i].name, cpu_string))
      return mips_cpu_info_table + i;

  return NULL;
}

/* Set up globals to generate code for the ISA or processor
   described by INFO.  */

static void
mips_set_architecture (const struct mips_cpu_info *info)
{
  if (info != 0)
    {
      mips_arch_info = info;
      mips_arch = info->cpu;
      mips_isa = info->isa;
    }
}

/* Likewise for tuning.  */

static void
mips_set_tune (const struct mips_cpu_info *info)
{
  if (info != 0)
    {
      mips_tune_info = info;
      mips_tune = info->cpu;
    }
}

/* Implement TARGET_HANDLE_OPTION. */
|
15317 |
|
|
|
15318 |
|
|
static bool
|
15319 |
|
|
mips_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
|
15320 |
|
|
{
|
15321 |
|
|
switch (code)
|
15322 |
|
|
{
|
15323 |
|
|
case OPT_mabi_:
|
15324 |
|
|
if (strcmp (arg, "32") == 0)
|
15325 |
|
|
mips_abi = ABI_32;
|
15326 |
|
|
else if (strcmp (arg, "o64") == 0)
|
15327 |
|
|
mips_abi = ABI_O64;
|
15328 |
|
|
else if (strcmp (arg, "n32") == 0)
|
15329 |
|
|
mips_abi = ABI_N32;
|
15330 |
|
|
else if (strcmp (arg, "64") == 0)
|
15331 |
|
|
mips_abi = ABI_64;
|
15332 |
|
|
else if (strcmp (arg, "eabi") == 0)
|
15333 |
|
|
mips_abi = ABI_EABI;
|
15334 |
|
|
else
|
15335 |
|
|
return false;
|
15336 |
|
|
return true;
|
15337 |
|
|
|
15338 |
|
|
case OPT_march_:
|
15339 |
|
|
case OPT_mtune_:
|
15340 |
|
|
return mips_parse_cpu (arg) != 0;
|
15341 |
|
|
|
15342 |
|
|
case OPT_mips:
|
15343 |
|
|
mips_isa_option_info = mips_parse_cpu (ACONCAT (("mips", arg, NULL)));
|
15344 |
|
|
return mips_isa_option_info != 0;
|
15345 |
|
|
|
15346 |
|
|
case OPT_mno_flush_func:
|
15347 |
|
|
mips_cache_flush_func = NULL;
|
15348 |
|
|
return true;
|
15349 |
|
|
|
15350 |
|
|
case OPT_mcode_readable_:
|
15351 |
|
|
if (strcmp (arg, "yes") == 0)
|
15352 |
|
|
mips_code_readable = CODE_READABLE_YES;
|
15353 |
|
|
else if (strcmp (arg, "pcrel") == 0)
|
15354 |
|
|
mips_code_readable = CODE_READABLE_PCREL;
|
15355 |
|
|
else if (strcmp (arg, "no") == 0)
|
15356 |
|
|
mips_code_readable = CODE_READABLE_NO;
|
15357 |
|
|
else
|
15358 |
|
|
return false;
|
15359 |
|
|
return true;
|
15360 |
|
|
|
15361 |
|
|
case OPT_mr10k_cache_barrier_:
|
15362 |
|
|
if (strcmp (arg, "load-store") == 0)
|
15363 |
|
|
mips_r10k_cache_barrier = R10K_CACHE_BARRIER_LOAD_STORE;
|
15364 |
|
|
else if (strcmp (arg, "store") == 0)
|
15365 |
|
|
mips_r10k_cache_barrier = R10K_CACHE_BARRIER_STORE;
|
15366 |
|
|
else if (strcmp (arg, "none") == 0)
|
15367 |
|
|
mips_r10k_cache_barrier = R10K_CACHE_BARRIER_NONE;
|
15368 |
|
|
else
|
15369 |
|
|
return false;
|
15370 |
|
|
return true;
|
15371 |
|
|
|
15372 |
|
|
default:
|
15373 |
|
|
return true;
|
15374 |
|
|
}
|
15375 |
|
|
}
|
15376 |
|
|
|
15377 |
|
|
/* Implement OVERRIDE_OPTIONS. */
|
15378 |
|
|
|
15379 |
|
|
void
|
15380 |
|
|
mips_override_options (void)
|
15381 |
|
|
{
|
15382 |
|
|
int i, start, regno, mode;
|
15383 |
|
|
|
15384 |
|
|
/* Process flags as though we were generating non-MIPS16 code. */
|
15385 |
|
|
mips_base_mips16 = TARGET_MIPS16;
|
15386 |
|
|
target_flags &= ~MASK_MIPS16;
|
15387 |
|
|
|
15388 |
|
|
#ifdef SUBTARGET_OVERRIDE_OPTIONS
|
15389 |
|
|
SUBTARGET_OVERRIDE_OPTIONS;
|
15390 |
|
|
#endif
|
15391 |
|
|
|
15392 |
|
|
/* Set the small data limit. */
|
15393 |
|
|
mips_small_data_threshold = (g_switch_set
|
15394 |
|
|
? g_switch_value
|
15395 |
|
|
: MIPS_DEFAULT_GVALUE);
|
15396 |
|
|
|
15397 |
|
|
/* The following code determines the architecture and register size.
|
15398 |
|
|
Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()).
|
15399 |
|
|
The GAS and GCC code should be kept in sync as much as possible. */
|
15400 |
|
|
|
15401 |
|
|
if (mips_arch_string != 0)
|
15402 |
|
|
mips_set_architecture (mips_parse_cpu (mips_arch_string));
|
15403 |
|
|
|
15404 |
|
|
if (mips_isa_option_info != 0)
|
15405 |
|
|
{
|
15406 |
|
|
if (mips_arch_info == 0)
|
15407 |
|
|
mips_set_architecture (mips_isa_option_info);
|
15408 |
|
|
else if (mips_arch_info->isa != mips_isa_option_info->isa)
|
15409 |
|
|
error ("%<-%s%> conflicts with the other architecture options, "
|
15410 |
|
|
"which specify a %s processor",
|
15411 |
|
|
mips_isa_option_info->name,
|
15412 |
|
|
mips_cpu_info_from_isa (mips_arch_info->isa)->name);
|
15413 |
|
|
}
|
15414 |
|
|
|
15415 |
|
|
if (mips_arch_info == 0)
|
15416 |
|
|
{
|
15417 |
|
|
#ifdef MIPS_CPU_STRING_DEFAULT
|
15418 |
|
|
mips_set_architecture (mips_parse_cpu (MIPS_CPU_STRING_DEFAULT));
|
15419 |
|
|
#else
|
15420 |
|
|
mips_set_architecture (mips_cpu_info_from_isa (MIPS_ISA_DEFAULT));
|
15421 |
|
|
#endif
|
15422 |
|
|
}
|
15423 |
|
|
|
15424 |
|
|
if (ABI_NEEDS_64BIT_REGS && !ISA_HAS_64BIT_REGS)
|
15425 |
|
|
error ("%<-march=%s%> is not compatible with the selected ABI",
|
15426 |
|
|
mips_arch_info->name);
|
15427 |
|
|
|
15428 |
|
|
/* Optimize for mips_arch, unless -mtune selects a different processor. */
|
15429 |
|
|
if (mips_tune_string != 0)
|
15430 |
|
|
mips_set_tune (mips_parse_cpu (mips_tune_string));
|
15431 |
|
|
|
15432 |
|
|
if (mips_tune_info == 0)
|
15433 |
|
|
mips_set_tune (mips_arch_info);
|
15434 |
|
|
|
15435 |
|
|
if ((target_flags_explicit & MASK_64BIT) != 0)
|
15436 |
|
|
{
|
15437 |
|
|
/* The user specified the size of the integer registers. Make sure
|
15438 |
|
|
it agrees with the ABI and ISA. */
|
15439 |
|
|
if (TARGET_64BIT && !ISA_HAS_64BIT_REGS)
|
15440 |
|
|
error ("%<-mgp64%> used with a 32-bit processor");
|
15441 |
|
|
else if (!TARGET_64BIT && ABI_NEEDS_64BIT_REGS)
|
15442 |
|
|
error ("%<-mgp32%> used with a 64-bit ABI");
|
15443 |
|
|
else if (TARGET_64BIT && ABI_NEEDS_32BIT_REGS)
|
15444 |
|
|
error ("%<-mgp64%> used with a 32-bit ABI");
|
15445 |
|
|
}
|
15446 |
|
|
else
|
15447 |
|
|
{
|
15448 |
|
|
/* Infer the integer register size from the ABI and processor.
|
15449 |
|
|
Restrict ourselves to 32-bit registers if that's all the
|
15450 |
|
|
processor has, or if the ABI cannot handle 64-bit registers. */
|
15451 |
|
|
if (ABI_NEEDS_32BIT_REGS || !ISA_HAS_64BIT_REGS)
|
15452 |
|
|
target_flags &= ~MASK_64BIT;
|
15453 |
|
|
else
|
15454 |
|
|
target_flags |= MASK_64BIT;
|
15455 |
|
|
}
|
15456 |
|
|
|
15457 |
|
|
if ((target_flags_explicit & MASK_FLOAT64) != 0)
|
15458 |
|
|
{
|
15459 |
|
|
if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64)
|
15460 |
|
|
error ("unsupported combination: %s", "-mfp64 -msingle-float");
|
15461 |
|
|
else if (TARGET_64BIT && TARGET_DOUBLE_FLOAT && !TARGET_FLOAT64)
|
15462 |
|
|
error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
|
15463 |
|
|
else if (!TARGET_64BIT && TARGET_FLOAT64)
|
15464 |
|
|
{
|
15465 |
|
|
if (!ISA_HAS_MXHC1)
|
15466 |
|
|
error ("%<-mgp32%> and %<-mfp64%> can only be combined if"
|
15467 |
|
|
" the target supports the mfhc1 and mthc1 instructions");
|
15468 |
|
|
else if (mips_abi != ABI_32)
|
15469 |
|
|
error ("%<-mgp32%> and %<-mfp64%> can only be combined when using"
|
15470 |
|
|
" the o32 ABI");
|
15471 |
|
|
}
|
15472 |
|
|
}
|
15473 |
|
|
else
|
15474 |
|
|
{
|
15475 |
|
|
/* -msingle-float selects 32-bit float registers. Otherwise the
|
15476 |
|
|
float registers should be the same size as the integer ones. */
|
15477 |
|
|
if (TARGET_64BIT && TARGET_DOUBLE_FLOAT)
|
15478 |
|
|
target_flags |= MASK_FLOAT64;
|
15479 |
|
|
else
|
15480 |
|
|
target_flags &= ~MASK_FLOAT64;
|
15481 |
|
|
}
|
15482 |
|
|
|
15483 |
|
|
/* End of code shared with GAS. */
|
15484 |
|
|
|
15485 |
|
|
/* If no -mlong* option was given, infer it from the other options. */
|
15486 |
|
|
if ((target_flags_explicit & MASK_LONG64) == 0)
|
15487 |
|
|
{
|
15488 |
|
|
if ((mips_abi == ABI_EABI && TARGET_64BIT) || mips_abi == ABI_64)
|
15489 |
|
|
target_flags |= MASK_LONG64;
|
15490 |
|
|
else
|
15491 |
|
|
target_flags &= ~MASK_LONG64;
|
15492 |
|
|
}
|
15493 |
|
|
|
15494 |
|
|
if (!TARGET_OLDABI)
|
15495 |
|
|
flag_pcc_struct_return = 0;
|
15496 |
|
|
|
15497 |
|
|
/* Decide which rtx_costs structure to use. */
|
15498 |
|
|
if (optimize_size)
|
15499 |
|
|
mips_cost = &mips_rtx_cost_optimize_size;
|
15500 |
|
|
else
|
15501 |
|
|
mips_cost = &mips_rtx_cost_data[mips_tune];
|
15502 |
|
|
|
15503 |
|
|
/* If the user hasn't specified a branch cost, use the processor's
|
15504 |
|
|
default. */
|
15505 |
|
|
if (mips_branch_cost == 0)
|
15506 |
|
|
mips_branch_cost = mips_cost->branch_cost;
|
15507 |
|
|
|
15508 |
|
|
/* If neither -mbranch-likely nor -mno-branch-likely was given
|
15509 |
|
|
on the command line, set MASK_BRANCHLIKELY based on the target
|
15510 |
|
|
architecture and tuning flags. Annulled delay slots are a
|
15511 |
|
|
size win, so we only consider the processor-specific tuning
|
15512 |
|
|
for !optimize_size. */
|
15513 |
|
|
if ((target_flags_explicit & MASK_BRANCHLIKELY) == 0)
|
15514 |
|
|
{
|
15515 |
|
|
if (ISA_HAS_BRANCHLIKELY
|
15516 |
|
|
&& (optimize_size
|
15517 |
|
|
|| (mips_tune_info->tune_flags & PTF_AVOID_BRANCHLIKELY) == 0))
|
15518 |
|
|
target_flags |= MASK_BRANCHLIKELY;
|
15519 |
|
|
else
|
15520 |
|
|
target_flags &= ~MASK_BRANCHLIKELY;
|
15521 |
|
|
}
|
15522 |
|
|
else if (TARGET_BRANCHLIKELY && !ISA_HAS_BRANCHLIKELY)
|
15523 |
|
|
warning (0, "the %qs architecture does not support branch-likely"
|
15524 |
|
|
" instructions", mips_arch_info->name);
|
15525 |
|
|
|
15526 |
|
|
/* The effect of -mabicalls isn't defined for the EABI. */
|
15527 |
|
|
if (mips_abi == ABI_EABI && TARGET_ABICALLS)
|
15528 |
|
|
{
|
15529 |
|
|
error ("unsupported combination: %s", "-mabicalls -mabi=eabi");
|
15530 |
|
|
target_flags &= ~MASK_ABICALLS;
|
15531 |
|
|
}
|
15532 |
|
|
|
15533 |
|
|
if (TARGET_ABICALLS_PIC2)
|
15534 |
|
|
/* We need to set flag_pic for executables as well as DSOs
|
15535 |
|
|
because we may reference symbols that are not defined in
|
15536 |
|
|
the final executable. (MIPS does not use things like
|
15537 |
|
|
copy relocs, for example.)
|
15538 |
|
|
|
15539 |
|
|
There is a body of code that uses __PIC__ to distinguish
|
15540 |
|
|
between -mabicalls and -mno-abicalls code. The non-__PIC__
|
15541 |
|
|
variant is usually appropriate for TARGET_ABICALLS_PIC0, as
|
15542 |
|
|
long as any indirect jumps use $25. */
|
15543 |
|
|
flag_pic = 1;
|
15544 |
|
|
|
15545 |
|
|
/* -mvr4130-align is a "speed over size" optimization: it usually produces
|
15546 |
|
|
faster code, but at the expense of more nops. Enable it at -O3 and
|
15547 |
|
|
above. */
|
15548 |
|
|
if (optimize > 2 && (target_flags_explicit & MASK_VR4130_ALIGN) == 0)
|
15549 |
|
|
target_flags |= MASK_VR4130_ALIGN;
|
15550 |
|
|
|
15551 |
|
|
/* Prefer a call to memcpy over inline code when optimizing for size,
|
15552 |
|
|
though see MOVE_RATIO in mips.h. */
|
15553 |
|
|
if (optimize_size && (target_flags_explicit & MASK_MEMCPY) == 0)
|
15554 |
|
|
target_flags |= MASK_MEMCPY;
|
15555 |
|
|
|
15556 |
|
|
/* If we have a nonzero small-data limit, check that the -mgpopt
|
15557 |
|
|
setting is consistent with the other target flags. */
|
15558 |
|
|
if (mips_small_data_threshold > 0)
|
15559 |
|
|
{
|
15560 |
|
|
if (!TARGET_GPOPT)
|
15561 |
|
|
{
|
15562 |
|
|
if (!TARGET_EXPLICIT_RELOCS)
|
15563 |
|
|
error ("%<-mno-gpopt%> needs %<-mexplicit-relocs%>");
|
15564 |
|
|
|
15565 |
|
|
TARGET_LOCAL_SDATA = false;
|
15566 |
|
|
TARGET_EXTERN_SDATA = false;
|
15567 |
|
|
}
|
15568 |
|
|
else
|
15569 |
|
|
{
|
15570 |
|
|
if (TARGET_VXWORKS_RTP)
|
15571 |
|
|
warning (0, "cannot use small-data accesses for %qs", "-mrtp");
|
15572 |
|
|
|
15573 |
|
|
if (TARGET_ABICALLS)
|
15574 |
|
|
warning (0, "cannot use small-data accesses for %qs",
|
15575 |
|
|
"-mabicalls");
|
15576 |
|
|
}
|
15577 |
|
|
}
|
15578 |
|
|
|
15579 |
|
|
#ifdef MIPS_TFMODE_FORMAT
|
15580 |
|
|
REAL_MODE_FORMAT (TFmode) = &MIPS_TFMODE_FORMAT;
|
15581 |
|
|
#endif
|
15582 |
|
|
|
15583 |
|
|
/* Make sure that the user didn't turn off paired single support when
|
15584 |
|
|
MIPS-3D support is requested. */
|
15585 |
|
|
if (TARGET_MIPS3D
|
15586 |
|
|
&& (target_flags_explicit & MASK_PAIRED_SINGLE_FLOAT)
|
15587 |
|
|
&& !TARGET_PAIRED_SINGLE_FLOAT)
|
15588 |
|
|
error ("%<-mips3d%> requires %<-mpaired-single%>");
|
15589 |
|
|
|
15590 |
|
|
/* If TARGET_MIPS3D, enable MASK_PAIRED_SINGLE_FLOAT. */
|
15591 |
|
|
if (TARGET_MIPS3D)
|
15592 |
|
|
target_flags |= MASK_PAIRED_SINGLE_FLOAT;
|
15593 |
|
|
|
15594 |
|
|
/* Make sure that when TARGET_PAIRED_SINGLE_FLOAT is true, TARGET_FLOAT64
|
15595 |
|
|
and TARGET_HARD_FLOAT_ABI are both true. */
|
15596 |
|
|
if (TARGET_PAIRED_SINGLE_FLOAT && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT_ABI))
|
15597 |
|
|
error ("%qs must be used with %qs",
|
15598 |
|
|
TARGET_MIPS3D ? "-mips3d" : "-mpaired-single",
|
15599 |
|
|
TARGET_HARD_FLOAT_ABI ? "-mfp64" : "-mhard-float");
|
15600 |
|
|
|
15601 |
|
|
/* Make sure that the ISA supports TARGET_PAIRED_SINGLE_FLOAT when it is
|
15602 |
|
|
enabled. */
|
15603 |
|
|
if (TARGET_PAIRED_SINGLE_FLOAT && !ISA_HAS_PAIRED_SINGLE)
|
15604 |
|
|
warning (0, "the %qs architecture does not support paired-single"
|
15605 |
|
|
" instructions", mips_arch_info->name);
|
15606 |
|
|
|
15607 |
|
|
if (mips_r10k_cache_barrier != R10K_CACHE_BARRIER_NONE
|
15608 |
|
|
&& !TARGET_CACHE_BUILTIN)
|
15609 |
|
|
{
|
15610 |
|
|
error ("%qs requires a target that provides the %qs instruction",
|
15611 |
|
|
"-mr10k-cache-barrier", "cache");
|
15612 |
|
|
mips_r10k_cache_barrier = R10K_CACHE_BARRIER_NONE;
|
15613 |
|
|
}
|
15614 |
|
|
|
15615 |
|
|
/* If TARGET_DSPR2, enable MASK_DSP. */
|
15616 |
|
|
if (TARGET_DSPR2)
|
15617 |
|
|
target_flags |= MASK_DSP;
|
15618 |
|
|
|
15619 |
|
|
/* .eh_frame addresses should be the same width as a C pointer.
|
15620 |
|
|
Most MIPS ABIs support only one pointer size, so the assembler
|
15621 |
|
|
will usually know exactly how big an .eh_frame address is.
|
15622 |
|
|
|
15623 |
|
|
Unfortunately, this is not true of the 64-bit EABI. The ABI was
|
15624 |
|
|
originally defined to use 64-bit pointers (i.e. it is LP64), and
|
15625 |
|
|
this is still the default mode. However, we also support an n32-like
|
15626 |
|
|
ILP32 mode, which is selected by -mlong32. The problem is that the
|
15627 |
|
|
assembler has traditionally not had an -mlong option, so it has
|
15628 |
|
|
traditionally not known whether we're using the ILP32 or LP64 form.
|
15629 |
|
|
|
15630 |
|
|
As it happens, gas versions up to and including 2.19 use _32-bit_
|
15631 |
|
|
addresses for EABI64 .cfi_* directives. This is wrong for the
|
15632 |
|
|
default LP64 mode, so we can't use the directives by default.
|
15633 |
|
|
Moreover, since gas's current behavior is at odds with gcc's
|
15634 |
|
|
default behavior, it seems unwise to rely on future versions
|
15635 |
|
|
of gas behaving the same way. We therefore avoid using .cfi
|
15636 |
|
|
directives for -mlong32 as well. */
|
15637 |
|
|
if (mips_abi == ABI_EABI && TARGET_64BIT)
|
15638 |
|
|
flag_dwarf2_cfi_asm = 0;
|
15639 |
|
|
|
15640 |
|
|
/* .cfi_* directives generate a read-only section, so fall back on
|
15641 |
|
|
manual .eh_frame creation if we need the section to be writable. */
|
15642 |
|
|
if (TARGET_WRITABLE_EH_FRAME)
|
15643 |
|
|
flag_dwarf2_cfi_asm = 0;
|
15644 |
|
|
|
15645 |
|
|
mips_init_print_operand_punct ();
|
15646 |
|
|
|
15647 |
|
|
/* Set up array to map GCC register number to debug register number.
|
15648 |
|
|
Ignore the special purpose register numbers. */
|
15649 |
|
|
|
15650 |
|
|
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
|
15651 |
|
|
{
|
15652 |
|
|
mips_dbx_regno[i] = INVALID_REGNUM;
|
15653 |
|
|
if (GP_REG_P (i) || FP_REG_P (i) || ALL_COP_REG_P (i))
|
15654 |
|
|
mips_dwarf_regno[i] = i;
|
15655 |
|
|
else
|
15656 |
|
|
mips_dwarf_regno[i] = INVALID_REGNUM;
|
15657 |
|
|
}
|
15658 |
|
|
|
15659 |
|
|
start = GP_DBX_FIRST - GP_REG_FIRST;
|
15660 |
|
|
for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
|
15661 |
|
|
mips_dbx_regno[i] = i + start;
|
15662 |
|
|
|
15663 |
|
|
start = FP_DBX_FIRST - FP_REG_FIRST;
|
15664 |
|
|
for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
|
15665 |
|
|
mips_dbx_regno[i] = i + start;
|
15666 |
|
|
|
15667 |
|
|
/* Accumulator debug registers use big-endian ordering. */
|
15668 |
|
|
mips_dbx_regno[HI_REGNUM] = MD_DBX_FIRST + 0;
|
15669 |
|
|
mips_dbx_regno[LO_REGNUM] = MD_DBX_FIRST + 1;
|
15670 |
|
|
mips_dwarf_regno[HI_REGNUM] = MD_REG_FIRST + 0;
|
15671 |
|
|
mips_dwarf_regno[LO_REGNUM] = MD_REG_FIRST + 1;
|
15672 |
|
|
for (i = DSP_ACC_REG_FIRST; i <= DSP_ACC_REG_LAST; i += 2)
|
15673 |
|
|
{
|
15674 |
|
|
mips_dwarf_regno[i + TARGET_LITTLE_ENDIAN] = i;
|
15675 |
|
|
mips_dwarf_regno[i + TARGET_BIG_ENDIAN] = i + 1;
|
15676 |
|
|
}
|
15677 |
|
|
|
15678 |
|
|
/* Set up mips_hard_regno_mode_ok. */
|
15679 |
|
|
for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
|
15680 |
|
|
for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
|
15681 |
|
|
mips_hard_regno_mode_ok[mode][regno]
|
15682 |
|
|
= mips_hard_regno_mode_ok_p (regno, (enum machine_mode) mode);
|
15683 |
|
|
|
15684 |
|
|
/* Function to allocate machine-dependent function status. */
|
15685 |
|
|
init_machine_status = &mips_init_machine_status;
|
15686 |
|
|
|
15687 |
|
|
/* Default to working around R4000 errata only if the processor
|
15688 |
|
|
was selected explicitly. */
|
15689 |
|
|
if ((target_flags_explicit & MASK_FIX_R4000) == 0
|
15690 |
|
|
&& mips_matching_cpu_name_p (mips_arch_info->name, "r4000"))
|
15691 |
|
|
target_flags |= MASK_FIX_R4000;
|
15692 |
|
|
|
15693 |
|
|
/* Default to working around R4400 errata only if the processor
|
15694 |
|
|
was selected explicitly. */
|
15695 |
|
|
if ((target_flags_explicit & MASK_FIX_R4400) == 0
|
15696 |
|
|
&& mips_matching_cpu_name_p (mips_arch_info->name, "r4400"))
|
15697 |
|
|
target_flags |= MASK_FIX_R4400;
|
15698 |
|
|
|
15699 |
|
|
/* Default to working around R10000 errata only if the processor
|
15700 |
|
|
was selected explicitly. */
|
15701 |
|
|
if ((target_flags_explicit & MASK_FIX_R10000) == 0
|
15702 |
|
|
&& mips_matching_cpu_name_p (mips_arch_info->name, "r10000"))
|
15703 |
|
|
target_flags |= MASK_FIX_R10000;
|
15704 |
|
|
|
15705 |
|
|
/* Make sure that branch-likely instructions are available when using
|
15706 |
|
|
-mfix-r10000. The instructions are not available if either:
|
15707 |
|
|
|
15708 |
|
|
1. -mno-branch-likely was passed.
|
15709 |
|
|
2. The selected ISA does not support branch-likely and
|
15710 |
|
|
the command line does not include -mbranch-likely. */
|
15711 |
|
|
if (TARGET_FIX_R10000
|
15712 |
|
|
&& ((target_flags_explicit & MASK_BRANCHLIKELY) == 0
|
15713 |
|
|
? !ISA_HAS_BRANCHLIKELY
|
15714 |
|
|
: !TARGET_BRANCHLIKELY))
|
15715 |
|
|
sorry ("%qs requires branch-likely instructions", "-mfix-r10000");
|
15716 |
|
|
|
15717 |
|
|
if (TARGET_SYNCI && !ISA_HAS_SYNCI)
|
15718 |
|
|
{
|
15719 |
|
|
warning (0, "the %qs architecture does not support the synci "
|
15720 |
|
|
"instruction", mips_arch_info->name);
|
15721 |
|
|
target_flags &= ~MASK_SYNCI;
|
15722 |
|
|
}
|
15723 |
|
|
|
15724 |
|
|
/* Only optimize PIC indirect calls if they are actually required. */
|
15725 |
|
|
if (!TARGET_USE_GOT || !TARGET_EXPLICIT_RELOCS)
|
15726 |
|
|
target_flags &= ~MASK_RELAX_PIC_CALLS;
|
15727 |
|
|
|
15728 |
|
|
/* Save base state of options. */
|
15729 |
|
|
mips_base_target_flags = target_flags;
|
15730 |
|
|
mips_base_schedule_insns = flag_schedule_insns;
|
15731 |
|
|
mips_base_reorder_blocks_and_partition = flag_reorder_blocks_and_partition;
|
15732 |
|
|
mips_base_move_loop_invariants = flag_move_loop_invariants;
|
15733 |
|
|
mips_base_align_loops = align_loops;
|
15734 |
|
|
mips_base_align_jumps = align_jumps;
|
15735 |
|
|
mips_base_align_functions = align_functions;
|
15736 |
|
|
|
15737 |
|
|
/* Now select the ISA mode.
|
15738 |
|
|
|
15739 |
|
|
Do all CPP-sensitive stuff in non-MIPS16 mode; we'll switch to
|
15740 |
|
|
MIPS16 mode afterwards if need be. */
|
15741 |
|
|
mips_set_mips16_mode (false);
|
15742 |
|
|
}
|
15743 |
|
|
|
15744 |
|
|
/* Swap the register information for registers I and I + 1, which
|
15745 |
|
|
currently have the wrong endianness. Note that the registers'
|
15746 |
|
|
fixedness and call-clobberedness might have been set on the
|
15747 |
|
|
command line. */
|
15748 |
|
|
|
15749 |
|
|
static void
|
15750 |
|
|
mips_swap_registers (unsigned int i)
|
15751 |
|
|
{
|
15752 |
|
|
int tmpi;
|
15753 |
|
|
const char *tmps;
|
15754 |
|
|
|
15755 |
|
|
#define SWAP_INT(X, Y) (tmpi = (X), (X) = (Y), (Y) = tmpi)
|
15756 |
|
|
#define SWAP_STRING(X, Y) (tmps = (X), (X) = (Y), (Y) = tmps)
|
15757 |
|
|
|
15758 |
|
|
SWAP_INT (fixed_regs[i], fixed_regs[i + 1]);
|
15759 |
|
|
SWAP_INT (call_used_regs[i], call_used_regs[i + 1]);
|
15760 |
|
|
SWAP_INT (call_really_used_regs[i], call_really_used_regs[i + 1]);
|
15761 |
|
|
SWAP_STRING (reg_names[i], reg_names[i + 1]);
|
15762 |
|
|
|
15763 |
|
|
#undef SWAP_STRING
|
15764 |
|
|
#undef SWAP_INT
|
15765 |
|
|
}
|
15766 |
|
|
|
15767 |
|
|
/* Implement CONDITIONAL_REGISTER_USAGE. */
|
15768 |
|
|
|
15769 |
|
|
void
|
15770 |
|
|
mips_conditional_register_usage (void)
|
15771 |
|
|
{
|
15772 |
|
|
|
15773 |
|
|
if (ISA_HAS_DSP)
|
15774 |
|
|
{
|
15775 |
|
|
/* These DSP control register fields are global. */
|
15776 |
|
|
global_regs[CCDSP_PO_REGNUM] = 1;
|
15777 |
|
|
global_regs[CCDSP_SC_REGNUM] = 1;
|
15778 |
|
|
}
|
15779 |
|
|
else
|
15780 |
|
|
{
|
15781 |
|
|
int regno;
|
15782 |
|
|
|
15783 |
|
|
for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno++)
|
15784 |
|
|
fixed_regs[regno] = call_used_regs[regno] = 1;
|
15785 |
|
|
}
|
15786 |
|
|
if (!TARGET_HARD_FLOAT)
|
15787 |
|
|
{
|
15788 |
|
|
int regno;
|
15789 |
|
|
|
15790 |
|
|
for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
|
15791 |
|
|
fixed_regs[regno] = call_used_regs[regno] = 1;
|
15792 |
|
|
for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
|
15793 |
|
|
fixed_regs[regno] = call_used_regs[regno] = 1;
|
15794 |
|
|
}
|
15795 |
|
|
else if (! ISA_HAS_8CC)
|
15796 |
|
|
{
|
15797 |
|
|
int regno;
|
15798 |
|
|
|
15799 |
|
|
/* We only have a single condition-code register. We implement
|
15800 |
|
|
this by fixing all the condition-code registers and generating
|
15801 |
|
|
RTL that refers directly to ST_REG_FIRST. */
|
15802 |
|
|
for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
|
15803 |
|
|
fixed_regs[regno] = call_used_regs[regno] = 1;
|
15804 |
|
|
}
|
15805 |
|
|
/* In MIPS16 mode, we permit the $t temporary registers to be used
|
15806 |
|
|
for reload. We prohibit the unused $s registers, since they
|
15807 |
|
|
are call-saved, and saving them via a MIPS16 register would
|
15808 |
|
|
probably waste more time than just reloading the value. */
|
15809 |
|
|
if (TARGET_MIPS16)
|
15810 |
|
|
{
|
15811 |
|
|
fixed_regs[18] = call_used_regs[18] = 1;
|
15812 |
|
|
fixed_regs[19] = call_used_regs[19] = 1;
|
15813 |
|
|
fixed_regs[20] = call_used_regs[20] = 1;
|
15814 |
|
|
fixed_regs[21] = call_used_regs[21] = 1;
|
15815 |
|
|
fixed_regs[22] = call_used_regs[22] = 1;
|
15816 |
|
|
fixed_regs[23] = call_used_regs[23] = 1;
|
15817 |
|
|
fixed_regs[26] = call_used_regs[26] = 1;
|
15818 |
|
|
fixed_regs[27] = call_used_regs[27] = 1;
|
15819 |
|
|
fixed_regs[30] = call_used_regs[30] = 1;
|
15820 |
|
|
}
|
15821 |
|
|
/* $f20-$f23 are call-clobbered for n64. */
|
15822 |
|
|
if (mips_abi == ABI_64)
|
15823 |
|
|
{
|
15824 |
|
|
int regno;
|
15825 |
|
|
for (regno = FP_REG_FIRST + 20; regno < FP_REG_FIRST + 24; regno++)
|
15826 |
|
|
call_really_used_regs[regno] = call_used_regs[regno] = 1;
|
15827 |
|
|
}
|
15828 |
|
|
/* Odd registers in the range $f21-$f31 (inclusive) are call-clobbered
|
15829 |
|
|
for n32. */
|
15830 |
|
|
if (mips_abi == ABI_N32)
|
15831 |
|
|
{
|
15832 |
|
|
int regno;
|
15833 |
|
|
for (regno = FP_REG_FIRST + 21; regno <= FP_REG_FIRST + 31; regno+=2)
|
15834 |
|
|
call_really_used_regs[regno] = call_used_regs[regno] = 1;
|
15835 |
|
|
}
|
15836 |
|
|
/* Make sure that double-register accumulator values are correctly
|
15837 |
|
|
ordered for the current endianness. */
|
15838 |
|
|
if (TARGET_LITTLE_ENDIAN)
|
15839 |
|
|
{
|
15840 |
|
|
unsigned int regno;
|
15841 |
|
|
|
15842 |
|
|
mips_swap_registers (MD_REG_FIRST);
|
15843 |
|
|
for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno += 2)
|
15844 |
|
|
mips_swap_registers (regno);
|
15845 |
|
|
}
|
15846 |
|
|
}
|
15847 |
|
|
|
15848 |
|
|
/* Initialize vector TARGET to VALS. */
|
15849 |
|
|
|
15850 |
|
|
void
|
15851 |
|
|
mips_expand_vector_init (rtx target, rtx vals)
|
15852 |
|
|
{
|
15853 |
|
|
enum machine_mode mode;
|
15854 |
|
|
enum machine_mode inner;
|
15855 |
|
|
unsigned int i, n_elts;
|
15856 |
|
|
rtx mem;
|
15857 |
|
|
|
15858 |
|
|
mode = GET_MODE (target);
|
15859 |
|
|
inner = GET_MODE_INNER (mode);
|
15860 |
|
|
n_elts = GET_MODE_NUNITS (mode);
|
15861 |
|
|
|
15862 |
|
|
gcc_assert (VECTOR_MODE_P (mode));
|
15863 |
|
|
|
15864 |
|
|
mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), 0);
|
15865 |
|
|
for (i = 0; i < n_elts; i++)
|
15866 |
|
|
emit_move_insn (adjust_address_nv (mem, inner, i * GET_MODE_SIZE (inner)),
|
15867 |
|
|
XVECEXP (vals, 0, i));
|
15868 |
|
|
|
15869 |
|
|
emit_move_insn (target, mem);
|
15870 |
|
|
}
|
15871 |
|
|
|
15872 |
|
|
/* When generating MIPS16 code, we want to allocate $24 (T_REG) before
|
15873 |
|
|
other registers for instructions for which it is possible. This
|
15874 |
|
|
encourages the compiler to use CMP in cases where an XOR would
|
15875 |
|
|
require some register shuffling. */
|
15876 |
|
|
|
15877 |
|
|
void
|
15878 |
|
|
mips_order_regs_for_local_alloc (void)
|
15879 |
|
|
{
|
15880 |
|
|
int i;
|
15881 |
|
|
|
15882 |
|
|
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
|
15883 |
|
|
reg_alloc_order[i] = i;
|
15884 |
|
|
|
15885 |
|
|
if (TARGET_MIPS16)
|
15886 |
|
|
{
|
15887 |
|
|
/* It really doesn't matter where we put register 0, since it is
|
15888 |
|
|
a fixed register anyhow. */
|
15889 |
|
|
reg_alloc_order[0] = 24;
|
15890 |
|
|
reg_alloc_order[24] = 0;
|
15891 |
|
|
}
|
15892 |
|
|
}
|
15893 |
|
|
|
15894 |
|
|
/* Implement EH_USES.  */

bool
mips_eh_uses (unsigned int regno)
{
  if (reload_completed && !TARGET_ABSOLUTE_JUMPS)
    {
      /* We need to force certain registers to be live in order to handle
         PIC long branches correctly.  See mips_must_initialize_gp_p for
         details.  */
      if (mips_cfun_has_cprestore_slot_p ())
        {
          if (regno == CPRESTORE_SLOT_REGNUM)
            return true;
        }
      else
        {
          if (cfun->machine->global_pointer == regno)
            return true;
        }
    }

  return false;
}

/* Implement EPILOGUE_USES.  */

bool
mips_epilogue_uses (unsigned int regno)
{
  /* Say that the epilogue uses the return address register.  Note that
     in the case of sibcalls, the values "used by the epilogue" are
     considered live at the start of the called function.  */
  if (regno == RETURN_ADDR_REGNUM)
    return true;

  /* If using a GOT, say that the epilogue also uses GOT_VERSION_REGNUM.
     See the comment above load_call<mode> for details.  */
  if (TARGET_USE_GOT && (regno) == GOT_VERSION_REGNUM)
    return true;

  /* An interrupt handler must preserve some registers that are
     ordinarily call-clobbered.  */
  if (cfun->machine->interrupt_handler_p
      && mips_interrupt_extra_call_saved_reg_p (regno))
    return true;

  return false;
}

/* A for_each_rtx callback. Stop the search if *X is an AT register. */
|
15945 |
|
|
|
15946 |
|
|
static int
|
15947 |
|
|
mips_at_reg_p (rtx *x, void *data ATTRIBUTE_UNUSED)
|
15948 |
|
|
{
|
15949 |
|
|
return REG_P (*x) && REGNO (*x) == AT_REGNUM;
|
15950 |
|
|
}
|
15951 |
|
|
|
15952 |
|
|
/* Return true if INSN needs to be wrapped in ".set noat".
|
15953 |
|
|
INSN has NOPERANDS operands, stored in OPVEC. */
|
15954 |
|
|
|
15955 |
|
|
static bool
|
15956 |
|
|
mips_need_noat_wrapper_p (rtx insn, rtx *opvec, int noperands)
|
15957 |
|
|
{
|
15958 |
|
|
int i;
|
15959 |
|
|
|
15960 |
|
|
if (recog_memoized (insn) >= 0)
|
15961 |
|
|
for (i = 0; i < noperands; i++)
|
15962 |
|
|
if (for_each_rtx (&opvec[i], mips_at_reg_p, NULL))
|
15963 |
|
|
return true;
|
15964 |
|
|
return false;
|
15965 |
|
|
}
|
15966 |
|
|
|
15967 |
|
|
/* Implement FINAL_PRESCAN_INSN. */
|
15968 |
|
|
|
15969 |
|
|
void
|
15970 |
|
|
mips_final_prescan_insn (rtx insn, rtx *opvec, int noperands)
|
15971 |
|
|
{
|
15972 |
|
|
if (mips_need_noat_wrapper_p (insn, opvec, noperands))
|
15973 |
|
|
mips_push_asm_switch (&mips_noat);
|
15974 |
|
|
}
|
15975 |
|
|
|
15976 |
|
|
/* Implement TARGET_ASM_FINAL_POSTSCAN_INSN. */
|
15977 |
|
|
|
15978 |
|
|
static void
|
15979 |
|
|
mips_final_postscan_insn (FILE *file ATTRIBUTE_UNUSED, rtx insn,
|
15980 |
|
|
rtx *opvec, int noperands)
|
15981 |
|
|
{
|
15982 |
|
|
if (mips_need_noat_wrapper_p (insn, opvec, noperands))
|
15983 |
|
|
mips_pop_asm_switch (&mips_noat);
|
15984 |
|
|
}
|
15985 |
|
|
|
15986 |
|
|
/* Return the function that is used to expand the <u>mulsidi3 pattern.
   EXT_CODE is the code of the extension used.  Return NULL if widening
   multiplication shouldn't be used.  */

mulsidi3_gen_fn
mips_mulsidi3_gen_fn (enum rtx_code ext_code)
{
  bool signed_p;

  signed_p = ext_code == SIGN_EXTEND;
  if (TARGET_64BIT)
    {
      /* Don't use widening multiplication with MULT when we have DMUL.  Even
         with the extension of its input operands DMUL is faster.  Note that
         the extension is not needed for signed multiplication.  In order to
         ensure that we always remove the redundant sign-extension in this
         case we still expand mulsidi3 for DMUL.  */
      if (ISA_HAS_DMUL3)
        return signed_p ? gen_mulsidi3_64bit_dmul : NULL;
      if (TARGET_FIX_R4000)
        return NULL;
      return signed_p ? gen_mulsidi3_64bit : gen_umulsidi3_64bit;
    }
  else
    {
      if (TARGET_FIX_R4000)
        return signed_p ? gen_mulsidi3_32bit_r4000 : gen_umulsidi3_32bit_r4000;
      if (ISA_HAS_DSPR2)
        return signed_p ? gen_mips_mult : gen_mips_multu;
      return signed_p ? gen_mulsidi3_32bit : gen_umulsidi3_32bit;
    }
}

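/* In summary, the choice above is:

     64-bit, ISA_HAS_DMUL3:       mulsidi3_64bit_dmul for signed,
                                  none for unsigned
     64-bit, TARGET_FIX_R4000:    none
     64-bit, otherwise:           [u]mulsidi3_64bit
     32-bit, TARGET_FIX_R4000:    [u]mulsidi3_32bit_r4000
     32-bit, ISA_HAS_DSPR2:       mips_mult / mips_multu
     32-bit, otherwise:           [u]mulsidi3_32bit  */
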
/* Return the size in bytes of the trampoline code, padded to
   TRAMPOLINE_ALIGNMENT bits.  The static chain pointer and target
   function address immediately follow.  */

int
mips_trampoline_code_size (void)
{
  if (TARGET_USE_PIC_FN_ADDR_REG)
    return 4 * 4;
  else if (ptr_mode == DImode)
    return 8 * 4;
  else if (ISA_HAS_LOAD_DELAY)
    return 6 * 4;
  else
    return 4 * 4;
}

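/* In concrete terms, the code part of the trampoline is therefore
   16 bytes when $25 already holds the trampoline address, 32 bytes
   when pointers are 64 bits wide, 24 bytes when a load delay slot
   must be filled, and 16 bytes otherwise.  */
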
/* Implement TARGET_TRAMPOLINE_INIT. */
|
16037 |
|
|
|
16038 |
|
|
static void
|
16039 |
|
|
mips_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
|
16040 |
|
|
{
|
16041 |
|
|
rtx addr, end_addr, high, low, opcode, mem;
|
16042 |
|
|
rtx trampoline[8];
|
16043 |
|
|
unsigned int i, j;
|
16044 |
|
|
HOST_WIDE_INT end_addr_offset, static_chain_offset, target_function_offset;
|
16045 |
|
|
|
16046 |
|
|
/* Work out the offsets of the pointers from the start of the
|
16047 |
|
|
trampoline code. */
|
16048 |
|
|
end_addr_offset = mips_trampoline_code_size ();
|
16049 |
|
|
static_chain_offset = end_addr_offset;
|
16050 |
|
|
target_function_offset = static_chain_offset + GET_MODE_SIZE (ptr_mode);
|
16051 |
|
|
|
16052 |
|
|
/* Get pointers to the beginning and end of the code block. */
|
16053 |
|
|
addr = force_reg (Pmode, XEXP (m_tramp, 0));
|
16054 |
|
|
end_addr = mips_force_binary (Pmode, PLUS, addr, GEN_INT (end_addr_offset));
|
16055 |
|
|
|
16056 |
|
|
#define OP(X) gen_int_mode (X, SImode)
|
16057 |
|
|
|
16058 |
|
|
/* Build up the code in TRAMPOLINE. */
|
16059 |
|
|
i = 0;
|
16060 |
|
|
if (TARGET_USE_PIC_FN_ADDR_REG)
|
16061 |
|
|
{
|
16062 |
|
|
/* $25 contains the address of the trampoline. Emit code of the form:
|
16063 |
|
|
|
16064 |
|
|
l[wd] $1, target_function_offset($25)
|
16065 |
|
|
l[wd] $static_chain, static_chain_offset($25)
|
16066 |
|
|
jr $1
|
16067 |
|
|
move $25,$1. */
|
16068 |
|
|
trampoline[i++] = OP (MIPS_LOAD_PTR (AT_REGNUM,
|
16069 |
|
|
target_function_offset,
|
16070 |
|
|
PIC_FUNCTION_ADDR_REGNUM));
|
16071 |
|
|
trampoline[i++] = OP (MIPS_LOAD_PTR (STATIC_CHAIN_REGNUM,
|
16072 |
|
|
static_chain_offset,
|
16073 |
|
|
PIC_FUNCTION_ADDR_REGNUM));
|
16074 |
|
|
trampoline[i++] = OP (MIPS_JR (AT_REGNUM));
|
16075 |
|
|
trampoline[i++] = OP (MIPS_MOVE (PIC_FUNCTION_ADDR_REGNUM, AT_REGNUM));
|
16076 |
|
|
}
|
16077 |
|
|
else if (ptr_mode == DImode)
|
16078 |
|
|
{
|
16079 |
|
|
/* It's too cumbersome to create the full 64-bit address, so let's
|
16080 |
|
|
instead use:
|
16081 |
|
|
|
16082 |
|
|
move $1, $31
|
16083 |
|
|
bal 1f
|
16084 |
|
|
nop
|
16085 |
|
|
1: l[wd] $25, target_function_offset - 12($31)
|
16086 |
|
|
l[wd] $static_chain, static_chain_offset - 12($31)
|
16087 |
|
|
jr $25
|
16088 |
|
|
move $31, $1
|
16089 |
|
|
|
16090 |
|
|
where 12 is the offset of "1:" from the start of the code block. */
|
16091 |
|
|
trampoline[i++] = OP (MIPS_MOVE (AT_REGNUM, RETURN_ADDR_REGNUM));
|
16092 |
|
|
trampoline[i++] = OP (MIPS_BAL (1));
|
16093 |
|
|
trampoline[i++] = OP (MIPS_NOP);
|
16094 |
|
|
trampoline[i++] = OP (MIPS_LOAD_PTR (PIC_FUNCTION_ADDR_REGNUM,
|
16095 |
|
|
target_function_offset - 12,
|
16096 |
|
|
RETURN_ADDR_REGNUM));
|
16097 |
|
|
trampoline[i++] = OP (MIPS_LOAD_PTR (STATIC_CHAIN_REGNUM,
|
16098 |
|
|
static_chain_offset - 12,
|
16099 |
|
|
RETURN_ADDR_REGNUM));
|
16100 |
|
|
trampoline[i++] = OP (MIPS_JR (PIC_FUNCTION_ADDR_REGNUM));
|
16101 |
|
|
trampoline[i++] = OP (MIPS_MOVE (RETURN_ADDR_REGNUM, AT_REGNUM));
|
16102 |
|
|
}
|
16103 |
|
|
else
|
16104 |
|
|
{
|
16105 |
|
|
/* If the target has load delays, emit:
|
16106 |
|
|
|
16107 |
|
|
lui $1, %hi(end_addr)
|
16108 |
|
|
lw $25, %lo(end_addr + ...)($1)
|
16109 |
|
|
lw $static_chain, %lo(end_addr + ...)($1)
|
16110 |
|
|
jr $25
|
16111 |
|
|
nop
|
16112 |
|
|
|
16113 |
|
|
Otherwise emit:
|
16114 |
|
|
|
16115 |
|
|
lui $1, %hi(end_addr)
|
16116 |
|
|
lw $25, %lo(end_addr + ...)($1)
|
16117 |
|
|
jr $25
|
16118 |
|
|
lw $static_chain, %lo(end_addr + ...)($1). */
|
16119 |
|
|
|
16120 |
|
|
/* Split END_ADDR into %hi and %lo values. Trampolines are aligned
|
16121 |
|
|
to 64 bits, so the %lo value will have the bottom 3 bits clear. */
|
16122 |
|
|
high = expand_simple_binop (SImode, PLUS, end_addr, GEN_INT (0x8000),
|
16123 |
|
|
NULL, false, OPTAB_WIDEN);
|
16124 |
|
|
high = expand_simple_binop (SImode, LSHIFTRT, high, GEN_INT (16),
|
16125 |
|
|
NULL, false, OPTAB_WIDEN);
|
16126 |
|
|
low = convert_to_mode (SImode, gen_lowpart (HImode, end_addr), true);
|
16127 |
|
|
|
16128 |
|
|
/* Emit the LUI. */
|
16129 |
|
|
opcode = OP (MIPS_LUI (AT_REGNUM, 0));
|
16130 |
|
|
trampoline[i++] = expand_simple_binop (SImode, IOR, opcode, high,
|
16131 |
|
|
NULL, false, OPTAB_WIDEN);
|
16132 |
|
|
|
16133 |
|
|
/* Emit the load of the target function. */
|
16134 |
|
|
opcode = OP (MIPS_LOAD_PTR (PIC_FUNCTION_ADDR_REGNUM,
|
16135 |
|
|
target_function_offset - end_addr_offset,
|
16136 |
|
|
AT_REGNUM));
|
16137 |
|
|
trampoline[i++] = expand_simple_binop (SImode, IOR, opcode, low,
|
16138 |
|
|
NULL, false, OPTAB_WIDEN);
|
16139 |
|
|
|
16140 |
|
|
/* Emit the JR here, if we can. */
|
16141 |
|
|
if (!ISA_HAS_LOAD_DELAY)
|
16142 |
|
|
trampoline[i++] = OP (MIPS_JR (PIC_FUNCTION_ADDR_REGNUM));
|
16143 |
|
|
|
16144 |
|
|
/* Emit the load of the static chain register. */
|
16145 |
|
|
opcode = OP (MIPS_LOAD_PTR (STATIC_CHAIN_REGNUM,
|
16146 |
|
|
static_chain_offset - end_addr_offset,
|
16147 |
|
|
AT_REGNUM));
|
16148 |
|
|
trampoline[i++] = expand_simple_binop (SImode, IOR, opcode, low,
|
16149 |
|
|
NULL, false, OPTAB_WIDEN);
|
16150 |
|
|
|
16151 |
|
|
/* Emit the JR, if we couldn't above. */
|
16152 |
|
|
if (ISA_HAS_LOAD_DELAY)
|
16153 |
|
|
{
|
16154 |
|
|
trampoline[i++] = OP (MIPS_JR (PIC_FUNCTION_ADDR_REGNUM));
|
16155 |
|
|
trampoline[i++] = OP (MIPS_NOP);
|
16156 |
|
|
}
|
16157 |
|
|
}
|
16158 |
|
|
|
16159 |
|
|
#undef OP
|
16160 |
|
|
|
16161 |
|
|
/* Copy the trampoline code. Leave any padding uninitialized. */
|
16162 |
|
|
for (j = 0; j < i; j++)
|
16163 |
|
|
{
|
16164 |
|
|
mem = adjust_address (m_tramp, SImode, j * GET_MODE_SIZE (SImode));
|
16165 |
|
|
mips_emit_move (mem, trampoline[j]);
|
16166 |
|
|
}
|
16167 |
|
|
|
16168 |
|
|
/* Set up the static chain pointer field. */
|
16169 |
|
|
mem = adjust_address (m_tramp, ptr_mode, static_chain_offset);
|
16170 |
|
|
mips_emit_move (mem, chain_value);
|
16171 |
|
|
|
16172 |
|
|
/* Set up the target function field. */
|
16173 |
|
|
mem = adjust_address (m_tramp, ptr_mode, target_function_offset);
|
16174 |
|
|
mips_emit_move (mem, XEXP (DECL_RTL (fndecl), 0));
|
16175 |
|
|
|
16176 |
|
|
/* Flush the code part of the trampoline. */
|
16177 |
|
|
emit_insn (gen_add3_insn (end_addr, addr, GEN_INT (TRAMPOLINE_SIZE)));
|
16178 |
|
|
emit_insn (gen_clear_cache (addr, end_addr));
|
16179 |
|
|
}
|
16180 |
|
|
|
16181 |
|
|
/* Implement FUNCTION_PROFILER. */
|
16182 |
|
|
|
16183 |
|
|
void mips_function_profiler (FILE *file)
|
16184 |
|
|
{
|
16185 |
|
|
if (TARGET_MIPS16)
|
16186 |
|
|
sorry ("mips16 function profiling");
|
16187 |
|
|
if (TARGET_LONG_CALLS)
|
16188 |
|
|
{
|
16189 |
|
|
/* For TARGET_LONG_CALLS use $3 for the address of _mcount. */
|
16190 |
|
|
if (Pmode == DImode)
|
16191 |
|
|
fprintf (file, "\tdla\t%s,_mcount\n", reg_names[3]);
|
16192 |
|
|
else
|
16193 |
|
|
fprintf (file, "\tla\t%s,_mcount\n", reg_names[3]);
|
16194 |
|
|
}
|
16195 |
|
|
mips_push_asm_switch (&mips_noat);
|
16196 |
|
|
fprintf (file, "\tmove\t%s,%s\t\t# save current return address\n",
|
16197 |
|
|
reg_names[AT_REGNUM], reg_names[RETURN_ADDR_REGNUM]);
|
16198 |
|
|
/* _mcount treats $2 as the static chain register. */
|
16199 |
|
|
if (cfun->static_chain_decl != NULL)
|
16200 |
|
|
fprintf (file, "\tmove\t%s,%s\n", reg_names[2],
|
16201 |
|
|
reg_names[STATIC_CHAIN_REGNUM]);
|
16202 |
|
|
if (TARGET_MCOUNT_RA_ADDRESS)
|
16203 |
|
|
{
|
16204 |
|
|
/* If TARGET_MCOUNT_RA_ADDRESS load $12 with the address of the
|
16205 |
|
|
ra save location. */
|
16206 |
|
|
if (cfun->machine->frame.ra_fp_offset == 0)
|
16207 |
|
|
/* ra not saved, pass zero. */
|
16208 |
|
|
fprintf (file, "\tmove\t%s,%s\n", reg_names[12], reg_names[0]);
|
16209 |
|
|
else
|
16210 |
|
|
fprintf (file, "\t%s\t%s," HOST_WIDE_INT_PRINT_DEC "(%s)\n",
|
16211 |
|
|
Pmode == DImode ? "dla" : "la", reg_names[12],
|
16212 |
|
|
cfun->machine->frame.ra_fp_offset,
|
16213 |
|
|
reg_names[STACK_POINTER_REGNUM]);
|
16214 |
|
|
}
|
16215 |
|
|
if (!TARGET_NEWABI)
|
16216 |
|
|
fprintf (file,
|
16217 |
|
|
"\t%s\t%s,%s,%d\t\t# _mcount pops 2 words from stack\n",
|
16218 |
|
|
TARGET_64BIT ? "dsubu" : "subu",
|
16219 |
|
|
reg_names[STACK_POINTER_REGNUM],
|
16220 |
|
|
reg_names[STACK_POINTER_REGNUM],
|
16221 |
|
|
Pmode == DImode ? 16 : 8);
|
16222 |
|
|
|
16223 |
|
|
if (TARGET_LONG_CALLS)
|
16224 |
|
|
fprintf (file, "\tjalr\t%s\n", reg_names[3]);
|
16225 |
|
|
else
|
16226 |
|
|
fprintf (file, "\tjal\t_mcount\n");
|
16227 |
|
|
mips_pop_asm_switch (&mips_noat);
|
16228 |
|
|
/* _mcount treats $2 as the static chain register. */
|
16229 |
|
|
if (cfun->static_chain_decl != NULL)
|
16230 |
|
|
fprintf (file, "\tmove\t%s,%s\n", reg_names[STATIC_CHAIN_REGNUM],
|
16231 |
|
|
reg_names[2]);
|
16232 |
|
|
}
|
16233 |
|
|
|
16234 |
|
|
/* Initialize the GCC target structure. */
|
16235 |
|
|
#undef TARGET_ASM_ALIGNED_HI_OP
|
16236 |
|
|
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
|
16237 |
|
|
#undef TARGET_ASM_ALIGNED_SI_OP
|
16238 |
|
|
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
|
16239 |
|
|
#undef TARGET_ASM_ALIGNED_DI_OP
|
16240 |
|
|
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
|
16241 |
|
|
|
16242 |
|
|
#undef TARGET_LEGITIMIZE_ADDRESS
|
16243 |
|
|
#define TARGET_LEGITIMIZE_ADDRESS mips_legitimize_address
|
16244 |
|
|
|
16245 |
|
|
#undef TARGET_ASM_FUNCTION_PROLOGUE
|
16246 |
|
|
#define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue
|
16247 |
|
|
#undef TARGET_ASM_FUNCTION_EPILOGUE
|
16248 |
|
|
#define TARGET_ASM_FUNCTION_EPILOGUE mips_output_function_epilogue
|
16249 |
|
|
#undef TARGET_ASM_SELECT_RTX_SECTION
|
16250 |
|
|
#define TARGET_ASM_SELECT_RTX_SECTION mips_select_rtx_section
|
16251 |
|
|
#undef TARGET_ASM_FUNCTION_RODATA_SECTION
|
16252 |
|
|
#define TARGET_ASM_FUNCTION_RODATA_SECTION mips_function_rodata_section
|
16253 |
|
|
|
16254 |
|
|
#undef TARGET_SCHED_INIT
|
16255 |
|
|
#define TARGET_SCHED_INIT mips_sched_init
|
16256 |
|
|
#undef TARGET_SCHED_REORDER
|
16257 |
|
|
#define TARGET_SCHED_REORDER mips_sched_reorder
|
16258 |
|
|
#undef TARGET_SCHED_REORDER2
|
16259 |
|
|
#define TARGET_SCHED_REORDER2 mips_sched_reorder
|
16260 |
|
|
#undef TARGET_SCHED_VARIABLE_ISSUE
|
16261 |
|
|
#define TARGET_SCHED_VARIABLE_ISSUE mips_variable_issue
|
16262 |
|
|
#undef TARGET_SCHED_ADJUST_COST
|
16263 |
|
|
#define TARGET_SCHED_ADJUST_COST mips_adjust_cost
|
16264 |
|
|
#undef TARGET_SCHED_ISSUE_RATE
|
16265 |
|
|
#define TARGET_SCHED_ISSUE_RATE mips_issue_rate
|
16266 |
|
|
#undef TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN
|
16267 |
|
|
#define TARGET_SCHED_INIT_DFA_POST_CYCLE_INSN mips_init_dfa_post_cycle_insn
|
16268 |
|
|
#undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE
|
16269 |
|
|
#define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE mips_dfa_post_advance_cycle
|
16270 |
|
|
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
|
16271 |
|
|
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
|
16272 |
|
|
mips_multipass_dfa_lookahead
|
16273 |
|
|
|
16274 |
|
|
#undef TARGET_DEFAULT_TARGET_FLAGS
|
16275 |
|
|
#define TARGET_DEFAULT_TARGET_FLAGS \
|
16276 |
|
|
(TARGET_DEFAULT \
|
16277 |
|
|
| TARGET_CPU_DEFAULT \
|
16278 |
|
|
| TARGET_ENDIAN_DEFAULT \
|
16279 |
|
|
| TARGET_FP_EXCEPTIONS_DEFAULT \
|
16280 |
|
|
| MASK_CHECK_ZERO_DIV \
|
16281 |
|
|
| MASK_FUSED_MADD)
|
16282 |
|
|
#undef TARGET_HANDLE_OPTION
|
16283 |
|
|
#define TARGET_HANDLE_OPTION mips_handle_option
|
16284 |
|
|
|
16285 |
|
|
#undef TARGET_FUNCTION_OK_FOR_SIBCALL
|
16286 |
|
|
#define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall
|
16287 |
|
|
|
16288 |
|
|
#undef TARGET_INSERT_ATTRIBUTES
|
16289 |
|
|
#define TARGET_INSERT_ATTRIBUTES mips_insert_attributes
|
16290 |
|
|
#undef TARGET_MERGE_DECL_ATTRIBUTES
|
16291 |
|
|
#define TARGET_MERGE_DECL_ATTRIBUTES mips_merge_decl_attributes
|
16292 |
|
|
#undef TARGET_SET_CURRENT_FUNCTION
|
16293 |
|
|
#define TARGET_SET_CURRENT_FUNCTION mips_set_current_function
|
16294 |
|
|
|
16295 |
|
|
#undef TARGET_VALID_POINTER_MODE
|
16296 |
|
|
#define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
|
16297 |
|
|
#undef TARGET_RTX_COSTS
|
16298 |
|
|
#define TARGET_RTX_COSTS mips_rtx_costs
|
16299 |
|
|
#undef TARGET_ADDRESS_COST
|
16300 |
|
|
#define TARGET_ADDRESS_COST mips_address_cost
|
16301 |
|
|
|
16302 |
|
|
#undef TARGET_IN_SMALL_DATA_P
|
16303 |
|
|
#define TARGET_IN_SMALL_DATA_P mips_in_small_data_p
|
16304 |
|
|
|
16305 |
|
|
#undef TARGET_MACHINE_DEPENDENT_REORG
|
16306 |
|
|
#define TARGET_MACHINE_DEPENDENT_REORG mips_reorg
|
16307 |
|
|
|
16308 |
|
|
#undef TARGET_ASM_FILE_START
|
16309 |
|
|
#define TARGET_ASM_FILE_START mips_file_start
|
16310 |
|
|
#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
|
16311 |
|
|
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
|
16312 |
|
|
|
16313 |
|
|
#undef TARGET_INIT_LIBFUNCS
|
16314 |
|
|
#define TARGET_INIT_LIBFUNCS mips_init_libfuncs
|
16315 |
|
|
|
16316 |
|
|
#undef TARGET_BUILD_BUILTIN_VA_LIST
|
16317 |
|
|
#define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
|
16318 |
|
|
#undef TARGET_EXPAND_BUILTIN_VA_START
|
16319 |
|
|
#define TARGET_EXPAND_BUILTIN_VA_START mips_va_start
|
16320 |
|
|
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
|
16321 |
|
|
#define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr
|
16322 |
|
|
|
16323 |
|
|
#undef TARGET_PROMOTE_FUNCTION_MODE
|
16324 |
|
|
#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
|
16325 |
|
|
#undef TARGET_PROMOTE_PROTOTYPES
|
16326 |
|
|
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
|
16327 |
|
|
|
16328 |
|
|
#undef TARGET_RETURN_IN_MEMORY
|
16329 |
|
|
#define TARGET_RETURN_IN_MEMORY mips_return_in_memory
|
16330 |
|
|
#undef TARGET_RETURN_IN_MSB
|
16331 |
|
|
#define TARGET_RETURN_IN_MSB mips_return_in_msb
|
16332 |
|
|
|
16333 |
|
|
#undef TARGET_ASM_OUTPUT_MI_THUNK
|
16334 |
|
|
#define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk
|
16335 |
|
|
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
|
16336 |
|
|
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
|
16337 |
|
|
|
16338 |
|
|
#undef TARGET_SETUP_INCOMING_VARARGS
|
16339 |
|
|
#define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs
|
16340 |
|
|
#undef TARGET_STRICT_ARGUMENT_NAMING
|
16341 |
|
|
#define TARGET_STRICT_ARGUMENT_NAMING mips_strict_argument_naming
|
16342 |
|
|
#undef TARGET_MUST_PASS_IN_STACK
|
16343 |
|
|
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
|
16344 |
|
|
#undef TARGET_PASS_BY_REFERENCE
|
16345 |
|
|
#define TARGET_PASS_BY_REFERENCE mips_pass_by_reference
|
16346 |
|
|
#undef TARGET_CALLEE_COPIES
|
16347 |
|
|
#define TARGET_CALLEE_COPIES mips_callee_copies
|
16348 |
|
|
#undef TARGET_ARG_PARTIAL_BYTES
|
16349 |
|
|
#define TARGET_ARG_PARTIAL_BYTES mips_arg_partial_bytes
|
16350 |
|
|
|
16351 |
|
|
#undef TARGET_MODE_REP_EXTENDED
|
16352 |
|
|
#define TARGET_MODE_REP_EXTENDED mips_mode_rep_extended
|
16353 |
|
|
|
16354 |
|
|
#undef TARGET_VECTOR_MODE_SUPPORTED_P
|
16355 |
|
|
#define TARGET_VECTOR_MODE_SUPPORTED_P mips_vector_mode_supported_p
|
16356 |
|
|
|
16357 |
|
|
#undef TARGET_SCALAR_MODE_SUPPORTED_P
|
16358 |
|
|
#define TARGET_SCALAR_MODE_SUPPORTED_P mips_scalar_mode_supported_p
|
16359 |
|
|
|
16360 |
|
|
#undef TARGET_INIT_BUILTINS
|
16361 |
|
|
#define TARGET_INIT_BUILTINS mips_init_builtins
|
16362 |
|
|
#undef TARGET_EXPAND_BUILTIN
|
16363 |
|
|
#define TARGET_EXPAND_BUILTIN mips_expand_builtin
|
16364 |
|
|
|
16365 |
|
|
#undef TARGET_HAVE_TLS
|
16366 |
|
|
#define TARGET_HAVE_TLS HAVE_AS_TLS
|
16367 |
|
|
|
16368 |
|
|
#undef TARGET_CANNOT_FORCE_CONST_MEM
|
16369 |
|
|
#define TARGET_CANNOT_FORCE_CONST_MEM mips_cannot_force_const_mem
|
16370 |
|
|
|
16371 |
|
|
#undef TARGET_ENCODE_SECTION_INFO
|
16372 |
|
|
#define TARGET_ENCODE_SECTION_INFO mips_encode_section_info
|
16373 |
|
|
|
16374 |
|
|
#undef TARGET_ATTRIBUTE_TABLE
|
16375 |
|
|
#define TARGET_ATTRIBUTE_TABLE mips_attribute_table
|
16376 |
|
|
/* All our function attributes are related to how out-of-line copies should
|
16377 |
|
|
be compiled or called. They don't in themselves prevent inlining. */
|
16378 |
|
|
#undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
|
16379 |
|
|
#define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true
|
16380 |
|
|
|
16381 |
|
|
#undef TARGET_EXTRA_LIVE_ON_ENTRY
|
16382 |
|
|
#define TARGET_EXTRA_LIVE_ON_ENTRY mips_extra_live_on_entry
|
16383 |
|
|
|
16384 |
|
|
#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
|
16385 |
|
|
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P mips_use_blocks_for_constant_p
|
16386 |
|
|
#undef TARGET_USE_ANCHORS_FOR_SYMBOL_P
|
16387 |
|
|
#define TARGET_USE_ANCHORS_FOR_SYMBOL_P mips_use_anchors_for_symbol_p
|
16388 |
|
|
|
16389 |
|
|
#undef TARGET_COMP_TYPE_ATTRIBUTES
|
16390 |
|
|
#define TARGET_COMP_TYPE_ATTRIBUTES mips_comp_type_attributes
|
16391 |
|
|
|
16392 |
|
|
#ifdef HAVE_AS_DTPRELWORD
|
16393 |
|
|
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
|
16394 |
|
|
#define TARGET_ASM_OUTPUT_DWARF_DTPREL mips_output_dwarf_dtprel
|
16395 |
|
|
#endif
|
16396 |
|
|
#undef TARGET_DWARF_REGISTER_SPAN
|
16397 |
|
|
#define TARGET_DWARF_REGISTER_SPAN mips_dwarf_register_span
|
16398 |
|
|
|
16399 |
|
|
#undef TARGET_IRA_COVER_CLASSES
|
16400 |
|
|
#define TARGET_IRA_COVER_CLASSES mips_ira_cover_classes
|
16401 |
|
|
|
16402 |
|
|
#undef TARGET_ASM_FINAL_POSTSCAN_INSN
|
16403 |
|
|
#define TARGET_ASM_FINAL_POSTSCAN_INSN mips_final_postscan_insn
|
16404 |
|
|
|
16405 |
|
|
#undef TARGET_LEGITIMATE_ADDRESS_P
|
16406 |
|
|
#define TARGET_LEGITIMATE_ADDRESS_P mips_legitimate_address_p
|
16407 |
|
|
|
16408 |
|
|
#undef TARGET_FRAME_POINTER_REQUIRED
|
16409 |
|
|
#define TARGET_FRAME_POINTER_REQUIRED mips_frame_pointer_required
|
16410 |
|
|
|
16411 |
|
|
#undef TARGET_CAN_ELIMINATE
|
16412 |
|
|
#define TARGET_CAN_ELIMINATE mips_can_eliminate
|
16413 |
|
|
|
16414 |
|
|
#undef TARGET_TRAMPOLINE_INIT
|
16415 |
|
|
#define TARGET_TRAMPOLINE_INIT mips_trampoline_init
|
16416 |
|
|
|
16417 |
|
|
struct gcc_target targetm = TARGET_INITIALIZER;
|
16418 |
|
|
|
16419 |
|
|
#include "gt-mips.h"
|