/* Definitions of target machine for GCC for IA-32.
   Copyright (C) 1988, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
   2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

/* The purpose of this file is to define the characteristics of the i386,
   independent of assembler syntax or operating system.

   Three other files build on this one to describe a specific assembler
   syntax: bsd386.h, att386.h, and sun386.h.

   The actual tm.h file for a particular system should include
   this file, and then the file for the appropriate assembler syntax.

   Many macros that specify assembler syntax are omitted entirely from
   this file because they really belong in the files for particular
   assemblers.  These include RP, IP, LPREFIX, PUT_OP_SIZE, USE_STAR,
   ADDR_BEG, ADDR_END, PRINT_IREG, PRINT_SCALE, PRINT_B_I_S, and many
   that start with ASM_ or end in ASM_OP.  */

/* Redefines for option macros.  */

#define TARGET_64BIT OPTION_ISA_64BIT
#define TARGET_X32 OPTION_ISA_X32
#define TARGET_MMX OPTION_ISA_MMX
#define TARGET_3DNOW OPTION_ISA_3DNOW
#define TARGET_3DNOW_A OPTION_ISA_3DNOW_A
#define TARGET_SSE OPTION_ISA_SSE
#define TARGET_SSE2 OPTION_ISA_SSE2
#define TARGET_SSE3 OPTION_ISA_SSE3
#define TARGET_SSSE3 OPTION_ISA_SSSE3
#define TARGET_SSE4_1 OPTION_ISA_SSE4_1
#define TARGET_SSE4_2 OPTION_ISA_SSE4_2
#define TARGET_AVX OPTION_ISA_AVX
#define TARGET_AVX2 OPTION_ISA_AVX2
#define TARGET_FMA OPTION_ISA_FMA
#define TARGET_SSE4A OPTION_ISA_SSE4A
#define TARGET_FMA4 OPTION_ISA_FMA4
#define TARGET_XOP OPTION_ISA_XOP
#define TARGET_LWP OPTION_ISA_LWP
#define TARGET_ROUND OPTION_ISA_ROUND
#define TARGET_ABM OPTION_ISA_ABM
#define TARGET_BMI OPTION_ISA_BMI
#define TARGET_BMI2 OPTION_ISA_BMI2
#define TARGET_LZCNT OPTION_ISA_LZCNT
#define TARGET_TBM OPTION_ISA_TBM
#define TARGET_POPCNT OPTION_ISA_POPCNT
#define TARGET_SAHF OPTION_ISA_SAHF
#define TARGET_MOVBE OPTION_ISA_MOVBE
#define TARGET_CRC32 OPTION_ISA_CRC32
#define TARGET_AES OPTION_ISA_AES
#define TARGET_PCLMUL OPTION_ISA_PCLMUL
#define TARGET_CMPXCHG16B OPTION_ISA_CX16
#define TARGET_FSGSBASE OPTION_ISA_FSGSBASE
#define TARGET_RDRND OPTION_ISA_RDRND
#define TARGET_F16C OPTION_ISA_F16C

#define TARGET_LP64 (TARGET_64BIT && !TARGET_X32)

/* SSE4.1 defines round instructions.  */
#define OPTION_MASK_ISA_ROUND OPTION_MASK_ISA_SSE4_1
#define OPTION_ISA_ROUND ((ix86_isa_flags & OPTION_MASK_ISA_ROUND) != 0)

#include "config/vxworks-dummy.h"

#include "config/i386/i386-opts.h"

#define MAX_STRINGOP_ALGS 4

/* Specify what algorithm to use for stringops on known size.
   When size is unknown, the UNKNOWN_SIZE alg is used.  When size is
   known at compile time or estimated via feedback, the SIZE array
   is walked in order until MAX is greater than the estimate (or -1
   means infinity).  The corresponding ALG is used then.
   For example, the initializer
     {{256, loop}, {-1, rep_prefix_4_byte}}
   will use loop for blocks smaller than or equal to 256 bytes; the
   rep prefix will be used otherwise.  */
struct stringop_algs
{
  const enum stringop_alg unknown_size;
  const struct stringop_strategy {
    const int max;
    const enum stringop_alg alg;
  } size [MAX_STRINGOP_ALGS];
};
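
/* Editorial note: an illustrative, hypothetical initializer for the
   structure above, in the spirit of the cost tables in i386.c:

     {libcall, {{256, loop}, {8192, rep_prefix_4_byte}, {-1, libcall}}}

   i.e. call the library routine when the size is unknown; for known
   sizes use an inline loop up to 256 bytes, a rep-prefixed move up to
   8192 bytes, and a library call beyond that.  The real, tuned tables
   live in config/i386/i386.c.  */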

/* Define the specific costs for a given cpu.  */

struct processor_costs {
  const int add;		  /* cost of an add instruction */
  const int lea;		  /* cost of a lea instruction */
  const int shift_var;		  /* variable shift costs */
  const int shift_const;	  /* constant shift costs */
  const int mult_init[5];	  /* cost of starting a multiply
				     in QImode, HImode, SImode, DImode, TImode */
  const int mult_bit;		  /* cost of multiply per each bit set */
  const int divide[5];		  /* cost of a divide/mod
				     in QImode, HImode, SImode, DImode, TImode */
  int movsx;			  /* The cost of movsx operation.  */
  int movzx;			  /* The cost of movzx operation.  */
  const int large_insn;		  /* insns larger than this cost more */
  const int move_ratio;		  /* The threshold of number of scalar
				     memory-to-memory move insns.  */
  const int movzbl_load;	  /* cost of loading using movzbl */
  const int int_load[3];	  /* cost of loading integer registers
				     in QImode, HImode and SImode relative
				     to reg-reg move (2).  */
  const int int_store[3];	  /* cost of storing integer register
				     in QImode, HImode and SImode */
  const int fp_move;		  /* cost of reg,reg fld/fst */
  const int fp_load[3];		  /* cost of loading FP register
				     in SFmode, DFmode and XFmode */
  const int fp_store[3];	  /* cost of storing FP register
				     in SFmode, DFmode and XFmode */
  const int mmx_move;		  /* cost of moving MMX register.  */
  const int mmx_load[2];	  /* cost of loading MMX register
				     in SImode and DImode */
  const int mmx_store[2];	  /* cost of storing MMX register
				     in SImode and DImode */
  const int sse_move;		  /* cost of moving SSE register.  */
  const int sse_load[3];	  /* cost of loading SSE register
				     in SImode, DImode and TImode */
  const int sse_store[3];	  /* cost of storing SSE register
				     in SImode, DImode and TImode */
  const int mmxsse_to_integer;	  /* cost of moving mmxsse register to
				     integer and vice versa.  */
  const int l1_cache_size;	  /* size of l1 cache, in kilobytes.  */
  const int l2_cache_size;	  /* size of l2 cache, in kilobytes.  */
  const int prefetch_block;	  /* bytes moved to cache for prefetch.  */
  const int simultaneous_prefetches; /* number of parallel prefetch
				     operations.  */
  const int branch_cost;	  /* Default value for BRANCH_COST.  */
  const int fadd;		  /* cost of FADD and FSUB instructions.  */
  const int fmul;		  /* cost of FMUL instruction.  */
  const int fdiv;		  /* cost of FDIV instruction.  */
  const int fabs;		  /* cost of FABS instruction.  */
  const int fchs;		  /* cost of FCHS instruction.  */
  const int fsqrt;		  /* cost of FSQRT instruction.  */
  /* Specify what algorithm
     to use for stringops on unknown size.  */
  struct stringop_algs memcpy[2], memset[2];
  const int scalar_stmt_cost;	  /* Cost of any scalar operation, excluding
				     load and store.  */
  const int scalar_load_cost;	  /* Cost of scalar load.  */
  const int scalar_store_cost;	  /* Cost of scalar store.  */
  const int vec_stmt_cost;	  /* Cost of any vector operation, excluding
				     load, store, vector-to-scalar and
				     scalar-to-vector operation.  */
  const int vec_to_scalar_cost;	  /* Cost of vector-to-scalar operation.  */
  const int scalar_to_vec_cost;	  /* Cost of scalar-to-vector operation.  */
  const int vec_align_load_cost;  /* Cost of aligned vector load.  */
  const int vec_unalign_load_cost; /* Cost of unaligned vector load.  */
  const int vec_store_cost;	  /* Cost of vector store.  */
  const int cond_taken_branch_cost; /* Cost of taken branch for vectorizer
				     cost model.  */
  const int cond_not_taken_branch_cost; /* Cost of not taken branch for
				     vectorizer cost model.  */
};

extern const struct processor_costs *ix86_cost;
extern const struct processor_costs ix86_size_cost;

#define ix86_cur_cost() \
  (optimize_insn_for_size_p () ? &ix86_size_cost : ix86_cost)
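
/* Editorial note: a minimal usage sketch (hypothetical cost query) --
   the RTX cost hooks in i386.c consult the active table roughly like

     *total = COSTS_N_INSNS (ix86_cur_cost ()->add);

   so code optimized for size automatically reads ix86_size_cost instead
   of the CPU-specific table.  */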

/* Macros used in the machine description to test the flags.  */

/* configure can arrange to make this 2, to force a 486.  */

#ifndef TARGET_CPU_DEFAULT
#define TARGET_CPU_DEFAULT TARGET_CPU_DEFAULT_generic
#endif

#ifndef TARGET_FPMATH_DEFAULT
#define TARGET_FPMATH_DEFAULT \
  (TARGET_64BIT && TARGET_SSE ? FPMATH_SSE : FPMATH_387)
#endif

#define TARGET_FLOAT_RETURNS_IN_80387 TARGET_FLOAT_RETURNS

/* 64bit Sledgehammer mode.  For libgcc2 we make sure this is a
   compile-time constant.  */
#ifdef IN_LIBGCC2
#undef TARGET_64BIT
#ifdef __x86_64__
#define TARGET_64BIT 1
#else
#define TARGET_64BIT 0
#endif
#else
#ifndef TARGET_BI_ARCH
#undef TARGET_64BIT
#if TARGET_64BIT_DEFAULT
#define TARGET_64BIT 1
#else
#define TARGET_64BIT 0
#endif
#endif
#endif

#define HAS_LONG_COND_BRANCH 1
#define HAS_LONG_UNCOND_BRANCH 1

#define TARGET_386 (ix86_tune == PROCESSOR_I386)
#define TARGET_486 (ix86_tune == PROCESSOR_I486)
#define TARGET_PENTIUM (ix86_tune == PROCESSOR_PENTIUM)
#define TARGET_PENTIUMPRO (ix86_tune == PROCESSOR_PENTIUMPRO)
#define TARGET_GEODE (ix86_tune == PROCESSOR_GEODE)
#define TARGET_K6 (ix86_tune == PROCESSOR_K6)
#define TARGET_ATHLON (ix86_tune == PROCESSOR_ATHLON)
#define TARGET_PENTIUM4 (ix86_tune == PROCESSOR_PENTIUM4)
#define TARGET_K8 (ix86_tune == PROCESSOR_K8)
#define TARGET_ATHLON_K8 (TARGET_K8 || TARGET_ATHLON)
#define TARGET_NOCONA (ix86_tune == PROCESSOR_NOCONA)
#define TARGET_CORE2_32 (ix86_tune == PROCESSOR_CORE2_32)
#define TARGET_CORE2_64 (ix86_tune == PROCESSOR_CORE2_64)
#define TARGET_CORE2 (TARGET_CORE2_32 || TARGET_CORE2_64)
#define TARGET_COREI7_32 (ix86_tune == PROCESSOR_COREI7_32)
#define TARGET_COREI7_64 (ix86_tune == PROCESSOR_COREI7_64)
#define TARGET_COREI7 (TARGET_COREI7_32 || TARGET_COREI7_64)
#define TARGET_GENERIC32 (ix86_tune == PROCESSOR_GENERIC32)
#define TARGET_GENERIC64 (ix86_tune == PROCESSOR_GENERIC64)
#define TARGET_GENERIC (TARGET_GENERIC32 || TARGET_GENERIC64)
#define TARGET_AMDFAM10 (ix86_tune == PROCESSOR_AMDFAM10)
#define TARGET_BDVER1 (ix86_tune == PROCESSOR_BDVER1)
#define TARGET_BDVER2 (ix86_tune == PROCESSOR_BDVER2)
#define TARGET_BTVER1 (ix86_tune == PROCESSOR_BTVER1)
#define TARGET_ATOM (ix86_tune == PROCESSOR_ATOM)

/* Feature tests against the various tunings.  */
enum ix86_tune_indices {
  X86_TUNE_USE_LEAVE,
  X86_TUNE_PUSH_MEMORY,
  X86_TUNE_ZERO_EXTEND_WITH_AND,
  X86_TUNE_UNROLL_STRLEN,
  X86_TUNE_BRANCH_PREDICTION_HINTS,
  X86_TUNE_DOUBLE_WITH_ADD,
  X86_TUNE_USE_SAHF,
  X86_TUNE_MOVX,
  X86_TUNE_PARTIAL_REG_STALL,
  X86_TUNE_PARTIAL_FLAG_REG_STALL,
  X86_TUNE_USE_HIMODE_FIOP,
  X86_TUNE_USE_SIMODE_FIOP,
  X86_TUNE_USE_MOV0,
  X86_TUNE_USE_CLTD,
  X86_TUNE_USE_XCHGB,
  X86_TUNE_SPLIT_LONG_MOVES,
  X86_TUNE_READ_MODIFY_WRITE,
  X86_TUNE_READ_MODIFY,
  X86_TUNE_PROMOTE_QIMODE,
  X86_TUNE_FAST_PREFIX,
  X86_TUNE_SINGLE_STRINGOP,
  X86_TUNE_QIMODE_MATH,
  X86_TUNE_HIMODE_MATH,
  X86_TUNE_PROMOTE_QI_REGS,
  X86_TUNE_PROMOTE_HI_REGS,
  X86_TUNE_SINGLE_POP,
  X86_TUNE_DOUBLE_POP,
  X86_TUNE_SINGLE_PUSH,
  X86_TUNE_DOUBLE_PUSH,
  X86_TUNE_INTEGER_DFMODE_MOVES,
  X86_TUNE_PARTIAL_REG_DEPENDENCY,
  X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY,
  X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL,
  X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL,
  X86_TUNE_SSE_PACKED_SINGLE_INSN_OPTIMAL,
  X86_TUNE_SSE_SPLIT_REGS,
  X86_TUNE_SSE_TYPELESS_STORES,
  X86_TUNE_SSE_LOAD0_BY_PXOR,
  X86_TUNE_MEMORY_MISMATCH_STALL,
  X86_TUNE_PROLOGUE_USING_MOVE,
  X86_TUNE_EPILOGUE_USING_MOVE,
  X86_TUNE_SHIFT1,
  X86_TUNE_USE_FFREEP,
  X86_TUNE_INTER_UNIT_MOVES,
  X86_TUNE_INTER_UNIT_CONVERSIONS,
  X86_TUNE_FOUR_JUMP_LIMIT,
  X86_TUNE_SCHEDULE,
  X86_TUNE_USE_BT,
  X86_TUNE_USE_INCDEC,
  X86_TUNE_PAD_RETURNS,
  X86_TUNE_PAD_SHORT_FUNCTION,
  X86_TUNE_EXT_80387_CONSTANTS,
  X86_TUNE_SHORTEN_X87_SSE,
  X86_TUNE_AVOID_VECTOR_DECODE,
  X86_TUNE_PROMOTE_HIMODE_IMUL,
  X86_TUNE_SLOW_IMUL_IMM32_MEM,
  X86_TUNE_SLOW_IMUL_IMM8,
  X86_TUNE_MOVE_M1_VIA_OR,
  X86_TUNE_NOT_UNPAIRABLE,
  X86_TUNE_NOT_VECTORMODE,
  X86_TUNE_USE_VECTOR_FP_CONVERTS,
  X86_TUNE_USE_VECTOR_CONVERTS,
  X86_TUNE_FUSE_CMP_AND_BRANCH,
  X86_TUNE_OPT_AGU,
  X86_TUNE_VECTORIZE_DOUBLE,
  X86_TUNE_SOFTWARE_PREFETCHING_BENEFICIAL,
  X86_TUNE_AVX128_OPTIMAL,
  X86_TUNE_REASSOC_INT_TO_PARALLEL,
  X86_TUNE_REASSOC_FP_TO_PARALLEL,

  X86_TUNE_LAST
};

extern unsigned char ix86_tune_features[X86_TUNE_LAST];

#define TARGET_USE_LEAVE ix86_tune_features[X86_TUNE_USE_LEAVE]
#define TARGET_PUSH_MEMORY ix86_tune_features[X86_TUNE_PUSH_MEMORY]
#define TARGET_ZERO_EXTEND_WITH_AND \
	ix86_tune_features[X86_TUNE_ZERO_EXTEND_WITH_AND]
#define TARGET_UNROLL_STRLEN ix86_tune_features[X86_TUNE_UNROLL_STRLEN]
#define TARGET_BRANCH_PREDICTION_HINTS \
	ix86_tune_features[X86_TUNE_BRANCH_PREDICTION_HINTS]
#define TARGET_DOUBLE_WITH_ADD ix86_tune_features[X86_TUNE_DOUBLE_WITH_ADD]
#define TARGET_USE_SAHF ix86_tune_features[X86_TUNE_USE_SAHF]
#define TARGET_MOVX ix86_tune_features[X86_TUNE_MOVX]
#define TARGET_PARTIAL_REG_STALL ix86_tune_features[X86_TUNE_PARTIAL_REG_STALL]
#define TARGET_PARTIAL_FLAG_REG_STALL \
	ix86_tune_features[X86_TUNE_PARTIAL_FLAG_REG_STALL]
#define TARGET_USE_HIMODE_FIOP ix86_tune_features[X86_TUNE_USE_HIMODE_FIOP]
#define TARGET_USE_SIMODE_FIOP ix86_tune_features[X86_TUNE_USE_SIMODE_FIOP]
#define TARGET_USE_MOV0 ix86_tune_features[X86_TUNE_USE_MOV0]
#define TARGET_USE_CLTD ix86_tune_features[X86_TUNE_USE_CLTD]
#define TARGET_USE_XCHGB ix86_tune_features[X86_TUNE_USE_XCHGB]
#define TARGET_SPLIT_LONG_MOVES ix86_tune_features[X86_TUNE_SPLIT_LONG_MOVES]
#define TARGET_READ_MODIFY_WRITE ix86_tune_features[X86_TUNE_READ_MODIFY_WRITE]
#define TARGET_READ_MODIFY ix86_tune_features[X86_TUNE_READ_MODIFY]
#define TARGET_PROMOTE_QImode ix86_tune_features[X86_TUNE_PROMOTE_QIMODE]
#define TARGET_FAST_PREFIX ix86_tune_features[X86_TUNE_FAST_PREFIX]
#define TARGET_SINGLE_STRINGOP ix86_tune_features[X86_TUNE_SINGLE_STRINGOP]
#define TARGET_QIMODE_MATH ix86_tune_features[X86_TUNE_QIMODE_MATH]
#define TARGET_HIMODE_MATH ix86_tune_features[X86_TUNE_HIMODE_MATH]
#define TARGET_PROMOTE_QI_REGS ix86_tune_features[X86_TUNE_PROMOTE_QI_REGS]
#define TARGET_PROMOTE_HI_REGS ix86_tune_features[X86_TUNE_PROMOTE_HI_REGS]
#define TARGET_SINGLE_POP ix86_tune_features[X86_TUNE_SINGLE_POP]
#define TARGET_DOUBLE_POP ix86_tune_features[X86_TUNE_DOUBLE_POP]
#define TARGET_SINGLE_PUSH ix86_tune_features[X86_TUNE_SINGLE_PUSH]
#define TARGET_DOUBLE_PUSH ix86_tune_features[X86_TUNE_DOUBLE_PUSH]
#define TARGET_INTEGER_DFMODE_MOVES \
	ix86_tune_features[X86_TUNE_INTEGER_DFMODE_MOVES]
#define TARGET_PARTIAL_REG_DEPENDENCY \
	ix86_tune_features[X86_TUNE_PARTIAL_REG_DEPENDENCY]
#define TARGET_SSE_PARTIAL_REG_DEPENDENCY \
	ix86_tune_features[X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY]
#define TARGET_SSE_UNALIGNED_LOAD_OPTIMAL \
	ix86_tune_features[X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL]
#define TARGET_SSE_UNALIGNED_STORE_OPTIMAL \
	ix86_tune_features[X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL]
#define TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL \
	ix86_tune_features[X86_TUNE_SSE_PACKED_SINGLE_INSN_OPTIMAL]
#define TARGET_SSE_SPLIT_REGS ix86_tune_features[X86_TUNE_SSE_SPLIT_REGS]
#define TARGET_SSE_TYPELESS_STORES \
	ix86_tune_features[X86_TUNE_SSE_TYPELESS_STORES]
#define TARGET_SSE_LOAD0_BY_PXOR ix86_tune_features[X86_TUNE_SSE_LOAD0_BY_PXOR]
#define TARGET_MEMORY_MISMATCH_STALL \
	ix86_tune_features[X86_TUNE_MEMORY_MISMATCH_STALL]
#define TARGET_PROLOGUE_USING_MOVE \
	ix86_tune_features[X86_TUNE_PROLOGUE_USING_MOVE]
#define TARGET_EPILOGUE_USING_MOVE \
	ix86_tune_features[X86_TUNE_EPILOGUE_USING_MOVE]
#define TARGET_SHIFT1 ix86_tune_features[X86_TUNE_SHIFT1]
#define TARGET_USE_FFREEP ix86_tune_features[X86_TUNE_USE_FFREEP]
#define TARGET_INTER_UNIT_MOVES ix86_tune_features[X86_TUNE_INTER_UNIT_MOVES]
#define TARGET_INTER_UNIT_CONVERSIONS \
	ix86_tune_features[X86_TUNE_INTER_UNIT_CONVERSIONS]
#define TARGET_FOUR_JUMP_LIMIT ix86_tune_features[X86_TUNE_FOUR_JUMP_LIMIT]
#define TARGET_SCHEDULE ix86_tune_features[X86_TUNE_SCHEDULE]
#define TARGET_USE_BT ix86_tune_features[X86_TUNE_USE_BT]
#define TARGET_USE_INCDEC ix86_tune_features[X86_TUNE_USE_INCDEC]
#define TARGET_PAD_RETURNS ix86_tune_features[X86_TUNE_PAD_RETURNS]
#define TARGET_PAD_SHORT_FUNCTION \
	ix86_tune_features[X86_TUNE_PAD_SHORT_FUNCTION]
#define TARGET_EXT_80387_CONSTANTS \
	ix86_tune_features[X86_TUNE_EXT_80387_CONSTANTS]
#define TARGET_SHORTEN_X87_SSE ix86_tune_features[X86_TUNE_SHORTEN_X87_SSE]
#define TARGET_AVOID_VECTOR_DECODE \
	ix86_tune_features[X86_TUNE_AVOID_VECTOR_DECODE]
#define TARGET_TUNE_PROMOTE_HIMODE_IMUL \
	ix86_tune_features[X86_TUNE_PROMOTE_HIMODE_IMUL]
#define TARGET_SLOW_IMUL_IMM32_MEM \
	ix86_tune_features[X86_TUNE_SLOW_IMUL_IMM32_MEM]
#define TARGET_SLOW_IMUL_IMM8 ix86_tune_features[X86_TUNE_SLOW_IMUL_IMM8]
#define TARGET_MOVE_M1_VIA_OR ix86_tune_features[X86_TUNE_MOVE_M1_VIA_OR]
#define TARGET_NOT_UNPAIRABLE ix86_tune_features[X86_TUNE_NOT_UNPAIRABLE]
#define TARGET_NOT_VECTORMODE ix86_tune_features[X86_TUNE_NOT_VECTORMODE]
#define TARGET_USE_VECTOR_FP_CONVERTS \
	ix86_tune_features[X86_TUNE_USE_VECTOR_FP_CONVERTS]
#define TARGET_USE_VECTOR_CONVERTS \
	ix86_tune_features[X86_TUNE_USE_VECTOR_CONVERTS]
#define TARGET_FUSE_CMP_AND_BRANCH \
	ix86_tune_features[X86_TUNE_FUSE_CMP_AND_BRANCH]
#define TARGET_OPT_AGU ix86_tune_features[X86_TUNE_OPT_AGU]
#define TARGET_VECTORIZE_DOUBLE \
	ix86_tune_features[X86_TUNE_VECTORIZE_DOUBLE]
#define TARGET_SOFTWARE_PREFETCHING_BENEFICIAL \
	ix86_tune_features[X86_TUNE_SOFTWARE_PREFETCHING_BENEFICIAL]
#define TARGET_AVX128_OPTIMAL \
	ix86_tune_features[X86_TUNE_AVX128_OPTIMAL]
#define TARGET_REASSOC_INT_TO_PARALLEL \
	ix86_tune_features[X86_TUNE_REASSOC_INT_TO_PARALLEL]
#define TARGET_REASSOC_FP_TO_PARALLEL \
	ix86_tune_features[X86_TUNE_REASSOC_FP_TO_PARALLEL]

/* Feature tests against the various architecture variations.  */
enum ix86_arch_indices {
  X86_ARCH_CMOVE,		/* || TARGET_SSE */
  X86_ARCH_CMPXCHG,
  X86_ARCH_CMPXCHG8B,
  X86_ARCH_XADD,
  X86_ARCH_BSWAP,

  X86_ARCH_LAST
};

extern unsigned char ix86_arch_features[X86_ARCH_LAST];

#define TARGET_CMOVE ix86_arch_features[X86_ARCH_CMOVE]
#define TARGET_CMPXCHG ix86_arch_features[X86_ARCH_CMPXCHG]
#define TARGET_CMPXCHG8B ix86_arch_features[X86_ARCH_CMPXCHG8B]
#define TARGET_XADD ix86_arch_features[X86_ARCH_XADD]
#define TARGET_BSWAP ix86_arch_features[X86_ARCH_BSWAP]

#define TARGET_FISTTP (TARGET_SSE3 && TARGET_80387)

extern int x86_prefetch_sse;

#define TARGET_PREFETCH_SSE x86_prefetch_sse

#define ASSEMBLER_DIALECT (ix86_asm_dialect)

#define TARGET_SSE_MATH ((ix86_fpmath & FPMATH_SSE) != 0)
#define TARGET_MIX_SSE_I387 \
  ((ix86_fpmath & (FPMATH_SSE | FPMATH_387)) == (FPMATH_SSE | FPMATH_387))

#define TARGET_GNU_TLS (ix86_tls_dialect == TLS_DIALECT_GNU)
#define TARGET_GNU2_TLS (ix86_tls_dialect == TLS_DIALECT_GNU2)
#define TARGET_ANY_GNU_TLS (TARGET_GNU_TLS || TARGET_GNU2_TLS)
#define TARGET_SUN_TLS 0

#ifndef TARGET_64BIT_DEFAULT
#define TARGET_64BIT_DEFAULT 0
#endif
#ifndef TARGET_TLS_DIRECT_SEG_REFS_DEFAULT
#define TARGET_TLS_DIRECT_SEG_REFS_DEFAULT 0
#endif

/* Fence to use after loop using storent.  */

extern tree x86_mfence;
#define FENCE_FOLLOWING_MOVNT x86_mfence

/* Once GDB has been enhanced to deal with functions without frame
   pointers, we can change this to allow for elimination of
   the frame pointer in leaf functions.  */
#define TARGET_DEFAULT 0

/* Extra bits to force.  */
#define TARGET_SUBTARGET_DEFAULT 0
#define TARGET_SUBTARGET_ISA_DEFAULT 0

/* Extra bits to force on w/ 32-bit mode.  */
#define TARGET_SUBTARGET32_DEFAULT 0
#define TARGET_SUBTARGET32_ISA_DEFAULT 0

/* Extra bits to force on w/ 64-bit mode.  */
#define TARGET_SUBTARGET64_DEFAULT 0
#define TARGET_SUBTARGET64_ISA_DEFAULT 0

/* Replace MACH-O ifdefs by in-line tests, where possible.
   (a) Macros defined in config/i386/darwin.h  */
#define TARGET_MACHO 0
#define TARGET_MACHO_BRANCH_ISLANDS 0
#define MACHOPIC_ATT_STUB 0
/* (b) Macros defined in config/darwin.h  */
#define MACHO_DYNAMIC_NO_PIC_P 0
#define MACHOPIC_INDIRECT 0
#define MACHOPIC_PURE 0

/* For the Windows 64-bit ABI.  */
#define TARGET_64BIT_MS_ABI (TARGET_64BIT && ix86_cfun_abi () == MS_ABI)

/* For the Windows 32-bit ABI.  */
#define TARGET_32BIT_MS_ABI (!TARGET_64BIT && ix86_cfun_abi () == MS_ABI)

/* This is re-defined by cygming.h.  */
#define TARGET_SEH 0

/* The default ABI used by the target.  */
#define DEFAULT_ABI SYSV_ABI

/* Subtargets may reset this to 1 in order to enable 96-bit long double
   with the rounding mode forced to 53 bits.  */
#define TARGET_96_ROUND_53_LONG_DOUBLE 0

/* -march=native handling only makes sense with the compiler running on
   an x86 or x86_64 chip.  If changing this condition, also change
   the condition in driver-i386.c.  */
#if defined(__i386__) || defined(__x86_64__)
/* In driver-i386.c.  */
extern const char *host_detect_local_cpu (int argc, const char **argv);
#define EXTRA_SPEC_FUNCTIONS \
  { "local_cpu_detect", host_detect_local_cpu },
#define HAVE_LOCAL_CPU_DETECT
#endif

#if TARGET_64BIT_DEFAULT
#define OPT_ARCH64 "!m32"
#define OPT_ARCH32 "m32"
#else
#define OPT_ARCH64 "m64|mx32"
#define OPT_ARCH32 "m64|mx32:;"
#endif

/* Support for configure-time defaults of some command line options.
   The order here is important so that -march doesn't squash the
   tune or cpu values.  */
#define OPTION_DEFAULT_SPECS \
  {"tune", "%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}" }, \
  {"tune_32", "%{" OPT_ARCH32 ":%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}}" }, \
  {"tune_64", "%{" OPT_ARCH64 ":%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}}" }, \
  {"cpu", "%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}" }, \
  {"cpu_32", "%{" OPT_ARCH32 ":%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}}" }, \
  {"cpu_64", "%{" OPT_ARCH64 ":%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}}" }, \
  {"arch", "%{!march=*:-march=%(VALUE)}"}, \
  {"arch_32", "%{" OPT_ARCH32 ":%{!march=*:-march=%(VALUE)}}"}, \
  {"arch_64", "%{" OPT_ARCH64 ":%{!march=*:-march=%(VALUE)}}"},
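
/* Editorial note -- an illustrative expansion, assuming the usual
   configure plumbing: a build configured with --with-arch-32=core2 feeds
   the "arch_32" entry above, so a plain 32-bit compile behaves as if
   -march=core2 had been passed, while an explicit -march=... on the
   command line suppresses the default because of the %{!march=*:...}
   guard.  */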

/* Specs for the compiler proper */

#ifndef CC1_CPU_SPEC
#define CC1_CPU_SPEC_1 ""

#ifndef HAVE_LOCAL_CPU_DETECT
#define CC1_CPU_SPEC CC1_CPU_SPEC_1
#else
#define CC1_CPU_SPEC CC1_CPU_SPEC_1 \
"%{march=native:%>march=native %:local_cpu_detect(arch) \
  %{!mtune=*:%>mtune=native %:local_cpu_detect(tune)}} \
%{mtune=native:%>mtune=native %:local_cpu_detect(tune)}"
#endif
#endif
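
/* Editorial note -- sketch of what the spec above does: for
   "gcc -march=native", the %:local_cpu_detect(arch) callback invokes
   host_detect_local_cpu from driver-i386.c (registered through
   EXTRA_SPEC_FUNCTIONS), which probes the host CPU and splices its result
   (typically a concrete -march= value plus ISA flags; the exact string
   depends on the host) into the command line; -mtune=native is handled
   the same way via the "tune" argument.  */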

/* Target CPU builtins.  */
#define TARGET_CPU_CPP_BUILTINS() ix86_target_macros ()

/* Target Pragmas.  */
#define REGISTER_TARGET_PRAGMAS() ix86_register_pragmas ()

enum target_cpu_default
{
  TARGET_CPU_DEFAULT_generic = 0,

  TARGET_CPU_DEFAULT_i386,
  TARGET_CPU_DEFAULT_i486,
  TARGET_CPU_DEFAULT_pentium,
  TARGET_CPU_DEFAULT_pentium_mmx,
  TARGET_CPU_DEFAULT_pentiumpro,
  TARGET_CPU_DEFAULT_pentium2,
  TARGET_CPU_DEFAULT_pentium3,
  TARGET_CPU_DEFAULT_pentium4,
  TARGET_CPU_DEFAULT_pentium_m,
  TARGET_CPU_DEFAULT_prescott,
  TARGET_CPU_DEFAULT_nocona,
  TARGET_CPU_DEFAULT_core2,
  TARGET_CPU_DEFAULT_corei7,
  TARGET_CPU_DEFAULT_atom,

  TARGET_CPU_DEFAULT_geode,
  TARGET_CPU_DEFAULT_k6,
  TARGET_CPU_DEFAULT_k6_2,
  TARGET_CPU_DEFAULT_k6_3,
  TARGET_CPU_DEFAULT_athlon,
  TARGET_CPU_DEFAULT_athlon_sse,
  TARGET_CPU_DEFAULT_k8,
  TARGET_CPU_DEFAULT_amdfam10,
  TARGET_CPU_DEFAULT_bdver1,
  TARGET_CPU_DEFAULT_bdver2,
  TARGET_CPU_DEFAULT_btver1,

  TARGET_CPU_DEFAULT_max
};

#ifndef CC1_SPEC
#define CC1_SPEC "%(cc1_cpu) "
#endif

/* This macro defines names of additional specifications to put in the
   specs that can be used in various specifications like CC1_SPEC.  Its
   definition is an initializer with a subgrouping for each command option.

   Each subgrouping contains a string constant that defines the
   specification name, and a string constant that is used by the GCC
   driver program.

   Do not define this macro if it does not need to do anything.  */

#ifndef SUBTARGET_EXTRA_SPECS
#define SUBTARGET_EXTRA_SPECS
#endif

#define EXTRA_SPECS \
  { "cc1_cpu",  CC1_CPU_SPEC }, \
  SUBTARGET_EXTRA_SPECS


/* Set the value of FLT_EVAL_METHOD in float.h.  When using only the
   FPU, assume that the fpcw is set to extended precision; when using
   only SSE, rounding is correct; when using both SSE and the FPU,
   the rounding precision is indeterminate, since either may be chosen
   apparently at random.  */
#define TARGET_FLT_EVAL_METHOD \
  (TARGET_MIX_SSE_I387 ? -1 : TARGET_SSE_MATH ? 0 : 2)
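
/* Editorial note: the values above follow C99 FLT_EVAL_METHOD -- 0 means
   "evaluate in the range and precision of the type" (pure SSE math),
   2 means "evaluate in long double precision" (x87), and -1 means the
   evaluation method is indeterminate (mixed SSE/x87 math).  */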

/* Whether to allow x87 floating-point arithmetic on MODE (one of
   SFmode, DFmode and XFmode) in the current excess precision
   configuration.  */
#define X87_ENABLE_ARITH(MODE) \
  (flag_excess_precision == EXCESS_PRECISION_FAST || (MODE) == XFmode)

/* Likewise, whether to allow direct conversions from integer mode
   IMODE (HImode, SImode or DImode) to MODE.  */
#define X87_ENABLE_FLOAT(MODE, IMODE) \
  (flag_excess_precision == EXCESS_PRECISION_FAST \
   || (MODE) == XFmode \
   || ((MODE) == DFmode && (IMODE) == SImode) \
   || (IMODE) == HImode)
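
/* Editorial note -- worked examples of the macro above: under
   -fexcess-precision=standard, X87_ENABLE_FLOAT (DFmode, SImode) is true
   because any 32-bit integer is exactly representable in the 53-bit
   significand of a double, so the x87 fild cannot change the rounded
   result; presumably for the same reason DImode sources are only allowed
   when the target is XFmode, whose 64-bit significand holds them
   exactly.  */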

/* target machine storage layout */

#define SHORT_TYPE_SIZE 16
#define INT_TYPE_SIZE 32
#define LONG_TYPE_SIZE (TARGET_X32 ? 32 : BITS_PER_WORD)
#define POINTER_SIZE (TARGET_X32 ? 32 : BITS_PER_WORD)
#define LONG_LONG_TYPE_SIZE 64
#define FLOAT_TYPE_SIZE 32
#define DOUBLE_TYPE_SIZE 64
#define LONG_DOUBLE_TYPE_SIZE 80

#define WIDEST_HARDWARE_FP_SIZE LONG_DOUBLE_TYPE_SIZE

#if defined (TARGET_BI_ARCH) || TARGET_64BIT_DEFAULT
#define MAX_BITS_PER_WORD 64
#else
#define MAX_BITS_PER_WORD 32
#endif

/* Define this if the most significant bit of a byte has the lowest number.  */
/* That is not true on the 80386.  */

#define BITS_BIG_ENDIAN 0

/* Define this if the most significant byte of a word has the lowest number.  */
/* That is not true on the 80386.  */
#define BYTES_BIG_ENDIAN 0

/* Define this if the most significant word of a multiword number has the
   lowest number.  */
/* Not true for the 80386.  */
#define WORDS_BIG_ENDIAN 0

/* Width of a word, in units (bytes).  */
#define UNITS_PER_WORD (TARGET_64BIT ? 8 : 4)

#ifndef IN_LIBGCC2
#define MIN_UNITS_PER_WORD 4
#endif

/* Allocation boundary (in *bits*) for storing arguments in argument list.  */
#define PARM_BOUNDARY BITS_PER_WORD

/* Boundary (in *bits*) on which the stack pointer should be aligned.  */
#define STACK_BOUNDARY \
  (TARGET_64BIT && ix86_abi == MS_ABI ? 128 : BITS_PER_WORD)

/* Stack boundary of the main function guaranteed by the OS.  */
#define MAIN_STACK_BOUNDARY (TARGET_64BIT ? 128 : 32)

/* Minimum stack boundary.  */
#define MIN_STACK_BOUNDARY (TARGET_64BIT ? 128 : 32)

/* Boundary (in *bits*) on which the stack pointer prefers to be
   aligned; the compiler cannot rely on having this alignment.  */
#define PREFERRED_STACK_BOUNDARY ix86_preferred_stack_boundary

/* It should be MIN_STACK_BOUNDARY.  But we set it to 128 bits for
   both 32-bit and 64-bit, to support code that needs 128-bit stack
   alignment for SSE instructions but cannot realign the stack.  */
#define PREFERRED_STACK_BOUNDARY_DEFAULT 128

/* 1 if -mstackrealign should be turned on by default.  It will
   generate an alternate prologue and epilogue that realigns the
   runtime stack if necessary.  This supports mixing code that keeps a
   4-byte aligned stack, as specified by the i386 psABI, with code that
   needs a 16-byte aligned stack, as required by SSE instructions.  */
#define STACK_REALIGN_DEFAULT 0

/* Boundary (in *bits*) on which the incoming stack is aligned.  */
#define INCOMING_STACK_BOUNDARY ix86_incoming_stack_boundary

/* Target OS keeps a vector-aligned (128-bit, 16-byte) stack.  This is
   mandatory for the 64-bit ABI, and may or may not be true for other
   operating systems.  */
#define TARGET_KEEPS_VECTOR_ALIGNED_STACK TARGET_64BIT

/* Minimum allocation boundary for the code of a function.  */
#define FUNCTION_BOUNDARY 8

/* C++ stores the virtual bit in the lowest bit of function pointers.  */
#define TARGET_PTRMEMFUNC_VBIT_LOCATION ptrmemfunc_vbit_in_pfn

/* Minimum size in bits of the largest boundary to which any
   and all fundamental data types supported by the hardware
   might need to be aligned.  No data type wants to be aligned
   rounder than this.

   Pentium+ prefers DFmode values to be aligned to 64 bit boundary
   and Pentium Pro XFmode values at 128 bit boundaries.  */

#define BIGGEST_ALIGNMENT (TARGET_AVX ? 256 : 128)

/* Maximum stack alignment.  */
#define MAX_STACK_ALIGNMENT MAX_OFILE_ALIGNMENT

/* Alignment value for attribute ((aligned)).  It is a constant since
   it is part of the ABI.  We shouldn't change it with -mavx.  */
#define ATTRIBUTE_ALIGNED_VALUE 128

/* Decide whether a variable of mode MODE should be 128 bit aligned.  */
#define ALIGN_MODE_128(MODE) \
  ((MODE) == XFmode || SSE_REG_MODE_P (MODE))

/* The published ABIs say that doubles should be aligned on word
   boundaries, so lower the alignment for structure fields unless
   -malign-double is set.  */

/* ??? Blah -- this macro is used directly by libobjc.  Since it
   supports no vector modes, cut out the complexity and fall back
   on BIGGEST_FIELD_ALIGNMENT.  */
#ifdef IN_TARGET_LIBS
#ifdef __x86_64__
#define BIGGEST_FIELD_ALIGNMENT 128
#else
#define BIGGEST_FIELD_ALIGNMENT 32
#endif
#else
#define ADJUST_FIELD_ALIGN(FIELD, COMPUTED) \
  x86_field_alignment (FIELD, COMPUTED)
#endif

/* If defined, a C expression to compute the alignment given to a
   constant that is being placed in memory.  EXP is the constant
   and ALIGN is the alignment that the object would ordinarily have.
   The value of this macro is used instead of that alignment to align
   the object.

   If this macro is not defined, then ALIGN is used.

   The typical use of this macro is to increase alignment for string
   constants to be word aligned so that `strcpy' calls that copy
   constants can be done inline.  */

#define CONSTANT_ALIGNMENT(EXP, ALIGN) ix86_constant_alignment ((EXP), (ALIGN))

/* If defined, a C expression to compute the alignment for a static
   variable.  TYPE is the data type, and ALIGN is the alignment that
   the object would ordinarily have.  The value of this macro is used
   instead of that alignment to align the object.

   If this macro is not defined, then ALIGN is used.

   One use of this macro is to increase alignment of medium-size
   data to make it all fit in fewer cache lines.  Another is to
   cause character arrays to be word-aligned so that `strcpy' calls
   that copy constants to character arrays can be done inline.  */

#define DATA_ALIGNMENT(TYPE, ALIGN) ix86_data_alignment ((TYPE), (ALIGN))

/* If defined, a C expression to compute the alignment for a local
   variable.  TYPE is the data type, and ALIGN is the alignment that
   the object would ordinarily have.  The value of this macro is used
   instead of that alignment to align the object.

   If this macro is not defined, then ALIGN is used.

   One use of this macro is to increase alignment of medium-size
   data to make it all fit in fewer cache lines.  */

#define LOCAL_ALIGNMENT(TYPE, ALIGN) \
  ix86_local_alignment ((TYPE), VOIDmode, (ALIGN))

/* If defined, a C expression to compute the alignment for a stack slot.
   TYPE is the data type, MODE is the widest mode available, and ALIGN
   is the alignment that the slot would ordinarily have.  The value of
   this macro is used instead of that alignment to align the slot.

   If this macro is not defined, then ALIGN is used when TYPE is NULL.
   Otherwise, LOCAL_ALIGNMENT will be used.

   One use of this macro is to set the alignment of a stack slot to the
   maximum alignment of all possible modes which the slot may have.  */

#define STACK_SLOT_ALIGNMENT(TYPE, MODE, ALIGN) \
  ix86_local_alignment ((TYPE), (MODE), (ALIGN))

/* If defined, a C expression to compute the alignment for a local
   variable DECL.

   If this macro is not defined, then
   LOCAL_ALIGNMENT (TREE_TYPE (DECL), DECL_ALIGN (DECL)) will be used.

   One use of this macro is to increase alignment of medium-size
   data to make it all fit in fewer cache lines.  */

#define LOCAL_DECL_ALIGNMENT(DECL) \
  ix86_local_alignment ((DECL), VOIDmode, DECL_ALIGN (DECL))

/* If defined, a C expression to compute the minimum required alignment
   for dynamic stack realignment purposes for EXP (a TYPE or DECL),
   MODE, assuming normal alignment ALIGN.

   If this macro is not defined, then (ALIGN) will be used.  */

#define MINIMUM_ALIGNMENT(EXP, MODE, ALIGN) \
  ix86_minimum_alignment (EXP, MODE, ALIGN)


/* Set this nonzero if move instructions will actually fail to work
   when given unaligned data.  */
#define STRICT_ALIGNMENT 0

/* If bit field type is int, don't let it cross an int,
   and give entire struct the alignment of an int.  */
/* Required on the 386 since it doesn't have bit-field insns.  */
#define PCC_BITFIELD_TYPE_MATTERS 1

/* Standard register usage.  */

/* This processor has special stack-like registers.  See reg-stack.c
   for details.  */

#define STACK_REGS

#define IS_STACK_MODE(MODE) \
  (((MODE) == SFmode && !(TARGET_SSE && TARGET_SSE_MATH)) \
   || ((MODE) == DFmode && !(TARGET_SSE2 && TARGET_SSE_MATH)) \
   || (MODE) == XFmode)

/* Number of actual hardware registers.
   The hardware registers are assigned numbers for the compiler
   from 0 to just below FIRST_PSEUDO_REGISTER.
   All registers that the compiler knows about must be given numbers,
   even those that are not normally considered general registers.

   In the 80386 we give the 8 general purpose registers the numbers 0-7.
   We number the floating point registers 8-15.
   Note that registers 0-7 can be accessed as a short or int,
   while only 0-3 may be used with byte `mov' instructions.

   Reg 16 does not correspond to any hardware register, but instead
   appears in the RTL as an argument pointer prior to reload, and is
   eliminated during reloading in favor of either the stack or frame
   pointer.  */

#define FIRST_PSEUDO_REGISTER 53

/* Number of hardware registers that go into the DWARF-2 unwind info.
   If not defined, equals FIRST_PSEUDO_REGISTER.  */

#define DWARF_FRAME_REGISTERS 17

/* 1 for registers that have pervasive standard uses
   and are not available for the register allocator.
   On the 80386, the stack pointer is such, as is the arg pointer.

   The value is zero if the register is not fixed on either 32 or
   64 bit targets, one if the register is fixed on both 32 and 64
   bit targets, two if it is only fixed on 32-bit targets and three
   if it is only fixed on 64-bit targets.
   Proper values are computed in TARGET_CONDITIONAL_REGISTER_USAGE.  */
#define FIXED_REGISTERS \
/*ax,dx,cx,bx,si,di,bp,sp,st,st1,st2,st3,st4,st5,st6,st7*/ \
{  0, 0, 0, 0, 0, 0, 0, 1, 0,  0,  0,  0,  0,  0,  0,  0, \
/*arg,flags,fpsr,fpcr,frame*/ \
    1,    1,   1,   1,    1, \
/*xmm0,xmm1,xmm2,xmm3,xmm4,xmm5,xmm6,xmm7*/ \
     0,   0,   0,   0,   0,   0,   0,   0, \
/* mm0, mm1, mm2, mm3, mm4, mm5, mm6, mm7*/ \
     0,   0,   0,   0,   0,   0,   0,   0, \
/*  r8,  r9, r10, r11, r12, r13, r14, r15*/ \
     2,   2,   2,   2,   2,   2,   2,   2, \
/*xmm8,xmm9,xmm10,xmm11,xmm12,xmm13,xmm14,xmm15*/ \
     2,   2,    2,    2,    2,    2,    2,    2 }


/* 1 for registers not available across function calls.
   These must include the FIXED_REGISTERS and also any
   registers that can be used without being saved.
   The latter must include the registers where values are returned
   and the register where structure-value addresses are passed.
   Aside from that, you can include as many other registers as you like.

   The value is zero if the register is not call used on either 32 or
   64 bit targets, one if the register is call used on both 32 and 64
   bit targets, two if it is only call used on 32-bit targets and three
   if it is only call used on 64-bit targets.
   Proper values are computed in TARGET_CONDITIONAL_REGISTER_USAGE.  */
#define CALL_USED_REGISTERS \
/*ax,dx,cx,bx,si,di,bp,sp,st,st1,st2,st3,st4,st5,st6,st7*/ \
{  1, 1, 1, 0, 3, 3, 0, 1, 1,  1,  1,  1,  1,  1,  1,  1, \
/*arg,flags,fpsr,fpcr,frame*/ \
    1,    1,   1,   1,    1, \
/*xmm0,xmm1,xmm2,xmm3,xmm4,xmm5,xmm6,xmm7*/ \
     1,   1,   1,   1,   1,   1,   1,   1, \
/* mm0, mm1, mm2, mm3, mm4, mm5, mm6, mm7*/ \
     1,   1,   1,   1,   1,   1,   1,   1, \
/*  r8,  r9, r10, r11, r12, r13, r14, r15*/ \
     1,   1,   1,   1,   2,   2,   2,   2, \
/*xmm8,xmm9,xmm10,xmm11,xmm12,xmm13,xmm14,xmm15*/ \
     1,   1,    1,    1,    1,    1,    1,    1 }
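
/* Editorial note -- reading the encodings above: %esi/%edi carry 3 in
   CALL_USED_REGISTERS because they are call-clobbered argument registers
   in the 64-bit SysV ABI but callee-saved in the 32-bit ABI, while r8-r15
   and xmm8-xmm15 carry 2 in FIXED_REGISTERS because those registers do
   not exist on 32-bit targets.  */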

/* Order in which to allocate registers.  Each register must be
   listed once, even those in FIXED_REGISTERS.  List frame pointer
   late and fixed registers last.  Note that, in general, we prefer
   registers listed in CALL_USED_REGISTERS, keeping the others
   available for storage of persistent values.

   ADJUST_REG_ALLOC_ORDER actually overwrites the order,
   so this is just an empty initializer for the array.  */

#define REG_ALLOC_ORDER \
{  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, \
  18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, \
  33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, \
  48, 49, 50, 51, 52 }

/* ADJUST_REG_ALLOC_ORDER is a macro which permits reg_alloc_order
   to be rearranged based on a particular function.  When using SSE math,
   we want to allocate SSE registers before x87 registers, and vice
   versa.  */

#define ADJUST_REG_ALLOC_ORDER x86_order_regs_for_local_alloc ()


#define OVERRIDE_ABI_FORMAT(FNDECL) ix86_call_abi_override (FNDECL)

/* Return number of consecutive hard regs needed starting at reg REGNO
   to hold something of mode MODE.
   This is ordinarily the length in words of a value of mode MODE
   but can be less for certain modes in special long registers.

   Actually there are no two word move instructions for consecutive
   registers.  And only registers 0-3 may have mov byte instructions
   applied to them.  */

#define HARD_REGNO_NREGS(REGNO, MODE) \
  (FP_REGNO_P (REGNO) || SSE_REGNO_P (REGNO) || MMX_REGNO_P (REGNO) \
   ? (COMPLEX_MODE_P (MODE) ? 2 : 1) \
   : ((MODE) == XFmode \
      ? (TARGET_64BIT ? 2 : 3) \
      : (MODE) == XCmode \
      ? (TARGET_64BIT ? 4 : 6) \
      : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
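
/* Editorial note -- worked examples of HARD_REGNO_NREGS: a DImode value
   in a general register needs GET_MODE_SIZE / UNITS_PER_WORD = 8/4 = 2
   registers in 32-bit mode but only 1 in 64-bit mode; an XFmode value
   needs 3 words on 32-bit and 2 on 64-bit; and any x87/SSE/MMX register
   holds a whole non-complex value, hence the constant 1.  */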

#define HARD_REGNO_NREGS_HAS_PADDING(REGNO, MODE) \
  ((TARGET_128BIT_LONG_DOUBLE && !TARGET_64BIT) \
   ? (FP_REGNO_P (REGNO) || SSE_REGNO_P (REGNO) || MMX_REGNO_P (REGNO) \
      ? 0 \
      : ((MODE) == XFmode || (MODE) == XCmode)) \
   : 0)

#define HARD_REGNO_NREGS_WITH_PADDING(REGNO, MODE) ((MODE) == XFmode ? 4 : 8)

#define VALID_AVX256_REG_MODE(MODE) \
  ((MODE) == V32QImode || (MODE) == V16HImode || (MODE) == V8SImode \
   || (MODE) == V4DImode || (MODE) == V2TImode || (MODE) == V8SFmode \
   || (MODE) == V4DFmode)

#define VALID_SSE2_REG_MODE(MODE) \
  ((MODE) == V16QImode || (MODE) == V8HImode || (MODE) == V2DFmode \
   || (MODE) == V2DImode || (MODE) == DFmode)

#define VALID_SSE_REG_MODE(MODE) \
  ((MODE) == V1TImode || (MODE) == TImode \
   || (MODE) == V4SFmode || (MODE) == V4SImode \
   || (MODE) == SFmode || (MODE) == TFmode)

#define VALID_MMX_REG_MODE_3DNOW(MODE) \
  ((MODE) == V2SFmode || (MODE) == SFmode)

#define VALID_MMX_REG_MODE(MODE) \
  ((MODE) == V1DImode || (MODE) == DImode \
   || (MODE) == V2SImode || (MODE) == SImode \
   || (MODE) == V4HImode || (MODE) == V8QImode)

#define VALID_DFP_MODE_P(MODE) \
  ((MODE) == SDmode || (MODE) == DDmode || (MODE) == TDmode)

#define VALID_FP_MODE_P(MODE) \
  ((MODE) == SFmode || (MODE) == DFmode || (MODE) == XFmode \
   || (MODE) == SCmode || (MODE) == DCmode || (MODE) == XCmode)

#define VALID_INT_MODE_P(MODE) \
  ((MODE) == QImode || (MODE) == HImode || (MODE) == SImode \
   || (MODE) == DImode \
   || (MODE) == CQImode || (MODE) == CHImode || (MODE) == CSImode \
   || (MODE) == CDImode \
   || (TARGET_64BIT && ((MODE) == TImode || (MODE) == CTImode \
			|| (MODE) == TFmode || (MODE) == TCmode)))

/* Return true for modes passed in SSE registers.  */
#define SSE_REG_MODE_P(MODE) \
  ((MODE) == V1TImode || (MODE) == TImode || (MODE) == V16QImode \
   || (MODE) == TFmode || (MODE) == V8HImode || (MODE) == V2DFmode \
   || (MODE) == V2DImode || (MODE) == V4SFmode || (MODE) == V4SImode \
   || (MODE) == V32QImode || (MODE) == V16HImode || (MODE) == V8SImode \
   || (MODE) == V4DImode || (MODE) == V8SFmode || (MODE) == V4DFmode \
   || (MODE) == V2TImode)

/* Value is 1 if hard register REGNO can hold a value of machine-mode MODE.  */

#define HARD_REGNO_MODE_OK(REGNO, MODE) \
  ix86_hard_regno_mode_ok ((REGNO), (MODE))

/* Value is 1 if it is a good idea to tie two pseudo registers
   when one has mode MODE1 and one has mode MODE2.
   If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
   for any hard reg, then this must be 0 for correct output.  */

#define MODES_TIEABLE_P(MODE1, MODE2) ix86_modes_tieable_p (MODE1, MODE2)

/* It is possible to write patterns to move flags; but until someone
   does it, we avoid CCmode copies.  */
#define AVOID_CCMODE_COPIES

/* Specify the modes required to caller save a given hard regno.
   We do this on i386 to prevent flags from being saved at all.

   Kill any attempts to combine saving of modes.  */

#define HARD_REGNO_CALLER_SAVE_MODE(REGNO, NREGS, MODE) \
  (CC_REGNO_P (REGNO) ? VOIDmode \
   : (MODE) == VOIDmode && (NREGS) != 1 ? VOIDmode \
   : (MODE) == VOIDmode ? choose_hard_reg_mode ((REGNO), (NREGS), false) \
   : (MODE) == HImode && !TARGET_PARTIAL_REG_STALL ? SImode \
   : (MODE) == QImode && (REGNO) > BX_REG && !TARGET_64BIT ? SImode \
   : (MODE))
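
/* Editorial note -- example of the macro above: a caller-saved HImode
   value is widened to SImode unless the tuning suffers from partial
   register stalls, and a QImode value living in %esi/%edi/%ebp/%esp
   (REGNO > BX_REG) is widened to SImode on 32-bit targets, where those
   registers have no byte-sized form.  */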
|
1070 |
|
|
|
1071 |
|
|
/* The only ABI that saves SSE registers across calls is Win64 (thus no
|
1072 |
|
|
need to check the current ABI here), and with AVX enabled Win64 only
|
1073 |
|
|
guarantees that the low 16 bytes are saved. */
|
1074 |
|
|
#define HARD_REGNO_CALL_PART_CLOBBERED(REGNO, MODE) \
|
1075 |
|
|
(SSE_REGNO_P (REGNO) && GET_MODE_SIZE (MODE) > 16)
|
1076 |
|
|
|
1077 |
|
|
/* Specify the registers used for certain standard purposes.
|
1078 |
|
|
The values of these macros are register numbers. */
|
1079 |
|
|
|
1080 |
|
|
/* on the 386 the pc register is %eip, and is not usable as a general
|
1081 |
|
|
register. The ordinary mov instructions won't work */
|
1082 |
|
|
/* #define PC_REGNUM */
|
1083 |
|
|
|
1084 |
|
|
/* Register to use for pushing function arguments. */
|
1085 |
|
|
#define STACK_POINTER_REGNUM 7
|
1086 |
|
|
|
1087 |
|
|
/* Base register for access to local variables of the function. */
|
1088 |
|
|
#define HARD_FRAME_POINTER_REGNUM 6
|
1089 |
|
|
|
1090 |
|
|
/* Base register for access to local variables of the function. */
|
1091 |
|
|
#define FRAME_POINTER_REGNUM 20
|
1092 |
|
|
|
1093 |
|
|
/* First floating point reg */
|
1094 |
|
|
#define FIRST_FLOAT_REG 8
|
1095 |
|
|
|
1096 |
|
|
/* First & last stack-like regs */
|
1097 |
|
|
#define FIRST_STACK_REG FIRST_FLOAT_REG
|
1098 |
|
|
#define LAST_STACK_REG (FIRST_FLOAT_REG + 7)
|
1099 |
|
|
|
1100 |
|
|
#define FIRST_SSE_REG (FRAME_POINTER_REGNUM + 1)
|
1101 |
|
|
#define LAST_SSE_REG (FIRST_SSE_REG + 7)
|
1102 |
|
|
|
1103 |
|
|
#define FIRST_MMX_REG (LAST_SSE_REG + 1)
|
1104 |
|
|
#define LAST_MMX_REG (FIRST_MMX_REG + 7)
|
1105 |
|
|
|
1106 |
|
|
#define FIRST_REX_INT_REG (LAST_MMX_REG + 1)
|
1107 |
|
|
#define LAST_REX_INT_REG (FIRST_REX_INT_REG + 7)
|
1108 |
|
|
|
1109 |
|
|
#define FIRST_REX_SSE_REG (LAST_REX_INT_REG + 1)
|
1110 |
|
|
#define LAST_REX_SSE_REG (FIRST_REX_SSE_REG + 7)
|
1111 |
|
|
|
1112 |
|
|
/* Override this in other tm.h files to cope with various OS lossage
|
1113 |
|
|
requiring a frame pointer. */
|
1114 |
|
|
#ifndef SUBTARGET_FRAME_POINTER_REQUIRED
|
1115 |
|
|
#define SUBTARGET_FRAME_POINTER_REQUIRED 0
|
1116 |
|
|
#endif
|
1117 |
|
|
|
1118 |
|
|
/* Make sure we can access arbitrary call frames. */
|
1119 |
|
|
#define SETUP_FRAME_ADDRESSES() ix86_setup_frame_addresses ()
|
1120 |
|
|
|
1121 |
|
|
/* Base register for access to arguments of the function. */
|
1122 |
|
|
#define ARG_POINTER_REGNUM 16
|
1123 |
|
|
|
1124 |
|
|
/* Register to hold the addressing base for position independent
|
1125 |
|
|
code access to data items. We don't use PIC pointer for 64bit
|
1126 |
|
|
mode. Define the regnum to dummy value to prevent gcc from
|
1127 |
|
|
pessimizing code dealing with EBX.
|
1128 |
|
|
|
1129 |
|
|
To avoid clobbering a call-saved register unnecessarily, we renumber
|
1130 |
|
|
the pic register when possible. The change is visible after the
|
1131 |
|
|
prologue has been emitted. */
|
1132 |
|
|
|
1133 |
|
|
#define REAL_PIC_OFFSET_TABLE_REGNUM BX_REG
|
1134 |
|
|
|
1135 |
|
|
#define PIC_OFFSET_TABLE_REGNUM \
|
1136 |
|
|
((TARGET_64BIT && ix86_cmodel == CM_SMALL_PIC) \
|
1137 |
|
|
|| !flag_pic ? INVALID_REGNUM \
|
1138 |
|
|
: reload_completed ? REGNO (pic_offset_table_rtx) \
|
1139 |
|
|
: REAL_PIC_OFFSET_TABLE_REGNUM)
|
1140 |
|
|
|
1141 |
|
|
#define GOT_SYMBOL_NAME "_GLOBAL_OFFSET_TABLE_"
|
1142 |
|
|
|
1143 |
|
|
/* This is overridden by <cygwin.h>. */
|
1144 |
|
|
#define MS_AGGREGATE_RETURN 0
|
1145 |
|
|
|
1146 |
|
|
#define KEEP_AGGREGATE_RETURN_POINTER 0

/* Define the classes of registers for register constraints in the
   machine description.  Also define ranges of constants.

   One of the classes must always be named ALL_REGS and include all hard regs.
   If there is more than one class, another class must be named NO_REGS
   and contain no registers.

   The name GENERAL_REGS must be the name of a class (or an alias for
   another name such as ALL_REGS).  This is the class of registers
   that is allowed by "g" or "r" in a register constraint.
   Also, registers outside this class are allocated only when
   instructions express preferences for them.

   The classes must be numbered in nondecreasing order; that is,
   a larger-numbered class must never be contained completely
   in a smaller-numbered class.

   For any two classes, it is very desirable that there be another
   class that represents their union.

   It might seem that class BREG is unnecessary, since no useful 386
   opcode needs reg %ebx.  But some systems pass args to the OS in ebx,
   and the "b" register constraint is useful in asms for syscalls.

   The flags, fpsr and fpcr registers are in no class.  */

enum reg_class
{
  NO_REGS,
  AREG, DREG, CREG, BREG, SIREG, DIREG,
  AD_REGS,                      /* %eax/%edx for DImode */
  CLOBBERED_REGS,               /* call-clobbered integers */
  Q_REGS,                       /* %eax %ebx %ecx %edx */
  NON_Q_REGS,                   /* %esi %edi %ebp %esp */
  INDEX_REGS,                   /* %eax %ebx %ecx %edx %esi %edi %ebp */
  LEGACY_REGS,                  /* %eax %ebx %ecx %edx %esi %edi %ebp %esp */
  GENERAL_REGS,                 /* %eax %ebx %ecx %edx %esi %edi %ebp %esp
                                   %r8 %r9 %r10 %r11 %r12 %r13 %r14 %r15 */
  FP_TOP_REG, FP_SECOND_REG,    /* %st(0) %st(1) */
  FLOAT_REGS,
  SSE_FIRST_REG,
  SSE_REGS,
  MMX_REGS,
  FP_TOP_SSE_REGS,
  FP_SECOND_SSE_REGS,
  FLOAT_SSE_REGS,
  FLOAT_INT_REGS,
  INT_SSE_REGS,
  FLOAT_INT_SSE_REGS,
  ALL_REGS, LIM_REG_CLASSES
};

#define N_REG_CLASSES ((int) LIM_REG_CLASSES)

#define INTEGER_CLASS_P(CLASS) \
  reg_class_subset_p ((CLASS), GENERAL_REGS)
#define FLOAT_CLASS_P(CLASS) \
  reg_class_subset_p ((CLASS), FLOAT_REGS)
#define SSE_CLASS_P(CLASS) \
  reg_class_subset_p ((CLASS), SSE_REGS)
#define MMX_CLASS_P(CLASS) \
  ((CLASS) == MMX_REGS)
#define MAYBE_INTEGER_CLASS_P(CLASS) \
  reg_classes_intersect_p ((CLASS), GENERAL_REGS)
#define MAYBE_FLOAT_CLASS_P(CLASS) \
  reg_classes_intersect_p ((CLASS), FLOAT_REGS)
#define MAYBE_SSE_CLASS_P(CLASS) \
  reg_classes_intersect_p (SSE_REGS, (CLASS))
#define MAYBE_MMX_CLASS_P(CLASS) \
  reg_classes_intersect_p (MMX_REGS, (CLASS))

#define Q_CLASS_P(CLASS) \
  reg_class_subset_p ((CLASS), Q_REGS)

/* Give names of register classes as strings for dump file.  */

#define REG_CLASS_NAMES \
{  "NO_REGS",                   \
   "AREG", "DREG", "CREG", "BREG", \
   "SIREG", "DIREG",            \
   "AD_REGS",                   \
   "CLOBBERED_REGS",            \
   "Q_REGS", "NON_Q_REGS",      \
   "INDEX_REGS",                \
   "LEGACY_REGS",               \
   "GENERAL_REGS",              \
   "FP_TOP_REG", "FP_SECOND_REG", \
   "FLOAT_REGS",                \
   "SSE_FIRST_REG",             \
   "SSE_REGS",                  \
   "MMX_REGS",                  \
   "FP_TOP_SSE_REGS",           \
   "FP_SECOND_SSE_REGS",        \
   "FLOAT_SSE_REGS",            \
   "FLOAT_INT_REGS",            \
   "INT_SSE_REGS",              \
   "FLOAT_INT_SSE_REGS",        \
   "ALL_REGS" }

/* Define which registers fit in which classes.  This is an initializer
   for a vector of HARD_REG_SET of length N_REG_CLASSES.

   Note that the default setting of CLOBBERED_REGS is for 32-bit; this
   is adjusted by TARGET_CONDITIONAL_REGISTER_USAGE for the 64-bit ABI
   in effect.  */

#define REG_CLASS_CONTENTS \
{ { 0x00, 0x0 },                        \
  { 0x01, 0x0 }, { 0x02, 0x0 },         /* AREG, DREG */ \
  { 0x04, 0x0 }, { 0x08, 0x0 },         /* CREG, BREG */ \
  { 0x10, 0x0 }, { 0x20, 0x0 },         /* SIREG, DIREG */ \
  { 0x03, 0x0 },                        /* AD_REGS */ \
  { 0x07, 0x0 },                        /* CLOBBERED_REGS */ \
  { 0x0f, 0x0 },                        /* Q_REGS */ \
  { 0x1100f0, 0x1fe0 },                 /* NON_Q_REGS */ \
  { 0x7f, 0x1fe0 },                     /* INDEX_REGS */ \
  { 0x1100ff, 0x0 },                    /* LEGACY_REGS */ \
  { 0x1100ff, 0x1fe0 },                 /* GENERAL_REGS */ \
  { 0x100, 0x0 }, { 0x0200, 0x0 },      /* FP_TOP_REG, FP_SECOND_REG */ \
  { 0xff00, 0x0 },                      /* FLOAT_REGS */ \
  { 0x200000, 0x0 },                    /* SSE_FIRST_REG */ \
  { 0x1fe00000, 0x1fe000 },             /* SSE_REGS */ \
  { 0xe0000000, 0x1f },                 /* MMX_REGS */ \
  { 0x1fe00100, 0x1fe000 },             /* FP_TOP_SSE_REG */ \
  { 0x1fe00200, 0x1fe000 },             /* FP_SECOND_SSE_REG */ \
  { 0x1fe0ff00, 0x1fe000 },             /* FLOAT_SSE_REGS */ \
  { 0x1ffff, 0x1fe0 },                  /* FLOAT_INT_REGS */ \
  { 0x1fe100ff, 0x1fffe0 },             /* INT_SSE_REGS */ \
  { 0x1fe1ffff, 0x1fffe0 },             /* FLOAT_INT_SSE_REGS */ \
  { 0xffffffff, 0x1fffff } \
}
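
/* Worked example of the encoding above: each class is a two-word hard
   register set, the first word covering hard registers 0-31 and the
   second registers 32-52.  AD_REGS is { 0x03, 0x0 }, i.e. bit 0 (%eax,
   AREG) plus bit 1 (%edx, DREG); Q_REGS is { 0x0f, 0x0 }, the four
   registers that have byte subregisters.  */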

/* The same information, inverted:
   Return the class number of the smallest class containing
   reg number REGNO.  This could be a conditional expression
   or could index an array.  */

#define REGNO_REG_CLASS(REGNO) (regclass_map[REGNO])

/* When this hook returns true for MODE, the compiler allows
   registers explicitly used in the rtl to be used as spill registers
   but prevents the compiler from extending the lifetime of these
   registers.  */
#define TARGET_SMALL_REGISTER_CLASSES_FOR_MODE_P hook_bool_mode_true

#define QI_REG_P(X) (REG_P (X) && REGNO (X) <= BX_REG)

#define GENERAL_REGNO_P(N) \
  ((N) <= STACK_POINTER_REGNUM || REX_INT_REGNO_P (N))

#define GENERAL_REG_P(X) \
  (REG_P (X) && GENERAL_REGNO_P (REGNO (X)))

#define ANY_QI_REG_P(X) (TARGET_64BIT ? GENERAL_REG_P(X) : QI_REG_P (X))

#define REX_INT_REGNO_P(N) \
  IN_RANGE ((N), FIRST_REX_INT_REG, LAST_REX_INT_REG)
#define REX_INT_REG_P(X) (REG_P (X) && REX_INT_REGNO_P (REGNO (X)))

#define FP_REG_P(X) (REG_P (X) && FP_REGNO_P (REGNO (X)))
#define FP_REGNO_P(N) IN_RANGE ((N), FIRST_STACK_REG, LAST_STACK_REG)
#define ANY_FP_REG_P(X) (REG_P (X) && ANY_FP_REGNO_P (REGNO (X)))
#define ANY_FP_REGNO_P(N) (FP_REGNO_P (N) || SSE_REGNO_P (N))

#define X87_FLOAT_MODE_P(MODE) \
  (TARGET_80387 && ((MODE) == SFmode || (MODE) == DFmode || (MODE) == XFmode))

#define SSE_REG_P(N) (REG_P (N) && SSE_REGNO_P (REGNO (N)))
#define SSE_REGNO_P(N) \
  (IN_RANGE ((N), FIRST_SSE_REG, LAST_SSE_REG) \
   || REX_SSE_REGNO_P (N))

#define REX_SSE_REGNO_P(N) \
  IN_RANGE ((N), FIRST_REX_SSE_REG, LAST_REX_SSE_REG)

#define SSE_REGNO(N) \
  ((N) < 8 ? FIRST_SSE_REG + (N) : FIRST_REX_SSE_REG + (N) - 8)
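
/* For example, SSE_REGNO (0) is FIRST_SSE_REG (hard register 21, %xmm0)
   and SSE_REGNO (8) is FIRST_REX_SSE_REG (hard register 45, %xmm8), so a
   logical SSE register index 0..15 maps onto the two discontiguous
   blocks of the hard register file.  */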

#define SSE_FLOAT_MODE_P(MODE) \
  ((TARGET_SSE && (MODE) == SFmode) || (TARGET_SSE2 && (MODE) == DFmode))

#define FMA4_VEC_FLOAT_MODE_P(MODE) \
  (TARGET_FMA4 && ((MODE) == V4SFmode || (MODE) == V2DFmode \
                   || (MODE) == V8SFmode || (MODE) == V4DFmode))

#define MMX_REG_P(XOP) (REG_P (XOP) && MMX_REGNO_P (REGNO (XOP)))
#define MMX_REGNO_P(N) IN_RANGE ((N), FIRST_MMX_REG, LAST_MMX_REG)

#define STACK_REG_P(XOP) (REG_P (XOP) && STACK_REGNO_P (REGNO (XOP)))
#define STACK_REGNO_P(N) IN_RANGE ((N), FIRST_STACK_REG, LAST_STACK_REG)

#define STACK_TOP_P(XOP) (REG_P (XOP) && REGNO (XOP) == FIRST_STACK_REG)

#define CC_REG_P(X) (REG_P (X) && CC_REGNO_P (REGNO (X)))
#define CC_REGNO_P(X) ((X) == FLAGS_REG || (X) == FPSR_REG)

/* The class value for index registers, and the one for base regs.  */

#define INDEX_REG_CLASS INDEX_REGS
#define BASE_REG_CLASS GENERAL_REGS

/* Place additional restrictions on the register class to use when it
   is necessary to be able to hold a value of mode MODE in a reload
   register for which class CLASS would ordinarily be used.  */

#define LIMIT_RELOAD_CLASS(MODE, CLASS) \
  ((MODE) == QImode && !TARGET_64BIT \
   && ((CLASS) == ALL_REGS || (CLASS) == GENERAL_REGS \
       || (CLASS) == LEGACY_REGS || (CLASS) == INDEX_REGS) \
   ? Q_REGS : (CLASS))
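
/* Example: reloading a QImode value whose class would be GENERAL_REGS on
   a 32-bit target is narrowed to Q_REGS, since only %eax, %ebx, %ecx and
   %edx have byte subregisters there; in 64-bit mode the class is left
   unchanged.  */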

/* If we are copying between general and FP registers, we need a memory
   location.  The same is true for SSE and MMX registers.  */
#define SECONDARY_MEMORY_NEEDED(CLASS1, CLASS2, MODE) \
  ix86_secondary_memory_needed ((CLASS1), (CLASS2), (MODE), 1)

/* Get_secondary_mem widens integral modes to BITS_PER_WORD.
   There is no need to emit full 64 bit move on 64 bit targets
   for integral modes that can be moved using 32 bit move.  */
#define SECONDARY_MEMORY_NEEDED_MODE(MODE) \
  (GET_MODE_BITSIZE (MODE) < 32 && INTEGRAL_MODE_P (MODE) \
   ? mode_for_size (32, GET_MODE_CLASS (MODE), 0) \
   : MODE)

/* Return a class of registers that cannot change FROM mode to TO mode.  */

#define CANNOT_CHANGE_MODE_CLASS(FROM, TO, CLASS) \
  ix86_cannot_change_mode_class (FROM, TO, CLASS)

/* Stack layout; function entry, exit and calling.  */

/* Define this if pushing a word on the stack
   makes the stack pointer a smaller address.  */
#define STACK_GROWS_DOWNWARD

/* Define this to nonzero if the nominal address of the stack frame
   is at the high-address end of the local variables;
   that is, each additional local variable allocated
   goes at a more negative offset in the frame.  */
#define FRAME_GROWS_DOWNWARD 1

/* Offset within stack frame to start allocating local variables at.
   If FRAME_GROWS_DOWNWARD, this is the offset to the END of the
   first local allocated.  Otherwise, it is the offset to the BEGINNING
   of the first local allocated.  */
#define STARTING_FRAME_OFFSET 0

/* If we generate an insn to push BYTES bytes, this says how many the stack
   pointer really advances by.  On the 386, we have a pushw instruction that
   decrements by exactly 2 no matter what the position was; there is no pushb.

   But as the CIE data alignment factor on this arch is -4 for 32bit targets
   and -8 for 64bit targets, we need to make sure all stack pointer adjustments
   are a multiple of 4 for 32bit targets and 8 for 64bit targets.  */

#define PUSH_ROUNDING(BYTES) \
  (((BYTES) + UNITS_PER_WORD - 1) & -UNITS_PER_WORD)
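
/* Worked example: on a 32-bit target (UNITS_PER_WORD == 4),
   PUSH_ROUNDING (1) == (1 + 3) & -4 == 4 and PUSH_ROUNDING (6) == 8;
   with UNITS_PER_WORD == 8 the same arguments both round up to 8.  */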

/* If defined, the maximum amount of space required for outgoing arguments
   will be computed and placed into the variable `crtl->outgoing_args_size'.
   No space will be pushed onto the stack for each call; instead, the
   function prologue should increase the stack frame size by this amount.

   The 64-bit MS ABI seems to require 16-byte alignment everywhere except
   for the function prologue and epilogue.  This is not possible without
   ACCUMULATE_OUTGOING_ARGS.  */

#define ACCUMULATE_OUTGOING_ARGS \
  (TARGET_ACCUMULATE_OUTGOING_ARGS || TARGET_64BIT_MS_ABI)

/* If defined, a C expression whose value is nonzero when we want to use PUSH
   instructions to pass outgoing arguments.  */

#define PUSH_ARGS (TARGET_PUSH_ARGS && !ACCUMULATE_OUTGOING_ARGS)

/* We want the stack and args to grow in opposite directions, even if
   PUSH_ARGS is 0.  */
#define PUSH_ARGS_REVERSED 1

/* Offset of first parameter from the argument pointer register value.  */
#define FIRST_PARM_OFFSET(FNDECL) 0

/* Define this macro if functions should assume that stack space has been
   allocated for arguments even when their values are passed in registers.

   The value of this macro is the size, in bytes, of the area reserved for
   arguments passed in registers for the function represented by FNDECL.

   This space can be allocated by the caller, or be a part of the
   machine-dependent stack frame: `OUTGOING_REG_PARM_STACK_SPACE' says
   which.  */
#define REG_PARM_STACK_SPACE(FNDECL) ix86_reg_parm_stack_space (FNDECL)

#define OUTGOING_REG_PARM_STACK_SPACE(FNTYPE) \
  (TARGET_64BIT && ix86_function_type_abi (FNTYPE) == MS_ABI)

/* Define how to find the value returned by a library function
   assuming the value has mode MODE.  */

#define LIBCALL_VALUE(MODE) ix86_libcall_value (MODE)

/* Define the size of the result block used for communication between
   untyped_call and untyped_return.  The block contains a DImode value
   followed by the block used by fnsave and frstor.  */

#define APPLY_RESULT_SIZE (8+108)

/* 1 if N is a possible register number for function argument passing.  */
#define FUNCTION_ARG_REGNO_P(N) ix86_function_arg_regno_p (N)

/* Define a data type for recording info about an argument list
   during the scan of that argument list.  This data type should
   hold all necessary information about the function itself
   and about the args processed so far, enough to enable macros
   such as FUNCTION_ARG to determine where the next arg should go.  */

typedef struct ix86_args {
  int words;                    /* # words passed so far */
  int nregs;                    /* # registers available for passing */
  int regno;                    /* next available register number */
  int fastcall;                 /* fastcall or thiscall calling convention
                                   is used */
  int sse_words;                /* # sse words passed so far */
  int sse_nregs;                /* # sse registers available for passing */
  int warn_avx;                 /* True when we want to warn about AVX ABI.  */
  int warn_sse;                 /* True when we want to warn about SSE ABI.  */
  int warn_mmx;                 /* True when we want to warn about MMX ABI.  */
  int sse_regno;                /* next available sse register number */
  int mmx_words;                /* # mmx words passed so far */
  int mmx_nregs;                /* # mmx registers available for passing */
  int mmx_regno;                /* next available mmx register number */
  int maybe_vaarg;              /* true for calls to possibly variadic
                                   functions.  */
  int caller;                   /* true if it is caller.  */
  int float_in_sse;             /* Set to 1 or 2 for 32bit targets if
                                   SFmode/DFmode arguments should be passed
                                   in SSE registers.  Otherwise 0.  */
  enum calling_abi call_abi;    /* Set to SYSV_ABI for sysv abi.  Otherwise
                                   MS_ABI for ms abi.  */
} CUMULATIVE_ARGS;

/* Initialize a variable CUM of type CUMULATIVE_ARGS
   for a call to a function whose data type is FNTYPE.
   For a library call, FNTYPE is 0.  */

#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS) \
  init_cumulative_args (&(CUM), (FNTYPE), (LIBNAME), (FNDECL), \
                        (N_NAMED_ARGS) != -1)

/* Output assembler code to FILE to increment profiler label # LABELNO
   for profiling a function entry.  */

#define FUNCTION_PROFILER(FILE, LABELNO) x86_function_profiler (FILE, LABELNO)

#define MCOUNT_NAME "_mcount"

#define MCOUNT_NAME_BEFORE_PROLOGUE "__fentry__"

#define PROFILE_COUNT_REGISTER "edx"

/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
   the stack pointer does not matter.  The value is tested only in
   functions that have frame pointers.
   No definition is equivalent to always zero.  */
/* Note on the 386 it might be more efficient not to define this since
   we have to restore it ourselves from the frame pointer, in order to
   use pop */

#define EXIT_IGNORE_STACK 1

/* Output assembler code for a block containing the constant parts
   of a trampoline, leaving space for the variable parts.  */

/* On the 386, the trampoline contains two instructions:
     mov #STATIC,ecx
     jmp FUNCTION
   The trampoline is generated entirely at runtime.  The operand of JMP
   is the address of FUNCTION relative to the instruction following the
   JMP (which is 5 bytes long).  */

/* Length in units of the trampoline for entering a nested function.  */

#define TRAMPOLINE_SIZE (TARGET_64BIT ? 24 : 10)
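
/* One way to account for the 10-byte 32-bit size: a 5-byte move of the
   static chain immediate into %ecx plus the 5-byte jmp described above.
   The 64-bit trampoline needs more room because the target address is
   loaded as a full 64-bit immediate; the exact byte layout is emitted by
   the trampoline initialization code in i386.c.  */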

/* Definitions for register eliminations.

   This is an array of structures.  Each structure initializes one pair
   of eliminable registers.  The "from" register number is given first,
   followed by "to".  Eliminations of the same "from" register are listed
   in order of preference.

   There are two registers that can always be eliminated on the i386.
   The frame pointer and the arg pointer can be replaced by either the
   hard frame pointer or the stack pointer, depending upon the
   circumstances.  The hard frame pointer is not used before reload and
   so it is not eligible for elimination.  */

#define ELIMINABLE_REGS \
{{ ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
 { ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \
 { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
 { FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}}

/* Define the offset between two registers, one to be eliminated, and the other
   its replacement, at the start of a routine.  */

#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
  ((OFFSET) = ix86_initial_elimination_offset ((FROM), (TO)))

/* Addressing modes, and classification of registers for them.  */

/* Macros to check register numbers against specific register classes.  */

/* These assume that REGNO is a hard or pseudo reg number.
   They give nonzero only if REGNO is a hard reg of the suitable class
   or a pseudo reg currently allocated to a suitable hard reg.
   Since they use reg_renumber, they are safe only once reg_renumber
   has been allocated, which happens in local-alloc.c.  */

#define REGNO_OK_FOR_INDEX_P(REGNO) \
  ((REGNO) < STACK_POINTER_REGNUM \
   || REX_INT_REGNO_P (REGNO) \
   || (unsigned) reg_renumber[(REGNO)] < STACK_POINTER_REGNUM \
   || REX_INT_REGNO_P ((unsigned) reg_renumber[(REGNO)]))

#define REGNO_OK_FOR_BASE_P(REGNO) \
  (GENERAL_REGNO_P (REGNO) \
   || (REGNO) == ARG_POINTER_REGNUM \
   || (REGNO) == FRAME_POINTER_REGNUM \
   || GENERAL_REGNO_P ((unsigned) reg_renumber[(REGNO)]))

/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
   and check its validity for a certain class.
   We have two alternate definitions for each of them.
   The usual definition accepts all pseudo regs; the other rejects
   them unless they have been allocated suitable hard regs.
   The symbol REG_OK_STRICT causes the latter definition to be used.

   Most source files want to accept pseudo regs in the hope that
   they will get allocated to the class that the insn wants them to be in.
   Source files for reload pass need to be strict.
   After reload, it makes no difference, since pseudo regs have
   been eliminated by then.  */


/* Non strict versions, pseudos are ok.  */
#define REG_OK_FOR_INDEX_NONSTRICT_P(X) \
  (REGNO (X) < STACK_POINTER_REGNUM \
   || REX_INT_REGNO_P (REGNO (X)) \
   || REGNO (X) >= FIRST_PSEUDO_REGISTER)

#define REG_OK_FOR_BASE_NONSTRICT_P(X) \
  (GENERAL_REGNO_P (REGNO (X)) \
   || REGNO (X) == ARG_POINTER_REGNUM \
   || REGNO (X) == FRAME_POINTER_REGNUM \
   || REGNO (X) >= FIRST_PSEUDO_REGISTER)

/* Strict versions, hard registers only */
#define REG_OK_FOR_INDEX_STRICT_P(X) REGNO_OK_FOR_INDEX_P (REGNO (X))
#define REG_OK_FOR_BASE_STRICT_P(X)  REGNO_OK_FOR_BASE_P (REGNO (X))

#ifndef REG_OK_STRICT
#define REG_OK_FOR_INDEX_P(X)  REG_OK_FOR_INDEX_NONSTRICT_P (X)
#define REG_OK_FOR_BASE_P(X)   REG_OK_FOR_BASE_NONSTRICT_P (X)

#else
#define REG_OK_FOR_INDEX_P(X)  REG_OK_FOR_INDEX_STRICT_P (X)
#define REG_OK_FOR_BASE_P(X)   REG_OK_FOR_BASE_STRICT_P (X)
#endif

/* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
   that is a valid memory address for an instruction.
   The MODE argument is the machine mode for the MEM expression
   that wants to use this address.

   The other macros defined here are used only in TARGET_LEGITIMATE_ADDRESS_P,
   except for CONSTANT_ADDRESS_P which is usually machine-independent.

   See legitimize_pic_address in i386.c for details as to what
   constitutes a legitimate address when -fpic is used.  */

#define MAX_REGS_PER_ADDRESS 2

#define CONSTANT_ADDRESS_P(X) constant_address_p (X)

/* If defined, a C expression to determine the base term of address X.
   This macro is used in only one place: `find_base_term' in alias.c.

   It is always safe for this macro to not be defined.  It exists so
   that alias analysis can understand machine-dependent addresses.

   The typical use of this macro is to handle addresses containing
   a label_ref or symbol_ref within an UNSPEC.  */

#define FIND_BASE_TERM(X) ix86_find_base_term (X)

/* Nonzero if the constant value X is a legitimate general operand
   when generating PIC code.  It is given that flag_pic is on and
   that X satisfies CONSTANT_P or is a CONST_DOUBLE.  */

#define LEGITIMATE_PIC_OPERAND_P(X) legitimate_pic_operand_p (X)

#define SYMBOLIC_CONST(X) \
  (GET_CODE (X) == SYMBOL_REF \
   || GET_CODE (X) == LABEL_REF \
   || (GET_CODE (X) == CONST && symbolic_reference_mentioned_p (X)))

/* Max number of args passed in registers.  If this is more than 3, we will
   have problems with ebx (register #4), since it is a caller save register
   and is also used as the pic register in ELF.  So for now, don't allow more
   than 3 registers to be passed in registers.  */

/* ABI-specific values for REGPARM_MAX and SSE_REGPARM_MAX.  */
#define X86_64_REGPARM_MAX 6
#define X86_64_MS_REGPARM_MAX 4

#define X86_32_REGPARM_MAX 3

#define REGPARM_MAX \
  (TARGET_64BIT \
   ? (TARGET_64BIT_MS_ABI \
      ? X86_64_MS_REGPARM_MAX \
      : X86_64_REGPARM_MAX) \
   : X86_32_REGPARM_MAX)
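
/* So REGPARM_MAX evaluates to 3 for 32-bit targets, 4 for the 64-bit MS
   ABI and 6 for the 64-bit SysV ABI.  */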

#define X86_64_SSE_REGPARM_MAX 8
#define X86_64_MS_SSE_REGPARM_MAX 4

#define X86_32_SSE_REGPARM_MAX (TARGET_SSE ? (TARGET_MACHO ? 4 : 3) : 0)

#define SSE_REGPARM_MAX \
  (TARGET_64BIT \
   ? (TARGET_64BIT_MS_ABI \
      ? X86_64_MS_SSE_REGPARM_MAX \
      : X86_64_SSE_REGPARM_MAX) \
   : X86_32_SSE_REGPARM_MAX)

#define MMX_REGPARM_MAX (TARGET_64BIT ? 0 : (TARGET_MMX ? 3 : 0))

/* Specify the machine mode that this machine uses
   for the index in the tablejump instruction.  */
#define CASE_VECTOR_MODE \
  (!TARGET_LP64 || (flag_pic && ix86_cmodel != CM_LARGE_PIC) ? SImode : DImode)
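
/* Example: jump-table entries are SImode for 32-bit and x32 targets and
   for PIC code outside the large code model; only non-PIC LP64 code (or
   LP64 PIC code in the large model) uses full DImode entries.  */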

/* Define this as 1 if `char' should by default be signed; else as 0.  */
#define DEFAULT_SIGNED_CHAR 1

/* Max number of bytes we can move from memory to memory
   in one reasonably fast instruction.  */
#define MOVE_MAX 16

/* MOVE_MAX_PIECES is the number of bytes at a time which we can
   move efficiently, as opposed to MOVE_MAX which is the maximum
   number of bytes we can move with a single instruction.  */
#define MOVE_MAX_PIECES UNITS_PER_WORD

/* If a memory-to-memory move would take MOVE_RATIO or more simple
   move-instruction pairs, we will do a movmem or libcall instead.
   Increasing the value will always make code faster, but eventually
   incurs high cost in increased code size.

   If you don't define this, a reasonable default is used.  */

#define MOVE_RATIO(speed) ((speed) ? ix86_cost->move_ratio : 3)

/* If a clear memory operation would take CLEAR_RATIO or more simple
   move-instruction sequences, we will do a clrmem or libcall instead.  */

#define CLEAR_RATIO(speed) ((speed) ? MIN (6, ix86_cost->move_ratio) : 2)

/* Define if shifts truncate the shift count which implies one can
   omit a sign-extension or zero-extension of a shift count.

   On i386, shifts do truncate the count.  But bit test instructions
   take the modulo of the bit offset operand.  */

/* #define SHIFT_COUNT_TRUNCATED */

/* Value is 1 if truncating an integer of INPREC bits to OUTPREC bits
   is done just by pretending it is already truncated.  */
#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1

/* A macro to update MODE and UNSIGNEDP when an object whose type is
   TYPE and which has the specified mode and signedness is to be
   stored in a register.  This macro is only called when TYPE is a
   scalar type.

   On i386 it is sometimes useful to promote HImode and QImode
   quantities to SImode.  The choice depends on target type.  */

#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \
do { \
  if (((MODE) == HImode && TARGET_PROMOTE_HI_REGS) \
      || ((MODE) == QImode && TARGET_PROMOTE_QI_REGS)) \
    (MODE) = SImode; \
} while (0)
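
/* Example: with TARGET_PROMOTE_QI_REGS set, a QImode scalar is promoted
   to SImode before being stored in a register; UNSIGNEDP and TYPE are
   left untouched by this definition.  */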

/* Specify the machine mode that pointers have.
   After generation of rtl, the compiler makes no further distinction
   between pointers and any other objects of this machine mode.  */
#define Pmode (TARGET_64BIT ? DImode : SImode)

/* A C expression whose value is zero if pointers that need to be extended
   from being `POINTER_SIZE' bits wide to `Pmode' are sign-extended and
   greater than zero if they are zero-extended and less than zero if the
   ptr_extend instruction should be used.  */

#define POINTERS_EXTEND_UNSIGNED 1

/* A function address in a call instruction
   is a byte address (for indexing purposes)
   so give the MEM rtx a byte's mode.  */
#define FUNCTION_MODE QImode

/* A C expression for the cost of a branch instruction.  A value of 1
   is the default; other values are interpreted relative to that.  */

#define BRANCH_COST(speed_p, predictable_p) \
  (!(speed_p) ? 2 : (predictable_p) ? 0 : ix86_branch_cost)

/* Define this macro as a C expression which is nonzero if accessing
   less than a word of memory (i.e. a `char' or a `short') is no
   faster than accessing a word of memory, i.e., if such accesses
   require more than one instruction or if there is no difference in
   cost between byte and (aligned) word loads.

   When this macro is not defined, the compiler will access a field by
   finding the smallest containing object; when it is defined, a
   fullword load will be used if alignment permits.  Unless byte
   accesses are faster than word accesses, using word accesses is
   preferable since it may eliminate subsequent memory access if
   subsequent accesses occur to other fields in the same word of the
   structure, but to different bytes.  */

#define SLOW_BYTE_ACCESS 0

/* Nonzero if access to memory by shorts is slow and undesirable.  */
#define SLOW_SHORT_ACCESS 0

/* Define this macro to be the value 1 if unaligned accesses have a
   cost many times greater than aligned accesses, for example if they
   are emulated in a trap handler.

   When this macro is nonzero, the compiler will act as if
   `STRICT_ALIGNMENT' were nonzero when generating code for block
   moves.  This can cause significantly more instructions to be
   produced.  Therefore, do not set this macro nonzero if unaligned
   accesses only add a cycle or two to the time for a memory access.

   If the value of this macro is always zero, it need not be defined.  */

/* #define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) 0 */

/* Define this macro if it is as good or better to call a constant
   function address than to call an address kept in a register.

   Desirable on the 386 because a CALL with a constant address is
   faster than one with a register address.  */

#define NO_FUNCTION_CSE

/* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
   return the mode to be used for the comparison.

   For floating-point equality comparisons, CCFPEQmode should be used.
   VOIDmode should be used in all other cases.

   For integer comparisons against zero, reduce to CCNOmode or CCZmode if
   possible, to allow for more combinations.  */

#define SELECT_CC_MODE(OP, X, Y) ix86_cc_mode ((OP), (X), (Y))

/* Return nonzero if MODE implies a floating point inequality can be
   reversed.  */

#define REVERSIBLE_CC_MODE(MODE) 1

/* A C expression whose value is reversed condition code of the CODE for
   comparison done in CC_MODE mode.  */
#define REVERSE_CONDITION(CODE, MODE) ix86_reverse_condition ((CODE), (MODE))


/* Control the assembler format that we output, to the extent
   this does not vary between assemblers.  */

/* How to refer to registers in assembler output.
   This sequence is indexed by compiler's hard-register-number (see above).  */

/* In order to refer to the first 8 regs as 32-bit regs, prefix an "e".
   For non floating point regs, the following are the HImode names.

   For float regs, the stack top is sometimes referred to as "%st(0)"
   instead of just "%st".  TARGET_PRINT_OPERAND handles this with the
   "y" code.  */

#define HI_REGISTER_NAMES \
{"ax","dx","cx","bx","si","di","bp","sp", \
 "st","st(1)","st(2)","st(3)","st(4)","st(5)","st(6)","st(7)", \
 "argp", "flags", "fpsr", "fpcr", "frame", \
 "xmm0","xmm1","xmm2","xmm3","xmm4","xmm5","xmm6","xmm7", \
 "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7", \
 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", \
 "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15"}

#define REGISTER_NAMES HI_REGISTER_NAMES

/* Table of additional register names to use in user input.  */

#define ADDITIONAL_REGISTER_NAMES \
{ { "eax", 0 }, { "edx", 1 }, { "ecx", 2 }, { "ebx", 3 }, \
  { "esi", 4 }, { "edi", 5 }, { "ebp", 6 }, { "esp", 7 }, \
  { "rax", 0 }, { "rdx", 1 }, { "rcx", 2 }, { "rbx", 3 }, \
  { "rsi", 4 }, { "rdi", 5 }, { "rbp", 6 }, { "rsp", 7 }, \
  { "al", 0 }, { "dl", 1 }, { "cl", 2 }, { "bl", 3 }, \
  { "ah", 0 }, { "dh", 1 }, { "ch", 2 }, { "bh", 3 } }

/* Note we are omitting these since currently I don't know how
   to get gcc to use these, since they want the same but different
   number as al, and ax.  */

#define QI_REGISTER_NAMES \
{"al", "dl", "cl", "bl", "sil", "dil", "bpl", "spl",}

/* These parallel the array above, and can be used to access bits 8:15
   of regs 0 through 3.  */

#define QI_HIGH_REGISTER_NAMES \
{"ah", "dh", "ch", "bh", }

/* How to renumber registers for dbx and gdb.  */

#define DBX_REGISTER_NUMBER(N) \
  (TARGET_64BIT ? dbx64_register_map[(N)] : dbx_register_map[(N)])

extern int const dbx_register_map[FIRST_PSEUDO_REGISTER];
extern int const dbx64_register_map[FIRST_PSEUDO_REGISTER];
extern int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER];

/* Before the prologue, RA is at 0(%esp).  */
#define INCOMING_RETURN_ADDR_RTX \
  gen_rtx_MEM (VOIDmode, gen_rtx_REG (VOIDmode, STACK_POINTER_REGNUM))

/* After the prologue, RA is at -4(AP) in the current frame.  */
#define RETURN_ADDR_RTX(COUNT, FRAME) \
  ((COUNT) == 0 \
   ? gen_rtx_MEM (Pmode, plus_constant (arg_pointer_rtx, -UNITS_PER_WORD)) \
   : gen_rtx_MEM (Pmode, plus_constant (FRAME, UNITS_PER_WORD)))

/* PC is dbx register 8; let's use that column for RA.  */
#define DWARF_FRAME_RETURN_COLUMN (TARGET_64BIT ? 16 : 8)

/* Before the prologue, the top of the frame is at 4(%esp).  */
#define INCOMING_FRAME_SP_OFFSET UNITS_PER_WORD

/* Describe how we implement __builtin_eh_return.  */
#define EH_RETURN_DATA_REGNO(N) ((N) <= DX_REG ? (N) : INVALID_REGNUM)
#define EH_RETURN_STACKADJ_RTX  gen_rtx_REG (Pmode, CX_REG)
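
/* With the register numbering above this means the two EH data registers
   are %eax and %edx (hard registers 0 and 1), and the stack adjustment
   for __builtin_eh_return is passed in %ecx.  */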

/* Select a format to encode pointers in exception handling data.  CODE
   is 0 for data, 1 for code labels, 2 for function pointers.  GLOBAL is
   true if the symbol may be affected by dynamic relocations.

   ??? All x86 object file formats are capable of representing this.
   After all, the relocation needed is the same as for the call insn.
   Whether or not a particular assembler allows us to enter such, I
   guess we'll have to see.  */
#define ASM_PREFERRED_EH_DATA_FORMAT(CODE, GLOBAL) \
  asm_preferred_eh_data_format ((CODE), (GLOBAL))

/* This is how to output an insn to push a register on the stack.
   It need not be very fast code.  */

#define ASM_OUTPUT_REG_PUSH(FILE, REGNO) \
do { \
  if (TARGET_64BIT) \
    asm_fprintf ((FILE), "\tpush{q}\t%%r%s\n", \
                 reg_names[(REGNO)] + (REX_INT_REGNO_P (REGNO) != 0)); \
  else \
    asm_fprintf ((FILE), "\tpush{l}\t%%e%s\n", reg_names[(REGNO)]); \
} while (0)

/* This is how to output an insn to pop a register from the stack.
   It need not be very fast code.  */

#define ASM_OUTPUT_REG_POP(FILE, REGNO) \
do { \
  if (TARGET_64BIT) \
    asm_fprintf ((FILE), "\tpop{q}\t%%r%s\n", \
                 reg_names[(REGNO)] + (REX_INT_REGNO_P (REGNO) != 0)); \
  else \
    asm_fprintf ((FILE), "\tpop{l}\t%%e%s\n", reg_names[(REGNO)]); \
} while (0)

/* This is how to output an element of a case-vector that is absolute.  */

#define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE) \
  ix86_output_addr_vec_elt ((FILE), (VALUE))

/* This is how to output an element of a case-vector that is relative.  */

#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
  ix86_output_addr_diff_elt ((FILE), (VALUE), (REL))

/* When we see %v, we will print the 'v' prefix if TARGET_AVX is true.  */

#define ASM_OUTPUT_AVX_PREFIX(STREAM, PTR) \
{ \
  if ((PTR)[0] == '%' && (PTR)[1] == 'v') \
    (PTR) += TARGET_AVX ? 1 : 2; \
}

/* A C statement or statements which output an assembler instruction
   opcode to the stdio stream STREAM.  The macro-operand PTR is a
   variable of type `char *' which points to the opcode name in
   its "internal" form--the form that is written in the machine
   description.  */

#define ASM_OUTPUT_OPCODE(STREAM, PTR) \
  ASM_OUTPUT_AVX_PREFIX ((STREAM), (PTR))

/* A C statement to output to the stdio stream FILE an assembler
   command to pad the location counter to a multiple of 1<<LOG
   bytes if it is within MAX_SKIP bytes.  */

#ifdef HAVE_GAS_MAX_SKIP_P2ALIGN
#undef  ASM_OUTPUT_MAX_SKIP_PAD
#define ASM_OUTPUT_MAX_SKIP_PAD(FILE, LOG, MAX_SKIP) \
  if ((LOG) != 0) \
    { \
      if ((MAX_SKIP) == 0) \
        fprintf ((FILE), "\t.p2align %d\n", (LOG)); \
      else \
        fprintf ((FILE), "\t.p2align %d,,%d\n", (LOG), (MAX_SKIP)); \
    }
#endif

/* Write the extra assembler code needed to declare a function
   properly.  */

#undef ASM_OUTPUT_FUNCTION_LABEL
#define ASM_OUTPUT_FUNCTION_LABEL(FILE, NAME, DECL) \
  ix86_asm_output_function_label (FILE, NAME, DECL)

/* Under some conditions we need jump tables in the text section,
   because the assembler cannot handle label differences between
   sections.  This is the case for x86_64 on Mach-O for example.  */

#define JUMP_TABLES_IN_TEXT_SECTION \
  (flag_pic && ((TARGET_MACHO && TARGET_64BIT) \
   || (!TARGET_64BIT && !HAVE_AS_GOTOFF_IN_DATA)))

/* Switch to init or fini section via SECTION_OP, emit a call to FUNC,
   and switch back.  For x86 we do this only to save a few bytes that
   would otherwise be unused in the text section.  */
#define CRT_MKSTR2(VAL) #VAL
#define CRT_MKSTR(x) CRT_MKSTR2(x)

#define CRT_CALL_STATIC_FUNCTION(SECTION_OP, FUNC) \
   asm (SECTION_OP "\n\t" \
        "call " CRT_MKSTR(__USER_LABEL_PREFIX__) #FUNC "\n" \
        TEXT_SECTION_ASM_OP);

/* Which processor to tune code generation for.  */

enum processor_type
{
  PROCESSOR_I386 = 0,                   /* 80386 */
  PROCESSOR_I486,                       /* 80486DX, 80486SX, 80486DX[24] */
  PROCESSOR_PENTIUM,
  PROCESSOR_PENTIUMPRO,
  PROCESSOR_GEODE,
  PROCESSOR_K6,
  PROCESSOR_ATHLON,
  PROCESSOR_PENTIUM4,
  PROCESSOR_K8,
  PROCESSOR_NOCONA,
  PROCESSOR_CORE2_32,
  PROCESSOR_CORE2_64,
  PROCESSOR_COREI7_32,
  PROCESSOR_COREI7_64,
  PROCESSOR_GENERIC32,
  PROCESSOR_GENERIC64,
  PROCESSOR_AMDFAM10,
  PROCESSOR_BDVER1,
  PROCESSOR_BDVER2,
  PROCESSOR_BTVER1,
  PROCESSOR_ATOM,
  PROCESSOR_max
};

extern enum processor_type ix86_tune;
extern enum processor_type ix86_arch;

/* Size of the RED_ZONE area.  */
#define RED_ZONE_SIZE 128
/* Reserved area of the red zone for temporaries.  */
#define RED_ZONE_RESERVE 8

extern unsigned int ix86_preferred_stack_boundary;
extern unsigned int ix86_incoming_stack_boundary;

/* Smallest class containing REGNO.  */
extern enum reg_class const regclass_map[FIRST_PSEUDO_REGISTER];

enum ix86_fpcmp_strategy {
  IX86_FPCMP_SAHF,
  IX86_FPCMP_COMI,
  IX86_FPCMP_ARITH
};

/* To properly truncate FP values into integers, we need to set i387 control
   word.  We can't emit proper mode switching code before reload, as spills
   generated by reload may truncate values incorrectly, but we still can avoid
   redundant computation of new control word by the mode switching pass.
   The fldcw instructions are still emitted redundantly, but this is probably
   not going to be a noticeable problem, as most CPUs have a fast path for
   the sequence.

   The machinery is to emit simple truncation instructions and split them
   before reload into instructions having USEs of two memory locations that
   are filled by this code with the old and new control words.

   A post-reload pass may later be used to eliminate the redundant fldcw if
   needed.  */

enum ix86_entity
{
  I387_TRUNC = 0,
  I387_FLOOR,
  I387_CEIL,
  I387_MASK_PM,
  MAX_386_ENTITIES
};

enum ix86_stack_slot
{
  SLOT_VIRTUAL = 0,
  SLOT_TEMP,
  SLOT_CW_STORED,
  SLOT_CW_TRUNC,
  SLOT_CW_FLOOR,
  SLOT_CW_CEIL,
  SLOT_CW_MASK_PM,
  MAX_386_STACK_LOCALS
};

/* Define this macro if the port needs extra instructions inserted
   for mode switching in an optimizing compilation.  */

#define OPTIMIZE_MODE_SWITCHING(ENTITY) \
  ix86_optimize_mode_switching[(ENTITY)]

/* If you define `OPTIMIZE_MODE_SWITCHING', you have to define this as
   initializer for an array of integers.  Each initializer element N
   refers to an entity that needs mode switching, and specifies the
   number of different modes that might need to be set for this
   entity.  The position of the initializer in the initializer -
   starting counting at zero - determines the integer that is used to
   refer to the mode-switched entity in question.  */

#define NUM_MODES_FOR_MODE_SWITCHING \
  { I387_CW_ANY, I387_CW_ANY, I387_CW_ANY, I387_CW_ANY }

/* ENTITY is an integer specifying a mode-switched entity.  If
   `OPTIMIZE_MODE_SWITCHING' is defined, you must define this macro to
   return an integer value not larger than the corresponding element
   in `NUM_MODES_FOR_MODE_SWITCHING', to denote the mode that ENTITY
   must be switched into prior to the execution of INSN.  */

#define MODE_NEEDED(ENTITY, I) ix86_mode_needed ((ENTITY), (I))

/* This macro specifies the order in which modes for ENTITY are
   processed.  0 is the highest priority.  */

#define MODE_PRIORITY_TO_MODE(ENTITY, N) (N)

/* Generate one or more insns to set ENTITY to MODE.  HARD_REG_LIVE
   is the set of hard registers live at the point where the insn(s)
   are to be inserted.  */

#define EMIT_MODE_SET(ENTITY, MODE, HARD_REGS_LIVE) \
  ((MODE) != I387_CW_ANY && (MODE) != I387_CW_UNINITIALIZED \
   ? emit_i387_cw_initialization (MODE), 0 \
   : 0)


/* Avoid renaming of stack registers, as doing so in combination with
   scheduling just increases the number of live registers at a time and
   in turn the number of fxch instructions needed.

   ??? Maybe Pentium chips benefit from renaming, someone can try....  */

#define HARD_REGNO_RENAME_OK(SRC, TARGET) \
  (! IN_RANGE ((SRC), FIRST_STACK_REG, LAST_STACK_REG))

#define FASTCALL_PREFIX '@'

/* Machine specific frame tracking during prologue/epilogue generation.  */

#ifndef USED_FOR_TARGET
struct GTY(()) machine_frame_state
{
  /* This pair tracks the currently active CFA as reg+offset.  When reg
     is drap_reg, we don't bother trying to record here the real CFA when
     it might really be a DW_CFA_def_cfa_expression.  */
  rtx cfa_reg;
  HOST_WIDE_INT cfa_offset;

  /* The current offset (canonically from the CFA) of ESP and EBP.
     When stack frame re-alignment is active, these may not be relative
     to the CFA.  However, in all cases they are relative to the offsets
     of the saved registers stored in ix86_frame.  */
  HOST_WIDE_INT sp_offset;
  HOST_WIDE_INT fp_offset;

  /* The size of the red-zone that may be assumed for the purposes of
     eliding register restore notes in the epilogue.  This may be zero
     if no red-zone is in effect, or may be reduced from the real
     red-zone value by a maximum runtime stack re-alignment value.  */
  int red_zone_offset;

  /* Indicate whether each of ESP, EBP or DRAP currently holds a valid
     value within the frame.  If false then the offset above should be
     ignored.  Note that DRAP, if valid, *always* points to the CFA and
     thus has an offset of zero.  */
  BOOL_BITFIELD sp_valid : 1;
  BOOL_BITFIELD fp_valid : 1;
  BOOL_BITFIELD drap_valid : 1;

  /* Indicate whether the local stack frame has been re-aligned.  When
     set, the SP/FP offsets above are relative to the aligned frame
     and not the CFA.  */
  BOOL_BITFIELD realigned : 1;
};

/* Private to winnt.c.  */
struct seh_frame_state;

struct GTY(()) machine_function {
  struct stack_local_entry *stack_locals;
  const char *some_ld_name;
  int varargs_gpr_size;
  int varargs_fpr_size;
  int optimize_mode_switching[MAX_386_ENTITIES];

  /* Number of saved registers USE_FAST_PROLOGUE_EPILOGUE
     has been computed for.  */
  int use_fast_prologue_epilogue_nregs;

  /* For -fsplit-stack support: A stack local which holds a pointer to
     the stack arguments for a function with a variable number of
     arguments.  This is set at the start of the function and is used
     to initialize the overflow_arg_area field of the va_list
     structure.  */
  rtx split_stack_varargs_pointer;

  /* This value is used for amd64 targets and specifies the current abi
     to be used.  MS_ABI means ms abi.  Otherwise SYSV_ABI means sysv abi.  */
  ENUM_BITFIELD(calling_abi) call_abi : 8;

  /* Nonzero if the function accesses a previous frame.  */
  BOOL_BITFIELD accesses_prev_frame : 1;

  /* Nonzero if the function requires a CLD in the prologue.  */
  BOOL_BITFIELD needs_cld : 1;

  /* Set by ix86_compute_frame_layout and used by prologue/epilogue
     expander to determine the style used.  */
  BOOL_BITFIELD use_fast_prologue_epilogue : 1;

  /* If true, the current function needs the default PIC register, not
     an alternate register (on x86) and must not use the red zone (on
     x86_64), even if it's a leaf function.  We don't want the
     function to be regarded as non-leaf because TLS calls need not
     affect register allocation.  This flag is set when a TLS call
     instruction is expanded within a function, and never reset, even
     if all such instructions are optimized away.  Use the
     ix86_current_function_calls_tls_descriptor macro for a better
     approximation.  */
  BOOL_BITFIELD tls_descriptor_call_expanded_p : 1;

  /* If true, the current function has a STATIC_CHAIN placed on the
     stack below the return address.  */
  BOOL_BITFIELD static_chain_on_stack : 1;

  /* Nonzero if caller passes 256bit AVX modes.  */
  BOOL_BITFIELD caller_pass_avx256_p : 1;

  /* Nonzero if caller returns 256bit AVX modes.  */
  BOOL_BITFIELD caller_return_avx256_p : 1;

  /* Nonzero if the current callee passes 256bit AVX modes.  */
  BOOL_BITFIELD callee_pass_avx256_p : 1;

  /* Nonzero if the current callee returns 256bit AVX modes.  */
  BOOL_BITFIELD callee_return_avx256_p : 1;

  /* Nonzero if a rescan of vzeroupper insns in the current function is
     needed.  */
  BOOL_BITFIELD rescan_vzeroupper_p : 1;

  /* During prologue/epilogue generation, the current frame state.
     Otherwise, the frame state at the end of the prologue.  */
  struct machine_frame_state fs;

  /* During SEH output, this is non-null.  */
  struct seh_frame_state * GTY((skip(""))) seh;
};
#endif

#define ix86_stack_locals (cfun->machine->stack_locals)
#define ix86_varargs_gpr_size (cfun->machine->varargs_gpr_size)
#define ix86_varargs_fpr_size (cfun->machine->varargs_fpr_size)
#define ix86_optimize_mode_switching (cfun->machine->optimize_mode_switching)
#define ix86_current_function_needs_cld (cfun->machine->needs_cld)
#define ix86_tls_descriptor_calls_expanded_in_cfun \
  (cfun->machine->tls_descriptor_call_expanded_p)
/* Since tls_descriptor_call_expanded is not cleared, even if all TLS
   calls are optimized away, we try to detect cases in which it was
   optimized away.  Since such instructions contain a (use (reg REG_SP)),
   we can verify whether there's any such instruction live by testing
   that REG_SP is live.  */
#define ix86_current_function_calls_tls_descriptor \
  (ix86_tls_descriptor_calls_expanded_in_cfun && df_regs_ever_live_p (SP_REG))
#define ix86_static_chain_on_stack (cfun->machine->static_chain_on_stack)

/* Control behavior of x86_file_start.  */
#define X86_FILE_START_VERSION_DIRECTIVE false
#define X86_FILE_START_FLTUSED false

/* Flag to mark data that is in the large address area.  */
#define SYMBOL_FLAG_FAR_ADDR   (SYMBOL_FLAG_MACH_DEP << 0)
#define SYMBOL_REF_FAR_ADDR_P(X) \
  ((SYMBOL_REF_FLAGS (X) & SYMBOL_FLAG_FAR_ADDR) != 0)

/* Flags to mark dllimport/dllexport.  Used by PE ports, but handy to
   have defined always, to avoid ifdefing.  */
#define SYMBOL_FLAG_DLLIMPORT  (SYMBOL_FLAG_MACH_DEP << 1)
#define SYMBOL_REF_DLLIMPORT_P(X) \
  ((SYMBOL_REF_FLAGS (X) & SYMBOL_FLAG_DLLIMPORT) != 0)

#define SYMBOL_FLAG_DLLEXPORT  (SYMBOL_FLAG_MACH_DEP << 2)
#define SYMBOL_REF_DLLEXPORT_P(X) \
  ((SYMBOL_REF_FLAGS (X) & SYMBOL_FLAG_DLLEXPORT) != 0)

extern void debug_ready_dispatch (void);
extern void debug_dispatch_window (int);

/* The value at zero is only defined for the BMI instructions
   LZCNT and TZCNT, not the BSR/BSF insns in the original isa.  */
#define CTZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \
  ((VALUE) = GET_MODE_BITSIZE (MODE), TARGET_BMI)
#define CLZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \
  ((VALUE) = GET_MODE_BITSIZE (MODE), TARGET_LZCNT)
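
/* Example: for SImode both macros set VALUE to 32, but the result is
   usable only when the corresponding ISA flag is on, i.e. tzcnt
   (TARGET_BMI) for CTZ and lzcnt (TARGET_LZCNT) for CLZ; plain bsf/bsr
   leave the destination undefined for a zero input.  */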

/* Flags returned by ix86_get_callcvt ().  */
#define IX86_CALLCVT_CDECL      0x1
#define IX86_CALLCVT_STDCALL    0x2
#define IX86_CALLCVT_FASTCALL   0x4
#define IX86_CALLCVT_THISCALL   0x8
#define IX86_CALLCVT_REGPARM    0x10
#define IX86_CALLCVT_SSEREGPARM 0x20

#define IX86_BASE_CALLCVT(FLAGS) \
  ((FLAGS) & (IX86_CALLCVT_CDECL | IX86_CALLCVT_STDCALL \
              | IX86_CALLCVT_FASTCALL | IX86_CALLCVT_THISCALL))

#define RECIP_MASK_NONE         0x00
#define RECIP_MASK_DIV          0x01
#define RECIP_MASK_SQRT         0x02
#define RECIP_MASK_VEC_DIV      0x04
#define RECIP_MASK_VEC_SQRT     0x08
#define RECIP_MASK_ALL  (RECIP_MASK_DIV | RECIP_MASK_SQRT \
                         | RECIP_MASK_VEC_DIV | RECIP_MASK_VEC_SQRT)
#define RECIP_MASK_DEFAULT (RECIP_MASK_VEC_DIV | RECIP_MASK_VEC_SQRT)
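
/* For example, RECIP_MASK_DEFAULT is 0x04 | 0x08 == 0x0c, i.e. only the
   vector division and vector square-root reciprocal forms are enabled
   by it, as the TARGET_RECIP_* tests below show.  */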

#define TARGET_RECIP_DIV        ((recip_mask & RECIP_MASK_DIV) != 0)
#define TARGET_RECIP_SQRT       ((recip_mask & RECIP_MASK_SQRT) != 0)
#define TARGET_RECIP_VEC_DIV    ((recip_mask & RECIP_MASK_VEC_DIV) != 0)
#define TARGET_RECIP_VEC_SQRT   ((recip_mask & RECIP_MASK_VEC_SQRT) != 0)

/*
Local variables:
version-control: t
End:
*/