/* gcc/config/arm/arm.c from gcc-4.5.1 (tag gcc-4.5.1-or32-1.0rc2), OpenCores
   OpenRISC Subversion repository
   (https://opencores.org/ocsvn/openrisc_2011-10-31), rev 384.  */
/* Output routines for GCC for ARM.
   Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
   and Martin Simmons (@harleqn.co.uk).
   More major hacks by Richard Earnshaw (rearnsha@arm.com).

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "obstack.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "reload.h"
#include "function.h"
#include "expr.h"
#include "optabs.h"
#include "toplev.h"
#include "recog.h"
#include "cgraph.h"
#include "ggc.h"
#include "except.h"
#include "c-pragma.h"
#include "integrate.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "debug.h"
#include "langhooks.h"
#include "df.h"
#include "intl.h"
#include "libfuncs.h"

/* Forward definitions of types.  */
typedef struct minipool_node    Mnode;
typedef struct minipool_fixup   Mfix;

void (*arm_lang_output_object_attributes_hook)(void);

/* Forward function declarations.  */
static int arm_compute_static_chain_stack_bytes (void);
static arm_stack_offsets *arm_get_frame_offsets (void);
static void arm_add_gc_roots (void);
static int arm_gen_constant (enum rtx_code, enum machine_mode, rtx,
                             HOST_WIDE_INT, rtx, rtx, int, int);
static unsigned bit_count (unsigned long);
static int arm_address_register_rtx_p (rtx, int);
static int arm_legitimate_index_p (enum machine_mode, rtx, RTX_CODE, int);
static int thumb2_legitimate_index_p (enum machine_mode, rtx, int);
static int thumb1_base_register_rtx_p (rtx, enum machine_mode, int);
static rtx arm_legitimize_address (rtx, rtx, enum machine_mode);
static rtx thumb_legitimize_address (rtx, rtx, enum machine_mode);
inline static int thumb1_index_register_rtx_p (rtx, int);
static bool arm_legitimate_address_p (enum machine_mode, rtx, bool);
static int thumb_far_jump_used_p (void);
static bool thumb_force_lr_save (void);
static int const_ok_for_op (HOST_WIDE_INT, enum rtx_code);
static rtx emit_sfm (int, int);
static unsigned arm_size_return_regs (void);
static bool arm_assemble_integer (rtx, unsigned int, int);
static const char *fp_const_from_val (REAL_VALUE_TYPE *);
static arm_cc get_arm_condition_code (rtx);
static HOST_WIDE_INT int_log2 (HOST_WIDE_INT);
static rtx is_jump_table (rtx);
static const char *output_multi_immediate (rtx *, const char *, const char *,
                                           int, HOST_WIDE_INT);
static const char *shift_op (rtx, HOST_WIDE_INT *);
static struct machine_function *arm_init_machine_status (void);
static void thumb_exit (FILE *, int);
static HOST_WIDE_INT get_jump_table_size (rtx);
static Mnode *move_minipool_fix_forward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_forward_ref (Mfix *);
static Mnode *move_minipool_fix_backward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_backward_ref (Mfix *);
static void assign_minipool_offsets (Mfix *);
static void arm_print_value (FILE *, rtx);
static void dump_minipool (rtx);
static int arm_barrier_cost (rtx);
static Mfix *create_fix_barrier (Mfix *, HOST_WIDE_INT);
static void push_minipool_barrier (rtx, HOST_WIDE_INT);
static void push_minipool_fix (rtx, HOST_WIDE_INT, rtx *, enum machine_mode,
                               rtx);
static void arm_reorg (void);
static bool note_invalid_constants (rtx, HOST_WIDE_INT, int);
static unsigned long arm_compute_save_reg0_reg12_mask (void);
static unsigned long arm_compute_save_reg_mask (void);
static unsigned long arm_isr_value (tree);
static unsigned long arm_compute_func_type (void);
static tree arm_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
static tree arm_handle_pcs_attribute (tree *, tree, tree, int, bool *);
static tree arm_handle_isr_attribute (tree *, tree, tree, int, bool *);
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
static tree arm_handle_notshared_attribute (tree *, tree, tree, int, bool *);
#endif
static void arm_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void arm_output_function_prologue (FILE *, HOST_WIDE_INT);
static void thumb1_output_function_prologue (FILE *, HOST_WIDE_INT);
static int arm_comp_type_attributes (const_tree, const_tree);
static void arm_set_default_type_attributes (tree);
static int arm_adjust_cost (rtx, rtx, rtx, int);
static int count_insns_for_constant (HOST_WIDE_INT, int);
static int arm_get_strip_length (int);
static bool arm_function_ok_for_sibcall (tree, tree);
static enum machine_mode arm_promote_function_mode (const_tree,
                                                    enum machine_mode, int *,
                                                    const_tree, int);
static bool arm_return_in_memory (const_tree, const_tree);
static rtx arm_function_value (const_tree, const_tree, bool);
static rtx arm_libcall_value (enum machine_mode, const_rtx);

static void arm_internal_label (FILE *, const char *, unsigned long);
static void arm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
                                 tree);
static bool arm_have_conditional_execution (void);
static bool arm_rtx_costs_1 (rtx, enum rtx_code, int*, bool);
static bool arm_size_rtx_costs (rtx, enum rtx_code, enum rtx_code, int *);
static bool arm_slowmul_rtx_costs (rtx, enum rtx_code, enum rtx_code, int *, bool);
static bool arm_fastmul_rtx_costs (rtx, enum rtx_code, enum rtx_code, int *, bool);
static bool arm_xscale_rtx_costs (rtx, enum rtx_code, enum rtx_code, int *, bool);
static bool arm_9e_rtx_costs (rtx, enum rtx_code, enum rtx_code, int *, bool);
static bool arm_rtx_costs (rtx, int, int, int *, bool);
static int arm_address_cost (rtx, bool);
static bool arm_memory_load_p (rtx);
static bool arm_cirrus_insn_p (rtx);
static void cirrus_reorg (rtx);
static void arm_init_builtins (void);
static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void arm_init_iwmmxt_builtins (void);
static rtx safe_vector_operand (rtx, enum machine_mode);
static rtx arm_expand_binop_builtin (enum insn_code, tree, rtx);
static rtx arm_expand_unop_builtin (enum insn_code, tree, rtx, int);
static void emit_constant_insn (rtx cond, rtx pattern);
static rtx emit_set_insn (rtx, rtx);
static int arm_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
                                  tree, bool);
static rtx aapcs_allocate_return_reg (enum machine_mode, const_tree,
                                      const_tree);
static int aapcs_select_return_coproc (const_tree, const_tree);

#ifdef OBJECT_FORMAT_ELF
static void arm_elf_asm_constructor (rtx, int) ATTRIBUTE_UNUSED;
static void arm_elf_asm_destructor (rtx, int) ATTRIBUTE_UNUSED;
#endif
#ifndef ARM_PE
static void arm_encode_section_info (tree, rtx, int);
#endif

static void arm_file_end (void);
static void arm_file_start (void);

static void arm_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
                                        tree, int *, int);
static bool arm_pass_by_reference (CUMULATIVE_ARGS *,
                                   enum machine_mode, const_tree, bool);
static bool arm_promote_prototypes (const_tree);
static bool arm_default_short_enums (void);
static bool arm_align_anon_bitfield (void);
static bool arm_return_in_msb (const_tree);
static bool arm_must_pass_in_stack (enum machine_mode, const_tree);
#ifdef TARGET_UNWIND_INFO
static void arm_unwind_emit (FILE *, rtx);
static bool arm_output_ttype (rtx);
#endif
static void arm_dwarf_handle_frame_unspec (const char *, rtx, int);
static rtx arm_dwarf_register_span (rtx);

static tree arm_cxx_guard_type (void);
static bool arm_cxx_guard_mask_bit (void);
static tree arm_get_cookie_size (tree);
static bool arm_cookie_has_size (void);
static bool arm_cxx_cdtor_returns_this (void);
static bool arm_cxx_key_method_may_be_inline (void);
static void arm_cxx_determine_class_data_visibility (tree);
static bool arm_cxx_class_data_always_comdat (void);
static bool arm_cxx_use_aeabi_atexit (void);
static void arm_init_libfuncs (void);
static tree arm_build_builtin_va_list (void);
static void arm_expand_builtin_va_start (tree, rtx);
static tree arm_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
static bool arm_handle_option (size_t, const char *, int);
static void arm_target_help (void);
static unsigned HOST_WIDE_INT arm_shift_truncation_mask (enum machine_mode);
static bool arm_cannot_copy_insn_p (rtx);
static bool arm_tls_symbol_p (rtx x);
static int arm_issue_rate (void);
static void arm_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static bool arm_allocate_stack_slots_for_args (void);
static const char *arm_invalid_parameter_type (const_tree t);
static const char *arm_invalid_return_type (const_tree t);
static tree arm_promoted_type (const_tree t);
static tree arm_convert_to_type (tree type, tree expr);
static bool arm_scalar_mode_supported_p (enum machine_mode);
static bool arm_frame_pointer_required (void);
static bool arm_can_eliminate (const int, const int);
static void arm_asm_trampoline_template (FILE *);
static void arm_trampoline_init (rtx, tree, rtx);
static rtx arm_trampoline_adjust_address (rtx);


/* Table of machine attributes.  */
static const struct attribute_spec arm_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  /* Function calls made to this symbol must be done indirectly, because
     it may lie outside of the 26-bit addressing range of a normal function
     call.  */
  { "long_call",    0, 0, false, true,  true,  NULL },
  /* Whereas these functions are always known to reside within the 26-bit
     addressing range.  */
  { "short_call",   0, 0, false, true,  true,  NULL },
  /* Specify the procedure call conventions for a function.  */
  { "pcs",          1, 1, false, true,  true,  arm_handle_pcs_attribute },
  /* Interrupt Service Routines have special prologue and epilogue
     requirements.  */
  { "isr",          0, 1, false, false, false, arm_handle_isr_attribute },
  { "interrupt",    0, 1, false, false, false, arm_handle_isr_attribute },
  { "naked",        0, 0, true,  false, false, arm_handle_fndecl_attribute },
#ifdef ARM_PE
  /* ARM/PE has three new attributes:
     interfacearm - ?
     dllexport - for exporting a function/variable that will live in a dll
     dllimport - for importing a function/variable from a dll

     Microsoft allows multiple declspecs in one __declspec, separating
     them with spaces.  We do NOT support this.  Instead, use __declspec
     multiple times.
  */
  { "dllimport",    0, 0, true,  false, false, NULL },
  { "dllexport",    0, 0, true,  false, false, NULL },
  { "interfacearm", 0, 0, true,  false, false, arm_handle_fndecl_attribute },
#elif TARGET_DLLIMPORT_DECL_ATTRIBUTES
  { "dllimport",    0, 0, false, false, false, handle_dll_attribute },
  { "dllexport",    0, 0, false, false, false, handle_dll_attribute },
  { "notshared",    0, 0, false, true,  false, arm_handle_notshared_attribute },
#endif
  { NULL,           0, 0, false, false, false, NULL }
};
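
/* A usage sketch (the function names here are hypothetical, not part of
   this file):

     void far_func (void) __attribute__ ((long_call));
     void irq_handler (void) __attribute__ ((interrupt ("IRQ")));

   "isr"/"interrupt" accept at most one argument (the interrupt kind),
   matching the 0/1 min/max lengths in the table above.  */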

/* Initialize the GCC target structure.  */
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef  TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS arm_legitimize_address

#undef  TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE arm_attribute_table

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START arm_file_start
#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END arm_file_end

#undef  TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP NULL
#undef  TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER arm_assemble_integer

#undef  TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE arm_output_function_prologue

#undef  TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE arm_output_function_epilogue

#undef  TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | MASK_SCHED_PROLOG)
#undef  TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION arm_handle_option
#undef  TARGET_HELP
#define TARGET_HELP arm_target_help

#undef  TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES arm_comp_type_attributes

#undef  TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES arm_set_default_type_attributes

#undef  TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST arm_adjust_cost

#undef TARGET_ENCODE_SECTION_INFO
#ifdef ARM_PE
#define TARGET_ENCODE_SECTION_INFO  arm_pe_encode_section_info
#else
#define TARGET_ENCODE_SECTION_INFO  arm_encode_section_info
#endif

#undef  TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING arm_strip_name_encoding

#undef  TARGET_ASM_INTERNAL_LABEL
#define TARGET_ASM_INTERNAL_LABEL arm_internal_label

#undef  TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL arm_function_ok_for_sibcall

#undef  TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE arm_function_value

#undef  TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE arm_libcall_value

#undef  TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK arm_output_mi_thunk
#undef  TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

#undef  TARGET_RTX_COSTS
#define TARGET_RTX_COSTS arm_rtx_costs
#undef  TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST arm_address_cost

#undef TARGET_SHIFT_TRUNCATION_MASK
#define TARGET_SHIFT_TRUNCATION_MASK arm_shift_truncation_mask
#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P arm_vector_mode_supported_p

#undef  TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG arm_reorg

#undef  TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS  arm_init_builtins
#undef  TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN arm_expand_builtin

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS arm_init_libfuncs

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE arm_promote_function_mode
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES arm_promote_prototypes
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE arm_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES arm_arg_partial_bytes

#undef  TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS arm_setup_incoming_varargs

#undef TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS
#define TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS arm_allocate_stack_slots_for_args

#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE arm_asm_trampoline_template
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT arm_trampoline_init
#undef TARGET_TRAMPOLINE_ADJUST_ADDRESS
#define TARGET_TRAMPOLINE_ADJUST_ADDRESS arm_trampoline_adjust_address

#undef TARGET_DEFAULT_SHORT_ENUMS
#define TARGET_DEFAULT_SHORT_ENUMS arm_default_short_enums

#undef TARGET_ALIGN_ANON_BITFIELD
#define TARGET_ALIGN_ANON_BITFIELD arm_align_anon_bitfield

#undef TARGET_NARROW_VOLATILE_BITFIELD
#define TARGET_NARROW_VOLATILE_BITFIELD hook_bool_void_false

#undef TARGET_CXX_GUARD_TYPE
#define TARGET_CXX_GUARD_TYPE arm_cxx_guard_type

#undef TARGET_CXX_GUARD_MASK_BIT
#define TARGET_CXX_GUARD_MASK_BIT arm_cxx_guard_mask_bit

#undef TARGET_CXX_GET_COOKIE_SIZE
#define TARGET_CXX_GET_COOKIE_SIZE arm_get_cookie_size

#undef TARGET_CXX_COOKIE_HAS_SIZE
#define TARGET_CXX_COOKIE_HAS_SIZE arm_cookie_has_size

#undef TARGET_CXX_CDTOR_RETURNS_THIS
#define TARGET_CXX_CDTOR_RETURNS_THIS arm_cxx_cdtor_returns_this

#undef TARGET_CXX_KEY_METHOD_MAY_BE_INLINE
#define TARGET_CXX_KEY_METHOD_MAY_BE_INLINE arm_cxx_key_method_may_be_inline

#undef TARGET_CXX_USE_AEABI_ATEXIT
#define TARGET_CXX_USE_AEABI_ATEXIT arm_cxx_use_aeabi_atexit

#undef TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY
#define TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY \
  arm_cxx_determine_class_data_visibility

#undef TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT
#define TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT arm_cxx_class_data_always_comdat

#undef TARGET_RETURN_IN_MSB
#define TARGET_RETURN_IN_MSB arm_return_in_msb

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY arm_return_in_memory

#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK arm_must_pass_in_stack

#ifdef TARGET_UNWIND_INFO
#undef TARGET_UNWIND_EMIT
#define TARGET_UNWIND_EMIT arm_unwind_emit

/* EABI unwinding tables use a different format for the typeinfo tables.  */
#undef TARGET_ASM_TTYPE
#define TARGET_ASM_TTYPE arm_output_ttype

#undef TARGET_ARM_EABI_UNWINDER
#define TARGET_ARM_EABI_UNWINDER true
#endif /* TARGET_UNWIND_INFO */

#undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
#define TARGET_DWARF_HANDLE_FRAME_UNSPEC arm_dwarf_handle_frame_unspec

#undef TARGET_DWARF_REGISTER_SPAN
#define TARGET_DWARF_REGISTER_SPAN arm_dwarf_register_span

#undef  TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P arm_cannot_copy_insn_p

#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef TARGET_HAVE_CONDITIONAL_EXECUTION
#define TARGET_HAVE_CONDITIONAL_EXECUTION arm_have_conditional_execution

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM arm_cannot_force_const_mem

#undef TARGET_MAX_ANCHOR_OFFSET
#define TARGET_MAX_ANCHOR_OFFSET 4095

/* The minimum is set such that the total size of the block
   for a particular anchor is 4088 + 1 + 4095 = 8184 bytes, which is
   divisible by eight, ensuring natural spacing of anchors.  */
#undef TARGET_MIN_ANCHOR_OFFSET
#define TARGET_MIN_ANCHOR_OFFSET -4088

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE arm_issue_rate

#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE arm_mangle_type

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST arm_build_builtin_va_list
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START arm_expand_builtin_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR arm_gimplify_va_arg_expr

#ifdef HAVE_AS_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL arm_output_dwarf_dtprel
#endif

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P     arm_legitimate_address_p

#undef TARGET_INVALID_PARAMETER_TYPE
#define TARGET_INVALID_PARAMETER_TYPE arm_invalid_parameter_type

#undef TARGET_INVALID_RETURN_TYPE
#define TARGET_INVALID_RETURN_TYPE arm_invalid_return_type

#undef TARGET_PROMOTED_TYPE
#define TARGET_PROMOTED_TYPE arm_promoted_type

#undef TARGET_CONVERT_TO_TYPE
#define TARGET_CONVERT_TO_TYPE arm_convert_to_type

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P arm_scalar_mode_supported_p

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED arm_frame_pointer_required

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE arm_can_eliminate

struct gcc_target targetm = TARGET_INITIALIZER;

/* Obstack for minipool constant handling.  */
static struct obstack minipool_obstack;
static char *         minipool_startobj;

/* The maximum number of insns skipped which
   will be conditionalised if possible.  */
static int max_insns_skipped = 5;

extern FILE * asm_out_file;

/* True if we are currently building a constant table.  */
int making_const_table;

/* The processor for which instructions should be scheduled.  */
enum processor_type arm_tune = arm_none;

/* The default processor used if not overridden by commandline.  */
static enum processor_type arm_default_cpu = arm_none;

/* Which floating point hardware to schedule for.  */
int arm_fpu_attr;

/* Which floating point hardware to use.  */
const struct arm_fpu_desc *arm_fpu_desc;

/* Whether to use floating point hardware.  */
enum float_abi_type arm_float_abi;

/* Which __fp16 format to use.  */
enum arm_fp16_format_type arm_fp16_format;

/* Which ABI to use.  */
enum arm_abi_type arm_abi;

/* Which thread pointer model to use.  */
enum arm_tp_type target_thread_pointer = TP_AUTO;

/* Used to parse -mstructure_size_boundary command line option.  */
int    arm_structure_size_boundary = DEFAULT_STRUCTURE_SIZE_BOUNDARY;

/* Used for Thumb call_via trampolines.  */
rtx thumb_call_via_label[14];
static int thumb_call_reg_needed;

/* Bit values used to identify processor capabilities.  */
#define FL_CO_PROC    (1 << 0)        /* Has external co-processor bus */
#define FL_ARCH3M     (1 << 1)        /* Extended multiply */
#define FL_MODE26     (1 << 2)        /* 26-bit mode support */
#define FL_MODE32     (1 << 3)        /* 32-bit mode support */
#define FL_ARCH4      (1 << 4)        /* Architecture rel 4 */
#define FL_ARCH5      (1 << 5)        /* Architecture rel 5 */
#define FL_THUMB      (1 << 6)        /* Thumb aware */
#define FL_LDSCHED    (1 << 7)        /* Load scheduling necessary */
#define FL_STRONG     (1 << 8)        /* StrongARM */
#define FL_ARCH5E     (1 << 9)        /* DSP extensions to v5 */
#define FL_XSCALE     (1 << 10)       /* XScale */
#define FL_CIRRUS     (1 << 11)       /* Cirrus/DSP.  */
#define FL_ARCH6      (1 << 12)       /* Architecture rel 6.  Adds
                                         media instructions.  */
#define FL_VFPV2      (1 << 13)       /* Vector Floating Point V2.  */
#define FL_WBUF       (1 << 14)       /* Schedule for write buffer ops.
                                         Note: ARM6 & 7 derivatives only.  */
#define FL_ARCH6K     (1 << 15)       /* Architecture rel 6 K extensions.  */
#define FL_THUMB2     (1 << 16)       /* Thumb-2.  */
#define FL_NOTM       (1 << 17)       /* Instructions not present in the 'M'
                                         profile.  */
#define FL_DIV        (1 << 18)       /* Hardware divide.  */
#define FL_VFPV3      (1 << 19)       /* Vector Floating Point V3.  */
#define FL_NEON       (1 << 20)       /* Neon instructions.  */
#define FL_ARCH7EM    (1 << 21)       /* Instructions present in the ARMv7E-M
                                         architecture.  */

#define FL_IWMMXT     (1 << 29)       /* XScale v2 or "Intel Wireless MMX technology".  */

#define FL_FOR_ARCH2    FL_NOTM
#define FL_FOR_ARCH3    (FL_FOR_ARCH2 | FL_MODE32)
#define FL_FOR_ARCH3M   (FL_FOR_ARCH3 | FL_ARCH3M)
#define FL_FOR_ARCH4    (FL_FOR_ARCH3M | FL_ARCH4)
#define FL_FOR_ARCH4T   (FL_FOR_ARCH4 | FL_THUMB)
#define FL_FOR_ARCH5    (FL_FOR_ARCH4 | FL_ARCH5)
#define FL_FOR_ARCH5T   (FL_FOR_ARCH5 | FL_THUMB)
#define FL_FOR_ARCH5E   (FL_FOR_ARCH5 | FL_ARCH5E)
#define FL_FOR_ARCH5TE  (FL_FOR_ARCH5E | FL_THUMB)
#define FL_FOR_ARCH5TEJ FL_FOR_ARCH5TE
#define FL_FOR_ARCH6    (FL_FOR_ARCH5TE | FL_ARCH6)
#define FL_FOR_ARCH6J   FL_FOR_ARCH6
#define FL_FOR_ARCH6K   (FL_FOR_ARCH6 | FL_ARCH6K)
#define FL_FOR_ARCH6Z   FL_FOR_ARCH6
#define FL_FOR_ARCH6ZK  FL_FOR_ARCH6K
#define FL_FOR_ARCH6T2  (FL_FOR_ARCH6 | FL_THUMB2)
#define FL_FOR_ARCH6M   (FL_FOR_ARCH6 & ~FL_NOTM)
#define FL_FOR_ARCH7    (FL_FOR_ARCH6T2 & ~FL_NOTM)
#define FL_FOR_ARCH7A   (FL_FOR_ARCH7 | FL_NOTM | FL_ARCH6K)
#define FL_FOR_ARCH7R   (FL_FOR_ARCH7A | FL_DIV)
#define FL_FOR_ARCH7M   (FL_FOR_ARCH7 | FL_DIV)
#define FL_FOR_ARCH7EM  (FL_FOR_ARCH7M | FL_ARCH7EM)
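
/* Each FL_FOR_ARCHn value builds on its predecessor (the M-profile
   entries also strip FL_NOTM); for example, FL_FOR_ARCH5TE expands to
   (FL_NOTM | FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_ARCH5 | FL_ARCH5E
    | FL_THUMB).  */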

/* The bits in this mask specify which
   instructions we are allowed to generate.  */
static unsigned long insn_flags = 0;

/* The bits in this mask specify which instruction scheduling options should
   be used.  */
static unsigned long tune_flags = 0;

/* The following are used in the arm.md file as equivalents to bits
   in the above two flag variables.  */

/* Nonzero if this chip supports the ARM Architecture 3M extensions.  */
int arm_arch3m = 0;

/* Nonzero if this chip supports the ARM Architecture 4 extensions.  */
int arm_arch4 = 0;

/* Nonzero if this chip supports the ARM Architecture 4t extensions.  */
int arm_arch4t = 0;

/* Nonzero if this chip supports the ARM Architecture 5 extensions.  */
int arm_arch5 = 0;

/* Nonzero if this chip supports the ARM Architecture 5E extensions.  */
int arm_arch5e = 0;

/* Nonzero if this chip supports the ARM Architecture 6 extensions.  */
int arm_arch6 = 0;

/* Nonzero if this chip supports the ARM 6K extensions.  */
int arm_arch6k = 0;

/* Nonzero if instructions not present in the 'M' profile can be used.  */
int arm_arch_notm = 0;

/* Nonzero if instructions present in ARMv7E-M can be used.  */
int arm_arch7em = 0;

/* Nonzero if this chip can benefit from load scheduling.  */
int arm_ld_sched = 0;

/* Nonzero if this chip is a StrongARM.  */
int arm_tune_strongarm = 0;

/* Nonzero if this chip is a Cirrus variant.  */
int arm_arch_cirrus = 0;

/* Nonzero if this chip supports Intel Wireless MMX technology.  */
int arm_arch_iwmmxt = 0;

/* Nonzero if this chip is an XScale.  */
int arm_arch_xscale = 0;

/* Nonzero if tuning for XScale.  */
int arm_tune_xscale = 0;

/* Nonzero if we want to tune for stores that access the write-buffer.
   This typically means an ARM6 or ARM7 with MMU or MPU.  */
int arm_tune_wbuf = 0;

/* Nonzero if tuning for Cortex-A9.  */
int arm_tune_cortex_a9 = 0;

/* Nonzero if generating Thumb instructions.  */
int thumb_code = 0;

/* Nonzero if we should define __THUMB_INTERWORK__ in the
   preprocessor.
   XXX This is a bit of a hack; it's intended to help work around
   problems in GLD, which doesn't understand that armv5t code is
   interworking clean.  */
int arm_cpp_interwork = 0;

/* Nonzero if chip supports Thumb 2.  */
int arm_arch_thumb2;

/* Nonzero if chip supports integer division instruction.  */
int arm_arch_hwdiv;

/* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
   must report the mode of the memory reference from PRINT_OPERAND to
   PRINT_OPERAND_ADDRESS.  */
enum machine_mode output_memory_reference_mode;

/* The register number to be used for the PIC offset register.  */
unsigned arm_pic_register = INVALID_REGNUM;

/* Set to 1 after arm_reorg has started.  Reset to zero at the start of
   the next function.  */
static int after_arm_reorg = 0;

/* The maximum number of insns to be used when loading a constant.  */
static int arm_constant_limit = 3;

static enum arm_pcs arm_pcs_default;

/* For an explanation of these variables, see final_prescan_insn below.  */
int arm_ccfsm_state;
/* arm_current_cc is also used for Thumb-2 cond_exec blocks.  */
enum arm_cond_code arm_current_cc;
rtx arm_target_insn;
int arm_target_label;
/* The number of conditionally executed insns, including the current insn.  */
int arm_condexec_count = 0;
/* A bitmask specifying the patterns for the IT block.
   Zero means do not output an IT block before this insn.  */
int arm_condexec_mask = 0;
/* The number of bits used in arm_condexec_mask.  */
int arm_condexec_masklen = 0;

/* The condition codes of the ARM, and the inverse function.  */
static const char * const arm_condition_codes[] =
{
  "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
  "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
};
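
/* The table is ordered so that a condition's inverse is found by
   toggling the low bit of its index: "eq" (0) pairs with "ne" (1),
   "cs" (2) with "cc" (3), and so on, matching the ARM condition-code
   encoding.  */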

#define ARM_LSL_NAME (TARGET_UNIFIED_ASM ? "lsl" : "asl")
#define streq(string1, string2) (strcmp (string1, string2) == 0)

#define THUMB2_WORK_REGS (0xff & ~(  (1 << THUMB_HARD_FRAME_POINTER_REGNUM) \
                                   | (1 << SP_REGNUM) | (1 << PC_REGNUM) \
                                   | (1 << PIC_OFFSET_TABLE_REGNUM)))

/* Initialization code.  */

struct processors
{
  const char *const name;
  enum processor_type core;
  const char *arch;
  const unsigned long flags;
  bool (* rtx_costs) (rtx, enum rtx_code, enum rtx_code, int *, bool);
};

/* Not all of these give usefully different compilation alternatives,
   but there is no simple way of generalizing them.  */
static const struct processors all_cores[] =
{
  /* ARM Cores */
#define ARM_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
  {NAME, arm_none, #ARCH, FLAGS | FL_FOR_ARCH##ARCH, arm_##COSTS##_rtx_costs},
#include "arm-cores.def"
#undef ARM_CORE
  {NULL, arm_none, NULL, 0, NULL}
};

static const struct processors all_architectures[] =
{
  /* ARM Architectures */
  /* We don't specify rtx_costs here as it will be figured out
     from the core.  */

  {"armv2",   arm2,       "2",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
  {"armv2a",  arm2,       "2",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
  {"armv3",   arm6,       "3",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3, NULL},
  {"armv3m",  arm7m,      "3M",  FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3M, NULL},
  {"armv4",   arm7tdmi,   "4",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH4, NULL},
  /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
     implementations that support it, so we will leave it out for now.  */
  {"armv4t",  arm7tdmi,   "4T",  FL_CO_PROC |             FL_FOR_ARCH4T, NULL},
  {"armv5",   arm10tdmi,  "5",   FL_CO_PROC |             FL_FOR_ARCH5, NULL},
  {"armv5t",  arm10tdmi,  "5T",  FL_CO_PROC |             FL_FOR_ARCH5T, NULL},
  {"armv5e",  arm1026ejs, "5E",  FL_CO_PROC |             FL_FOR_ARCH5E, NULL},
  {"armv5te", arm1026ejs, "5TE", FL_CO_PROC |             FL_FOR_ARCH5TE, NULL},
  {"armv6",   arm1136js,  "6",   FL_CO_PROC |             FL_FOR_ARCH6, NULL},
  {"armv6j",  arm1136js,  "6J",  FL_CO_PROC |             FL_FOR_ARCH6J, NULL},
  {"armv6k",  mpcore,     "6K",  FL_CO_PROC |             FL_FOR_ARCH6K, NULL},
  {"armv6z",  arm1176jzs, "6Z",  FL_CO_PROC |             FL_FOR_ARCH6Z, NULL},
  {"armv6zk", arm1176jzs, "6ZK", FL_CO_PROC |             FL_FOR_ARCH6ZK, NULL},
  {"armv6t2", arm1156t2s, "6T2", FL_CO_PROC |             FL_FOR_ARCH6T2, NULL},
  {"armv6-m", cortexm1,   "6M",                           FL_FOR_ARCH6M, NULL},
  {"armv7",   cortexa8,   "7",   FL_CO_PROC |             FL_FOR_ARCH7, NULL},
  {"armv7-a", cortexa8,   "7A",  FL_CO_PROC |             FL_FOR_ARCH7A, NULL},
  {"armv7-r", cortexr4,   "7R",  FL_CO_PROC |             FL_FOR_ARCH7R, NULL},
  {"armv7-m", cortexm3,   "7M",  FL_CO_PROC |             FL_FOR_ARCH7M, NULL},
  {"armv7e-m", cortexm3,  "7EM", FL_CO_PROC |             FL_FOR_ARCH7EM, NULL},
  {"ep9312",  ep9312,     "4T",  FL_LDSCHED | FL_CIRRUS | FL_FOR_ARCH4, NULL},
  {"iwmmxt",  iwmmxt,     "5TE", FL_LDSCHED | FL_STRONG | FL_FOR_ARCH5TE | FL_XSCALE | FL_IWMMXT, NULL},
  {"iwmmxt2", iwmmxt2,    "5TE", FL_LDSCHED | FL_STRONG | FL_FOR_ARCH5TE | FL_XSCALE | FL_IWMMXT, NULL},
  {NULL, arm_none, NULL, 0, NULL}
};

struct arm_cpu_select
{
  const char *              string;
  const char *              name;
  const struct processors * processors;
};

/* This is a magic structure.  The 'string' field is magically filled in
   with a pointer to the value specified by the user on the command line,
   assuming that the user has specified such a value.  */

static struct arm_cpu_select arm_select[] =
{
  /* string       name            processors  */
  { NULL,       "-mcpu=",       all_cores  },
  { NULL,       "-march=",      all_architectures },
  { NULL,       "-mtune=",      all_cores }
};

/* Defines representing the indexes into the above table.  */
#define ARM_OPT_SET_CPU 0
#define ARM_OPT_SET_ARCH 1
#define ARM_OPT_SET_TUNE 2

/* The name of the preprocessor macro to define for this architecture.  */

char arm_arch_name[] = "__ARM_ARCH_0UNK__";

/* Available values for -mfpu=.  */

static const struct arm_fpu_desc all_fpus[] =
{
  {"fpa",               ARM_FP_MODEL_FPA, 0, VFP_NONE, false, false},
  {"fpe2",              ARM_FP_MODEL_FPA, 2, VFP_NONE, false, false},
  {"fpe3",              ARM_FP_MODEL_FPA, 3, VFP_NONE, false, false},
  {"maverick",          ARM_FP_MODEL_MAVERICK, 0, VFP_NONE, false, false},
  {"vfp",               ARM_FP_MODEL_VFP, 2, VFP_REG_D16, false, false},
  {"vfpv3",             ARM_FP_MODEL_VFP, 3, VFP_REG_D32, false, false},
  {"vfpv3-fp16",        ARM_FP_MODEL_VFP, 3, VFP_REG_D32, false, true},
  {"vfpv3-d16",         ARM_FP_MODEL_VFP, 3, VFP_REG_D16, false, false},
  {"vfpv3-d16-fp16",    ARM_FP_MODEL_VFP, 3, VFP_REG_D16, false, true},
  {"vfpv3xd",           ARM_FP_MODEL_VFP, 3, VFP_REG_SINGLE, false, false},
  {"vfpv3xd-fp16",      ARM_FP_MODEL_VFP, 3, VFP_REG_SINGLE, false, true},
  {"neon",              ARM_FP_MODEL_VFP, 3, VFP_REG_D32, true,  false},
  {"neon-fp16",         ARM_FP_MODEL_VFP, 3, VFP_REG_D32, true,  true},
  {"vfpv4",             ARM_FP_MODEL_VFP, 4, VFP_REG_D32, false, true},
  {"vfpv4-d16",         ARM_FP_MODEL_VFP, 4, VFP_REG_D16, false, true},
  {"fpv4-sp-d16",       ARM_FP_MODEL_VFP, 4, VFP_REG_SINGLE, false, true},
  {"neon-vfpv4",        ARM_FP_MODEL_VFP, 4, VFP_REG_D32, true, true},
  /* Compatibility aliases.  */
  {"vfp3",              ARM_FP_MODEL_VFP, 3, VFP_REG_D32, false, false},
};
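
/* The initializer fields are: name, FP model, model revision, register
   layout, and whether NEON and half-precision extensions are present.
   For example, -mfpu=neon selects VFPv3 with the full 32 D-register
   bank plus NEON but no fp16 support.  */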


struct float_abi
{
  const char * name;
  enum float_abi_type abi_type;
};


/* Available values for -mfloat-abi=.  */

static const struct float_abi all_float_abis[] =
{
  {"soft",      ARM_FLOAT_ABI_SOFT},
  {"softfp",    ARM_FLOAT_ABI_SOFTFP},
  {"hard",      ARM_FLOAT_ABI_HARD}
};
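
/* "soft" does all FP in library calls, "softfp" permits FP instructions
   while keeping the soft-float calling convention, and "hard" also
   passes FP arguments in FP registers.  */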


struct fp16_format
{
  const char *name;
  enum arm_fp16_format_type fp16_format_type;
};


/* Available values for -mfp16-format=.  */

static const struct fp16_format all_fp16_formats[] =
{
  {"none",              ARM_FP16_FORMAT_NONE},
  {"ieee",              ARM_FP16_FORMAT_IEEE},
  {"alternative",       ARM_FP16_FORMAT_ALTERNATIVE}
};
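
/* "ieee" is the IEEE 754-2008 binary16 interchange format; the ARM
   "alternative" format instead reuses the infinity/NaN encodings to
   gain extra exponent range.  */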


struct abi_name
{
  const char *name;
  enum arm_abi_type abi_type;
};


/* Available values for -mabi=.  */

static const struct abi_name arm_all_abis[] =
{
  {"apcs-gnu",      ARM_ABI_APCS},
  {"atpcs",         ARM_ABI_ATPCS},
  {"aapcs",         ARM_ABI_AAPCS},
  {"iwmmxt",        ARM_ABI_IWMMXT},
  {"aapcs-linux",   ARM_ABI_AAPCS_LINUX}
};

/* Supported TLS relocations.  */

enum tls_reloc {
  TLS_GD32,
  TLS_LDM32,
  TLS_LDO32,
  TLS_IE32,
  TLS_LE32
};
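
/* These correspond to the usual ELF TLS access models: global dynamic
   (TLS_GD32), local dynamic (TLS_LDM32 for the module base, TLS_LDO32
   for per-symbol offsets within it), initial exec (TLS_IE32) and local
   exec (TLS_LE32).  */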

/* Emit an insn that's a simple single-set.  Both the operands must be known
   to be valid.  */
inline static rtx
emit_set_insn (rtx x, rtx y)
{
  return emit_insn (gen_rtx_SET (VOIDmode, x, y));
}
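
/* For example, emit_set_insn (reg, const0_rtx) emits and returns the
   insn (set (reg) (const_int 0)).  */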

/* Return the number of bits set in VALUE.  */
static unsigned
bit_count (unsigned long value)
{
  unsigned long count = 0;

  while (value)
    {
      count++;
      value &= value - 1;  /* Clear the least-significant set bit.  */
    }

  return count;
}
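
/* Each "value &= value - 1" step clears exactly one set bit
   (Kernighan's method), so e.g. bit_count (0x2c) takes three
   iterations (0x28, 0x20, 0) and returns 3.  */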

/* Set up library functions unique to ARM.  */

static void
arm_init_libfuncs (void)
{
  /* There are no special library functions unless we are using the
     ARM BPABI.  */
  if (!TARGET_BPABI)
    return;

  /* The functions below are described in Section 4 of the "Run-Time
     ABI for the ARM architecture", Version 1.0.  */

  /* Double-precision floating-point arithmetic.  Table 2.  */
  set_optab_libfunc (add_optab, DFmode, "__aeabi_dadd");
  set_optab_libfunc (sdiv_optab, DFmode, "__aeabi_ddiv");
  set_optab_libfunc (smul_optab, DFmode, "__aeabi_dmul");
  set_optab_libfunc (neg_optab, DFmode, "__aeabi_dneg");
  set_optab_libfunc (sub_optab, DFmode, "__aeabi_dsub");

  /* Double-precision comparisons.  Table 3.  */
  set_optab_libfunc (eq_optab, DFmode, "__aeabi_dcmpeq");
  set_optab_libfunc (ne_optab, DFmode, NULL);
  set_optab_libfunc (lt_optab, DFmode, "__aeabi_dcmplt");
  set_optab_libfunc (le_optab, DFmode, "__aeabi_dcmple");
  set_optab_libfunc (ge_optab, DFmode, "__aeabi_dcmpge");
  set_optab_libfunc (gt_optab, DFmode, "__aeabi_dcmpgt");
  set_optab_libfunc (unord_optab, DFmode, "__aeabi_dcmpun");

  /* Single-precision floating-point arithmetic.  Table 4.  */
  set_optab_libfunc (add_optab, SFmode, "__aeabi_fadd");
  set_optab_libfunc (sdiv_optab, SFmode, "__aeabi_fdiv");
  set_optab_libfunc (smul_optab, SFmode, "__aeabi_fmul");
  set_optab_libfunc (neg_optab, SFmode, "__aeabi_fneg");
  set_optab_libfunc (sub_optab, SFmode, "__aeabi_fsub");

  /* Single-precision comparisons.  Table 5.  */
  set_optab_libfunc (eq_optab, SFmode, "__aeabi_fcmpeq");
  set_optab_libfunc (ne_optab, SFmode, NULL);
  set_optab_libfunc (lt_optab, SFmode, "__aeabi_fcmplt");
  set_optab_libfunc (le_optab, SFmode, "__aeabi_fcmple");
  set_optab_libfunc (ge_optab, SFmode, "__aeabi_fcmpge");
  set_optab_libfunc (gt_optab, SFmode, "__aeabi_fcmpgt");
  set_optab_libfunc (unord_optab, SFmode, "__aeabi_fcmpun");

  /* Floating-point to integer conversions.  Table 6.  */
  set_conv_libfunc (sfix_optab, SImode, DFmode, "__aeabi_d2iz");
  set_conv_libfunc (ufix_optab, SImode, DFmode, "__aeabi_d2uiz");
  set_conv_libfunc (sfix_optab, DImode, DFmode, "__aeabi_d2lz");
  set_conv_libfunc (ufix_optab, DImode, DFmode, "__aeabi_d2ulz");
  set_conv_libfunc (sfix_optab, SImode, SFmode, "__aeabi_f2iz");
  set_conv_libfunc (ufix_optab, SImode, SFmode, "__aeabi_f2uiz");
  set_conv_libfunc (sfix_optab, DImode, SFmode, "__aeabi_f2lz");
  set_conv_libfunc (ufix_optab, DImode, SFmode, "__aeabi_f2ulz");

  /* Conversions between floating types.  Table 7.  */
  set_conv_libfunc (trunc_optab, SFmode, DFmode, "__aeabi_d2f");
  set_conv_libfunc (sext_optab, DFmode, SFmode, "__aeabi_f2d");

  /* Integer to floating-point conversions.  Table 8.  */
  set_conv_libfunc (sfloat_optab, DFmode, SImode, "__aeabi_i2d");
  set_conv_libfunc (ufloat_optab, DFmode, SImode, "__aeabi_ui2d");
  set_conv_libfunc (sfloat_optab, DFmode, DImode, "__aeabi_l2d");
  set_conv_libfunc (ufloat_optab, DFmode, DImode, "__aeabi_ul2d");
  set_conv_libfunc (sfloat_optab, SFmode, SImode, "__aeabi_i2f");
  set_conv_libfunc (ufloat_optab, SFmode, SImode, "__aeabi_ui2f");
  set_conv_libfunc (sfloat_optab, SFmode, DImode, "__aeabi_l2f");
  set_conv_libfunc (ufloat_optab, SFmode, DImode, "__aeabi_ul2f");

  /* Long long.  Table 9.  */
  set_optab_libfunc (smul_optab, DImode, "__aeabi_lmul");
  set_optab_libfunc (sdivmod_optab, DImode, "__aeabi_ldivmod");
  set_optab_libfunc (udivmod_optab, DImode, "__aeabi_uldivmod");
  set_optab_libfunc (ashl_optab, DImode, "__aeabi_llsl");
  set_optab_libfunc (lshr_optab, DImode, "__aeabi_llsr");
  set_optab_libfunc (ashr_optab, DImode, "__aeabi_lasr");
  set_optab_libfunc (cmp_optab, DImode, "__aeabi_lcmp");
  set_optab_libfunc (ucmp_optab, DImode, "__aeabi_ulcmp");

  /* Integer (32/32->32) division.  \S 4.3.1.  */
  set_optab_libfunc (sdivmod_optab, SImode, "__aeabi_idivmod");
  set_optab_libfunc (udivmod_optab, SImode, "__aeabi_uidivmod");

  /* The divmod functions are designed so that they can be used for
     plain division, even though they return both the quotient and the
     remainder.  The quotient is returned in the usual location (i.e.,
     r0 for SImode, {r0, r1} for DImode), just as would be expected
     for an ordinary division routine.  Because the AAPCS calling
     conventions specify that all of { r0, r1, r2, r3 } are
     call-clobbered registers, there is no need to tell the compiler
     explicitly that those registers are clobbered by these
     routines.  */
  set_optab_libfunc (sdiv_optab, DImode, "__aeabi_ldivmod");
  set_optab_libfunc (udiv_optab, DImode, "__aeabi_uldivmod");
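
  /* For example, a plain 64-bit "a / b" becomes a call to
     __aeabi_ldivmod; the quotient comes back in {r0, r1} and the
     (unused) remainder in {r2, r3}.  */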

  /* For SImode division the ABI provides div-without-mod routines,
     which are faster.  */
  set_optab_libfunc (sdiv_optab, SImode, "__aeabi_idiv");
  set_optab_libfunc (udiv_optab, SImode, "__aeabi_uidiv");

  /* We don't have mod libcalls.  Fortunately gcc knows how to use the
     divmod libcalls instead.  */
  set_optab_libfunc (smod_optab, DImode, NULL);
  set_optab_libfunc (umod_optab, DImode, NULL);
  set_optab_libfunc (smod_optab, SImode, NULL);
  set_optab_libfunc (umod_optab, SImode, NULL);

  /* Half-precision float operations.  The compiler handles all operations
     with NULL libfuncs by converting to SFmode.  */
  switch (arm_fp16_format)
    {
    case ARM_FP16_FORMAT_IEEE:
    case ARM_FP16_FORMAT_ALTERNATIVE:

      /* Conversions.  */
      set_conv_libfunc (trunc_optab, HFmode, SFmode,
                        (arm_fp16_format == ARM_FP16_FORMAT_IEEE
                         ? "__gnu_f2h_ieee"
                         : "__gnu_f2h_alternative"));
      set_conv_libfunc (sext_optab, SFmode, HFmode,
                        (arm_fp16_format == ARM_FP16_FORMAT_IEEE
                         ? "__gnu_h2f_ieee"
                         : "__gnu_h2f_alternative"));

      /* Arithmetic.  */
      set_optab_libfunc (add_optab, HFmode, NULL);
      set_optab_libfunc (sdiv_optab, HFmode, NULL);
      set_optab_libfunc (smul_optab, HFmode, NULL);
      set_optab_libfunc (neg_optab, HFmode, NULL);
      set_optab_libfunc (sub_optab, HFmode, NULL);

      /* Comparisons.  */
      set_optab_libfunc (eq_optab, HFmode, NULL);
      set_optab_libfunc (ne_optab, HFmode, NULL);
      set_optab_libfunc (lt_optab, HFmode, NULL);
      set_optab_libfunc (le_optab, HFmode, NULL);
      set_optab_libfunc (ge_optab, HFmode, NULL);
      set_optab_libfunc (gt_optab, HFmode, NULL);
      set_optab_libfunc (unord_optab, HFmode, NULL);
      break;

    default:
      break;
    }

  if (TARGET_AAPCS_BASED)
    synchronize_libfunc = init_one_libfunc ("__sync_synchronize");
}

/* On AAPCS systems, this is the "struct __va_list".  */
static GTY(()) tree va_list_type;

/* Return the type to use as __builtin_va_list.  */
static tree
arm_build_builtin_va_list (void)
{
  tree va_list_name;
  tree ap_field;

  if (!TARGET_AAPCS_BASED)
    return std_build_builtin_va_list ();

  /* AAPCS \S 7.1.4 requires that va_list be a typedef for a type
     defined as:

       struct __va_list
       {
         void *__ap;
       };

     The C Library ABI further reinforces this definition in \S
     4.1.

     We must follow this definition exactly.  The structure tag
     name is visible in C++ mangled names, and thus forms a part
     of the ABI.  The field name may be used by people who
     #include <stdarg.h>.  */
  /* Create the type.  */
  va_list_type = lang_hooks.types.make_type (RECORD_TYPE);
  /* Give it the required name.  */
  va_list_name = build_decl (BUILTINS_LOCATION,
                             TYPE_DECL,
                             get_identifier ("__va_list"),
                             va_list_type);
  DECL_ARTIFICIAL (va_list_name) = 1;
  TYPE_NAME (va_list_type) = va_list_name;
  /* Create the __ap field.  */
  ap_field = build_decl (BUILTINS_LOCATION,
                         FIELD_DECL,
                         get_identifier ("__ap"),
                         ptr_type_node);
  DECL_ARTIFICIAL (ap_field) = 1;
  DECL_FIELD_CONTEXT (ap_field) = va_list_type;
  TYPE_FIELDS (va_list_type) = ap_field;
  /* Compute its layout.  */
  layout_type (va_list_type);

  return va_list_type;
}
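
/* One visible consequence: a C++ function taking a va_list parameter
   mangles it as the class type __va_list (under the ARM C++ ABI this
   mangles as "St9__va_list", i.e. std::__va_list) rather than as a
   pointer type.  */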

/* Return an expression of type "void *" pointing to the next
   available argument in a variable-argument list.  VALIST is the
   user-level va_list object, of type __builtin_va_list.  */
static tree
arm_extract_valist_ptr (tree valist)
{
  if (TREE_TYPE (valist) == error_mark_node)
    return error_mark_node;

  /* On an AAPCS target, the pointer is stored within "struct
     va_list".  */
  if (TARGET_AAPCS_BASED)
    {
      tree ap_field = TYPE_FIELDS (TREE_TYPE (valist));
      valist = build3 (COMPONENT_REF, TREE_TYPE (ap_field),
                       valist, ap_field, NULL_TREE);
    }

  return valist;
}

/* Implement TARGET_EXPAND_BUILTIN_VA_START.  */
static void
arm_expand_builtin_va_start (tree valist, rtx nextarg)
{
  valist = arm_extract_valist_ptr (valist);
  std_expand_builtin_va_start (valist, nextarg);
}

/* Implement TARGET_GIMPLIFY_VA_ARG_EXPR.  */
static tree
arm_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
                          gimple_seq *post_p)
{
  valist = arm_extract_valist_ptr (valist);
  return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
}

/* Implement TARGET_HANDLE_OPTION.  */

static bool
arm_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_march_:
      arm_select[1].string = arg;
      return true;

    case OPT_mcpu_:
      arm_select[0].string = arg;
      return true;

    case OPT_mhard_float:
      target_float_abi_name = "hard";
      return true;

    case OPT_msoft_float:
      target_float_abi_name = "soft";
      return true;

    case OPT_mtune_:
      arm_select[2].string = arg;
      return true;

    default:
      return true;
    }
}
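
/* For example, "-march=armv7-a" stores "armv7-a" in arm_select[1] (the
   "-march=" entry), which arm_override_options below matches against
   the all_architectures table.  */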

static void
arm_target_help (void)
{
  int i;
  static int columns = 0;
  int remaining;

  /* If we have not done so already, obtain the desired maximum width of
     the output.  Note - this is a duplication of the code at the start of
     gcc/opts.c:print_specific_help() - the two copies should probably be
     replaced by a single function.  */
  if (columns == 0)
    {
      const char *p;

      GET_ENVIRONMENT (p, "COLUMNS");
      if (p != NULL)
        {
          int value = atoi (p);

          if (value > 0)
            columns = value;
        }

      if (columns == 0)
        /* Use a reasonable default.  */
        columns = 80;
    }

  printf ("  Known ARM CPUs (for use with the -mcpu= and -mtune= options):\n");

  /* The - 2 is because we know that the last entry in the array is NULL.  */
  i = ARRAY_SIZE (all_cores) - 2;
  gcc_assert (i > 0);
  printf ("    %s", all_cores[i].name);
  remaining = columns - (strlen (all_cores[i].name) + 4);
  gcc_assert (remaining >= 0);

  while (i--)
    {
      int len = strlen (all_cores[i].name);

      if (remaining > len + 2)
        {
          printf (", %s", all_cores[i].name);
          remaining -= len + 2;
        }
      else
        {
          if (remaining > 0)
            printf (",");
          printf ("\n    %s", all_cores[i].name);
          remaining = columns - (len + 4);
        }
    }

  printf ("\n\n  Known ARM architectures (for use with the -march= option):\n");

  i = ARRAY_SIZE (all_architectures) - 2;
  gcc_assert (i > 0);

  printf ("    %s", all_architectures[i].name);
  remaining = columns - (strlen (all_architectures[i].name) + 4);
  gcc_assert (remaining >= 0);

  while (i--)
    {
      int len = strlen (all_architectures[i].name);

      if (remaining > len + 2)
        {
          printf (", %s", all_architectures[i].name);
          remaining -= len + 2;
        }
      else
        {
          if (remaining > 0)
            printf (",");
          printf ("\n    %s", all_architectures[i].name);
          remaining = columns - (len + 4);
        }
    }
  printf ("\n");
}

/* Fix up any incompatible options that the user has specified.
   This has now turned into a maze.  */
void
arm_override_options (void)
{
  unsigned i;
  enum processor_type target_arch_cpu = arm_none;
  enum processor_type selected_cpu = arm_none;

  /* Set up the flags based on the cpu/architecture selected by the user.  */
  for (i = ARRAY_SIZE (arm_select); i--;)
    {
      struct arm_cpu_select * ptr = arm_select + i;

      if (ptr->string != NULL && ptr->string[0] != '\0')
        {
          const struct processors * sel;

          for (sel = ptr->processors; sel->name != NULL; sel++)
            if (streq (ptr->string, sel->name))
              {
                /* Set the architecture define.  */
                if (i != ARM_OPT_SET_TUNE)
                  sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);

                /* Determine the processor core for which we should
                   tune code-generation.  */
                if (/* -mcpu= is a sensible default.  */
                    i == ARM_OPT_SET_CPU
                    /* -mtune= overrides -mcpu= and -march=.  */
                    || i == ARM_OPT_SET_TUNE)
                  arm_tune = (enum processor_type) (sel - ptr->processors);

                /* Remember the CPU associated with this architecture.
                   If no other option is used to set the CPU type,
                   we'll use this to guess the most suitable tuning
                   options.  */
                if (i == ARM_OPT_SET_ARCH)
                  target_arch_cpu = sel->core;

                if (i == ARM_OPT_SET_CPU)
                  selected_cpu = (enum processor_type) (sel - ptr->processors);

                if (i != ARM_OPT_SET_TUNE)
                  {
                    /* If we have been given an architecture and a processor,
                       make sure that they are compatible.  We only generate
                       a warning though, and we prefer the CPU over the
                       architecture.  */
                    if (insn_flags != 0 && (insn_flags ^ sel->flags))
                      warning (0, "switch -mcpu=%s conflicts with -march= switch",
                               ptr->string);

                    insn_flags = sel->flags;
                  }

                break;
              }

          if (sel->name == NULL)
            error ("bad value (%s) for %s switch", ptr->string, ptr->name);
        }
    }

  /* Guess the tuning options from the architecture if necessary.  */
  if (arm_tune == arm_none)
    arm_tune = target_arch_cpu;

  /* If the user did not specify a processor, choose one for them.  */
  if (insn_flags == 0)
    {
      const struct processors * sel;
      unsigned int        sought;

      selected_cpu = (enum processor_type) TARGET_CPU_DEFAULT;
      if (selected_cpu == arm_none)
        {
#ifdef SUBTARGET_CPU_DEFAULT
          /* Use the subtarget default CPU if none was specified by
             configure.  */
          selected_cpu = (enum processor_type) SUBTARGET_CPU_DEFAULT;
#endif
          /* Default to ARM6.  */
          if (selected_cpu == arm_none)
            selected_cpu = arm6;
        }
      sel = &all_cores[selected_cpu];

      insn_flags = sel->flags;

      /* Now check to see if the user has specified some command line
         switches that require certain abilities from the cpu.  */
      sought = 0;

      if (TARGET_INTERWORK || TARGET_THUMB)
        {
          sought |= (FL_THUMB | FL_MODE32);

          /* There are no ARM processors that support both APCS-26 and
             interworking.  Therefore we force FL_MODE26 to be removed
             from insn_flags here (if it was set), so that the search
             below will always be able to find a compatible processor.  */
          insn_flags &= ~FL_MODE26;
        }

1392
      if (sought != 0 && ((sought & insn_flags) != sought))
1393
        {
1394
          /* Try to locate a CPU type that supports all of the abilities
1395
             of the default CPU, plus the extra abilities requested by
1396
             the user.  */
1397
          for (sel = all_cores; sel->name != NULL; sel++)
1398
            if ((sel->flags & sought) == (sought | insn_flags))
1399
              break;
1400
 
1401
          if (sel->name == NULL)
1402
            {
1403
              unsigned current_bit_count = 0;
1404
              const struct processors * best_fit = NULL;
1405
 
1406
              /* Ideally we would like to issue an error message here
1407
                 saying that it was not possible to find a CPU compatible
1408
                 with the default CPU, but which also supports the command
1409
                 line options specified by the programmer, and so they
1410
                 ought to use the -mcpu=<name> command line option to
1411
                 override the default CPU type.
1412
 
1413
                 If we cannot find a cpu that has both the
1414
                 characteristics of the default cpu and the given
1415
                 command line options we scan the array again looking
1416
                 for a best match.  */
1417
              for (sel = all_cores; sel->name != NULL; sel++)
1418
                if ((sel->flags & sought) == sought)
1419
                  {
1420
                    unsigned count;
1421
 
1422
                    count = bit_count (sel->flags & insn_flags);
1423
 
1424
                    if (count >= current_bit_count)
1425
                      {
1426
                        best_fit = sel;
1427
                        current_bit_count = count;
1428
                      }
1429
                  }
1430
 
1431
              gcc_assert (best_fit);
1432
              sel = best_fit;
1433
            }
1434
 
1435
          insn_flags = sel->flags;
1436
        }
1437
      sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
1438
      arm_default_cpu = (enum processor_type) (sel - all_cores);
1439
      if (arm_tune == arm_none)
1440
        arm_tune = arm_default_cpu;
1441
    }

  /* The processor for which we should tune should now have been
     chosen.  */
  gcc_assert (arm_tune != arm_none);

  tune_flags = all_cores[(int)arm_tune].flags;

  if (target_fp16_format_name)
    {
      for (i = 0; i < ARRAY_SIZE (all_fp16_formats); i++)
        {
          if (streq (all_fp16_formats[i].name, target_fp16_format_name))
            {
              arm_fp16_format = all_fp16_formats[i].fp16_format_type;
              break;
            }
        }
      if (i == ARRAY_SIZE (all_fp16_formats))
        error ("invalid __fp16 format option: -mfp16-format=%s",
               target_fp16_format_name);
    }
  else
    arm_fp16_format = ARM_FP16_FORMAT_NONE;

  if (target_abi_name)
    {
      for (i = 0; i < ARRAY_SIZE (arm_all_abis); i++)
        {
          if (streq (arm_all_abis[i].name, target_abi_name))
            {
              arm_abi = arm_all_abis[i].abi_type;
              break;
            }
        }
      if (i == ARRAY_SIZE (arm_all_abis))
        error ("invalid ABI option: -mabi=%s", target_abi_name);
    }
  else
    arm_abi = ARM_DEFAULT_ABI;

  /* Make sure that the processor choice does not conflict with any of the
     other command line choices.  */
  if (TARGET_ARM && !(insn_flags & FL_NOTM))
    error ("target CPU does not support ARM mode");

  /* BPABI targets use linker tricks to allow interworking on cores
     without thumb support.  */
  if (TARGET_INTERWORK && !((insn_flags & FL_THUMB) || TARGET_BPABI))
    {
      warning (0, "target CPU does not support interworking");
      target_flags &= ~MASK_INTERWORK;
    }

  if (TARGET_THUMB && !(insn_flags & FL_THUMB))
    {
      warning (0, "target CPU does not support THUMB instructions");
      target_flags &= ~MASK_THUMB;
    }

  if (TARGET_APCS_FRAME && TARGET_THUMB)
    {
      /* warning (0, "ignoring -mapcs-frame because -mthumb was used"); */
      target_flags &= ~MASK_APCS_FRAME;
    }

  /* Callee super interworking implies thumb interworking.  Adding
     this to the flags here simplifies the logic elsewhere.  */
  if (TARGET_THUMB && TARGET_CALLEE_INTERWORKING)
    target_flags |= MASK_INTERWORK;

  /* TARGET_BACKTRACE calls leaf_function_p, which causes a crash if done
     from here where no function is being compiled currently.  */
  if ((TARGET_TPCS_FRAME || TARGET_TPCS_LEAF_FRAME) && TARGET_ARM)
    warning (0, "enabling backtrace support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLEE_INTERWORKING)
    warning (0, "enabling callee interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLER_INTERWORKING)
    warning (0, "enabling caller interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
    {
      warning (0, "-mapcs-stack-check incompatible with -mno-apcs-frame");
      target_flags |= MASK_APCS_FRAME;
    }

  if (TARGET_POKE_FUNCTION_NAME)
    target_flags |= MASK_APCS_FRAME;

  if (TARGET_APCS_REENT && flag_pic)
    error ("-fpic and -mapcs-reent are incompatible");

  if (TARGET_APCS_REENT)
    warning (0, "APCS reentrant code not supported.  Ignored");

  /* If this target is normally configured to use APCS frames, warn if they
     are turned off and debugging is turned on.  */
  if (TARGET_ARM
      && write_symbols != NO_DEBUG
      && !TARGET_APCS_FRAME
      && (TARGET_DEFAULT & MASK_APCS_FRAME))
    warning (0, "-g with -mno-apcs-frame may not give sensible debugging");

  if (TARGET_APCS_FLOAT)
    warning (0, "passing floating point arguments in fp regs not yet supported");

  /* Initialize boolean versions of the flags, for use in the arm.md file.  */
  arm_arch3m = (insn_flags & FL_ARCH3M) != 0;
  arm_arch4 = (insn_flags & FL_ARCH4) != 0;
  arm_arch4t = arm_arch4 & ((insn_flags & FL_THUMB) != 0);
  arm_arch5 = (insn_flags & FL_ARCH5) != 0;
  arm_arch5e = (insn_flags & FL_ARCH5E) != 0;
  arm_arch6 = (insn_flags & FL_ARCH6) != 0;
  arm_arch6k = (insn_flags & FL_ARCH6K) != 0;
  arm_arch_notm = (insn_flags & FL_NOTM) != 0;
  arm_arch7em = (insn_flags & FL_ARCH7EM) != 0;
  arm_arch_thumb2 = (insn_flags & FL_THUMB2) != 0;
  arm_arch_xscale = (insn_flags & FL_XSCALE) != 0;
  arm_arch_cirrus = (insn_flags & FL_CIRRUS) != 0;

  arm_ld_sched = (tune_flags & FL_LDSCHED) != 0;
  arm_tune_strongarm = (tune_flags & FL_STRONG) != 0;
  thumb_code = (TARGET_ARM == 0);
  arm_tune_wbuf = (tune_flags & FL_WBUF) != 0;
  arm_tune_xscale = (tune_flags & FL_XSCALE) != 0;
  arm_arch_iwmmxt = (insn_flags & FL_IWMMXT) != 0;
  arm_arch_hwdiv = (insn_flags & FL_DIV) != 0;
  arm_tune_cortex_a9 = (arm_tune == cortexa9) != 0;

  /* If we are not using the default (ARM mode) section anchor offset
     ranges, then set the correct ranges now.  */
  if (TARGET_THUMB1)
    {
      /* Thumb-1 LDR instructions cannot have negative offsets.
         Permissible positive offset ranges are 5-bit (for byte loads),
         6-bit (for halfword loads), or 7-bit (for word loads).
         Empirical results suggest a 7-bit anchor range gives the best
         overall code size.  */
      targetm.min_anchor_offset = 0;
      targetm.max_anchor_offset = 127;
    }
  else if (TARGET_THUMB2)
    {
      /* The minimum is set such that the total size of the block
         for a particular anchor is 248 + 1 + 4095 bytes, which is
         divisible by eight, ensuring natural spacing of anchors.  */
      targetm.min_anchor_offset = -248;
      targetm.max_anchor_offset = 4095;
    }

  /* V5 code we generate is completely interworking capable, so we turn off
     TARGET_INTERWORK here to avoid many tests later on.  */

  /* XXX However, we must pass the right pre-processor defines to CPP
     or GLD can get confused.  This is a hack.  */
  if (TARGET_INTERWORK)
    arm_cpp_interwork = 1;

  if (arm_arch5)
    target_flags &= ~MASK_INTERWORK;

  if (TARGET_IWMMXT && !ARM_DOUBLEWORD_ALIGN)
    error ("iwmmxt requires an AAPCS compatible ABI for proper operation");

  if (TARGET_IWMMXT_ABI && !TARGET_IWMMXT)
    error ("iwmmxt abi requires an iwmmxt capable cpu");

  if (target_fpu_name == NULL && target_fpe_name != NULL)
    {
      if (streq (target_fpe_name, "2"))
        target_fpu_name = "fpe2";
      else if (streq (target_fpe_name, "3"))
        target_fpu_name = "fpe3";
      else
        error ("invalid floating point emulation option: -mfpe=%s",
               target_fpe_name);
    }

  if (target_fpu_name == NULL)
    {
#ifdef FPUTYPE_DEFAULT
      target_fpu_name = FPUTYPE_DEFAULT;
#else
      if (arm_arch_cirrus)
        target_fpu_name = "maverick";
      else
        target_fpu_name = "fpe2";
#endif
    }

  arm_fpu_desc = NULL;
  for (i = 0; i < ARRAY_SIZE (all_fpus); i++)
    {
      if (streq (all_fpus[i].name, target_fpu_name))
        {
          arm_fpu_desc = &all_fpus[i];
          break;
        }
    }

  if (!arm_fpu_desc)
    {
      error ("invalid floating point option: -mfpu=%s", target_fpu_name);
      return;
    }

  switch (arm_fpu_desc->model)
    {
    case ARM_FP_MODEL_FPA:
      if (arm_fpu_desc->rev == 2)
        arm_fpu_attr = FPU_FPE2;
      else if (arm_fpu_desc->rev == 3)
        arm_fpu_attr = FPU_FPE3;
      else
        arm_fpu_attr = FPU_FPA;
      break;

    case ARM_FP_MODEL_MAVERICK:
      arm_fpu_attr = FPU_MAVERICK;
      break;

    case ARM_FP_MODEL_VFP:
      arm_fpu_attr = FPU_VFP;
      break;

    default:
      gcc_unreachable();
    }
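
  /* Editorial note, not in the original source: the -mfpu= lookup above is
     what maps command-line spellings such as -mfpu=vfp, -mfpu=neon,
     -mfpu=fpa or -mfpu=maverick (plus the legacy -mfpe=2/-mfpe=3 aliases
     handled just before it) onto an all_fpus[] entry, and this switch then
     derives the FPU_* value tested by the "fpu" attribute in arm.md.  */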

  if (target_float_abi_name != NULL)
    {
      /* The user specified a FP ABI.  */
      for (i = 0; i < ARRAY_SIZE (all_float_abis); i++)
        {
          if (streq (all_float_abis[i].name, target_float_abi_name))
            {
              arm_float_abi = all_float_abis[i].abi_type;
              break;
            }
        }
      if (i == ARRAY_SIZE (all_float_abis))
        error ("invalid floating point abi: -mfloat-abi=%s",
               target_float_abi_name);
    }
  else
    arm_float_abi = TARGET_DEFAULT_FLOAT_ABI;

  if (TARGET_AAPCS_BASED
      && (arm_fpu_desc->model == ARM_FP_MODEL_FPA))
    error ("FPA is unsupported in the AAPCS");

  if (TARGET_AAPCS_BASED)
    {
      if (TARGET_CALLER_INTERWORKING)
        error ("AAPCS does not support -mcaller-super-interworking");
      else
        if (TARGET_CALLEE_INTERWORKING)
          error ("AAPCS does not support -mcallee-super-interworking");
    }

  /* FPA and iWMMXt are incompatible because the insn encodings overlap.
     VFP and iWMMXt can theoretically coexist, but it's unlikely such silicon
     will ever exist.  GCC makes no attempt to support this combination.  */
  if (TARGET_IWMMXT && !TARGET_SOFT_FLOAT)
    sorry ("iWMMXt and hardware floating point");

  /* ??? iWMMXt insn patterns need auditing for Thumb-2.  */
  if (TARGET_THUMB2 && TARGET_IWMMXT)
    sorry ("Thumb-2 iWMMXt");

  /* __fp16 support currently assumes the core has ldrh.  */
  if (!arm_arch4 && arm_fp16_format != ARM_FP16_FORMAT_NONE)
    sorry ("__fp16 and no ldrh");

  /* If soft-float is specified then don't use FPU.  */
  if (TARGET_SOFT_FLOAT)
    arm_fpu_attr = FPU_NONE;

  if (TARGET_AAPCS_BASED)
    {
      if (arm_abi == ARM_ABI_IWMMXT)
        arm_pcs_default = ARM_PCS_AAPCS_IWMMXT;
      else if (arm_float_abi == ARM_FLOAT_ABI_HARD
               && TARGET_HARD_FLOAT
               && TARGET_VFP)
        arm_pcs_default = ARM_PCS_AAPCS_VFP;
      else
        arm_pcs_default = ARM_PCS_AAPCS;
    }
  else
    {
      if (arm_float_abi == ARM_FLOAT_ABI_HARD && TARGET_VFP)
        sorry ("-mfloat-abi=hard and VFP");

      if (arm_abi == ARM_ABI_APCS)
        arm_pcs_default = ARM_PCS_APCS;
      else
        arm_pcs_default = ARM_PCS_ATPCS;
    }

  /* For arm2/3 there is no need to do any scheduling if there is only
     a floating point emulator, or we are doing software floating-point.  */
  if ((TARGET_SOFT_FLOAT
       || (TARGET_FPA && arm_fpu_desc->rev))
      && (tune_flags & FL_MODE32) == 0)
    flag_schedule_insns = flag_schedule_insns_after_reload = 0;

  if (target_thread_switch)
    {
      if (strcmp (target_thread_switch, "soft") == 0)
        target_thread_pointer = TP_SOFT;
      else if (strcmp (target_thread_switch, "auto") == 0)
        target_thread_pointer = TP_AUTO;
      else if (strcmp (target_thread_switch, "cp15") == 0)
        target_thread_pointer = TP_CP15;
      else
        error ("invalid thread pointer option: -mtp=%s", target_thread_switch);
    }

  /* Use the cp15 method if it is available.  */
  if (target_thread_pointer == TP_AUTO)
    {
      if (arm_arch6k && !TARGET_THUMB1)
        target_thread_pointer = TP_CP15;
      else
        target_thread_pointer = TP_SOFT;
    }

  if (TARGET_HARD_TP && TARGET_THUMB1)
    error ("can not use -mtp=cp15 with 16-bit Thumb");

  /* Override the default structure alignment for AAPCS ABI.  */
  if (TARGET_AAPCS_BASED)
    arm_structure_size_boundary = 8;

  if (structure_size_string != NULL)
    {
      int size = strtol (structure_size_string, NULL, 0);

      if (size == 8 || size == 32
          || (ARM_DOUBLEWORD_ALIGN && size == 64))
        arm_structure_size_boundary = size;
      else
        warning (0, "structure size boundary can only be set to %s",
                 ARM_DOUBLEWORD_ALIGN ? "8, 32 or 64": "8 or 32");
    }

  if (!TARGET_ARM && TARGET_VXWORKS_RTP && flag_pic)
    {
      error ("RTP PIC is incompatible with Thumb");
      flag_pic = 0;
    }

  /* If stack checking is disabled, we can use r10 as the PIC register,
     which keeps r9 available.  The EABI specifies r9 as the PIC register.  */
  if (flag_pic && TARGET_SINGLE_PIC_BASE)
    {
      if (TARGET_VXWORKS_RTP)
        warning (0, "RTP PIC is incompatible with -msingle-pic-base");
      arm_pic_register = (TARGET_APCS_STACK || TARGET_AAPCS_BASED) ? 9 : 10;
    }

  if (flag_pic && TARGET_VXWORKS_RTP)
    arm_pic_register = 9;

  if (arm_pic_register_string != NULL)
    {
      int pic_register = decode_reg_name (arm_pic_register_string);

      if (!flag_pic)
        warning (0, "-mpic-register= is useless without -fpic");

      /* Prevent the user from choosing an obviously stupid PIC register.  */
      else if (pic_register < 0 || call_used_regs[pic_register]
               || pic_register == HARD_FRAME_POINTER_REGNUM
               || pic_register == STACK_POINTER_REGNUM
               || pic_register >= PC_REGNUM
               || (TARGET_VXWORKS_RTP
                   && (unsigned int) pic_register != arm_pic_register))
        error ("unable to use '%s' for PIC register", arm_pic_register_string);
      else
        arm_pic_register = pic_register;
    }

  /* Enable -mfix-cortex-m3-ldrd by default for Cortex-M3 cores.  */
  if (fix_cm3_ldrd == 2)
    {
      if (selected_cpu == cortexm3)
        fix_cm3_ldrd = 1;
      else
        fix_cm3_ldrd = 0;
    }

  if (TARGET_THUMB1 && flag_schedule_insns)
    {
      /* Don't warn since it's on by default in -O2.  */
      flag_schedule_insns = 0;
    }

  if (optimize_size)
    {
      arm_constant_limit = 1;

      /* If optimizing for size, bump the number of instructions that we
         are prepared to conditionally execute (even on a StrongARM).  */
      max_insns_skipped = 6;
    }
  else
    {
      /* For processors with load scheduling, it never costs more than
         2 cycles to load a constant, and the load scheduler may well
         reduce that to 1.  */
      if (arm_ld_sched)
        arm_constant_limit = 1;

      /* On XScale the longer latency of a load makes it more difficult
         to achieve a good schedule, so it's faster to synthesize
         constants that can be done in two insns.  */
      if (arm_tune_xscale)
        arm_constant_limit = 2;

      /* StrongARM has early execution of branches, so a sequence
         that is worth skipping is shorter.  */
      if (arm_tune_strongarm)
        max_insns_skipped = 3;
    }

  /* Hot/Cold partitioning is not currently supported, since we can't
     handle literal pool placement in that case.  */
  if (flag_reorder_blocks_and_partition)
    {
      inform (input_location,
              "-freorder-blocks-and-partition not supported on this architecture");
      flag_reorder_blocks_and_partition = 0;
      flag_reorder_blocks = 1;
    }

  /* Register global variables with the garbage collector.  */
  arm_add_gc_roots ();
}
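
/* Editorial note, not in the original source: the routine ending above is
   the backend's options-override hook, so it is what turns a command line
   such as

       gcc -mcpu=strongarm -mthumb -O2 foo.c

   into the insn_flags/tune_flags bitmasks and the diagnostics seen
   earlier; here -mthumb would be dropped with a "target CPU does not
   support THUMB instructions" warning, since StrongARM is an ARMv4 core
   without Thumb.  */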

static void
arm_add_gc_roots (void)
{
  gcc_obstack_init(&minipool_obstack);
  minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
}

/* A table of known ARM exception types.
   For use with the interrupt function attribute.  */

typedef struct
{
  const char *const arg;
  const unsigned long return_value;
}
isr_attribute_arg;

static const isr_attribute_arg isr_attribute_args [] =
{
  { "IRQ",   ARM_FT_ISR },
  { "irq",   ARM_FT_ISR },
  { "FIQ",   ARM_FT_FIQ },
  { "fiq",   ARM_FT_FIQ },
  { "ABORT", ARM_FT_ISR },
  { "abort", ARM_FT_ISR },
  { "ABORT", ARM_FT_ISR },
  { "abort", ARM_FT_ISR },
  { "UNDEF", ARM_FT_EXCEPTION },
  { "undef", ARM_FT_EXCEPTION },
  { "SWI",   ARM_FT_EXCEPTION },
  { "swi",   ARM_FT_EXCEPTION },
  { NULL,    ARM_FT_NORMAL }
};

/* Returns the (interrupt) function type of the current
   function, or ARM_FT_UNKNOWN if the type cannot be determined.  */

static unsigned long
arm_isr_value (tree argument)
{
  const isr_attribute_arg * ptr;
  const char *              arg;

  if (!arm_arch_notm)
    return ARM_FT_NORMAL | ARM_FT_STACKALIGN;

  /* No argument - default to IRQ.  */
  if (argument == NULL_TREE)
    return ARM_FT_ISR;

  /* Get the value of the argument.  */
  if (TREE_VALUE (argument) == NULL_TREE
      || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
    return ARM_FT_UNKNOWN;

  arg = TREE_STRING_POINTER (TREE_VALUE (argument));

  /* Check it against the list of known arguments.  */
  for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
    if (streq (arg, ptr->arg))
      return ptr->return_value;

  /* An unrecognized interrupt type.  */
  return ARM_FT_UNKNOWN;
}
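
/* Editorial note, not in the original source: the table and lookup above
   service the GCC "interrupt"/"isr" function attribute, e.g.

       void handler (void) __attribute__ ((interrupt ("FIQ")));

   An attribute with no argument defaults to ARM_FT_ISR (an IRQ handler),
   and on M-profile cores (where arm_arch_notm is false) every such
   function instead gets ARM_FT_NORMAL | ARM_FT_STACKALIGN, since the
   hardware performs the exception entry itself.  */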

/* Computes the type of the current function.  */

static unsigned long
arm_compute_func_type (void)
{
  unsigned long type = ARM_FT_UNKNOWN;
  tree a;
  tree attr;

  gcc_assert (TREE_CODE (current_function_decl) == FUNCTION_DECL);

  /* Decide if the current function is volatile.  Such functions
     never return, and many memory cycles can be saved by not storing
     register values that will never be needed again.  This optimization
     was added to speed up context switching in a kernel application.  */
  if (optimize > 0
      && (TREE_NOTHROW (current_function_decl)
          || !(flag_unwind_tables
               || (flag_exceptions && !USING_SJLJ_EXCEPTIONS)))
      && TREE_THIS_VOLATILE (current_function_decl))
    type |= ARM_FT_VOLATILE;

  if (cfun->static_chain_decl != NULL)
    type |= ARM_FT_NESTED;

  attr = DECL_ATTRIBUTES (current_function_decl);

  a = lookup_attribute ("naked", attr);
  if (a != NULL_TREE)
    type |= ARM_FT_NAKED;

  a = lookup_attribute ("isr", attr);
  if (a == NULL_TREE)
    a = lookup_attribute ("interrupt", attr);

  if (a == NULL_TREE)
    type |= TARGET_INTERWORK ? ARM_FT_INTERWORKED : ARM_FT_NORMAL;
  else
    type |= arm_isr_value (TREE_VALUE (a));

  return type;
}
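
/* Editorial note, not in the original source: the attribute bits combine,
   so for example

       void __attribute__ ((interrupt ("IRQ"), naked)) irq_stub (void);

   yields ARM_FT_ISR | ARM_FT_NAKED, and a nested function additionally
   picks up ARM_FT_NESTED via cfun->static_chain_decl.  */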

/* Returns the type of the current function.  */

unsigned long
arm_current_func_type (void)
{
  if (ARM_FUNC_TYPE (cfun->machine->func_type) == ARM_FT_UNKNOWN)
    cfun->machine->func_type = arm_compute_func_type ();

  return cfun->machine->func_type;
}

bool
arm_allocate_stack_slots_for_args (void)
{
  /* Naked functions should not allocate stack slots for arguments.  */
  return !IS_NAKED (arm_current_func_type ());
}


/* Output assembler code for a block containing the constant parts
   of a trampoline, leaving space for the variable parts.

   On the ARM, (if r8 is the static chain regnum, and remembering that
   referencing pc adds an offset of 8) the trampoline looks like:
           ldr          r8, [pc, #0]
           ldr          pc, [pc]
           .word        static chain value
           .word        function's address
   XXX FIXME: When the trampoline returns, r8 will be clobbered.  */

static void
arm_asm_trampoline_template (FILE *f)
{
  if (TARGET_ARM)
    {
      asm_fprintf (f, "\tldr\t%r, [%r, #0]\n", STATIC_CHAIN_REGNUM, PC_REGNUM);
      asm_fprintf (f, "\tldr\t%r, [%r, #0]\n", PC_REGNUM, PC_REGNUM);
    }
  else if (TARGET_THUMB2)
    {
      /* The Thumb-2 trampoline is similar to the arm implementation.
         Unlike 16-bit Thumb, we enter the stub in thumb mode.  */
      asm_fprintf (f, "\tldr.w\t%r, [%r, #4]\n",
                   STATIC_CHAIN_REGNUM, PC_REGNUM);
      asm_fprintf (f, "\tldr.w\t%r, [%r, #4]\n", PC_REGNUM, PC_REGNUM);
    }
  else
    {
      ASM_OUTPUT_ALIGN (f, 2);
      fprintf (f, "\t.code\t16\n");
      fprintf (f, ".Ltrampoline_start:\n");
      asm_fprintf (f, "\tpush\t{r0, r1}\n");
      asm_fprintf (f, "\tldr\tr0, [%r, #8]\n", PC_REGNUM);
      asm_fprintf (f, "\tmov\t%r, r0\n", STATIC_CHAIN_REGNUM);
      asm_fprintf (f, "\tldr\tr0, [%r, #8]\n", PC_REGNUM);
      asm_fprintf (f, "\tstr\tr0, [%r, #4]\n", SP_REGNUM);
      asm_fprintf (f, "\tpop\t{r0, %r}\n", PC_REGNUM);
    }
  assemble_aligned_integer (UNITS_PER_WORD, const0_rtx);
  assemble_aligned_integer (UNITS_PER_WORD, const0_rtx);
}

/* Emit RTL insns to initialize the variable parts of a trampoline.  */

static void
arm_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
{
  rtx fnaddr, mem, a_tramp;

  emit_block_move (m_tramp, assemble_trampoline_template (),
                   GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);

  mem = adjust_address (m_tramp, SImode, TARGET_32BIT ? 8 : 12);
  emit_move_insn (mem, chain_value);

  mem = adjust_address (m_tramp, SImode, TARGET_32BIT ? 12 : 16);
  fnaddr = XEXP (DECL_RTL (fndecl), 0);
  emit_move_insn (mem, fnaddr);

  a_tramp = XEXP (m_tramp, 0);
  emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__clear_cache"),
                     LCT_NORMAL, VOIDmode, 2, a_tramp, Pmode,
                     plus_constant (a_tramp, TRAMPOLINE_SIZE), Pmode);
}

/* Thumb trampolines should be entered in thumb mode, so set
   the bottom bit of the address.  */

static rtx
arm_trampoline_adjust_address (rtx addr)
{
  if (TARGET_THUMB)
    addr = expand_simple_binop (Pmode, IOR, addr, const1_rtx,
                                NULL, 0, OPTAB_LIB_WIDEN);
  return addr;
}
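
/* Editorial note, not in the original source: trampolines come into play
   when the address of a nested function (a GNU C extension) escapes, e.g.

       int outer (int x)
       {
         int inner (int y) { return x + y; }
         int (*fp) (int) = inner;   // fp points at a stack trampoline
         return fp (1);
       }

   arm_trampoline_init above copies the template onto the stack, patches
   in the static chain value and inner's real address, and calls
   __clear_cache so instruction fetch sees the freshly written words;
   arm_trampoline_adjust_address sets the low bit so Thumb entry works.  */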

/* Return 1 if it is possible to return using a single instruction.
   If SIBLING is non-null, this is a test for a return before a sibling
   call.  SIBLING is the call insn, so we can examine its register usage.  */

int
use_return_insn (int iscond, rtx sibling)
{
  int regno;
  unsigned int func_type;
  unsigned long saved_int_regs;
  unsigned HOST_WIDE_INT stack_adjust;
  arm_stack_offsets *offsets;

  /* Never use a return instruction before reload has run.  */
  if (!reload_completed)
    return 0;

  func_type = arm_current_func_type ();

  /* Naked, volatile and stack alignment functions need special
     consideration.  */
  if (func_type & (ARM_FT_VOLATILE | ARM_FT_NAKED | ARM_FT_STACKALIGN))
    return 0;

  /* So do interrupt functions that use the frame pointer and Thumb
     interrupt functions.  */
  if (IS_INTERRUPT (func_type) && (frame_pointer_needed || TARGET_THUMB))
    return 0;

  offsets = arm_get_frame_offsets ();
  stack_adjust = offsets->outgoing_args - offsets->saved_regs;

  /* As do variadic functions.  */
  if (crtl->args.pretend_args_size
      || cfun->machine->uses_anonymous_args
      /* Or if the function calls __builtin_eh_return ().  */
      || crtl->calls_eh_return
      /* Or if the function calls alloca.  */
      || cfun->calls_alloca
      /* Or if there is a stack adjustment.  However, if the stack pointer
         is saved on the stack, we can use a pre-incrementing stack load.  */
      || !(stack_adjust == 0 || (TARGET_APCS_FRAME && frame_pointer_needed
                                 && stack_adjust == 4)))
    return 0;

  saved_int_regs = offsets->saved_regs_mask;

  /* Unfortunately, the insn

       ldmib sp, {..., sp, ...}

     triggers a bug on most SA-110 based devices, such that the stack
     pointer won't be correctly restored if the instruction takes a
     page fault.  We work around this problem by popping r3 along with
     the other registers, since that is never slower than executing
     another instruction.

     We test for !arm_arch5 here, because code for any architecture
     less than this could potentially be run on one of the buggy
     chips.  */
  if (stack_adjust == 4 && !arm_arch5 && TARGET_ARM)
    {
      /* Validate that r3 is a call-clobbered register (always true in
         the default ABI) ...  */
      if (!call_used_regs[3])
        return 0;

      /* ... that it isn't being used for a return value ...  */
      if (arm_size_return_regs () >= (4 * UNITS_PER_WORD))
        return 0;

      /* ... or for a tail-call argument ...  */
      if (sibling)
        {
          gcc_assert (GET_CODE (sibling) == CALL_INSN);

          if (find_regno_fusage (sibling, USE, 3))
            return 0;
        }

      /* ... and that there are no call-saved registers in r0-r2
         (always true in the default ABI).  */
      if (saved_int_regs & 0x7)
        return 0;
    }

  /* Can't be done if interworking with Thumb, and any registers have been
     stacked.  */
  if (TARGET_INTERWORK && saved_int_regs != 0 && !IS_INTERRUPT(func_type))
    return 0;

  /* On StrongARM, conditional returns are expensive if they aren't
     taken and multiple registers have been stacked.  */
  if (iscond && arm_tune_strongarm)
    {
      /* Conditional return when just the LR is stored is a simple
         conditional-load instruction, that's not expensive.  */
      if (saved_int_regs != 0 && saved_int_regs != (1 << LR_REGNUM))
        return 0;

      if (flag_pic
          && arm_pic_register != INVALID_REGNUM
          && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
        return 0;
    }

  /* If there are saved registers but the LR isn't saved, then we need
     two instructions for the return.  */
  if (saved_int_regs && !(saved_int_regs & (1 << LR_REGNUM)))
    return 0;

  /* Can't be done if any of the FPA regs are pushed,
     since this also requires an insn.  */
  if (TARGET_HARD_FLOAT && TARGET_FPA)
    for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
      if (df_regs_ever_live_p (regno) && !call_used_regs[regno])
        return 0;

  /* Likewise VFP regs.  */
  if (TARGET_HARD_FLOAT && TARGET_VFP)
    for (regno = FIRST_VFP_REGNUM; regno <= LAST_VFP_REGNUM; regno++)
      if (df_regs_ever_live_p (regno) && !call_used_regs[regno])
        return 0;

  if (TARGET_REALLY_IWMMXT)
    for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
      if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
        return 0;

  return 1;
}
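
/* Editorial note, not in the original source: when use_return_insn
   succeeds, the whole epilogue can collapse into one instruction, e.g.

       ldmfd   sp!, {r4, r5, pc}

   whereas any of the early exits above forces the longer sequence that
   restores the saved registers and LR first and then branches.  */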

/* Return TRUE if int I is a valid immediate ARM constant.  */

int
const_ok_for_arm (HOST_WIDE_INT i)
{
  int lowbit;

  /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
     be all zero, or all one.  */
  if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
      && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
          != ((~(unsigned HOST_WIDE_INT) 0)
              & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
    return FALSE;

  i &= (unsigned HOST_WIDE_INT) 0xffffffff;

  /* Fast return for 0 and small values.  We must do this for zero, since
     the code below can't handle that one case.  */
  if ((i & ~(unsigned HOST_WIDE_INT) 0xff) == 0)
    return TRUE;

  /* Get the number of trailing zeros.  */
  lowbit = ffs((int) i) - 1;

  /* Only even shifts are allowed in ARM mode so round down to the
     nearest even number.  */
  if (TARGET_ARM)
    lowbit &= ~1;

  if ((i & ~(((unsigned HOST_WIDE_INT) 0xff) << lowbit)) == 0)
    return TRUE;

  if (TARGET_ARM)
    {
      /* Allow rotated constants in ARM mode.  */
      if (lowbit <= 4
          && ((i & ~0xc000003f) == 0
              || (i & ~0xf000000f) == 0
              || (i & ~0xfc000003) == 0))
        return TRUE;
    }
  else
    {
      HOST_WIDE_INT v;

      /* Allow repeated pattern.  */
      v = i & 0xff;
      v |= v << 16;
      if (i == v || i == (v | (v << 8)))
        return TRUE;
    }

  return FALSE;
}
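
/* Editorial note, not in the original source: an ARM-mode immediate is an
   8-bit value rotated right by an even amount, so for example

       const_ok_for_arm (0xff)         -> true   (no rotation)
       const_ok_for_arm (0xff000000)   -> true   (0xff rotated right by 8)
       const_ok_for_arm (0xf000000f)   -> true   (rotation wrapping bit 0)
       const_ok_for_arm (0x101)        -> false  (needs two instructions)

   In Thumb-2 mode the repeated-byte patterns 0x00XY00XY and 0xXYXYXYXY
   are accepted instead, as tested in the else branch above.  */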

/* Return true if I is a valid constant for the operation CODE.  */
static int
const_ok_for_op (HOST_WIDE_INT i, enum rtx_code code)
{
  if (const_ok_for_arm (i))
    return 1;

  switch (code)
    {
    case PLUS:
    case COMPARE:
    case EQ:
    case NE:
    case GT:
    case LE:
    case LT:
    case GE:
    case GEU:
    case LTU:
    case GTU:
    case LEU:
    case UNORDERED:
    case ORDERED:
    case UNEQ:
    case UNGE:
    case UNLT:
    case UNGT:
    case UNLE:
      return const_ok_for_arm (ARM_SIGN_EXTEND (-i));

    case MINUS:         /* Should only occur with (MINUS I reg) => rsb */
    case XOR:
      return 0;

    case IOR:
      if (TARGET_THUMB2)
        return const_ok_for_arm (ARM_SIGN_EXTEND (~i));
      return 0;

    case AND:
      return const_ok_for_arm (ARM_SIGN_EXTEND (~i));

    default:
      gcc_unreachable ();
    }
}
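
/* Editorial note, not in the original source: the negated constant is
   tried for PLUS and the comparisons because the inverse instruction can
   absorb it (an add of #-1 can be emitted as "sub r0, r1, #1"); for AND
   the complement is tried because BIC (bit clear) takes the inverted
   mask, and for IOR on Thumb-2 the ORN instruction likewise takes an
   inverted operand.  */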

/* Emit a sequence of insns to handle a large constant.
   CODE is the code of the operation required, it can be any of SET, PLUS,
   IOR, AND, XOR, MINUS;
   MODE is the mode in which the operation is being performed;
   VAL is the integer to operate on;
   SOURCE is the other operand (a register, or a null-pointer for SET);
   SUBTARGETS means it is safe to create scratch registers if that will
   either produce a simpler sequence, or we will want to cse the values.
   Return value is the number of insns emitted.  */

/* ??? Tweak this for thumb2.  */
int
arm_split_constant (enum rtx_code code, enum machine_mode mode, rtx insn,
                    HOST_WIDE_INT val, rtx target, rtx source, int subtargets)
{
  rtx cond;

  if (insn && GET_CODE (PATTERN (insn)) == COND_EXEC)
    cond = COND_EXEC_TEST (PATTERN (insn));
  else
    cond = NULL_RTX;

  if (subtargets || code == SET
      || (GET_CODE (target) == REG && GET_CODE (source) == REG
          && REGNO (target) != REGNO (source)))
    {
      /* After arm_reorg has been called, we can't fix up expensive
         constants by pushing them into memory so we must synthesize
         them in-line, regardless of the cost.  This is only likely to
         be more costly on chips that have load delay slots and we are
         compiling without running the scheduler (so no splitting
         occurred before the final instruction emission).

         Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c  */
      if (!after_arm_reorg
          && !cond
          && (arm_gen_constant (code, mode, NULL_RTX, val, target, source,
                                1, 0)
              > arm_constant_limit + (code != SET)))
        {
          if (code == SET)
            {
              /* Currently SET is the only monadic value for CODE, all
                 the rest are dyadic.  */
              if (TARGET_USE_MOVT)
                arm_emit_movpair (target, GEN_INT (val));
              else
                emit_set_insn (target, GEN_INT (val));

              return 1;
            }
          else
            {
              rtx temp = subtargets ? gen_reg_rtx (mode) : target;

              if (TARGET_USE_MOVT)
                arm_emit_movpair (temp, GEN_INT (val));
              else
                emit_set_insn (temp, GEN_INT (val));

              /* For MINUS, the value is subtracted from, since we never
                 have subtraction of a constant.  */
              if (code == MINUS)
                emit_set_insn (target, gen_rtx_MINUS (mode, temp, source));
              else
                emit_set_insn (target,
                               gen_rtx_fmt_ee (code, mode, source, temp));
              return 2;
            }
        }
    }

  return arm_gen_constant (code, mode, cond, val, target, source, subtargets,
                           1);
}
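
/* Editorial note, not in the original source: for instance "x |= 0x10003"
   has no single-immediate encoding, so the splitter may produce

       orr     rD, rS, #0x10000
       orr     rD, rD, #3

   while after arm_reorg (once the literal pool is closed) the value must
   be synthesized in-line regardless of cost, as the comment above says.  */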

/* Return the number of instructions required to synthesize the given
   constant, if we start emitting them from bit-position I.  */
static int
count_insns_for_constant (HOST_WIDE_INT remainder, int i)
{
  HOST_WIDE_INT temp1;
  int step_size = TARGET_ARM ? 2 : 1;
  int num_insns = 0;

  gcc_assert (TARGET_ARM || i == 0);

  do
    {
      int end;

      if (i <= 0)
        i += 32;
      if (remainder & (((1 << step_size) - 1) << (i - step_size)))
        {
          end = i - 8;
          if (end < 0)
            end += 32;
          temp1 = remainder & ((0x0ff << end)
                                    | ((i < end) ? (0xff >> (32 - end)) : 0));
          remainder &= ~temp1;
          num_insns++;
          i -= 8 - step_size;
        }
      i -= step_size;
    } while (remainder);
  return num_insns;
}

static int
find_best_start (unsigned HOST_WIDE_INT remainder)
{
  int best_consecutive_zeros = 0;
  int i;
  int best_start = 0;

  /* If we aren't targeting ARM, the best place to start is always at
     the bottom.  */
  if (! TARGET_ARM)
    return 0;

  for (i = 0; i < 32; i += 2)
    {
      int consecutive_zeros = 0;

      if (!(remainder & (3 << i)))
        {
          while ((i < 32) && !(remainder & (3 << i)))
            {
              consecutive_zeros += 2;
              i += 2;
            }
          if (consecutive_zeros > best_consecutive_zeros)
            {
              best_consecutive_zeros = consecutive_zeros;
              best_start = i - consecutive_zeros;
            }
          i -= 2;
        }
    }

  /* So long as it won't require any more insns to do so, it's
     desirable to emit a small constant (in bits 0...9) in the last
     insn.  This way there is more chance that it can be combined with
     a later addressing insn to form a pre-indexed load or store
     operation.  Consider:

           *((volatile int *)0xe0000100) = 1;
           *((volatile int *)0xe0000110) = 2;

     We want this to wind up as:

            mov rA, #0xe0000000
            mov rB, #1
            str rB, [rA, #0x100]
            mov rB, #2
            str rB, [rA, #0x110]

     rather than having to synthesize both large constants from scratch.

     Therefore, we calculate how many insns would be required to emit
     the constant starting from `best_start', and also starting from
     zero (i.e. with bit 31 first to be output).  If `best_start' doesn't
     yield a shorter sequence, we may as well use zero.  */
  if (best_start != 0
      && ((((unsigned HOST_WIDE_INT) 1) << best_start) < remainder)
      && (count_insns_for_constant (remainder, 0) <=
          count_insns_for_constant (remainder, best_start)))
    best_start = 0;

  return best_start;
}

/* Emit an instruction with the indicated PATTERN.  If COND is
   non-NULL, conditionalize the execution of the instruction on COND
   being true.  */

static void
emit_constant_insn (rtx cond, rtx pattern)
{
  if (cond)
    pattern = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond), pattern);
  emit_insn (pattern);
}

/* As above, but extra parameter GENERATE which, if clear, suppresses
   RTL generation.  */
/* ??? This needs more work for thumb2.  */

static int
arm_gen_constant (enum rtx_code code, enum machine_mode mode, rtx cond,
                  HOST_WIDE_INT val, rtx target, rtx source, int subtargets,
                  int generate)
{
  int can_invert = 0;
  int can_negate = 0;
  int final_invert = 0;
  int can_negate_initial = 0;
  int can_shift = 0;
  int i;
  int num_bits_set = 0;
  int set_sign_bit_copies = 0;
  int clear_sign_bit_copies = 0;
  int clear_zero_bit_copies = 0;
  int set_zero_bit_copies = 0;
  int insns = 0;
  unsigned HOST_WIDE_INT temp1, temp2;
  unsigned HOST_WIDE_INT remainder = val & 0xffffffff;
  int step_size = TARGET_ARM ? 2 : 1;

  /* Find out which operations are safe for a given CODE.  Also do a quick
     check for degenerate cases; these can occur when DImode operations
     are split.  */
  switch (code)
    {
    case SET:
      can_invert = 1;
      can_shift = 1;
      can_negate = 1;
      break;

    case PLUS:
      can_negate = 1;
      can_negate_initial = 1;
      break;

    case IOR:
      if (remainder == 0xffffffff)
        {
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target,
                                             GEN_INT (ARM_SIGN_EXTEND (val))));
          return 1;
        }

      if (remainder == 0)
        {
          if (reload_completed && rtx_equal_p (target, source))
            return 0;

          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target, source));
          return 1;
        }

      if (TARGET_THUMB2)
        can_invert = 1;
      break;

    case AND:
      if (remainder == 0)
        {
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target, const0_rtx));
          return 1;
        }
      if (remainder == 0xffffffff)
        {
          if (reload_completed && rtx_equal_p (target, source))
            return 0;
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target, source));
          return 1;
        }
      can_invert = 1;
      break;

    case XOR:
      if (remainder == 0)
        {
          if (reload_completed && rtx_equal_p (target, source))
            return 0;
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target, source));
          return 1;
        }

      if (remainder == 0xffffffff)
        {
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target,
                                             gen_rtx_NOT (mode, source)));
          return 1;
        }
      break;

    case MINUS:
      /* We treat MINUS as (val - source), since (source - val) is always
         passed as (source + (-val)).  */
      if (remainder == 0)
        {
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target,
                                             gen_rtx_NEG (mode, source)));
          return 1;
        }
      if (const_ok_for_arm (val))
        {
          if (generate)
            emit_constant_insn (cond,
                                gen_rtx_SET (VOIDmode, target,
                                             gen_rtx_MINUS (mode, GEN_INT (val),
                                                            source)));
          return 1;
        }
      can_negate = 1;

      break;

    default:
      gcc_unreachable ();
    }

  /* If we can do it in one insn, get out quickly.  */
  if (const_ok_for_arm (val)
      || (can_negate_initial && const_ok_for_arm (-val))
      || (can_invert && const_ok_for_arm (~val)))
    {
      if (generate)
        emit_constant_insn (cond,
                            gen_rtx_SET (VOIDmode, target,
                                         (source
                                          ? gen_rtx_fmt_ee (code, mode, source,
                                                            GEN_INT (val))
                                          : GEN_INT (val))));
      return 1;
    }

  /* Calculate a few attributes that may be useful for specific
     optimizations.  */
  /* Count number of leading zeros.  */
  for (i = 31; i >= 0; i--)
    {
      if ((remainder & (1 << i)) == 0)
        clear_sign_bit_copies++;
      else
        break;
    }

  /* Count number of leading 1s.  */
  for (i = 31; i >= 0; i--)
    {
      if ((remainder & (1 << i)) != 0)
        set_sign_bit_copies++;
      else
        break;
    }

  /* Count number of trailing zeros.  */
  for (i = 0; i <= 31; i++)
    {
      if ((remainder & (1 << i)) == 0)
        clear_zero_bit_copies++;
      else
        break;
    }

  /* Count number of trailing 1s.  */
  for (i = 0; i <= 31; i++)
    {
      if ((remainder & (1 << i)) != 0)
        set_zero_bit_copies++;
      else
        break;
    }
2698
 
2699
  switch (code)
2700
    {
2701
    case SET:
2702
      /* See if we can use movw.  */
2703
      if (arm_arch_thumb2 && (remainder & 0xffff0000) == 0)
2704
        {
2705
          if (generate)
2706
            emit_constant_insn (cond, gen_rtx_SET (VOIDmode, target,
2707
                                                   GEN_INT (val)));
2708
          return 1;
2709
        }
2710
 
2711
      /* See if we can do this by sign_extending a constant that is known
2712
         to be negative.  This is a good, way of doing it, since the shift
2713
         may well merge into a subsequent insn.  */
2714
      if (set_sign_bit_copies > 1)
2715
        {
2716
          if (const_ok_for_arm
2717
              (temp1 = ARM_SIGN_EXTEND (remainder
2718
                                        << (set_sign_bit_copies - 1))))
2719
            {
2720
              if (generate)
2721
                {
2722
                  rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2723
                  emit_constant_insn (cond,
2724
                                      gen_rtx_SET (VOIDmode, new_src,
2725
                                                   GEN_INT (temp1)));
2726
                  emit_constant_insn (cond,
2727
                                      gen_ashrsi3 (target, new_src,
2728
                                                   GEN_INT (set_sign_bit_copies - 1)));
2729
                }
2730
              return 2;
2731
            }
2732
          /* For an inverted constant, we will need to set the low bits,
2733
             these will be shifted out of harm's way.  */
2734
          temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
2735
          if (const_ok_for_arm (~temp1))
2736
            {
2737
              if (generate)
2738
                {
2739
                  rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2740
                  emit_constant_insn (cond,
2741
                                      gen_rtx_SET (VOIDmode, new_src,
2742
                                                   GEN_INT (temp1)));
2743
                  emit_constant_insn (cond,
2744
                                      gen_ashrsi3 (target, new_src,
2745
                                                   GEN_INT (set_sign_bit_copies - 1)));
2746
                }
2747
              return 2;
2748
            }
2749
        }
2750
 
2751
      /* See if we can calculate the value as the difference between two
2752
         valid immediates.  */
2753
      if (clear_sign_bit_copies + clear_zero_bit_copies <= 16)
2754
        {
2755
          int topshift = clear_sign_bit_copies & ~1;
2756
 
2757
          temp1 = ARM_SIGN_EXTEND ((remainder + (0x00800000 >> topshift))
2758
                                   & (0xff000000 >> topshift));
2759
 
2760
          /* If temp1 is zero, then that means the 9 most significant
2761
             bits of remainder were 1 and we've caused it to overflow.
2762
             When topshift is 0 we don't need to do anything since we
2763
             can borrow from 'bit 32'.  */
2764
          if (temp1 == 0 && topshift != 0)
2765
            temp1 = 0x80000000 >> (topshift - 1);
2766
 
2767
          temp2 = ARM_SIGN_EXTEND (temp1 - remainder);
2768
 
2769
          if (const_ok_for_arm (temp2))
2770
            {
2771
              if (generate)
2772
                {
2773
                  rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2774
                  emit_constant_insn (cond,
2775
                                      gen_rtx_SET (VOIDmode, new_src,
2776
                                                   GEN_INT (temp1)));
2777
                  emit_constant_insn (cond,
2778
                                      gen_addsi3 (target, new_src,
2779
                                                  GEN_INT (-temp2)));
2780
                }
2781
 
2782
              return 2;
2783
            }
2784
        }
2785
 
2786
      /* See if we can generate this by setting the bottom (or the top)
2787
         16 bits, and then shifting these into the other half of the
2788
         word.  We only look for the simplest cases, to do more would cost
2789
         too much.  Be careful, however, not to generate this when the
2790
         alternative would take fewer insns.  */
2791
      if (val & 0xffff0000)
2792
        {
2793
          temp1 = remainder & 0xffff0000;
2794
          temp2 = remainder & 0x0000ffff;
2795
 
2796
          /* Overlaps outside this range are best done using other methods.  */
2797
          for (i = 9; i < 24; i++)
2798
            {
2799
              if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
2800
                  && !const_ok_for_arm (temp2))
2801
                {
2802
                  rtx new_src = (subtargets
2803
                                 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
2804
                                 : target);
2805
                  insns = arm_gen_constant (code, mode, cond, temp2, new_src,
2806
                                            source, subtargets, generate);
2807
                  source = new_src;
2808
                  if (generate)
2809
                    emit_constant_insn
2810
                      (cond,
2811
                       gen_rtx_SET
2812
                       (VOIDmode, target,
2813
                        gen_rtx_IOR (mode,
2814
                                     gen_rtx_ASHIFT (mode, source,
2815
                                                     GEN_INT (i)),
2816
                                     source)));
2817
                  return insns + 1;
2818
                }
2819
            }
2820
 
2821
          /* Don't duplicate cases already considered.  */
          for (i = 17; i < 24; i++)
            {
              if (((temp1 | (temp1 >> i)) == remainder)
                  && !const_ok_for_arm (temp1))
                {
                  rtx new_src = (subtargets
                                 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
                                 : target);
                  insns = arm_gen_constant (code, mode, cond, temp1, new_src,
                                            source, subtargets, generate);
                  source = new_src;
                  if (generate)
                    emit_constant_insn
                      (cond,
                       gen_rtx_SET (VOIDmode, target,
                                    gen_rtx_IOR
                                    (mode,
                                     gen_rtx_LSHIFTRT (mode, source,
                                                       GEN_INT (i)),
                                     source)));
                  return insns + 1;
                }
            }
        }
      break;

    case IOR:
    case XOR:
      /* If we have IOR or XOR, and the constant can be loaded in a
         single instruction, and we can find a temporary to put it in,
         then this can be done in two instructions instead of 3-4.  */
      if (subtargets
          /* TARGET can't be NULL if SUBTARGETS is 0.  */
          || (reload_completed && !reg_mentioned_p (target, source)))
        {
          if (const_ok_for_arm (ARM_SIGN_EXTEND (~val)))
            {
              if (generate)
                {
                  rtx sub = subtargets ? gen_reg_rtx (mode) : target;

                  emit_constant_insn (cond,
                                      gen_rtx_SET (VOIDmode, sub,
                                                   GEN_INT (val)));
                  emit_constant_insn (cond,
                                      gen_rtx_SET (VOIDmode, target,
                                                   gen_rtx_fmt_ee (code, mode,
                                                                   source, sub)));
                }
              return 2;
            }
        }

      if (code == XOR)
        break;

      /* Convert
           x = y | constant (where the constant consists of
                             set_sign_bit_copies leading 1s followed by
                             0s, e.g. 0xfff00000)
         to
           x = ~(~(y ashift set_sign_bit_copies) lshiftrt set_sign_bit_copies).

         This can be done in 2 instructions by using shifts with mov or mvn.
         E.g. for
           x = x | 0xfff00000;
         we generate
           mvn   r0, r0, asl #12
           mvn   r0, r0, lsr #12  */
      if (set_sign_bit_copies > 8
          && (val & (-1 << (32 - set_sign_bit_copies))) == val)
        {
          if (generate)
            {
              rtx sub = subtargets ? gen_reg_rtx (mode) : target;
              rtx shift = GEN_INT (set_sign_bit_copies);

              emit_constant_insn
                (cond,
                 gen_rtx_SET (VOIDmode, sub,
                              gen_rtx_NOT (mode,
                                           gen_rtx_ASHIFT (mode,
                                                           source,
                                                           shift))));
              emit_constant_insn
                (cond,
                 gen_rtx_SET (VOIDmode, target,
                              gen_rtx_NOT (mode,
                                           gen_rtx_LSHIFTRT (mode, sub,
                                                             shift))));
            }
          return 2;
        }
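
      /* Worked illustration (illustrative, hypothetical values): with
         set_sign_bit_copies == 12 the two instructions above compute,
         in C terms on a 32-bit unsigned y,

             target = ~(~(y << 12) >> 12);

         The logical right shift pulls zeros into the top 12 bits, so
         the final NOT forces them to ones while the low 20 bits of y
         pass through unchanged -- exactly y | 0xfff00000.  */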
2913
 
2914
      /* Convert
2915
          x = y | constant (which has set_zero_bit_copies number of trailing ones).
2916
           to
2917
          x = ~((~y lshiftrt set_zero_bit_copies) ashift set_zero_bit_copies).
2918
 
2919
          For eg. r0 = r0 | 0xfff
2920
               mvn      r0, r0, lsr #12
2921
               mvn      r0, r0, asl #12
2922
 
2923
      */
2924
      if (set_zero_bit_copies > 8
2925
          && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
2926
        {
2927
          if (generate)
2928
            {
2929
              rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2930
              rtx shift = GEN_INT (set_zero_bit_copies);
2931
 
2932
              emit_constant_insn
2933
                (cond,
2934
                 gen_rtx_SET (VOIDmode, sub,
2935
                              gen_rtx_NOT (mode,
2936
                                           gen_rtx_LSHIFTRT (mode,
2937
                                                             source,
2938
                                                             shift))));
2939
              emit_constant_insn
2940
                (cond,
2941
                 gen_rtx_SET (VOIDmode, target,
2942
                              gen_rtx_NOT (mode,
2943
                                           gen_rtx_ASHIFT (mode, sub,
2944
                                                           shift))));
2945
            }
2946
          return 2;
2947
        }
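
      /* Likewise (illustrative, hypothetical values): with
         set_zero_bit_copies == 12 this computes
         target = ~((~y >> 12) << 12); the left shift zeroes the low
         12 bits of ~y and the final NOT turns them into ones, giving
         y | 0x00000fff.  */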

      /* This will never be reached for Thumb-2 because orn is a valid
         instruction.  This is for Thumb-1 and the 32-bit ARM cases.

         Convert
           x = y | constant (such that ~constant is a valid constant)
         to
           x = ~(~y & ~constant).  */
      if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~val)))
        {
          if (generate)
            {
              rtx sub = subtargets ? gen_reg_rtx (mode) : target;
              emit_constant_insn (cond,
                                  gen_rtx_SET (VOIDmode, sub,
                                               gen_rtx_NOT (mode, source)));
              source = sub;
              if (subtargets)
                sub = gen_reg_rtx (mode);
              emit_constant_insn (cond,
                                  gen_rtx_SET (VOIDmode, sub,
                                               gen_rtx_AND (mode, source,
                                                            GEN_INT (temp1))));
              emit_constant_insn (cond,
                                  gen_rtx_SET (VOIDmode, target,
                                               gen_rtx_NOT (mode, sub)));
            }
          return 3;
        }
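
      /* Illustration (hypothetical constant): for constant 0xffffff00,
         ~constant == 0xff is a valid immediate, and by De Morgan
         y | 0xffffff00 == ~(~y & 0xff), which is exactly the
         three-instruction MVN/AND/MVN sequence emitted above.  */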
      break;

    case AND:
      /* See if two shifts will do 2 or more insns' worth of work.  */
      if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
        {
          HOST_WIDE_INT shift_mask = ((0xffffffff
                                       << (32 - clear_sign_bit_copies))
                                      & 0xffffffff);

          if ((remainder | shift_mask) != 0xffffffff)
            {
              if (generate)
                {
                  rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
                  insns = arm_gen_constant (AND, mode, cond,
                                            remainder | shift_mask,
                                            new_src, source, subtargets, 1);
                  source = new_src;
                }
              else
                {
                  rtx targ = subtargets ? NULL_RTX : target;
                  insns = arm_gen_constant (AND, mode, cond,
                                            remainder | shift_mask,
                                            targ, source, subtargets, 0);
                }
            }

          if (generate)
            {
              rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
              rtx shift = GEN_INT (clear_sign_bit_copies);

              emit_insn (gen_ashlsi3 (new_src, source, shift));
              emit_insn (gen_lshrsi3 (target, new_src, shift));
            }

          return insns + 2;
        }

      if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
        {
          HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;

          if ((remainder | shift_mask) != 0xffffffff)
            {
              if (generate)
                {
                  rtx new_src = subtargets ? gen_reg_rtx (mode) : target;

                  insns = arm_gen_constant (AND, mode, cond,
                                            remainder | shift_mask,
                                            new_src, source, subtargets, 1);
                  source = new_src;
                }
              else
                {
                  rtx targ = subtargets ? NULL_RTX : target;

                  insns = arm_gen_constant (AND, mode, cond,
                                            remainder | shift_mask,
                                            targ, source, subtargets, 0);
                }
            }

          if (generate)
            {
              rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
              rtx shift = GEN_INT (clear_zero_bit_copies);

              emit_insn (gen_lshrsi3 (new_src, source, shift));
              emit_insn (gen_ashlsi3 (target, new_src, shift));
            }

          return insns + 2;
        }

      break;

    default:
      break;
    }

  for (i = 0; i < 32; i++)
    if (remainder & (1 << i))
      num_bits_set++;

  if ((code == AND)
      || (code != IOR && can_invert && num_bits_set > 16))
    remainder ^= 0xffffffff;
  else if (code == PLUS && num_bits_set > 16)
    remainder = (-remainder) & 0xffffffff;

  /* For XOR, if more than half the bits are set and there's a sequence
     of more than 8 consecutive ones in the pattern then we can XOR by the
     inverted constant and then invert the final result; this may save an
     instruction and might also lead to the final mvn being merged with
     some other operation.  */
  else if (code == XOR && num_bits_set > 16
           && (count_insns_for_constant (remainder ^ 0xffffffff,
                                         find_best_start
                                         (remainder ^ 0xffffffff))
               < count_insns_for_constant (remainder,
                                           find_best_start (remainder))))
    {
      remainder ^= 0xffffffff;
      final_invert = 1;
    }
  else
    {
      can_invert = 0;
      can_negate = 0;
    }

  /* Now try and find a way of doing the job in either two or three
     instructions.
     We start by looking for the largest block of zeros that is aligned
     on a 2-bit boundary; we then fill up the temps, wrapping around to
     the top of the word when we drop off the bottom.
     In the worst case this code should produce no more than four insns.
     Thumb-2 constants are shifted, not rotated, so the MSB is always
     the best place to start.  */

  /* ??? Use Thumb-2 replicated constants when the high and low halfwords
     are the same.  */
  {
    /* Now start emitting the insns.  */
    i = find_best_start (remainder);
    do
      {
        int end;

        if (i <= 0)
          i += 32;
        if (remainder & (3 << (i - 2)))
          {
            end = i - 8;
            if (end < 0)
              end += 32;
            temp1 = remainder & ((0x0ff << end)
                                 | ((i < end) ? (0xff >> (32 - end)) : 0));
            remainder &= ~temp1;

            if (generate)
              {
                rtx new_src, temp1_rtx;

                if (code == SET || code == MINUS)
                  {
                    new_src = (subtargets ? gen_reg_rtx (mode) : target);
                    if (can_invert && code != MINUS)
                      temp1 = ~temp1;
                  }
                else
                  {
                    if ((final_invert || remainder) && subtargets)
                      new_src = gen_reg_rtx (mode);
                    else
                      new_src = target;
                    if (can_invert)
                      temp1 = ~temp1;
                    else if (can_negate)
                      temp1 = -temp1;
                  }

                temp1 = trunc_int_for_mode (temp1, mode);
                temp1_rtx = GEN_INT (temp1);

                if (code == SET)
                  ;
                else if (code == MINUS)
                  temp1_rtx = gen_rtx_MINUS (mode, temp1_rtx, source);
                else
                  temp1_rtx = gen_rtx_fmt_ee (code, mode, source, temp1_rtx);

                emit_constant_insn (cond,
                                    gen_rtx_SET (VOIDmode, new_src,
                                                 temp1_rtx));
                source = new_src;
              }

            if (code == SET)
              {
                can_invert = 0;
                code = PLUS;
              }
            else if (code == MINUS)
              code = PLUS;

            insns++;
            i -= 8 - step_size;
          }
        /* ARM allows rotates by a multiple of two; Thumb-2 allows
           arbitrary shifts.  */
        i -= step_size;
      }
    while (remainder);
  }

  if (final_invert)
    {
      if (generate)
        emit_constant_insn (cond, gen_rtx_SET (VOIDmode, target,
                                               gen_rtx_NOT (mode, source)));
      insns++;
    }

  return insns;
}
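
/* A minimal standalone sketch (illustrative only, not part of the
   original sources) of the chunking loop above: peel one 8-bit field
   per instruction, aligned to an even bit position.  It deliberately
   ignores the rotate-wraparound case that find_best_start handles, so
   it can overcount for constants whose chunk spans bit 31 into bit 0.  */
#if 0
#include <stdint.h>

static int
count_arm_immediates_sketch (uint32_t val)
{
  int insns = 0;

  while (val)
    {
      /* Lowest set bit, rounded down to an even rotation boundary.  */
      int low = __builtin_ctz (val) & ~1;

      /* One MOV/ORR-style immediate covers an 8-bit field here.  */
      val &= ~(0xffu << low);
      insns++;
    }

  return insns;
}
#endif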

/* Canonicalize a comparison so that we are more likely to recognize it.
   This can be done for a few constant compares, where we can make the
   immediate value easier to load.  */

enum rtx_code
arm_canonicalize_comparison (enum rtx_code code, enum machine_mode mode,
                             rtx * op1)
{
  unsigned HOST_WIDE_INT i = INTVAL (*op1);
  unsigned HOST_WIDE_INT maxval;
  maxval = (((unsigned HOST_WIDE_INT) 1) << (GET_MODE_BITSIZE(mode) - 1)) - 1;

  switch (code)
    {
    case EQ:
    case NE:
      return code;

    case GT:
    case LE:
      if (i != maxval
          && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
        {
          *op1 = GEN_INT (i + 1);
          return code == GT ? GE : LT;
        }
      break;

    case GE:
    case LT:
      if (i != ~maxval
          && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
        {
          *op1 = GEN_INT (i - 1);
          return code == GE ? GT : LE;
        }
      break;

    case GTU:
    case LEU:
      if (i != ~((unsigned HOST_WIDE_INT) 0)
          && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
        {
          *op1 = GEN_INT (i + 1);
          return code == GTU ? GEU : LTU;
        }
      break;

    case GEU:
    case LTU:
      if (i != 0
          && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
        {
          *op1 = GEN_INT (i - 1);
          return code == GEU ? GTU : LEU;
        }
      break;

    default:
      gcc_unreachable ();
    }

  return code;
}
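
/* Illustration (hypothetical comparison): for "x > 0xffffff" (GT) the
   constant 0xffffff is not a valid ARM immediate, but 0x1000000 is, so
   the comparison is rewritten as "x >= 0x1000000" (GE); for integer
   operands the two are equivalent and the new immediate is cheaper to
   load.  */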


/* Define how to find the value returned by a function.  */

static rtx
arm_function_value (const_tree type, const_tree func,
                    bool outgoing ATTRIBUTE_UNUSED)
{
  enum machine_mode mode;
  int unsignedp ATTRIBUTE_UNUSED;
  rtx r ATTRIBUTE_UNUSED;

  mode = TYPE_MODE (type);

  if (TARGET_AAPCS_BASED)
    return aapcs_allocate_return_reg (mode, type, func);

  /* Promote integer types.  */
  if (INTEGRAL_TYPE_P (type))
    mode = arm_promote_function_mode (type, mode, &unsignedp, func, 1);

  /* Promote small structs returned in a register to full-word size
     for big-endian AAPCS.  */
  if (arm_return_in_msb (type))
    {
      HOST_WIDE_INT size = int_size_in_bytes (type);
      if (size % UNITS_PER_WORD != 0)
        {
          size += UNITS_PER_WORD - size % UNITS_PER_WORD;
          mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
        }
    }

  return LIBCALL_VALUE (mode);
}

static int
libcall_eq (const void *p1, const void *p2)
{
  return rtx_equal_p ((const_rtx) p1, (const_rtx) p2);
}

static hashval_t
libcall_hash (const void *p1)
{
  return hash_rtx ((const_rtx) p1, VOIDmode, NULL, NULL, FALSE);
}

static void
add_libcall (htab_t htab, rtx libcall)
{
  *htab_find_slot (htab, libcall, INSERT) = libcall;
}

static bool
arm_libcall_uses_aapcs_base (const_rtx libcall)
{
  static bool init_done = false;
  static htab_t libcall_htab;

  if (!init_done)
    {
      init_done = true;

      libcall_htab = htab_create (31, libcall_hash, libcall_eq,
                                  NULL);
      add_libcall (libcall_htab,
                   convert_optab_libfunc (sfloat_optab, SFmode, SImode));
      add_libcall (libcall_htab,
                   convert_optab_libfunc (sfloat_optab, DFmode, SImode));
      add_libcall (libcall_htab,
                   convert_optab_libfunc (sfloat_optab, SFmode, DImode));
      add_libcall (libcall_htab,
                   convert_optab_libfunc (sfloat_optab, DFmode, DImode));

      add_libcall (libcall_htab,
                   convert_optab_libfunc (ufloat_optab, SFmode, SImode));
      add_libcall (libcall_htab,
                   convert_optab_libfunc (ufloat_optab, DFmode, SImode));
      add_libcall (libcall_htab,
                   convert_optab_libfunc (ufloat_optab, SFmode, DImode));
      add_libcall (libcall_htab,
                   convert_optab_libfunc (ufloat_optab, DFmode, DImode));

      add_libcall (libcall_htab,
                   convert_optab_libfunc (sext_optab, SFmode, HFmode));
      add_libcall (libcall_htab,
                   convert_optab_libfunc (trunc_optab, HFmode, SFmode));
      add_libcall (libcall_htab,
                   convert_optab_libfunc (sfix_optab, DImode, DFmode));
      add_libcall (libcall_htab,
                   convert_optab_libfunc (ufix_optab, DImode, DFmode));
      add_libcall (libcall_htab,
                   convert_optab_libfunc (sfix_optab, DImode, SFmode));
      add_libcall (libcall_htab,
                   convert_optab_libfunc (ufix_optab, DImode, SFmode));
    }

  return libcall && htab_find (libcall_htab, libcall) != NULL;
}

rtx
arm_libcall_value (enum machine_mode mode, const_rtx libcall)
{
  if (TARGET_AAPCS_BASED && arm_pcs_default != ARM_PCS_AAPCS
      && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      /* The following libcalls return their result in integer registers,
         even though they return a floating point value.  */
      if (arm_libcall_uses_aapcs_base (libcall))
        return gen_rtx_REG (mode, ARG_REGISTER(1));
    }

  return LIBCALL_VALUE (mode);
}

/* Determine the amount of memory needed to store the possible return
   registers of an untyped call.  */
int
arm_apply_result_size (void)
{
  int size = 16;

  if (TARGET_32BIT)
    {
      if (TARGET_HARD_FLOAT_ABI)
        {
          if (TARGET_VFP)
            size += 32;
          if (TARGET_FPA)
            size += 12;
          if (TARGET_MAVERICK)
            size += 8;
        }
      if (TARGET_IWMMXT_ABI)
        size += 8;
    }

  return size;
}
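
/* Illustration of the sums above (values follow from the code): on a
   32-bit target with a VFP hard-float ABI the result is 16 bytes for
   r0-r3 plus 32 for the VFP return registers, i.e. 48; the IWMMXT ABI
   reserves a further 8 bytes.  */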

/* Decide whether TYPE should be returned in memory (true)
   or in a register (false).  FNTYPE is the type of the function making
   the call.  */
static bool
arm_return_in_memory (const_tree type, const_tree fntype)
{
  HOST_WIDE_INT size;

  size = int_size_in_bytes (type);  /* Negative if not fixed size.  */

  if (TARGET_AAPCS_BASED)
    {
      /* Simple, non-aggregate types (i.e. not including vectors and
         complex) are always returned in a register (or registers).
         We don't care about which register here, so we can short-cut
         some of the detail.  */
      if (!AGGREGATE_TYPE_P (type)
          && TREE_CODE (type) != VECTOR_TYPE
          && TREE_CODE (type) != COMPLEX_TYPE)
        return false;

      /* Any return value that is no larger than one word can be
         returned in r0.  */
      if (((unsigned HOST_WIDE_INT) size) <= UNITS_PER_WORD)
        return false;

      /* Check any available co-processors to see if they accept the
         type as a register candidate (VFP, for example, can return
         some aggregates in consecutive registers).  These aren't
         available if the call is variadic.  */
      if (aapcs_select_return_coproc (type, fntype) >= 0)
        return false;

      /* Vector values should be returned using ARM registers, not
         memory (unless they're over 16 bytes, which will break since
         we only have four call-clobbered registers to play with).  */
      if (TREE_CODE (type) == VECTOR_TYPE)
        return (size < 0 || size > (4 * UNITS_PER_WORD));

      /* The rest go in memory.  */
      return true;
    }

  if (TREE_CODE (type) == VECTOR_TYPE)
    return (size < 0 || size > (4 * UNITS_PER_WORD));

  if (!AGGREGATE_TYPE_P (type)
      && TREE_CODE (type) != VECTOR_TYPE)
    /* All simple types are returned in registers.  */
    return false;

  if (arm_abi != ARM_ABI_APCS)
    {
      /* ATPCS and later return aggregate types in memory only if they are
         larger than a word (or are variable size).  */
      return (size < 0 || size > UNITS_PER_WORD);
    }

  /* For the arm-wince targets we choose to be compatible with Microsoft's
     ARM and Thumb compilers, which always return aggregates in memory.  */
#ifndef ARM_WINCE
  /* All structures/unions bigger than one word are returned in memory.
     Also catch the case where int_size_in_bytes returns -1.  In this case
     the aggregate is either huge or of variable size, and in either case
     we will want to return it via memory and not in a register.  */
  if (size < 0 || size > UNITS_PER_WORD)
    return true;

  if (TREE_CODE (type) == RECORD_TYPE)
    {
      tree field;

      /* For a struct the APCS says that we only return in a register
         if the type is 'integer like' and every addressable element
         has an offset of zero.  For practical purposes this means
         that the structure can have at most one non bit-field element
         and that this element must be the first one in the structure.  */

      /* Find the first field, ignoring non FIELD_DECL things which will
         have been created by C++.  */
      for (field = TYPE_FIELDS (type);
           field && TREE_CODE (field) != FIELD_DECL;
           field = TREE_CHAIN (field))
        continue;

      if (field == NULL)
        return false; /* An empty structure.  Allowed by an extension to ANSI C.  */

      /* Check that the first field is valid for returning in a register.  */

      /* ... Floats are not allowed.  */
      if (FLOAT_TYPE_P (TREE_TYPE (field)))
        return true;

      /* ... Aggregates that are not themselves valid for returning in
         a register are not allowed.  */
      if (arm_return_in_memory (TREE_TYPE (field), NULL_TREE))
        return true;

      /* Now check the remaining fields, if any.  Only bitfields are allowed,
         since they are not addressable.  */
      for (field = TREE_CHAIN (field);
           field;
           field = TREE_CHAIN (field))
        {
          if (TREE_CODE (field) != FIELD_DECL)
            continue;

          if (!DECL_BIT_FIELD_TYPE (field))
            return true;
        }

      return false;
    }

  if (TREE_CODE (type) == UNION_TYPE)
    {
      tree field;

      /* Unions can be returned in registers if every element is
         integral, or can be returned in an integer register.  */
      for (field = TYPE_FIELDS (type);
           field;
           field = TREE_CHAIN (field))
        {
          if (TREE_CODE (field) != FIELD_DECL)
            continue;

          if (FLOAT_TYPE_P (TREE_TYPE (field)))
            return true;

          if (arm_return_in_memory (TREE_TYPE (field), NULL_TREE))
            return true;
        }

      return false;
    }
#endif /* not ARM_WINCE */

  /* Return all other types in memory.  */
  return true;
}
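
/* Illustration (hypothetical types): under the APCS rules above,
   "struct { int i; }" is integer-like and is returned in a register,
   while "struct { float f; }" and the two-word "struct { int a, b; }"
   are returned in memory; under ATPCS/AAPCS the deciding factor is
   simply whether the aggregate fits in one word.  */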

/* Indicate whether or not words of a double are in big-endian order.  */

int
arm_float_words_big_endian (void)
{
  if (TARGET_MAVERICK)
    return 0;

  /* For FPA, float words are always big-endian.  For VFP, float words
     follow the memory system mode.  */

  if (TARGET_FPA)
    return 1;

  if (TARGET_VFP)
    return (TARGET_BIG_END ? 1 : 0);

  return 1;
}

const struct pcs_attribute_arg
{
  const char *arg;
  enum arm_pcs value;
} pcs_attribute_args[] =
  {
    {"aapcs", ARM_PCS_AAPCS},
    {"aapcs-vfp", ARM_PCS_AAPCS_VFP},
#if 0
    /* We could recognize these, but changes would be needed elsewhere
     * to implement them.  */
    {"aapcs-iwmmxt", ARM_PCS_AAPCS_IWMMXT},
    {"atpcs", ARM_PCS_ATPCS},
    {"apcs", ARM_PCS_APCS},
#endif
    {NULL, ARM_PCS_UNKNOWN}
  };

static enum arm_pcs
arm_pcs_from_attribute (tree attr)
{
  const struct pcs_attribute_arg *ptr;
  const char *arg;

  /* Get the value of the argument.  */
  if (TREE_VALUE (attr) == NULL_TREE
      || TREE_CODE (TREE_VALUE (attr)) != STRING_CST)
    return ARM_PCS_UNKNOWN;

  arg = TREE_STRING_POINTER (TREE_VALUE (attr));

  /* Check it against the list of known arguments.  */
  for (ptr = pcs_attribute_args; ptr->arg != NULL; ptr++)
    if (streq (arg, ptr->arg))
      return ptr->value;

  /* An unrecognized PCS variant.  */
  return ARM_PCS_UNKNOWN;
}
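
/* Usage illustration (hypothetical declaration): the strings above
   come from the "pcs" function attribute, e.g.

     double f (double) __attribute__ ((pcs ("aapcs-vfp")));

   which requests the VFP variant of the AAPCS for calls to f.  */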

/* Get the PCS variant to use for this call.  TYPE is the function's type
   specification, DECL is the specific declaration.  DECL may be null if
   the call could be indirect or if this is a library call.  */
static enum arm_pcs
arm_get_pcs_model (const_tree type, const_tree decl)
{
  bool user_convention = false;
  enum arm_pcs user_pcs = arm_pcs_default;
  tree attr;

  gcc_assert (type);

  attr = lookup_attribute ("pcs", TYPE_ATTRIBUTES (type));
  if (attr)
    {
      user_pcs = arm_pcs_from_attribute (TREE_VALUE (attr));
      user_convention = true;
    }

  if (TARGET_AAPCS_BASED)
    {
      /* Detect varargs functions.  These always use the base rules
         (no argument is ever a candidate for a co-processor
         register).  */
      bool base_rules = (TYPE_ARG_TYPES (type) != 0
                         && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (type)))
                             != void_type_node));

      if (user_convention)
        {
          if (user_pcs > ARM_PCS_AAPCS_LOCAL)
            sorry ("Non-AAPCS derived PCS variant");
          else if (base_rules && user_pcs != ARM_PCS_AAPCS)
            error ("Variadic functions must use the base AAPCS variant");
        }

      if (base_rules)
        return ARM_PCS_AAPCS;
      else if (user_convention)
        return user_pcs;
      else if (decl && flag_unit_at_a_time)
        {
          /* Local functions never leak outside this compilation unit,
             so we are free to use whatever conventions are
             appropriate.  */
          /* FIXME: remove CONST_CAST_TREE when cgraph is constified.  */
          struct cgraph_local_info *i = cgraph_local_info (CONST_CAST_TREE(decl));
          if (i && i->local)
            return ARM_PCS_AAPCS_LOCAL;
        }
    }
  else if (user_convention && user_pcs != arm_pcs_default)
    sorry ("PCS variant");

  /* For everything else we use the target's default.  */
  return arm_pcs_default;
}


static void
aapcs_vfp_cum_init (CUMULATIVE_ARGS *pcum  ATTRIBUTE_UNUSED,
                    const_tree fntype ATTRIBUTE_UNUSED,
                    rtx libcall ATTRIBUTE_UNUSED,
                    const_tree fndecl ATTRIBUTE_UNUSED)
{
  /* Record the unallocated VFP registers.  */
  pcum->aapcs_vfp_regs_free = (1 << NUM_VFP_ARG_REGS) - 1;
  pcum->aapcs_vfp_reg_alloc = 0;
}

/* Walk down the type tree of TYPE counting consecutive base elements.
   If *MODEP is VOIDmode, then set it to the first valid floating point
   type.  If a non-floating point type is found, or if a floating point
   type that doesn't match a non-VOIDmode *MODEP is found, then return
   -1; otherwise return the count in the sub-tree.  */
static int
aapcs_vfp_sub_candidate (const_tree type, enum machine_mode *modep)
{
  enum machine_mode mode;
  HOST_WIDE_INT size;

  switch (TREE_CODE (type))
    {
    case REAL_TYPE:
      mode = TYPE_MODE (type);
      if (mode != DFmode && mode != SFmode)
        return -1;

      if (*modep == VOIDmode)
        *modep = mode;

      if (*modep == mode)
        return 1;

      break;

    case COMPLEX_TYPE:
      mode = TYPE_MODE (TREE_TYPE (type));
      if (mode != DFmode && mode != SFmode)
        return -1;

      if (*modep == VOIDmode)
        *modep = mode;

      if (*modep == mode)
        return 2;

      break;

    case VECTOR_TYPE:
      /* Use V2SImode and V4SImode as representatives of all 64-bit
         and 128-bit vector types, whether or not those modes are
         supported with the present options.  */
      size = int_size_in_bytes (type);
      switch (size)
        {
        case 8:
          mode = V2SImode;
          break;
        case 16:
          mode = V4SImode;
          break;
        default:
          return -1;
        }

      if (*modep == VOIDmode)
        *modep = mode;

      /* Vector modes are considered to be opaque: two vectors are
         equivalent for the purposes of being homogeneous aggregates
         if they are the same size.  */
      if (*modep == mode)
        return 1;

      break;

    case ARRAY_TYPE:
      {
        int count;
        tree index = TYPE_DOMAIN (type);

        /* Can't handle incomplete types.  */
        if (!COMPLETE_TYPE_P(type))
          return -1;

        count = aapcs_vfp_sub_candidate (TREE_TYPE (type), modep);
        if (count == -1
            || !index
            || !TYPE_MAX_VALUE (index)
            || !host_integerp (TYPE_MAX_VALUE (index), 1)
            || !TYPE_MIN_VALUE (index)
            || !host_integerp (TYPE_MIN_VALUE (index), 1)
            || count < 0)
          return -1;

        count *= (1 + tree_low_cst (TYPE_MAX_VALUE (index), 1)
                      - tree_low_cst (TYPE_MIN_VALUE (index), 1));

        /* There must be no padding.  */
        if (!host_integerp (TYPE_SIZE (type), 1)
            || (tree_low_cst (TYPE_SIZE (type), 1)
                != count * GET_MODE_BITSIZE (*modep)))
          return -1;

        return count;
      }

    case RECORD_TYPE:
      {
        int count = 0;
        int sub_count;
        tree field;

        /* Can't handle incomplete types.  */
        if (!COMPLETE_TYPE_P(type))
          return -1;

        for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
          {
            if (TREE_CODE (field) != FIELD_DECL)
              continue;

            sub_count = aapcs_vfp_sub_candidate (TREE_TYPE (field), modep);
            if (sub_count < 0)
              return -1;
            count += sub_count;
          }

        /* There must be no padding.  */
        if (!host_integerp (TYPE_SIZE (type), 1)
            || (tree_low_cst (TYPE_SIZE (type), 1)
                != count * GET_MODE_BITSIZE (*modep)))
          return -1;

        return count;
      }

    case UNION_TYPE:
    case QUAL_UNION_TYPE:
      {
        /* These aren't very interesting except in a degenerate case.  */
        int count = 0;
        int sub_count;
        tree field;

        /* Can't handle incomplete types.  */
        if (!COMPLETE_TYPE_P(type))
          return -1;

        for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
          {
            if (TREE_CODE (field) != FIELD_DECL)
              continue;

            sub_count = aapcs_vfp_sub_candidate (TREE_TYPE (field), modep);
            if (sub_count < 0)
              return -1;
            count = count > sub_count ? count : sub_count;
          }

        /* There must be no padding.  */
        if (!host_integerp (TYPE_SIZE (type), 1)
            || (tree_low_cst (TYPE_SIZE (type), 1)
                != count * GET_MODE_BITSIZE (*modep)))
          return -1;

        return count;
      }

    default:
      break;
    }

  return -1;
}
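
/* Illustration (hypothetical types): for "struct { double x, y; }" the
   walk above returns 2 with *MODEP == DFmode, making the struct a
   homogeneous aggregate eligible for consecutive VFP registers; for
   "struct { float f; double d; }" the second field's DFmode does not
   match the recorded SFmode, so the result is -1.  */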

/* Return true if PCS_VARIANT should use VFP registers.  */
static bool
use_vfp_abi (enum arm_pcs pcs_variant, bool is_double)
{
  if (pcs_variant == ARM_PCS_AAPCS_VFP)
    return true;

  if (pcs_variant != ARM_PCS_AAPCS_LOCAL)
    return false;

  return (TARGET_32BIT && TARGET_VFP && TARGET_HARD_FLOAT
          && (TARGET_VFP_DOUBLE || !is_double));
}

static bool
aapcs_vfp_is_call_or_return_candidate (enum arm_pcs pcs_variant,
                                       enum machine_mode mode, const_tree type,
                                       enum machine_mode *base_mode, int *count)
{
  enum machine_mode new_mode = VOIDmode;

  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      || GET_MODE_CLASS (mode) == MODE_VECTOR_INT
      || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
    {
      *count = 1;
      new_mode = mode;
    }
  else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
    {
      *count = 2;
      new_mode = (mode == DCmode ? DFmode : SFmode);
    }
  else if (type && (mode == BLKmode || TREE_CODE (type) == VECTOR_TYPE))
    {
      int ag_count = aapcs_vfp_sub_candidate (type, &new_mode);

      if (ag_count > 0 && ag_count <= 4)
        *count = ag_count;
      else
        return false;
    }
  else
    return false;

  if (!use_vfp_abi (pcs_variant, ARM_NUM_REGS (new_mode) > 1))
    return false;

  *base_mode = new_mode;
  return true;
}

static bool
aapcs_vfp_is_return_candidate (enum arm_pcs pcs_variant,
                               enum machine_mode mode, const_tree type)
{
  int count ATTRIBUTE_UNUSED;
  enum machine_mode ag_mode ATTRIBUTE_UNUSED;

  if (!use_vfp_abi (pcs_variant, false))
    return false;
  return aapcs_vfp_is_call_or_return_candidate (pcs_variant, mode, type,
                                                &ag_mode, &count);
}

static bool
aapcs_vfp_is_call_candidate (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
                             const_tree type)
{
  if (!use_vfp_abi (pcum->pcs_variant, false))
    return false;

  return aapcs_vfp_is_call_or_return_candidate (pcum->pcs_variant, mode, type,
                                                &pcum->aapcs_vfp_rmode,
                                                &pcum->aapcs_vfp_rcount);
}

static bool
aapcs_vfp_allocate (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
                    const_tree type  ATTRIBUTE_UNUSED)
{
  int shift = GET_MODE_SIZE (pcum->aapcs_vfp_rmode) / GET_MODE_SIZE (SFmode);
  unsigned mask = (1 << (shift * pcum->aapcs_vfp_rcount)) - 1;
  int regno;

  for (regno = 0; regno < NUM_VFP_ARG_REGS; regno += shift)
    if (((pcum->aapcs_vfp_regs_free >> regno) & mask) == mask)
      {
        pcum->aapcs_vfp_reg_alloc = mask << regno;
        if (mode == BLKmode || (mode == TImode && !TARGET_NEON))
          {
            int i;
            int rcount = pcum->aapcs_vfp_rcount;
            int rshift = shift;
            enum machine_mode rmode = pcum->aapcs_vfp_rmode;
            rtx par;
            if (!TARGET_NEON)
              {
                /* Avoid using unsupported vector modes.  */
                if (rmode == V2SImode)
                  rmode = DImode;
                else if (rmode == V4SImode)
                  {
                    rmode = DImode;
                    rcount *= 2;
                    rshift /= 2;
                  }
              }
            par = gen_rtx_PARALLEL (mode, rtvec_alloc (rcount));
            for (i = 0; i < rcount; i++)
              {
                rtx tmp = gen_rtx_REG (rmode,
                                       FIRST_VFP_REGNUM + regno + i * rshift);
                tmp = gen_rtx_EXPR_LIST
                  (VOIDmode, tmp,
                   GEN_INT (i * GET_MODE_SIZE (rmode)));
                XVECEXP (par, 0, i) = tmp;
              }

            pcum->aapcs_reg = par;
          }
        else
          pcum->aapcs_reg = gen_rtx_REG (mode, FIRST_VFP_REGNUM + regno);
        return true;
      }
  return false;
}
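
/* Illustration of the mask arithmetic above (hypothetical argument):
   for a homogeneous aggregate of two doubles, aapcs_vfp_rmode is
   DFmode, so shift == 2 (two SFmode slots per register) and
   mask == (1 << 4) - 1 == 0xf; the loop then scans regno 0, 2, 4, ...
   for four consecutive free single-precision slots, i.e. s0-s3
   (d0/d1) first, then s2-s5, and so on.  */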

static rtx
aapcs_vfp_allocate_return_reg (enum arm_pcs pcs_variant,
                               enum machine_mode mode,
                               const_tree type)
{
  if (!use_vfp_abi (pcs_variant, false))
    return NULL_RTX;

  if (mode == BLKmode || (mode == TImode && !TARGET_NEON))
    {
      int count;
      enum machine_mode ag_mode;
      int i;
      rtx par;
      int shift;

      aapcs_vfp_is_call_or_return_candidate (pcs_variant, mode, type,
                                             &ag_mode, &count);

      if (!TARGET_NEON)
        {
          if (ag_mode == V2SImode)
            ag_mode = DImode;
          else if (ag_mode == V4SImode)
            {
              ag_mode = DImode;
              count *= 2;
            }
        }
      shift = GET_MODE_SIZE(ag_mode) / GET_MODE_SIZE(SFmode);
      par = gen_rtx_PARALLEL (mode, rtvec_alloc (count));
      for (i = 0; i < count; i++)
        {
          rtx tmp = gen_rtx_REG (ag_mode, FIRST_VFP_REGNUM + i * shift);
          tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp,
                                   GEN_INT (i * GET_MODE_SIZE (ag_mode)));
          XVECEXP (par, 0, i) = tmp;
        }

      return par;
    }

  return gen_rtx_REG (mode, FIRST_VFP_REGNUM);
}

static void
aapcs_vfp_advance (CUMULATIVE_ARGS *pcum  ATTRIBUTE_UNUSED,
                   enum machine_mode mode  ATTRIBUTE_UNUSED,
                   const_tree type  ATTRIBUTE_UNUSED)
{
  pcum->aapcs_vfp_regs_free &= ~pcum->aapcs_vfp_reg_alloc;
  pcum->aapcs_vfp_reg_alloc = 0;
  return;
}

#define AAPCS_CP(X)                             \
  {                                             \
    aapcs_ ## X ## _cum_init,                   \
    aapcs_ ## X ## _is_call_candidate,          \
    aapcs_ ## X ## _allocate,                   \
    aapcs_ ## X ## _is_return_candidate,        \
    aapcs_ ## X ## _allocate_return_reg,        \
    aapcs_ ## X ## _advance                     \
  }

/* Table of co-processors that can be used to pass arguments in
   registers.  Ideally no argument should be a candidate for more than
   one co-processor table entry, but the table is processed in order
   and stops after the first match.  If that entry then fails to put
   the argument into a co-processor register, the argument will go on
   the stack.  */
static struct
{
  /* Initialize co-processor related state in CUMULATIVE_ARGS structure.  */
  void (*cum_init) (CUMULATIVE_ARGS *, const_tree, rtx, const_tree);

  /* Return true if an argument of mode MODE (or type TYPE if MODE is
     BLKmode) is a candidate for this co-processor's registers; this
     function should ignore any position-dependent state in
     CUMULATIVE_ARGS and only use call-type dependent information.  */
  bool (*is_call_candidate) (CUMULATIVE_ARGS *, enum machine_mode, const_tree);

  /* Return true if the argument does get a co-processor register; it
     should set aapcs_reg to an RTX of the register allocated as is
     required for a return from FUNCTION_ARG.  */
  bool (*allocate) (CUMULATIVE_ARGS *, enum machine_mode, const_tree);

  /* Return true if a result of mode MODE (or type TYPE if MODE is
     BLKmode) can be returned in this co-processor's registers.  */
  bool (*is_return_candidate) (enum arm_pcs, enum machine_mode, const_tree);

  /* Allocate and return an RTX element to hold the return type of a
     call; this routine must not fail and will only be called if
     is_return_candidate returned true with the same parameters.  */
  rtx (*allocate_return_reg) (enum arm_pcs, enum machine_mode, const_tree);

  /* Finish processing this argument and prepare to start processing
     the next one.  */
  void (*advance) (CUMULATIVE_ARGS *, enum machine_mode, const_tree);
} aapcs_cp_arg_layout[ARM_NUM_COPROC_SLOTS] =
  {
    AAPCS_CP(vfp)
  };

#undef AAPCS_CP
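
/* For reference, AAPCS_CP(vfp) pastes together the VFP entry points
   defined above, expanding to

     { aapcs_vfp_cum_init, aapcs_vfp_is_call_candidate,
       aapcs_vfp_allocate, aapcs_vfp_is_return_candidate,
       aapcs_vfp_allocate_return_reg, aapcs_vfp_advance }

   so supporting another co-processor only requires defining the six
   functions and adding one AAPCS_CP line to the table.  */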

static int
aapcs_select_call_coproc (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
                          tree type)
{
  int i;

  for (i = 0; i < ARM_NUM_COPROC_SLOTS; i++)
    if (aapcs_cp_arg_layout[i].is_call_candidate (pcum, mode, type))
      return i;

  return -1;
}

static int
aapcs_select_return_coproc (const_tree type, const_tree fntype)
{
  /* We aren't passed a decl, so we can't check that a call is local.
     However, it isn't clear that that would be a win anyway, since it
     might limit some tail-calling opportunities.  */
  enum arm_pcs pcs_variant;

  if (fntype)
    {
      const_tree fndecl = NULL_TREE;

      if (TREE_CODE (fntype) == FUNCTION_DECL)
        {
          fndecl = fntype;
          fntype = TREE_TYPE (fntype);
        }

      pcs_variant = arm_get_pcs_model (fntype, fndecl);
    }
  else
    pcs_variant = arm_pcs_default;

  if (pcs_variant != ARM_PCS_AAPCS)
    {
      int i;

      for (i = 0; i < ARM_NUM_COPROC_SLOTS; i++)
        if (aapcs_cp_arg_layout[i].is_return_candidate (pcs_variant,
                                                        TYPE_MODE (type),
                                                        type))
          return i;
    }
  return -1;
}

static rtx
aapcs_allocate_return_reg (enum machine_mode mode, const_tree type,
                           const_tree fntype)
{
  /* We aren't passed a decl, so we can't check that a call is local.
     However, it isn't clear that that would be a win anyway, since it
     might limit some tail-calling opportunities.  */
  enum arm_pcs pcs_variant;
  int unsignedp ATTRIBUTE_UNUSED;

  if (fntype)
    {
      const_tree fndecl = NULL_TREE;

      if (TREE_CODE (fntype) == FUNCTION_DECL)
        {
          fndecl = fntype;
          fntype = TREE_TYPE (fntype);
        }

      pcs_variant = arm_get_pcs_model (fntype, fndecl);
    }
  else
    pcs_variant = arm_pcs_default;

  /* Promote integer types.  */
  if (type && INTEGRAL_TYPE_P (type))
    mode = arm_promote_function_mode (type, mode, &unsignedp, fntype, 1);

  if (pcs_variant != ARM_PCS_AAPCS)
    {
      int i;

      for (i = 0; i < ARM_NUM_COPROC_SLOTS; i++)
        if (aapcs_cp_arg_layout[i].is_return_candidate (pcs_variant, mode,
                                                        type))
          return aapcs_cp_arg_layout[i].allocate_return_reg (pcs_variant,
                                                             mode, type);
    }

  /* Promote small structs returned in a register to full-word size
     for big-endian AAPCS.  */
  if (type && arm_return_in_msb (type))
    {
      HOST_WIDE_INT size = int_size_in_bytes (type);
      if (size % UNITS_PER_WORD != 0)
        {
          size += UNITS_PER_WORD - size % UNITS_PER_WORD;
          mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
        }
    }

  return gen_rtx_REG (mode, R0_REGNUM);
}

rtx
aapcs_libcall_value (enum machine_mode mode)
{
  return aapcs_allocate_return_reg (mode, NULL_TREE, NULL_TREE);
}

/* Lay out a function argument using the AAPCS rules.  The rule
   numbers referred to here are those in the AAPCS.  */
static void
aapcs_layout_arg (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
                  tree type, int named)
{
  int nregs, nregs2;
  int ncrn;

  /* We only need to do this once per argument.  */
  if (pcum->aapcs_arg_processed)
    return;

  pcum->aapcs_arg_processed = true;

  /* Special case: if named is false then we are handling an incoming
     anonymous argument which is on the stack.  */
  if (!named)
    return;

  /* Is this a potential co-processor register candidate?  */
  if (pcum->pcs_variant != ARM_PCS_AAPCS)
    {
      int slot = aapcs_select_call_coproc (pcum, mode, type);
      pcum->aapcs_cprc_slot = slot;

      /* We don't have to apply any of the rules from part B of the
         preparation phase; these are handled elsewhere in the
         compiler.  */

      if (slot >= 0)
        {
          /* A co-processor register candidate goes either in its own
             class of registers or on the stack.  */
          if (!pcum->aapcs_cprc_failed[slot])
            {
              /* C1.cp - Try to allocate the argument to co-processor
                 registers.  */
              if (aapcs_cp_arg_layout[slot].allocate (pcum, mode, type))
                return;

              /* C2.cp - Put the argument on the stack and note that we
                 can't assign any more candidates in this slot.  We also
                 need to note that we have allocated stack space, so that
                 we won't later try to split a non-cprc candidate between
                 core registers and the stack.  */
              pcum->aapcs_cprc_failed[slot] = true;
              pcum->can_split = false;
            }

          /* We didn't get a register, so this argument goes on the
             stack.  */
          gcc_assert (pcum->can_split == false);
          return;
        }
    }

  /* C3 - For double-word aligned arguments, round the NCRN up to the
     next even number.  */
  ncrn = pcum->aapcs_ncrn;
  if ((ncrn & 1) && arm_needs_doubleword_align (mode, type))
    ncrn++;

  nregs = ARM_NUM_REGS2(mode, type);

  /* Sigh, this test should really assert that nregs > 0, but a GCC
     extension allows empty structs and then gives them empty size; it
     then allows such a structure to be passed by value.  For some of
     the code below we have to pretend that such an argument has
     non-zero size so that we 'locate' it correctly either in
     registers or on the stack.  */
  gcc_assert (nregs >= 0);

  nregs2 = nregs ? nregs : 1;

  /* C4 - Argument fits entirely in core registers.  */
  if (ncrn + nregs2 <= NUM_ARG_REGS)
    {
      pcum->aapcs_reg = gen_rtx_REG (mode, ncrn);
      pcum->aapcs_next_ncrn = ncrn + nregs;
      return;
    }

  /* C5 - Some core registers left and there are no arguments already
     on the stack: split this argument between the remaining core
     registers and the stack.  */
  if (ncrn < NUM_ARG_REGS && pcum->can_split)
    {
      pcum->aapcs_reg = gen_rtx_REG (mode, ncrn);
      pcum->aapcs_next_ncrn = NUM_ARG_REGS;
      pcum->aapcs_partial = (NUM_ARG_REGS - ncrn) * UNITS_PER_WORD;
      return;
    }

  /* C6 - NCRN is set to 4.  */
  pcum->aapcs_next_ncrn = NUM_ARG_REGS;

  /* C7,C8 - argument goes on the stack.  We have nothing to do here.  */
  return;
}
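
/* Worked illustration of the rules above (hypothetical call): for
   "void f (int a, double b)", A goes in r0 (C4; NCRN becomes 1).  B
   needs doubleword alignment, so C3 rounds NCRN up to 2 and C4
   assigns the r2/r3 pair.  If a third doubleword argument followed,
   neither C4 nor C5 would apply and it would be placed on the
   stack.  */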

/* Initialize a variable CUM of type CUMULATIVE_ARGS
   for a call to a function whose data type is FNTYPE.
   For a library call, FNTYPE is NULL.  */
void
arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
                          rtx libname,
                          tree fndecl ATTRIBUTE_UNUSED)
{
  /* Determine the calling convention in use.  */
  if (fntype)
    pcum->pcs_variant = arm_get_pcs_model (fntype, fndecl);
  else
    pcum->pcs_variant = arm_pcs_default;

  if (pcum->pcs_variant <= ARM_PCS_AAPCS_LOCAL)
    {
      if (arm_libcall_uses_aapcs_base (libname))
        pcum->pcs_variant = ARM_PCS_AAPCS;

      pcum->aapcs_ncrn = pcum->aapcs_next_ncrn = 0;
      pcum->aapcs_reg = NULL_RTX;
      pcum->aapcs_partial = 0;
      pcum->aapcs_arg_processed = false;
      pcum->aapcs_cprc_slot = -1;
      pcum->can_split = true;

      if (pcum->pcs_variant != ARM_PCS_AAPCS)
        {
          int i;

          for (i = 0; i < ARM_NUM_COPROC_SLOTS; i++)
            {
              pcum->aapcs_cprc_failed[i] = false;
              aapcs_cp_arg_layout[i].cum_init (pcum, fntype, libname, fndecl);
            }
        }
      return;
    }

  /* Legacy ABIs.  */

  /* On the ARM, the offset starts at 0.  */
  pcum->nregs = 0;
  pcum->iwmmxt_nregs = 0;
  pcum->can_split = true;

  /* Varargs vectors are treated the same as long long.
     named_count avoids having to change the way arm handles 'named'.  */
  pcum->named_count = 0;
  pcum->nargs = 0;

  if (TARGET_REALLY_IWMMXT && fntype)
    {
      tree fn_arg;

      for (fn_arg = TYPE_ARG_TYPES (fntype);
           fn_arg;
           fn_arg = TREE_CHAIN (fn_arg))
        pcum->named_count += 1;

      if (! pcum->named_count)
        pcum->named_count = INT_MAX;
    }
}


/* Return true if mode/type need doubleword alignment.  */
bool
arm_needs_doubleword_align (enum machine_mode mode, tree type)
{
  return (GET_MODE_ALIGNMENT (mode) > PARM_BOUNDARY
          || (type && TYPE_ALIGN (type) > PARM_BOUNDARY));
}
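
/* Illustration (hypothetical argument): PARM_BOUNDARY is 32 on ARM, so
   a DImode or DFmode argument (64-bit alignment) needs doubleword
   alignment and lands in an even-numbered register pair, whereas an
   SImode argument does not.  */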


/* Determine where to put an argument to a function.
   Value is zero to push the argument on the stack,
   or a hard register in which to store the argument.

   MODE is the argument's machine mode.
   TYPE is the data type of the argument (as a tree).
    This is null for libcalls where that information may
    not be available.
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
    the preceding args and about the function being called.
   NAMED is nonzero if this argument is a named parameter
    (otherwise it is an extra parameter matching an ellipsis).  */

rtx
arm_function_arg (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
                  tree type, int named)
{
  int nregs;

  /* Handle the special case quickly.  Pick an arbitrary value for op2 of
     a call insn (op3 of a call_value insn).  */
  if (mode == VOIDmode)
    return const0_rtx;

  if (pcum->pcs_variant <= ARM_PCS_AAPCS_LOCAL)
    {
      aapcs_layout_arg (pcum, mode, type, named);
      return pcum->aapcs_reg;
    }

  /* Varargs vectors are treated the same as long long.
     named_count avoids having to change the way arm handles 'named'.  */
  if (TARGET_IWMMXT_ABI
      && arm_vector_mode_supported_p (mode)
      && pcum->named_count > pcum->nargs + 1)
    {
      if (pcum->iwmmxt_nregs <= 9)
        return gen_rtx_REG (mode, pcum->iwmmxt_nregs + FIRST_IWMMXT_REGNUM);
      else
        {
          pcum->can_split = false;
          return NULL_RTX;
        }
    }

  /* Put doubleword aligned quantities in even register pairs.  */
  if (pcum->nregs & 1
      && ARM_DOUBLEWORD_ALIGN
      && arm_needs_doubleword_align (mode, type))
    pcum->nregs++;

  if (mode == VOIDmode)
    /* Pick an arbitrary value for operand 2 of the call insn.  */
    return const0_rtx;

  /* Only allow splitting an arg between regs and memory if all preceding
     args were allocated to regs.  For args passed by reference we only count
     the reference pointer.  */
  if (pcum->can_split)
    nregs = 1;
  else
    nregs = ARM_NUM_REGS2 (mode, type);

  if (!named || pcum->nregs + nregs > NUM_ARG_REGS)
    return NULL_RTX;

  return gen_rtx_REG (mode, pcum->nregs);
}

static int
arm_arg_partial_bytes (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
                       tree type, bool named)
{
  int nregs = pcum->nregs;

  if (pcum->pcs_variant <= ARM_PCS_AAPCS_LOCAL)
    {
      aapcs_layout_arg (pcum, mode, type, named);
      return pcum->aapcs_partial;
    }

  if (TARGET_IWMMXT_ABI && arm_vector_mode_supported_p (mode))
    return 0;

  if (NUM_ARG_REGS > nregs
      && (NUM_ARG_REGS < nregs + ARM_NUM_REGS2 (mode, type))
      && pcum->can_split)
    return (NUM_ARG_REGS - nregs) * UNITS_PER_WORD;

  return 0;
}
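
/* Illustration (hypothetical state): with three core registers already
   in use (nregs == 3), a DImode argument needs ARM_NUM_REGS2 == 2, so
   the test above yields (4 - 3) * UNITS_PER_WORD == 4 bytes: the first
   word travels in r3 and the second goes on the stack.  */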

void
arm_function_arg_advance (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
                          tree type, bool named)
{
  if (pcum->pcs_variant <= ARM_PCS_AAPCS_LOCAL)
    {
      aapcs_layout_arg (pcum, mode, type, named);

      if (pcum->aapcs_cprc_slot >= 0)
        {
          aapcs_cp_arg_layout[pcum->aapcs_cprc_slot].advance (pcum, mode,
                                                              type);
          pcum->aapcs_cprc_slot = -1;
        }

      /* Generic stuff.  */
      pcum->aapcs_arg_processed = false;
      pcum->aapcs_ncrn = pcum->aapcs_next_ncrn;
      pcum->aapcs_reg = NULL_RTX;
      pcum->aapcs_partial = 0;
    }
  else
    {
      pcum->nargs += 1;
      if (arm_vector_mode_supported_p (mode)
          && pcum->named_count > pcum->nargs
          && TARGET_IWMMXT_ABI)
        pcum->iwmmxt_nregs += 1;
      else
        pcum->nregs += ARM_NUM_REGS2 (mode, type);
    }
}

/* Variable sized types are passed by reference.  This is a GCC
   extension to the ARM ABI.  */

static bool
arm_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
                       enum machine_mode mode ATTRIBUTE_UNUSED,
                       const_tree type, bool named ATTRIBUTE_UNUSED)
{
  return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
}

/* Encode the current state of the #pragma [no_]long_calls.  */
typedef enum
{
  OFF,          /* No #pragma [no_]long_calls is in effect.  */
  LONG,         /* #pragma long_calls is in effect.  */
  SHORT         /* #pragma no_long_calls is in effect.  */
} arm_pragma_enum;

static arm_pragma_enum arm_pragma_long_calls = OFF;

void
arm_pr_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
{
  arm_pragma_long_calls = LONG;
}

void
arm_pr_no_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
{
  arm_pragma_long_calls = SHORT;
}

void
arm_pr_long_calls_off (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
{
  arm_pragma_long_calls = OFF;
}
4520
 
/* Handle an attribute requiring a FUNCTION_DECL;
   arguments as in struct attribute_spec.handler.  */
static tree
arm_handle_fndecl_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED,
                             int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
               name);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}

/* Handle an "interrupt" or "isr" attribute;
   arguments as in struct attribute_spec.handler.  */
static tree
arm_handle_isr_attribute (tree *node, tree name, tree args, int flags,
                          bool *no_add_attrs)
{
  if (DECL_P (*node))
    {
      if (TREE_CODE (*node) != FUNCTION_DECL)
        {
          warning (OPT_Wattributes, "%qE attribute only applies to functions",
                   name);
          *no_add_attrs = true;
        }
      /* FIXME: the argument, if any, is checked for type attributes;
         should it be checked for decl ones?  */
    }
  else
    {
      if (TREE_CODE (*node) == FUNCTION_TYPE
          || TREE_CODE (*node) == METHOD_TYPE)
        {
          if (arm_isr_value (args) == ARM_FT_UNKNOWN)
            {
              warning (OPT_Wattributes, "%qE attribute ignored",
                       name);
              *no_add_attrs = true;
            }
        }
      else if (TREE_CODE (*node) == POINTER_TYPE
               && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
                   || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
               && arm_isr_value (args) != ARM_FT_UNKNOWN)
        {
          *node = build_variant_type_copy (*node);
          TREE_TYPE (*node) = build_type_attribute_variant
            (TREE_TYPE (*node),
             tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
          *no_add_attrs = true;
        }
      else
        {
          /* Possibly pass this attribute on from the type to a decl.  */
          if (flags & ((int) ATTR_FLAG_DECL_NEXT
                       | (int) ATTR_FLAG_FUNCTION_NEXT
                       | (int) ATTR_FLAG_ARRAY_NEXT))
            {
              *no_add_attrs = true;
              return tree_cons (name, args, NULL_TREE);
            }
          else
            {
              warning (OPT_Wattributes, "%qE attribute ignored",
                       name);
            }
        }
    }

  return NULL_TREE;
}

/* Handle a "pcs" attribute; arguments as in struct
   attribute_spec.handler.  */
static tree
arm_handle_pcs_attribute (tree *node ATTRIBUTE_UNUSED, tree name, tree args,
                          int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  if (arm_pcs_from_attribute (args) == ARM_PCS_UNKNOWN)
    {
      warning (OPT_Wattributes, "%qE attribute ignored", name);
      *no_add_attrs = true;
    }
  return NULL_TREE;
}

#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
/* Handle the "notshared" attribute.  This attribute is another way of
   requesting hidden visibility.  ARM's compiler supports
   "__declspec(notshared)"; we support the same thing via an
   attribute.  */

static tree
arm_handle_notshared_attribute (tree *node,
                                tree name ATTRIBUTE_UNUSED,
                                tree args ATTRIBUTE_UNUSED,
                                int flags ATTRIBUTE_UNUSED,
                                bool *no_add_attrs)
{
  tree decl = TYPE_NAME (*node);

  if (decl)
    {
      DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
      DECL_VISIBILITY_SPECIFIED (decl) = 1;
      *no_add_attrs = false;
    }
  return NULL_TREE;
}
#endif

/* Return 0 if the attributes for two types are incompatible, 1 if they
   are compatible, and 2 if they are nearly compatible (which causes a
   warning to be generated).  */
static int
arm_comp_type_attributes (const_tree type1, const_tree type2)
{
  int l1, l2, s1, s2;

  /* Check for mismatch of non-default calling convention.  */
  if (TREE_CODE (type1) != FUNCTION_TYPE)
    return 1;

  /* Check for mismatched call attributes.  */
  l1 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1)) != NULL;
  l2 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2)) != NULL;
  s1 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1)) != NULL;
  s2 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2)) != NULL;

  /* Only bother to check if an attribute is defined.  */
  if (l1 | l2 | s1 | s2)
    {
      /* If one type has an attribute, the other must have the same attribute.  */
      if ((l1 != l2) || (s1 != s2))
        return 0;

      /* Disallow mixed attributes.  */
      if ((l1 & s2) || (l2 & s1))
        return 0;
    }

  /* Check for mismatched ISR attribute.  */
  l1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
  if (! l1)
    l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
  l2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
  if (! l2)
    l2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
  if (l1 != l2)
    return 0;

  return 1;
}

/* Assign default attributes to a newly defined type.  This is used to
   set short_call/long_call attributes for function types of
   functions defined inside corresponding #pragma scopes.  */
static void
arm_set_default_type_attributes (tree type)
{
  /* Add __attribute__ ((long_call)) to all functions when inside
     #pragma long_calls, or __attribute__ ((short_call)) when inside
     #pragma no_long_calls.  */
  if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
    {
      tree type_attr_list, attr_name;
      type_attr_list = TYPE_ATTRIBUTES (type);

      if (arm_pragma_long_calls == LONG)
        attr_name = get_identifier ("long_call");
      else if (arm_pragma_long_calls == SHORT)
        attr_name = get_identifier ("short_call");
      else
        return;

      type_attr_list = tree_cons (attr_name, NULL_TREE, type_attr_list);
      TYPE_ATTRIBUTES (type) = type_attr_list;
    }
}

/* Return true if DECL is known to be linked into section SECTION.  */

static bool
arm_function_in_section_p (tree decl, section *section)
{
  /* We can only be certain about functions defined in the same
     compilation unit.  */
  if (!TREE_STATIC (decl))
    return false;

  /* Make sure that SYMBOL always binds to the definition in this
     compilation unit.  */
  if (!targetm.binds_local_p (decl))
    return false;

  /* If DECL_SECTION_NAME is set, assume it is trustworthy.  */
  if (!DECL_SECTION_NAME (decl))
    {
      /* Make sure that we will not create a unique section for DECL.  */
      if (flag_function_sections || DECL_ONE_ONLY (decl))
        return false;
    }

  return function_section (decl) == section;
}

/* Return nonzero if a 32-bit "long_call" should be generated for
   a call from the current function to DECL.  We generate a long_call
   if the function:

        a.  has an __attribute__((long_call))
     or b.  is within the scope of a #pragma long_calls
     or c.  the -mlong-calls command-line switch has been specified

   However we do not generate a long call if the function:

        d.  has an __attribute__ ((short_call))
     or e.  is inside the scope of a #pragma no_long_calls
     or f.  is defined in the same section as the current function.  */

bool
arm_is_long_call_p (tree decl)
{
  tree attrs;

  if (!decl)
    return TARGET_LONG_CALLS;

  attrs = TYPE_ATTRIBUTES (TREE_TYPE (decl));
  if (lookup_attribute ("short_call", attrs))
    return false;

  /* For "f", be conservative, and only cater for cases in which the
     whole of the current function is placed in the same section.  */
  if (!flag_reorder_blocks_and_partition
      && TREE_CODE (decl) == FUNCTION_DECL
      && arm_function_in_section_p (decl, current_function_section ()))
    return false;

  if (lookup_attribute ("long_call", attrs))
    return true;

  return TARGET_LONG_CALLS;
}

/* Return nonzero if it is ok to make a tail-call to DECL.  */
static bool
arm_function_ok_for_sibcall (tree decl, tree exp)
{
  unsigned long func_type;

  if (cfun->machine->sibcall_blocked)
    return false;

  /* Never tailcall something for which we have no decl, or if we
     are in Thumb mode.  */
  if (decl == NULL || TARGET_THUMB)
    return false;

  /* The PIC register is live on entry to VxWorks PLT entries, so we
     must make the call before restoring the PIC register.  */
  if (TARGET_VXWORKS_RTP && flag_pic && !targetm.binds_local_p (decl))
    return false;

  /* Cannot tail-call to long calls, since these are out of range of
     a branch instruction.  */
  if (arm_is_long_call_p (decl))
    return false;

  /* If we are interworking and the function is not declared static
     then we can't tail-call it unless we know that it exists in this
     compilation unit (since it might be a Thumb routine).  */
  if (TARGET_INTERWORK && TREE_PUBLIC (decl) && !TREE_ASM_WRITTEN (decl))
    return false;

  func_type = arm_current_func_type ();
  /* Never tailcall from an ISR routine - it needs a special exit sequence.  */
  if (IS_INTERRUPT (func_type))
    return false;

  if (!VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
    {
      /* Check that the return value locations are the same.  For
         example that we aren't returning a value from the sibling in
         a VFP register but then need to transfer it to a core
         register.  */
      rtx a, b;

      a = arm_function_value (TREE_TYPE (exp), decl, false);
      b = arm_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
                              cfun->decl, false);
      if (!rtx_equal_p (a, b))
        return false;
    }

  /* Never tailcall if function may be called with a misaligned SP.  */
  if (IS_STACKALIGN (func_type))
    return false;

  /* Everything else is ok.  */
  return true;
}


/* Addressing mode support functions.  */

/* Return nonzero if X is a legitimate immediate operand when compiling
   for PIC.  We know that X satisfies CONSTANT_P and flag_pic is true.  */
int
legitimate_pic_operand_p (rtx x)
{
  if (GET_CODE (x) == SYMBOL_REF
      || (GET_CODE (x) == CONST
          && GET_CODE (XEXP (x, 0)) == PLUS
          && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF))
    return 0;

  return 1;
}

/* Record that the current function needs a PIC register.  Initialize
   cfun->machine->pic_reg if we have not already done so.  */

static void
require_pic_register (void)
{
  /* A lot of the logic here is made obscure by the fact that this
     routine gets called as part of the rtx cost estimation process.
     We don't want those calls to affect any assumptions about the real
     function; and further, we can't call entry_of_function() until we
     start the real expansion process.  */
  if (!crtl->uses_pic_offset_table)
    {
      gcc_assert (can_create_pseudo_p ());
      if (arm_pic_register != INVALID_REGNUM)
        {
          if (!cfun->machine->pic_reg)
            cfun->machine->pic_reg = gen_rtx_REG (Pmode, arm_pic_register);

          /* Play games to avoid marking the function as needing pic
             if we are being called as part of the cost-estimation
             process.  */
          if (current_ir_type () != IR_GIMPLE || currently_expanding_to_rtl)
            crtl->uses_pic_offset_table = 1;
        }
      else
        {
          rtx seq;

          if (!cfun->machine->pic_reg)
            cfun->machine->pic_reg = gen_reg_rtx (Pmode);

          /* Play games to avoid marking the function as needing pic
             if we are being called as part of the cost-estimation
             process.  */
          if (current_ir_type () != IR_GIMPLE || currently_expanding_to_rtl)
            {
              crtl->uses_pic_offset_table = 1;
              start_sequence ();

              arm_load_pic_register (0UL);

              seq = get_insns ();
              end_sequence ();
              /* We can be called during expansion of PHI nodes, where
                 we can't yet emit instructions directly in the final
                 insn stream.  Queue the insns on the entry edge; they will
                 be committed after everything else is expanded.  */
              insert_insn_on_edge (seq, single_succ_edge (ENTRY_BLOCK_PTR));
            }
        }
    }
}

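/* Legitimize the PIC address ORIG of mode MODE, using REG as a scratch
   register when it is non-null.  SYMBOL_REFs and LABEL_REFs are loaded
   relative to the PIC register (directly, or through the GOT); CONST
   expressions are legitimized recursively; anything else is returned
   unchanged.  */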
rtx
legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
{
  if (GET_CODE (orig) == SYMBOL_REF
      || GET_CODE (orig) == LABEL_REF)
    {
      rtx pic_ref, address;
      rtx insn;
      int subregs = 0;

      /* If this function doesn't have a pic register, create one now.  */
      require_pic_register ();

      if (reg == 0)
        {
          gcc_assert (can_create_pseudo_p ());
          reg = gen_reg_rtx (Pmode);

          subregs = 1;
        }

      if (subregs)
        address = gen_reg_rtx (Pmode);
      else
        address = reg;

      if (TARGET_32BIT)
        emit_insn (gen_pic_load_addr_32bit (address, orig));
      else /* TARGET_THUMB1 */
        emit_insn (gen_pic_load_addr_thumb1 (address, orig));

      /* VxWorks does not impose a fixed gap between segments; the run-time
         gap can be different from the object-file gap.  We therefore can't
         use GOTOFF unless we are absolutely sure that the symbol is in the
         same segment as the GOT.  Unfortunately, the flexibility of linker
         scripts means that we can't be sure of that in general, so assume
         that GOTOFF is never valid on VxWorks.  */
      if ((GET_CODE (orig) == LABEL_REF
           || (GET_CODE (orig) == SYMBOL_REF
               && SYMBOL_REF_LOCAL_P (orig)))
          && NEED_GOT_RELOC
          && !TARGET_VXWORKS_RTP)
        pic_ref = gen_rtx_PLUS (Pmode, cfun->machine->pic_reg, address);
      else
        {
          pic_ref = gen_const_mem (Pmode,
                                   gen_rtx_PLUS (Pmode, cfun->machine->pic_reg,
                                                 address));
        }

      insn = emit_move_insn (reg, pic_ref);

      /* Put a REG_EQUAL note on this insn, so that it can be optimized
         by loop.  */
      set_unique_reg_note (insn, REG_EQUAL, orig);

      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base, offset;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
          && XEXP (XEXP (orig, 0), 0) == cfun->machine->pic_reg)
        return orig;

      /* Handle the case where we have: const (UNSPEC_TLS).  */
      if (GET_CODE (XEXP (orig, 0)) == UNSPEC
          && XINT (XEXP (orig, 0), 1) == UNSPEC_TLS)
        return orig;

      /* Handle the case where we have:
         const (plus (UNSPEC_TLS) (ADDEND)).  The ADDEND must be a
         CONST_INT.  */
      if (GET_CODE (XEXP (orig, 0)) == PLUS
          && GET_CODE (XEXP (XEXP (orig, 0), 0)) == UNSPEC
          && XINT (XEXP (XEXP (orig, 0), 0), 1) == UNSPEC_TLS)
        {
          gcc_assert (GET_CODE (XEXP (XEXP (orig, 0), 1)) == CONST_INT);
          return orig;
        }

      if (reg == 0)
        {
          gcc_assert (can_create_pseudo_p ());
          reg = gen_reg_rtx (Pmode);
        }

      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
                                       base == reg ? 0 : reg);

      if (GET_CODE (offset) == CONST_INT)
        {
          /* The base register doesn't really matter; we only want to
             test the index for the appropriate mode.  */
          if (!arm_legitimate_index_p (mode, offset, SET, 0))
            {
              gcc_assert (can_create_pseudo_p ());
              offset = force_reg (Pmode, offset);
            }

          if (GET_CODE (offset) == CONST_INT)
            return plus_constant (base, INTVAL (offset));
        }

      if (GET_MODE_SIZE (mode) > 4
          && (GET_MODE_CLASS (mode) == MODE_INT
              || TARGET_SOFT_FLOAT))
        {
          emit_insn (gen_addsi3 (reg, base, offset));
          return reg;
        }

      return gen_rtx_PLUS (Pmode, base, offset);
    }

  return orig;
}


/* Find a spare register to use during the prologue of a function.  */

static int
thumb_find_work_register (unsigned long pushed_regs_mask)
{
  int reg;

  /* Check the argument registers first as these are call-used.  The
     register allocation order means that sometimes r3 might be used
     but earlier argument registers might not, so check them all.  */
  for (reg = LAST_ARG_REGNUM; reg >= 0; reg --)
    if (!df_regs_ever_live_p (reg))
      return reg;

  /* Before going on to check the call-saved registers we can try a couple
     more ways of deducing that r3 is available.  The first is when we are
     pushing anonymous arguments onto the stack and we have fewer than 4
     registers' worth of fixed arguments (*).  In this case r3 will be part of
     the variable argument list and so we can be sure that it will be
     pushed right at the start of the function.  Hence it will be available
     for the rest of the prologue.
     (*): i.e. crtl->args.pretend_args_size is greater than 0.  */
  if (cfun->machine->uses_anonymous_args
      && crtl->args.pretend_args_size > 0)
    return LAST_ARG_REGNUM;

  /* The other case is when we have fixed arguments but fewer than 4
     registers' worth.  In this case r3 might be used in the body of the
     function, but it is not being used to convey an argument into the
     function.  In theory we could just check crtl->args.size to see how
     many bytes are being passed in argument registers, but it seems that
     it is unreliable.  Sometimes it will have the value 0 when in fact
     arguments are being passed.  (See testcase execute/20021111-1.c for an
     example).  So we also check the args_info.nregs field.  The problem
     with this field is that it makes no allowances for arguments that are
     passed to the function but which are not used.  Hence we could miss an
     opportunity when a function has an unused argument in r3.  But it is
     better to be safe than sorry.  */
  if (! cfun->machine->uses_anonymous_args
      && crtl->args.size >= 0
      && crtl->args.size <= (LAST_ARG_REGNUM * UNITS_PER_WORD)
      && crtl->args.info.nregs < 4)
    return LAST_ARG_REGNUM;

  /* Otherwise look for a call-saved register that is going to be pushed.  */
  for (reg = LAST_LO_REGNUM; reg > LAST_ARG_REGNUM; reg --)
    if (pushed_regs_mask & (1 << reg))
      return reg;

  if (TARGET_THUMB2)
    {
      /* Thumb-2 can use high regs.  */
      for (reg = FIRST_HI_REGNUM; reg < 15; reg ++)
        if (pushed_regs_mask & (1 << reg))
          return reg;
    }
  /* Something went wrong - thumb_compute_save_reg_mask()
     should have arranged for a suitable register to be pushed.  */
  gcc_unreachable ();
}

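/* Counter used to generate unique labels for PIC address calculations.  */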
static GTY(()) int pic_labelno;

/* Generate code to load the PIC register.  In thumb mode SCRATCH is a
   low register.  */

void
arm_load_pic_register (unsigned long saved_regs ATTRIBUTE_UNUSED)
{
  rtx l1, labelno, pic_tmp, pic_rtx, pic_reg;

  if (crtl->uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
    return;

  gcc_assert (flag_pic);

  pic_reg = cfun->machine->pic_reg;
  if (TARGET_VXWORKS_RTP)
    {
      pic_rtx = gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_BASE);
      pic_rtx = gen_rtx_CONST (Pmode, pic_rtx);
      emit_insn (gen_pic_load_addr_32bit (pic_reg, pic_rtx));

      emit_insn (gen_rtx_SET (Pmode, pic_reg, gen_rtx_MEM (Pmode, pic_reg)));

      pic_tmp = gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_INDEX);
      emit_insn (gen_pic_offset_arm (pic_reg, pic_reg, pic_tmp));
    }
  else
    {
      /* We use an UNSPEC rather than a LABEL_REF because this label
         never appears in the code stream.  */

      labelno = GEN_INT (pic_labelno++);
      l1 = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, labelno), UNSPEC_PIC_LABEL);
      l1 = gen_rtx_CONST (VOIDmode, l1);

      /* On the ARM the PC register contains 'dot + 8' at the time of the
         addition, on the Thumb it is 'dot + 4'.  */
      pic_rtx = plus_constant (l1, TARGET_ARM ? 8 : 4);
      pic_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, pic_rtx),
                                UNSPEC_GOTSYM_OFF);
      pic_rtx = gen_rtx_CONST (Pmode, pic_rtx);

      if (TARGET_32BIT)
        {
          emit_insn (gen_pic_load_addr_32bit (pic_reg, pic_rtx));
          if (TARGET_ARM)
            emit_insn (gen_pic_add_dot_plus_eight (pic_reg, pic_reg, labelno));
          else
            emit_insn (gen_pic_add_dot_plus_four (pic_reg, pic_reg, labelno));
        }
      else /* TARGET_THUMB1 */
        {
          if (arm_pic_register != INVALID_REGNUM
              && REGNO (pic_reg) > LAST_LO_REGNUM)
            {
              /* We will have pushed the pic register, so we should always be
                 able to find a work register.  */
              pic_tmp = gen_rtx_REG (SImode,
                                     thumb_find_work_register (saved_regs));
              emit_insn (gen_pic_load_addr_thumb1 (pic_tmp, pic_rtx));
              emit_insn (gen_movsi (pic_offset_table_rtx, pic_tmp));
            }
          else
            emit_insn (gen_pic_load_addr_thumb1 (pic_reg, pic_rtx));
          emit_insn (gen_pic_add_dot_plus_four (pic_reg, pic_reg, labelno));
        }
    }

  /* Need to emit this whether or not we obey regdecls,
     since setjmp/longjmp can cause life info to screw up.  */
  emit_use (pic_reg);
}


/* Return nonzero if X is valid as an ARM state addressing register.  */
static int
arm_address_register_rtx_p (rtx x, int strict_p)
{
  int regno;

  if (GET_CODE (x) != REG)
    return 0;

  regno = REGNO (x);

  if (strict_p)
    return ARM_REGNO_OK_FOR_BASE_P (regno);

  return (regno <= LAST_ARM_REGNUM
          || regno >= FIRST_PSEUDO_REGISTER
          || regno == FRAME_POINTER_REGNUM
          || regno == ARG_POINTER_REGNUM);
}

/* Return TRUE if this rtx is the difference of a symbol and a label,
   and will reduce to a PC-relative relocation in the object file.
   Expressions like this can be left alone when generating PIC, rather
   than forced through the GOT.  */
static int
pcrel_constant_p (rtx x)
{
  if (GET_CODE (x) == MINUS)
    return symbol_mentioned_p (XEXP (x, 0)) && label_mentioned_p (XEXP (x, 1));

  return FALSE;
}

/* Return nonzero if X is a valid ARM state address operand.  */
int
arm_legitimate_address_outer_p (enum machine_mode mode, rtx x, RTX_CODE outer,
                                int strict_p)
{
  bool use_ldrd;
  enum rtx_code code = GET_CODE (x);

  if (arm_address_register_rtx_p (x, strict_p))
    return 1;

  use_ldrd = (TARGET_LDRD
              && (mode == DImode
                  || (mode == DFmode && (TARGET_SOFT_FLOAT || TARGET_VFP))));

  if (code == POST_INC || code == PRE_DEC
      || ((code == PRE_INC || code == POST_DEC)
          && (use_ldrd || GET_MODE_SIZE (mode) <= 4)))
    return arm_address_register_rtx_p (XEXP (x, 0), strict_p);

  else if ((code == POST_MODIFY || code == PRE_MODIFY)
           && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
           && GET_CODE (XEXP (x, 1)) == PLUS
           && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
    {
      rtx addend = XEXP (XEXP (x, 1), 1);

      /* Don't allow ldrd post-increment by register because it's hard
         to fix up invalid register choices.  */
      if (use_ldrd
          && GET_CODE (x) == POST_MODIFY
          && GET_CODE (addend) == REG)
        return 0;

      return ((use_ldrd || GET_MODE_SIZE (mode) <= 4)
              && arm_legitimate_index_p (mode, addend, outer, strict_p));
    }

  /* After reload constants split into minipools will have addresses
     from a LABEL_REF.  */
  else if (reload_completed
           && (code == LABEL_REF
               || (code == CONST
                   && GET_CODE (XEXP (x, 0)) == PLUS
                   && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
                   && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
    return 1;

  else if (mode == TImode || (TARGET_NEON && VALID_NEON_STRUCT_MODE (mode)))
    return 0;

  else if (code == PLUS)
    {
      rtx xop0 = XEXP (x, 0);
      rtx xop1 = XEXP (x, 1);

      return ((arm_address_register_rtx_p (xop0, strict_p)
               && GET_CODE (xop1) == CONST_INT
               && arm_legitimate_index_p (mode, xop1, outer, strict_p))
              || (arm_address_register_rtx_p (xop1, strict_p)
                  && arm_legitimate_index_p (mode, xop0, outer, strict_p)));
    }

#if 0
  /* Reload currently can't handle MINUS, so disable this for now */
  else if (GET_CODE (x) == MINUS)
    {
      rtx xop0 = XEXP (x, 0);
      rtx xop1 = XEXP (x, 1);

      return (arm_address_register_rtx_p (xop0, strict_p)
              && arm_legitimate_index_p (mode, xop1, outer, strict_p));
    }
#endif

  else if (GET_MODE_CLASS (mode) != MODE_FLOAT
           && code == SYMBOL_REF
           && CONSTANT_POOL_ADDRESS_P (x)
           && ! (flag_pic
                 && symbol_mentioned_p (get_pool_constant (x))
                 && ! pcrel_constant_p (get_pool_constant (x))))
    return 1;

  return 0;
}

/* Return nonzero if X is a valid Thumb-2 address operand.  */
static int
thumb2_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
{
  bool use_ldrd;
  enum rtx_code code = GET_CODE (x);

  if (arm_address_register_rtx_p (x, strict_p))
    return 1;

  use_ldrd = (TARGET_LDRD
              && (mode == DImode
                  || (mode == DFmode && (TARGET_SOFT_FLOAT || TARGET_VFP))));

  if (code == POST_INC || code == PRE_DEC
      || ((code == PRE_INC || code == POST_DEC)
          && (use_ldrd || GET_MODE_SIZE (mode) <= 4)))
    return arm_address_register_rtx_p (XEXP (x, 0), strict_p);

  else if ((code == POST_MODIFY || code == PRE_MODIFY)
           && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
           && GET_CODE (XEXP (x, 1)) == PLUS
           && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
    {
      /* Thumb-2 only has autoincrement by constant.  */
      rtx addend = XEXP (XEXP (x, 1), 1);
      HOST_WIDE_INT offset;

      if (GET_CODE (addend) != CONST_INT)
        return 0;

      offset = INTVAL (addend);
      if (GET_MODE_SIZE (mode) <= 4)
        return (offset > -256 && offset < 256);

      return (use_ldrd && offset > -1024 && offset < 1024
              && (offset & 3) == 0);
    }

  /* After reload constants split into minipools will have addresses
     from a LABEL_REF.  */
  else if (reload_completed
           && (code == LABEL_REF
               || (code == CONST
                   && GET_CODE (XEXP (x, 0)) == PLUS
                   && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
                   && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
    return 1;

  else if (mode == TImode || (TARGET_NEON && VALID_NEON_STRUCT_MODE (mode)))
    return 0;

  else if (code == PLUS)
    {
      rtx xop0 = XEXP (x, 0);
      rtx xop1 = XEXP (x, 1);

      return ((arm_address_register_rtx_p (xop0, strict_p)
               && thumb2_legitimate_index_p (mode, xop1, strict_p))
              || (arm_address_register_rtx_p (xop1, strict_p)
                  && thumb2_legitimate_index_p (mode, xop0, strict_p)));
    }

  else if (GET_MODE_CLASS (mode) != MODE_FLOAT
           && code == SYMBOL_REF
           && CONSTANT_POOL_ADDRESS_P (x)
           && ! (flag_pic
                 && symbol_mentioned_p (get_pool_constant (x))
                 && ! pcrel_constant_p (get_pool_constant (x))))
    return 1;

  return 0;
}

/* Return nonzero if INDEX is valid for an address index operand in
   ARM state.  */
static int
arm_legitimate_index_p (enum machine_mode mode, rtx index, RTX_CODE outer,
                        int strict_p)
{
  HOST_WIDE_INT range;
  enum rtx_code code = GET_CODE (index);

  /* Standard coprocessor addressing modes.  */
  if (TARGET_HARD_FLOAT
      && (TARGET_FPA || TARGET_MAVERICK)
      && (GET_MODE_CLASS (mode) == MODE_FLOAT
          || (TARGET_MAVERICK && mode == DImode)))
    return (code == CONST_INT && INTVAL (index) < 1024
            && INTVAL (index) > -1024
            && (INTVAL (index) & 3) == 0);

  if (TARGET_NEON
      && (VALID_NEON_DREG_MODE (mode) || VALID_NEON_QREG_MODE (mode)))
    return (code == CONST_INT
            && INTVAL (index) < 1016
            && INTVAL (index) > -1024
            && (INTVAL (index) & 3) == 0);

  if (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))
    return (code == CONST_INT
            && INTVAL (index) < 1024
            && INTVAL (index) > -1024
            && (INTVAL (index) & 3) == 0);

  if (arm_address_register_rtx_p (index, strict_p)
      && (GET_MODE_SIZE (mode) <= 4))
    return 1;

  if (mode == DImode || mode == DFmode)
    {
      if (code == CONST_INT)
        {
          HOST_WIDE_INT val = INTVAL (index);

          if (TARGET_LDRD)
            return val > -256 && val < 256;
          else
            return val > -4096 && val < 4092;
        }

      return TARGET_LDRD && arm_address_register_rtx_p (index, strict_p);
    }

  if (GET_MODE_SIZE (mode) <= 4
      && ! (arm_arch4
            && (mode == HImode
                || mode == HFmode
                || (mode == QImode && outer == SIGN_EXTEND))))
    {
      if (code == MULT)
        {
          rtx xiop0 = XEXP (index, 0);
          rtx xiop1 = XEXP (index, 1);

          return ((arm_address_register_rtx_p (xiop0, strict_p)
                   && power_of_two_operand (xiop1, SImode))
                  || (arm_address_register_rtx_p (xiop1, strict_p)
                      && power_of_two_operand (xiop0, SImode)));
        }
      else if (code == LSHIFTRT || code == ASHIFTRT
               || code == ASHIFT || code == ROTATERT)
        {
          rtx op = XEXP (index, 1);

          return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
                  && GET_CODE (op) == CONST_INT
                  && INTVAL (op) > 0
                  && INTVAL (op) <= 31);
        }
    }

  /* For ARM v4 we may be doing a sign-extend operation during the
     load.  */
  if (arm_arch4)
    {
      if (mode == HImode
          || mode == HFmode
          || (outer == SIGN_EXTEND && mode == QImode))
        range = 256;
      else
        range = 4096;
    }
  else
    range = (mode == HImode || mode == HFmode) ? 4095 : 4096;

  return (code == CONST_INT
          && INTVAL (index) < range
          && INTVAL (index) > -range);
}

/* Return true if OP is a valid index scaling factor for a Thumb-2 address
   index operand, i.e. 1, 2, 4 or 8.  */
static bool
thumb2_index_mul_operand (rtx op)
{
  HOST_WIDE_INT val;

  if (GET_CODE (op) != CONST_INT)
    return false;

  val = INTVAL (op);
  return (val == 1 || val == 2 || val == 4 || val == 8);
}

/* Return nonzero if INDEX is a valid Thumb-2 address index operand.  */
static int
thumb2_legitimate_index_p (enum machine_mode mode, rtx index, int strict_p)
{
  enum rtx_code code = GET_CODE (index);

  /* ??? Combine arm and thumb2 coprocessor addressing modes.  */
  /* Standard coprocessor addressing modes.  */
  if (TARGET_HARD_FLOAT
      && (TARGET_FPA || TARGET_MAVERICK)
      && (GET_MODE_CLASS (mode) == MODE_FLOAT
          || (TARGET_MAVERICK && mode == DImode)))
    return (code == CONST_INT && INTVAL (index) < 1024
            && INTVAL (index) > -1024
            && (INTVAL (index) & 3) == 0);

  if (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))
    {
      /* For DImode assume values will usually live in core regs
         and only allow LDRD addressing modes.  */
      if (!TARGET_LDRD || mode != DImode)
        return (code == CONST_INT
                && INTVAL (index) < 1024
                && INTVAL (index) > -1024
                && (INTVAL (index) & 3) == 0);
    }

  if (TARGET_NEON
      && (VALID_NEON_DREG_MODE (mode) || VALID_NEON_QREG_MODE (mode)))
    return (code == CONST_INT
            && INTVAL (index) < 1016
            && INTVAL (index) > -1024
            && (INTVAL (index) & 3) == 0);

  if (arm_address_register_rtx_p (index, strict_p)
      && (GET_MODE_SIZE (mode) <= 4))
    return 1;

  if (mode == DImode || mode == DFmode)
    {
      if (code == CONST_INT)
        {
          HOST_WIDE_INT val = INTVAL (index);
          /* ??? Can we assume ldrd for thumb2?  */
          /* Thumb-2 ldrd only has reg+const addressing modes.  */
          /* ldrd supports offsets of +-1020.
             However the ldr fallback does not.  */
          return val > -256 && val < 256 && (val & 3) == 0;
        }
      else
        return 0;
    }

  if (code == MULT)
    {
      rtx xiop0 = XEXP (index, 0);
      rtx xiop1 = XEXP (index, 1);

      return ((arm_address_register_rtx_p (xiop0, strict_p)
               && thumb2_index_mul_operand (xiop1))
              || (arm_address_register_rtx_p (xiop1, strict_p)
                  && thumb2_index_mul_operand (xiop0)));
    }
  else if (code == ASHIFT)
    {
      rtx op = XEXP (index, 1);

      return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
              && GET_CODE (op) == CONST_INT
              && INTVAL (op) > 0
              && INTVAL (op) <= 3);
    }

  return (code == CONST_INT
          && INTVAL (index) < 4096
          && INTVAL (index) > -256);
}

/* Return nonzero if X is valid as a 16-bit Thumb state base register.  */
static int
thumb1_base_register_rtx_p (rtx x, enum machine_mode mode, int strict_p)
{
  int regno;

  if (GET_CODE (x) != REG)
    return 0;

  regno = REGNO (x);

  if (strict_p)
    return THUMB1_REGNO_MODE_OK_FOR_BASE_P (regno, mode);

  return (regno <= LAST_LO_REGNUM
          || regno > LAST_VIRTUAL_REGISTER
          || regno == FRAME_POINTER_REGNUM
          || (GET_MODE_SIZE (mode) >= 4
              && (regno == STACK_POINTER_REGNUM
                  || regno >= FIRST_PSEUDO_REGISTER
                  || x == hard_frame_pointer_rtx
                  || x == arg_pointer_rtx)));
}

/* Return nonzero if x is a legitimate index register.  This is the case
   for any base register that can access a QImode object.  */
inline static int
thumb1_index_register_rtx_p (rtx x, int strict_p)
{
  return thumb1_base_register_rtx_p (x, QImode, strict_p);
}

/* Return nonzero if x is a legitimate 16-bit Thumb-state address.

   The AP may be eliminated to either the SP or the FP, so we use the
   least common denominator, e.g. SImode, and offsets from 0 to 64.

   ??? Verify whether the above is the right approach.

   ??? Also, the FP may be eliminated to the SP, so perhaps that
   needs special handling also.

   ??? Look at how the mips16 port solves this problem.  It probably uses
   better ways to solve some of these problems.

   Although it is not incorrect, we don't accept QImode and HImode
   addresses based on the frame pointer or arg pointer until the
   reload pass starts.  This is so that eliminating such addresses
   into stack based ones won't produce impossible code.  */
static int
thumb1_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
{
  /* ??? Not clear if this is right.  Experiment.  */
  if (GET_MODE_SIZE (mode) < 4
      && !(reload_in_progress || reload_completed)
      && (reg_mentioned_p (frame_pointer_rtx, x)
          || reg_mentioned_p (arg_pointer_rtx, x)
          || reg_mentioned_p (virtual_incoming_args_rtx, x)
          || reg_mentioned_p (virtual_outgoing_args_rtx, x)
          || reg_mentioned_p (virtual_stack_dynamic_rtx, x)
          || reg_mentioned_p (virtual_stack_vars_rtx, x)))
    return 0;

  /* Accept any base register.  SP only in SImode or larger.  */
  else if (thumb1_base_register_rtx_p (x, mode, strict_p))
    return 1;

  /* This is PC relative data before arm_reorg runs.  */
  else if (GET_MODE_SIZE (mode) >= 4 && CONSTANT_P (x)
           && GET_CODE (x) == SYMBOL_REF
           && CONSTANT_POOL_ADDRESS_P (x) && !flag_pic)
    return 1;

  /* This is PC relative data after arm_reorg runs.  */
  else if ((GET_MODE_SIZE (mode) >= 4 || mode == HFmode)
           && reload_completed
           && (GET_CODE (x) == LABEL_REF
               || (GET_CODE (x) == CONST
                   && GET_CODE (XEXP (x, 0)) == PLUS
                   && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
                   && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
    return 1;

  /* Post-inc indexing only supported for SImode and larger.  */
  else if (GET_CODE (x) == POST_INC && GET_MODE_SIZE (mode) >= 4
           && thumb1_index_register_rtx_p (XEXP (x, 0), strict_p))
    return 1;

  else if (GET_CODE (x) == PLUS)
    {
      /* REG+REG address can be any two index registers.  */
      /* We disallow FRAME+REG addressing since we know that FRAME
         will be replaced with STACK, and SP relative addressing only
         permits SP+OFFSET.  */
      if (GET_MODE_SIZE (mode) <= 4
          && XEXP (x, 0) != frame_pointer_rtx
          && XEXP (x, 1) != frame_pointer_rtx
          && thumb1_index_register_rtx_p (XEXP (x, 0), strict_p)
          && thumb1_index_register_rtx_p (XEXP (x, 1), strict_p))
        return 1;

      /* REG+const has 5-7 bit offset for non-SP registers.  */
      else if ((thumb1_index_register_rtx_p (XEXP (x, 0), strict_p)
                || XEXP (x, 0) == arg_pointer_rtx)
               && GET_CODE (XEXP (x, 1)) == CONST_INT
               && thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
        return 1;

      /* REG+const has 10-bit offset for SP, but only SImode and
         larger is supported.  */
      /* ??? Should probably check for DI/DFmode overflow here
         just like GO_IF_LEGITIMATE_OFFSET does.  */
      else if (GET_CODE (XEXP (x, 0)) == REG
               && REGNO (XEXP (x, 0)) == STACK_POINTER_REGNUM
               && GET_MODE_SIZE (mode) >= 4
               && GET_CODE (XEXP (x, 1)) == CONST_INT
               && INTVAL (XEXP (x, 1)) >= 0
               && INTVAL (XEXP (x, 1)) + GET_MODE_SIZE (mode) <= 1024
               && (INTVAL (XEXP (x, 1)) & 3) == 0)
        return 1;

      else if (GET_CODE (XEXP (x, 0)) == REG
               && (REGNO (XEXP (x, 0)) == FRAME_POINTER_REGNUM
                   || REGNO (XEXP (x, 0)) == ARG_POINTER_REGNUM
                   || (REGNO (XEXP (x, 0)) >= FIRST_VIRTUAL_REGISTER
                       && REGNO (XEXP (x, 0)) <= LAST_VIRTUAL_REGISTER))
               && GET_MODE_SIZE (mode) >= 4
               && GET_CODE (XEXP (x, 1)) == CONST_INT
               && (INTVAL (XEXP (x, 1)) & 3) == 0)
        return 1;
    }

  else if (GET_MODE_CLASS (mode) != MODE_FLOAT
           && GET_MODE_SIZE (mode) == 4
           && GET_CODE (x) == SYMBOL_REF
           && CONSTANT_POOL_ADDRESS_P (x)
           && ! (flag_pic
                 && symbol_mentioned_p (get_pool_constant (x))
                 && ! pcrel_constant_p (get_pool_constant (x))))
    return 1;

  return 0;
}

/* Return nonzero if VAL can be used as an offset in a Thumb-state address
   instruction of mode MODE.  */
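/* For example, a byte access allows offsets 0..31, a halfword access
   allows even offsets 0..62, and a word access allows multiples of 4
   from 0 to 124.  */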
int
thumb_legitimate_offset_p (enum machine_mode mode, HOST_WIDE_INT val)
{
  switch (GET_MODE_SIZE (mode))
    {
    case 1:
      return val >= 0 && val < 32;

    case 2:
      return val >= 0 && val < 64 && (val & 1) == 0;

    default:
      return (val >= 0
              && (val + GET_MODE_SIZE (mode)) <= 128
              && (val & 3) == 0);
    }
}

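/* Return true if X is a valid address of mode MODE for the current
   compilation target, dispatching to the ARM, Thumb-2 or Thumb-1
   checker as appropriate.  */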
bool
arm_legitimate_address_p (enum machine_mode mode, rtx x, bool strict_p)
{
  if (TARGET_ARM)
    return arm_legitimate_address_outer_p (mode, x, SET, strict_p);
  else if (TARGET_THUMB2)
    return thumb2_legitimate_address_p (mode, x, strict_p);
  else /* if (TARGET_THUMB1) */
    return thumb1_legitimate_address_p (mode, x, strict_p);
}

/* Build the SYMBOL_REF for __tls_get_addr.  */

static GTY(()) rtx tls_get_addr_libfunc;

static rtx
get_tls_get_addr (void)
{
  if (!tls_get_addr_libfunc)
    tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
  return tls_get_addr_libfunc;
}

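/* Load the thread pointer into TARGET, or into a fresh pseudo when
   TARGET is null, and return the register that holds it.  */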
static rtx
arm_load_tp (rtx target)
{
  if (!target)
    target = gen_reg_rtx (SImode);

  if (TARGET_HARD_TP)
    {
      /* Can return in any reg.  */
      emit_insn (gen_load_tp_hard (target));
    }
  else
    {
      /* Always returned in r0.  Immediately copy the result into a pseudo,
         otherwise other uses of r0 (e.g. setting up function arguments) may
         clobber the value.  */

      rtx tmp;

      emit_insn (gen_load_tp_soft ());

      tmp = gen_rtx_REG (SImode, 0);
      emit_move_insn (target, tmp);
    }
  return target;
}

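/* Move the TLS UNSPEC expression X into REG, allocating a fresh pseudo
   when REG is null, and return the register.  */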
static rtx
load_tls_operand (rtx x, rtx reg)
{
  rtx tmp;

  if (reg == NULL_RTX)
    reg = gen_reg_rtx (SImode);

  tmp = gen_rtx_CONST (SImode, x);

  emit_move_insn (reg, tmp);

  return reg;
}

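/* Emit the insn sequence that calls __tls_get_addr for symbol X with
   relocation RELOC, storing the rtx for the call's return value in
   *VALUEP; return the emitted sequence.  */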
static rtx
arm_call_tls_get_addr (rtx x, rtx reg, rtx *valuep, int reloc)
{
  rtx insns, label, labelno, sum;

  start_sequence ();

  labelno = GEN_INT (pic_labelno++);
  label = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, labelno), UNSPEC_PIC_LABEL);
  label = gen_rtx_CONST (VOIDmode, label);

  sum = gen_rtx_UNSPEC (Pmode,
                        gen_rtvec (4, x, GEN_INT (reloc), label,
                                   GEN_INT (TARGET_ARM ? 8 : 4)),
                        UNSPEC_TLS);
  reg = load_tls_operand (sum, reg);

  if (TARGET_ARM)
    emit_insn (gen_pic_add_dot_plus_eight (reg, reg, labelno));
  else if (TARGET_THUMB2)
    emit_insn (gen_pic_add_dot_plus_four (reg, reg, labelno));
  else /* TARGET_THUMB1 */
    emit_insn (gen_pic_add_dot_plus_four (reg, reg, labelno));

  *valuep = emit_library_call_value (get_tls_get_addr (), NULL_RTX, LCT_PURE, /* LCT_CONST?  */
                                     Pmode, 1, reg, Pmode);

  insns = get_insns ();
  end_sequence ();

  return insns;
}

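/* Legitimize a reference to the TLS symbol X, emitting the access
   sequence required by its TLS model (global dynamic, local dynamic,
   initial exec or local exec) and using REG as an optional scratch.  */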
rtx
legitimize_tls_address (rtx x, rtx reg)
{
  rtx dest, tp, label, labelno, sum, insns, ret, eqv, addend;
  unsigned int model = SYMBOL_REF_TLS_MODEL (x);

  switch (model)
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      insns = arm_call_tls_get_addr (x, reg, &ret, TLS_GD32);
      dest = gen_reg_rtx (Pmode);
      emit_libcall_block (insns, dest, ret, x);
      return dest;

    case TLS_MODEL_LOCAL_DYNAMIC:
      insns = arm_call_tls_get_addr (x, reg, &ret, TLS_LDM32);

      /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
         share the LDM result with other LD model accesses.  */
      eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const1_rtx),
                            UNSPEC_TLS);
      dest = gen_reg_rtx (Pmode);
      emit_libcall_block (insns, dest, ret, eqv);

      /* Load the addend.  */
      addend = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (TLS_LDO32)),
                               UNSPEC_TLS);
      addend = force_reg (SImode, gen_rtx_CONST (SImode, addend));
      return gen_rtx_PLUS (Pmode, dest, addend);

    case TLS_MODEL_INITIAL_EXEC:
      labelno = GEN_INT (pic_labelno++);
      label = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, labelno), UNSPEC_PIC_LABEL);
      label = gen_rtx_CONST (VOIDmode, label);
      sum = gen_rtx_UNSPEC (Pmode,
                            gen_rtvec (4, x, GEN_INT (TLS_IE32), label,
                                       GEN_INT (TARGET_ARM ? 8 : 4)),
                            UNSPEC_TLS);
      reg = load_tls_operand (sum, reg);

      if (TARGET_ARM)
        emit_insn (gen_tls_load_dot_plus_eight (reg, reg, labelno));
      else if (TARGET_THUMB2)
        emit_insn (gen_tls_load_dot_plus_four (reg, NULL, reg, labelno));
      else
        {
          emit_insn (gen_pic_add_dot_plus_four (reg, reg, labelno));
          emit_move_insn (reg, gen_const_mem (SImode, reg));
        }

      tp = arm_load_tp (NULL_RTX);

      return gen_rtx_PLUS (Pmode, tp, reg);

    case TLS_MODEL_LOCAL_EXEC:
      tp = arm_load_tp (NULL_RTX);

      reg = gen_rtx_UNSPEC (Pmode,
                            gen_rtvec (2, x, GEN_INT (TLS_LE32)),
                            UNSPEC_TLS);
      reg = force_reg (SImode, gen_rtx_CONST (SImode, reg));

      return gen_rtx_PLUS (Pmode, tp, reg);

    default:
      abort ();
    }
}

/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.  */
rtx
arm_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
{
  if (!TARGET_ARM)
    {
      /* TODO: legitimize_address for Thumb2.  */
      if (TARGET_THUMB2)
        return x;
      return thumb_legitimize_address (x, orig_x, mode);
    }

  if (arm_tls_symbol_p (x))
    return legitimize_tls_address (x, NULL_RTX);

  if (GET_CODE (x) == PLUS)
    {
      rtx xop0 = XEXP (x, 0);
      rtx xop1 = XEXP (x, 1);

      if (CONSTANT_P (xop0) && !symbol_mentioned_p (xop0))
        xop0 = force_reg (SImode, xop0);

      if (CONSTANT_P (xop1) && !symbol_mentioned_p (xop1))
        xop1 = force_reg (SImode, xop1);

      if (ARM_BASE_REGISTER_RTX_P (xop0)
          && GET_CODE (xop1) == CONST_INT)
        {
          HOST_WIDE_INT n, low_n;
          rtx base_reg, val;
          n = INTVAL (xop1);

          /* VFP addressing modes actually allow greater offsets, but for
             now we just stick with the lowest common denominator.  */
          if (mode == DImode
              || ((TARGET_SOFT_FLOAT || TARGET_VFP) && mode == DFmode))
            {
              low_n = n & 0x0f;
              n &= ~0x0f;
              if (low_n > 4)
                {
                  n += 16;
                  low_n -= 16;
                }
            }
          else
            {
              low_n = ((mode) == TImode ? 0
                       : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff));
              n -= low_n;
            }

          base_reg = gen_reg_rtx (SImode);
          val = force_operand (plus_constant (xop0, n), NULL_RTX);
          emit_move_insn (base_reg, val);
          x = plus_constant (base_reg, low_n);
        }
      else if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
        x = gen_rtx_PLUS (SImode, xop0, xop1);
    }

  /* XXX We don't allow MINUS any more -- see comment in
     arm_legitimate_address_outer_p ().  */
  else if (GET_CODE (x) == MINUS)
    {
      rtx xop0 = XEXP (x, 0);
      rtx xop1 = XEXP (x, 1);

      if (CONSTANT_P (xop0))
        xop0 = force_reg (SImode, xop0);

      if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1))
        xop1 = force_reg (SImode, xop1);

      if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
        x = gen_rtx_MINUS (SImode, xop0, xop1);
    }

  /* Make sure to take full advantage of the pre-indexed addressing mode
     with absolute addresses which often allows for the base register to
     be factorized for multiple adjacent memory references, and it might
     even allow for the mini pool to be avoided entirely.  */
  else if (GET_CODE (x) == CONST_INT && optimize > 0)
    {
      unsigned int bits;
      HOST_WIDE_INT mask, base, index;
      rtx base_reg;

      /* ldr and ldrb can use a 12-bit index, ldrsb and the rest can only
         use an 8-bit index.  So let's use a 12-bit index for SImode only
         and hope that arm_gen_constant will enable ldrb to use more bits.  */
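      /* For example, a hypothetical SImode access at absolute address
         0x12345678 becomes base 0x12345000, materialized in a register,
         plus index 0x678, which fits the 12-bit ldr offset field.  */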
5965
      bits = (mode == SImode) ? 12 : 8;
5966
      mask = (1 << bits) - 1;
5967
      base = INTVAL (x) & ~mask;
5968
      index = INTVAL (x) & mask;
5969
      if (bit_count (base & 0xffffffff) > (32 - bits)/2)
5970
        {
5971
          /* It'll most probably be more efficient to generate the base
5972
             with more bits set and use a negative index instead. */
5973
          base |= mask;
5974
          index -= mask;
5975
        }
5976
      base_reg = force_reg (SImode, GEN_INT (base));
5977
      x = plus_constant (base_reg, index);
5978
    }
5979
 
5980
  if (flag_pic)
5981
    {
5982
      /* We need to find and carefully transform any SYMBOL and LABEL
5983
         references; so go back to the original address expression.  */
5984
      rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
5985
 
5986
      if (new_x != orig_x)
5987
        x = new_x;
5988
    }
5989
 
5990
  return x;
5991
}
5992
 
5993
 
5994
/* Try machine-dependent ways of modifying an illegitimate Thumb address
5995
   to be legitimate.  If we find one, return the new, valid address.  */
5996
rtx
5997
thumb_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
5998
{
5999
  if (arm_tls_symbol_p (x))
6000
    return legitimize_tls_address (x, NULL_RTX);
6001
 
6002
  if (GET_CODE (x) == PLUS
6003
      && GET_CODE (XEXP (x, 1)) == CONST_INT
6004
      && (INTVAL (XEXP (x, 1)) >= 32 * GET_MODE_SIZE (mode)
6005
          || INTVAL (XEXP (x, 1)) < 0))
6006
    {
6007
      rtx xop0 = XEXP (x, 0);
6008
      rtx xop1 = XEXP (x, 1);
6009
      HOST_WIDE_INT offset = INTVAL (xop1);
6010
 
6011
      /* Try to fold the offset into a biasing of the base register and
6012
         then offsetting that.  Don't do this when optimizing for space
6013
         since it can cause too many CSEs.  */
6014
      if (optimize_size && offset >= 0
6015
          && offset < 256 + 31 * GET_MODE_SIZE (mode))
6016
        {
6017
          HOST_WIDE_INT delta;
6018
 
6019
          if (offset >= 256)
6020
            delta = offset - (256 - GET_MODE_SIZE (mode));
6021
          else if (offset < 32 * GET_MODE_SIZE (mode) + 8)
6022
            delta = 31 * GET_MODE_SIZE (mode);
6023
          else
6024
            delta = offset & (~31 * GET_MODE_SIZE (mode));
6025
 
6026
          xop0 = force_operand (plus_constant (xop0, offset - delta),
6027
                                NULL_RTX);
6028
          x = plus_constant (xop0, delta);
6029
        }
6030
      else if (offset < 0 && offset > -256)
6031
        /* Small negative offsets are best done with a subtract before the
6032
           dereference; forcing these into a register normally takes two
6033
           instructions.  */
6034
        x = force_operand (x, NULL_RTX);
6035
      else
6036
        {
6037
          /* For the remaining cases, force the constant into a register.  */
6038
          xop1 = force_reg (SImode, xop1);
6039
          x = gen_rtx_PLUS (SImode, xop0, xop1);
6040
        }
6041
    }
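  /* Worked example (editorial illustration, assuming the optimize_size
     path above): for SImode and offset == 300, the first branch applies
     (300 >= 256), giving delta == 300 - (256 - 4) == 48; the base is
     biased by 252 into a fresh register and the final address becomes
     (new_base + 48), a valid scaled 5-bit Thumb-1 ldr offset.  */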
6042
  else if (GET_CODE (x) == PLUS
6043
           && s_register_operand (XEXP (x, 1), SImode)
6044
           && !s_register_operand (XEXP (x, 0), SImode))
6045
    {
6046
      rtx xop0 = force_operand (XEXP (x, 0), NULL_RTX);
6047
 
6048
      x = gen_rtx_PLUS (SImode, xop0, XEXP (x, 1));
6049
    }
6050
 
6051
  if (flag_pic)
6052
    {
6053
      /* We need to find and carefully transform any SYMBOL and LABEL
6054
         references; so go back to the original address expression.  */
6055
      rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
6056
 
6057
      if (new_x != orig_x)
6058
        x = new_x;
6059
    }
6060
 
6061
  return x;
6062
}
6063
 
6064
rtx
6065
thumb_legitimize_reload_address (rtx *x_p,
6066
                                 enum machine_mode mode,
6067
                                 int opnum, int type,
6068
                                 int ind_levels ATTRIBUTE_UNUSED)
6069
{
6070
  rtx x = *x_p;
6071
 
6072
  if (GET_CODE (x) == PLUS
6073
      && GET_MODE_SIZE (mode) < 4
6074
      && REG_P (XEXP (x, 0))
6075
      && XEXP (x, 0) == stack_pointer_rtx
6076
      && GET_CODE (XEXP (x, 1)) == CONST_INT
6077
      && !thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
6078
    {
6079
      rtx orig_x = x;
6080
 
6081
      x = copy_rtx (x);
6082
      push_reload (orig_x, NULL_RTX, x_p, NULL, MODE_BASE_REG_CLASS (mode),
6083
                   Pmode, VOIDmode, 0, 0, opnum, (enum reload_type) type);
6084
      return x;
6085
    }
6086
 
6087
  /* If both registers are hi-regs, then it's better to reload the
6088
     entire expression rather than each register individually.  That
6089
     only requires one reload register rather than two.  */
6090
  if (GET_CODE (x) == PLUS
6091
      && REG_P (XEXP (x, 0))
6092
      && REG_P (XEXP (x, 1))
6093
      && !REG_MODE_OK_FOR_REG_BASE_P (XEXP (x, 0), mode)
6094
      && !REG_MODE_OK_FOR_REG_BASE_P (XEXP (x, 1), mode))
6095
    {
6096
      rtx orig_x = x;
6097
 
6098
      x = copy_rtx (x);
6099
      push_reload (orig_x, NULL_RTX, x_p, NULL, MODE_BASE_REG_CLASS (mode),
6100
                   Pmode, VOIDmode, 0, 0, opnum, (enum reload_type) type);
6101
      return x;
6102
    }
6103
 
6104
  return NULL;
6105
}
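/* Editorial note (illustration, not from the original source): on Thumb-1
   only the low registers r0-r7 may serve as memory base registers, so for
   an address such as (plus (reg r8) (reg r9)) both register checks above
   fail and the whole sum is reloaded into a single low register, rather
   than reloading r8 and r9 separately into two.  */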
6106
 
6107
/* Test for various thread-local symbols.  */
6108
 
6109
/* Return TRUE if X is a thread-local symbol.  */
6110
 
6111
static bool
6112
arm_tls_symbol_p (rtx x)
6113
{
6114
  if (! TARGET_HAVE_TLS)
6115
    return false;
6116
 
6117
  if (GET_CODE (x) != SYMBOL_REF)
6118
    return false;
6119
 
6120
  return SYMBOL_REF_TLS_MODEL (x) != 0;
6121
}
6122
 
6123
/* Helper for arm_tls_referenced_p.  */
6124
 
6125
static int
6126
arm_tls_operand_p_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
6127
{
6128
  if (GET_CODE (*x) == SYMBOL_REF)
6129
    return SYMBOL_REF_TLS_MODEL (*x) != 0;
6130
 
6131
  /* Don't recurse into UNSPEC_TLS looking for TLS symbols; these are
6132
     TLS offsets, not real symbol references.  */
6133
  if (GET_CODE (*x) == UNSPEC
6134
      && XINT (*x, 1) == UNSPEC_TLS)
6135
    return -1;
6136
 
6137
  return 0;
6138
}
6139
 
6140
/* Return TRUE if X contains any TLS symbol references.  */
6141
 
6142
bool
6143
arm_tls_referenced_p (rtx x)
6144
{
6145
  if (! TARGET_HAVE_TLS)
6146
    return false;
6147
 
6148
  return for_each_rtx (&x, arm_tls_operand_p_1, NULL);
6149
}
6150
 
6151
/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */
6152
 
6153
bool
6154
arm_cannot_force_const_mem (rtx x)
6155
{
6156
  rtx base, offset;
6157
 
6158
  if (ARM_OFFSETS_MUST_BE_WITHIN_SECTIONS_P)
6159
    {
6160
      split_const (x, &base, &offset);
6161
      if (GET_CODE (base) == SYMBOL_REF
6162
          && !offset_within_block_p (base, INTVAL (offset)))
6163
        return true;
6164
    }
6165
  return arm_tls_referenced_p (x);
6166
}
6167
 
6168
#define REG_OR_SUBREG_REG(X)                                            \
6169
  (GET_CODE (X) == REG                                                  \
6170
   || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
6171
 
6172
#define REG_OR_SUBREG_RTX(X)                    \
6173
   (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
6174
 
6175
#ifndef COSTS_N_INSNS
6176
#define COSTS_N_INSNS(N) ((N) * 4 - 2)
6177
#endif
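/* Editorial note: with the fallback definition above, COSTS_N_INSNS (1)
   == 2, COSTS_N_INSNS (2) == 6 and COSTS_N_INSNS (3) == 10, so each
   extra instruction adds 4; the bare integers returned below (e.g. 8
   for AND/XOR/IOR) deliberately fall between instruction costs.  */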
6178
static inline int
6179
thumb1_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
6180
{
6181
  enum machine_mode mode = GET_MODE (x);
6182
 
6183
  switch (code)
6184
    {
6185
    case ASHIFT:
6186
    case ASHIFTRT:
6187
    case LSHIFTRT:
6188
    case ROTATERT:
6189
    case PLUS:
6190
    case MINUS:
6191
    case COMPARE:
6192
    case NEG:
6193
    case NOT:
6194
      return COSTS_N_INSNS (1);
6195
 
6196
    case MULT:
6197
      if (GET_CODE (XEXP (x, 1)) == CONST_INT)
6198
        {
6199
          int cycles = 0;
6200
          unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
6201
 
6202
          while (i)
6203
            {
6204
              i >>= 2;
6205
              cycles++;
6206
            }
6207
          return COSTS_N_INSNS (2) + cycles;
6208
        }
6209
      return COSTS_N_INSNS (1) + 16;
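      /* Worked example (editorial): for XEXP (x, 1) == (const_int 0x65)
         the loop above shifts 0x65 -> 0x19 -> 0x6 -> 0x1 -> 0, so
         cycles == 4 and the cost is COSTS_N_INSNS (2) + 4.  */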
6210
 
6211
    case SET:
6212
      return (COSTS_N_INSNS (1)
6213
              + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
6214
                     + (GET_CODE (SET_DEST (x)) == MEM)));
6215
 
6216
    case CONST_INT:
6217
      if (outer == SET)
6218
        {
6219
          if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
6220
            return 0;
6221
          if (thumb_shiftable_const (INTVAL (x)))
6222
            return COSTS_N_INSNS (2);
6223
          return COSTS_N_INSNS (3);
6224
        }
6225
      else if ((outer == PLUS || outer == COMPARE)
6226
               && INTVAL (x) < 256 && INTVAL (x) > -256)
6227
        return 0;
6228
      else if ((outer == IOR || outer == XOR || outer == AND)
6229
               && INTVAL (x) < 256 && INTVAL (x) >= -256)
6230
        return COSTS_N_INSNS (1);
6231
      else if (outer == AND)
6232
        {
6233
          int i;
6234
          /* This duplicates the tests in the andsi3 expander.  */
6235
          for (i = 9; i <= 31; i++)
6236
            if ((((HOST_WIDE_INT) 1) << i) - 1 == INTVAL (x)
6237
                || (((HOST_WIDE_INT) 1) << i) - 1 == ~INTVAL (x))
6238
              return COSTS_N_INSNS (2);
6239
        }
6240
      else if (outer == ASHIFT || outer == ASHIFTRT
6241
               || outer == LSHIFTRT)
6242
        return 0;
6243
      return COSTS_N_INSNS (2);
6244
 
6245
    case CONST:
6246
    case CONST_DOUBLE:
6247
    case LABEL_REF:
6248
    case SYMBOL_REF:
6249
      return COSTS_N_INSNS (3);
6250
 
6251
    case UDIV:
6252
    case UMOD:
6253
    case DIV:
6254
    case MOD:
6255
      return 100;
6256
 
6257
    case TRUNCATE:
6258
      return 99;
6259
 
6260
    case AND:
6261
    case XOR:
6262
    case IOR:
6263
      /* XXX guess.  */
6264
      return 8;
6265
 
6266
    case MEM:
6267
      /* XXX another guess.  */
6268
      /* Memory costs quite a lot for the first word, but subsequent words
6269
         load at the equivalent of a single insn each.  */
6270
      return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
6271
              + ((GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
6272
                 ? 4 : 0));
6273
 
6274
    case IF_THEN_ELSE:
6275
      /* XXX a guess.  */
6276
      if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
6277
        return 14;
6278
      return 2;
6279
 
6280
    case ZERO_EXTEND:
6281
      /* XXX still guessing.  */
6282
      switch (GET_MODE (XEXP (x, 0)))
6283
        {
6284
        case QImode:
6285
          return (1 + (mode == DImode ? 4 : 0)
6286
                  + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
6287
 
6288
        case HImode:
6289
          return (4 + (mode == DImode ? 4 : 0)
6290
                  + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
6291
 
6292
        case SImode:
6293
          return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
6294
 
6295
        default:
6296
          return 99;
6297
        }
6298
 
6299
    default:
6300
      return 99;
6301
    }
6302
}
6303
 
6304
static inline bool
6305
arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total, bool speed)
6306
{
6307
  enum machine_mode mode = GET_MODE (x);
6308
  enum rtx_code subcode;
6309
  rtx operand;
6310
  enum rtx_code code = GET_CODE (x);
6311
  int extra_cost;
6312
  *total = 0;
6313
 
6314
  switch (code)
6315
    {
6316
    case MEM:
6317
      /* Memory costs quite a lot for the first word, but subsequent words
6318
         load at the equivalent of a single insn each.  */
6319
      *total = COSTS_N_INSNS (2 + ARM_NUM_REGS (mode));
6320
      return true;
6321
 
6322
    case DIV:
6323
    case MOD:
6324
    case UDIV:
6325
    case UMOD:
6326
      if (TARGET_HARD_FLOAT && mode == SFmode)
6327
        *total = COSTS_N_INSNS (2);
6328
      else if (TARGET_HARD_FLOAT && mode == DFmode && !TARGET_VFP_SINGLE)
6329
        *total = COSTS_N_INSNS (4);
6330
      else
6331
        *total = COSTS_N_INSNS (20);
6332
      return false;
6333
 
6334
    case ROTATE:
6335
      if (GET_CODE (XEXP (x, 1)) == REG)
6336
        *total = COSTS_N_INSNS (1); /* Need to subtract from 32 */
6337
      else if (GET_CODE (XEXP (x, 1)) != CONST_INT)
6338
        *total = rtx_cost (XEXP (x, 1), code, speed);
6339
 
6340
      /* Fall through */
6341
    case ROTATERT:
6342
      if (mode != SImode)
6343
        {
6344
          *total += COSTS_N_INSNS (4);
6345
          return true;
6346
        }
6347
 
6348
      /* Fall through */
6349
    case ASHIFT: case LSHIFTRT: case ASHIFTRT:
6350
      *total += rtx_cost (XEXP (x, 0), code, speed);
6351
      if (mode == DImode)
6352
        {
6353
          *total += COSTS_N_INSNS (3);
6354
          return true;
6355
        }
6356
 
6357
      *total += COSTS_N_INSNS (1);
6358
      /* Increase the cost of complex shifts because they aren't any faster,
6359
         and they reduce dual-issue opportunities.  */
6360
      if (arm_tune_cortex_a9
6361
          && outer != SET && GET_CODE (XEXP (x, 1)) != CONST_INT)
6362
        ++*total;
6363
 
6364
      return true;
6365
 
6366
    case MINUS:
6367
      if (TARGET_THUMB2)
6368
        {
6369
          if (GET_MODE_CLASS (mode) == MODE_FLOAT)
6370
            {
6371
              if (TARGET_HARD_FLOAT && (mode == SFmode || mode == DFmode))
6372
                *total = COSTS_N_INSNS (1);
6373
              else
6374
                *total = COSTS_N_INSNS (20);
6375
            }
6376
          else
6377
            *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
6378
          /* Thumb2 does not have RSB, so all arguments must be
6379
             registers (subtracting a constant is canonicalized as
6380
             addition of the negated constant).  */
6381
          return false;
6382
        }
6383
 
6384
      if (mode == DImode)
6385
        {
6386
          *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
6387
          if (GET_CODE (XEXP (x, 0)) == CONST_INT
6388
              && const_ok_for_arm (INTVAL (XEXP (x, 0))))
6389
            {
6390
              *total += rtx_cost (XEXP (x, 1), code, speed);
6391
              return true;
6392
            }
6393
 
6394
          if (GET_CODE (XEXP (x, 1)) == CONST_INT
6395
              && const_ok_for_arm (INTVAL (XEXP (x, 1))))
6396
            {
6397
              *total += rtx_cost (XEXP (x, 0), code, speed);
6398
              return true;
6399
            }
6400
 
6401
          return false;
6402
        }
6403
 
6404
      if (GET_MODE_CLASS (mode) == MODE_FLOAT)
6405
        {
6406
          if (TARGET_HARD_FLOAT
6407
              && (mode == SFmode
6408
                  || (mode == DFmode && !TARGET_VFP_SINGLE)))
6409
            {
6410
              *total = COSTS_N_INSNS (1);
6411
              if (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
6412
                  && arm_const_double_rtx (XEXP (x, 0)))
6413
                {
6414
                  *total += rtx_cost (XEXP (x, 1), code, speed);
6415
                  return true;
6416
                }
6417
 
6418
              if (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
6419
                  && arm_const_double_rtx (XEXP (x, 1)))
6420
                {
6421
                  *total += rtx_cost (XEXP (x, 0), code, speed);
6422
                  return true;
6423
                }
6424
 
6425
              return false;
6426
            }
6427
          *total = COSTS_N_INSNS (20);
6428
          return false;
6429
        }
6430
 
6431
      *total = COSTS_N_INSNS (1);
6432
      if (GET_CODE (XEXP (x, 0)) == CONST_INT
6433
          && const_ok_for_arm (INTVAL (XEXP (x, 0))))
6434
        {
6435
          *total += rtx_cost (XEXP (x, 1), code, speed);
6436
          return true;
6437
        }
6438
 
6439
      subcode = GET_CODE (XEXP (x, 1));
6440
      if (subcode == ASHIFT || subcode == ASHIFTRT
6441
          || subcode == LSHIFTRT
6442
          || subcode == ROTATE || subcode == ROTATERT)
6443
        {
6444
          *total += rtx_cost (XEXP (x, 0), code, speed);
6445
          *total += rtx_cost (XEXP (XEXP (x, 1), 0), subcode, speed);
6446
          return true;
6447
        }
6448
 
6449
      /* A shift as a part of RSB costs no more than RSB itself.  */
6450
      if (GET_CODE (XEXP (x, 0)) == MULT
6451
          && power_of_two_operand (XEXP (XEXP (x, 0), 1), SImode))
6452
        {
6453
          *total += rtx_cost (XEXP (XEXP (x, 0), 0), code, speed);
6454
          *total += rtx_cost (XEXP (x, 1), code, speed);
6455
          return true;
6456
        }
6457
 
6458
      if (subcode == MULT
6459
          && power_of_two_operand (XEXP (XEXP (x, 1), 1), SImode))
6460
        {
6461
          *total += rtx_cost (XEXP (x, 0), code, speed);
6462
          *total += rtx_cost (XEXP (XEXP (x, 1), 0), subcode, speed);
6463
          return true;
6464
        }
6465
 
6466
      if (GET_RTX_CLASS (GET_CODE (XEXP (x, 1))) == RTX_COMPARE
6467
          || GET_RTX_CLASS (GET_CODE (XEXP (x, 1))) == RTX_COMM_COMPARE)
6468
        {
6469
          *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 0), code, speed);
6470
          if (GET_CODE (XEXP (XEXP (x, 1), 0)) == REG
6471
              && REGNO (XEXP (XEXP (x, 1), 0)) != CC_REGNUM)
6472
            *total += COSTS_N_INSNS (1);
6473
 
6474
          return true;
6475
        }
6476
 
6477
      /* Fall through */
6478
 
6479
    case PLUS:
6480
      if (code == PLUS && arm_arch6 && mode == SImode
6481
          && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
6482
              || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
6483
        {
6484
          *total = COSTS_N_INSNS (1);
6485
          *total += rtx_cost (XEXP (XEXP (x, 0), 0), GET_CODE (XEXP (x, 0)),
6486
                              speed);
6487
          *total += rtx_cost (XEXP (x, 1), code, speed);
6488
          return true;
6489
        }
6490
 
6491
      /* MLA: All arguments must be registers.  We filter out
6492
         multiplication by a power of two, so that we fall down into
6493
         the code below.  */
6494
      if (GET_CODE (XEXP (x, 0)) == MULT
6495
          && !power_of_two_operand (XEXP (XEXP (x, 0), 1), SImode))
6496
        {
6497
          /* The cost comes from the cost of the multiply.  */
6498
          return false;
6499
        }
6500
 
6501
      if (GET_MODE_CLASS (mode) == MODE_FLOAT)
6502
        {
6503
          if (TARGET_HARD_FLOAT
6504
              && (mode == SFmode
6505
                  || (mode == DFmode && !TARGET_VFP_SINGLE)))
6506
            {
6507
              *total = COSTS_N_INSNS (1);
6508
              if (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
6509
                  && arm_const_double_rtx (XEXP (x, 1)))
6510
                {
6511
                  *total += rtx_cost (XEXP (x, 0), code, speed);
6512
                  return true;
6513
                }
6514
 
6515
              return false;
6516
            }
6517
 
6518
          *total = COSTS_N_INSNS (20);
6519
          return false;
6520
        }
6521
 
6522
      if (GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == RTX_COMPARE
6523
          || GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == RTX_COMM_COMPARE)
6524
        {
6525
          *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 1), code, speed);
6526
          if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
6527
              && REGNO (XEXP (XEXP (x, 0), 0)) != CC_REGNUM)
6528
            *total += COSTS_N_INSNS (1);
6529
          return true;
6530
        }
6531
 
6532
      /* Fall through */
6533
 
6534
    case AND: case XOR: case IOR:
6535
      extra_cost = 0;
6536
 
6537
      /* Normally the frame registers will be split into reg+const during
6538
         reload, so it is a bad idea to combine them with other instructions,
6539
         since then they might not be moved outside of loops.  As a compromise
6540
         we allow integration with ops that have a constant as their second
6541
         operand.  */
6542
      if ((REG_OR_SUBREG_REG (XEXP (x, 0))
6543
           && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
6544
           && GET_CODE (XEXP (x, 1)) != CONST_INT)
6545
          || (REG_OR_SUBREG_REG (XEXP (x, 1))
6546
              && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 1)))))
6547
        *total = 4;
6548
 
6549
      if (mode == DImode)
6550
        {
6551
          *total += COSTS_N_INSNS (2);
6552
          if (GET_CODE (XEXP (x, 1)) == CONST_INT
6553
              && const_ok_for_op (INTVAL (XEXP (x, 1)), code))
6554
            {
6555
              *total += rtx_cost (XEXP (x, 0), code, speed);
6556
              return true;
6557
            }
6558
 
6559
          return false;
6560
        }
6561
 
6562
      *total += COSTS_N_INSNS (1);
6563
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
6564
          && const_ok_for_op (INTVAL (XEXP (x, 1)), code))
6565
        {
6566
          *total += rtx_cost (XEXP (x, 0), code, speed);
6567
          return true;
6568
        }
6569
      subcode = GET_CODE (XEXP (x, 0));
6570
      if (subcode == ASHIFT || subcode == ASHIFTRT
6571
          || subcode == LSHIFTRT
6572
          || subcode == ROTATE || subcode == ROTATERT)
6573
        {
6574
          *total += rtx_cost (XEXP (x, 1), code, speed);
6575
          *total += rtx_cost (XEXP (XEXP (x, 0), 0), subcode, speed);
6576
          return true;
6577
        }
6578
 
6579
      if (subcode == MULT
6580
          && power_of_two_operand (XEXP (XEXP (x, 0), 1), SImode))
6581
        {
6582
          *total += rtx_cost (XEXP (x, 1), code, speed);
6583
          *total += rtx_cost (XEXP (XEXP (x, 0), 0), subcode, speed);
6584
          return true;
6585
        }
6586
 
6587
      if (subcode == UMIN || subcode == UMAX
6588
          || subcode == SMIN || subcode == SMAX)
6589
        {
6590
          *total = COSTS_N_INSNS (3);
6591
          return true;
6592
        }
6593
 
6594
      return false;
6595
 
6596
    case MULT:
6597
      /* This should have been handled by the CPU specific routines.  */
6598
      gcc_unreachable ();
6599
 
6600
    case TRUNCATE:
6601
      if (arm_arch3m && mode == SImode
6602
          && GET_CODE (XEXP (x, 0)) == LSHIFTRT
6603
          && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
6604
          && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
6605
              == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
6606
          && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
6607
              || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
6608
        {
6609
          *total = rtx_cost (XEXP (XEXP (x, 0), 0), LSHIFTRT, speed);
6610
          return true;
6611
        }
6612
      *total = COSTS_N_INSNS (2); /* Plus the cost of the MULT */
6613
      return false;
6614
 
6615
    case NEG:
6616
      if (GET_MODE_CLASS (mode) == MODE_FLOAT)
6617
        {
6618
          if (TARGET_HARD_FLOAT
6619
              && (mode == SFmode
6620
                  || (mode == DFmode && !TARGET_VFP_SINGLE)))
6621
            {
6622
              *total = COSTS_N_INSNS (1);
6623
              return false;
6624
            }
6625
          *total = COSTS_N_INSNS (2);
6626
          return false;
6627
        }
6628
 
6629
      /* Fall through */
6630
    case NOT:
6631
      *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
6632
      if (mode == SImode && code == NOT)
6633
        {
6634
          subcode = GET_CODE (XEXP (x, 0));
6635
          if (subcode == ASHIFT || subcode == ASHIFTRT
6636
              || subcode == LSHIFTRT
6637
              || subcode == ROTATE || subcode == ROTATERT
6638
              || (subcode == MULT
6639
                  && power_of_two_operand (XEXP (XEXP (x, 0), 1), SImode)))
6640
            {
6641
              *total += rtx_cost (XEXP (XEXP (x, 0), 0), subcode, speed);
6642
              /* Register shifts cost an extra cycle.  */
6643
              if (GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
6644
                *total += COSTS_N_INSNS (1) + rtx_cost (XEXP (XEXP (x, 0), 1),
6645
                                                        subcode, speed);
6646
              return true;
6647
            }
6648
        }
6649
 
6650
      return false;
6651
 
6652
    case IF_THEN_ELSE:
6653
      if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
6654
        {
6655
          *total = COSTS_N_INSNS (4);
6656
          return true;
6657
        }
6658
 
6659
      operand = XEXP (x, 0);
6660
 
6661
      if (!((GET_RTX_CLASS (GET_CODE (operand)) == RTX_COMPARE
6662
             || GET_RTX_CLASS (GET_CODE (operand)) == RTX_COMM_COMPARE)
6663
            && GET_CODE (XEXP (operand, 0)) == REG
6664
            && REGNO (XEXP (operand, 0)) == CC_REGNUM))
6665
        *total += COSTS_N_INSNS (1);
6666
      *total += (rtx_cost (XEXP (x, 1), code, speed)
6667
                 + rtx_cost (XEXP (x, 2), code, speed));
6668
      return true;
6669
 
6670
    case NE:
6671
      if (mode == SImode && XEXP (x, 1) == const0_rtx)
6672
        {
6673
          *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), code, speed);
6674
          return true;
6675
        }
6676
      goto scc_insn;
6677
 
6678
    case GE:
6679
      if ((GET_CODE (XEXP (x, 0)) != REG || REGNO (XEXP (x, 0)) != CC_REGNUM)
6680
          && mode == SImode && XEXP (x, 1) == const0_rtx)
6681
        {
6682
          *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), code, speed);
6683
          return true;
6684
        }
6685
      goto scc_insn;
6686
 
6687
    case LT:
6688
      if ((GET_CODE (XEXP (x, 0)) != REG || REGNO (XEXP (x, 0)) != CC_REGNUM)
6689
          && mode == SImode && XEXP (x, 1) == const0_rtx)
6690
        {
6691
          *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 0), code, speed);
6692
          return true;
6693
        }
6694
      goto scc_insn;
6695
 
6696
    case EQ:
6697
    case GT:
6698
    case LE:
6699
    case GEU:
6700
    case LTU:
6701
    case GTU:
6702
    case LEU:
6703
    case UNORDERED:
6704
    case ORDERED:
6705
    case UNEQ:
6706
    case UNGE:
6707
    case UNLT:
6708
    case UNGT:
6709
    case UNLE:
6710
    scc_insn:
6711
      /* SCC insns.  If the comparison has already been performed, they
6712
         cost 2 instructions.  Otherwise they need an additional
6713
         comparison before them.  */
6714
      *total = COSTS_N_INSNS (2);
6715
      if (GET_CODE (XEXP (x, 0)) == REG && REGNO (XEXP (x, 0)) == CC_REGNUM)
6716
        {
6717
          return true;
6718
        }
6719
 
6720
      /* Fall through */
6721
    case COMPARE:
6722
      if (GET_CODE (XEXP (x, 0)) == REG && REGNO (XEXP (x, 0)) == CC_REGNUM)
6723
        {
6724
          *total = 0;
6725
          return true;
6726
        }
6727
 
6728
      *total += COSTS_N_INSNS (1);
6729
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
6730
          && const_ok_for_op (INTVAL (XEXP (x, 1)), code))
6731
        {
6732
          *total += rtx_cost (XEXP (x, 0), code, speed);
6733
          return true;
6734
        }
6735
 
6736
      subcode = GET_CODE (XEXP (x, 0));
6737
      if (subcode == ASHIFT || subcode == ASHIFTRT
6738
          || subcode == LSHIFTRT
6739
          || subcode == ROTATE || subcode == ROTATERT)
6740
        {
6741
          *total += rtx_cost (XEXP (x, 1), code, speed);
6742
          *total += rtx_cost (XEXP (XEXP (x, 0), 0), subcode, speed);
6743
          return true;
6744
        }
6745
 
6746
      if (subcode == MULT
6747
          && power_of_two_operand (XEXP (XEXP (x, 0), 1), SImode))
6748
        {
6749
          *total += rtx_cost (XEXP (x, 1), code, speed);
6750
          *total += rtx_cost (XEXP (XEXP (x, 0), 0), subcode, speed);
6751
          return true;
6752
        }
6753
 
6754
      return false;
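      /* Editorial example: an SImode (eq r1 r2) with no live CC result
         takes the scc path above, so *total starts at COSTS_N_INSNS (2)
         for the move pair, and the fall-through into COMPARE adds
         COSTS_N_INSNS (1) for the cmp itself.  */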
6755
 
6756
    case UMIN:
6757
    case UMAX:
6758
    case SMIN:
6759
    case SMAX:
6760
      *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), code, speed);
6761
      if (GET_CODE (XEXP (x, 1)) != CONST_INT
6762
          || !const_ok_for_arm (INTVAL (XEXP (x, 1))))
6763
        *total += rtx_cost (XEXP (x, 1), code, speed);
6764
      return true;
6765
 
6766
    case ABS:
6767
      if (GET_MODE_CLASS (mode) == MODE_FLOAT)
6768
        {
6769
          if (TARGET_HARD_FLOAT
6770
              && (mode == SFmode
6771
                  || (mode == DFmode && !TARGET_VFP_SINGLE)))
6772
            {
6773
              *total = COSTS_N_INSNS (1);
6774
              return false;
6775
            }
6776
          *total = COSTS_N_INSNS (20);
6777
          return false;
6778
        }
6779
      *total = COSTS_N_INSNS (1);
6780
      if (mode == DImode)
6781
        *total += COSTS_N_INSNS (3);
6782
      return false;
6783
 
6784
    case SIGN_EXTEND:
6785
      if (GET_MODE_CLASS (mode) == MODE_INT)
6786
        {
6787
          *total = 0;
6788
          if (mode == DImode)
6789
            *total += COSTS_N_INSNS (1);
6790
 
6791
          if (GET_MODE (XEXP (x, 0)) != SImode)
6792
            {
6793
              if (arm_arch6)
6794
                {
6795
                  if (GET_CODE (XEXP (x, 0)) != MEM)
6796
                    *total += COSTS_N_INSNS (1);
6797
                }
6798
              else if (!arm_arch4 || GET_CODE (XEXP (x, 0)) != MEM)
6799
                *total += COSTS_N_INSNS (2);
6800
            }
6801
 
6802
          return false;
6803
        }
6804
 
6805
      /* Fall through */
6806
    case ZERO_EXTEND:
6807
      *total = 0;
6808
      if (GET_MODE_CLASS (mode) == MODE_INT)
6809
        {
6810
          if (mode == DImode)
6811
            *total += COSTS_N_INSNS (1);
6812
 
6813
          if (GET_MODE (XEXP (x, 0)) != SImode)
6814
            {
6815
              if (arm_arch6)
6816
                {
6817
                  if (GET_CODE (XEXP (x, 0)) != MEM)
6818
                    *total += COSTS_N_INSNS (1);
6819
                }
6820
              else if (!arm_arch4 || GET_CODE (XEXP (x, 0)) != MEM)
6821
                *total += COSTS_N_INSNS (GET_MODE (XEXP (x, 0)) == QImode ?
6822
                                         1 : 2);
6823
            }
6824
 
6825
          return false;
6826
        }
6827
 
6828
      switch (GET_MODE (XEXP (x, 0)))
6829
        {
6830
        case V8QImode:
6831
        case V4HImode:
6832
        case V2SImode:
6833
        case V4QImode:
6834
        case V2HImode:
6835
          *total = COSTS_N_INSNS (1);
6836
          return false;
6837
 
6838
        default:
6839
          gcc_unreachable ();
6840
        }
6841
      gcc_unreachable ();
6842
 
6843
    case ZERO_EXTRACT:
6844
    case SIGN_EXTRACT:
6845
      *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 0), code, speed);
6846
      return true;
6847
 
6848
    case CONST_INT:
6849
      if (const_ok_for_arm (INTVAL (x))
6850
          || const_ok_for_arm (~INTVAL (x)))
6851
        *total = COSTS_N_INSNS (1);
6852
      else
6853
        *total = COSTS_N_INSNS (arm_gen_constant (SET, mode, NULL_RTX,
6854
                                                  INTVAL (x), NULL_RTX,
6855
                                                  NULL_RTX, 0, 0));
6856
      return true;
6857
 
6858
    case CONST:
6859
    case LABEL_REF:
6860
    case SYMBOL_REF:
6861
      *total = COSTS_N_INSNS (3);
6862
      return true;
6863
 
6864
    case HIGH:
6865
      *total = COSTS_N_INSNS (1);
6866
      return true;
6867
 
6868
    case LO_SUM:
6869
      *total = COSTS_N_INSNS (1);
6870
      *total += rtx_cost (XEXP (x, 0), code, speed);
6871
      return true;
6872
 
6873
    case CONST_DOUBLE:
6874
      if (TARGET_HARD_FLOAT && vfp3_const_double_rtx (x)
6875
          && (mode == SFmode || !TARGET_VFP_SINGLE))
6876
        *total = COSTS_N_INSNS (1);
6877
      else
6878
        *total = COSTS_N_INSNS (4);
6879
      return true;
6880
 
6881
    default:
6882
      *total = COSTS_N_INSNS (4);
6883
      return false;
6884
    }
6885
}
6886
 
6887
/* RTX costs when optimizing for size.  */
6888
static bool
6889
arm_size_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
6890
                    int *total)
6891
{
6892
  enum machine_mode mode = GET_MODE (x);
6893
  if (TARGET_THUMB1)
6894
    {
6895
      /* XXX TBD.  For now, use the standard costs.  */
6896
      *total = thumb1_rtx_costs (x, code, outer_code);
6897
      return true;
6898
    }
6899
 
6900
  /* FIXME: This makes no attempt to prefer narrow Thumb-2 instructions.  */
6901
  switch (code)
6902
    {
6903
    case MEM:
6904
      /* A memory access costs 1 insn if the mode is small, or the address is
6905
         a single register; otherwise it costs one insn per word.  */
6906
      if (REG_P (XEXP (x, 0)))
6907
        *total = COSTS_N_INSNS (1);
6908
      else
6909
        *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
6910
      return true;
6911
 
6912
    case DIV:
6913
    case MOD:
6914
    case UDIV:
6915
    case UMOD:
6916
      /* Needs a libcall, so it costs about this.  */
6917
      *total = COSTS_N_INSNS (2);
6918
      return false;
6919
 
6920
    case ROTATE:
6921
      if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
6922
        {
6923
          *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), code, false);
6924
          return true;
6925
        }
6926
      /* Fall through */
6927
    case ROTATERT:
6928
    case ASHIFT:
6929
    case LSHIFTRT:
6930
    case ASHIFTRT:
6931
      if (mode == DImode && GET_CODE (XEXP (x, 1)) == CONST_INT)
6932
        {
6933
          *total = COSTS_N_INSNS (3) + rtx_cost (XEXP (x, 0), code, false);
6934
          return true;
6935
        }
6936
      else if (mode == SImode)
6937
        {
6938
          *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 0), code, false);
6939
          /* Slightly disparage register shifts, but not by much.  */
6940
          if (GET_CODE (XEXP (x, 1)) != CONST_INT)
6941
            *total += 1 + rtx_cost (XEXP (x, 1), code, false);
6942
          return true;
6943
        }
6944
 
6945
      /* Needs a libcall.  */
6946
      *total = COSTS_N_INSNS (2);
6947
      return false;
6948
 
6949
    case MINUS:
6950
      if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT
6951
          && (mode == SFmode || !TARGET_VFP_SINGLE))
6952
        {
6953
          *total = COSTS_N_INSNS (1);
6954
          return false;
6955
        }
6956
 
6957
      if (mode == SImode)
6958
        {
6959
          enum rtx_code subcode0 = GET_CODE (XEXP (x, 0));
6960
          enum rtx_code subcode1 = GET_CODE (XEXP (x, 1));
6961
 
6962
          if (subcode0 == ROTATE || subcode0 == ROTATERT || subcode0 == ASHIFT
6963
              || subcode0 == LSHIFTRT || subcode0 == ASHIFTRT
6964
              || subcode1 == ROTATE || subcode1 == ROTATERT
6965
              || subcode1 == ASHIFT || subcode1 == LSHIFTRT
6966
              || subcode1 == ASHIFTRT)
6967
            {
6968
              /* It's just the cost of the two operands.  */
6969
              *total = 0;
6970
              return false;
6971
            }
6972
 
6973
          *total = COSTS_N_INSNS (1);
6974
          return false;
6975
        }
6976
 
6977
      *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
6978
      return false;
6979
 
6980
    case PLUS:
6981
      if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT
6982
          && (mode == SFmode || !TARGET_VFP_SINGLE))
6983
        {
6984
          *total = COSTS_N_INSNS (1);
6985
          return false;
6986
        }
6987
 
6988
      /* A shift as a part of ADD costs nothing.  */
6989
      if (GET_CODE (XEXP (x, 0)) == MULT
6990
          && power_of_two_operand (XEXP (XEXP (x, 0), 1), SImode))
6991
        {
6992
          *total = COSTS_N_INSNS (TARGET_THUMB2 ? 2 : 1);
6993
          *total += rtx_cost (XEXP (XEXP (x, 0), 0), code, false);
6994
          *total += rtx_cost (XEXP (x, 1), code, false);
6995
          return true;
6996
        }
6997
 
6998
      /* Fall through */
6999
    case AND: case XOR: case IOR:
7000
      if (mode == SImode)
7001
        {
7002
          enum rtx_code subcode = GET_CODE (XEXP (x, 0));
7003
 
7004
          if (subcode == ROTATE || subcode == ROTATERT || subcode == ASHIFT
7005
              || subcode == LSHIFTRT || subcode == ASHIFTRT
7006
              || (code == AND && subcode == NOT))
7007
            {
7008
              /* It's just the cost of the two operands.  */
7009
              *total = 0;
7010
              return false;
7011
            }
7012
        }
7013
 
7014
      *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
7015
      return false;
7016
 
7017
    case MULT:
7018
      *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
7019
      return false;
7020
 
7021
    case NEG:
7022
      if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT
7023
          && (mode == SFmode || !TARGET_VFP_SINGLE))
7024
        {
7025
          *total = COSTS_N_INSNS (1);
7026
          return false;
7027
        }
7028
 
7029
      /* Fall through */
7030
    case NOT:
7031
      *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
7032
 
7033
      return false;
7034
 
7035
    case IF_THEN_ELSE:
7036
      *total = 0;
7037
      return false;
7038
 
7039
    case COMPARE:
7040
      if (cc_register (XEXP (x, 0), VOIDmode))
7041
        *total = 0;
7042
      else
7043
        *total = COSTS_N_INSNS (1);
7044
      return false;
7045
 
7046
    case ABS:
7047
      if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT
7048
          && (mode == SFmode || !TARGET_VFP_SINGLE))
7049
        *total = COSTS_N_INSNS (1);
7050
      else
7051
        *total = COSTS_N_INSNS (1 + ARM_NUM_REGS (mode));
7052
      return false;
7053
 
7054
    case SIGN_EXTEND:
7055
      *total = 0;
7056
      if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) < 4)
7057
        {
7058
          if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
7059
            *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
7060
        }
7061
      if (mode == DImode)
7062
        *total += COSTS_N_INSNS (1);
7063
      return false;
7064
 
7065
    case ZERO_EXTEND:
7066
      *total = 0;
7067
      if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
7068
        {
7069
          switch (GET_MODE (XEXP (x, 0)))
7070
            {
7071
            case QImode:
7072
              *total += COSTS_N_INSNS (1);
7073
              break;
7074
 
7075
            case HImode:
7076
              *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
              break;
7077
 
7078
            case SImode:
7079
              break;
7080
 
7081
            default:
7082
              *total += COSTS_N_INSNS (2);
7083
            }
7084
        }
7085
 
7086
      if (mode == DImode)
7087
        *total += COSTS_N_INSNS (1);
7088
 
7089
      return false;
7090
 
7091
    case CONST_INT:
7092
      if (const_ok_for_arm (INTVAL (x)))
7093
        /* A multiplication by a constant requires another instruction
7094
           to load the constant to a register.  */
7095
        *total = COSTS_N_INSNS ((outer_code == SET || outer_code == MULT)
7096
                                ? 1 : 0);
7097
      else if (const_ok_for_arm (~INTVAL (x)))
7098
        *total = COSTS_N_INSNS (outer_code == AND ? 0 : 1);
7099
      else if (const_ok_for_arm (-INTVAL (x)))
7100
        {
7101
          if (outer_code == COMPARE || outer_code == PLUS
7102
              || outer_code == MINUS)
7103
            *total = 0;
7104
          else
7105
            *total = COSTS_N_INSNS (1);
7106
        }
7107
      else
7108
        *total = COSTS_N_INSNS (2);
7109
      return true;
7110
 
7111
    case CONST:
7112
    case LABEL_REF:
7113
    case SYMBOL_REF:
7114
      *total = COSTS_N_INSNS (2);
7115
      return true;
7116
 
7117
    case CONST_DOUBLE:
7118
      *total = COSTS_N_INSNS (4);
7119
      return true;
7120
 
7121
    case HIGH:
7122
    case LO_SUM:
7123
      /* We prefer constant pool entries to MOVW/MOVT pairs, so bump the
7124
         cost of these slightly.  */
7125
      *total = COSTS_N_INSNS (1) + 1;
7126
      return true;
7127
 
7128
    default:
7129
      if (mode != VOIDmode)
7130
        *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
7131
      else
7132
        *total = COSTS_N_INSNS (4); /* Who knows?  */
7133
      return false;
7134
    }
7135
}
7136
 
7137
/* RTX costs.  Dispatch to the size costs or to the per-core speed costs.  */
7138
static bool
7139
arm_rtx_costs (rtx x, int code, int outer_code, int *total,
7140
               bool speed)
7141
{
7142
  if (!speed)
7143
    return arm_size_rtx_costs (x, (enum rtx_code) code,
7144
                               (enum rtx_code) outer_code, total);
7145
  else
7146
    return all_cores[(int)arm_tune].rtx_costs (x, (enum rtx_code) code,
7147
                                               (enum rtx_code) outer_code,
7148
                                               total, speed);
7149
}
7150
 
7151
/* RTX costs for cores with a slow MUL implementation.  Thumb-2 is not
7152
   supported on any "slowmul" cores, so it can be ignored.  */
7153
 
7154
static bool
7155
arm_slowmul_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
7156
                       int *total, bool speed)
7157
{
7158
  enum machine_mode mode = GET_MODE (x);
7159
 
7160
  if (TARGET_THUMB)
7161
    {
7162
      *total = thumb1_rtx_costs (x, code, outer_code);
7163
      return true;
7164
    }
7165
 
7166
  switch (code)
7167
    {
7168
    case MULT:
7169
      if (GET_MODE_CLASS (mode) == MODE_FLOAT
7170
          || mode == DImode)
7171
        {
7172
          *total = COSTS_N_INSNS (20);
7173
          return false;
7174
        }
7175
 
7176
      if (GET_CODE (XEXP (x, 1)) == CONST_INT)
7177
        {
7178
          unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
7179
                                      & (unsigned HOST_WIDE_INT) 0xffffffff);
7180
          int cost, const_ok = const_ok_for_arm (i);
7181
          int j, booth_unit_size;
7182
 
7183
          /* Tune as appropriate.  */
7184
          cost = const_ok ? 4 : 8;
7185
          booth_unit_size = 2;
7186
          for (j = 0; i && j < 32; j += booth_unit_size)
7187
            {
7188
              i >>= booth_unit_size;
7189
              cost++;
7190
            }
7191
 
7192
          *total = COSTS_N_INSNS (cost);
7193
          *total += rtx_cost (XEXP (x, 0), code, speed);
7194
          return true;
7195
        }
7196
 
7197
      *total = COSTS_N_INSNS (20);
7198
      return false;
7199
 
7200
    default:
7201
      return arm_rtx_costs_1 (x, outer_code, total, speed);
7202
    }
7203
}
7204
 
7205
 
7206
/* RTX cost for cores with a fast multiply unit (M variants).  */
7207
 
7208
static bool
7209
arm_fastmul_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
7210
                       int *total, bool speed)
7211
{
7212
  enum machine_mode mode = GET_MODE (x);
7213
 
7214
  if (TARGET_THUMB1)
7215
    {
7216
      *total = thumb1_rtx_costs (x, code, outer_code);
7217
      return true;
7218
    }
7219
 
7220
  /* ??? Should Thumb-2 use different costs?  */
7221
  switch (code)
7222
    {
7223
    case MULT:
7224
      /* There is no point basing this on the tuning, since it is always the
7225
         fast variant if it exists at all.  */
7226
      if (mode == DImode
7227
          && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
7228
          && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
7229
              || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
7230
        {
7231
          *total = COSTS_N_INSNS (2);
7232
          return false;
7233
        }
7234
 
7235
 
7236
      if (mode == DImode)
7237
        {
7238
          *total = COSTS_N_INSNS (5);
7239
          return false;
7240
        }
7241
 
7242
      if (GET_CODE (XEXP (x, 1)) == CONST_INT)
7243
        {
7244
          unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
7245
                                      & (unsigned HOST_WIDE_INT) 0xffffffff);
7246
          int cost, const_ok = const_ok_for_arm (i);
7247
          int j, booth_unit_size;
7248
 
7249
          /* Tune as appropriate.  */
7250
          cost = const_ok ? 4 : 8;
7251
          booth_unit_size = 8;
7252
          for (j = 0; i && j < 32; j += booth_unit_size)
7253
            {
7254
              i >>= booth_unit_size;
7255
              cost++;
7256
            }
7257
 
7258
          *total = COSTS_N_INSNS (cost);
7259
          return false;
7260
        }
7261
 
7262
      if (mode == SImode)
7263
        {
7264
          *total = COSTS_N_INSNS (4);
7265
          return false;
7266
        }
7267
 
7268
      if (GET_MODE_CLASS (mode) == MODE_FLOAT)
7269
        {
7270
          if (TARGET_HARD_FLOAT
7271
              && (mode == SFmode
7272
                  || (mode == DFmode && !TARGET_VFP_SINGLE)))
7273
            {
7274
              *total = COSTS_N_INSNS (1);
7275
              return false;
7276
            }
7277
        }
7278
 
7279
      /* Requires a libcall.  */
7280
      *total = COSTS_N_INSNS (20);
7281
      return false;
7282
 
7283
    default:
7284
      return arm_rtx_costs_1 (x, outer_code, total, speed);
7285
    }
7286
}
7287
 
7288
 
7289
/* RTX cost for XScale CPUs.  Thumb-2 is not supported on any XScale cores,
7290
   so it can be ignored.  */
7291
 
7292
static bool
7293
arm_xscale_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code, int *total, bool speed)
7294
{
7295
  enum machine_mode mode = GET_MODE (x);
7296
 
7297
  if (TARGET_THUMB)
7298
    {
7299
      *total = thumb1_rtx_costs (x, code, outer_code);
7300
      return true;
7301
    }
7302
 
7303
  switch (code)
7304
    {
7305
    case COMPARE:
7306
      if (GET_CODE (XEXP (x, 0)) != MULT)
7307
        return arm_rtx_costs_1 (x, outer_code, total, speed);
7308
 
7309
      /* A COMPARE of a MULT is slow on XScale; the muls instruction
7310
         will stall until the multiplication is complete.  */
7311
      *total = COSTS_N_INSNS (3);
7312
      return false;
7313
 
7314
    case MULT:
7315
      /* There is no point basing this on the tuning, since it is always the
7316
         fast variant if it exists at all.  */
7317
      if (mode == DImode
7318
          && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
7319
          && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
7320
              || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
7321
        {
7322
          *total = COSTS_N_INSNS (2);
7323
          return false;
7324
        }
7325
 
7326
 
7327
      if (mode == DImode)
7328
        {
7329
          *total = COSTS_N_INSNS (5);
7330
          return false;
7331
        }
7332
 
7333
      if (GET_CODE (XEXP (x, 1)) == CONST_INT)
7334
        {
7335
          /* If operand 1 is a constant we can more accurately
7336
             calculate the cost of the multiply.  The multiplier can
7337
             retire 15 bits on the first cycle and a further 12 on the
7338
             second.  We do, of course, have to load the constant into
7339
             a register first.  */
7340
          unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
7341
          /* There's a general overhead of one cycle.  */
7342
          int cost = 1;
7343
          unsigned HOST_WIDE_INT masked_const;
7344
 
7345
          if (i & 0x80000000)
7346
            i = ~i;
7347
 
7348
          i &= (unsigned HOST_WIDE_INT) 0xffffffff;
7349
 
7350
          masked_const = i & 0xffff8000;
7351
          if (masked_const != 0)
7352
            {
7353
              cost++;
7354
              masked_const = i & 0xf8000000;
7355
              if (masked_const != 0)
7356
                cost++;
7357
            }
7358
          *total = COSTS_N_INSNS (cost);
7359
          return false;
7360
        }
7361
 
7362
      if (mode == SImode)
7363
        {
7364
          *total = COSTS_N_INSNS (3);
7365
          return false;
7366
        }
7367
 
7368
      /* Requires a libcall.  */
7369
      *total = COSTS_N_INSNS (20);
7370
      return false;
7371
 
7372
    default:
7373
      return arm_rtx_costs_1 (x, outer_code, total, speed);
7374
    }
7375
}
7376
 
7377
 
7378
/* RTX costs for 9e (and later) cores.  */
7379
 
7380
static bool
7381
arm_9e_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
7382
                  int *total, bool speed)
7383
{
7384
  enum machine_mode mode = GET_MODE (x);
7385
 
7386
  if (TARGET_THUMB1)
7387
    {
7388
      switch (code)
7389
        {
7390
        case MULT:
7391
          *total = COSTS_N_INSNS (3);
7392
          return true;
7393
 
7394
        default:
7395
          *total = thumb1_rtx_costs (x, code, outer_code);
7396
          return true;
7397
        }
7398
    }
7399
 
7400
  switch (code)
7401
    {
7402
    case MULT:
7403
      /* There is no point basing this on the tuning, since it is always the
7404
         fast variant if it exists at all.  */
7405
      if (mode == DImode
7406
          && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
7407
          && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
7408
              || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
7409
        {
7410
          *total = COSTS_N_INSNS (2);
7411
          return false;
7412
        }
7413
 
7414
 
7415
      if (mode == DImode)
7416
        {
7417
          *total = COSTS_N_INSNS (5);
7418
          return false;
7419
        }
7420
 
7421
      if (mode == SImode)
7422
        {
7423
          *total = COSTS_N_INSNS (2);
7424
          return false;
7425
        }
7426
 
7427
      if (GET_MODE_CLASS (mode) == MODE_FLOAT)
7428
        {
7429
          if (TARGET_HARD_FLOAT
7430
              && (mode == SFmode
7431
                  || (mode == DFmode && !TARGET_VFP_SINGLE)))
7432
            {
7433
              *total = COSTS_N_INSNS (1);
7434
              return false;
7435
            }
7436
        }
7437
 
7438
      *total = COSTS_N_INSNS (20);
7439
      return false;
7440
 
7441
    default:
7442
      return arm_rtx_costs_1 (x, outer_code, total, speed);
7443
    }
7444
}
7445
/* All address computations that can be done are free, but rtx cost returns
7446
   the same for practically all of them.  So we weight the different types
7447
   of address here in the order (most pref first):
7448
   PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL.  */
7449
static inline int
7450
arm_arm_address_cost (rtx x)
7451
{
7452
  enum rtx_code c  = GET_CODE (x);
7453
 
7454
  if (c == PRE_INC || c == PRE_DEC || c == POST_INC || c == POST_DEC)
7455
    return 0;
7456
  if (c == MEM || c == LABEL_REF || c == SYMBOL_REF)
7457
    return 10;
7458
 
7459
  if (c == PLUS)
7460
    {
7461
      if (GET_CODE (XEXP (x, 1)) == CONST_INT)
7462
        return 2;
7463
 
7464
      if (ARITHMETIC_P (XEXP (x, 0)) || ARITHMETIC_P (XEXP (x, 1)))
7465
        return 3;
7466
 
7467
      return 4;
7468
    }
7469
 
7470
  return 6;
7471
}
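/* Editorial examples of the weighting above: (post_inc r3) costs 0;
   (plus r0 (const_int 4)) costs 2; (plus r0 (mult r1 (const_int 4)))
   costs 3; (plus r0 r1) costs 4; a bare (reg r0) costs 6; and a
   (symbol_ref) costs 10.  */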
7472
 
7473
static inline int
7474
arm_thumb_address_cost (rtx x)
7475
{
7476
  enum rtx_code c  = GET_CODE (x);
7477
 
7478
  if (c == REG)
7479
    return 1;
7480
  if (c == PLUS
7481
      && GET_CODE (XEXP (x, 0)) == REG
7482
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
7483
    return 1;
7484
 
7485
  return 2;
7486
}
7487
 
7488
static int
7489
arm_address_cost (rtx x, bool speed ATTRIBUTE_UNUSED)
7490
{
7491
  return TARGET_32BIT ? arm_arm_address_cost (x) : arm_thumb_address_cost (x);
7492
}
7493
 
7494
static int
7495
arm_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
7496
{
7497
  rtx i_pat, d_pat;
7498
 
7499
  /* Some true dependencies can have a higher cost depending
7500
     on precisely how certain input operands are used.  */
7501
  if (arm_tune_xscale
7502
      && REG_NOTE_KIND (link) == 0
7503
      && recog_memoized (insn) >= 0
7504
      && recog_memoized (dep) >= 0)
7505
    {
7506
      int shift_opnum = get_attr_shift (insn);
7507
      enum attr_type attr_type = get_attr_type (dep);
7508
 
7509
      /* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
7510
         operand for INSN.  If we have a shifted input operand and the
7511
         instruction we depend on is another ALU instruction, then we may
7512
         have to account for an additional stall.  */
7513
      if (shift_opnum != 0
7514
          && (attr_type == TYPE_ALU_SHIFT || attr_type == TYPE_ALU_SHIFT_REG))
7515
        {
7516
          rtx shifted_operand;
7517
          int opno;
7518
 
7519
          /* Get the shifted operand.  */
7520
          extract_insn (insn);
7521
          shifted_operand = recog_data.operand[shift_opnum];
7522
 
7523
          /* Iterate over all the operands in DEP.  If we write an operand
7524
             that overlaps with SHIFTED_OPERAND, then we have to increase
7525
             the cost of this dependency.  */
7526
          extract_insn (dep);
7527
          preprocess_constraints ();
7528
          for (opno = 0; opno < recog_data.n_operands; opno++)
7529
            {
7530
              /* We can ignore strict inputs.  */
7531
              if (recog_data.operand_type[opno] == OP_IN)
7532
                continue;
7533
 
7534
              if (reg_overlap_mentioned_p (recog_data.operand[opno],
7535
                                           shifted_operand))
7536
                return 2;
7537
            }
7538
        }
7539
    }
7540
 
7541
  /* XXX This is not strictly true for the FPA.  */
7542
  if (REG_NOTE_KIND (link) == REG_DEP_ANTI
7543
      || REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
7544
    return 0;
7545
 
7546
  /* Call insns don't incur a stall, even if they follow a load.  */
7547
  if (REG_NOTE_KIND (link) == 0
7548
      && GET_CODE (insn) == CALL_INSN)
7549
    return 1;
7550
 
7551
  if ((i_pat = single_set (insn)) != NULL
7552
      && GET_CODE (SET_SRC (i_pat)) == MEM
7553
      && (d_pat = single_set (dep)) != NULL
7554
      && GET_CODE (SET_DEST (d_pat)) == MEM)
7555
    {
7556
      rtx src_mem = XEXP (SET_SRC (i_pat), 0);
7557
      /* This is a load after a store; there is no conflict if the load reads
7558
         from a cached area.  Assume that loads from the stack and from the
7559
         constant pool are cached, and that others will miss.  This is a
7560
         hack.  */
7561
 
7562
      if ((GET_CODE (src_mem) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (src_mem))
7563
          || reg_mentioned_p (stack_pointer_rtx, src_mem)
7564
          || reg_mentioned_p (frame_pointer_rtx, src_mem)
7565
          || reg_mentioned_p (hard_frame_pointer_rtx, src_mem))
7566
        return 1;
7567
    }
7568
 
7569
  return cost;
7570
}
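/* Editorial example of the XScale special case above: if DEP is
   "add r2, r3, r4, lsl #1" (an ALU-shift insn writing r2) and INSN is
   "add r0, r1, r2, lsl #2" (r2 is its shifted operand), the dependency
   cost is raised to 2 to model the extra stall on the shifted input.  */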
7571
 
7572
static int fp_consts_inited = 0;
7573
 
7574
/* Only zero is valid for VFP.  Other values are also valid for FPA.  */
7575
static const char * const strings_fp[8] =
7576
{
7577
  "0",   "1",   "2",   "3",
7578
  "4",   "5",   "0.5", "10"
7579
};
7580
 
7581
static REAL_VALUE_TYPE values_fp[8];
7582
 
7583
static void
7584
init_fp_table (void)
7585
{
7586
  int i;
7587
  REAL_VALUE_TYPE r;
7588
 
7589
  if (TARGET_VFP)
7590
    fp_consts_inited = 1;
7591
  else
7592
    fp_consts_inited = 8;
7593
 
7594
  for (i = 0; i < fp_consts_inited; i++)
7595
    {
7596
      r = REAL_VALUE_ATOF (strings_fp[i], DFmode);
7597
      values_fp[i] = r;
7598
    }
7599
}
7600
 
7601
/* Return TRUE if rtx X is a valid immediate FP constant.  */
7602
int
7603
arm_const_double_rtx (rtx x)
7604
{
7605
  REAL_VALUE_TYPE r;
7606
  int i;
7607
 
7608
  if (!fp_consts_inited)
7609
    init_fp_table ();
7610
 
7611
  REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7612
  if (REAL_VALUE_MINUS_ZERO (r))
7613
    return 0;
7614
 
7615
  for (i = 0; i < fp_consts_inited; i++)
7616
    if (REAL_VALUES_EQUAL (r, values_fp[i]))
7617
      return 1;
7618
 
7619
  return 0;
7620
}
7621
 
7622
/* Return TRUE if rtx X is a valid immediate FPA constant.  */
7623
int
7624
neg_const_double_rtx_ok_for_fpa (rtx x)
7625
{
7626
  REAL_VALUE_TYPE r;
7627
  int i;
7628
 
7629
  if (!fp_consts_inited)
7630
    init_fp_table ();
7631
 
7632
  REAL_VALUE_FROM_CONST_DOUBLE (r, x);
7633
  r = REAL_VALUE_NEGATE (r);
7634
  if (REAL_VALUE_MINUS_ZERO (r))
7635
    return 0;
7636
 
7637
  for (i = 0; i < 8; i++)
7638
    if (REAL_VALUES_EQUAL (r, values_fp[i]))
7639
      return 1;
7640
 
7641
  return 0;
7642
}
7643
 
7644
 
7645
/* VFPv3 has a fairly wide range of representable immediates, formed from
7646
   "quarter-precision" floating-point values. These can be evaluated using this
7647
   formula (with ^ for exponentiation):
7648
 
7649
     -1^s * n * 2^-r
7650
 
7651
   Where 's' is a sign bit (0/1), 'n' and 'r' are integers such that
7652
   16 <= n <= 31 and 0 <= r <= 7.
7653
 
7654
   These values are mapped onto an 8-bit integer ABCDEFGH s.t.
7655
 
7656
     - A (most-significant) is the sign bit.
7657
     - BCD are the exponent (encoded as r XOR 3).
7658
     - EFGH are the mantissa (encoded as n - 16).
7659
*/
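/* Worked examples (editorial, using the formula above): 1.0 == 16 * 2^-4
   gives s == 0, n == 16, r == 4, hence ABCDEFGH == 0 111 0000 == 0x70;
   -2.5 == -(20 * 2^-3) gives s == 1, n == 20, r == 3, hence
   ABCDEFGH == 1 000 0100 == 0x84.  */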

/* Return an integer index for a VFPv3 immediate operand X suitable for the
   fconst[sd] instruction, or -1 if X isn't suitable.  */
static int
vfp3_const_double_index (rtx x)
{
  REAL_VALUE_TYPE r, m;
  int sign, exponent;
  unsigned HOST_WIDE_INT mantissa, mant_hi;
  unsigned HOST_WIDE_INT mask;
  HOST_WIDE_INT m1, m2;
  int point_pos = 2 * HOST_BITS_PER_WIDE_INT - 1;

  if (!TARGET_VFP3 || GET_CODE (x) != CONST_DOUBLE)
    return -1;

  REAL_VALUE_FROM_CONST_DOUBLE (r, x);

  /* We can't represent these things, so detect them first.  */
  if (REAL_VALUE_ISINF (r) || REAL_VALUE_ISNAN (r) || REAL_VALUE_MINUS_ZERO (r))
    return -1;

  /* Extract sign, exponent and mantissa.  */
  sign = REAL_VALUE_NEGATIVE (r) ? 1 : 0;
  r = REAL_VALUE_ABS (r);
  exponent = REAL_EXP (&r);
  /* For the mantissa, we expand into two HOST_WIDE_INTs, apart from the
     highest (sign) bit, with a fixed binary point at bit point_pos.
     WARNING: If there's ever a VFP version which uses more than 2 * H_W_I - 1
     bits for the mantissa, this may fail (low bits would be lost).  */
  real_ldexp (&m, &r, point_pos - exponent);
  REAL_VALUE_TO_INT (&m1, &m2, m);
  mantissa = m1;
  mant_hi = m2;

  /* If there are bits set in the low part of the mantissa, we can't
     represent this value.  */
  if (mantissa != 0)
    return -1;

  /* Now make it so that mantissa contains the most-significant bits, and move
     the point_pos to indicate that the least-significant bits have been
     discarded.  */
  point_pos -= HOST_BITS_PER_WIDE_INT;
  mantissa = mant_hi;

  /* We can permit four significant bits of mantissa only, plus a high bit
     which is always 1.  */
  mask = ((unsigned HOST_WIDE_INT)1 << (point_pos - 5)) - 1;
  if ((mantissa & mask) != 0)
    return -1;

  /* Now we know the mantissa is in range, chop off the unneeded bits.  */
  mantissa >>= point_pos - 5;

  /* The mantissa may be zero.  Disallow that case.  (It's possible to load
     the floating-point immediate zero with Neon using an integer-zero load,
     but that case is handled elsewhere.)  */
  if (mantissa == 0)
    return -1;

  gcc_assert (mantissa >= 16 && mantissa <= 31);

  /* The value of 5 here would be 4 if GCC used IEEE754-like encoding (where
     normalized significands are in the range [1, 2).  (Our mantissa is
     shifted left 4 places at this point relative to normalized IEEE754
     values.)  GCC internally uses [0.5, 1) (see real.c), so the exponent
     returned from REAL_EXP must be altered.  */
  exponent = 5 - exponent;

  if (exponent < 0 || exponent > 7)
    return -1;

  /* Sign, mantissa and exponent are now in the correct form to plug into the
     formula described in the comment above.  */
  return (sign << 7) | ((exponent ^ 3) << 4) | (mantissa - 16);
}

/* Return TRUE if rtx X is a valid immediate VFPv3 constant.  */
int
vfp3_const_double_rtx (rtx x)
{
  if (!TARGET_VFP3)
    return 0;

  return vfp3_const_double_index (x) != -1;
}

/* Recognize immediates which can be used in various Neon instructions. Legal
   immediates are described by the following table (for VMVN variants, the
   bitwise inverse of the constant shown is recognized. In either case, VMOV
   is output and the correct instruction to use for a given constant is chosen
   by the assembler). The constant shown is replicated across all elements of
   the destination vector.

   insn elems variant constant (binary)
   ---- ----- ------- -----------------
   vmov  i32     0    00000000 00000000 00000000 abcdefgh
   vmov  i32     1    00000000 00000000 abcdefgh 00000000
   vmov  i32     2    00000000 abcdefgh 00000000 00000000
   vmov  i32     3    abcdefgh 00000000 00000000 00000000
   vmov  i16     4    00000000 abcdefgh
   vmov  i16     5    abcdefgh 00000000
   vmvn  i32     6    00000000 00000000 00000000 abcdefgh
   vmvn  i32     7    00000000 00000000 abcdefgh 00000000
   vmvn  i32     8    00000000 abcdefgh 00000000 00000000
   vmvn  i32     9    abcdefgh 00000000 00000000 00000000
   vmvn  i16    10    00000000 abcdefgh
   vmvn  i16    11    abcdefgh 00000000
   vmov  i32    12    00000000 00000000 abcdefgh 11111111
   vmvn  i32    13    00000000 00000000 abcdefgh 11111111
   vmov  i32    14    00000000 abcdefgh 11111111 11111111
   vmvn  i32    15    00000000 abcdefgh 11111111 11111111
   vmov   i8    16    abcdefgh
   vmov  i64    17    aaaaaaaa bbbbbbbb cccccccc dddddddd
                      eeeeeeee ffffffff gggggggg hhhhhhhh
   vmov  f32    18    aBbbbbbc defgh000 00000000 00000000

   For case 18, B = !b. Representable values are exactly those accepted by
   vfp3_const_double_index, but are output as floating-point numbers rather
   than indices.

   Variants 0-5 (inclusive) may also be used as immediates for the second
   operand of VORR/VBIC instructions.

   The INVERSE argument causes the bitwise inverse of the given operand to be
   recognized instead (used for recognizing legal immediates for the VAND/VORN
   pseudo-instructions). If INVERSE is true, the value placed in *MODCONST is
   *not* inverted (i.e. the pseudo-instruction forms vand/vorn should still be
   output, rather than the real insns vbic/vorr).

   INVERSE makes no difference to the recognition of float vectors.

   The return value is the variant of immediate as shown in the above table, or
   -1 if the given value doesn't match any of the listed patterns.
*/
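
/* For example (an editorial illustration, not part of the original sources):
   a V4SI vector with every element equal to 0x000000ff splats to the
   little-endian byte pattern ff 00 00 00 and matches variant 0, so it can be
   loaded with a single vmov.i32; its bitwise inverse 0xffffff00 matches
   variant 6 and is loaded with vmvn.i32 using the same 8-bit payload.  */
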
static int
neon_valid_immediate (rtx op, enum machine_mode mode, int inverse,
                      rtx *modconst, int *elementwidth)
{
#define CHECK(STRIDE, ELSIZE, CLASS, TEST)      \
  matches = 1;                                  \
  for (i = 0; i < idx; i += (STRIDE))           \
    if (!(TEST))                                \
      matches = 0;                              \
  if (matches)                                  \
    {                                           \
      immtype = (CLASS);                        \
      elsize = (ELSIZE);                        \
      break;                                    \
    }

  unsigned int i, elsize = 0, idx = 0, n_elts = CONST_VECTOR_NUNITS (op);
  unsigned int innersize = GET_MODE_SIZE (GET_MODE_INNER (mode));
  unsigned char bytes[16];
  int immtype = -1, matches;
  unsigned int invmask = inverse ? 0xff : 0;

  /* Vectors of float constants.  */
  if (GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
    {
      rtx el0 = CONST_VECTOR_ELT (op, 0);
      REAL_VALUE_TYPE r0;

      if (!vfp3_const_double_rtx (el0))
        return -1;

      REAL_VALUE_FROM_CONST_DOUBLE (r0, el0);

      for (i = 1; i < n_elts; i++)
        {
          rtx elt = CONST_VECTOR_ELT (op, i);
          REAL_VALUE_TYPE re;

          REAL_VALUE_FROM_CONST_DOUBLE (re, elt);

          if (!REAL_VALUES_EQUAL (r0, re))
            return -1;
        }

      if (modconst)
        *modconst = CONST_VECTOR_ELT (op, 0);

      if (elementwidth)
        *elementwidth = 0;

      return 18;
    }

  /* Splat vector constant out into a byte vector.  */
  for (i = 0; i < n_elts; i++)
    {
      rtx el = CONST_VECTOR_ELT (op, i);
      unsigned HOST_WIDE_INT elpart;
      unsigned int part, parts;

      if (GET_CODE (el) == CONST_INT)
        {
          elpart = INTVAL (el);
          parts = 1;
        }
      else if (GET_CODE (el) == CONST_DOUBLE)
        {
          elpart = CONST_DOUBLE_LOW (el);
          parts = 2;
        }
      else
        gcc_unreachable ();

      for (part = 0; part < parts; part++)
        {
          unsigned int byte;
          for (byte = 0; byte < innersize; byte++)
            {
              bytes[idx++] = (elpart & 0xff) ^ invmask;
              elpart >>= BITS_PER_UNIT;
            }
          if (GET_CODE (el) == CONST_DOUBLE)
            elpart = CONST_DOUBLE_HIGH (el);
        }
    }

  /* Sanity check.  */
  gcc_assert (idx == GET_MODE_SIZE (mode));

  do
    {
      CHECK (4, 32, 0, bytes[i] == bytes[0] && bytes[i + 1] == 0
                       && bytes[i + 2] == 0 && bytes[i + 3] == 0);

      CHECK (4, 32, 1, bytes[i] == 0 && bytes[i + 1] == bytes[1]
                       && bytes[i + 2] == 0 && bytes[i + 3] == 0);

      CHECK (4, 32, 2, bytes[i] == 0 && bytes[i + 1] == 0
                       && bytes[i + 2] == bytes[2] && bytes[i + 3] == 0);

      CHECK (4, 32, 3, bytes[i] == 0 && bytes[i + 1] == 0
                       && bytes[i + 2] == 0 && bytes[i + 3] == bytes[3]);

      CHECK (2, 16, 4, bytes[i] == bytes[0] && bytes[i + 1] == 0);

      CHECK (2, 16, 5, bytes[i] == 0 && bytes[i + 1] == bytes[1]);

      CHECK (4, 32, 6, bytes[i] == bytes[0] && bytes[i + 1] == 0xff
                       && bytes[i + 2] == 0xff && bytes[i + 3] == 0xff);

      CHECK (4, 32, 7, bytes[i] == 0xff && bytes[i + 1] == bytes[1]
                       && bytes[i + 2] == 0xff && bytes[i + 3] == 0xff);

      CHECK (4, 32, 8, bytes[i] == 0xff && bytes[i + 1] == 0xff
                       && bytes[i + 2] == bytes[2] && bytes[i + 3] == 0xff);

      CHECK (4, 32, 9, bytes[i] == 0xff && bytes[i + 1] == 0xff
                       && bytes[i + 2] == 0xff && bytes[i + 3] == bytes[3]);

      CHECK (2, 16, 10, bytes[i] == bytes[0] && bytes[i + 1] == 0xff);

      CHECK (2, 16, 11, bytes[i] == 0xff && bytes[i + 1] == bytes[1]);

      CHECK (4, 32, 12, bytes[i] == 0xff && bytes[i + 1] == bytes[1]
                        && bytes[i + 2] == 0 && bytes[i + 3] == 0);

      CHECK (4, 32, 13, bytes[i] == 0 && bytes[i + 1] == bytes[1]
                        && bytes[i + 2] == 0xff && bytes[i + 3] == 0xff);

      CHECK (4, 32, 14, bytes[i] == 0xff && bytes[i + 1] == 0xff
                        && bytes[i + 2] == bytes[2] && bytes[i + 3] == 0);

      CHECK (4, 32, 15, bytes[i] == 0 && bytes[i + 1] == 0
                        && bytes[i + 2] == bytes[2] && bytes[i + 3] == 0xff);

      CHECK (1, 8, 16, bytes[i] == bytes[0]);

      CHECK (1, 64, 17, (bytes[i] == 0 || bytes[i] == 0xff)
                        && bytes[i] == bytes[(i + 8) % idx]);
    }
  while (0);

  if (immtype == -1)
    return -1;

  if (elementwidth)
    *elementwidth = elsize;

  if (modconst)
    {
      unsigned HOST_WIDE_INT imm = 0;

      /* Un-invert bytes of recognized vector, if necessary.  */
      if (invmask != 0)
        for (i = 0; i < idx; i++)
          bytes[i] ^= invmask;

      if (immtype == 17)
        {
          /* FIXME: Broken on 32-bit H_W_I hosts.  */
          gcc_assert (sizeof (HOST_WIDE_INT) == 8);

          for (i = 0; i < 8; i++)
            imm |= (unsigned HOST_WIDE_INT) (bytes[i] ? 0xff : 0)
                   << (i * BITS_PER_UNIT);

          *modconst = GEN_INT (imm);
        }
      else
        {
          for (i = 0; i < elsize / BITS_PER_UNIT; i++)
            imm |= (unsigned HOST_WIDE_INT) bytes[i] << (i * BITS_PER_UNIT);

          *modconst = GEN_INT (imm);
        }
    }

  return immtype;
#undef CHECK
}

/* Return TRUE if rtx OP is legal for use as either a Neon VMOV (or,
   implicitly, VMVN) immediate.  Write back the width per element to
   *ELEMENTWIDTH (or zero for float elements), and a modified constant
   (whatever should be output for a VMOV) in *MODCONST.  */

int
neon_immediate_valid_for_move (rtx op, enum machine_mode mode,
                               rtx *modconst, int *elementwidth)
{
  rtx tmpconst;
  int tmpwidth;
  int retval = neon_valid_immediate (op, mode, 0, &tmpconst, &tmpwidth);

  if (retval == -1)
    return 0;

  if (modconst)
    *modconst = tmpconst;

  if (elementwidth)
    *elementwidth = tmpwidth;

  return 1;
}

/* Return TRUE if rtx OP is legal for use in a VORR or VBIC instruction.  If
   the immediate is valid, write a constant suitable for using as an operand
   to VORR/VBIC/VAND/VORN to *MODCONST and the corresponding element width to
   *ELEMENTWIDTH.  See neon_valid_immediate for a description of INVERSE.  */

int
neon_immediate_valid_for_logic (rtx op, enum machine_mode mode, int inverse,
                                rtx *modconst, int *elementwidth)
{
  rtx tmpconst;
  int tmpwidth;
  int retval = neon_valid_immediate (op, mode, inverse, &tmpconst, &tmpwidth);

  if (retval < 0 || retval > 5)
    return 0;

  if (modconst)
    *modconst = tmpconst;

  if (elementwidth)
    *elementwidth = tmpwidth;

  return 1;
}

/* Return a string suitable for output of Neon immediate logic operation
   MNEM.  */

char *
neon_output_logic_immediate (const char *mnem, rtx *op2, enum machine_mode mode,
                             int inverse, int quad)
{
  int width, is_valid;
  static char templ[40];

  is_valid = neon_immediate_valid_for_logic (*op2, mode, inverse, op2, &width);

  gcc_assert (is_valid != 0);

  if (quad)
    sprintf (templ, "%s.i%d\t%%q0, %%2", mnem, width);
  else
    sprintf (templ, "%s.i%d\t%%P0, %%2", mnem, width);

  return templ;
}

/* Output a sequence of pairwise operations to implement a reduction.
   NOTE: We do "too much work" here, because pairwise operations work on two
   registers-worth of operands in one go, and we cannot exploit those extra
   calculations to do the full operation in fewer steps.  Although all vector
   elements of the result but the first are ignored, we actually calculate
   the same result in each of the elements.  An alternative such as initially
   loading a vector with zero to use as each of the second operands would use
   up an additional register and take an extra instruction, for no particular
   gain.  */

void
neon_pairwise_reduce (rtx op0, rtx op1, enum machine_mode mode,
                      rtx (*reduc) (rtx, rtx, rtx))
{
  enum machine_mode inner = GET_MODE_INNER (mode);
  unsigned int i, parts = GET_MODE_SIZE (mode) / GET_MODE_SIZE (inner);
  rtx tmpsum = op1;

  for (i = parts / 2; i >= 1; i /= 2)
    {
      rtx dest = (i == 1) ? op0 : gen_reg_rtx (mode);
      emit_insn (reduc (dest, tmpsum, tmpsum));
      tmpsum = dest;
    }
}
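
/* As an editorial illustration (not in the original sources): reducing a
   four-element vector {a, b, c, d} with a pairwise add takes two steps.
   Each step applies REDUC to tmpsum and itself, so

     {a, b, c, d} -> {a+b, c+d, a+b, c+d} -> {a+b+c+d, ...}

   and only element 0 of the final result is meaningful.  */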

/* If VALS is a vector constant that can be loaded into a register
   using VDUP, generate instructions to do so and return an RTX to
   assign to the register.  Otherwise return NULL_RTX.  */

static rtx
neon_vdup_constant (rtx vals)
{
  enum machine_mode mode = GET_MODE (vals);
  enum machine_mode inner_mode = GET_MODE_INNER (mode);
  int n_elts = GET_MODE_NUNITS (mode);
  bool all_same = true;
  rtx x;
  int i;

  if (GET_CODE (vals) != CONST_VECTOR || GET_MODE_SIZE (inner_mode) > 4)
    return NULL_RTX;

  for (i = 0; i < n_elts; ++i)
    {
      x = XVECEXP (vals, 0, i);
      if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
        all_same = false;
    }

  if (!all_same)
    /* The elements are not all the same.  We could handle repeating
       patterns of a mode larger than INNER_MODE here (e.g. int8x8_t
       {0, C, 0, C, 0, C, 0, C} which can be loaded using
       vdup.i16).  */
    return NULL_RTX;

  /* We can load this constant by using VDUP and a constant in a
     single ARM register.  This will be cheaper than a vector
     load.  */

  x = copy_to_mode_reg (inner_mode, XVECEXP (vals, 0, 0));
  return gen_rtx_UNSPEC (mode, gen_rtvec (1, x),
                         UNSPEC_VDUP_N);
}

/* Generate code to load VALS, which is a PARALLEL containing only
   constants (for vec_init) or a CONST_VECTOR, efficiently into a
   register.  Returns an RTX to copy into the register, or NULL_RTX
   for a PARALLEL that cannot be converted into a CONST_VECTOR.  */

rtx
neon_make_constant (rtx vals)
{
  enum machine_mode mode = GET_MODE (vals);
  rtx target;
  rtx const_vec = NULL_RTX;
  int n_elts = GET_MODE_NUNITS (mode);
  int n_const = 0;
  int i;

  if (GET_CODE (vals) == CONST_VECTOR)
    const_vec = vals;
  else if (GET_CODE (vals) == PARALLEL)
    {
      /* A CONST_VECTOR must contain only CONST_INTs and
         CONST_DOUBLEs, but CONSTANT_P allows more (e.g. SYMBOL_REF).
         Only store valid constants in a CONST_VECTOR.  */
      for (i = 0; i < n_elts; ++i)
        {
          rtx x = XVECEXP (vals, 0, i);
          if (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE)
            n_const++;
        }
      if (n_const == n_elts)
        const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
    }
  else
    gcc_unreachable ();

  if (const_vec != NULL
      && neon_immediate_valid_for_move (const_vec, mode, NULL, NULL))
    /* Load using VMOV.  On Cortex-A8 this takes one cycle.  */
    return const_vec;
  else if ((target = neon_vdup_constant (vals)) != NULL_RTX)
    /* Loaded using VDUP.  On Cortex-A8 the VDUP takes one NEON
       pipeline cycle; creating the constant takes one or two ARM
       pipeline cycles.  */
    return target;
  else if (const_vec != NULL_RTX)
    /* Load from constant pool.  On Cortex-A8 this takes two cycles
       (for either double or quad vectors).  We cannot take advantage
       of single-cycle VLD1 because we need a PC-relative addressing
       mode.  */
    return const_vec;
  else
    /* A PARALLEL containing something not valid inside CONST_VECTOR.
       We cannot construct an initializer.  */
    return NULL_RTX;
}

/* Initialize vector TARGET to VALS.  */

void
neon_expand_vector_init (rtx target, rtx vals)
{
  enum machine_mode mode = GET_MODE (target);
  enum machine_mode inner_mode = GET_MODE_INNER (mode);
  int n_elts = GET_MODE_NUNITS (mode);
  int n_var = 0, one_var = -1;
  bool all_same = true;
  rtx x, mem;
  int i;

  for (i = 0; i < n_elts; ++i)
    {
      x = XVECEXP (vals, 0, i);
      if (!CONSTANT_P (x))
        ++n_var, one_var = i;

      if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
        all_same = false;
    }

  if (n_var == 0)
    {
      rtx constant = neon_make_constant (vals);
      if (constant != NULL_RTX)
        {
          emit_move_insn (target, constant);
          return;
        }
    }

  /* Splat a single non-constant element if we can.  */
  if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
    {
      x = copy_to_mode_reg (inner_mode, XVECEXP (vals, 0, 0));
      emit_insn (gen_rtx_SET (VOIDmode, target,
                              gen_rtx_UNSPEC (mode, gen_rtvec (1, x),
                                              UNSPEC_VDUP_N)));
      return;
    }

  /* One field is non-constant.  Load constant then overwrite varying
     field.  This is more efficient than using the stack.  */
  if (n_var == 1)
    {
      rtx copy = copy_rtx (vals);
      rtvec ops;

      /* Load constant part of vector, substitute neighboring value for
         varying element.  */
      XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
      neon_expand_vector_init (target, copy);

      /* Insert variable.  */
      x = copy_to_mode_reg (inner_mode, XVECEXP (vals, 0, one_var));
      ops = gen_rtvec (3, x, target, GEN_INT (one_var));
      emit_insn (gen_rtx_SET (VOIDmode, target,
                              gen_rtx_UNSPEC (mode, ops, UNSPEC_VSET_LANE)));
      return;
    }

  /* Construct the vector in memory one field at a time
     and load the whole vector.  */
  mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), 0);
  for (i = 0; i < n_elts; i++)
    emit_move_insn (adjust_address_nv (mem, inner_mode,
                                       i * GET_MODE_SIZE (inner_mode)),
                    XVECEXP (vals, 0, i));
  emit_move_insn (target, mem);
}
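
/* Editorial illustration (not in the original sources): initializing
   {x, 1, 2, 3} where only x is non-constant takes the n_var == 1 path above.
   The constant vector {1, 1, 2, 3} (the neighboring value 1 substituted for
   x) is loaded first, and x is then written into lane 0 via
   UNSPEC_VSET_LANE.  */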

/* Ensure OPERAND lies between LOW (inclusive) and HIGH (exclusive).  Raise
   ERR if it doesn't.  FIXME: NEON bounds checks occur late in compilation, so
   reported source locations are bogus.  */

static void
bounds_check (rtx operand, HOST_WIDE_INT low, HOST_WIDE_INT high,
              const char *err)
{
  HOST_WIDE_INT lane;

  gcc_assert (GET_CODE (operand) == CONST_INT);

  lane = INTVAL (operand);

  if (lane < low || lane >= high)
    error (err);
}

/* Bounds-check lanes.  */

void
neon_lane_bounds (rtx operand, HOST_WIDE_INT low, HOST_WIDE_INT high)
{
  bounds_check (operand, low, high, "lane out of range");
}

/* Bounds-check constants.  */

void
neon_const_bounds (rtx operand, HOST_WIDE_INT low, HOST_WIDE_INT high)
{
  bounds_check (operand, low, high, "constant out of range");
}

/* Return the number of bits per element of MODE (or, for DImode, of the
   whole value).  */

HOST_WIDE_INT
neon_element_bits (enum machine_mode mode)
{
  if (mode == DImode)
    return GET_MODE_BITSIZE (mode);
  else
    return GET_MODE_BITSIZE (GET_MODE_INNER (mode));
}


/* Predicates for `match_operand' and `match_operator'.  */

/* Return nonzero if OP is a valid Cirrus memory address pattern.  */
int
cirrus_memory_offset (rtx op)
{
  /* Reject eliminable registers.  */
  if (! (reload_in_progress || reload_completed)
      && (   reg_mentioned_p (frame_pointer_rtx, op)
          || reg_mentioned_p (arg_pointer_rtx, op)
          || reg_mentioned_p (virtual_incoming_args_rtx, op)
          || reg_mentioned_p (virtual_outgoing_args_rtx, op)
          || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
          || reg_mentioned_p (virtual_stack_vars_rtx, op)))
    return 0;

  if (GET_CODE (op) == MEM)
    {
      rtx ind;

      ind = XEXP (op, 0);

      /* Match: (mem (reg)).  */
      if (GET_CODE (ind) == REG)
        return 1;

      /* Match:
         (mem (plus (reg)
                    (const))).  */
      if (GET_CODE (ind) == PLUS
          && GET_CODE (XEXP (ind, 0)) == REG
          && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
          && GET_CODE (XEXP (ind, 1)) == CONST_INT)
        return 1;
    }

  return 0;
}

/* Return TRUE if OP is a valid coprocessor memory address pattern.
   WB is true if full writeback address modes are allowed and is false
   if limited writeback address modes (POST_INC and PRE_DEC) are
   allowed.  */

int
arm_coproc_mem_operand (rtx op, bool wb)
{
  rtx ind;

  /* Reject eliminable registers.  */
  if (! (reload_in_progress || reload_completed)
      && (   reg_mentioned_p (frame_pointer_rtx, op)
          || reg_mentioned_p (arg_pointer_rtx, op)
          || reg_mentioned_p (virtual_incoming_args_rtx, op)
          || reg_mentioned_p (virtual_outgoing_args_rtx, op)
          || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
          || reg_mentioned_p (virtual_stack_vars_rtx, op)))
    return FALSE;

  /* Constants are converted into offsets from labels.  */
  if (GET_CODE (op) != MEM)
    return FALSE;

  ind = XEXP (op, 0);

  if (reload_completed
      && (GET_CODE (ind) == LABEL_REF
          || (GET_CODE (ind) == CONST
              && GET_CODE (XEXP (ind, 0)) == PLUS
              && GET_CODE (XEXP (XEXP (ind, 0), 0)) == LABEL_REF
              && GET_CODE (XEXP (XEXP (ind, 0), 1)) == CONST_INT)))
    return TRUE;

  /* Match: (mem (reg)).  */
  if (GET_CODE (ind) == REG)
    return arm_address_register_rtx_p (ind, 0);

  /* Autoincrement addressing modes.  POST_INC and PRE_DEC are
     acceptable in any case (subject to verification by
     arm_address_register_rtx_p).  We need WB to be true to accept
     PRE_INC and POST_DEC.  */
  if (GET_CODE (ind) == POST_INC
      || GET_CODE (ind) == PRE_DEC
      || (wb
          && (GET_CODE (ind) == PRE_INC
              || GET_CODE (ind) == POST_DEC)))
    return arm_address_register_rtx_p (XEXP (ind, 0), 0);

  if (wb
      && (GET_CODE (ind) == POST_MODIFY || GET_CODE (ind) == PRE_MODIFY)
      && arm_address_register_rtx_p (XEXP (ind, 0), 0)
      && GET_CODE (XEXP (ind, 1)) == PLUS
      && rtx_equal_p (XEXP (XEXP (ind, 1), 0), XEXP (ind, 0)))
    ind = XEXP (ind, 1);

  /* Match:
     (plus (reg)
           (const)).  */
  if (GET_CODE (ind) == PLUS
      && GET_CODE (XEXP (ind, 0)) == REG
      && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
      && GET_CODE (XEXP (ind, 1)) == CONST_INT
      && INTVAL (XEXP (ind, 1)) > -1024
      && INTVAL (XEXP (ind, 1)) <  1024
      && (INTVAL (XEXP (ind, 1)) & 3) == 0)
    return TRUE;

  return FALSE;
}

/* Return TRUE if OP is a memory operand which we can load or store a vector
   to/from.  TYPE is one of the following values:
    0 - Vector load/stores (vldr)
    1 - Core registers (ldm)
    2 - Element/structure loads (vld1)
 */
int
neon_vector_mem_operand (rtx op, int type)
{
  rtx ind;

  /* Reject eliminable registers.  */
  if (! (reload_in_progress || reload_completed)
      && (   reg_mentioned_p (frame_pointer_rtx, op)
          || reg_mentioned_p (arg_pointer_rtx, op)
          || reg_mentioned_p (virtual_incoming_args_rtx, op)
          || reg_mentioned_p (virtual_outgoing_args_rtx, op)
          || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
          || reg_mentioned_p (virtual_stack_vars_rtx, op)))
    return FALSE;

  /* Constants are converted into offsets from labels.  */
  if (GET_CODE (op) != MEM)
    return FALSE;

  ind = XEXP (op, 0);

  if (reload_completed
      && (GET_CODE (ind) == LABEL_REF
          || (GET_CODE (ind) == CONST
              && GET_CODE (XEXP (ind, 0)) == PLUS
              && GET_CODE (XEXP (XEXP (ind, 0), 0)) == LABEL_REF
              && GET_CODE (XEXP (XEXP (ind, 0), 1)) == CONST_INT)))
    return TRUE;

  /* Match: (mem (reg)).  */
  if (GET_CODE (ind) == REG)
    return arm_address_register_rtx_p (ind, 0);

  /* Allow post-increment (and pre-decrement) with Neon registers.  */
  if (type != 1 && (GET_CODE (ind) == POST_INC || GET_CODE (ind) == PRE_DEC))
    return arm_address_register_rtx_p (XEXP (ind, 0), 0);

  /* FIXME: vld1 allows register post-modify.  */

  /* Match:
     (plus (reg)
           (const)).  */
  if (type == 0
      && GET_CODE (ind) == PLUS
      && GET_CODE (XEXP (ind, 0)) == REG
      && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
      && GET_CODE (XEXP (ind, 1)) == CONST_INT
      && INTVAL (XEXP (ind, 1)) > -1024
      && INTVAL (XEXP (ind, 1)) < 1016
      && (INTVAL (XEXP (ind, 1)) & 3) == 0)
    return TRUE;

  return FALSE;
}

/* Return TRUE if OP is a mem suitable for loading/storing a Neon struct
   type.  */
int
neon_struct_mem_operand (rtx op)
{
  rtx ind;

  /* Reject eliminable registers.  */
  if (! (reload_in_progress || reload_completed)
      && (   reg_mentioned_p (frame_pointer_rtx, op)
          || reg_mentioned_p (arg_pointer_rtx, op)
          || reg_mentioned_p (virtual_incoming_args_rtx, op)
          || reg_mentioned_p (virtual_outgoing_args_rtx, op)
          || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
          || reg_mentioned_p (virtual_stack_vars_rtx, op)))
    return FALSE;

  /* Constants are converted into offsets from labels.  */
  if (GET_CODE (op) != MEM)
    return FALSE;

  ind = XEXP (op, 0);

  if (reload_completed
      && (GET_CODE (ind) == LABEL_REF
          || (GET_CODE (ind) == CONST
              && GET_CODE (XEXP (ind, 0)) == PLUS
              && GET_CODE (XEXP (XEXP (ind, 0), 0)) == LABEL_REF
              && GET_CODE (XEXP (XEXP (ind, 0), 1)) == CONST_INT)))
    return TRUE;

  /* Match: (mem (reg)).  */
  if (GET_CODE (ind) == REG)
    return arm_address_register_rtx_p (ind, 0);

  return FALSE;
}

/* Return true if X is a register that will be eliminated later on.  */
int
arm_eliminable_register (rtx x)
{
  return REG_P (x) && (REGNO (x) == FRAME_POINTER_REGNUM
                       || REGNO (x) == ARG_POINTER_REGNUM
                       || (REGNO (x) >= FIRST_VIRTUAL_REGISTER
                           && REGNO (x) <= LAST_VIRTUAL_REGISTER));
}

/* Return GENERAL_REGS if a scratch register is required to reload X to/from
   coprocessor registers.  Otherwise return NO_REGS.  */

enum reg_class
coproc_secondary_reload_class (enum machine_mode mode, rtx x, bool wb)
{
  if (mode == HFmode)
    {
      if (!TARGET_NEON_FP16)
        return GENERAL_REGS;
      if (s_register_operand (x, mode) || neon_vector_mem_operand (x, 2))
        return NO_REGS;
      return GENERAL_REGS;
    }

  if (TARGET_NEON
      && (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
          || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
      && neon_vector_mem_operand (x, 0))
    return NO_REGS;

  if (arm_coproc_mem_operand (x, wb) || s_register_operand (x, mode))
    return NO_REGS;

  return GENERAL_REGS;
}

/* Return TRUE for values which must be returned in the most-significant end
   of the return register.  */

static bool
arm_return_in_msb (const_tree valtype)
{
  return (TARGET_AAPCS_BASED
          && BYTES_BIG_ENDIAN
          && (AGGREGATE_TYPE_P (valtype)
              || TREE_CODE (valtype) == COMPLEX_TYPE));
}

/* Return TRUE if INSN is an "LDR REG, ADDR" instruction.
   Used by the Cirrus Maverick code, which has to work around
   a hardware bug triggered by such instructions.  */
static bool
arm_memory_load_p (rtx insn)
{
  rtx body, lhs, rhs;

  if (insn == NULL_RTX || GET_CODE (insn) != INSN)
    return false;

  body = PATTERN (insn);

  if (GET_CODE (body) != SET)
    return false;

  lhs = XEXP (body, 0);
  rhs = XEXP (body, 1);

  lhs = REG_OR_SUBREG_RTX (lhs);

  /* If the destination is not a general purpose
     register we do not have to worry.  */
  if (GET_CODE (lhs) != REG
      || REGNO_REG_CLASS (REGNO (lhs)) != GENERAL_REGS)
    return false;

  /* As well as loads from memory we also have to react
     to loads of invalid constants which will be turned
     into loads from the minipool.  */
  return (GET_CODE (rhs) == MEM
          || GET_CODE (rhs) == SYMBOL_REF
          || note_invalid_constants (insn, -1, false));
}

/* Return TRUE if INSN is a Cirrus instruction.  */
static bool
arm_cirrus_insn_p (rtx insn)
{
  enum attr_cirrus attr;

  /* get_attr cannot accept USE or CLOBBER.  */
  if (!insn
      || GET_CODE (insn) != INSN
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  attr = get_attr_cirrus (insn);

  return attr != CIRRUS_NOT;
}

/* Cirrus reorg for invalid instruction combinations.  */
static void
cirrus_reorg (rtx first)
{
  enum attr_cirrus attr;
  rtx body = PATTERN (first);
  rtx t;
  int nops;

  /* Any branch must be followed by 2 non-Cirrus instructions.  */
  if (GET_CODE (first) == JUMP_INSN && GET_CODE (body) != RETURN)
    {
      nops = 0;
      t = next_nonnote_insn (first);

      if (arm_cirrus_insn_p (t))
        ++ nops;

      if (arm_cirrus_insn_p (next_nonnote_insn (t)))
        ++ nops;

      while (nops --)
        emit_insn_after (gen_nop (), first);

      return;
    }

  /* (float (blah)) is in parallel with a clobber.  */
  if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
    body = XVECEXP (body, 0, 0);

  if (GET_CODE (body) == SET)
    {
      rtx lhs = XEXP (body, 0), rhs = XEXP (body, 1);

      /* cfldrd, cfldr64, cfstrd, cfstr64 must
         be followed by a non-Cirrus insn.  */
      if (get_attr_cirrus (first) == CIRRUS_DOUBLE)
        {
          if (arm_cirrus_insn_p (next_nonnote_insn (first)))
            emit_insn_after (gen_nop (), first);

          return;
        }
      else if (arm_memory_load_p (first))
        {
          unsigned int arm_regno;

          /* Any ldr/cfmvdlr, ldr/cfmvdhr, ldr/cfmvsr, ldr/cfmv64lr,
             ldr/cfmv64hr combination where the Rd field is the same
             in both instructions must be split with a non-Cirrus
             insn.  Example:

             ldr r0, blah
             nop
             cfmvsr mvf0, r0.  */

          /* Get Arm register number for ldr insn.  */
          if (GET_CODE (lhs) == REG)
            arm_regno = REGNO (lhs);
          else
            {
              gcc_assert (GET_CODE (rhs) == REG);
              arm_regno = REGNO (rhs);
            }

          /* Next insn.  */
          first = next_nonnote_insn (first);

          if (! arm_cirrus_insn_p (first))
            return;

          body = PATTERN (first);

          /* (float (blah)) is in parallel with a clobber.  */
          if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0))
            body = XVECEXP (body, 0, 0);

          if (GET_CODE (body) == FLOAT)
            body = XEXP (body, 0);

          if (get_attr_cirrus (first) == CIRRUS_MOVE
              && GET_CODE (XEXP (body, 1)) == REG
              && arm_regno == REGNO (XEXP (body, 1)))
            emit_insn_after (gen_nop (), first);

          return;
        }
    }

  /* get_attr cannot accept USE or CLOBBER.  */
  if (!first
      || GET_CODE (first) != INSN
      || GET_CODE (PATTERN (first)) == USE
      || GET_CODE (PATTERN (first)) == CLOBBER)
    return;

  attr = get_attr_cirrus (first);

  /* Any coprocessor compare instruction (cfcmps, cfcmpd, ...)
     must be followed by a non-coprocessor instruction.  */
  if (attr == CIRRUS_COMPARE)
    {
      nops = 0;

      t = next_nonnote_insn (first);

      if (arm_cirrus_insn_p (t))
        ++ nops;

      if (arm_cirrus_insn_p (next_nonnote_insn (t)))
        ++ nops;

      while (nops --)
        emit_insn_after (gen_nop (), first);

      return;
    }
}

/* Return TRUE if X references a SYMBOL_REF.  */
int
symbol_mentioned_p (rtx x)
{
  const char * fmt;
  int i;

  if (GET_CODE (x) == SYMBOL_REF)
    return 1;

  /* UNSPEC_TLS entries for a symbol include the SYMBOL_REF, but they
     are constant offsets, not symbols.  */
  if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
    return 0;

  fmt = GET_RTX_FORMAT (GET_CODE (x));

  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
        {
          int j;

          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (symbol_mentioned_p (XVECEXP (x, i, j)))
              return 1;
        }
      else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
        return 1;
    }

  return 0;
}

/* Return TRUE if X references a LABEL_REF.  */
int
label_mentioned_p (rtx x)
{
  const char * fmt;
  int i;

  if (GET_CODE (x) == LABEL_REF)
    return 1;

  /* UNSPEC_TLS entries for a symbol include a LABEL_REF for the referencing
     instruction, but they are constant offsets, not symbols.  */
  if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
    return 0;

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
        {
          int j;

          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (label_mentioned_p (XVECEXP (x, i, j)))
              return 1;
        }
      else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
        return 1;
    }

  return 0;
}

/* Return TRUE if X is a constant that refers to thread-local storage
   (an UNSPEC_TLS, possibly wrapped in a CONST).  */
int
tls_mentioned_p (rtx x)
{
  switch (GET_CODE (x))
    {
    case CONST:
      return tls_mentioned_p (XEXP (x, 0));

    case UNSPEC:
      if (XINT (x, 1) == UNSPEC_TLS)
        return 1;
      /* Fall through.  */

    default:
      return 0;
    }
}

/* Must not copy any rtx that uses a pc-relative address.  */

static int
arm_note_pic_base (rtx *x, void *data ATTRIBUTE_UNUSED)
{
  if (GET_CODE (*x) == UNSPEC
      && XINT (*x, 1) == UNSPEC_PIC_BASE)
    return 1;
  return 0;
}

static bool
arm_cannot_copy_insn_p (rtx insn)
{
  return for_each_rtx (&PATTERN (insn), arm_note_pic_base, NULL);
}

/* Return the comparison code corresponding to min/max operation code X.  */
enum rtx_code
minmax_code (rtx x)
{
  enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case SMAX:
      return GE;
    case SMIN:
      return LE;
    case UMIN:
      return LEU;
    case UMAX:
      return GEU;
    default:
      gcc_unreachable ();
    }
}

/* Return 1 if memory locations are adjacent.  */
int
adjacent_mem_locations (rtx a, rtx b)
{
  /* We don't guarantee to preserve the order of these memory refs.  */
  if (volatile_refs_p (a) || volatile_refs_p (b))
    return 0;

  if ((GET_CODE (XEXP (a, 0)) == REG
       || (GET_CODE (XEXP (a, 0)) == PLUS
           && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
      && (GET_CODE (XEXP (b, 0)) == REG
          || (GET_CODE (XEXP (b, 0)) == PLUS
              && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
    {
      HOST_WIDE_INT val0 = 0, val1 = 0;
      rtx reg0, reg1;
      int val_diff;

      if (GET_CODE (XEXP (a, 0)) == PLUS)
        {
          reg0 = XEXP (XEXP (a, 0), 0);
          val0 = INTVAL (XEXP (XEXP (a, 0), 1));
        }
      else
        reg0 = XEXP (a, 0);

      if (GET_CODE (XEXP (b, 0)) == PLUS)
        {
          reg1 = XEXP (XEXP (b, 0), 0);
          val1 = INTVAL (XEXP (XEXP (b, 0), 1));
        }
      else
        reg1 = XEXP (b, 0);

      /* Don't accept any offset that will require multiple
         instructions to handle, since this would cause the
         arith_adjacentmem pattern to output an overlong sequence.  */
      if (!const_ok_for_op (val0, PLUS) || !const_ok_for_op (val1, PLUS))
        return 0;

      /* Don't allow an eliminable register: register elimination can make
         the offset too large.  */
      if (arm_eliminable_register (reg0))
        return 0;

      val_diff = val1 - val0;

      if (arm_ld_sched)
        {
          /* If the target has load delay slots, then there's no benefit
             to using an ldm instruction unless the offset is zero and
             we are optimizing for size.  */
          return (optimize_size && (REGNO (reg0) == REGNO (reg1))
                  && (val0 == 0 || val1 == 0 || val0 == 4 || val1 == 4)
                  && (val_diff == 4 || val_diff == -4));
        }

      return ((REGNO (reg0) == REGNO (reg1))
              && (val_diff == 4 || val_diff == -4));
    }

  return 0;
}
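
/* Editorial illustration (not in the original sources): [r3, #4] and
   [r3, #8] are adjacent -- same base register, offsets differing by 4, both
   encodable in a single add/sub -- so on a target without load delay slots
   the pair can feed the arith_adjacentmem/load-multiple peepholes; [r3] and
   [r3, #8] are not adjacent.  */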

/* Analyze a load-multiple candidate.  OPERANDS holds NOPS destination
   registers followed by NOPS memory operands.  If BASE is non-null, the
   sorted register numbers are written to REGS, the base register number to
   *BASE and the lowest memory offset to *LOAD_OFFSET.  The return value
   selects the sequence to emit: 0 = not possible, 1 = ldmia, 2 = ldmib,
   3 = ldmda, 4 = ldmdb, 5 = set up the base address first, then ldmia.  */
int
load_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
                        HOST_WIDE_INT *load_offset)
{
  int unsorted_regs[4];
  HOST_WIDE_INT unsorted_offsets[4];
  int order[4];
  int base_reg = -1;
  int i;

  /* Can only handle 2, 3, or 4 insns at present,
     though could be easily extended if required.  */
  gcc_assert (nops >= 2 && nops <= 4);

  memset (order, 0, 4 * sizeof (int));

  /* Loop over the operands and check that the memory references are
     suitable (i.e. immediate offsets from the same base register).  At
     the same time, extract the target register, and the memory
     offsets.  */
  for (i = 0; i < nops; i++)
    {
      rtx reg;
      rtx offset;

      /* Convert a subreg of a mem into the mem itself.  */
      if (GET_CODE (operands[nops + i]) == SUBREG)
        operands[nops + i] = alter_subreg (operands + (nops + i));

      gcc_assert (GET_CODE (operands[nops + i]) == MEM);

      /* Don't reorder volatile memory references; it doesn't seem worth
         looking for the case where the order is ok anyway.  */
      if (MEM_VOLATILE_P (operands[nops + i]))
        return 0;

      offset = const0_rtx;

      if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
           || (GET_CODE (reg) == SUBREG
               && GET_CODE (reg = SUBREG_REG (reg)) == REG))
          || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
              && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
                   == REG)
                  || (GET_CODE (reg) == SUBREG
                      && GET_CODE (reg = SUBREG_REG (reg)) == REG))
              && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
                  == CONST_INT)))
        {
          if (i == 0)
            {
              base_reg = REGNO (reg);
              unsorted_regs[0] = (GET_CODE (operands[i]) == REG
                                  ? REGNO (operands[i])
                                  : REGNO (SUBREG_REG (operands[i])));
              order[0] = 0;
            }
          else
            {
              if (base_reg != (int) REGNO (reg))
                /* Not addressed from the same base register.  */
                return 0;

              unsorted_regs[i] = (GET_CODE (operands[i]) == REG
                                  ? REGNO (operands[i])
                                  : REGNO (SUBREG_REG (operands[i])));
              if (unsorted_regs[i] < unsorted_regs[order[0]])
                order[0] = i;
            }

          /* If it isn't an integer register, or if it overwrites the
             base register but isn't the last insn in the list, then
             we can't do this.  */
          if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
              || (i != nops - 1 && unsorted_regs[i] == base_reg))
            return 0;

          unsorted_offsets[i] = INTVAL (offset);
        }
      else
        /* Not a suitable memory address.  */
        return 0;
    }

  /* All the useful information has now been extracted from the
     operands into unsorted_regs and unsorted_offsets; additionally,
     order[0] has been set to the lowest numbered register in the
     list.  Sort the registers into order, and check that the memory
     offsets are ascending and adjacent.  */

  for (i = 1; i < nops; i++)
    {
      int j;

      order[i] = order[i - 1];
      for (j = 0; j < nops; j++)
        if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
            && (order[i] == order[i - 1]
                || unsorted_regs[j] < unsorted_regs[order[i]]))
          order[i] = j;

      /* Have we found a suitable register?  If not, one must be used more
         than once.  */
      if (order[i] == order[i - 1])
        return 0;

      /* Is the memory address adjacent and ascending?  */
      if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
        return 0;
    }

  if (base)
    {
      *base = base_reg;

      for (i = 0; i < nops; i++)
        regs[i] = unsorted_regs[order[i]];

      *load_offset = unsorted_offsets[order[0]];
    }

  if (unsorted_offsets[order[0]] == 0)
    return 1; /* ldmia */

  if (TARGET_ARM && unsorted_offsets[order[0]] == 4)
    return 2; /* ldmib */

  if (TARGET_ARM && unsorted_offsets[order[nops - 1]] == 0)
    return 3; /* ldmda */

  if (unsorted_offsets[order[nops - 1]] == -4)
    return 4; /* ldmdb */

  /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm
     if the offset isn't small enough.  The reason 2 ldrs are faster
     is because these ARMs are able to do more than one cache access
     in a single cycle.  The ARM9 and StrongARM have Harvard caches,
     whilst the ARM8 has a double bandwidth cache.  This means that
     these cores can do both an instruction fetch and a data fetch in
     a single cycle, so the trick of calculating the address into a
     scratch register (one of the result regs) and then doing a load
     multiple actually becomes slower (and no smaller in code size).
     That is the transformation

        ldr     rd1, [rbase + offset]
        ldr     rd2, [rbase + offset + 4]

     to

        add     rd1, rbase, offset
        ldmia   rd1, {rd1, rd2}

     produces worse code -- '3 cycles + any stalls on rd2' instead of
     '2 cycles + any stalls on rd2'.  On ARMs with only one cache
     access per cycle, the first sequence could never complete in less
     than 6 cycles, whereas the ldm sequence would only take 5 and
     would make better use of sequential accesses if not hitting the
     cache.

     We cheat here and test 'arm_ld_sched' which we currently know to
     only be true for the ARM8, ARM9 and StrongARM.  If this ever
     changes, then the test below needs to be reworked.  */
  if (nops == 2 && arm_ld_sched)
    return 0;

  /* Can't do it without setting up the offset, only do this if it takes
     no more than one insn.  */
  return (const_ok_for_arm (unsorted_offsets[order[0]])
          || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
}
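
/* Editorial worked example (not in the original sources): for the pair
   r4 <- [r0, #4], r5 <- [r0, #8] the offsets are adjacent and ascending and
   the lowest offset is 4, so on ARM the function returns 2 and the pair can
   be emitted as a single "ldmib r0, {r4, r5}".  */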

/* Output the assembler for the load-multiple peephole described by
   OPERANDS/NOPS, as selected by load_multiple_sequence.  */
const char *
emit_ldm_seq (rtx *operands, int nops)
{
  int regs[4];
  int base_reg;
  HOST_WIDE_INT offset;
  char buf[100];
  int i;

  switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
    {
    case 1:
      strcpy (buf, "ldm%(ia%)\t");
      break;

    case 2:
      strcpy (buf, "ldm%(ib%)\t");
      break;

    case 3:
      strcpy (buf, "ldm%(da%)\t");
      break;

    case 4:
      strcpy (buf, "ldm%(db%)\t");
      break;

    case 5:
      if (offset >= 0)
        sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
                 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
                 (long) offset);
      else
        sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
                 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
                 (long) -offset);
      output_asm_insn (buf, operands);
      base_reg = regs[0];
      strcpy (buf, "ldm%(ia%)\t");
      break;

    default:
      gcc_unreachable ();
    }

  sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
           reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);

  for (i = 1; i < nops; i++)
    sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
             reg_names[regs[i]]);

  strcat (buf, "}\t%@ phole ldm");

  output_asm_insn (buf, operands);
  return "";
}
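
/* Editorial note (not in the original sources): after the %-escapes are
   expanded by output_asm_insn, case 1 above prints something like

     ldmia   r0, {r4, r5}   @ phole ldm

   with the "%@ phole ldm" trailer marking the instruction in the assembly
   listing as a peephole-generated load-multiple.  */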

/* Analyze a store-multiple candidate; the counterpart of
   load_multiple_sequence.  The return value selects the sequence to emit:
   0 = not possible, 1 = stmia, 2 = stmib, 3 = stmda, 4 = stmdb.  */
int
store_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
                         HOST_WIDE_INT *load_offset)
{
  int unsorted_regs[4];
  HOST_WIDE_INT unsorted_offsets[4];
  int order[4];
  int base_reg = -1;
  int i;

  /* Can only handle 2, 3, or 4 insns at present, though could be easily
     extended if required.  */
  gcc_assert (nops >= 2 && nops <= 4);

  memset (order, 0, 4 * sizeof (int));

  /* Loop over the operands and check that the memory references are
     suitable (i.e. immediate offsets from the same base register).  At
     the same time, extract the target register, and the memory
     offsets.  */
  for (i = 0; i < nops; i++)
    {
      rtx reg;
      rtx offset;

      /* Convert a subreg of a mem into the mem itself.  */
      if (GET_CODE (operands[nops + i]) == SUBREG)
        operands[nops + i] = alter_subreg (operands + (nops + i));

      gcc_assert (GET_CODE (operands[nops + i]) == MEM);

      /* Don't reorder volatile memory references; it doesn't seem worth
         looking for the case where the order is ok anyway.  */
      if (MEM_VOLATILE_P (operands[nops + i]))
        return 0;

      offset = const0_rtx;

      if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
           || (GET_CODE (reg) == SUBREG
               && GET_CODE (reg = SUBREG_REG (reg)) == REG))
          || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
              && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
                   == REG)
                  || (GET_CODE (reg) == SUBREG
                      && GET_CODE (reg = SUBREG_REG (reg)) == REG))
              && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
                  == CONST_INT)))
        {
          if (i == 0)
            {
              base_reg = REGNO (reg);
              unsorted_regs[0] = (GET_CODE (operands[i]) == REG
                                  ? REGNO (operands[i])
                                  : REGNO (SUBREG_REG (operands[i])));
              order[0] = 0;
            }
          else
            {
              if (base_reg != (int) REGNO (reg))
                /* Not addressed from the same base register.  */
                return 0;

              unsorted_regs[i] = (GET_CODE (operands[i]) == REG
                                  ? REGNO (operands[i])
                                  : REGNO (SUBREG_REG (operands[i])));
              if (unsorted_regs[i] < unsorted_regs[order[0]])
                order[0] = i;
            }

          /* If it isn't an integer register, then we can't do this.  */
          if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
            return 0;

          unsorted_offsets[i] = INTVAL (offset);
        }
      else
        /* Not a suitable memory address.  */
        return 0;
    }

  /* All the useful information has now been extracted from the
     operands into unsorted_regs and unsorted_offsets; additionally,
     order[0] has been set to the lowest numbered register in the
     list.  Sort the registers into order, and check that the memory
     offsets are ascending and adjacent.  */

  for (i = 1; i < nops; i++)
    {
      int j;

      order[i] = order[i - 1];
      for (j = 0; j < nops; j++)
        if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
            && (order[i] == order[i - 1]
                || unsorted_regs[j] < unsorted_regs[order[i]]))
          order[i] = j;

      /* Have we found a suitable register?  If not, one must be used more
         than once.  */
      if (order[i] == order[i - 1])
        return 0;

      /* Is the memory address adjacent and ascending?  */
      if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
        return 0;
    }

  if (base)
    {
      *base = base_reg;

      for (i = 0; i < nops; i++)
        regs[i] = unsorted_regs[order[i]];

      *load_offset = unsorted_offsets[order[0]];
    }

  if (unsorted_offsets[order[0]] == 0)
    return 1; /* stmia */

  if (unsorted_offsets[order[0]] == 4)
    return 2; /* stmib */

  if (unsorted_offsets[order[nops - 1]] == 0)
    return 3; /* stmda */

  if (unsorted_offsets[order[nops - 1]] == -4)
    return 4; /* stmdb */

  return 0;
}

/* Output the assembler for the store-multiple peephole described by
   OPERANDS/NOPS, as selected by store_multiple_sequence.  */
const char *
emit_stm_seq (rtx *operands, int nops)
{
  int regs[4];
  int base_reg;
  HOST_WIDE_INT offset;
  char buf[100];
  int i;

  switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
    {
    case 1:
      strcpy (buf, "stm%(ia%)\t");
      break;

    case 2:
      strcpy (buf, "stm%(ib%)\t");
      break;

    case 3:
      strcpy (buf, "stm%(da%)\t");
      break;

    case 4:
      strcpy (buf, "stm%(db%)\t");
      break;

    default:
      gcc_unreachable ();
    }

  sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
           reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);

  for (i = 1; i < nops; i++)
    sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
             reg_names[regs[i]]);

  strcat (buf, "}\t%@ phole stm");

  output_asm_insn (buf, operands);
  return "";
}

/* Routines for use in generating RTL.  */

rtx
arm_gen_load_multiple (int base_regno, int count, rtx from, int up,
                       int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
{
  HOST_WIDE_INT offset = *offsetp;
  int i = 0, j;
  rtx result;
  int sign = up ? 1 : -1;
  rtx mem, addr;

  /* XScale has load-store double instructions, but they have stricter
     alignment requirements than load-store multiple, so we cannot
     use them.

     For XScale ldm requires 2 + NREGS cycles to complete and blocks
     the pipeline until completion.

        NREGS           CYCLES
          1               3
          2               4
          3               5
          4               6

     An ldr instruction takes 1-3 cycles, but does not block the
     pipeline.

        NREGS           CYCLES
          1              1-3
          2              2-6
          3              3-9
          4              4-12

     Best case ldr will always win.  However, the more ldr instructions
     we issue, the less likely we are to be able to schedule them well.
     Using ldr instructions also increases code size.

     As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
     for counts of 3 or 4 regs.  */
  if (arm_tune_xscale && count <= 2 && ! optimize_size)
    {
      rtx seq;

      start_sequence ();

      for (i = 0; i < count; i++)
        {
          addr = plus_constant (from, i * 4 * sign);
          mem = adjust_automodify_address (basemem, SImode, addr, offset);
          emit_move_insn (gen_rtx_REG (SImode, base_regno + i), mem);
          offset += 4 * sign;
        }

      if (write_back)
        {
          emit_move_insn (from, plus_constant (from, count * 4 * sign));
          *offsetp = offset;
        }

      seq = get_insns ();
      end_sequence ();

      return seq;
    }

  result = gen_rtx_PARALLEL (VOIDmode,
                             rtvec_alloc (count + (write_back ? 1 : 0)));
  if (write_back)
    {
      XVECEXP (result, 0, 0)
        = gen_rtx_SET (VOIDmode, from, plus_constant (from, count * 4 * sign));
      i = 1;
      count++;
    }

  for (j = 0; i < count; i++, j++)
    {
      addr = plus_constant (from, j * 4 * sign);
      mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
      XVECEXP (result, 0, i)
        = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, base_regno + j), mem);
      offset += 4 * sign;
    }

  if (write_back)
    *offsetp = offset;

  return result;
}
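
/* As an illustration (not part of the original source), a call such as
   arm_gen_load_multiple (0, 3, from, TRUE, FALSE, basemem, &off) on a
   non-XScale tuning yields a PARALLEL of the form

     (parallel [(set (reg:SI 0) (mem:SI from))
                (set (reg:SI 1) (mem:SI (plus:SI from (const_int 4))))
                (set (reg:SI 2) (mem:SI (plus:SI from (const_int 8))))])

   which later matches the load-multiple patterns in arm.md, while the
   XScale path above instead returns a plain sequence of single-word
   moves.  */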

rtx
arm_gen_store_multiple (int base_regno, int count, rtx to, int up,
                        int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
{
  HOST_WIDE_INT offset = *offsetp;
  int i = 0, j;
  rtx result;
  int sign = up ? 1 : -1;
  rtx mem, addr;

  /* See arm_gen_load_multiple for discussion of
     the pros/cons of ldm/stm usage for XScale.  */
  if (arm_tune_xscale && count <= 2 && ! optimize_size)
    {
      rtx seq;

      start_sequence ();

      for (i = 0; i < count; i++)
        {
          addr = plus_constant (to, i * 4 * sign);
          mem = adjust_automodify_address (basemem, SImode, addr, offset);
          emit_move_insn (mem, gen_rtx_REG (SImode, base_regno + i));
          offset += 4 * sign;
        }

      if (write_back)
        {
          emit_move_insn (to, plus_constant (to, count * 4 * sign));
          *offsetp = offset;
        }

      seq = get_insns ();
      end_sequence ();

      return seq;
    }

  result = gen_rtx_PARALLEL (VOIDmode,
                             rtvec_alloc (count + (write_back ? 1 : 0)));
  if (write_back)
    {
      XVECEXP (result, 0, 0)
        = gen_rtx_SET (VOIDmode, to,
                       plus_constant (to, count * 4 * sign));
      i = 1;
      count++;
    }

  for (j = 0; i < count; i++, j++)
    {
      addr = plus_constant (to, j * 4 * sign);
      mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
      XVECEXP (result, 0, i)
        = gen_rtx_SET (VOIDmode, mem, gen_rtx_REG (SImode, base_regno + j));
      offset += 4 * sign;
    }

  if (write_back)
    *offsetp = offset;

  return result;
}

int
arm_gen_movmemqi (rtx *operands)
{
  HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
  HOST_WIDE_INT srcoffset, dstoffset;
  int i;
  rtx src, dst, srcbase, dstbase;
  rtx part_bytes_reg = NULL;
  rtx mem;

  if (GET_CODE (operands[2]) != CONST_INT
      || GET_CODE (operands[3]) != CONST_INT
      || INTVAL (operands[2]) > 64
      || INTVAL (operands[3]) & 3)
    return 0;

  dstbase = operands[0];
  srcbase = operands[1];

  dst = copy_to_mode_reg (SImode, XEXP (dstbase, 0));
  src = copy_to_mode_reg (SImode, XEXP (srcbase, 0));

  in_words_to_go = ARM_NUM_INTS (INTVAL (operands[2]));
  out_words_to_go = INTVAL (operands[2]) / 4;
  last_bytes = INTVAL (operands[2]) & 3;
  dstoffset = srcoffset = 0;

  if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
    part_bytes_reg = gen_rtx_REG (SImode, (in_words_to_go - 1) & 3);

  for (i = 0; in_words_to_go >= 2; i+=4)
    {
      if (in_words_to_go > 4)
        emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
                                          srcbase, &srcoffset));
      else
        emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
                                          FALSE, srcbase, &srcoffset));

      if (out_words_to_go)
        {
          if (out_words_to_go > 4)
            emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
                                               dstbase, &dstoffset));
          else if (out_words_to_go != 1)
            emit_insn (arm_gen_store_multiple (0, out_words_to_go,
                                               dst, TRUE,
                                               (last_bytes == 0
                                                ? FALSE : TRUE),
                                               dstbase, &dstoffset));
          else
            {
              mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
              emit_move_insn (mem, gen_rtx_REG (SImode, 0));
              if (last_bytes != 0)
                {
                  emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
                  dstoffset += 4;
                }
            }
        }

      in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
      out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
    }

  /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do.  */
  if (out_words_to_go)
    {
      rtx sreg;

      mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
      sreg = copy_to_reg (mem);

      mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
      emit_move_insn (mem, sreg);
      in_words_to_go--;

      gcc_assert (!in_words_to_go);     /* Sanity check */
    }

  if (in_words_to_go)
    {
      gcc_assert (in_words_to_go > 0);

      mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
      part_bytes_reg = copy_to_mode_reg (SImode, mem);
    }

  gcc_assert (!last_bytes || part_bytes_reg);

  if (BYTES_BIG_ENDIAN && last_bytes)
    {
      rtx tmp = gen_reg_rtx (SImode);

      /* The bytes we want are in the top end of the word.  */
      emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
                              GEN_INT (8 * (4 - last_bytes))));
      part_bytes_reg = tmp;

      while (last_bytes)
        {
          mem = adjust_automodify_address (dstbase, QImode,
                                           plus_constant (dst, last_bytes - 1),
                                           dstoffset + last_bytes - 1);
          emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));

          if (--last_bytes)
            {
              tmp = gen_reg_rtx (SImode);
              emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
              part_bytes_reg = tmp;
            }
        }

    }
  else
    {
      if (last_bytes > 1)
        {
          mem = adjust_automodify_address (dstbase, HImode, dst, dstoffset);
          emit_move_insn (mem, gen_lowpart (HImode, part_bytes_reg));
          last_bytes -= 2;
          if (last_bytes)
            {
              rtx tmp = gen_reg_rtx (SImode);
              emit_insn (gen_addsi3 (dst, dst, const2_rtx));
              emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (16)));
              part_bytes_reg = tmp;
              dstoffset += 2;
            }
        }

      if (last_bytes)
        {
          mem = adjust_automodify_address (dstbase, QImode, dst, dstoffset);
          emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
        }
    }

  return 1;
}
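
/* A worked example (illustrative, not in the original source): for a
   word-aligned copy of 14 bytes, in_words_to_go = 4, out_words_to_go
   = 3 and last_bytes = 2.  The loop above emits one 4-word load
   multiple and one 3-word store multiple (the store with write-back,
   since trailing bytes remain); the final two bytes are already held
   in part_bytes_reg and, on a little-endian target, are written out
   by the single HImode store at the end.  */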
9609
 
9610
/* Select a dominance comparison mode if possible for a test of the general
9611
   form (OP (COND_OR (X) (Y)) (const_int 0)).  We support three forms.
9612
   COND_OR == DOM_CC_X_AND_Y => (X && Y)
9613
   COND_OR == DOM_CC_NX_OR_Y => ((! X) || Y)
9614
   COND_OR == DOM_CC_X_OR_Y => (X || Y)
9615
   In all cases OP will be either EQ or NE, but we don't need to know which
9616
   here.  If we are unable to support a dominance comparison we return
9617
   CC mode.  This will then fail to match for the RTL expressions that
9618
   generate this call.  */
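/* For example (illustrative, not part of the original comment): for
   (ior (eq r0 r1) (le r0 r1)) tested against zero, cond1 = EQ
   dominates cond2 = LE, so the function returns CC_DLEmode; combining
   EQ with GTU, by contrast, falls through to CCmode because neither
   condition dominates the other.  */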
enum machine_mode
arm_select_dominance_cc_mode (rtx x, rtx y, HOST_WIDE_INT cond_or)
{
  enum rtx_code cond1, cond2;
  int swapped = 0;

  /* Currently we will probably get the wrong result if the individual
     comparisons are not simple.  This also ensures that it is safe to
     reverse a comparison if necessary.  */
  if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
       != CCmode)
      || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
          != CCmode))
    return CCmode;

  /* The if_then_else variant of this tests the second condition if the
     first passes, but is true if the first fails.  Reverse the first
     condition to get a true "inclusive-or" expression.  */
  if (cond_or == DOM_CC_NX_OR_Y)
    cond1 = reverse_condition (cond1);

  /* If the comparisons are not equal, and one doesn't dominate the other,
     then we can't do this.  */
  if (cond1 != cond2
      && !comparison_dominates_p (cond1, cond2)
      && (swapped = 1, !comparison_dominates_p (cond2, cond1)))
    return CCmode;

  if (swapped)
    {
      enum rtx_code temp = cond1;
      cond1 = cond2;
      cond2 = temp;
    }

  switch (cond1)
    {
    case EQ:
      if (cond_or == DOM_CC_X_AND_Y)
        return CC_DEQmode;

      switch (cond2)
        {
        case EQ: return CC_DEQmode;
        case LE: return CC_DLEmode;
        case LEU: return CC_DLEUmode;
        case GE: return CC_DGEmode;
        case GEU: return CC_DGEUmode;
        default: gcc_unreachable ();
        }

    case LT:
      if (cond_or == DOM_CC_X_AND_Y)
        return CC_DLTmode;

      switch (cond2)
        {
        case LT:
          return CC_DLTmode;
        case LE:
          return CC_DLEmode;
        case NE:
          return CC_DNEmode;
        default:
          gcc_unreachable ();
        }

    case GT:
      if (cond_or == DOM_CC_X_AND_Y)
        return CC_DGTmode;

      switch (cond2)
        {
        case GT:
          return CC_DGTmode;
        case GE:
          return CC_DGEmode;
        case NE:
          return CC_DNEmode;
        default:
          gcc_unreachable ();
        }

    case LTU:
      if (cond_or == DOM_CC_X_AND_Y)
        return CC_DLTUmode;

      switch (cond2)
        {
        case LTU:
          return CC_DLTUmode;
        case LEU:
          return CC_DLEUmode;
        case NE:
          return CC_DNEmode;
        default:
          gcc_unreachable ();
        }

    case GTU:
      if (cond_or == DOM_CC_X_AND_Y)
        return CC_DGTUmode;

      switch (cond2)
        {
        case GTU:
          return CC_DGTUmode;
        case GEU:
          return CC_DGEUmode;
        case NE:
          return CC_DNEmode;
        default:
          gcc_unreachable ();
        }

    /* The remaining cases only occur when both comparisons are the
       same.  */
    case NE:
      gcc_assert (cond1 == cond2);
      return CC_DNEmode;

    case LE:
      gcc_assert (cond1 == cond2);
      return CC_DLEmode;

    case GE:
      gcc_assert (cond1 == cond2);
      return CC_DGEmode;

    case LEU:
      gcc_assert (cond1 == cond2);
      return CC_DLEUmode;

    case GEU:
      gcc_assert (cond1 == cond2);
      return CC_DGEUmode;

    default:
      gcc_unreachable ();
    }
}

enum machine_mode
arm_select_cc_mode (enum rtx_code op, rtx x, rtx y)
{
  /* All floating point compares return CCFP if it is an equality
     comparison, and CCFPE otherwise.  */
  if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
    {
      switch (op)
        {
        case EQ:
        case NE:
        case UNORDERED:
        case ORDERED:
        case UNLT:
        case UNLE:
        case UNGT:
        case UNGE:
        case UNEQ:
        case LTGT:
          return CCFPmode;

        case LT:
        case LE:
        case GT:
        case GE:
          if (TARGET_HARD_FLOAT && TARGET_MAVERICK)
            return CCFPmode;
          return CCFPEmode;

        default:
          gcc_unreachable ();
        }
    }

  /* A compare with a shifted operand.  Because of canonicalization, the
     comparison will have to be swapped when we emit the assembler.  */
  if (GET_MODE (y) == SImode
      && (REG_P (y) || (GET_CODE (y) == SUBREG))
      && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
          || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
          || GET_CODE (x) == ROTATERT))
    return CC_SWPmode;

  /* This operation is performed swapped, but since we only rely on the Z
     flag we don't need an additional mode.  */
  if (GET_MODE (y) == SImode
      && (REG_P (y) || (GET_CODE (y) == SUBREG))
      && GET_CODE (x) == NEG
      && (op == EQ || op == NE))
    return CC_Zmode;

  /* This is a special case that is used by combine to allow a
     comparison of a shifted byte load to be split into a zero-extend
     followed by a comparison of the shifted integer (only valid for
     equalities and unsigned inequalities).  */
  if (GET_MODE (x) == SImode
      && GET_CODE (x) == ASHIFT
      && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
      && GET_CODE (XEXP (x, 0)) == SUBREG
      && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
      && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
      && (op == EQ || op == NE
          || op == GEU || op == GTU || op == LTU || op == LEU)
      && GET_CODE (y) == CONST_INT)
    return CC_Zmode;

  /* A construct for a conditional compare, if the false arm contains
     0, then both conditions must be true, otherwise either condition
     must be true.  Not all conditions are possible, so CCmode is
     returned if it can't be done.  */
  if (GET_CODE (x) == IF_THEN_ELSE
      && (XEXP (x, 2) == const0_rtx
          || XEXP (x, 2) == const1_rtx)
      && COMPARISON_P (XEXP (x, 0))
      && COMPARISON_P (XEXP (x, 1)))
    return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
                                         INTVAL (XEXP (x, 2)));

  /* Alternate canonicalizations of the above.  These are somewhat cleaner.  */
  if (GET_CODE (x) == AND
      && COMPARISON_P (XEXP (x, 0))
      && COMPARISON_P (XEXP (x, 1)))
    return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
                                         DOM_CC_X_AND_Y);

  if (GET_CODE (x) == IOR
      && COMPARISON_P (XEXP (x, 0))
      && COMPARISON_P (XEXP (x, 1)))
    return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
                                         DOM_CC_X_OR_Y);

  /* An operation (on Thumb) where we want to test for a single bit.
     This is done by shifting that bit up into the top bit of a
     scratch register; we can then branch on the sign bit.  */
  if (TARGET_THUMB1
      && GET_MODE (x) == SImode
      && (op == EQ || op == NE)
      && GET_CODE (x) == ZERO_EXTRACT
      && XEXP (x, 1) == const1_rtx)
    return CC_Nmode;

  /* For an operation that sets the condition codes as a side-effect, the
     V flag is not set correctly, so we can only use comparisons where
     this doesn't matter.  (For LT and GE we can use "mi" and "pl"
     instead.)  */
  /* ??? Does the ZERO_EXTRACT case really apply to thumb2?  */
  if (GET_MODE (x) == SImode
      && y == const0_rtx
      && (op == EQ || op == NE || op == LT || op == GE)
      && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
          || GET_CODE (x) == AND || GET_CODE (x) == IOR
          || GET_CODE (x) == XOR || GET_CODE (x) == MULT
          || GET_CODE (x) == NOT || GET_CODE (x) == NEG
          || GET_CODE (x) == LSHIFTRT
          || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
          || GET_CODE (x) == ROTATERT
          || (TARGET_32BIT && GET_CODE (x) == ZERO_EXTRACT)))
    return CC_NOOVmode;

  if (GET_MODE (x) == QImode && (op == EQ || op == NE))
    return CC_Zmode;

  if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
      && GET_CODE (x) == PLUS
      && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
    return CC_Cmode;

  return CCmode;
}
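
/* Two illustrative examples (assumptions, not from the original
   source): SELECT_CC_MODE (LTU, (plus r0 r1), r0) returns CC_Cmode,
   since an unsigned "a + b < a" overflow test needs only the carry
   flag, while SELECT_CC_MODE (NE, (neg r0), r1) returns CC_Zmode,
   since that swapped comparison relies only on the Z flag.  */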

/* X and Y are two things to compare using CODE.  Emit the compare insn and
   return the rtx for the CC reg in the proper mode.  FP means this is a
   floating point compare: I don't think that it is needed on the arm.  */
rtx
arm_gen_compare_reg (enum rtx_code code, rtx x, rtx y)
{
  enum machine_mode mode = SELECT_CC_MODE (code, x, y);
  rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);

  emit_set_insn (cc_reg, gen_rtx_COMPARE (mode, x, y));

  return cc_reg;
}

/* Generate a sequence of insns that will generate the correct return
   address mask depending on the physical architecture that the program
   is running on.  */
rtx
arm_gen_return_addr_mask (void)
{
  rtx reg = gen_reg_rtx (Pmode);

  emit_insn (gen_return_addr_mask (reg));
  return reg;
}

void
arm_reload_in_hi (rtx *operands)
{
  rtx ref = operands[1];
  rtx base, scratch;
  HOST_WIDE_INT offset = 0;

  if (GET_CODE (ref) == SUBREG)
    {
      offset = SUBREG_BYTE (ref);
      ref = SUBREG_REG (ref);
    }

  if (GET_CODE (ref) == REG)
    {
      /* We have a pseudo which has been spilt onto the stack; there
         are two cases here: the first where there is a simple
         stack-slot replacement and a second where the stack-slot is
         out of range, or is used as a subreg.  */
      if (reg_equiv_mem[REGNO (ref)])
        {
          ref = reg_equiv_mem[REGNO (ref)];
          base = find_replacement (&XEXP (ref, 0));
        }
      else
        /* The slot is out of range, or was dressed up in a SUBREG.  */
        base = reg_equiv_address[REGNO (ref)];
    }
  else
    base = find_replacement (&XEXP (ref, 0));

  /* Handle the case where the address is too complex to be offset by 1.  */
  if (GET_CODE (base) == MINUS
      || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
    {
      rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);

      emit_set_insn (base_plus, base);
      base = base_plus;
    }
  else if (GET_CODE (base) == PLUS)
    {
      /* The addend must be CONST_INT, or we would have dealt with it above.  */
      HOST_WIDE_INT hi, lo;

      offset += INTVAL (XEXP (base, 1));
      base = XEXP (base, 0);

      /* Rework the address into a legal sequence of insns.  */
      /* Valid range for lo is -4095 -> 4095.  */
      lo = (offset >= 0
            ? (offset & 0xfff)
            : -((-offset) & 0xfff));

      /* Corner case: if lo is the max offset then we would be out of range
         once we have added the additional 1 below, so bump the msb into the
         pre-loading insn(s).  */
      if (lo == 4095)
        lo &= 0x7ff;

      hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
             ^ (HOST_WIDE_INT) 0x80000000)
            - (HOST_WIDE_INT) 0x80000000);

      gcc_assert (hi + lo == offset);
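
      /* Illustrative example of the split (not in the original source):
         offset = 0x1003 gives lo = 3 and hi = 0x1000, while offset =
         -0x1001 gives lo = -1 and hi = -0x1000; in both cases
         hi + lo == offset and lo stays within the single-insn +/-4095
         offset range.  */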

      if (hi != 0)
        {
          rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);

          /* Get the base address; addsi3 knows how to handle constants
             that require more than one insn.  */
          emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
          base = base_plus;
          offset = lo;
        }
    }

  /* Operands[2] may overlap operands[0] (though it won't overlap
     operands[1]), that's why we asked for a DImode reg -- so we can
     use the bit that does not overlap.  */
  if (REGNO (operands[2]) == REGNO (operands[0]))
    scratch = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
  else
    scratch = gen_rtx_REG (SImode, REGNO (operands[2]));

  emit_insn (gen_zero_extendqisi2 (scratch,
                                   gen_rtx_MEM (QImode,
                                                plus_constant (base,
                                                               offset))));
  emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
                                   gen_rtx_MEM (QImode,
                                                plus_constant (base,
                                                               offset + 1))));
  if (!BYTES_BIG_ENDIAN)
    emit_set_insn (gen_rtx_SUBREG (SImode, operands[0], 0),
                   gen_rtx_IOR (SImode,
                                gen_rtx_ASHIFT
                                (SImode,
                                 gen_rtx_SUBREG (SImode, operands[0], 0),
                                 GEN_INT (8)),
                                scratch));
  else
    emit_set_insn (gen_rtx_SUBREG (SImode, operands[0], 0),
                   gen_rtx_IOR (SImode,
                                gen_rtx_ASHIFT (SImode, scratch,
                                                GEN_INT (8)),
                                gen_rtx_SUBREG (SImode, operands[0], 0)));
}

/* Handle storing a half-word to memory during reload by synthesizing as two
   byte stores.  Take care not to clobber the input values until after we
   have moved them somewhere safe.  This code assumes that if the DImode
   scratch in operands[2] overlaps either the input value or output address
   in some way, then that value must die in this insn (we absolutely need
   two scratch registers for some corner cases).  */
void
arm_reload_out_hi (rtx *operands)
{
  rtx ref = operands[0];
  rtx outval = operands[1];
  rtx base, scratch;
  HOST_WIDE_INT offset = 0;

  if (GET_CODE (ref) == SUBREG)
    {
      offset = SUBREG_BYTE (ref);
      ref = SUBREG_REG (ref);
    }

  if (GET_CODE (ref) == REG)
    {
      /* We have a pseudo which has been spilt onto the stack; there
         are two cases here: the first where there is a simple
         stack-slot replacement and a second where the stack-slot is
         out of range, or is used as a subreg.  */
      if (reg_equiv_mem[REGNO (ref)])
        {
          ref = reg_equiv_mem[REGNO (ref)];
          base = find_replacement (&XEXP (ref, 0));
        }
      else
        /* The slot is out of range, or was dressed up in a SUBREG.  */
        base = reg_equiv_address[REGNO (ref)];
    }
  else
    base = find_replacement (&XEXP (ref, 0));

  scratch = gen_rtx_REG (SImode, REGNO (operands[2]));

  /* Handle the case where the address is too complex to be offset by 1.  */
  if (GET_CODE (base) == MINUS
      || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
    {
      rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);

      /* Be careful not to destroy OUTVAL.  */
      if (reg_overlap_mentioned_p (base_plus, outval))
        {
          /* Updating base_plus might destroy outval; see if we can
             swap the scratch and base_plus.  */
          if (!reg_overlap_mentioned_p (scratch, outval))
            {
              rtx tmp = scratch;
              scratch = base_plus;
              base_plus = tmp;
            }
          else
            {
              rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));

              /* Be conservative and copy OUTVAL into the scratch now;
                 this should only be necessary if outval is a subreg
                 of something larger than a word.  */
              /* XXX Might this clobber base?  I can't see how it can,
                 since scratch is known to overlap with OUTVAL, and
                 must be wider than a word.  */
              emit_insn (gen_movhi (scratch_hi, outval));
              outval = scratch_hi;
            }
        }

      emit_set_insn (base_plus, base);
      base = base_plus;
    }
  else if (GET_CODE (base) == PLUS)
    {
      /* The addend must be CONST_INT, or we would have dealt with it above.  */
      HOST_WIDE_INT hi, lo;

      offset += INTVAL (XEXP (base, 1));
      base = XEXP (base, 0);

      /* Rework the address into a legal sequence of insns.  */
      /* Valid range for lo is -4095 -> 4095.  */
      lo = (offset >= 0
            ? (offset & 0xfff)
            : -((-offset) & 0xfff));

      /* Corner case: if lo is the max offset then we would be out of range
         once we have added the additional 1 below, so bump the msb into the
         pre-loading insn(s).  */
      if (lo == 4095)
        lo &= 0x7ff;

      hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
             ^ (HOST_WIDE_INT) 0x80000000)
            - (HOST_WIDE_INT) 0x80000000);

      gcc_assert (hi + lo == offset);

      if (hi != 0)
        {
          rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);

          /* Be careful not to destroy OUTVAL.  */
          if (reg_overlap_mentioned_p (base_plus, outval))
            {
              /* Updating base_plus might destroy outval; see if we
                 can swap the scratch and base_plus.  */
              if (!reg_overlap_mentioned_p (scratch, outval))
                {
                  rtx tmp = scratch;
                  scratch = base_plus;
                  base_plus = tmp;
                }
              else
                {
                  rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));

                  /* Be conservative and copy outval into scratch now;
                     this should only be necessary if outval is a
                     subreg of something larger than a word.  */
                  /* XXX Might this clobber base?  I can't see how it
                     can, since scratch is known to overlap with
                     outval.  */
                  emit_insn (gen_movhi (scratch_hi, outval));
                  outval = scratch_hi;
                }
            }

          /* Get the base address; addsi3 knows how to handle constants
             that require more than one insn.  */
          emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
          base = base_plus;
          offset = lo;
        }
    }

  if (BYTES_BIG_ENDIAN)
    {
      emit_insn (gen_movqi (gen_rtx_MEM (QImode,
                                         plus_constant (base, offset + 1)),
                            gen_lowpart (QImode, outval)));
      emit_insn (gen_lshrsi3 (scratch,
                              gen_rtx_SUBREG (SImode, outval, 0),
                              GEN_INT (8)));
      emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
                            gen_lowpart (QImode, scratch)));
    }
  else
    {
      emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
                            gen_lowpart (QImode, outval)));
      emit_insn (gen_lshrsi3 (scratch,
                              gen_rtx_SUBREG (SImode, outval, 0),
                              GEN_INT (8)));
      emit_insn (gen_movqi (gen_rtx_MEM (QImode,
                                         plus_constant (base, offset + 1)),
                            gen_lowpart (QImode, scratch)));
    }
}

/* Return true if a type must be passed in memory. For AAPCS, small aggregates
   (padded to the size of a word) should be passed in a register.  */

static bool
arm_must_pass_in_stack (enum machine_mode mode, const_tree type)
{
  if (TARGET_AAPCS_BASED)
    return must_pass_in_stack_var_size (mode, type);
  else
    return must_pass_in_stack_var_size_or_pad (mode, type);
}


/* For use by FUNCTION_ARG_PADDING (MODE, TYPE).
   Return true if an argument passed on the stack should be padded upwards,
   i.e. if the least-significant byte has useful data.
   For legacy APCS ABIs we use the default.  For AAPCS based ABIs small
   aggregate types are placed in the lowest memory address.  */

bool
arm_pad_arg_upward (enum machine_mode mode, const_tree type)
{
  if (!TARGET_AAPCS_BASED)
    return DEFAULT_FUNCTION_ARG_PADDING(mode, type) == upward;

  if (type && BYTES_BIG_ENDIAN && INTEGRAL_TYPE_P (type))
    return false;

  return true;
}


/* Similarly, for use by BLOCK_REG_PADDING (MODE, TYPE, FIRST).
   For non-AAPCS, return !BYTES_BIG_ENDIAN if the least significant
   byte of the register has useful data, and return the opposite if the
   most significant byte does.
   For AAPCS, small aggregates and small complex types are always padded
   upwards.  */

bool
arm_pad_reg_upward (enum machine_mode mode ATTRIBUTE_UNUSED,
                    tree type, int first ATTRIBUTE_UNUSED)
{
  if (TARGET_AAPCS_BASED
      && BYTES_BIG_ENDIAN
      && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == COMPLEX_TYPE)
      && int_size_in_bytes (type) <= 4)
    return true;

  /* Otherwise, use default padding.  */
  return !BYTES_BIG_ENDIAN;
}
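
/* For example (illustrative, not part of the original source): on a
   big-endian AAPCS target a 3-byte struct returns true here, so its
   bytes occupy the least significant end of the register, whereas the
   default !BYTES_BIG_ENDIAN rule would place it at the most
   significant end on such a target.  */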


/* Print a symbolic form of X to the debug file, F.  */
static void
arm_print_value (FILE *f, rtx x)
{
  switch (GET_CODE (x))
    {
    case CONST_INT:
      fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
      return;

    case CONST_DOUBLE:
      fprintf (f, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
      return;

    case CONST_VECTOR:
      {
        int i;

        fprintf (f, "<");
        for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
          {
            fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (CONST_VECTOR_ELT (x, i)));
            if (i < (CONST_VECTOR_NUNITS (x) - 1))
              fputc (',', f);
          }
        fprintf (f, ">");
      }
      return;

    case CONST_STRING:
      fprintf (f, "\"%s\"", XSTR (x, 0));
      return;

    case SYMBOL_REF:
      fprintf (f, "`%s'", XSTR (x, 0));
      return;

    case LABEL_REF:
      fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
      return;

    case CONST:
      arm_print_value (f, XEXP (x, 0));
      return;

    case PLUS:
      arm_print_value (f, XEXP (x, 0));
      fprintf (f, "+");
      arm_print_value (f, XEXP (x, 1));
      return;

    case PC:
      fprintf (f, "pc");
      return;

    default:
      fprintf (f, "????");
      return;
    }
}

/* Routines for manipulation of the constant pool.  */

/* Arm instructions cannot load a large constant directly into a
   register; they have to come from a pc relative load.  The constant
   must therefore be placed in the addressable range of the pc
   relative load.  Depending on the precise pc relative load
   instruction the range is somewhere between 256 bytes and 4k.  This
   means that we often have to dump a constant inside a function, and
   generate code to branch around it.

   It is important to minimize this, since the branches will slow
   things down and make the code larger.

   Normally we can hide the table after an existing unconditional
   branch so that there is no interruption of the flow, but in the
   worst case the code looks like this:

        ldr     rn, L1
        ...
        b       L2
        align
        L1:     .long value
        L2:
        ...

        ldr     rn, L3
        ...
        b       L4
        align
        L3:     .long value
        L4:
        ...

   We fix this by performing a scan after scheduling, which notices
   which instructions need to have their operands fetched from the
   constant table and builds the table.

   The algorithm starts by building a table of all the constants that
   need fixing up and all the natural barriers in the function (places
   where a constant table can be dropped without breaking the flow).
   For each fixup we note how far the pc-relative replacement will be
   able to reach and the offset of the instruction into the function.

   Having built the table we then group the fixes together to form
   tables that are as large as possible (subject to addressing
   constraints) and emit each table of constants after the last
   barrier that is within range of all the instructions in the group.
   If a group does not contain a barrier, then we forcibly create one
   by inserting a jump instruction into the flow.  Once the table has
   been inserted, the insns are then modified to reference the
   relevant entry in the pool.

   Possible enhancements to the algorithm (not implemented) are:

   1) For some processors and object formats, there may be benefit in
   aligning the pools to the start of cache lines; this alignment
   would need to be taken into account when calculating addressability
   of a pool.  */

/* These typedefs are located at the start of this file, so that
   they can be used in the prototypes there.  This comment is to
   remind readers of that fact so that the following structures
   can be understood more easily.

     typedef struct minipool_node    Mnode;
     typedef struct minipool_fixup   Mfix;  */

struct minipool_node
{
  /* Doubly linked chain of entries.  */
  Mnode * next;
  Mnode * prev;
  /* The maximum offset into the code at which this entry can be placed.
     While pushing fixes for forward references, all entries are sorted
     in order of increasing max_address.  */
  HOST_WIDE_INT max_address;
  /* Similarly for an entry inserted for a backwards ref.  */
  HOST_WIDE_INT min_address;
  /* The number of fixes referencing this entry.  This can become zero
     if we "unpush" an entry.  In this case we ignore the entry when we
     come to emit the code.  */
  int refcount;
  /* The offset from the start of the minipool.  */
  HOST_WIDE_INT offset;
  /* The value in the table.  */
  rtx value;
  /* The mode of value.  */
  enum machine_mode mode;
  /* The size of the value.  With iWMMXt enabled
     sizes > 4 also imply an alignment of 8-bytes.  */
  int fix_size;
};

struct minipool_fixup
{
  Mfix *            next;
  rtx               insn;
  HOST_WIDE_INT     address;
  rtx *             loc;
  enum machine_mode mode;
  int               fix_size;
  rtx               value;
  Mnode *           minipool;
  HOST_WIDE_INT     forwards;
  HOST_WIDE_INT     backwards;
};

/* Fixes less than a word need padding out to a word boundary.  */
#define MINIPOOL_FIX_SIZE(mode) \
  (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)
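
/* For instance (illustrative): MINIPOOL_FIX_SIZE (QImode) and
   MINIPOOL_FIX_SIZE (HImode) both evaluate to 4, padding sub-word
   constants out to a full word, while MINIPOOL_FIX_SIZE (DImode)
   evaluates to 8.  */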

static Mnode *  minipool_vector_head;
static Mnode *  minipool_vector_tail;
static rtx      minipool_vector_label;
static int      minipool_pad;

/* The linked list of all minipool fixes required for this function.  */
Mfix *          minipool_fix_head;
Mfix *          minipool_fix_tail;
/* The fix entry for the current minipool, once it has been placed.  */
Mfix *          minipool_barrier;

/* Determines if INSN is the start of a jump table.  Returns the end
   of the TABLE or NULL_RTX.  */
static rtx
is_jump_table (rtx insn)
{
  rtx table;

  if (GET_CODE (insn) == JUMP_INSN
      && JUMP_LABEL (insn) != NULL
      && ((table = next_real_insn (JUMP_LABEL (insn)))
          == next_real_insn (insn))
      && table != NULL
      && GET_CODE (table) == JUMP_INSN
      && (GET_CODE (PATTERN (table)) == ADDR_VEC
          || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
    return table;

  return NULL_RTX;
}

#ifndef JUMP_TABLES_IN_TEXT_SECTION
#define JUMP_TABLES_IN_TEXT_SECTION 0
#endif

static HOST_WIDE_INT
get_jump_table_size (rtx insn)
{
  /* ADDR_VECs only take room if read-only data goes into the text
     section.  */
  if (JUMP_TABLES_IN_TEXT_SECTION || readonly_data_section == text_section)
    {
      rtx body = PATTERN (insn);
      int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
      HOST_WIDE_INT size;
      HOST_WIDE_INT modesize;

      modesize = GET_MODE_SIZE (GET_MODE (body));
      size = modesize * XVECLEN (body, elt);
      switch (modesize)
        {
        case 1:
          /* Round up size of TBB table to a halfword boundary.  */
          size = (size + 1) & ~(HOST_WIDE_INT)1;
          break;
        case 2:
          /* No padding necessary for TBH.  */
          break;
        case 4:
          /* Add two bytes for alignment on Thumb.  */
          if (TARGET_THUMB)
            size += 2;
          break;
        default:
          gcc_unreachable ();
        }
      return size;
    }

  return 0;
}
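
/* A quick example (illustrative, not from the original source): when
   jump tables live in the text section, a TBB-style ADDR_DIFF_VEC in
   QImode with five entries occupies 5 bytes rounded up to 6, while an
   SImode table with five entries occupies 20 bytes, or 22 on Thumb
   where two alignment bytes are added.  */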

/* Move a minipool fix MP from its current location to before MAX_MP.
   If MAX_MP is NULL, then MP doesn't need moving, but the addressing
   constraints may need updating.  */
static Mnode *
move_minipool_fix_forward_ref (Mnode *mp, Mnode *max_mp,
                               HOST_WIDE_INT max_address)
{
  /* The code below assumes these are different.  */
  gcc_assert (mp != max_mp);

  if (max_mp == NULL)
    {
      if (max_address < mp->max_address)
        mp->max_address = max_address;
    }
  else
    {
      if (max_address > max_mp->max_address - mp->fix_size)
        mp->max_address = max_mp->max_address - mp->fix_size;
      else
        mp->max_address = max_address;

      /* Unlink MP from its current position.  Since max_mp is non-null,
         mp->prev must be non-null.  */
      mp->prev->next = mp->next;
      if (mp->next != NULL)
        mp->next->prev = mp->prev;
      else
        minipool_vector_tail = mp->prev;

      /* Re-insert it before MAX_MP.  */
      mp->next = max_mp;
      mp->prev = max_mp->prev;
      max_mp->prev = mp;

      if (mp->prev != NULL)
        mp->prev->next = mp;
      else
        minipool_vector_head = mp;
    }

  /* Save the new entry.  */
  max_mp = mp;

  /* Scan over the preceding entries and adjust their addresses as
     required.  */
  while (mp->prev != NULL
         && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
    {
      mp->prev->max_address = mp->max_address - mp->prev->fix_size;
      mp = mp->prev;
    }

  return max_mp;
}

/* Add a constant to the minipool for a forward reference.  Returns the
   node added or NULL if the constant will not fit in this pool.  */
static Mnode *
add_minipool_forward_ref (Mfix *fix)
{
  /* If set, max_mp is the first pool_entry that has a lower
     constraint than the one we are trying to add.  */
  Mnode *       max_mp = NULL;
  HOST_WIDE_INT max_address = fix->address + fix->forwards - minipool_pad;
  Mnode *       mp;

  /* If the minipool starts before the end of FIX->INSN then this FIX
     cannot be placed into the current pool.  Furthermore, adding the
     new constant pool entry may cause the pool to start FIX_SIZE bytes
     earlier.  */
  if (minipool_vector_head &&
      (fix->address + get_attr_length (fix->insn)
       >= minipool_vector_head->max_address - fix->fix_size))
    return NULL;

  /* Scan the pool to see if a constant with the same value has
     already been added.  While we are doing this, also note the
     location where we must insert the constant if it doesn't already
     exist.  */
  for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
    {
      if (GET_CODE (fix->value) == GET_CODE (mp->value)
          && fix->mode == mp->mode
          && (GET_CODE (fix->value) != CODE_LABEL
              || (CODE_LABEL_NUMBER (fix->value)
                  == CODE_LABEL_NUMBER (mp->value)))
          && rtx_equal_p (fix->value, mp->value))
        {
          /* More than one fix references this entry.  */
          mp->refcount++;
          return move_minipool_fix_forward_ref (mp, max_mp, max_address);
        }

      /* Note the insertion point if necessary.  */
      if (max_mp == NULL
          && mp->max_address > max_address)
        max_mp = mp;

      /* If we are inserting an 8-byte aligned quantity and
         we have not already found an insertion point, then
         make sure that all such 8-byte aligned quantities are
         placed at the start of the pool.  */
      if (ARM_DOUBLEWORD_ALIGN
          && max_mp == NULL
          && fix->fix_size >= 8
          && mp->fix_size < 8)
        {
          max_mp = mp;
          max_address = mp->max_address;
        }
    }

  /* The value is not currently in the minipool, so we need to create
     a new entry for it.  If MAX_MP is NULL, the entry will be put on
     the end of the list since the placement is less constrained than
     any existing entry.  Otherwise, we insert the new fix before
     MAX_MP and, if necessary, adjust the constraints on the other
     entries.  */
  mp = XNEW (Mnode);
  mp->fix_size = fix->fix_size;
  mp->mode = fix->mode;
  mp->value = fix->value;
  mp->refcount = 1;
  /* Not yet required for a backwards ref.  */
  mp->min_address = -65536;

  if (max_mp == NULL)
    {
      mp->max_address = max_address;
      mp->next = NULL;
      mp->prev = minipool_vector_tail;

      if (mp->prev == NULL)
        {
          minipool_vector_head = mp;
          minipool_vector_label = gen_label_rtx ();
        }
      else
        mp->prev->next = mp;

      minipool_vector_tail = mp;
    }
  else
    {
      if (max_address > max_mp->max_address - mp->fix_size)
        mp->max_address = max_mp->max_address - mp->fix_size;
      else
        mp->max_address = max_address;

      mp->next = max_mp;
      mp->prev = max_mp->prev;
      max_mp->prev = mp;
      if (mp->prev != NULL)
        mp->prev->next = mp;
      else
        minipool_vector_head = mp;
    }

  /* Save the new entry.  */
  max_mp = mp;

  /* Scan over the preceding entries and adjust their addresses as
     required.  */
  while (mp->prev != NULL
         && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
    {
      mp->prev->max_address = mp->max_address - mp->prev->fix_size;
      mp = mp->prev;
    }

  return max_mp;
}

static Mnode *
move_minipool_fix_backward_ref (Mnode *mp, Mnode *min_mp,
                                HOST_WIDE_INT  min_address)
{
  HOST_WIDE_INT offset;

  /* The code below assumes these are different.  */
  gcc_assert (mp != min_mp);

  if (min_mp == NULL)
    {
      if (min_address > mp->min_address)
        mp->min_address = min_address;
    }
  else
    {
      /* We will adjust this below if it is too loose.  */
      mp->min_address = min_address;

      /* Unlink MP from its current position.  Since min_mp is non-null,
         mp->next must be non-null.  */
      mp->next->prev = mp->prev;
      if (mp->prev != NULL)
        mp->prev->next = mp->next;
      else
        minipool_vector_head = mp->next;

      /* Reinsert it after MIN_MP.  */
      mp->prev = min_mp;
      mp->next = min_mp->next;
      min_mp->next = mp;
      if (mp->next != NULL)
        mp->next->prev = mp;
      else
        minipool_vector_tail = mp;
    }

  min_mp = mp;

  offset = 0;
  for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
    {
      mp->offset = offset;
      if (mp->refcount > 0)
        offset += mp->fix_size;

      if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
        mp->next->min_address = mp->min_address + mp->fix_size;
    }

  return min_mp;
}

/* Add a constant to the minipool for a backward reference.  Returns the
   node added or NULL if the constant will not fit in this pool.

   Note that the code for insertion for a backwards reference can be
   somewhat confusing because the calculated offsets for each fix do
   not take into account the size of the pool (which is still under
   construction).  */
static Mnode *
add_minipool_backward_ref (Mfix *fix)
{
  /* If set, min_mp is the last pool_entry that has a lower constraint
     than the one we are trying to add.  */
  Mnode *min_mp = NULL;
  /* This can be negative, since it is only a constraint.  */
  HOST_WIDE_INT  min_address = fix->address - fix->backwards;
  Mnode *mp;

  /* If we can't reach the current pool from this insn, or if we can't
     insert this entry at the end of the pool without pushing other
     fixes out of range, then we don't try.  This ensures that we
     can't fail later on.  */
  if (min_address >= minipool_barrier->address
      || (minipool_vector_tail->min_address + fix->fix_size
          >= minipool_barrier->address))
    return NULL;

  /* Scan the pool to see if a constant with the same value has
     already been added.  While we are doing this, also note the
     location where we must insert the constant if it doesn't already
     exist.  */
  for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
    {
      if (GET_CODE (fix->value) == GET_CODE (mp->value)
          && fix->mode == mp->mode
          && (GET_CODE (fix->value) != CODE_LABEL
              || (CODE_LABEL_NUMBER (fix->value)
                  == CODE_LABEL_NUMBER (mp->value)))
          && rtx_equal_p (fix->value, mp->value)
          /* Check that there is enough slack to move this entry to the
             end of the table (this is conservative).  */
          && (mp->max_address
              > (minipool_barrier->address
                 + minipool_vector_tail->offset
                 + minipool_vector_tail->fix_size)))
        {
          mp->refcount++;
          return move_minipool_fix_backward_ref (mp, min_mp, min_address);
        }

      if (min_mp != NULL)
        mp->min_address += fix->fix_size;
      else
        {
          /* Note the insertion point if necessary.  */
          if (mp->min_address < min_address)
            {
              /* For now, we do not allow the insertion of 8-byte alignment
                 requiring nodes anywhere but at the start of the pool.  */
              if (ARM_DOUBLEWORD_ALIGN
                  && fix->fix_size >= 8 && mp->fix_size < 8)
                return NULL;
              else
                min_mp = mp;
            }
          else if (mp->max_address
                   < minipool_barrier->address + mp->offset + fix->fix_size)
            {
              /* Inserting before this entry would push the fix beyond
                 its maximum address (which can happen if we have
                 re-located a forwards fix); force the new fix to come
                 after it.  */
              if (ARM_DOUBLEWORD_ALIGN
                  && fix->fix_size >= 8 && mp->fix_size < 8)
                return NULL;
              else
                {
                  min_mp = mp;
                  min_address = mp->min_address + fix->fix_size;
                }
            }
          /* Do not insert a non-8-byte aligned quantity before 8-byte
             aligned quantities.  */
          else if (ARM_DOUBLEWORD_ALIGN
                   && fix->fix_size < 8
                   && mp->fix_size >= 8)
            {
              min_mp = mp;
              min_address = mp->min_address + fix->fix_size;
            }
        }
    }

  /* We need to create a new entry.  */
  mp = XNEW (Mnode);
  mp->fix_size = fix->fix_size;
  mp->mode = fix->mode;
  mp->value = fix->value;
  mp->refcount = 1;
  mp->max_address = minipool_barrier->address + 65536;

  mp->min_address = min_address;

  if (min_mp == NULL)
    {
      mp->prev = NULL;
      mp->next = minipool_vector_head;

      if (mp->next == NULL)
        {
          minipool_vector_tail = mp;
          minipool_vector_label = gen_label_rtx ();
        }
      else
        mp->next->prev = mp;

      minipool_vector_head = mp;
    }
  else
    {
      mp->next = min_mp->next;
      mp->prev = min_mp;
      min_mp->next = mp;

      if (mp->next != NULL)
        mp->next->prev = mp;
      else
        minipool_vector_tail = mp;
    }

  /* Save the new entry.  */
  min_mp = mp;

  if (mp->prev)
    mp = mp->prev;
  else
    mp->offset = 0;

  /* Scan over the following entries and adjust their offsets.  */
  while (mp->next != NULL)
    {
      if (mp->next->min_address < mp->min_address + mp->fix_size)
        mp->next->min_address = mp->min_address + mp->fix_size;

      if (mp->refcount)
        mp->next->offset = mp->offset + mp->fix_size;
      else
        mp->next->offset = mp->offset;

      mp = mp->next;
    }

  return min_mp;
}

static void
assign_minipool_offsets (Mfix *barrier)
{
  HOST_WIDE_INT offset = 0;
  Mnode *mp;

  minipool_barrier = barrier;

  for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
    {
      mp->offset = offset;

      if (mp->refcount > 0)
        offset += mp->fix_size;
    }
}

/* Output the literal table.  */
static void
dump_minipool (rtx scan)
{
  Mnode * mp;
  Mnode * nmp;
  int align64 = 0;

  if (ARM_DOUBLEWORD_ALIGN)
    for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
      if (mp->refcount > 0 && mp->fix_size >= 8)
        {
          align64 = 1;
          break;
        }

  if (dump_file)
    fprintf (dump_file,
             ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
             INSN_UID (scan), (unsigned long) minipool_barrier->address, align64 ? 8 : 4);

  scan = emit_label_after (gen_label_rtx (), scan);
  scan = emit_insn_after (align64 ? gen_align_8 () : gen_align_4 (), scan);
  scan = emit_label_after (minipool_vector_label, scan);

  for (mp = minipool_vector_head; mp != NULL; mp = nmp)
    {
      if (mp->refcount > 0)
        {
          if (dump_file)
            {
              fprintf (dump_file,
                       ";;  Offset %u, min %ld, max %ld ",
                       (unsigned) mp->offset, (unsigned long) mp->min_address,
                       (unsigned long) mp->max_address);
              arm_print_value (dump_file, mp->value);
              fputc ('\n', dump_file);
            }

          switch (mp->fix_size)
            {
#ifdef HAVE_consttable_1
            case 1:
              scan = emit_insn_after (gen_consttable_1 (mp->value), scan);
              break;

#endif
#ifdef HAVE_consttable_2
            case 2:
              scan = emit_insn_after (gen_consttable_2 (mp->value), scan);
              break;

#endif
#ifdef HAVE_consttable_4
            case 4:
              scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
              break;

#endif
#ifdef HAVE_consttable_8
            case 8:
              scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
              break;

#endif
#ifdef HAVE_consttable_16
            case 16:
              scan = emit_insn_after (gen_consttable_16 (mp->value), scan);
              break;

#endif
            default:
              gcc_unreachable ();
            }
        }

      nmp = mp->next;
      free (mp);
    }

  minipool_vector_head = minipool_vector_tail = NULL;
  scan = emit_insn_after (gen_consttable_end (), scan);
  scan = emit_barrier_after (scan);
}

/* Return the cost of forcibly inserting a barrier after INSN.  */
static int
arm_barrier_cost (rtx insn)
{
  /* Basing the location of the pool on the loop depth is preferable,
     but at the moment, the basic block information seems to be
     corrupted by this stage of the compilation.  */
  int base_cost = 50;
  rtx next = next_nonnote_insn (insn);

  if (next != NULL && GET_CODE (next) == CODE_LABEL)
    base_cost -= 20;

  switch (GET_CODE (insn))
    {
    case CODE_LABEL:
      /* It will always be better to place the table before the label, rather
         than after it.  */
      return 50;

    case INSN:
    case CALL_INSN:
      return base_cost;

    case JUMP_INSN:
      return base_cost - 10;

    default:
      return base_cost + 10;
    }
}
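
/* For example, an ordinary INSN whose successor is a CODE_LABEL costs
   50 - 20 = 30, while a JUMP_INSN in the same position costs 20; since
   create_fix_barrier prefers the lowest cost, the point just after a
   jump that precedes a label is among the cheapest places to force a
   barrier.  */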

/* Find the best place in the insn stream in the range
   (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
   Create the barrier by inserting a jump and add a new fix entry for
   it.  */
static Mfix *
create_fix_barrier (Mfix *fix, HOST_WIDE_INT max_address)
{
  HOST_WIDE_INT count = 0;
  rtx barrier;
  rtx from = fix->insn;
  /* The instruction after which we will insert the jump.  */
  rtx selected = NULL;
  int selected_cost;
  /* The address at which the jump instruction will be placed.  */
  HOST_WIDE_INT selected_address;
  Mfix * new_fix;
  HOST_WIDE_INT max_count = max_address - fix->address;
  rtx label = gen_label_rtx ();

  selected_cost = arm_barrier_cost (from);
  selected_address = fix->address;

  while (from && count < max_count)
    {
      rtx tmp;
      int new_cost;

      /* This code shouldn't have been called if there was a natural barrier
         within range.  */
      gcc_assert (GET_CODE (from) != BARRIER);

      /* Count the length of this insn.  */
      count += get_attr_length (from);

      /* If there is a jump table, add its length.  */
      tmp = is_jump_table (from);
      if (tmp != NULL)
        {
          count += get_jump_table_size (tmp);

          /* Jump tables aren't in a basic block, so base the cost on
             the dispatch insn.  If we select this location, we will
             still put the pool after the table.  */
          new_cost = arm_barrier_cost (from);

          if (count < max_count
              && (!selected || new_cost <= selected_cost))
            {
              selected = tmp;
              selected_cost = new_cost;
              selected_address = fix->address + count;
            }

          /* Continue after the dispatch table.  */
          from = NEXT_INSN (tmp);
          continue;
        }

      new_cost = arm_barrier_cost (from);

      if (count < max_count
          && (!selected || new_cost <= selected_cost))
        {
          selected = from;
          selected_cost = new_cost;
          selected_address = fix->address + count;
        }

      from = NEXT_INSN (from);
    }

  /* Make sure that we found a place to insert the jump.  */
  gcc_assert (selected);

  /* Create a new JUMP_INSN that branches around a barrier.  */
  from = emit_jump_insn_after (gen_jump (label), selected);
  JUMP_LABEL (from) = label;
  barrier = emit_barrier_after (from);
  emit_label_after (label, barrier);

  /* Create a minipool barrier entry for the new barrier.  */
  new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* new_fix));
  new_fix->insn = barrier;
  new_fix->address = selected_address;
  new_fix->next = fix->next;
  fix->next = new_fix;

  return new_fix;
}

/* Record that there is a natural barrier in the insn stream at
   ADDRESS.  */
static void
push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
{
  Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));

  fix->insn = insn;
  fix->address = address;

  fix->next = NULL;
  if (minipool_fix_head != NULL)
    minipool_fix_tail->next = fix;
  else
    minipool_fix_head = fix;

  minipool_fix_tail = fix;
}

/* Record INSN, which will need fixing up to load a value from the
   minipool.  ADDRESS is the offset of the insn from the start of the
   function; LOC is a pointer to the part of the insn which requires
   fixing; VALUE is the constant that must be loaded, which is of type
   MODE.  */
static void
push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx *loc,
                   enum machine_mode mode, rtx value)
{
  Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));

  fix->insn = insn;
  fix->address = address;
  fix->loc = loc;
  fix->mode = mode;
  fix->fix_size = MINIPOOL_FIX_SIZE (mode);
  fix->value = value;
  fix->forwards = get_attr_pool_range (insn);
  fix->backwards = get_attr_neg_pool_range (insn);
  fix->minipool = NULL;

  /* If an insn doesn't have a range defined for it, then it isn't
     expecting to be reworked by this code.  Better to stop now than
     to generate duff assembly code.  */
  gcc_assert (fix->forwards || fix->backwards);

  /* If an entry requires 8-byte alignment then assume all constant pools
     require 4 bytes of padding.  Trying to do this later on a per-pool
     basis is awkward because existing pool entries have to be modified.  */
  if (ARM_DOUBLEWORD_ALIGN && fix->fix_size >= 8)
    minipool_pad = 4;

  if (dump_file)
    {
      fprintf (dump_file,
               ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
               GET_MODE_NAME (mode),
               INSN_UID (insn), (unsigned long) address,
               -1 * (long)fix->backwards, (long)fix->forwards);
      arm_print_value (dump_file, fix->value);
      fprintf (dump_file, "\n");
    }

  /* Add it to the chain of fixes.  */
  fix->next = NULL;

  if (minipool_fix_head != NULL)
    minipool_fix_tail->next = fix;
  else
    minipool_fix_head = fix;

  minipool_fix_tail = fix;
}

/* Return the cost of synthesizing a 64-bit constant VAL inline.
   Returns the number of insns needed, or 99 if we don't know how to
   do it.  */
int
arm_const_double_inline_cost (rtx val)
{
  rtx lowpart, highpart;
  enum machine_mode mode;

  mode = GET_MODE (val);

  if (mode == VOIDmode)
    mode = DImode;

  gcc_assert (GET_MODE_SIZE (mode) == 8);

  lowpart = gen_lowpart (SImode, val);
  highpart = gen_highpart_mode (SImode, mode, val);

  gcc_assert (GET_CODE (lowpart) == CONST_INT);
  gcc_assert (GET_CODE (highpart) == CONST_INT);

  return (arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (lowpart),
                            NULL_RTX, NULL_RTX, 0, 0)
          + arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (highpart),
                              NULL_RTX, NULL_RTX, 0, 0));
}
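
/* For example, the DImode constant 0x0000000100000001 splits into a
   low word of 1 and a high word of 1; each half is a single MOV, so
   the inline cost is 2 instructions.  */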

/* Return true if it is worthwhile to split a 64-bit constant into two
   32-bit operations.  This is the case if optimizing for size, or
   if we have load delay slots, or if one 32-bit part can be done with
   a single data operation.  */
bool
arm_const_double_by_parts (rtx val)
{
  enum machine_mode mode = GET_MODE (val);
  rtx part;

  if (optimize_size || arm_ld_sched)
    return true;

  if (mode == VOIDmode)
    mode = DImode;

  part = gen_highpart_mode (SImode, mode, val);

  gcc_assert (GET_CODE (part) == CONST_INT);

  if (const_ok_for_arm (INTVAL (part))
      || const_ok_for_arm (~INTVAL (part)))
    return true;

  part = gen_lowpart (SImode, val);

  gcc_assert (GET_CODE (part) == CONST_INT);

  if (const_ok_for_arm (INTVAL (part))
      || const_ok_for_arm (~INTVAL (part)))
    return true;

  return false;
}
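
/* For example, a 64-bit constant whose high word is zero is always
   worth splitting: the high word is a single "mov rN, #0", leaving
   only the low word to be synthesized.  */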

/* Scan INSN and note any of its operands that need fixing.
   If DO_PUSHES is false we do not actually push any of the fixups
   needed.  The function returns TRUE if any fixups were needed/pushed.
   This is used by arm_memory_load_p() which needs to know about loads
   of constants that will be converted into minipool loads.  */
static bool
note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
{
  bool result = false;
  int opno;

  extract_insn (insn);

  if (!constrain_operands (1))
    fatal_insn_not_found (insn);

  if (recog_data.n_alternatives == 0)
    return false;

  /* Fill in recog_op_alt with information about the constraints of
     this insn.  */
  preprocess_constraints ();

  for (opno = 0; opno < recog_data.n_operands; opno++)
    {
      /* Things we need to fix can only occur in inputs.  */
      if (recog_data.operand_type[opno] != OP_IN)
        continue;

      /* If this alternative is a memory reference, then any mention
         of constants in this alternative is really to fool reload
         into allowing us to accept one there.  We need to fix them up
         now so that we output the right code.  */
      if (recog_op_alt[opno][which_alternative].memory_ok)
        {
          rtx op = recog_data.operand[opno];

          if (CONSTANT_P (op))
            {
              if (do_pushes)
                push_minipool_fix (insn, address, recog_data.operand_loc[opno],
                                   recog_data.operand_mode[opno], op);
              result = true;
            }
          else if (GET_CODE (op) == MEM
                   && GET_CODE (XEXP (op, 0)) == SYMBOL_REF
                   && CONSTANT_POOL_ADDRESS_P (XEXP (op, 0)))
            {
              if (do_pushes)
                {
                  rtx cop = avoid_constant_pool_reference (op);

                  /* Casting the address of something to a mode narrower
                     than a word can cause avoid_constant_pool_reference()
                     to return the pool reference itself.  That's no good to
                     us here.  Let's just hope that we can use the
                     constant pool value directly.  */
                  if (op == cop)
                    cop = get_pool_constant (XEXP (op, 0));

                  push_minipool_fix (insn, address,
                                     recog_data.operand_loc[opno],
                                     recog_data.operand_mode[opno], cop);
                }

              result = true;
            }
        }
    }

  return result;
}

/* GCC puts the pool in the wrong place for ARM, since we can only
   load addresses a limited distance around the pc.  We do some
   special munging to move the constant pool values to the correct
   point in the code.  */
static void
arm_reorg (void)
{
  rtx insn;
  HOST_WIDE_INT address = 0;
  Mfix * fix;

  minipool_fix_head = minipool_fix_tail = NULL;

  /* The first insn must always be a note, or the code below won't
     scan it properly.  */
  insn = get_insns ();
  gcc_assert (GET_CODE (insn) == NOTE);
  minipool_pad = 0;

  /* Scan all the insns and record the operands that will need fixing.  */
  for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
    {
      if (TARGET_CIRRUS_FIX_INVALID_INSNS
          && (arm_cirrus_insn_p (insn)
              || GET_CODE (insn) == JUMP_INSN
              || arm_memory_load_p (insn)))
        cirrus_reorg (insn);

      if (GET_CODE (insn) == BARRIER)
        push_minipool_barrier (insn, address);
      else if (INSN_P (insn))
        {
          rtx table;

          note_invalid_constants (insn, address, true);
          address += get_attr_length (insn);

          /* If the insn is a vector jump, add the size of the table
             and skip the table.  */
          if ((table = is_jump_table (insn)) != NULL)
            {
              address += get_jump_table_size (table);
              insn = table;
            }
        }
    }

  fix = minipool_fix_head;

  /* Now scan the fixups and perform the required changes.  */
  while (fix)
    {
      Mfix * ftmp;
      Mfix * fdel;
      Mfix * last_added_fix;
      Mfix * last_barrier = NULL;
      Mfix * this_fix;

      /* Skip any further barriers before the next fix.  */
      while (fix && GET_CODE (fix->insn) == BARRIER)
        fix = fix->next;

      /* No more fixes.  */
      if (fix == NULL)
        break;

      last_added_fix = NULL;

      for (ftmp = fix; ftmp; ftmp = ftmp->next)
        {
          if (GET_CODE (ftmp->insn) == BARRIER)
            {
              if (ftmp->address >= minipool_vector_head->max_address)
                break;

              last_barrier = ftmp;
            }
          else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
            break;

          last_added_fix = ftmp;  /* Keep track of the last fix added.  */
        }

      /* If we found a barrier, drop back to that; any fixes that we
         could have reached but come after the barrier will now go in
         the next mini-pool.  */
      if (last_barrier != NULL)
        {
          /* Reduce the refcount for those fixes that won't go into this
             pool after all.  */
          for (fdel = last_barrier->next;
               fdel && fdel != ftmp;
               fdel = fdel->next)
            {
              fdel->minipool->refcount--;
              fdel->minipool = NULL;
            }

          ftmp = last_barrier;
        }
      else
        {
          /* ftmp is the first fix that we can't fit into this pool
             and there are no natural barriers that we could use.
             Insert a new barrier in the code somewhere between the
             previous fix and this one, and arrange to jump around it.  */
          HOST_WIDE_INT max_address;

          /* The last item on the list of fixes must be a barrier, so
             we can never run off the end of the list of fixes without
             last_barrier being set.  */
          gcc_assert (ftmp);

          max_address = minipool_vector_head->max_address;
          /* Check that there isn't another fix that is in range that
             we couldn't fit into this pool because the pool was
             already too large: we need to put the pool before such an
             instruction.  The pool itself may come just after the
             fix because create_fix_barrier also allows space for a
             jump instruction.  */
          if (ftmp->address < max_address)
            max_address = ftmp->address + 1;

          last_barrier = create_fix_barrier (last_added_fix, max_address);
        }

      assign_minipool_offsets (last_barrier);

      while (ftmp)
        {
          if (GET_CODE (ftmp->insn) != BARRIER
              && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
                  == NULL))
            break;

          ftmp = ftmp->next;
        }

      /* Scan over the fixes we have identified for this pool, fixing them
         up and adding the constants to the pool itself.  */
      for (this_fix = fix; this_fix && ftmp != this_fix;
           this_fix = this_fix->next)
        if (GET_CODE (this_fix->insn) != BARRIER)
          {
            rtx addr
              = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
                                                  minipool_vector_label),
                               this_fix->minipool->offset);
            *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
          }

      dump_minipool (last_barrier->insn);
      fix = ftmp;
    }

  /* From now on we must synthesize any constants that we can't handle
     directly.  This can happen if the RTL gets split during final
     instruction generation.  */
  after_arm_reorg = 1;

  /* Free the minipool memory.  */
  obstack_free (&minipool_obstack, minipool_startobj);
}
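
/* In outline, each iteration of the fix-up loop above does four
   things: (1) accumulate forward references into the pending pool
   until one no longer fits; (2) drop back to the last natural barrier
   if one was seen, otherwise synthesize one with create_fix_barrier;
   (3) sweep the remaining fixes that can still reach the pool
   backwards via add_minipool_backward_ref; and (4) rewrite each fixed
   insn to load from the pool and emit the pool with dump_minipool.  */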

/* Routines to output assembly language.  */

/* If the rtx X holds one of the eight valid FPA immediate
   floating-point constants, return the string form of its value.
   In this way we can ensure that valid double constants are generated
   even when cross compiling.  */
const char *
fp_immediate_constant (rtx x)
{
  REAL_VALUE_TYPE r;
  int i;

  if (!fp_consts_inited)
    init_fp_table ();

  REAL_VALUE_FROM_CONST_DOUBLE (r, x);
  for (i = 0; i < 8; i++)
    if (REAL_VALUES_EQUAL (r, values_fp[i]))
      return strings_fp[i];

  gcc_unreachable ();
}

/* As for fp_immediate_constant, but value is passed directly, not in rtx.  */
static const char *
fp_const_from_val (REAL_VALUE_TYPE *r)
{
  int i;

  if (!fp_consts_inited)
    init_fp_table ();

  for (i = 0; i < 8; i++)
    if (REAL_VALUES_EQUAL (*r, values_fp[i]))
      return strings_fp[i];

  gcc_unreachable ();
}

/* Output the operands of a LDM/STM instruction to STREAM.
   MASK is the ARM register set mask of which only bits 0-15 are important.
   REG is the base register, either the frame pointer or the stack pointer,
   INSTR is the possibly suffixed load or store instruction.
   RFE is nonzero if the instruction should also copy spsr to cpsr.  */

static void
print_multi_reg (FILE *stream, const char *instr, unsigned reg,
                 unsigned long mask, int rfe)
{
  unsigned i;
  bool not_first = FALSE;

  gcc_assert (!rfe || (mask & (1 << PC_REGNUM)));
  fputc ('\t', stream);
  asm_fprintf (stream, instr, reg);
  fputc ('{', stream);

  for (i = 0; i <= LAST_ARM_REGNUM; i++)
    if (mask & (1 << i))
      {
        if (not_first)
          fprintf (stream, ", ");

        asm_fprintf (stream, "%r", i);
        not_first = TRUE;
      }

  if (rfe)
    fprintf (stream, "}^\n");
  else
    fprintf (stream, "}\n");
}
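
/* For example, with REG the stack pointer, MASK selecting r4, r5 and
   pc, and an INSTR such as "ldmfd\t%r!, ", this prints

        ldmfd   sp!, {r4, r5, pc}

   and a nonzero RFE closes the list with "}^" so that SPSR is copied
   back to CPSR.  */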

/* Output a FLDMD instruction to STREAM.
   BASE is the register containing the address.
   REG and COUNT specify the register range.
   Extra registers may be added to avoid hardware bugs.

   We output FLDMD even for ARMv5 VFP implementations.  Although
   FLDMD is technically not supported until ARMv6, it is believed
   that all VFP implementations support its use in this context.  */

static void
vfp_output_fldmd (FILE * stream, unsigned int base, int reg, int count)
{
  int i;

  /* Work around the ARM10 VFPr1 bug.  */
  if (count == 2 && !arm_arch6)
    {
      if (reg == 15)
        reg--;
      count++;
    }

  /* FLDMD may not load more than 16 doubleword registers at a time.  Split
     the load into multiple parts if we have to handle more than 16
     registers.  */
  if (count > 16)
    {
      vfp_output_fldmd (stream, base, reg, 16);
      vfp_output_fldmd (stream, base, reg + 16, count - 16);
      return;
    }

  fputc ('\t', stream);
  asm_fprintf (stream, "fldmfdd\t%r!, {", base);

  for (i = reg; i < reg + count; i++)
    {
      if (i > reg)
        fputs (", ", stream);
      asm_fprintf (stream, "d%d", i);
    }
  fputs ("}\n", stream);
}
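
/* For example, a COUNT of 17 with REG at d8 is split into one FLDMD
   popping d8-d23 followed by a second popping d24, since a single
   FLDMD may transfer at most 16 doubleword registers.  */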

/* Output the assembly for a store multiple.  */

const char *
vfp_output_fstmd (rtx * operands)
{
  char pattern[100];
  int p;
  int base;
  int i;

  strcpy (pattern, "fstmfdd\t%m0!, {%P1");
  p = strlen (pattern);

  gcc_assert (GET_CODE (operands[1]) == REG);

  base = (REGNO (operands[1]) - FIRST_VFP_REGNUM) / 2;
  for (i = 1; i < XVECLEN (operands[2], 0); i++)
    {
      p += sprintf (&pattern[p], ", d%d", base + i);
    }
  strcpy (&pattern[p], "}");

  output_asm_insn (pattern, operands);
  return "";
}
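
/* For example, an insn storing three register pairs starting at d8,
   with the stack pointer as the base address in the memory operand,
   builds and outputs the pattern "fstmfdd\tsp!, {d8, d9, d10}".  */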

/* Emit RTL to save a block of VFP register pairs to the stack.  Returns
   the number of bytes pushed.  */

static int
vfp_emit_fstmd (int base_reg, int count)
{
  rtx par;
  rtx dwarf;
  rtx tmp, reg;
  int i;

  /* Work around the ARM10 VFPr1 bug.  Data corruption can occur when
     exactly two register pairs are stored by a store multiple insn.  We
     avoid this by pushing an extra pair.  */
  if (count == 2 && !arm_arch6)
    {
      if (base_reg == LAST_VFP_REGNUM - 3)
        base_reg -= 2;
      count++;
    }

  /* FSTMD may not store more than 16 doubleword registers at once.  Split
     larger stores into multiple parts (up to a maximum of two, in
     practice).  */
  if (count > 16)
    {
      int saved;
      /* NOTE: base_reg is an internal register number, so each D register
         counts as 2.  */
      saved = vfp_emit_fstmd (base_reg + 32, count - 16);
      saved += vfp_emit_fstmd (base_reg, 16);
      return saved;
    }

  par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
  dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));

  reg = gen_rtx_REG (DFmode, base_reg);
  base_reg += 2;

  XVECEXP (par, 0, 0)
    = gen_rtx_SET (VOIDmode,
                   gen_frame_mem
                   (BLKmode,
                    gen_rtx_PRE_MODIFY (Pmode,
                                        stack_pointer_rtx,
                                        plus_constant
                                        (stack_pointer_rtx,
                                         - (count * 8)))
                    ),
                   gen_rtx_UNSPEC (BLKmode,
                                   gen_rtvec (1, reg),
                                   UNSPEC_PUSH_MULT));

  tmp = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
                     plus_constant (stack_pointer_rtx, -(count * 8)));
  RTX_FRAME_RELATED_P (tmp) = 1;
  XVECEXP (dwarf, 0, 0) = tmp;

  tmp = gen_rtx_SET (VOIDmode,
                     gen_frame_mem (DFmode, stack_pointer_rtx),
                     reg);
  RTX_FRAME_RELATED_P (tmp) = 1;
  XVECEXP (dwarf, 0, 1) = tmp;

  for (i = 1; i < count; i++)
    {
      reg = gen_rtx_REG (DFmode, base_reg);
      base_reg += 2;
      XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);

      tmp = gen_rtx_SET (VOIDmode,
                         gen_frame_mem (DFmode,
                                        plus_constant (stack_pointer_rtx,
                                                       i * 8)),
                         reg);
      RTX_FRAME_RELATED_P (tmp) = 1;
      XVECEXP (dwarf, 0, i + 1) = tmp;
    }

  par = emit_insn (par);
  add_reg_note (par, REG_FRAME_RELATED_EXPR, dwarf);
  RTX_FRAME_RELATED_P (par) = 1;

  return count * 8;
}
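
/* For example, saving three D registers (COUNT == 3) emits a single
   store-multiple that pre-decrements the stack pointer by 24 bytes
   and returns 24; the attached REG_FRAME_RELATED_EXPR note describes
   each of the three 8-byte saves individually for the unwinder.  */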

/* Emit a call instruction with pattern PAT.  ADDR is the address of
   the call target.  */

void
arm_emit_call_insn (rtx pat, rtx addr)
{
  rtx insn;

  insn = emit_call_insn (pat);

  /* The PIC register is live on entry to VxWorks PIC PLT entries.
     If the call might use such an entry, add a use of the PIC register
     to the instruction's CALL_INSN_FUNCTION_USAGE.  */
  if (TARGET_VXWORKS_RTP
      && flag_pic
      && GET_CODE (addr) == SYMBOL_REF
      && (SYMBOL_REF_DECL (addr)
          ? !targetm.binds_local_p (SYMBOL_REF_DECL (addr))
          : !SYMBOL_REF_LOCAL_P (addr)))
    {
      require_pic_register ();
      use_reg (&CALL_INSN_FUNCTION_USAGE (insn), cfun->machine->pic_reg);
    }
}

/* Output a 'call' insn.  */
const char *
output_call (rtx *operands)
{
  gcc_assert (!arm_arch5); /* Patterns should call blx <reg> directly.  */

  /* Handle calls to lr using ip (which may be clobbered in subr anyway).  */
  if (REGNO (operands[0]) == LR_REGNUM)
    {
      operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
      output_asm_insn ("mov%?\t%0, %|lr", operands);
    }

  output_asm_insn ("mov%?\t%|lr, %|pc", operands);

  if (TARGET_INTERWORK || arm_arch4t)
    output_asm_insn ("bx%?\t%0", operands);
  else
    output_asm_insn ("mov%?\t%|pc, %0", operands);

  return "";
}
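
/* For example, an indirect call through r0 on an interworking or
   ARMv4T target is emitted as

        mov     lr, pc
        bx      r0

   falling back to "mov pc, r0" for the final instruction where BX is
   unavailable.  */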

/* Output a 'call' insn whose target is a reference in memory.  This
   is disabled for ARMv5, where we prefer to use blx, because otherwise
   there's a significant performance overhead.  */
const char *
output_call_mem (rtx *operands)
{
  gcc_assert (!arm_arch5);
  if (TARGET_INTERWORK)
    {
      output_asm_insn ("ldr%?\t%|ip, %0", operands);
      output_asm_insn ("mov%?\t%|lr, %|pc", operands);
      output_asm_insn ("bx%?\t%|ip", operands);
    }
  else if (regno_use_in (LR_REGNUM, operands[0]))
    {
      /* LR is used in the memory address.  We load the address in the
         first instruction.  It's safe to use IP as the target of the
         load since the call will kill it anyway.  */
      output_asm_insn ("ldr%?\t%|ip, %0", operands);
      output_asm_insn ("mov%?\t%|lr, %|pc", operands);
      if (arm_arch4t)
        output_asm_insn ("bx%?\t%|ip", operands);
      else
        output_asm_insn ("mov%?\t%|pc, %|ip", operands);
    }
  else
    {
      output_asm_insn ("mov%?\t%|lr, %|pc", operands);
      output_asm_insn ("ldr%?\t%|pc, %0", operands);
    }

  return "";
}

/* Output a move from arm registers to an fpa register.
   OPERANDS[0] is an fpa register.
   OPERANDS[1] is the first register of an arm register pair.  */
const char *
output_mov_long_double_fpa_from_arm (rtx *operands)
{
  int arm_reg0 = REGNO (operands[1]);
  rtx ops[3];

  gcc_assert (arm_reg0 != IP_REGNUM);

  ops[0] = gen_rtx_REG (SImode, arm_reg0);
  ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
  ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);

  output_asm_insn ("stm%(fd%)\t%|sp!, {%0, %1, %2}", ops);
  output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);

  return "";
}

/* Output a move from an fpa register to arm registers.
   OPERANDS[0] is the first register of an arm register pair.
   OPERANDS[1] is an fpa register.  */
const char *
output_mov_long_double_arm_from_fpa (rtx *operands)
{
  int arm_reg0 = REGNO (operands[0]);
  rtx ops[3];

  gcc_assert (arm_reg0 != IP_REGNUM);

  ops[0] = gen_rtx_REG (SImode, arm_reg0);
  ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
  ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);

  output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
  output_asm_insn ("ldm%(fd%)\t%|sp!, {%0, %1, %2}", ops);
  return "";
}

/* Output a move of a long double from arm registers to arm registers.
   OPERANDS[0] is the destination.
   OPERANDS[1] is the source.  */
const char *
output_mov_long_double_arm_from_arm (rtx *operands)
{
  /* We have to be careful here because the two might overlap.  */
  int dest_start = REGNO (operands[0]);
  int src_start = REGNO (operands[1]);
  rtx ops[2];
  int i;

  if (dest_start < src_start)
    {
      for (i = 0; i < 3; i++)
        {
          ops[0] = gen_rtx_REG (SImode, dest_start + i);
          ops[1] = gen_rtx_REG (SImode, src_start + i);
          output_asm_insn ("mov%?\t%0, %1", ops);
        }
    }
  else
    {
      for (i = 2; i >= 0; i--)
        {
          ops[0] = gen_rtx_REG (SImode, dest_start + i);
          ops[1] = gen_rtx_REG (SImode, src_start + i);
          output_asm_insn ("mov%?\t%0, %1", ops);
        }
    }

  return "";
}
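
/* For example, copying {r1, r2, r3} to {r2, r3, r4} must work from
   the highest register downwards (mov r4, r3; mov r3, r2;
   mov r2, r1); copying in the other direction would overwrite part of
   the source before it had been read.  */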

/* Set DEST to SRC in two halfword steps: the low 16 bits first, then
   the high 16 bits when they are nonzero (a movw/movt-style pair).  */
void
arm_emit_movpair (rtx dest, rtx src)
{
  /* If the src is an immediate, simplify it.  */
  if (CONST_INT_P (src))
    {
      HOST_WIDE_INT val = INTVAL (src);
      emit_set_insn (dest, GEN_INT (val & 0x0000ffff));
      if ((val >> 16) & 0x0000ffff)
        emit_set_insn (gen_rtx_ZERO_EXTRACT (SImode, dest, GEN_INT (16),
                                             GEN_INT (16)),
                       GEN_INT ((val >> 16) & 0x0000ffff));
      return;
    }
  emit_set_insn (dest, gen_rtx_HIGH (SImode, src));
  emit_set_insn (dest, gen_rtx_LO_SUM (SImode, dest, src));
}
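
/* For example, for SRC == 0x12345678 this emits a set of DEST to
   0x5678 followed by a ZERO_EXTRACT store of 0x1234 into the top
   halfword, which the movw/movt patterns match; a symbolic SRC takes
   the HIGH/LO_SUM route to the same effect.  */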

/* Output a move from arm registers to an fpa register.
   OPERANDS[0] is an fpa register.
   OPERANDS[1] is the first register of an arm register pair.  */
const char *
output_mov_double_fpa_from_arm (rtx *operands)
{
  int arm_reg0 = REGNO (operands[1]);
  rtx ops[2];

  gcc_assert (arm_reg0 != IP_REGNUM);

  ops[0] = gen_rtx_REG (SImode, arm_reg0);
  ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
  output_asm_insn ("stm%(fd%)\t%|sp!, {%0, %1}", ops);
  output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
  return "";
}

/* Output a move from an fpa register to arm registers.
   OPERANDS[0] is the first register of an arm register pair.
   OPERANDS[1] is an fpa register.  */
const char *
output_mov_double_arm_from_fpa (rtx *operands)
{
  int arm_reg0 = REGNO (operands[0]);
  rtx ops[2];

  gcc_assert (arm_reg0 != IP_REGNUM);

  ops[0] = gen_rtx_REG (SImode, arm_reg0);
  ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
  output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
  output_asm_insn ("ldm%(fd%)\t%|sp!, {%0, %1}", ops);
  return "";
}

/* Output a move between double words.
   It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
   or MEM<-REG and all MEMs must be offsettable addresses.  */
const char *
output_move_double (rtx *operands)
{
  enum rtx_code code0 = GET_CODE (operands[0]);
  enum rtx_code code1 = GET_CODE (operands[1]);
  rtx otherops[3];

  if (code0 == REG)
    {
      unsigned int reg0 = REGNO (operands[0]);

      otherops[0] = gen_rtx_REG (SImode, 1 + reg0);

      gcc_assert (code1 == MEM);  /* Constraints should ensure this.  */

      switch (GET_CODE (XEXP (operands[1], 0)))
        {
        case REG:
          if (TARGET_LDRD
              && !(fix_cm3_ldrd && reg0 == REGNO (XEXP (operands[1], 0))))
            output_asm_insn ("ldr%(d%)\t%0, [%m1]", operands);
          else
            output_asm_insn ("ldm%(ia%)\t%m1, %M0", operands);
          break;

        case PRE_INC:
          gcc_assert (TARGET_LDRD);
          output_asm_insn ("ldr%(d%)\t%0, [%m1, #8]!", operands);
          break;

        case PRE_DEC:
          if (TARGET_LDRD)
            output_asm_insn ("ldr%(d%)\t%0, [%m1, #-8]!", operands);
          else
            output_asm_insn ("ldm%(db%)\t%m1!, %M0", operands);
          break;

        case POST_INC:
          if (TARGET_LDRD)
            output_asm_insn ("ldr%(d%)\t%0, [%m1], #8", operands);
          else
            output_asm_insn ("ldm%(ia%)\t%m1!, %M0", operands);
          break;

        case POST_DEC:
          gcc_assert (TARGET_LDRD);
          output_asm_insn ("ldr%(d%)\t%0, [%m1], #-8", operands);
          break;

        case PRE_MODIFY:
        case POST_MODIFY:
          /* Autoincrement addressing modes should never have overlapping
             base and destination registers, and overlapping index registers
             are already prohibited, so this doesn't need to worry about
             fix_cm3_ldrd.  */
          otherops[0] = operands[0];
          otherops[1] = XEXP (XEXP (XEXP (operands[1], 0), 1), 0);
          otherops[2] = XEXP (XEXP (XEXP (operands[1], 0), 1), 1);

          if (GET_CODE (XEXP (operands[1], 0)) == PRE_MODIFY)
            {
              if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
                {
                  /* Registers overlap so split out the increment.  */
                  output_asm_insn ("add%?\t%1, %1, %2", otherops);
                  output_asm_insn ("ldr%(d%)\t%0, [%1] @split", otherops);
                }
              else
                {
                  /* Use a single insn if we can.
                     FIXME: IWMMXT allows offsets larger than ldrd can
                     handle, fix these up with a pair of ldr.  */
                  if (TARGET_THUMB2
                      || GET_CODE (otherops[2]) != CONST_INT
                      || (INTVAL (otherops[2]) > -256
                          && INTVAL (otherops[2]) < 256))
                    output_asm_insn ("ldr%(d%)\t%0, [%1, %2]!", otherops);
                  else
                    {
                      output_asm_insn ("ldr%?\t%0, [%1, %2]!", otherops);
                      output_asm_insn ("ldr%?\t%H0, [%1, #4]", otherops);
                    }
                }
            }
          else
            {
              /* Use a single insn if we can.
                 FIXME: IWMMXT allows offsets larger than ldrd can handle,
                 fix these up with a pair of ldr.  */
              if (TARGET_THUMB2
                  || GET_CODE (otherops[2]) != CONST_INT
                  || (INTVAL (otherops[2]) > -256
                      && INTVAL (otherops[2]) < 256))
                output_asm_insn ("ldr%(d%)\t%0, [%1], %2", otherops);
              else
                {
                  output_asm_insn ("ldr%?\t%H0, [%1, #4]", otherops);
                  output_asm_insn ("ldr%?\t%0, [%1], %2", otherops);
                }
            }
          break;

        case LABEL_REF:
        case CONST:
          /* We might be able to use ldrd %0, %1 here.  However the range is
             different to ldr/adr, and it is broken on some ARMv7-M
             implementations.  */
          /* Use the second register of the pair to avoid problematic
             overlap.  */
          otherops[1] = operands[1];
          output_asm_insn ("adr%?\t%0, %1", otherops);
          operands[1] = otherops[0];
          if (TARGET_LDRD)
            output_asm_insn ("ldr%(d%)\t%0, [%1]", operands);
          else
            output_asm_insn ("ldm%(ia%)\t%1, %M0", operands);
          break;

          /* ??? This needs checking for thumb2.  */
        default:
          if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
                               GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
            {
              otherops[0] = operands[0];
              otherops[1] = XEXP (XEXP (operands[1], 0), 0);
              otherops[2] = XEXP (XEXP (operands[1], 0), 1);

              if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
                {
                  if (GET_CODE (otherops[2]) == CONST_INT && !TARGET_LDRD)
                    {
                      switch ((int) INTVAL (otherops[2]))
                        {
                        case -8:
                          output_asm_insn ("ldm%(db%)\t%1, %M0", otherops);
                          return "";
                        case -4:
                          if (TARGET_THUMB2)
                            break;
                          output_asm_insn ("ldm%(da%)\t%1, %M0", otherops);
                          return "";
                        case 4:
                          if (TARGET_THUMB2)
                            break;
                          output_asm_insn ("ldm%(ib%)\t%1, %M0", otherops);
                          return "";
                        }
                    }
                  otherops[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
                  operands[1] = otherops[0];
                  if (TARGET_LDRD
                      && (GET_CODE (otherops[2]) == REG
                          || TARGET_THUMB2
                          || (GET_CODE (otherops[2]) == CONST_INT
                              && INTVAL (otherops[2]) > -256
                              && INTVAL (otherops[2]) < 256)))
                    {
                      if (reg_overlap_mentioned_p (operands[0],
                                                   otherops[2]))
                        {
                          rtx tmp;
                          /* Swap base and index registers over to
                             avoid a conflict.  */
                          tmp = otherops[1];
                          otherops[1] = otherops[2];
                          otherops[2] = tmp;
                        }
                      /* If both registers conflict, it will usually
                         have been fixed by a splitter.  */
                      if (reg_overlap_mentioned_p (operands[0], otherops[2])
                          || (fix_cm3_ldrd && reg0 == REGNO (otherops[1])))
                        {
                          output_asm_insn ("add%?\t%0, %1, %2", otherops);
                          output_asm_insn ("ldr%(d%)\t%0, [%1]", operands);
                        }
                      else
                        {
                          otherops[0] = operands[0];
                          output_asm_insn ("ldr%(d%)\t%0, [%1, %2]", otherops);
                        }
                      return "";
                    }

                  if (GET_CODE (otherops[2]) == CONST_INT)
                    {
                      if (!(const_ok_for_arm (INTVAL (otherops[2]))))
                        output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
                      else
                        output_asm_insn ("add%?\t%0, %1, %2", otherops);
                    }
                  else
                    output_asm_insn ("add%?\t%0, %1, %2", otherops);
                }
              else
                output_asm_insn ("sub%?\t%0, %1, %2", otherops);

              if (TARGET_LDRD)
                return "ldr%(d%)\t%0, [%1]";

              return "ldm%(ia%)\t%1, %M0";
            }
          else
            {
              otherops[1] = adjust_address (operands[1], SImode, 4);
              /* Take care of overlapping base/data reg.  */
              if (reg_mentioned_p (operands[0], operands[1]))
                {
                  output_asm_insn ("ldr%?\t%0, %1", otherops);
                  output_asm_insn ("ldr%?\t%0, %1", operands);
                }
              else
                {
                  output_asm_insn ("ldr%?\t%0, %1", operands);
                  output_asm_insn ("ldr%?\t%0, %1", otherops);
                }
            }
        }
    }
  else
    {
      /* Constraints should ensure this.  */
      gcc_assert (code0 == MEM && code1 == REG);
      gcc_assert (REGNO (operands[1]) != IP_REGNUM);

      switch (GET_CODE (XEXP (operands[0], 0)))
        {
        case REG:
          if (TARGET_LDRD)
            output_asm_insn ("str%(d%)\t%1, [%m0]", operands);
          else
            output_asm_insn ("stm%(ia%)\t%m0, %M1", operands);
          break;

        case PRE_INC:
          gcc_assert (TARGET_LDRD);
          output_asm_insn ("str%(d%)\t%1, [%m0, #8]!", operands);
          break;

        case PRE_DEC:
          if (TARGET_LDRD)
            output_asm_insn ("str%(d%)\t%1, [%m0, #-8]!", operands);
          else
            output_asm_insn ("stm%(db%)\t%m0!, %M1", operands);
          break;

        case POST_INC:
          if (TARGET_LDRD)
            output_asm_insn ("str%(d%)\t%1, [%m0], #8", operands);
          else
            output_asm_insn ("stm%(ia%)\t%m0!, %M1", operands);
          break;

        case POST_DEC:
          gcc_assert (TARGET_LDRD);
          output_asm_insn ("str%(d%)\t%1, [%m0], #-8", operands);
          break;

        case PRE_MODIFY:
        case POST_MODIFY:
          otherops[0] = operands[1];
          otherops[1] = XEXP (XEXP (XEXP (operands[0], 0), 1), 0);
          otherops[2] = XEXP (XEXP (XEXP (operands[0], 0), 1), 1);

          /* IWMMXT allows offsets larger than strd can handle,
             fix these up with a pair of str.  */
          if (!TARGET_THUMB2
              && GET_CODE (otherops[2]) == CONST_INT
              && (INTVAL (otherops[2]) <= -256
                  || INTVAL (otherops[2]) >= 256))
            {
              if (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)
                {
                  output_asm_insn ("str%?\t%0, [%1, %2]!", otherops);
                  output_asm_insn ("str%?\t%H0, [%1, #4]", otherops);
                }
              else
                {
                  output_asm_insn ("str%?\t%H0, [%1, #4]", otherops);
                  output_asm_insn ("str%?\t%0, [%1], %2", otherops);
                }
            }
          else if (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)
            output_asm_insn ("str%(d%)\t%0, [%1, %2]!", otherops);
          else
            output_asm_insn ("str%(d%)\t%0, [%1], %2", otherops);
          break;

        case PLUS:
          otherops[2] = XEXP (XEXP (operands[0], 0), 1);
          if (GET_CODE (otherops[2]) == CONST_INT && !TARGET_LDRD)
            {
              switch ((int) INTVAL (XEXP (XEXP (operands[0], 0), 1)))
                {
                case -8:
                  output_asm_insn ("stm%(db%)\t%m0, %M1", operands);
                  return "";

                case -4:
                  if (TARGET_THUMB2)
                    break;
                  output_asm_insn ("stm%(da%)\t%m0, %M1", operands);
                  return "";

                case 4:
                  if (TARGET_THUMB2)
                    break;
                  output_asm_insn ("stm%(ib%)\t%m0, %M1", operands);
                  return "";
                }
            }
          if (TARGET_LDRD
              && (GET_CODE (otherops[2]) == REG
                  || TARGET_THUMB2
                  || (GET_CODE (otherops[2]) == CONST_INT
                      && INTVAL (otherops[2]) > -256
                      && INTVAL (otherops[2]) < 256)))
            {
              otherops[0] = operands[1];
              otherops[1] = XEXP (XEXP (operands[0], 0), 0);
              output_asm_insn ("str%(d%)\t%0, [%1, %2]", otherops);
              return "";
            }
          /* Fall through */

        default:
          otherops[0] = adjust_address (operands[0], SImode, 4);
          otherops[1] = operands[1];
          output_asm_insn ("str%?\t%1, %0", operands);
          output_asm_insn ("str%?\t%H1, %0", otherops);
        }
    }

  return "";
}
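
/* For example, a DImode load from a plain register address is emitted
   as a single ldrd where TARGET_LDRD holds, and as
   "ldmia rBASE, {rLO, rHI}" otherwise; the more involved cases above
   exist to dodge base/destination overlap and offset-range limits.  */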

/* Output a move, load or store for quad-word vectors in ARM registers.  Only
   handles MEMs accepted by neon_vector_mem_operand with TYPE=1.  */

const char *
output_move_quad (rtx *operands)
{
  if (REG_P (operands[0]))
    {
      /* Load, or reg->reg move.  */

      if (MEM_P (operands[1]))
        {
          switch (GET_CODE (XEXP (operands[1], 0)))
            {
            case REG:
              output_asm_insn ("ldm%(ia%)\t%m1, %M0", operands);
              break;

            case LABEL_REF:
            case CONST:
              output_asm_insn ("adr%?\t%0, %1", operands);
              output_asm_insn ("ldm%(ia%)\t%0, %M0", operands);
              break;

            default:
              gcc_unreachable ();
            }
        }
      else
        {
          rtx ops[2];
          int dest, src, i;

          gcc_assert (REG_P (operands[1]));

          dest = REGNO (operands[0]);
          src = REGNO (operands[1]);

          /* This seems pretty dumb, but hopefully GCC won't try to do it
             very often.  */
          if (dest < src)
            for (i = 0; i < 4; i++)
              {
                ops[0] = gen_rtx_REG (SImode, dest + i);
                ops[1] = gen_rtx_REG (SImode, src + i);
                output_asm_insn ("mov%?\t%0, %1", ops);
              }
          else
            for (i = 3; i >= 0; i--)
              {
                ops[0] = gen_rtx_REG (SImode, dest + i);
                ops[1] = gen_rtx_REG (SImode, src + i);
                output_asm_insn ("mov%?\t%0, %1", ops);
              }
        }
    }
  else
    {
      gcc_assert (MEM_P (operands[0]));
      gcc_assert (REG_P (operands[1]));
      gcc_assert (!reg_overlap_mentioned_p (operands[1], operands[0]));

      switch (GET_CODE (XEXP (operands[0], 0)))
        {
        case REG:
          output_asm_insn ("stm%(ia%)\t%m0, %M1", operands);
          break;

        default:
          gcc_unreachable ();
        }
    }

  return "";
}

/* Output a VFP load or store instruction.  */

const char *
output_move_vfp (rtx *operands)
{
  rtx reg, mem, addr, ops[2];
  int load = REG_P (operands[0]);
  int dp = GET_MODE_SIZE (GET_MODE (operands[0])) == 8;
  int integer_p = GET_MODE_CLASS (GET_MODE (operands[0])) == MODE_INT;
  const char *templ;
  char buff[50];
  enum machine_mode mode;

  reg = operands[!load];
  mem = operands[load];

  mode = GET_MODE (reg);

  gcc_assert (REG_P (reg));
  gcc_assert (IS_VFP_REGNUM (REGNO (reg)));
  gcc_assert (mode == SFmode
              || mode == DFmode
              || mode == SImode
              || mode == DImode
              || (TARGET_NEON && VALID_NEON_DREG_MODE (mode)));
  gcc_assert (MEM_P (mem));

  addr = XEXP (mem, 0);

  switch (GET_CODE (addr))
    {
    case PRE_DEC:
      templ = "f%smdb%c%%?\t%%0!, {%%%s1}%s";
      ops[0] = XEXP (addr, 0);
      ops[1] = reg;
      break;

    case POST_INC:
      templ = "f%smia%c%%?\t%%0!, {%%%s1}%s";
      ops[0] = XEXP (addr, 0);
      ops[1] = reg;
      break;

    default:
      templ = "f%s%c%%?\t%%%s0, %%1%s";
      ops[0] = reg;
      ops[1] = mem;
      break;
    }

  sprintf (buff, templ,
           load ? "ld" : "st",
           dp ? 'd' : 's',
           dp ? "P" : "",
           integer_p ? "\t%@ int" : "");
  output_asm_insn (buff, ops);

  return "";
}
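
/* For example, a DFmode load from a simple address uses the default
   template and prints "fldd dN, [addr]", while a POST_INC address
   becomes "fldmiad rBASE!, {dN}"; transfers of integer modes get an
   "@ int" comment appended to the instruction.  */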
12383
 
12384
/* Output a Neon quad-word load or store, or a load or store for
12385
   larger structure modes.
12386
 
12387
   WARNING: The ordering of elements is weird in big-endian mode,
12388
   because we use VSTM, as required by the EABI.  GCC RTL defines
12389
   element ordering based on in-memory order.  This can be differ
12390
   from the architectural ordering of elements within a NEON register.
12391
   The intrinsics defined in arm_neon.h use the NEON register element
12392
   ordering, not the GCC RTL element ordering.
12393
 
12394
   For example, the in-memory ordering of a big-endian a quadword
12395
   vector with 16-bit elements when stored from register pair {d0,d1}
12396
   will be (lowest address first, d0[N] is NEON register element N):
12397
 
12398
     [d0[3], d0[2], d0[1], d0[0], d1[7], d1[6], d1[5], d1[4]]
12399
 
12400
   When necessary, quadword registers (dN, dN+1) are moved to ARM
12401
   registers from rN in the order:
12402
 
12403
     dN -> (rN+1, rN), dN+1 -> (rN+3, rN+2)
12404
 
12405
   So that STM/LDM can be used on vectors in ARM registers, and the
12406
   same memory layout will result as if VSTM/VLDM were used.  */

const char *
output_move_neon (rtx *operands)
{
  rtx reg, mem, addr, ops[2];
  int regno, load = REG_P (operands[0]);
  const char *templ;
  char buff[50];
  enum machine_mode mode;

  reg = operands[!load];
  mem = operands[load];

  mode = GET_MODE (reg);

  gcc_assert (REG_P (reg));
  regno = REGNO (reg);
  gcc_assert (VFP_REGNO_OK_FOR_DOUBLE (regno)
              || NEON_REGNO_OK_FOR_QUAD (regno));
  gcc_assert (VALID_NEON_DREG_MODE (mode)
              || VALID_NEON_QREG_MODE (mode)
              || VALID_NEON_STRUCT_MODE (mode));
  gcc_assert (MEM_P (mem));

  addr = XEXP (mem, 0);

  /* Strip off const from addresses like (const (plus (...))).  */
  if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS)
    addr = XEXP (addr, 0);

  switch (GET_CODE (addr))
    {
    case POST_INC:
      templ = "v%smia%%?\t%%0!, %%h1";
      ops[0] = XEXP (addr, 0);
      ops[1] = reg;
      break;

    case PRE_DEC:
      /* FIXME: We should be using vld1/vst1 here in BE mode?  */
      templ = "v%smdb%%?\t%%0!, %%h1";
      ops[0] = XEXP (addr, 0);
      ops[1] = reg;
      break;

    case POST_MODIFY:
      /* FIXME: Not currently enabled in neon_vector_mem_operand.  */
      gcc_unreachable ();

    case LABEL_REF:
    case PLUS:
      {
        int nregs = HARD_REGNO_NREGS (REGNO (reg), mode) / 2;
        int i;
        int overlap = -1;
        for (i = 0; i < nregs; i++)
          {
            /* We're only using DImode here because it's a convenient size.  */
            ops[0] = gen_rtx_REG (DImode, REGNO (reg) + 2 * i);
            ops[1] = adjust_address (mem, DImode, 8 * i);
            if (reg_overlap_mentioned_p (ops[0], mem))
              {
                gcc_assert (overlap == -1);
                overlap = i;
              }
            else
              {
                sprintf (buff, "v%sr%%?\t%%P0, %%1", load ? "ld" : "st");
                output_asm_insn (buff, ops);
              }
          }
        if (overlap != -1)
          {
            ops[0] = gen_rtx_REG (DImode, REGNO (reg) + 2 * overlap);
            ops[1] = adjust_address (mem, SImode, 8 * overlap);
            sprintf (buff, "v%sr%%?\t%%P0, %%1", load ? "ld" : "st");
            output_asm_insn (buff, ops);
          }

        return "";
      }

    default:
      templ = "v%smia%%?\t%%m0, %%h1";
      ops[0] = mem;
      ops[1] = reg;
    }

  sprintf (buff, templ, load ? "ld" : "st");
  output_asm_insn (buff, ops);

  return "";
}
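
/* [Editor's illustration -- not part of arm.c.]  A standalone sketch of
   the overlap-deferral technique in the PLUS/LABEL_REF case above: each
   8-byte chunk is transferred in turn, but a chunk whose destination
   register also feeds the memory address would corrupt the address, so
   that one chunk is remembered and emitted last, when the address is no
   longer needed.  This is a simplification: registers are plain ints
   here and the address base is treated as one of them.  */
#include <stdio.h>

static void
emit (int destreg, int offset)
{
  printf ("vldr\td%d, [base, #%d]\n", destreg, offset);
}

int
main (void)
{
  int base_reg = 2;               /* hypothetical address base register */
  int first_dest = 0, nregs = 4;
  int i, overlap = -1;

  for (i = 0; i < nregs; i++)
    {
      if (first_dest + i == base_reg)
        overlap = i;              /* defer the clobbering transfer */
      else
        emit (first_dest + i, 8 * i);
    }
  if (overlap != -1)
    emit (first_dest + overlap, 8 * overlap);  /* safe: base is dead now */
  return 0;
}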

/* Compute and return the length of neon_mov<mode>, where <mode> is
   one of VSTRUCT modes: EI, OI, CI or XI.  */
int
arm_attr_length_move_neon (rtx insn)
{
  rtx reg, mem, addr;
  int load;
  enum machine_mode mode;

  extract_insn_cached (insn);

  if (REG_P (recog_data.operand[0]) && REG_P (recog_data.operand[1]))
    {
      mode = GET_MODE (recog_data.operand[0]);
      switch (mode)
        {
        case EImode:
        case OImode:
          return 8;
        case CImode:
          return 12;
        case XImode:
          return 16;
        default:
          gcc_unreachable ();
        }
    }

  load = REG_P (recog_data.operand[0]);
  reg = recog_data.operand[!load];
  mem = recog_data.operand[load];

  gcc_assert (MEM_P (mem));

  mode = GET_MODE (reg);
  addr = XEXP (mem, 0);

  /* Strip off const from addresses like (const (plus (...))).  */
  if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS)
    addr = XEXP (addr, 0);

  if (GET_CODE (addr) == LABEL_REF || GET_CODE (addr) == PLUS)
    {
      int insns = HARD_REGNO_NREGS (REGNO (reg), mode) / 2;
      return insns * 4;
    }
  else
    return 4;
}

/* Output an ADD r, s, #n where n may be too big for one instruction.
   If adding zero to one register, output nothing.  */
const char *
output_add_immediate (rtx *operands)
{
  HOST_WIDE_INT n = INTVAL (operands[2]);

  if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
    {
      if (n < 0)
        output_multi_immediate (operands,
                                "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
                                -n);
      else
        output_multi_immediate (operands,
                                "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
                                n);
    }

  return "";
}

/* Output a multiple immediate operation.
   OPERANDS is the vector of operands referred to in the output patterns.
   INSTR1 is the output pattern to use for the first constant.
   INSTR2 is the output pattern to use for subsequent constants.
   IMMED_OP is the index of the constant slot in OPERANDS.
   N is the constant value.  */
static const char *
output_multi_immediate (rtx *operands, const char *instr1, const char *instr2,
                        int immed_op, HOST_WIDE_INT n)
{
#if HOST_BITS_PER_WIDE_INT > 32
  n &= 0xffffffff;
#endif

  if (n == 0)
    {
      /* Quick and easy output.  */
      operands[immed_op] = const0_rtx;
      output_asm_insn (instr1, operands);
    }
  else
    {
      int i;
      const char * instr = instr1;

      /* Note that n is never zero here (which would give no output).  */
      for (i = 0; i < 32; i += 2)
        {
          if (n & (3 << i))
            {
              operands[immed_op] = GEN_INT (n & (255 << i));
              output_asm_insn (instr, operands);
              instr = instr2;
              i += 6;
            }
        }
    }

  return "";
}
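
/* [Editor's illustration -- not part of arm.c.]  A standalone sketch of
   the constant-splitting loop above: a 32-bit value is carved into 8-bit
   chunks, each starting at an even bit position, which matches the set
   of immediates an ARM data-processing instruction can encode (an 8-bit
   value rotated by an even amount).  0x12345678 splits into four chunks,
   so output_add_immediate would emit four add/sub instructions.  */
#include <stdio.h>

int
main (void)
{
  unsigned long n = 0x12345678UL;
  int i;

  for (i = 0; i < 32; i += 2)
    if (n & (3UL << i))
      {
        /* Prints 0x00000278, 0x00005400, 0x02340000, 0x10000000,
           which sum back to 0x12345678.  */
        printf ("chunk: 0x%08lx\n", n & (255UL << i));
        i += 6;  /* this chunk consumed 8 bits; resume after it */
      }
  return 0;
}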

/* Return the name of a shifter operation.  */
static const char *
arm_shift_nmem(enum rtx_code code)
{
  switch (code)
    {
    case ASHIFT:
      return ARM_LSL_NAME;

    case ASHIFTRT:
      return "asr";

    case LSHIFTRT:
      return "lsr";

    case ROTATERT:
      return "ror";

    default:
      abort();
    }
}

/* Return the appropriate ARM instruction for the operation code.
   The returned result should not be overwritten.  OP is the rtx of the
   operation.  SHIFT_FIRST_ARG is TRUE if the first argument of the operator
   was shifted.  */
const char *
arithmetic_instr (rtx op, int shift_first_arg)
{
  switch (GET_CODE (op))
    {
    case PLUS:
      return "add";

    case MINUS:
      return shift_first_arg ? "rsb" : "sub";

    case IOR:
      return "orr";

    case XOR:
      return "eor";

    case AND:
      return "and";

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATERT:
      return arm_shift_nmem(GET_CODE(op));

    default:
      gcc_unreachable ();
    }
}

/* Ensure valid constant shifts and return the appropriate shift mnemonic
   for the operation code.  The returned result should not be overwritten.
   OP is the rtx code of the shift.
   On exit, *AMOUNTP will be -1 if the shift is by a register, or the
   constant shift amount otherwise.  */
static const char *
shift_op (rtx op, HOST_WIDE_INT *amountp)
{
  const char * mnem;
  enum rtx_code code = GET_CODE (op);

  switch (GET_CODE (XEXP (op, 1)))
    {
    case REG:
    case SUBREG:
      *amountp = -1;
      break;

    case CONST_INT:
      *amountp = INTVAL (XEXP (op, 1));
      break;

    default:
      gcc_unreachable ();
    }

  switch (code)
    {
    case ROTATE:
      gcc_assert (*amountp != -1);
      *amountp = 32 - *amountp;
      code = ROTATERT;

      /* Fall through.  */

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATERT:
      mnem = arm_shift_nmem(code);
      break;

    case MULT:
      /* We never have to worry about the amount being other than a
         power of 2, since this case can never be reloaded from a reg.  */
      gcc_assert (*amountp != -1);
      *amountp = int_log2 (*amountp);
      return ARM_LSL_NAME;

    default:
      gcc_unreachable ();
    }

  if (*amountp != -1)
    {
      /* This is not 100% correct, but follows from the desire to merge
         multiplication by a power of 2 with the recognizer for a
         shift.  >=32 is not a valid shift for "lsl", so we must try and
         output a shift that produces the correct arithmetical result.
         Using lsr #32 is identical except for the fact that the carry bit
         is not set correctly if we set the flags; but we never use the
         carry bit from such an operation, so we can ignore that.  */
      if (code == ROTATERT)
        /* Rotate is just modulo 32.  */
        *amountp &= 31;
      else if (*amountp != (*amountp & 31))
        {
          if (code == ASHIFT)
            mnem = "lsr";
          *amountp = 32;
        }

      /* Shifts of 0 are no-ops.  */
      if (*amountp == 0)
        return NULL;
    }

  return mnem;
}
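
/* [Editor's illustration -- not part of arm.c.]  A standalone sketch of
   the amount normalization at the end of shift_op: rotates are reduced
   modulo 32, while shifts of 32 or more are clamped to 32, with a left
   shift rewritten as "lsr #32" because lsl cannot encode an amount of
   32 and both forms produce zero.  The enum and helper are inventions
   of this sketch; only the rules mirror the function above.  */
#include <stdio.h>

enum shift_kind { LSL, LSR, ASR, ROR };

static const char *const shift_name[] = { "lsl", "lsr", "asr", "ror" };

/* Returns the mnemonic to use, or NULL for a no-op; *amountp is
   normalized in place.  */
static const char *
normalize_shift (enum shift_kind kind, long *amountp)
{
  if (*amountp == -1)              /* shift amount is in a register */
    return shift_name[kind];
  if (kind == ROR)
    *amountp &= 31;                /* rotate is just modulo 32 */
  else if (*amountp != (*amountp & 31))
    {
      if (kind == LSL)
        kind = LSR;                /* lsl cannot encode #32; lsr #32 is 0 too */
      *amountp = 32;
    }
  if (*amountp == 0)
    return NULL;                   /* shifts of 0 are no-ops */
  return shift_name[kind];
}

int
main (void)
{
  long amt = 33;
  const char *m = normalize_shift (LSL, &amt);
  printf ("%s #%ld\n", m ? m : "(no-op)", amt);  /* prints: lsr #32 */
  return 0;
}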

/* Obtain the shift count for the power of two POWER.  */

static HOST_WIDE_INT
int_log2 (HOST_WIDE_INT power)
{
  HOST_WIDE_INT shift = 0;

  while ((((HOST_WIDE_INT) 1 << shift) & power) == 0)
    {
      gcc_assert (shift <= 31);
      shift++;
    }

  return shift;
}

/* Output a .ascii pseudo-op, keeping track of lengths.  This is
   because /bin/as is horribly restrictive.  The judgement about
   whether or not each character is 'printable' (and can be output as
   is) or not (and must be printed with an octal escape) must be made
   with reference to the *host* character set -- the situation is
   similar to that discussed in the comments above pp_c_char in
   c-pretty-print.c.  */

#define MAX_ASCII_LEN 51

void
output_ascii_pseudo_op (FILE *stream, const unsigned char *p, int len)
{
  int i;
  int len_so_far = 0;

  fputs ("\t.ascii\t\"", stream);

  for (i = 0; i < len; i++)
    {
      int c = p[i];

      if (len_so_far >= MAX_ASCII_LEN)
        {
          fputs ("\"\n\t.ascii\t\"", stream);
          len_so_far = 0;
        }

      if (ISPRINT (c))
        {
          if (c == '\\' || c == '\"')
            {
              putc ('\\', stream);
              len_so_far++;
            }
          putc (c, stream);
          len_so_far++;
        }
      else
        {
          fprintf (stream, "\\%03o", c);
          len_so_far += 4;
        }
    }

  fputs ("\"\n", stream);
}
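
/* [Editor's illustration -- not part of arm.c.]  A standalone version of
   the .ascii emitter above, using <ctype.h> isprint in place of GCC's
   ISPRINT macro and writing to stdout.  It shows the two techniques the
   function combines: octal-escaping non-printable bytes, and starting a
   fresh .ascii directive before a line grows past the length limit.  */
#include <stdio.h>
#include <ctype.h>

#define DEMO_MAX_ASCII_LEN 51

static void
emit_ascii (FILE *stream, const unsigned char *p, int len)
{
  int i, len_so_far = 0;

  fputs ("\t.ascii\t\"", stream);
  for (i = 0; i < len; i++)
    {
      int c = p[i];

      if (len_so_far >= DEMO_MAX_ASCII_LEN)
        {
          fputs ("\"\n\t.ascii\t\"", stream);  /* split the directive */
          len_so_far = 0;
        }
      if (isprint (c))
        {
          if (c == '\\' || c == '\"')
            {
              putc ('\\', stream);             /* escape backslash/quote */
              len_so_far++;
            }
          putc (c, stream);
          len_so_far++;
        }
      else
        {
          fprintf (stream, "\\%03o", c);       /* e.g. newline -> \012 */
          len_so_far += 4;
        }
    }
  fputs ("\"\n", stream);
}

int
main (void)
{
  /* len 4 includes the terminating NUL, emitted as \000.  */
  emit_ascii (stdout, (const unsigned char *) "hi\n", 4);
  return 0;
}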

/* Compute the register save mask for registers 0 through 12
   inclusive.  This code is used by arm_compute_save_reg_mask.  */

static unsigned long
arm_compute_save_reg0_reg12_mask (void)
{
  unsigned long func_type = arm_current_func_type ();
  unsigned long save_reg_mask = 0;
  unsigned int reg;

  if (IS_INTERRUPT (func_type))
    {
      unsigned int max_reg;
      /* Interrupt functions must not corrupt any registers,
         even call clobbered ones.  If this is a leaf function
         we can just examine the registers used by the RTL, but
         otherwise we have to assume that whatever function is
         called might clobber anything, and so we have to save
         all the call-clobbered registers as well.  */
      if (ARM_FUNC_TYPE (func_type) == ARM_FT_FIQ)
        /* FIQ handlers have registers r8 - r12 banked, so
           we only need to check r0 - r7.  Normal ISRs only
           bank r14 and r15, so we must check up to r12.
           r13 is the stack pointer which is always preserved,
           so we do not need to consider it here.  */
        max_reg = 7;
      else
        max_reg = 12;

      for (reg = 0; reg <= max_reg; reg++)
        if (df_regs_ever_live_p (reg)
            || (! current_function_is_leaf && call_used_regs[reg]))
          save_reg_mask |= (1 << reg);

      /* Also save the pic base register if necessary.  */
      if (flag_pic
          && !TARGET_SINGLE_PIC_BASE
          && arm_pic_register != INVALID_REGNUM
          && crtl->uses_pic_offset_table)
        save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
    }
  else if (IS_VOLATILE(func_type))
    {
      /* For noreturn functions we historically omitted register saves
         altogether.  However this really messes up debugging.  As a
         compromise save just the frame pointers.  Combined with the link
         register saved elsewhere this should be sufficient to get
         a backtrace.  */
      if (frame_pointer_needed)
        save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;
      if (df_regs_ever_live_p (ARM_HARD_FRAME_POINTER_REGNUM))
        save_reg_mask |= 1 << ARM_HARD_FRAME_POINTER_REGNUM;
      if (df_regs_ever_live_p (THUMB_HARD_FRAME_POINTER_REGNUM))
        save_reg_mask |= 1 << THUMB_HARD_FRAME_POINTER_REGNUM;
    }
  else
    {
      /* In the normal case we only need to save those registers
         which are call saved and which are used by this function.  */
      for (reg = 0; reg <= 11; reg++)
        if (df_regs_ever_live_p (reg) && ! call_used_regs[reg])
          save_reg_mask |= (1 << reg);

      /* Handle the frame pointer as a special case.  */
      if (frame_pointer_needed)
        save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;

      /* If we aren't loading the PIC register,
         don't stack it even though it may be live.  */
      if (flag_pic
          && !TARGET_SINGLE_PIC_BASE
          && arm_pic_register != INVALID_REGNUM
          && (df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM)
              || crtl->uses_pic_offset_table))
        save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;

      /* The prologue will copy SP into R0, so save it.  */
      if (IS_STACKALIGN (func_type))
        save_reg_mask |= 1;
    }

  /* Save registers so the exception handler can modify them.  */
  if (crtl->calls_eh_return)
    {
      unsigned int i;

      for (i = 0; ; i++)
        {
          reg = EH_RETURN_DATA_REGNO (i);
          if (reg == INVALID_REGNUM)
            break;
          save_reg_mask |= 1 << reg;
        }
    }

  return save_reg_mask;
}


/* Compute the number of bytes used to store the static chain register on the
   stack, above the stack frame. We need to know this accurately to get the
   alignment of the rest of the stack frame correct. */

static int arm_compute_static_chain_stack_bytes (void)
{
  unsigned long func_type = arm_current_func_type ();
  int static_chain_stack_bytes = 0;

  if (TARGET_APCS_FRAME && frame_pointer_needed && TARGET_ARM &&
      IS_NESTED (func_type) &&
      df_regs_ever_live_p (3) && crtl->args.pretend_args_size == 0)
    static_chain_stack_bytes = 4;

  return static_chain_stack_bytes;
}


/* Compute a bit mask of which registers need to be
   saved on the stack for the current function.
   This is used by arm_get_frame_offsets, which may add extra registers.  */

static unsigned long
arm_compute_save_reg_mask (void)
{
  unsigned int save_reg_mask = 0;
  unsigned long func_type = arm_current_func_type ();
  unsigned int reg;

  if (IS_NAKED (func_type))
    /* This should never really happen.  */
    return 0;

  /* If we are creating a stack frame, then we must save the frame pointer,
     IP (which will hold the old stack pointer), LR and the PC.  */
  if (TARGET_APCS_FRAME && frame_pointer_needed && TARGET_ARM)
    save_reg_mask |=
      (1 << ARM_HARD_FRAME_POINTER_REGNUM)
      | (1 << IP_REGNUM)
      | (1 << LR_REGNUM)
      | (1 << PC_REGNUM);

  save_reg_mask |= arm_compute_save_reg0_reg12_mask ();

  /* Decide if we need to save the link register.
     Interrupt routines have their own banked link register,
     so they never need to save it.
     Otherwise if we do not use the link register we do not need to save
     it.  If we are pushing other registers onto the stack however, we
     can save an instruction in the epilogue by pushing the link register
     now and then popping it back into the PC.  This incurs extra memory
     accesses though, so we only do it when optimizing for size, and only
     if we know that we will not need a fancy return sequence.  */
  if (df_regs_ever_live_p (LR_REGNUM)
      || (save_reg_mask
          && optimize_size
          && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
          && !crtl->calls_eh_return))
    save_reg_mask |= 1 << LR_REGNUM;

  if (cfun->machine->lr_save_eliminated)
    save_reg_mask &= ~ (1 << LR_REGNUM);

  if (TARGET_REALLY_IWMMXT
      && ((bit_count (save_reg_mask)
           + ARM_NUM_INTS (crtl->args.pretend_args_size +
                           arm_compute_static_chain_stack_bytes())
           ) % 2) != 0)
    {
      /* The total number of registers that are going to be pushed
         onto the stack is odd.  We need to ensure that the stack
         is 64-bit aligned before we start to save iWMMXt registers,
         and also before we start to create locals.  (A local variable
         might be a double or long long which we will load/store using
         an iWMMXt instruction).  Therefore we need to push another
         ARM register, so that the stack will be 64-bit aligned.  We
         try to avoid using the arg registers (r0 - r3) as they might be
         used to pass values in a tail call.  */
      for (reg = 4; reg <= 12; reg++)
        if ((save_reg_mask & (1 << reg)) == 0)
          break;

      if (reg <= 12)
        save_reg_mask |= (1 << reg);
      else
        {
          cfun->machine->sibcall_blocked = 1;
          save_reg_mask |= (1 << 3);
        }
    }

  /* We may need to push an additional register for use initializing the
     PIC base register.  */
  if (TARGET_THUMB2 && IS_NESTED (func_type) && flag_pic
      && (save_reg_mask & THUMB2_WORK_REGS) == 0)
    {
      reg = thumb_find_work_register (1 << 4);
      if (!call_used_regs[reg])
        save_reg_mask |= (1 << reg);
    }

  return save_reg_mask;
}
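
/* [Editor's illustration -- not part of arm.c.]  A standalone sketch of
   the 64-bit alignment fix-up above: if the number of words about to be
   pushed is odd, one extra register is added to the mask, preferring
   r4-r12 so the argument registers stay free for a possible tail call,
   and falling back to r3 otherwise.  GCC's __builtin_popcountl stands
   in for the file's internal bit_count.  */
#include <stdio.h>

int
main (void)
{
  unsigned long save_mask = (1UL << 4) | (1UL << 5) | (1UL << 14); /* r4,r5,lr */
  int reg = 0;

  if (__builtin_popcountl (save_mask) % 2 != 0)
    {
      for (reg = 4; reg <= 12; reg++)          /* first free non-arg register */
        if ((save_mask & (1UL << reg)) == 0)
          break;
      save_mask |= (reg <= 12) ? (1UL << reg)  /* found one */
                               : (1UL << 3);   /* fall back to r3 */
    }
  /* Now an even number of words is pushed, keeping SP 8-byte aligned.
     Prints: mask = 0x4070, 4 regs.  */
  printf ("mask = 0x%04lx, %d regs\n", save_mask,
          __builtin_popcountl (save_mask));
  return 0;
}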


/* Compute a bit mask of which registers need to be
   saved on the stack for the current function.  */
static unsigned long
thumb1_compute_save_reg_mask (void)
{
  unsigned long mask;
  unsigned reg;

  mask = 0;
  for (reg = 0; reg < 12; reg ++)
    if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
      mask |= 1 << reg;

  if (flag_pic
      && !TARGET_SINGLE_PIC_BASE
      && arm_pic_register != INVALID_REGNUM
      && crtl->uses_pic_offset_table)
    mask |= 1 << PIC_OFFSET_TABLE_REGNUM;

  /* See if we might need r11 for calls to _interwork_r11_call_via_rN().  */
  if (!frame_pointer_needed && CALLER_INTERWORKING_SLOT_SIZE > 0)
    mask |= 1 << ARM_HARD_FRAME_POINTER_REGNUM;

  /* LR will also be pushed if any lo regs are pushed.  */
  if (mask & 0xff || thumb_force_lr_save ())
    mask |= (1 << LR_REGNUM);

  /* Make sure we have a low work register if we need one.
     We will need one if we are going to push a high register,
     but we are not currently intending to push a low register.  */
  if ((mask & 0xff) == 0
      && ((mask & 0x0f00) || TARGET_BACKTRACE))
    {
      /* Use thumb_find_work_register to choose which register
         we will use.  If the register is live then we will
         have to push it.  Use LAST_LO_REGNUM as our fallback
         choice for the register to select.  */
      reg = thumb_find_work_register (1 << LAST_LO_REGNUM);
      /* Make sure the register returned by thumb_find_work_register is
         not part of the return value.  */
      if (reg * UNITS_PER_WORD <= (unsigned) arm_size_return_regs ())
        reg = LAST_LO_REGNUM;

      if (! call_used_regs[reg])
        mask |= 1 << reg;
    }

  /* The 504 below is 8 bytes less than 512 because there are two possible
     alignment words.  We can't tell here if they will be present or not so we
     have to play it safe and assume that they are. */
  if ((CALLER_INTERWORKING_SLOT_SIZE +
       ROUND_UP_WORD (get_frame_size ()) +
       crtl->outgoing_args_size) >= 504)
    {
      /* This is the same as the code in thumb1_expand_prologue() which
         determines which register to use for stack decrement. */
      for (reg = LAST_ARG_REGNUM + 1; reg <= LAST_LO_REGNUM; reg++)
        if (mask & (1 << reg))
          break;

      if (reg > LAST_LO_REGNUM)
        {
          /* Make sure we have a register available for stack decrement. */
          mask |= 1 << LAST_LO_REGNUM;
        }
    }

  return mask;
}


/* Return the number of bytes required to save VFP registers.  */
static int
arm_get_vfp_saved_size (void)
{
  unsigned int regno;
  int count;
  int saved;

  saved = 0;
  /* Space for saved VFP registers.  */
  if (TARGET_HARD_FLOAT && TARGET_VFP)
    {
      count = 0;
      for (regno = FIRST_VFP_REGNUM;
           regno < LAST_VFP_REGNUM;
           regno += 2)
        {
          if ((!df_regs_ever_live_p (regno) || call_used_regs[regno])
              && (!df_regs_ever_live_p (regno + 1) || call_used_regs[regno + 1]))
            {
              if (count > 0)
                {
                  /* Workaround ARM10 VFPr1 bug.  */
                  if (count == 2 && !arm_arch6)
                    count++;
                  saved += count * 8;
                }
              count = 0;
            }
          else
            count++;
        }
      if (count > 0)
        {
          if (count == 2 && !arm_arch6)
            count++;
          saved += count * 8;
        }
    }
  return saved;
}
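
/* [Editor's illustration -- not part of arm.c.]  A standalone sketch of
   the size computation above: the saved VFP registers are scanned in
   pairs, contiguous live runs are measured, and on pre-v6 cores a run of
   exactly two pairs is padded to three (the ARM10 VFPr1 bug workaround),
   each pair costing 8 bytes.  The live[] array is a stand-in for the
   df_regs_ever_live_p / call_used_regs test.  */
#include <stdio.h>

int
main (void)
{
  /* One flag per register *pair*; 1 = the pair must be saved.  */
  int live[8] = { 1, 1, 0, 1, 1, 1, 0, 0 };
  int arm_arch6 = 0;          /* pretend we are on an ARM10-class core */
  int i, count = 0, saved = 0;

  for (i = 0; i < 8; i++)
    {
      if (!live[i])
        {
          if (count > 0)
            {
              if (count == 2 && !arm_arch6)
                count++;      /* pad a run of exactly two pairs */
              saved += count * 8;
            }
          count = 0;
        }
      else
        count++;
    }
  if (count > 0)              /* final run, if the scan ended inside one */
    {
      if (count == 2 && !arm_arch6)
        count++;
      saved += count * 8;
    }
  printf ("%d bytes\n", saved);   /* runs of 2(+1) and 3 -> 48 bytes */
  return 0;
}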


/* Generate a function exit sequence.  If REALLY_RETURN is false, then do
   everything bar the final return instruction.  */
const char *
output_return_instruction (rtx operand, int really_return, int reverse)
{
  char conditional[10];
  char instr[100];
  unsigned reg;
  unsigned long live_regs_mask;
  unsigned long func_type;
  arm_stack_offsets *offsets;

  func_type = arm_current_func_type ();

  if (IS_NAKED (func_type))
    return "";

  if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
    {
      /* If this function was declared non-returning, and we have
         found a tail call, then we have to trust that the called
         function won't return.  */
      if (really_return)
        {
          rtx ops[2];

          /* Otherwise, trap an attempted return by aborting.  */
          ops[0] = operand;
          ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
                                       : "abort");
          assemble_external_libcall (ops[1]);
          output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
        }

      return "";
    }

  gcc_assert (!cfun->calls_alloca || really_return);

  sprintf (conditional, "%%?%%%c0", reverse ? 'D' : 'd');

  cfun->machine->return_used_this_function = 1;

  offsets = arm_get_frame_offsets ();
  live_regs_mask = offsets->saved_regs_mask;

  if (live_regs_mask)
    {
      const char * return_reg;

      /* If we do not have any special requirements for function exit
         (e.g. interworking) then we can load the return address
         directly into the PC.  Otherwise we must load it into LR.  */
      if (really_return
          && (IS_INTERRUPT (func_type) || !TARGET_INTERWORK))
        return_reg = reg_names[PC_REGNUM];
      else
        return_reg = reg_names[LR_REGNUM];

      if ((live_regs_mask & (1 << IP_REGNUM)) == (1 << IP_REGNUM))
        {
          /* There are three possible reasons for the IP register
             being saved.  1) a stack frame was created, in which case
             IP contains the old stack pointer, or 2) an ISR routine
             corrupted it, or 3) it was saved to align the stack on
             iWMMXt.  In case 1, restore IP into SP, otherwise just
             restore IP.  */
          if (frame_pointer_needed)
            {
              live_regs_mask &= ~ (1 << IP_REGNUM);
              live_regs_mask |=   (1 << SP_REGNUM);
            }
          else
            gcc_assert (IS_INTERRUPT (func_type) || TARGET_REALLY_IWMMXT);
        }

      /* On some ARM architectures it is faster to use LDR rather than
         LDM to load a single register.  On other architectures, the
         cost is the same.  In 26 bit mode, or for exception handlers,
         we have to use LDM to load the PC so that the CPSR is also
         restored.  */
      for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
        if (live_regs_mask == (1U << reg))
          break;

      if (reg <= LAST_ARM_REGNUM
          && (reg != LR_REGNUM
              || ! really_return
              || ! IS_INTERRUPT (func_type)))
        {
          sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
                   (reg == LR_REGNUM) ? return_reg : reg_names[reg]);
        }
      else
        {
          char *p;
          int first = 1;

          /* Generate the load multiple instruction to restore the
             registers.  Note we can get here, even if
             frame_pointer_needed is true, but only if sp already
             points to the base of the saved core registers.  */
          if (live_regs_mask & (1 << SP_REGNUM))
            {
              unsigned HOST_WIDE_INT stack_adjust;

              stack_adjust = offsets->outgoing_args - offsets->saved_regs;
              gcc_assert (stack_adjust == 0 || stack_adjust == 4);

              if (stack_adjust && arm_arch5 && TARGET_ARM)
                if (TARGET_UNIFIED_ASM)
                  sprintf (instr, "ldmib%s\t%%|sp, {", conditional);
                else
                  sprintf (instr, "ldm%sib\t%%|sp, {", conditional);
              else
                {
                  /* If we can't use ldmib (SA110 bug),
                     then try to pop r3 instead.  */
                  if (stack_adjust)
                    live_regs_mask |= 1 << 3;

                  if (TARGET_UNIFIED_ASM)
                    sprintf (instr, "ldmfd%s\t%%|sp, {", conditional);
                  else
                    sprintf (instr, "ldm%sfd\t%%|sp, {", conditional);
                }
            }
          else
            if (TARGET_UNIFIED_ASM)
              sprintf (instr, "pop%s\t{", conditional);
            else
              sprintf (instr, "ldm%sfd\t%%|sp!, {", conditional);

          p = instr + strlen (instr);

          for (reg = 0; reg <= SP_REGNUM; reg++)
            if (live_regs_mask & (1 << reg))
              {
                int l = strlen (reg_names[reg]);

                if (first)
                  first = 0;
                else
                  {
                    memcpy (p, ", ", 2);
                    p += 2;
                  }

                memcpy (p, "%|", 2);
                memcpy (p + 2, reg_names[reg], l);
                p += l + 2;
              }

          if (live_regs_mask & (1 << LR_REGNUM))
            {
              sprintf (p, "%s%%|%s}", first ? "" : ", ", return_reg);
              /* If returning from an interrupt, restore the CPSR.  */
              if (IS_INTERRUPT (func_type))
                strcat (p, "^");
            }
          else
            strcpy (p, "}");
        }

      output_asm_insn (instr, & operand);

      /* See if we need to generate an extra instruction to
         perform the actual function return.  */
      if (really_return
          && func_type != ARM_FT_INTERWORKED
          && (live_regs_mask & (1 << LR_REGNUM)) != 0)
        {
          /* The return has already been handled
             by loading the LR into the PC.  */
          really_return = 0;
        }
    }

  if (really_return)
    {
      switch ((int) ARM_FUNC_TYPE (func_type))
        {
        case ARM_FT_ISR:
        case ARM_FT_FIQ:
          /* ??? This is wrong for unified assembly syntax.  */
          sprintf (instr, "sub%ss\t%%|pc, %%|lr, #4", conditional);
          break;

        case ARM_FT_INTERWORKED:
          sprintf (instr, "bx%s\t%%|lr", conditional);
          break;

        case ARM_FT_EXCEPTION:
          /* ??? This is wrong for unified assembly syntax.  */
          sprintf (instr, "mov%ss\t%%|pc, %%|lr", conditional);
          break;

        default:
          /* Use bx if it's available.  */
          if (arm_arch5 || arm_arch4t)
            sprintf (instr, "bx%s\t%%|lr", conditional);
          else
            sprintf (instr, "mov%s\t%%|pc, %%|lr", conditional);
          break;
        }

      output_asm_insn (instr, & operand);
    }

  return "";
}
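
/* [Editor's illustration -- not part of arm.c.]  A standalone sketch of
   the string assembly in the multi-register branch above: the register
   list of an ldm/pop is built by appending comma-separated names for
   each bit set in the mask.  The "%|" assembler-dialect prefix used by
   the real code is omitted here for readability, and reg_names is a
   local copy of the usual ARM register names.  */
#include <stdio.h>
#include <string.h>

int
main (void)
{
  static const char *const reg_names[16] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "sl", "fp", "ip", "sp", "lr", "pc" };
  unsigned long live_regs_mask = (1UL << 4) | (1UL << 5) | (1UL << 15);
  char instr[100];
  char *p;
  int reg, first = 1;

  strcpy (instr, "ldmfd\tsp!, {");
  p = instr + strlen (instr);
  for (reg = 0; reg < 16; reg++)
    if (live_regs_mask & (1UL << reg))
      {
        int l = strlen (reg_names[reg]);
        if (first)
          first = 0;
        else
          {
            memcpy (p, ", ", 2);   /* separator before every later name */
            p += 2;
          }
        memcpy (p, reg_names[reg], l);
        p += l;
      }
  strcpy (p, "}");
  printf ("%s\n", instr);   /* prints: ldmfd   sp!, {r4, r5, pc} */
  return 0;
}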

/* Write the function name into the code section, directly preceding
   the function prologue.

   Code will be output similar to this:
     t0
         .ascii "arm_poke_function_name", 0
         .align
     t1
         .word 0xff000000 + (t1 - t0)
     arm_poke_function_name
         mov     ip, sp
         stmfd   sp!, {fp, ip, lr, pc}
         sub     fp, ip, #4

   When performing a stack backtrace, code can inspect the value
   of 'pc' stored at 'fp' + 0.  If the trace function then looks
   at location pc - 12 and finds the top 8 bits set, then it knows
   that there is a function name embedded immediately preceding this
   location, whose length is ((pc[-3]) & 0x00ffffff).

   We assume that pc is declared as a pointer to an unsigned long.

   It is of no benefit to output the function name if we are assembling
   a leaf function.  These function types will not contain a stack
   backtrace structure, therefore it is not possible to determine the
   function name.  */
void
arm_poke_function_name (FILE *stream, const char *name)
{
  unsigned long alignlength;
  unsigned long length;
  rtx           x;

  length      = strlen (name) + 1;
  alignlength = ROUND_UP_WORD (length);

  ASM_OUTPUT_ASCII (stream, name, length);
  ASM_OUTPUT_ALIGN (stream, 2);
  x = GEN_INT ((unsigned HOST_WIDE_INT) 0xff000000 + alignlength);
  assemble_aligned_integer (UNITS_PER_WORD, x);
}
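
/* [Editor's illustration -- not part of arm.c.]  A standalone sketch of
   the marker word computed by arm_poke_function_name: the name (plus its
   terminating NUL) is padded up to a word boundary and the padded length
   is added under the 0xff000000 tag that a backtracer looks for.  The
   rounding expression stands in for GCC's ROUND_UP_WORD macro.  */
#include <stdio.h>
#include <string.h>

int
main (void)
{
  const char *name = "arm_poke_function_name";
  unsigned long length = strlen (name) + 1;           /* include the NUL */
  unsigned long alignlength = (length + 3) & ~3UL;    /* ROUND_UP_WORD */
  unsigned long marker = 0xff000000UL + alignlength;

  printf ("\t.ascii\t\"%s\\0\"\n", name);
  printf ("\t.align\t2\n");
  printf ("\t.word\t0x%08lx\n", marker);   /* prints 0xff000018 here */
  /* A backtracer recovers the padded length as marker & 0x00ffffff.  */
  return 0;
}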

/* Place some comments into the assembler stream
   describing the current function.  */
static void
arm_output_function_prologue (FILE *f, HOST_WIDE_INT frame_size)
{
  unsigned long func_type;

  if (TARGET_THUMB1)
    {
      thumb1_output_function_prologue (f, frame_size);
      return;
    }

  /* Sanity check.  */
  gcc_assert (!arm_ccfsm_state && !arm_target_insn);

  func_type = arm_current_func_type ();

  switch ((int) ARM_FUNC_TYPE (func_type))
    {
    default:
    case ARM_FT_NORMAL:
      break;
    case ARM_FT_INTERWORKED:
      asm_fprintf (f, "\t%@ Function supports interworking.\n");
      break;
    case ARM_FT_ISR:
      asm_fprintf (f, "\t%@ Interrupt Service Routine.\n");
      break;
    case ARM_FT_FIQ:
      asm_fprintf (f, "\t%@ Fast Interrupt Service Routine.\n");
      break;
    case ARM_FT_EXCEPTION:
      asm_fprintf (f, "\t%@ ARM Exception Handler.\n");
      break;
    }

  if (IS_NAKED (func_type))
    asm_fprintf (f, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");

  if (IS_VOLATILE (func_type))
    asm_fprintf (f, "\t%@ Volatile: function does not return.\n");

  if (IS_NESTED (func_type))
    asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");
  if (IS_STACKALIGN (func_type))
    asm_fprintf (f, "\t%@ Stack Align: May be called with mis-aligned SP.\n");

  asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %wd\n",
               crtl->args.size,
               crtl->args.pretend_args_size, frame_size);

  asm_fprintf (f, "\t%@ frame_needed = %d, uses_anonymous_args = %d\n",
               frame_pointer_needed,
               cfun->machine->uses_anonymous_args);

  if (cfun->machine->lr_save_eliminated)
    asm_fprintf (f, "\t%@ link register save eliminated.\n");

  if (crtl->calls_eh_return)
    asm_fprintf (f, "\t@ Calls __builtin_eh_return.\n");

}

const char *
arm_output_epilogue (rtx sibling)
{
  int reg;
  unsigned long saved_regs_mask;
  unsigned long func_type;
  /* Floats_offset is the offset from the "virtual" frame.  In an APCS
     frame that is $fp + 4 for a non-variadic function.  */
  int floats_offset = 0;
  rtx operands[3];
  FILE * f = asm_out_file;
  unsigned int lrm_count = 0;
  int really_return = (sibling == NULL);
  int start_reg;
  arm_stack_offsets *offsets;

  /* If we have already generated the return instruction
     then it is futile to generate anything else.  */
  if (use_return_insn (FALSE, sibling) &&
      (cfun->machine->return_used_this_function != 0))
    return "";

  func_type = arm_current_func_type ();

  if (IS_NAKED (func_type))
    /* Naked functions don't have epilogues.  */
    return "";

  if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
    {
      rtx op;

      /* A volatile function should never return.  Call abort.  */
      op = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)" : "abort");
      assemble_external_libcall (op);
      output_asm_insn ("bl\t%a0", &op);

      return "";
    }

  /* If we are throwing an exception, then we really must be doing a
     return, so we can't tail-call.  */
  gcc_assert (!crtl->calls_eh_return || really_return);

  offsets = arm_get_frame_offsets ();
  saved_regs_mask = offsets->saved_regs_mask;

  if (TARGET_IWMMXT)
    lrm_count = bit_count (saved_regs_mask);

  floats_offset = offsets->saved_args;
  /* Compute how far away the floats will be.  */
  for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
    if (saved_regs_mask & (1 << reg))
      floats_offset += 4;

  if (TARGET_APCS_FRAME && frame_pointer_needed && TARGET_ARM)
    {
      /* This variable is for the Virtual Frame Pointer, not VFP regs.  */
      int vfp_offset = offsets->frame;

      if (TARGET_FPA_EMU2)
        {
          for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
            if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
              {
                floats_offset += 12;
                asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
                             reg, FP_REGNUM, floats_offset - vfp_offset);
              }
        }
      else
        {
          start_reg = LAST_FPA_REGNUM;

          for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
            {
              if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
                {
                  floats_offset += 12;

                  /* We can't unstack more than four registers at once.  */
                  if (start_reg - reg == 3)
                    {
                      asm_fprintf (f, "\tlfm\t%r, 4, [%r, #-%d]\n",
                                   reg, FP_REGNUM, floats_offset - vfp_offset);
                      start_reg = reg - 1;
                    }
                }
              else
                {
                  if (reg != start_reg)
                    asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
                                 reg + 1, start_reg - reg,
                                 FP_REGNUM, floats_offset - vfp_offset);
                  start_reg = reg - 1;
                }
            }

          /* Just in case the last register checked also needs unstacking.  */
          if (reg != start_reg)
            asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
                         reg + 1, start_reg - reg,
                         FP_REGNUM, floats_offset - vfp_offset);
        }

      if (TARGET_HARD_FLOAT && TARGET_VFP)
        {
          int saved_size;

          /* The fldmd insns do not have base+offset addressing
             modes, so we use IP to hold the address.  */
          saved_size = arm_get_vfp_saved_size ();

          if (saved_size > 0)
            {
              floats_offset += saved_size;
              asm_fprintf (f, "\tsub\t%r, %r, #%d\n", IP_REGNUM,
                           FP_REGNUM, floats_offset - vfp_offset);
            }
          start_reg = FIRST_VFP_REGNUM;
          for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
            {
              if ((!df_regs_ever_live_p (reg) || call_used_regs[reg])
                  && (!df_regs_ever_live_p (reg + 1) || call_used_regs[reg + 1]))
                {
                  if (start_reg != reg)
                    vfp_output_fldmd (f, IP_REGNUM,
                                      (start_reg - FIRST_VFP_REGNUM) / 2,
                                      (reg - start_reg) / 2);
                  start_reg = reg + 2;
                }
            }
          if (start_reg != reg)
            vfp_output_fldmd (f, IP_REGNUM,
                              (start_reg - FIRST_VFP_REGNUM) / 2,
                              (reg - start_reg) / 2);
        }

      if (TARGET_IWMMXT)
        {
          /* The frame pointer is guaranteed to be non-double-word aligned.
             This is because it is set to (old_stack_pointer - 4) and the
             old_stack_pointer was double word aligned.  Thus the offset to
             the iWMMXt registers to be loaded must also be non-double-word
             sized, so that the resultant address *is* double-word aligned.
             We can ignore floats_offset since that was already included in
             the live_regs_mask.  */
          lrm_count += (lrm_count % 2 ? 2 : 1);

          for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
            if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
              {
                asm_fprintf (f, "\twldrd\t%r, [%r, #-%d]\n",
                             reg, FP_REGNUM, lrm_count * 4);
                lrm_count += 2;
              }
        }

      /* saved_regs_mask should contain the IP, which at the time of stack
         frame generation actually contains the old stack pointer.  So a
         quick way to unwind the stack is just pop the IP register directly
         into the stack pointer.  */
      gcc_assert (saved_regs_mask & (1 << IP_REGNUM));
      saved_regs_mask &= ~ (1 << IP_REGNUM);
      saved_regs_mask |=   (1 << SP_REGNUM);

      /* There are two registers left in saved_regs_mask - LR and PC.  We
         only need to restore the LR register (the return address), but to
         save time we can load it directly into the PC, unless we need a
         special function exit sequence, or we are not really returning.  */
      if (really_return
          && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
          && !crtl->calls_eh_return)
        /* Delete the LR from the register mask, so that the LR on
           the stack is loaded into the PC in the register mask.  */
        saved_regs_mask &= ~ (1 << LR_REGNUM);
      else
        saved_regs_mask &= ~ (1 << PC_REGNUM);

      /* We must use SP as the base register, because SP is one of the
         registers being restored.  If an interrupt or page fault
         happens in the ldm instruction, the SP might or might not
         have been restored.  That would be bad, as then SP will no
         longer indicate the safe area of stack, and we can get stack
         corruption.  Using SP as the base register means that it will
         be reset correctly to the original value, should an interrupt
         occur.  If the stack pointer already points at the right
         place, then omit the subtraction.  */
      if (offsets->outgoing_args != (1 + (int) bit_count (saved_regs_mask))
          || cfun->calls_alloca)
        asm_fprintf (f, "\tsub\t%r, %r, #%d\n", SP_REGNUM, FP_REGNUM,
                     4 * bit_count (saved_regs_mask));
      print_multi_reg (f, "ldmfd\t%r, ", SP_REGNUM, saved_regs_mask, 0);

      if (IS_INTERRUPT (func_type))
        /* Interrupt handlers will have pushed the
           IP onto the stack, so restore it now.  */
        print_multi_reg (f, "ldmfd\t%r!, ", SP_REGNUM, 1 << IP_REGNUM, 0);
    }
  else
    {
      /* This branch is executed for ARM mode (non-apcs frames) and
         Thumb-2 mode. Frame layout is essentially the same for those
         cases, except that in ARM mode the frame pointer points to the
         first saved register, while in Thumb-2 mode the frame pointer points
         to the last saved register.

         It is possible to make the frame pointer point to the last saved
         register in both cases, and remove some conditionals below.
         That means that fp setup in prologue would be just "mov fp, sp"
         and sp restore in epilogue would be just "mov sp, fp", whereas
         now we have to use add/sub in those cases. However, the value
         of that would be marginal, as both mov and add/sub are 32-bit
         in ARM mode, and it would require extra conditionals
         in arm_expand_prologue to distinguish the ARM-apcs-frame case
         (where the frame pointer is required to point at the first register)
         from ARM-non-apcs-frame. Therefore, such a change is postponed
         until a real need arises.  */
      unsigned HOST_WIDE_INT amount;
      int rfe;
      /* Restore stack pointer if necessary.  */
      if (TARGET_ARM && frame_pointer_needed)
        {
          operands[0] = stack_pointer_rtx;
          operands[1] = hard_frame_pointer_rtx;

          operands[2] = GEN_INT (offsets->frame - offsets->saved_regs);
          output_add_immediate (operands);
        }
      else
        {
          if (frame_pointer_needed)
            {
              /* For Thumb-2 restore sp from the frame pointer.
                 Operand restrictions mean we have to increment FP, then copy
                 to SP.  */
              amount = offsets->locals_base - offsets->saved_regs;
              operands[0] = hard_frame_pointer_rtx;
            }
          else
            {
              unsigned long count;
              operands[0] = stack_pointer_rtx;
              amount = offsets->outgoing_args - offsets->saved_regs;
              /* Pop call-clobbered registers if it avoids a
                 separate stack adjustment.  */
              count = offsets->saved_regs - offsets->saved_args;
              if (optimize_size
                  && count != 0
                  && !crtl->calls_eh_return
                  && bit_count(saved_regs_mask) * 4 == count
                  && !IS_INTERRUPT (func_type)
                  && !crtl->tail_call_emit)
                {
                  unsigned long mask;
                  mask = (1 << (arm_size_return_regs() / 4)) - 1;
                  mask ^= 0xf;
                  mask &= ~saved_regs_mask;
                  reg = 0;
                  while (bit_count (mask) * 4 > amount)
                    {
                      while ((mask & (1 << reg)) == 0)
                        reg++;
                      mask &= ~(1 << reg);
                    }
                  if (bit_count (mask) * 4 == amount) {
                      amount = 0;
                      saved_regs_mask |= mask;
                  }
                }
            }

          if (amount)
            {
              operands[1] = operands[0];
              operands[2] = GEN_INT (amount);
              output_add_immediate (operands);
            }
          if (frame_pointer_needed)
            asm_fprintf (f, "\tmov\t%r, %r\n",
                         SP_REGNUM, HARD_FRAME_POINTER_REGNUM);
        }

      if (TARGET_FPA_EMU2)
        {
          for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
            if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
              asm_fprintf (f, "\tldfe\t%r, [%r], #12\n",
                           reg, SP_REGNUM);
        }
      else
        {
          start_reg = FIRST_FPA_REGNUM;

          for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
            {
              if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
                {
                  if (reg - start_reg == 3)
                    {
                      asm_fprintf (f, "\tlfmfd\t%r, 4, [%r]!\n",
                                   start_reg, SP_REGNUM);
                      start_reg = reg + 1;
                    }
                }
              else
                {
                  if (reg != start_reg)
                    asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
                                 start_reg, reg - start_reg,
                                 SP_REGNUM);

                  start_reg = reg + 1;
                }
            }

          /* Just in case the last register checked also needs unstacking.  */
          if (reg != start_reg)
            asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
                         start_reg, reg - start_reg, SP_REGNUM);
        }

      if (TARGET_HARD_FLOAT && TARGET_VFP)
        {
          int end_reg = LAST_VFP_REGNUM + 1;

          /* Scan the registers in reverse order.  We need to match
             any groupings made in the prologue and generate matching
             pop operations.  */
          for (reg = LAST_VFP_REGNUM - 1; reg >= FIRST_VFP_REGNUM; reg -= 2)
            {
              if ((!df_regs_ever_live_p (reg) || call_used_regs[reg])
                  && (!df_regs_ever_live_p (reg + 1)
                      || call_used_regs[reg + 1]))
                {
                  if (end_reg > reg + 2)
                    vfp_output_fldmd (f, SP_REGNUM,
                                      (reg + 2 - FIRST_VFP_REGNUM) / 2,
                                      (end_reg - (reg + 2)) / 2);
                  end_reg = reg;
                }
            }
          if (end_reg > reg + 2)
            vfp_output_fldmd (f, SP_REGNUM, 0,
                              (end_reg - (reg + 2)) / 2);
        }

      if (TARGET_IWMMXT)
        for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
          if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
            asm_fprintf (f, "\twldrd\t%r, [%r], #8\n", reg, SP_REGNUM);

      /* If we can, restore the LR into the PC.  */
      if (ARM_FUNC_TYPE (func_type) != ARM_FT_INTERWORKED
          && (TARGET_ARM || ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL)
          && !IS_STACKALIGN (func_type)
          && really_return
          && crtl->args.pretend_args_size == 0
          && saved_regs_mask & (1 << LR_REGNUM)
          && !crtl->calls_eh_return)
        {
          saved_regs_mask &= ~ (1 << LR_REGNUM);
          saved_regs_mask |=   (1 << PC_REGNUM);
          rfe = IS_INTERRUPT (func_type);
        }
      else
        rfe = 0;

      /* Load the registers off the stack.  If we only have one register
         to load use the LDR instruction - it is faster.  For Thumb-2
         always use pop and the assembler will pick the best instruction.  */
      if (TARGET_ARM && saved_regs_mask == (1 << LR_REGNUM)
          && !IS_INTERRUPT(func_type))
        {
          asm_fprintf (f, "\tldr\t%r, [%r], #4\n", LR_REGNUM, SP_REGNUM);
        }
      else if (saved_regs_mask)
        {
          if (saved_regs_mask & (1 << SP_REGNUM))
            /* Note - write back to the stack register is not enabled
               (i.e. "ldmfd sp!...").  We know that the stack pointer is
               in the list of registers and if we add writeback the
               instruction becomes UNPREDICTABLE.  */
            print_multi_reg (f, "ldmfd\t%r, ", SP_REGNUM, saved_regs_mask,
                             rfe);
          else if (TARGET_ARM)
            print_multi_reg (f, "ldmfd\t%r!, ", SP_REGNUM, saved_regs_mask,
                             rfe);
          else
            print_multi_reg (f, "pop\t", SP_REGNUM, saved_regs_mask, 0);
        }

      if (crtl->args.pretend_args_size)
        {
          /* Unwind the pre-pushed regs.  */
          operands[0] = operands[1] = stack_pointer_rtx;
          operands[2] = GEN_INT (crtl->args.pretend_args_size);
          output_add_immediate (operands);
        }
    }

  /* We may have already restored PC directly from the stack.  */
  if (!really_return || saved_regs_mask & (1 << PC_REGNUM))
    return "";

  /* Stack adjustment for exception handler.  */
  if (crtl->calls_eh_return)
    asm_fprintf (f, "\tadd\t%r, %r, %r\n", SP_REGNUM, SP_REGNUM,
                 ARM_EH_STACKADJ_REGNUM);

  /* Generate the return instruction.  */
  switch ((int) ARM_FUNC_TYPE (func_type))
    {
    case ARM_FT_ISR:
    case ARM_FT_FIQ:
      asm_fprintf (f, "\tsubs\t%r, %r, #4\n", PC_REGNUM, LR_REGNUM);
      break;

    case ARM_FT_EXCEPTION:
      asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
      break;

    case ARM_FT_INTERWORKED:
      asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
      break;

    default:
      if (IS_STACKALIGN (func_type))
        {
          /* See comment in arm_expand_prologue.  */
          asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, 0);
        }
      if (arm_arch5 || arm_arch4t)
        asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
      else
        asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, LR_REGNUM);
      break;
    }

  return "";
}
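
/* [Editor's illustration -- not part of arm.c.]  A standalone sketch of
   the -Os trick in the non-APCS branch of arm_output_epilogue above:
   instead of a separate "add sp, sp, #N", the epilogue widens the pop
   mask with dead call-clobbered registers (those of r0-r3 not holding
   the return value), so the extra pops absorb the stack adjustment.
   Variable names mirror the source; return_words and
   __builtin_popcountl are stand-ins for arm_size_return_regs()/4 and
   the file's internal bit_count.  */
#include <stdio.h>

int
main (void)
{
  unsigned long saved_regs_mask = (1UL << 4) | (1UL << 14);  /* r4, lr */
  unsigned long amount = 8;        /* bytes of leftover stack adjustment */
  int return_words = 1;            /* return value occupies r0 only */
  unsigned long mask;
  int reg = 0;

  mask = (1UL << return_words) - 1;  /* regs holding the return value... */
  mask ^= 0xf;                       /* ...inverted within r0-r3 */
  mask &= ~saved_regs_mask;          /* don't pop anything twice */
  while (__builtin_popcountl (mask) * 4 > amount)
    {                                /* trim low regs until it fits */
      while ((mask & (1UL << reg)) == 0)
        reg++;
      mask &= ~(1UL << reg);
    }
  if (__builtin_popcountl (mask) * 4 == amount)
    {
      saved_regs_mask |= mask;       /* fold the adjustment into the pop */
      amount = 0;
    }
  /* Prints: pop mask = 0x401c, remaining adjust = 0.  */
  printf ("pop mask = 0x%04lx, remaining adjust = %lu\n",
          saved_regs_mask, amount);
  return 0;
}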

static void
arm_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
                              HOST_WIDE_INT frame_size ATTRIBUTE_UNUSED)
{
  arm_stack_offsets *offsets;

  if (TARGET_THUMB1)
    {
      int regno;

      /* Emit any call-via-reg trampolines that are needed for v4t support
         of call_reg and call_value_reg type insns.  */
      for (regno = 0; regno < LR_REGNUM; regno++)
        {
          rtx label = cfun->machine->call_via[regno];

          if (label != NULL)
            {
              switch_to_section (function_section (current_function_decl));
              targetm.asm_out.internal_label (asm_out_file, "L",
                                              CODE_LABEL_NUMBER (label));
              asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
            }
        }

      /* ??? Probably not safe to set this here, since it assumes that a
         function will be emitted as assembly immediately after we generate
         RTL for it.  This does not happen for inline functions.  */
      cfun->machine->return_used_this_function = 0;
    }
  else /* TARGET_32BIT */
    {
      /* We need to take into account any stack-frame rounding.  */
      offsets = arm_get_frame_offsets ();

      gcc_assert (!use_return_insn (FALSE, NULL)
                  || (cfun->machine->return_used_this_function != 0)
                  || offsets->saved_regs == offsets->outgoing_args
                  || frame_pointer_needed);

      /* Reset the ARM-specific per-function variables.  */
      after_arm_reorg = 0;
    }
}
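
/* For example, a Thumb-1 function compiled for ARMv4T that makes an
   indirect call through r2 needs a "call via" trampoline, since v4t has
   no blx <reg> instruction; the loop above emits something like

       .L<n>:
               bx      r2

   after the function body, and the call itself is a bl to that label,
   which sets LR as a side effect.  */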

/* Generate and emit an insn that we will recognize as a push_multi.
   Unfortunately, since this insn does not reflect very well the actual
   semantics of the operation, we need to annotate the insn for the benefit
   of DWARF2 frame unwind information.  */
static rtx
emit_multi_reg_push (unsigned long mask)
{
  int num_regs = 0;
  int num_dwarf_regs;
  int i, j;
  rtx par;
  rtx dwarf;
  int dwarf_par_index;
  rtx tmp, reg;

  for (i = 0; i <= LAST_ARM_REGNUM; i++)
    if (mask & (1 << i))
      num_regs++;

  gcc_assert (num_regs && num_regs <= 16);

  /* We don't record the PC in the dwarf frame information.  */
  num_dwarf_regs = num_regs;
  if (mask & (1 << PC_REGNUM))
    num_dwarf_regs--;

  /* For the body of the insn we are going to generate an UNSPEC in
     parallel with several USEs.  This allows the insn to be recognized
     by the push_multi pattern in the arm.md file.

     The body of the insn looks something like this:

       (parallel [
           (set (mem:BLK (pre_modify:SI (reg:SI sp)
                                        (const_int:SI <num>)))
                (unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
           (use (reg:SI XX))
           (use (reg:SI YY))
           ...
        ])

     For the frame note however, we try to be more explicit and actually
     show each register being stored into the stack frame, plus a (single)
     decrement of the stack pointer.  We do it this way in order to be
     friendly to the stack unwinding code, which only wants to see a single
     stack decrement per instruction.  The RTL we generate for the note looks
     something like this:

      (sequence [
           (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
           (set (mem:SI (reg:SI sp)) (reg:SI r4))
           (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI XX))
           (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI YY))
           ...
        ])

     FIXME: In an ideal world the PRE_MODIFY would not exist and
     instead we'd have a parallel expression detailing all
     the stores to the various memory addresses so that debug
     information is more up-to-date.  Remember, however, while writing
     this, to take care of the constraints with the push instruction.

     Note also that this has to be taken care of for the VFP registers.

     For more see PR43399.  */

  par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
  dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_dwarf_regs + 1));
  dwarf_par_index = 1;

  for (i = 0; i <= LAST_ARM_REGNUM; i++)
    {
      if (mask & (1 << i))
        {
          reg = gen_rtx_REG (SImode, i);

          XVECEXP (par, 0, 0)
            = gen_rtx_SET (VOIDmode,
                           gen_frame_mem
                           (BLKmode,
                            gen_rtx_PRE_MODIFY (Pmode,
                                                stack_pointer_rtx,
                                                plus_constant
                                                (stack_pointer_rtx,
                                                 -4 * num_regs))
                            ),
                           gen_rtx_UNSPEC (BLKmode,
                                           gen_rtvec (1, reg),
                                           UNSPEC_PUSH_MULT));

          if (i != PC_REGNUM)
            {
              tmp = gen_rtx_SET (VOIDmode,
                                 gen_frame_mem (SImode, stack_pointer_rtx),
                                 reg);
              RTX_FRAME_RELATED_P (tmp) = 1;
              XVECEXP (dwarf, 0, dwarf_par_index) = tmp;
              dwarf_par_index++;
            }

          break;
        }
    }

  for (j = 1, i++; j < num_regs; i++)
    {
      if (mask & (1 << i))
        {
          reg = gen_rtx_REG (SImode, i);

          XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);

          if (i != PC_REGNUM)
            {
              tmp
                = gen_rtx_SET (VOIDmode,
                               gen_frame_mem
                               (SImode,
                                plus_constant (stack_pointer_rtx,
                                               4 * j)),
                               reg);
              RTX_FRAME_RELATED_P (tmp) = 1;
              XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
            }

          j++;
        }
    }

  par = emit_insn (par);

  tmp = gen_rtx_SET (VOIDmode,
                     stack_pointer_rtx,
                     plus_constant (stack_pointer_rtx, -4 * num_regs));
  RTX_FRAME_RELATED_P (tmp) = 1;
  XVECEXP (dwarf, 0, 0) = tmp;

  add_reg_note (par, REG_FRAME_RELATED_EXPR, dwarf);

  return par;
}
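
/* For example, a prologue saving r4, r5 and lr calls
   emit_multi_reg_push ((1 << 4) | (1 << 5) | (1 << LR_REGNUM)); in ARM
   mode the insn is eventually output as "stmfd sp!, {r4, r5, lr}", and
   its frame note records one 12-byte stack decrement plus the three
   individual stores.  */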

/* Calculate the size of the return value that is passed in registers.  */
static unsigned
arm_size_return_regs (void)
{
  enum machine_mode mode;

  if (crtl->return_rtx != 0)
    mode = GET_MODE (crtl->return_rtx);
  else
    mode = DECL_MODE (DECL_RESULT (current_function_decl));

  return GET_MODE_SIZE (mode);
}
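
/* Emit RTL that pushes COUNT FPA registers, starting at BASE_REG, as a
   single store-multiple.  Like emit_multi_reg_push, the insn is
   annotated with an explicit DWARF frame note; each FPA register
   occupies 12 bytes on the stack.  */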
static rtx
emit_sfm (int base_reg, int count)
{
  rtx par;
  rtx dwarf;
  rtx tmp, reg;
  int i;

  par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
  dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));

  reg = gen_rtx_REG (XFmode, base_reg++);

  XVECEXP (par, 0, 0)
    = gen_rtx_SET (VOIDmode,
                   gen_frame_mem
                   (BLKmode,
                    gen_rtx_PRE_MODIFY (Pmode,
                                        stack_pointer_rtx,
                                        plus_constant
                                        (stack_pointer_rtx,
                                         -12 * count))
                    ),
                   gen_rtx_UNSPEC (BLKmode,
                                   gen_rtvec (1, reg),
                                   UNSPEC_PUSH_MULT));
  tmp = gen_rtx_SET (VOIDmode,
                     gen_frame_mem (XFmode, stack_pointer_rtx), reg);
  RTX_FRAME_RELATED_P (tmp) = 1;
  XVECEXP (dwarf, 0, 1) = tmp;

  for (i = 1; i < count; i++)
    {
      reg = gen_rtx_REG (XFmode, base_reg++);
      XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);

      tmp = gen_rtx_SET (VOIDmode,
                         gen_frame_mem (XFmode,
                                        plus_constant (stack_pointer_rtx,
                                                       i * 12)),
                         reg);
      RTX_FRAME_RELATED_P (tmp) = 1;
      XVECEXP (dwarf, 0, i + 1) = tmp;
    }

  tmp = gen_rtx_SET (VOIDmode,
                     stack_pointer_rtx,
                     plus_constant (stack_pointer_rtx, -12 * count));

  RTX_FRAME_RELATED_P (tmp) = 1;
  XVECEXP (dwarf, 0, 0) = tmp;

  par = emit_insn (par);
  add_reg_note (par, REG_FRAME_RELATED_EXPR, dwarf);

  return par;
}


/* Return true if the current function needs to save/restore LR.  */

static bool
thumb_force_lr_save (void)
{
  return !cfun->machine->lr_save_eliminated
         && (!leaf_function_p ()
             || thumb_far_jump_used_p ()
             || df_regs_ever_live_p (LR_REGNUM));
}


/* Compute the distance from register FROM to register TO.
   These can be the arg pointer (26), the soft frame pointer (25),
   the stack pointer (13) or the hard frame pointer (11).
   In Thumb mode r7 is used as the hard frame pointer, if needed.
   Typical stack layout looks like this:

       old stack pointer -> |    |
                             ----
                            |    | \
                            |    |   saved arguments for
                            |    |   vararg functions
                            |    | /
                              --
   hard FP & arg pointer -> |    | \
                            |    |   stack
                            |    |   frame
                            |    | /
                              --
                            |    | \
                            |    |   call saved
                            |    |   registers
      soft frame pointer -> |    | /
                              --
                            |    | \
                            |    |   local
                            |    |   variables
     locals base pointer -> |    | /
                              --
                            |    | \
                            |    |   outgoing
                            |    |   arguments
   current stack pointer -> |    | /
                              --

  For a given function some or all of these stack components
  may not be needed, giving rise to the possibility of
  eliminating some of the registers.

  The values returned by this function must reflect the behavior
  of arm_expand_prologue () and arm_compute_save_reg_mask ().

  The sign of the number returned reflects the direction of stack
  growth, so the values are positive for all eliminations except
  from the soft frame pointer to the hard frame pointer.

  SFP may point just inside the local variables block to ensure correct
  alignment.  */


/* Calculate stack offsets.  These are used to calculate register elimination
   offsets and in prologue/epilogue code.  Also calculates which registers
   should be saved.  */

static arm_stack_offsets *
arm_get_frame_offsets (void)
{
  struct arm_stack_offsets *offsets;
  unsigned long func_type;
  int leaf;
  int saved;
  int core_saved;
  HOST_WIDE_INT frame_size;
  int i;

  offsets = &cfun->machine->stack_offsets;

  /* We need to know if we are a leaf function.  Unfortunately, it
     is possible to be called after start_sequence has been called,
     which causes get_insns to return the insns for the sequence,
     not the function, which will cause leaf_function_p to return
     the incorrect result.

     However, we only need to know about leaf functions once reload
     has completed, and the frame size cannot be changed after that
     time, so we can safely use the cached value.  */

  if (reload_completed)
    return offsets;

  /* Initially this is the size of the local variables.  It will be
     translated into an offset once we have determined the size of
     preceding data.  */
  frame_size = ROUND_UP_WORD (get_frame_size ());

  leaf = leaf_function_p ();

  /* Space for variadic functions.  */
  offsets->saved_args = crtl->args.pretend_args_size;

  /* In Thumb mode this is incorrect, but never used.  */
  offsets->frame = offsets->saved_args + (frame_pointer_needed ? 4 : 0) +
                   arm_compute_static_chain_stack_bytes ();

  if (TARGET_32BIT)
    {
      unsigned int regno;

      offsets->saved_regs_mask = arm_compute_save_reg_mask ();
      core_saved = bit_count (offsets->saved_regs_mask) * 4;
      saved = core_saved;

      /* We know that SP will be doubleword aligned on entry, and we must
         preserve that condition at any subroutine call.  We also require the
         soft frame pointer to be doubleword aligned.  */

      if (TARGET_REALLY_IWMMXT)
        {
          /* Check for the call-saved iWMMXt registers.  */
          for (regno = FIRST_IWMMXT_REGNUM;
               regno <= LAST_IWMMXT_REGNUM;
               regno++)
            if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
              saved += 8;
        }

      func_type = arm_current_func_type ();
      if (! IS_VOLATILE (func_type))
        {
          /* Space for saved FPA registers.  */
          for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
            if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
              saved += 12;

          /* Space for saved VFP registers.  */
          if (TARGET_HARD_FLOAT && TARGET_VFP)
            saved += arm_get_vfp_saved_size ();
        }
    }
  else /* TARGET_THUMB1 */
    {
      offsets->saved_regs_mask = thumb1_compute_save_reg_mask ();
      core_saved = bit_count (offsets->saved_regs_mask) * 4;
      saved = core_saved;
      if (TARGET_BACKTRACE)
        saved += 16;
    }

  /* Saved registers include the stack frame.  */
  offsets->saved_regs = offsets->saved_args + saved +
                        arm_compute_static_chain_stack_bytes ();
  offsets->soft_frame = offsets->saved_regs + CALLER_INTERWORKING_SLOT_SIZE;
  /* A leaf function does not need any stack alignment if it has nothing
     on the stack.  */
  if (leaf && frame_size == 0)
    {
      offsets->outgoing_args = offsets->soft_frame;
      offsets->locals_base = offsets->soft_frame;
      return offsets;
    }

  /* Ensure SFP has the correct alignment.  */
  if (ARM_DOUBLEWORD_ALIGN
      && (offsets->soft_frame & 7))
    {
      offsets->soft_frame += 4;
      /* Try to align stack by pushing an extra reg.  Don't bother doing this
         when there is a stack frame as the alignment will be rolled into
         the normal stack adjustment.  */
      if (frame_size + crtl->outgoing_args_size == 0)
        {
          int reg = -1;

          /* If it is safe to use r3, then do so.  This sometimes
             generates better code on Thumb-2 by avoiding the need to
             use 32-bit push/pop instructions.  */
          if (!crtl->tail_call_emit
              && arm_size_return_regs () <= 12
              && (offsets->saved_regs_mask & (1 << 3)) == 0)
            {
              reg = 3;
            }
          else
            for (i = 4; i <= (TARGET_THUMB1 ? LAST_LO_REGNUM : 11); i++)
              {
                if ((offsets->saved_regs_mask & (1 << i)) == 0)
                  {
                    reg = i;
                    break;
                  }
              }

          if (reg != -1)
            {
              offsets->saved_regs += 4;
              offsets->saved_regs_mask |= (1 << reg);
            }
        }
    }

  offsets->locals_base = offsets->soft_frame + frame_size;
  offsets->outgoing_args = (offsets->locals_base
                            + crtl->outgoing_args_size);

  if (ARM_DOUBLEWORD_ALIGN)
    {
      /* Ensure SP remains doubleword aligned.  */
      if (offsets->outgoing_args & 7)
        offsets->outgoing_args += 4;
      gcc_assert (!(offsets->outgoing_args & 7));
    }

  return offsets;
}
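
/* A rough worked example (assuming ARM_DOUBLEWORD_ALIGN and a zero
   CALLER_INTERWORKING_SLOT_SIZE): a non-leaf ARM function with 8 bytes
   of locals, no pretend or outgoing arguments, and {r4, fp, lr} in
   saved_regs_mask ends up with saved_args == 0, saved_regs == 12,
   soft_frame == 16 (bumped from 12 for doubleword alignment),
   locals_base == 24 and outgoing_args == 24.  */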


/* Calculate the relative offsets for the different stack pointers.  Positive
   offsets are in the direction of stack growth.  */

HOST_WIDE_INT
arm_compute_initial_elimination_offset (unsigned int from, unsigned int to)
{
  arm_stack_offsets *offsets;

  offsets = arm_get_frame_offsets ();

  /* OK, now we have enough information to compute the distances.
     There must be an entry in these switch tables for each pair
     of registers in ELIMINABLE_REGS, even if some of the entries
     seem to be redundant or useless.  */
  switch (from)
    {
    case ARG_POINTER_REGNUM:
      switch (to)
        {
        case THUMB_HARD_FRAME_POINTER_REGNUM:
          return 0;

        case FRAME_POINTER_REGNUM:
          /* This is the reverse of the soft frame pointer
             to hard frame pointer elimination below.  */
          return offsets->soft_frame - offsets->saved_args;

        case ARM_HARD_FRAME_POINTER_REGNUM:
          /* This is only non-zero in the case where the static chain register
             is stored above the frame.  */
          return offsets->frame - offsets->saved_args - 4;

        case STACK_POINTER_REGNUM:
          /* If nothing has been pushed on the stack at all
             then this will return -4.  This *is* correct!  */
          return offsets->outgoing_args - (offsets->saved_args + 4);

        default:
          gcc_unreachable ();
        }
      gcc_unreachable ();

    case FRAME_POINTER_REGNUM:
      switch (to)
        {
        case THUMB_HARD_FRAME_POINTER_REGNUM:
          return 0;

        case ARM_HARD_FRAME_POINTER_REGNUM:
          /* The hard frame pointer points to the top entry in the
             stack frame.  The soft frame pointer points to the bottom
             entry in the stack frame.  If there is no stack frame at
             all, then they are identical.  */

          return offsets->frame - offsets->soft_frame;

        case STACK_POINTER_REGNUM:
          return offsets->outgoing_args - offsets->soft_frame;

        default:
          gcc_unreachable ();
        }
      gcc_unreachable ();

    default:
      /* You cannot eliminate from the stack pointer.
         In theory you could eliminate from the hard frame
         pointer to the stack pointer, but this will never
         happen, since if a stack frame is not needed the
         hard frame pointer will never be used.  */
      gcc_unreachable ();
    }
}
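
/* Continuing the worked example above: with saved_args == 0 and
   outgoing_args == 24, eliminating the arg pointer into the stack
   pointer yields 24 - (0 + 4) == 20, the distance from the incoming
   argument area to the final stack pointer.  */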

/* Given FROM and TO register numbers, say whether this elimination is
   allowed.  Frame pointer elimination is automatically handled.

   All eliminations are permissible.  Note that ARG_POINTER_REGNUM and
   HARD_FRAME_POINTER_REGNUM are in fact the same thing.  If we need a frame
   pointer, we must eliminate FRAME_POINTER_REGNUM into
   HARD_FRAME_POINTER_REGNUM and not into STACK_POINTER_REGNUM or
   ARG_POINTER_REGNUM.  */

bool
arm_can_eliminate (const int from, const int to)
{
  return ((to == FRAME_POINTER_REGNUM && from == ARG_POINTER_REGNUM) ? false :
          (to == STACK_POINTER_REGNUM && frame_pointer_needed) ? false :
          (to == ARM_HARD_FRAME_POINTER_REGNUM && TARGET_THUMB) ? false :
          (to == THUMB_HARD_FRAME_POINTER_REGNUM && TARGET_ARM) ? false :
           true);
}
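
/* For example, when a frame pointer is needed in ARM mode, eliminating
   FRAME_POINTER_REGNUM into STACK_POINTER_REGNUM is rejected by the
   second clause above, so the soft frame pointer must be eliminated
   into ARM_HARD_FRAME_POINTER_REGNUM instead.  */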

/* Emit RTL to save coprocessor registers on function entry.  Returns the
   number of bytes pushed.  */

static int
arm_save_coproc_regs (void)
{
  int saved_size = 0;
  unsigned reg;
  unsigned start_reg;
  rtx insn;

  for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
    if (df_regs_ever_live_p (reg) && ! call_used_regs[reg])
      {
        insn = gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx);
        insn = gen_rtx_MEM (V2SImode, insn);
        insn = emit_set_insn (insn, gen_rtx_REG (V2SImode, reg));
        RTX_FRAME_RELATED_P (insn) = 1;
        saved_size += 8;
      }

  /* Save any floating point call-saved registers used by this
     function.  */
  if (TARGET_FPA_EMU2)
    {
      for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
        if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
          {
            insn = gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx);
            insn = gen_rtx_MEM (XFmode, insn);
            insn = emit_set_insn (insn, gen_rtx_REG (XFmode, reg));
            RTX_FRAME_RELATED_P (insn) = 1;
            saved_size += 12;
          }
    }
  else
    {
      start_reg = LAST_FPA_REGNUM;

      for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
        {
          if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
            {
              if (start_reg - reg == 3)
                {
                  insn = emit_sfm (reg, 4);
                  RTX_FRAME_RELATED_P (insn) = 1;
                  saved_size += 48;
                  start_reg = reg - 1;
                }
            }
          else
            {
              if (start_reg != reg)
                {
                  insn = emit_sfm (reg + 1, start_reg - reg);
                  RTX_FRAME_RELATED_P (insn) = 1;
                  saved_size += (start_reg - reg) * 12;
                }
              start_reg = reg - 1;
            }
        }

      if (start_reg != reg)
        {
          insn = emit_sfm (reg + 1, start_reg - reg);
          saved_size += (start_reg - reg) * 12;
          RTX_FRAME_RELATED_P (insn) = 1;
        }
    }
  if (TARGET_HARD_FLOAT && TARGET_VFP)
    {
      start_reg = FIRST_VFP_REGNUM;

      for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
        {
          if ((!df_regs_ever_live_p (reg) || call_used_regs[reg])
              && (!df_regs_ever_live_p (reg + 1) || call_used_regs[reg + 1]))
            {
              if (start_reg != reg)
                saved_size += vfp_emit_fstmd (start_reg,
                                              (reg - start_reg) / 2);
              start_reg = reg + 2;
            }
        }
      if (start_reg != reg)
        saved_size += vfp_emit_fstmd (start_reg,
                                      (reg - start_reg) / 2);
    }
  return saved_size;
}
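
/* As an illustration: if the call-saved FPA registers f4-f7 are all
   live, the loop above coalesces them into a single emit_sfm call with
   base register f4 and count 4, one 48-byte store-multiple rather than
   four separate 12-byte stores.  */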


/* Set the Thumb frame pointer from the stack pointer.  */

static void
thumb_set_frame_pointer (arm_stack_offsets *offsets)
{
  HOST_WIDE_INT amount;
  rtx insn, dwarf;

  amount = offsets->outgoing_args - offsets->locals_base;
  if (amount < 1024)
    insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx,
                                  stack_pointer_rtx, GEN_INT (amount)));
  else
    {
      emit_insn (gen_movsi (hard_frame_pointer_rtx, GEN_INT (amount)));
      /* Thumb-2 RTL patterns expect sp as the first input.  Thumb-1
         expects the first two operands to be the same.  */
      if (TARGET_THUMB2)
        {
          insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx,
                                        stack_pointer_rtx,
                                        hard_frame_pointer_rtx));
        }
      else
        {
          insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx,
                                        hard_frame_pointer_rtx,
                                        stack_pointer_rtx));
        }
      dwarf = gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
                           plus_constant (stack_pointer_rtx, amount));
      RTX_FRAME_RELATED_P (dwarf) = 1;
      add_reg_note (insn, REG_FRAME_RELATED_EXPR, dwarf);
    }

  RTX_FRAME_RELATED_P (insn) = 1;
}
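
/* For instance, if the outgoing-argument area puts the frame pointer
   2048 bytes above the final stack pointer, the else-branch above first
   materializes the constant in the frame pointer and then adds the
   stack pointer to it, since 2048 does not fit the small immediate add
   handled by the first branch.  */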

/* Generate the prologue instructions for entry into an ARM or Thumb-2
   function.  */
void
arm_expand_prologue (void)
{
  rtx amount;
  rtx insn;
  rtx ip_rtx;
  unsigned long live_regs_mask;
  unsigned long func_type;
  int fp_offset = 0;
  int saved_pretend_args = 0;
  int saved_regs = 0;
  unsigned HOST_WIDE_INT args_to_push;
  arm_stack_offsets *offsets;

  func_type = arm_current_func_type ();

  /* Naked functions don't have prologues.  */
  if (IS_NAKED (func_type))
    return;

  /* Make a copy of c_f_p_a_s as we may need to modify it locally.  */
  args_to_push = crtl->args.pretend_args_size;

  /* Compute which registers we will have to save onto the stack.  */
  offsets = arm_get_frame_offsets ();
  live_regs_mask = offsets->saved_regs_mask;

  ip_rtx = gen_rtx_REG (SImode, IP_REGNUM);

  if (IS_STACKALIGN (func_type))
    {
      rtx dwarf;
      rtx r0;
      rtx r1;
      /* Handle a word-aligned stack pointer.  We generate the following:

          mov r0, sp
          bic r1, r0, #7
          mov sp, r1
          <save and restore r0 in normal prologue/epilogue>
          mov sp, r0
          bx lr

         The unwinder doesn't need to know about the stack realignment.
         Just tell it we saved SP in r0.  */
      gcc_assert (TARGET_THUMB2 && !arm_arch_notm && args_to_push == 0);

      r0 = gen_rtx_REG (SImode, 0);
      r1 = gen_rtx_REG (SImode, 1);
      /* Use a real rtvec rather than NULL_RTVEC so the rest of the
         compiler won't choke.  */
      dwarf = gen_rtx_UNSPEC (SImode, rtvec_alloc (0), UNSPEC_STACK_ALIGN);
      dwarf = gen_rtx_SET (VOIDmode, r0, dwarf);
      insn = gen_movsi (r0, stack_pointer_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;
      add_reg_note (insn, REG_FRAME_RELATED_EXPR, dwarf);
      emit_insn (insn);
      emit_insn (gen_andsi3 (r1, r0, GEN_INT (~(HOST_WIDE_INT)7)));
      emit_insn (gen_movsi (stack_pointer_rtx, r1));
    }

  /* For APCS frames, if the IP register is clobbered
     when creating the frame, save that register in a special
     way.  */
  if (TARGET_APCS_FRAME && frame_pointer_needed && TARGET_ARM)
    {
      if (IS_INTERRUPT (func_type))
        {
          /* Interrupt functions must not corrupt any registers.
             Creating a frame pointer however, corrupts the IP
             register, so we must push it first.  */
          insn = emit_multi_reg_push (1 << IP_REGNUM);

          /* Do not set RTX_FRAME_RELATED_P on this insn.
             The dwarf stack unwinding code only wants to see one
             stack decrement per function, and this is not it.  If
             this instruction is labeled as being part of the frame
             creation sequence then dwarf2out_frame_debug_expr will
             die when it encounters the assignment of IP to FP
             later on, since the use of SP here establishes SP as
             the CFA register and not IP.

             Anyway this instruction is not really part of the stack
             frame creation although it is part of the prologue.  */
        }
      else if (IS_NESTED (func_type))
        {
          /* The static chain register is the same as the IP register
             used as a scratch register during stack frame creation.
             To get around this we need to find somewhere to store IP
             whilst the frame is being created.  We try the following
             places in order:

               1. The last argument register.
               2. A slot on the stack above the frame.  (This only
                  works if the function is not a varargs function).
               3. Register r3, after pushing the argument registers
                  onto the stack.

             Note - we only need to tell the dwarf2 backend about the SP
             adjustment in the second variant; the static chain register
             doesn't need to be unwound, as it doesn't contain a value
             inherited from the caller.  */

          if (df_regs_ever_live_p (3) == false)
            insn = emit_set_insn (gen_rtx_REG (SImode, 3), ip_rtx);
          else if (args_to_push == 0)
            {
              rtx dwarf;

              gcc_assert (arm_compute_static_chain_stack_bytes () == 4);
              saved_regs += 4;

              insn = gen_rtx_PRE_DEC (SImode, stack_pointer_rtx);
              insn = emit_set_insn (gen_frame_mem (SImode, insn), ip_rtx);
              fp_offset = 4;

              /* Just tell the dwarf backend that we adjusted SP.  */
              dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
                                   plus_constant (stack_pointer_rtx,
                                                  -fp_offset));
              RTX_FRAME_RELATED_P (insn) = 1;
              add_reg_note (insn, REG_FRAME_RELATED_EXPR, dwarf);
            }
          else
            {
              /* Store the args on the stack.  */
              if (cfun->machine->uses_anonymous_args)
                insn = emit_multi_reg_push
                  ((0xf0 >> (args_to_push / 4)) & 0xf);
              else
                insn = emit_insn
                  (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
                               GEN_INT (- args_to_push)));

              RTX_FRAME_RELATED_P (insn) = 1;

              saved_pretend_args = 1;
              fp_offset = args_to_push;
              args_to_push = 0;

              /* Now reuse r3 to preserve IP.  */
              emit_set_insn (gen_rtx_REG (SImode, 3), ip_rtx);
            }
        }

      insn = emit_set_insn (ip_rtx,
                            plus_constant (stack_pointer_rtx, fp_offset));
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  if (args_to_push)
    {
      /* Push the argument registers, or reserve space for them.  */
      if (cfun->machine->uses_anonymous_args)
        insn = emit_multi_reg_push
          ((0xf0 >> (args_to_push / 4)) & 0xf);
      else
        insn = emit_insn
          (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
                       GEN_INT (- args_to_push)));
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  /* If this is an interrupt service routine, and the link register
     is going to be pushed, and we're not generating the extra
     push of IP (needed when a frame is needed and the frame layout
     is APCS), subtracting four from LR now will mean that the function
     return can be done with a single instruction.  */
  if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
      && (live_regs_mask & (1 << LR_REGNUM)) != 0
      && !(frame_pointer_needed && TARGET_APCS_FRAME)
      && TARGET_ARM)
    {
      rtx lr = gen_rtx_REG (SImode, LR_REGNUM);

      emit_set_insn (lr, plus_constant (lr, -4));
    }

  if (live_regs_mask)
    {
      saved_regs += bit_count (live_regs_mask) * 4;
      if (optimize_size && !frame_pointer_needed
          && saved_regs == offsets->saved_regs - offsets->saved_args)
        {
          /* If no coprocessor registers are being pushed and we don't have
             to worry about a frame pointer then push extra registers to
             create the stack frame.  This is done in a way that does not
             alter the frame layout, so is independent of the epilogue.  */
          int n;
          int frame;
          n = 0;
          while (n < 8 && (live_regs_mask & (1 << n)) == 0)
            n++;
          frame = offsets->outgoing_args - (offsets->saved_args + saved_regs);
          if (frame && n * 4 >= frame)
            {
              n = frame / 4;
              live_regs_mask |= (1 << n) - 1;
              saved_regs += frame;
            }
        }
      insn = emit_multi_reg_push (live_regs_mask);
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  if (! IS_VOLATILE (func_type))
    saved_regs += arm_save_coproc_regs ();

  if (frame_pointer_needed && TARGET_ARM)
    {
      /* Create the new frame pointer.  */
      if (TARGET_APCS_FRAME)
        {
          insn = GEN_INT (-(4 + args_to_push + fp_offset));
          insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ip_rtx, insn));
          RTX_FRAME_RELATED_P (insn) = 1;

          if (IS_NESTED (func_type))
            {
              /* Recover the static chain register.  */
              if (!df_regs_ever_live_p (3)
                  || saved_pretend_args)
                insn = gen_rtx_REG (SImode, 3);
              else /* if (crtl->args.pretend_args_size == 0) */
                {
                  insn = plus_constant (hard_frame_pointer_rtx, 4);
                  insn = gen_frame_mem (SImode, insn);
                }
              emit_set_insn (ip_rtx, insn);
              /* Add a USE to stop propagate_one_insn() from barfing.  */
              emit_insn (gen_prologue_use (ip_rtx));
            }
        }
      else
        {
          insn = GEN_INT (saved_regs - 4);
          insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx,
                                        stack_pointer_rtx, insn));
          RTX_FRAME_RELATED_P (insn) = 1;
        }
    }

  if (offsets->outgoing_args != offsets->saved_args + saved_regs)
    {
      /* This add can produce multiple insns for a large constant, so we
         need to get tricky.  */
      rtx last = get_last_insn ();

      amount = GEN_INT (offsets->saved_args + saved_regs
                        - offsets->outgoing_args);

      insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
                                    amount));
      do
        {
          last = last ? NEXT_INSN (last) : get_insns ();
          RTX_FRAME_RELATED_P (last) = 1;
        }
      while (last != insn);

      /* If the frame pointer is needed, emit a special barrier that
         will prevent the scheduler from moving stores to the frame
         before the stack adjustment.  */
      if (frame_pointer_needed)
        insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
                                         hard_frame_pointer_rtx));
    }


  if (frame_pointer_needed && TARGET_THUMB2)
    thumb_set_frame_pointer (offsets);

  if (flag_pic && arm_pic_register != INVALID_REGNUM)
    {
      unsigned long mask;

      mask = live_regs_mask;
      mask &= THUMB2_WORK_REGS;
      if (!IS_NESTED (func_type))
        mask |= (1 << IP_REGNUM);
      arm_load_pic_register (mask);
    }

  /* If we are profiling, make sure no instructions are scheduled before
     the call to mcount.  Similarly if the user has requested no
     scheduling in the prologue.  Similarly if we want non-call exceptions
     using the EABI unwinder, to prevent faulting instructions from being
     swapped with a stack adjustment.  */
  if (crtl->profile || !TARGET_SCHED_PROLOG
      || (ARM_EABI_UNWIND_TABLES && flag_non_call_exceptions))
    emit_insn (gen_blockage ());

  /* If the link register is being kept alive, with the return address in it,
     then make sure that it does not get reused by the ce2 pass.  */
  if ((live_regs_mask & (1 << LR_REGNUM)) == 0)
    cfun->machine->lr_save_eliminated = 1;
}
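
/* For illustration: the "(0xf0 >> (args_to_push / 4)) & 0xf" expression
   used above selects which argument registers a varargs function must
   spill.  With args_to_push == 8 it evaluates to 0xc, a mask naming
   {r2, r3}.  */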

/* Print condition code to STREAM.  Helper function for arm_print_operand.  */
static void
arm_print_condition (FILE *stream)
{
  if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
    {
      /* Branch conversion is not implemented for Thumb-2.  */
      if (TARGET_THUMB)
        {
          output_operand_lossage ("predicated Thumb instruction");
          return;
        }
      if (current_insn_predicate != NULL)
        {
          output_operand_lossage
            ("predicated instruction in conditional sequence");
          return;
        }

      fputs (arm_condition_codes[arm_current_cc], stream);
    }
  else if (current_insn_predicate)
    {
      enum arm_cond_code code;

      if (TARGET_THUMB1)
        {
          output_operand_lossage ("predicated Thumb instruction");
          return;
        }

      code = get_arm_condition_code (current_insn_predicate);
      fputs (arm_condition_codes[code], stream);
    }
}


/* If CODE is 'd', then the X is a condition operand and the instruction
   should only be executed if the condition is true.
   If CODE is 'D', then the X is a condition operand and the instruction
   should only be executed if the condition is false: however, if the mode
   of the comparison is CCFPEmode, then always execute the instruction -- we
   do this because in these circumstances !GE does not necessarily imply LT;
   in these cases the instruction pattern will take care to make sure that
   an instruction containing %d will follow, thereby undoing the effects of
   doing this instruction unconditionally.
   If CODE is 'N' then X is a floating point operand that must be negated
   before output.
   If CODE is 'B' then output a bitwise inverted value of X (a const int).
   If X is a REG and CODE is `M', output a ldm/stm style multi-reg.  */
void
arm_print_operand (FILE *stream, rtx x, int code)
{
  switch (code)
    {
    case '@':
      fputs (ASM_COMMENT_START, stream);
      return;

    case '_':
      fputs (user_label_prefix, stream);
      return;

    case '|':
      fputs (REGISTER_PREFIX, stream);
      return;

    case '?':
      arm_print_condition (stream);
      return;

    case '(':
      /* Nothing in unified syntax, otherwise the current condition code.  */
      if (!TARGET_UNIFIED_ASM)
        arm_print_condition (stream);
      break;

    case ')':
      /* The current condition code in unified syntax, otherwise nothing.  */
      if (TARGET_UNIFIED_ASM)
        arm_print_condition (stream);
      break;

    case '.':
      /* The current condition code for a condition code setting instruction.
         Preceded by 's' in unified syntax, otherwise followed by 's'.  */
      if (TARGET_UNIFIED_ASM)
        {
          fputc ('s', stream);
          arm_print_condition (stream);
        }
      else
        {
          arm_print_condition (stream);
          fputc ('s', stream);
        }
      return;

    case '!':
      /* If the instruction is conditionally executed then print
         the current condition code, otherwise print 's'.  */
      gcc_assert (TARGET_THUMB2 && TARGET_UNIFIED_ASM);
      if (current_insn_predicate)
        arm_print_condition (stream);
      else
        fputc ('s', stream);
      break;

    /* %# is a "break" sequence.  It doesn't output anything, but is used to
       separate e.g. operand numbers from following text, if that text consists
       of further digits which we don't want to be part of the operand
       number.  */
    case '#':
      return;

    case 'N':
      {
        REAL_VALUE_TYPE r;
        REAL_VALUE_FROM_CONST_DOUBLE (r, x);
        r = REAL_VALUE_NEGATE (r);
        fprintf (stream, "%s", fp_const_from_val (&r));
      }
      return;

    /* An integer or symbol address without a preceding # sign.  */
    case 'c':
      switch (GET_CODE (x))
        {
        case CONST_INT:
          fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
          break;

        case SYMBOL_REF:
          output_addr_const (stream, x);
          break;

        default:
          gcc_unreachable ();
        }
      return;

    case 'B':
      if (GET_CODE (x) == CONST_INT)
        {
          HOST_WIDE_INT val;
          val = ARM_SIGN_EXTEND (~INTVAL (x));
          fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
        }
      else
        {
          putc ('~', stream);
          output_addr_const (stream, x);
        }
      return;

    case 'L':
      /* The low 16 bits of an immediate constant.  */
      fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffff);
      return;

    case 'i':
      fprintf (stream, "%s", arithmetic_instr (x, 1));
      return;

    /* Truncate Cirrus shift counts.  */
    case 's':
      if (GET_CODE (x) == CONST_INT)
        {
          fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0x3f);
          return;
        }
      arm_print_operand (stream, x, 0);
      return;

    case 'I':
      fprintf (stream, "%s", arithmetic_instr (x, 0));
      return;

    case 'S':
      {
        HOST_WIDE_INT val;
        const char *shift;

        if (!shift_operator (x, SImode))
          {
            output_operand_lossage ("invalid shift operand");
            break;
          }

        shift = shift_op (x, &val);

        if (shift)
          {
            fprintf (stream, ", %s ", shift);
            if (val == -1)
              arm_print_operand (stream, XEXP (x, 1), 0);
            else
              fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, val);
          }
      }
      return;

      /* An explanation of the 'Q', 'R' and 'H' register operands:

         In a pair of registers containing a DI or DF value the 'Q'
         operand returns the register number of the register containing
         the least significant part of the value.  The 'R' operand returns
         the register number of the register containing the most
         significant part of the value.

         The 'H' operand returns the higher of the two register numbers.
         On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
         same as the 'Q' operand, since the most significant part of the
         value is held in the lower number register.  The reverse is true
         on systems where WORDS_BIG_ENDIAN is false.

         The purpose of these operands is to distinguish between cases
         where the endian-ness of the values is important (for example
         when they are added together), and cases where the endian-ness
         is irrelevant, but the order of register operations is important.
         For example when loading a value from memory into a register
         pair, the endian-ness does not matter.  Provided that the value
         from the lower memory address is put into the lower numbered
         register, and the value from the higher address is put into the
         higher numbered register, the load will work regardless of whether
         the value being loaded is big-wordian or little-wordian.  The
         order of the two register loads can matter however, if the address
         of the memory location is actually held in one of the registers
         being overwritten by the load.  */
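      /* For example, given a DI value in {r0, r1} on a little-endian
         target, '%Q' prints r0 (the least significant half), '%R'
         prints r1 (the most significant half), and '%H' also prints r1
         (the higher register number).  */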
15125
    case 'Q':
15126
      if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
15127
        {
15128
          output_operand_lossage ("invalid operand for code '%c'", code);
15129
          return;
15130
        }
15131
 
15132
      asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0));
15133
      return;
15134
 
15135
    case 'R':
15136
      if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
15137
        {
15138
          output_operand_lossage ("invalid operand for code '%c'", code);
15139
          return;
15140
        }
15141
 
15142
      asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1));
15143
      return;
15144
 
15145
    case 'H':
15146
      if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
15147
        {
15148
          output_operand_lossage ("invalid operand for code '%c'", code);
15149
          return;
15150
        }
15151
 
15152
      asm_fprintf (stream, "%r", REGNO (x) + 1);
15153
      return;
15154
 
15155
    case 'J':
15156
      if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
15157
        {
15158
          output_operand_lossage ("invalid operand for code '%c'", code);
15159
          return;
15160
        }
15161
 
15162
      asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 3 : 2));
15163
      return;
15164
 
15165
    case 'K':
15166
      if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
15167
        {
15168
          output_operand_lossage ("invalid operand for code '%c'", code);
15169
          return;
15170
        }
15171
 
15172
      asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 2 : 3));
15173
      return;
15174
 
15175
    case 'm':
15176
      asm_fprintf (stream, "%r",
15177
                   GET_CODE (XEXP (x, 0)) == REG
15178
                   ? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));
15179
      return;
15180
 
15181
    case 'M':
15182
      asm_fprintf (stream, "{%r-%r}",
15183
                   REGNO (x),
15184
                   REGNO (x) + ARM_NUM_REGS (GET_MODE (x)) - 1);
15185
      return;
15186
 
15187
    /* Like 'M', but writing doubleword vector registers, for use by Neon
15188
       insns.  */
15189
    case 'h':
15190
      {
15191
        int regno = (REGNO (x) - FIRST_VFP_REGNUM) / 2;
15192
        int numregs = ARM_NUM_REGS (GET_MODE (x)) / 2;
15193
        if (numregs == 1)
15194
          asm_fprintf (stream, "{d%d}", regno);
15195
        else
15196
          asm_fprintf (stream, "{d%d-d%d}", regno, regno + numregs - 1);
15197
      }
15198
      return;
15199
 
15200
    case 'd':
15201
      /* CONST_TRUE_RTX means always -- that's the default.  */
15202
      if (x == const_true_rtx)
15203
        return;
15204
 
15205
      if (!COMPARISON_P (x))
15206
        {
15207
          output_operand_lossage ("invalid operand for code '%c'", code);
15208
          return;
15209
        }
15210
 
15211
      fputs (arm_condition_codes[get_arm_condition_code (x)],
15212
             stream);
15213
      return;
15214
 
15215
    case 'D':
15216
      /* CONST_TRUE_RTX means not always -- i.e. never.  We shouldn't ever
15217
         want to do that.  */
15218
      if (x == const_true_rtx)
15219
        {
15220
          output_operand_lossage ("instruction never executed");
15221
          return;
15222
        }
15223
      if (!COMPARISON_P (x))
15224
        {
15225
          output_operand_lossage ("invalid operand for code '%c'", code);
15226
          return;
15227
        }
15228
 
15229
      fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
15230
                                 (get_arm_condition_code (x))],
15231
             stream);
15232
      return;
15233
 
15234
    /* Cirrus registers can be accessed in a variety of ways:
15235
         single floating point (f)
15236
         double floating point (d)
15237
         32bit integer         (fx)
15238
         64bit integer         (dx).  */
15239
    case 'W':                   /* Cirrus register in F mode.  */
15240
    case 'X':                   /* Cirrus register in D mode.  */
15241
    case 'Y':                   /* Cirrus register in FX mode.  */
15242
    case 'Z':                   /* Cirrus register in DX mode.  */
15243
      gcc_assert (GET_CODE (x) == REG
15244
                  && REGNO_REG_CLASS (REGNO (x)) == CIRRUS_REGS);
15245
 
15246
      fprintf (stream, "mv%s%s",
15247
               code == 'W' ? "f"
15248
               : code == 'X' ? "d"
15249
               : code == 'Y' ? "fx" : "dx", reg_names[REGNO (x)] + 2);
15250
 
15251
      return;
15252
 
15253
    /* Print cirrus register in the mode specified by the register's mode.  */
15254
    case 'V':
15255
      {
15256
        int mode = GET_MODE (x);
15257
 
15258
        if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
15259
          {
15260
            output_operand_lossage ("invalid operand for code '%c'", code);
15261
            return;
15262
          }
15263
 
15264
        fprintf (stream, "mv%s%s",
15265
                 mode == DFmode ? "d"
15266
                 : mode == SImode ? "fx"
15267
                 : mode == DImode ? "dx"
15268
                 : "f", reg_names[REGNO (x)] + 2);
15269
 
15270
        return;
15271
      }
15272
 
15273
    case 'U':
15274
      if (GET_CODE (x) != REG
15275
          || REGNO (x) < FIRST_IWMMXT_GR_REGNUM
15276
          || REGNO (x) > LAST_IWMMXT_GR_REGNUM)
15277
        /* Bad value for wCG register number.  */
15278
        {
15279
          output_operand_lossage ("invalid operand for code '%c'", code);
15280
          return;
15281
        }
15282
 
15283
      else
15284
        fprintf (stream, "%d", REGNO (x) - FIRST_IWMMXT_GR_REGNUM);
15285
      return;
15286
 
15287
      /* Print an iWMMXt control register name.  */
15288
    case 'w':
15289
      if (GET_CODE (x) != CONST_INT
15290
          || INTVAL (x) < 0
15291
          || INTVAL (x) >= 16)
15292
        /* Bad value for wC register number.  */
15293
        {
15294
          output_operand_lossage ("invalid operand for code '%c'", code);
15295
          return;
15296
        }
15297
 
15298
      else
15299
        {
15300
          static const char * wc_reg_names [16] =
15301
            {
15302
              "wCID",  "wCon",  "wCSSF", "wCASF",
15303
              "wC4",   "wC5",   "wC6",   "wC7",
15304
              "wCGR0", "wCGR1", "wCGR2", "wCGR3",
15305
              "wC12",  "wC13",  "wC14",  "wC15"
15306
            };
15307
 
15308
          fprintf (stream, wc_reg_names [INTVAL (x)]);
15309
        }
15310
      return;
15311
 
15312
    /* Print the high single-precision register of a VFP double-precision
15313
       register.  */
15314
    case 'p':
15315
      {
15316
        int mode = GET_MODE (x);
15317
        int regno;
15318
 
15319
        if (GET_MODE_SIZE (mode) != 8 || GET_CODE (x) != REG)
15320
          {
15321
            output_operand_lossage ("invalid operand for code '%c'", code);
15322
            return;
15323
          }
15324
 
15325
        regno = REGNO (x);
15326
        if (!VFP_REGNO_OK_FOR_DOUBLE (regno))
15327
          {
15328
            output_operand_lossage ("invalid operand for code '%c'", code);
15329
            return;
15330
          }
15331
 
15332
        fprintf (stream, "s%d", regno - FIRST_VFP_REGNUM + 1);
15333
      }
15334
      return;

    /* Print a VFP/Neon double precision or quad precision register name.  */
    case 'P':
    case 'q':
      {
        int mode = GET_MODE (x);
        int is_quad = (code == 'q');
        int regno;

        if (GET_MODE_SIZE (mode) != (is_quad ? 16 : 8))
          {
            output_operand_lossage ("invalid operand for code '%c'", code);
            return;
          }

        if (GET_CODE (x) != REG
            || !IS_VFP_REGNUM (REGNO (x)))
          {
            output_operand_lossage ("invalid operand for code '%c'", code);
            return;
          }

        regno = REGNO (x);
        if ((is_quad && !NEON_REGNO_OK_FOR_QUAD (regno))
            || (!is_quad && !VFP_REGNO_OK_FOR_DOUBLE (regno)))
          {
            output_operand_lossage ("invalid operand for code '%c'", code);
            return;
          }

        fprintf (stream, "%c%d", is_quad ? 'q' : 'd',
          (regno - FIRST_VFP_REGNUM) >> (is_quad ? 2 : 1));
      }
      return;
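
    /* For example (illustrative only): the hard register numbered
       FIRST_VFP_REGNUM + 8 prints as "d4" under '%P' (8-byte mode) and
       as "q2" under '%q' (16-byte mode).  */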

    /* These two codes print the low/high doubleword register of a Neon quad
       register, respectively.  For pair-structure types, can also print
       low/high quadword registers.  */
    case 'e':
    case 'f':
      {
        int mode = GET_MODE (x);
        int regno;

        if ((GET_MODE_SIZE (mode) != 16
             && GET_MODE_SIZE (mode) != 32) || GET_CODE (x) != REG)
          {
            output_operand_lossage ("invalid operand for code '%c'", code);
            return;
          }

        regno = REGNO (x);
        if (!NEON_REGNO_OK_FOR_QUAD (regno))
          {
            output_operand_lossage ("invalid operand for code '%c'", code);
            return;
          }

        if (GET_MODE_SIZE (mode) == 16)
          fprintf (stream, "d%d", ((regno - FIRST_VFP_REGNUM) >> 1)
                                  + (code == 'f' ? 1 : 0));
        else
          fprintf (stream, "q%d", ((regno - FIRST_VFP_REGNUM) >> 2)
                                  + (code == 'f' ? 1 : 0));
      }
      return;
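
    /* For example (illustrative only): for a 16-byte vector in q1
       (regno FIRST_VFP_REGNUM + 4), '%e' prints "d2" and '%f' prints
       "d3", the low and high halves of q1.  */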

    /* Print a VFPv3 floating-point constant, represented as an integer
       index.  */
    case 'G':
      {
        int index = vfp3_const_double_index (x);
        gcc_assert (index != -1);
        fprintf (stream, "%d", index);
      }
      return;

    /* Print bits representing opcode features for Neon.

       Bit 0 is 1 for signed, 0 for unsigned.  Floats count as signed
       and polynomials as unsigned.

       Bit 1 is 1 for floats and polynomials, 0 for ordinary integers.

       Bit 2 is 1 for rounding functions, 0 otherwise.  */
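
    /* Worked example (illustrative): bits == 5 (binary 101) describes a
       signed, non-float, rounding operation, so 'T' prints 's', 'F'
       prints 'i', 't' prints 's', and 'O' prints "r".  */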

    /* Identify the type as 's', 'u', 'p' or 'f'.  */
    case 'T':
      {
        HOST_WIDE_INT bits = INTVAL (x);
        fputc ("uspf"[bits & 3], stream);
      }
      return;

    /* Likewise, but signed and unsigned integers are both 'i'.  */
    case 'F':
      {
        HOST_WIDE_INT bits = INTVAL (x);
        fputc ("iipf"[bits & 3], stream);
      }
      return;

    /* As for 'T', but emit 'u' instead of 'p'.  */
    case 't':
      {
        HOST_WIDE_INT bits = INTVAL (x);
        fputc ("usuf"[bits & 3], stream);
      }
      return;

    /* Bit 2: rounding (vs none).  */
    case 'O':
      {
        HOST_WIDE_INT bits = INTVAL (x);
        fputs ((bits & 4) != 0 ? "r" : "", stream);
      }
      return;

    /* Memory operand for vld1/vst1 instruction.  */
    case 'A':
      {
        rtx addr;
        bool postinc = FALSE;
        gcc_assert (GET_CODE (x) == MEM);
        addr = XEXP (x, 0);
        if (GET_CODE (addr) == POST_INC)
          {
            postinc = TRUE;
            addr = XEXP (addr, 0);
          }
        asm_fprintf (stream, "[%r]", REGNO (addr));
        if (postinc)
          fputs ("!", stream);
      }
      return;
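
    /* For example (illustrative only): (mem (reg r0)) is printed as
       "[r0]", and (mem (post_inc (reg r0))) as "[r0]!".  */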

    /* Translate an S register number into a D register number and element index.  */
    case 'y':
      {
        int mode = GET_MODE (x);
        int regno;

        if (GET_MODE_SIZE (mode) != 4 || GET_CODE (x) != REG)
          {
            output_operand_lossage ("invalid operand for code '%c'", code);
            return;
          }

        regno = REGNO (x);
        if (!VFP_REGNO_OK_FOR_SINGLE (regno))
          {
            output_operand_lossage ("invalid operand for code '%c'", code);
            return;
          }

        regno = regno - FIRST_VFP_REGNUM;
        fprintf (stream, "d%d[%d]", regno / 2, regno % 2);
      }
      return;
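
    /* For example (illustrative only): s3 prints as "d1[1]", the odd
       (high) half of d1.  */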

    /* Register specifier for vld1.16/vst1.16.  Translate the S register
       number into a D register number and element index.  */
    case 'z':
      {
        int mode = GET_MODE (x);
        int regno;

        if (GET_MODE_SIZE (mode) != 2 || GET_CODE (x) != REG)
          {
            output_operand_lossage ("invalid operand for code '%c'", code);
            return;
          }

        regno = REGNO (x);
        if (!VFP_REGNO_OK_FOR_SINGLE (regno))
          {
            output_operand_lossage ("invalid operand for code '%c'", code);
            return;
          }

        regno = regno - FIRST_VFP_REGNUM;
        fprintf (stream, "d%d[%d]", regno/2, ((regno % 2) ? 2 : 0));
      }
      return;

    default:
      if (x == 0)
        {
          output_operand_lossage ("missing operand");
          return;
        }

      switch (GET_CODE (x))
        {
        case REG:
          asm_fprintf (stream, "%r", REGNO (x));
          break;

        case MEM:
          output_memory_reference_mode = GET_MODE (x);
          output_address (XEXP (x, 0));
          break;

        case CONST_DOUBLE:
          if (TARGET_NEON)
            {
              char fpstr[20];
              real_to_decimal (fpstr, CONST_DOUBLE_REAL_VALUE (x),
                               sizeof (fpstr), 0, 1);
              fprintf (stream, "#%s", fpstr);
            }
          else
            fprintf (stream, "#%s", fp_immediate_constant (x));
          break;

        default:
          gcc_assert (GET_CODE (x) != NEG);
          fputc ('#', stream);
          if (GET_CODE (x) == HIGH)
            {
              fputs (":lower16:", stream);
              x = XEXP (x, 0);
            }

          output_addr_const (stream, x);
          break;
        }
    }
}

/* Target hook for assembling integer objects.  The ARM version needs to
   handle word-sized values specially.  */
static bool
arm_assemble_integer (rtx x, unsigned int size, int aligned_p)
{
  enum machine_mode mode;

  if (size == UNITS_PER_WORD && aligned_p)
    {
      fputs ("\t.word\t", asm_out_file);
      output_addr_const (asm_out_file, x);

      /* Mark symbols as position independent.  We only do this in the
         .text segment, not in the .data segment.  */
      if (NEED_GOT_RELOC && flag_pic && making_const_table &&
          (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF))
        {
          /* See legitimize_pic_address for an explanation of the
             TARGET_VXWORKS_RTP check.  */
          if (TARGET_VXWORKS_RTP
              || (GET_CODE (x) == SYMBOL_REF && !SYMBOL_REF_LOCAL_P (x)))
            fputs ("(GOT)", asm_out_file);
          else
            fputs ("(GOTOFF)", asm_out_file);
        }
      fputc ('\n', asm_out_file);
      return true;
    }

  mode = GET_MODE (x);

  if (arm_vector_mode_supported_p (mode))
    {
      int i, units;

      gcc_assert (GET_CODE (x) == CONST_VECTOR);

      units = CONST_VECTOR_NUNITS (x);
      size = GET_MODE_SIZE (GET_MODE_INNER (mode));

      if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
        for (i = 0; i < units; i++)
          {
            rtx elt = CONST_VECTOR_ELT (x, i);
            assemble_integer
              (elt, size, i == 0 ? BIGGEST_ALIGNMENT : size * BITS_PER_UNIT, 1);
          }
      else
        for (i = 0; i < units; i++)
          {
            rtx elt = CONST_VECTOR_ELT (x, i);
            REAL_VALUE_TYPE rval;

            REAL_VALUE_FROM_CONST_DOUBLE (rval, elt);

            assemble_real
              (rval, GET_MODE_INNER (mode),
              i == 0 ? BIGGEST_ALIGNMENT : size * BITS_PER_UNIT);
          }

      return true;
    }

  return default_assemble_integer (x, size, aligned_p);
}
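
/* For example (illustrative only): in a PIC constant pool, a word-sized
   reference to a non-locally-binding symbol "foo" assembles as
   ".word foo(GOT)", while a locally-binding one gets ".word foo(GOTOFF)".  */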

static void
arm_elf_asm_cdtor (rtx symbol, int priority, bool is_ctor)
{
  section *s;

  if (!TARGET_AAPCS_BASED)
    {
      (is_ctor ?
       default_named_section_asm_out_constructor
       : default_named_section_asm_out_destructor) (symbol, priority);
      return;
    }

  /* Put these in the .init_array section, using a special relocation.  */
  if (priority != DEFAULT_INIT_PRIORITY)
    {
      char buf[18];
      sprintf (buf, "%s.%.5u",
               is_ctor ? ".init_array" : ".fini_array",
               priority);
      s = get_section (buf, SECTION_WRITE, NULL_TREE);
    }
  else if (is_ctor)
    s = ctors_section;
  else
    s = dtors_section;

  switch_to_section (s);
  assemble_align (POINTER_SIZE);
  fputs ("\t.word\t", asm_out_file);
  output_addr_const (asm_out_file, symbol);
  fputs ("(target1)\n", asm_out_file);
}
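
/* For example (illustrative only): on an AAPCS target, a constructor with
   priority 101 lands in section ".init_array.00101" and is emitted as
   ".word <symbol>(target1)"; the (target1) relocation lets the linker
   choose between absolute and relative addressing.  */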

/* Add a function to the list of static constructors.  */

static void
arm_elf_asm_constructor (rtx symbol, int priority)
{
  arm_elf_asm_cdtor (symbol, priority, /*is_ctor=*/true);
}

/* Add a function to the list of static destructors.  */

static void
arm_elf_asm_destructor (rtx symbol, int priority)
{
  arm_elf_asm_cdtor (symbol, priority, /*is_ctor=*/false);
}

/* A finite state machine takes care of noticing whether or not instructions
   can be conditionally executed, thus decreasing execution time and code
   size by deleting branch instructions.  The fsm is controlled by
   final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE.  */

/* The states of the fsm controlling condition codes are:
   0: normal, do nothing special
   1: make ASM_OUTPUT_OPCODE not output this instruction
   2: make ASM_OUTPUT_OPCODE not output this instruction
   3: make instructions conditional
   4: make instructions conditional

   State transitions (state->state by whom under condition):
   0 -> 1 final_prescan_insn if the `target' is a label
   0 -> 2 final_prescan_insn if the `target' is an unconditional branch
   1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
   2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
   3 -> 0 (*targetm.asm_out.internal_label) if the `target' label is reached
          (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
   4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
          (the target insn is arm_target_insn).

   If the jump clobbers the conditions then we use states 2 and 4.

   A similar thing can be done with conditional return insns.

   XXX In case the `target' is an unconditional branch, this conditionalising
   of the instructions always reduces code size, but not always execution
   time.  But then, I want to reduce the code size to somewhere near what
   /bin/cc produces.  */
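
/* For example (a sketch, not taken verbatim from any compiler output):

       cmp   r0, #0
       beq   .L1
       add   r1, r1, #1
       add   r2, r2, #1
   .L1:

   can become

       cmp   r0, #0
       addne r1, r1, #1
       addne r2, r2, #1

   removing both the branch and the label.  */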

/* In addition to this, state is maintained for Thumb-2 COND_EXEC
   instructions.  When a COND_EXEC instruction is seen the subsequent
   instructions are scanned so that multiple conditional instructions can be
   combined into a single IT block.  arm_condexec_count and arm_condexec_mask
   specify the length and true/false mask for the IT block.  These will be
   decremented/zeroed by arm_asm_output_opcode as the insns are output.  */
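
/* For example (illustrative only): two conditionalised insns, the first
   EQ and the second NE, give arm_condexec_masklen == 2 with only bit 0
   set in arm_condexec_mask; thumb2_asm_output_opcode then emits the
   prefix "ite eq".  */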

/* Returns the index of the ARM condition code string in
   `arm_condition_codes'.  COMPARISON should be an rtx like
   `(eq (...) (...))'.  */
static enum arm_cond_code
get_arm_condition_code (rtx comparison)
{
  enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
  enum arm_cond_code code;
  enum rtx_code comp_code = GET_CODE (comparison);

  if (GET_MODE_CLASS (mode) != MODE_CC)
    mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
                           XEXP (comparison, 1));

  switch (mode)
    {
    case CC_DNEmode: code = ARM_NE; goto dominance;
    case CC_DEQmode: code = ARM_EQ; goto dominance;
    case CC_DGEmode: code = ARM_GE; goto dominance;
    case CC_DGTmode: code = ARM_GT; goto dominance;
    case CC_DLEmode: code = ARM_LE; goto dominance;
    case CC_DLTmode: code = ARM_LT; goto dominance;
    case CC_DGEUmode: code = ARM_CS; goto dominance;
    case CC_DGTUmode: code = ARM_HI; goto dominance;
    case CC_DLEUmode: code = ARM_LS; goto dominance;
    case CC_DLTUmode: code = ARM_CC;

    dominance:
      gcc_assert (comp_code == EQ || comp_code == NE);

      if (comp_code == EQ)
        return ARM_INVERSE_CONDITION_CODE (code);
      return code;

    case CC_NOOVmode:
      switch (comp_code)
        {
        case NE: return ARM_NE;
        case EQ: return ARM_EQ;
        case GE: return ARM_PL;
        case LT: return ARM_MI;
        default: gcc_unreachable ();
        }

    case CC_Zmode:
      switch (comp_code)
        {
        case NE: return ARM_NE;
        case EQ: return ARM_EQ;
        default: gcc_unreachable ();
        }

    case CC_Nmode:
      switch (comp_code)
        {
        case NE: return ARM_MI;
        case EQ: return ARM_PL;
        default: gcc_unreachable ();
        }

    case CCFPEmode:
    case CCFPmode:
      /* These encodings assume that AC=1 in the FPA system control
         byte.  This allows us to handle all cases except UNEQ and
         LTGT.  */
      switch (comp_code)
        {
        case GE: return ARM_GE;
        case GT: return ARM_GT;
        case LE: return ARM_LS;
        case LT: return ARM_MI;
        case NE: return ARM_NE;
        case EQ: return ARM_EQ;
        case ORDERED: return ARM_VC;
        case UNORDERED: return ARM_VS;
        case UNLT: return ARM_LT;
        case UNLE: return ARM_LE;
        case UNGT: return ARM_HI;
        case UNGE: return ARM_PL;
          /* UNEQ and LTGT do not have a representation.  */
        case UNEQ: /* Fall through.  */
        case LTGT: /* Fall through.  */
        default: gcc_unreachable ();
        }

    case CC_SWPmode:
      switch (comp_code)
        {
        case NE: return ARM_NE;
        case EQ: return ARM_EQ;
        case GE: return ARM_LE;
        case GT: return ARM_LT;
        case LE: return ARM_GE;
        case LT: return ARM_GT;
        case GEU: return ARM_LS;
        case GTU: return ARM_CC;
        case LEU: return ARM_CS;
        case LTU: return ARM_HI;
        default: gcc_unreachable ();
        }

    case CC_Cmode:
      switch (comp_code)
      {
      case LTU: return ARM_CS;
      case GEU: return ARM_CC;
      default: gcc_unreachable ();
      }

    case CCmode:
      switch (comp_code)
        {
        case NE: return ARM_NE;
        case EQ: return ARM_EQ;
        case GE: return ARM_GE;
        case GT: return ARM_GT;
        case LE: return ARM_LE;
        case LT: return ARM_LT;
        case GEU: return ARM_CS;
        case GTU: return ARM_HI;
        case LEU: return ARM_LS;
        case LTU: return ARM_CC;
        default: gcc_unreachable ();
        }

    default: gcc_unreachable ();
    }
}

/* Tell arm_asm_output_opcode to output IT blocks for conditionally executed
   instructions.  */
void
thumb2_final_prescan_insn (rtx insn)
{
  rtx first_insn = insn;
  rtx body = PATTERN (insn);
  rtx predicate;
  enum arm_cond_code code;
  int n;
  int mask;

  /* Remove the previous insn from the count of insns to be output.  */
  if (arm_condexec_count)
    arm_condexec_count--;

  /* Nothing to do if we are already inside a conditional block.  */
  if (arm_condexec_count)
    return;

  if (GET_CODE (body) != COND_EXEC)
    return;

  /* Conditional jumps are implemented directly.  */
  if (GET_CODE (insn) == JUMP_INSN)
    return;

  predicate = COND_EXEC_TEST (body);
  arm_current_cc = get_arm_condition_code (predicate);

  n = get_attr_ce_count (insn);
  arm_condexec_count = 1;
  arm_condexec_mask = (1 << n) - 1;
  arm_condexec_masklen = n;
  /* See if subsequent instructions can be combined into the same block.  */
  for (;;)
    {
      insn = next_nonnote_insn (insn);

      /* Jumping into the middle of an IT block is illegal, so a label or
         barrier terminates the block.  */
      if (GET_CODE (insn) != INSN && GET_CODE (insn) != JUMP_INSN)
        break;

      body = PATTERN (insn);
      /* USE and CLOBBER aren't really insns, so just skip them.  */
      if (GET_CODE (body) == USE
          || GET_CODE (body) == CLOBBER)
        continue;

      /* ??? Recognize conditional jumps, and combine them with IT blocks.  */
      if (GET_CODE (body) != COND_EXEC)
        break;
      /* Allow up to 4 conditionally executed instructions in a block.  */
      n = get_attr_ce_count (insn);
      if (arm_condexec_masklen + n > 4)
        break;

      predicate = COND_EXEC_TEST (body);
      code = get_arm_condition_code (predicate);
      mask = (1 << n) - 1;
      if (arm_current_cc == code)
        arm_condexec_mask |= (mask << arm_condexec_masklen);
      else if (arm_current_cc != ARM_INVERSE_CONDITION_CODE (code))
        break;

      arm_condexec_count++;
      arm_condexec_masklen += n;

      /* A jump must be the last instruction in a conditional block.  */
      if (GET_CODE (insn) == JUMP_INSN)
        break;
    }
  /* Restore recog_data (getting the attributes of other insns can
     destroy this array, but final.c assumes that it remains intact
     across this call).  */
  extract_constrain_insn_cached (first_insn);
}

void
arm_final_prescan_insn (rtx insn)
{
  /* BODY will hold the body of INSN.  */
  rtx body = PATTERN (insn);

  /* This will be 1 if trying to repeat the trick, and things need to be
     reversed if it appears to fail.  */
  int reverse = 0;

  /* If we start with a return insn, we only succeed if we find another one.  */
  int seeking_return = 0;

  /* START_INSN will hold the insn from where we start looking.  This is the
     first insn after the following code_label if REVERSE is true.  */
  rtx start_insn = insn;

  /* If in state 4, check if the target branch is reached, in order to
     change back to state 0.  */
  if (arm_ccfsm_state == 4)
    {
      if (insn == arm_target_insn)
        {
          arm_target_insn = NULL;
          arm_ccfsm_state = 0;
        }
      return;
    }

  /* If in state 3, it is possible to repeat the trick, if this insn is an
     unconditional branch to a label, and immediately following this branch
     is the previous target label which is only used once, and the label this
     branch jumps to is not too far off.  */
  if (arm_ccfsm_state == 3)
    {
      if (simplejump_p (insn))
        {
          start_insn = next_nonnote_insn (start_insn);
          if (GET_CODE (start_insn) == BARRIER)
            {
              /* XXX Isn't this always a barrier?  */
              start_insn = next_nonnote_insn (start_insn);
            }
          if (GET_CODE (start_insn) == CODE_LABEL
              && CODE_LABEL_NUMBER (start_insn) == arm_target_label
              && LABEL_NUSES (start_insn) == 1)
            reverse = TRUE;
          else
            return;
        }
      else if (GET_CODE (body) == RETURN)
        {
          start_insn = next_nonnote_insn (start_insn);
          if (GET_CODE (start_insn) == BARRIER)
            start_insn = next_nonnote_insn (start_insn);
          if (GET_CODE (start_insn) == CODE_LABEL
              && CODE_LABEL_NUMBER (start_insn) == arm_target_label
              && LABEL_NUSES (start_insn) == 1)
            {
              reverse = TRUE;
              seeking_return = 1;
            }
          else
            return;
        }
      else
        return;
    }

  gcc_assert (!arm_ccfsm_state || reverse);
  if (GET_CODE (insn) != JUMP_INSN)
    return;

  /* This jump might be paralleled with a clobber of the condition codes;
     the jump should always come first.  */
  if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
    body = XVECEXP (body, 0, 0);

  if (reverse
      || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
          && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
    {
      int insns_skipped;
      int fail = FALSE, succeed = FALSE;
      /* Flag which part of the IF_THEN_ELSE is the LABEL_REF.  */
      int then_not_else = TRUE;
      rtx this_insn = start_insn, label = 0;

      /* Register the insn jumped to.  */
      if (reverse)
        {
          if (!seeking_return)
            label = XEXP (SET_SRC (body), 0);
        }
      else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
        label = XEXP (XEXP (SET_SRC (body), 1), 0);
      else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
        {
          label = XEXP (XEXP (SET_SRC (body), 2), 0);
          then_not_else = FALSE;
        }
      else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
        seeking_return = 1;
      else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
        {
          seeking_return = 1;
          then_not_else = FALSE;
        }
      else
        gcc_unreachable ();

      /* See how many insns this branch skips, and what kind of insns.  If all
         insns are okay, and the label or unconditional branch to the same
         label is not too far away, succeed.  */
      for (insns_skipped = 0;
           !fail && !succeed && insns_skipped++ < max_insns_skipped;)
        {
          rtx scanbody;

          this_insn = next_nonnote_insn (this_insn);
          if (!this_insn)
            break;

          switch (GET_CODE (this_insn))
            {
            case CODE_LABEL:
              /* Succeed if it is the target label, otherwise fail since
                 control falls in from somewhere else.  */
              if (this_insn == label)
                {
                  arm_ccfsm_state = 1;
                  succeed = TRUE;
                }
              else
                fail = TRUE;
              break;

            case BARRIER:
              /* Succeed if the following insn is the target label.
                 Otherwise fail.
                 If return insns are used then the last insn in a function
                 will be a barrier.  */
              this_insn = next_nonnote_insn (this_insn);
              if (this_insn && this_insn == label)
                {
                  arm_ccfsm_state = 1;
                  succeed = TRUE;
                }
              else
                fail = TRUE;
              break;

            case CALL_INSN:
              /* The AAPCS says that conditional calls should not be
                 used since they make interworking inefficient (the
                 linker can't transform BL<cond> into BLX).  That's
                 only a problem if the machine has BLX.  */
              if (arm_arch5)
                {
                  fail = TRUE;
                  break;
                }

              /* Succeed if the following insn is the target label, or
                 if the following two insns are a barrier and the
                 target label.  */
              this_insn = next_nonnote_insn (this_insn);
              if (this_insn && GET_CODE (this_insn) == BARRIER)
                this_insn = next_nonnote_insn (this_insn);

              if (this_insn && this_insn == label
                  && insns_skipped < max_insns_skipped)
                {
                  arm_ccfsm_state = 1;
                  succeed = TRUE;
                }
              else
                fail = TRUE;
              break;

            case JUMP_INSN:
              /* If this is an unconditional branch to the same label, succeed.
                 If it is to another label, do nothing.  If it is conditional,
                 fail.  */
              /* XXX Probably, the tests for SET and the PC are
                 unnecessary.  */

              scanbody = PATTERN (this_insn);
              if (GET_CODE (scanbody) == SET
                  && GET_CODE (SET_DEST (scanbody)) == PC)
                {
                  if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
                      && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
                    {
                      arm_ccfsm_state = 2;
                      succeed = TRUE;
                    }
                  else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
                    fail = TRUE;
                }
              /* Fail if a conditional return is undesirable (e.g. on a
                 StrongARM), but still allow this if optimizing for size.  */
              else if (GET_CODE (scanbody) == RETURN
                       && !use_return_insn (TRUE, NULL)
                       && !optimize_size)
                fail = TRUE;
              else if (GET_CODE (scanbody) == RETURN
                       && seeking_return)
                {
                  arm_ccfsm_state = 2;
                  succeed = TRUE;
                }
              else if (GET_CODE (scanbody) == PARALLEL)
                {
                  switch (get_attr_conds (this_insn))
                    {
                    case CONDS_NOCOND:
                      break;
                    default:
                      fail = TRUE;
                      break;
                    }
                }
              else
                fail = TRUE;    /* Unrecognized jump (e.g. epilogue).  */

              break;

            case INSN:
              /* Instructions using or affecting the condition codes make it
                 fail.  */
              scanbody = PATTERN (this_insn);
              if (!(GET_CODE (scanbody) == SET
                    || GET_CODE (scanbody) == PARALLEL)
                  || get_attr_conds (this_insn) != CONDS_NOCOND)
                fail = TRUE;

              /* A conditional cirrus instruction must be followed by
                 a non Cirrus instruction.  However, since we
                 conditionalize instructions in this function and by
                 the time we get here we can't add instructions
                 (nops), because shorten_branches() has already been
                 called, we will disable conditionalizing Cirrus
                 instructions to be safe.  */
              if (GET_CODE (scanbody) != USE
                  && GET_CODE (scanbody) != CLOBBER
                  && get_attr_cirrus (this_insn) != CIRRUS_NOT)
                fail = TRUE;
              break;

            default:
              break;
            }
        }
      if (succeed)
        {
          if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
            arm_target_label = CODE_LABEL_NUMBER (label);
          else
            {
              gcc_assert (seeking_return || arm_ccfsm_state == 2);

              while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
                {
                  this_insn = next_nonnote_insn (this_insn);
                  gcc_assert (!this_insn
                              || (GET_CODE (this_insn) != BARRIER
                                  && GET_CODE (this_insn) != CODE_LABEL));
                }
              if (!this_insn)
                {
                  /* Oh, dear!  We ran off the end... give up.  */
                  extract_constrain_insn_cached (insn);
                  arm_ccfsm_state = 0;
                  arm_target_insn = NULL;
                  return;
                }
              arm_target_insn = this_insn;
            }

          /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
             what it was.  */
          if (!reverse)
            arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body), 0));

          if (reverse || then_not_else)
            arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
        }

      /* Restore recog_data (getting the attributes of other insns can
         destroy this array, but final.c assumes that it remains intact
         across this call).  */
      extract_constrain_insn_cached (insn);
    }
}

/* Output IT instructions.  */
void
thumb2_asm_output_opcode (FILE * stream)
{
  char buff[5];
  int n;

  if (arm_condexec_mask)
    {
      for (n = 0; n < arm_condexec_masklen; n++)
        buff[n] = (arm_condexec_mask & (1 << n)) ? 't' : 'e';
      buff[n] = 0;
      asm_fprintf (stream, "i%s\t%s\n\t", buff,
                   arm_condition_codes[arm_current_cc]);
      arm_condexec_mask = 0;
    }
}
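
/* For example (illustrative only): with arm_current_cc == ARM_EQ,
   arm_condexec_masklen == 4 and mask bits 0b1011 (third insn inverted),
   buff becomes "ttet" and the emitted prefix is "ittet eq".  */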

/* Returns true if REGNO is a valid register
   for holding a quantity of type MODE.  */
int
arm_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
{
  if (GET_MODE_CLASS (mode) == MODE_CC)
    return (regno == CC_REGNUM
            || (TARGET_HARD_FLOAT && TARGET_VFP
                && regno == VFPCC_REGNUM));

  if (TARGET_THUMB1)
    /* For the Thumb we only allow values bigger than SImode in
       registers 0 - 6, so that there is always a second low
       register available to hold the upper part of the value.
       We probably ought to ensure that the register is the
       start of an even numbered register pair.  */
    return (ARM_NUM_REGS (mode) < 2) || (regno < LAST_LO_REGNUM);

  if (TARGET_HARD_FLOAT && TARGET_MAVERICK
      && IS_CIRRUS_REGNUM (regno))
    /* We have outlawed SI values in Cirrus registers because they
       reside in the lower 32 bits, but SF values reside in the
       upper 32 bits.  This causes gcc all sorts of grief.  We can't
       even split the registers into pairs because Cirrus SI values
       get sign extended to 64 bits -- aldyh.  */
    return (GET_MODE_CLASS (mode) == MODE_FLOAT) || (mode == DImode);

  if (TARGET_HARD_FLOAT && TARGET_VFP
      && IS_VFP_REGNUM (regno))
    {
      if (mode == SFmode || mode == SImode)
        return VFP_REGNO_OK_FOR_SINGLE (regno);

      if (mode == DFmode)
        return VFP_REGNO_OK_FOR_DOUBLE (regno);

      /* VFP registers can hold HFmode values, but there is no point in
         putting them there unless we have hardware conversion insns.  */
      if (mode == HFmode)
        return TARGET_FP16 && VFP_REGNO_OK_FOR_SINGLE (regno);

      if (TARGET_NEON)
        return (VALID_NEON_DREG_MODE (mode) && VFP_REGNO_OK_FOR_DOUBLE (regno))
               || (VALID_NEON_QREG_MODE (mode)
                   && NEON_REGNO_OK_FOR_QUAD (regno))
               || (mode == TImode && NEON_REGNO_OK_FOR_NREGS (regno, 2))
               || (mode == EImode && NEON_REGNO_OK_FOR_NREGS (regno, 3))
               || (mode == OImode && NEON_REGNO_OK_FOR_NREGS (regno, 4))
               || (mode == CImode && NEON_REGNO_OK_FOR_NREGS (regno, 6))
               || (mode == XImode && NEON_REGNO_OK_FOR_NREGS (regno, 8));

      return FALSE;
    }

  if (TARGET_REALLY_IWMMXT)
    {
      if (IS_IWMMXT_GR_REGNUM (regno))
        return mode == SImode;

      if (IS_IWMMXT_REGNUM (regno))
        return VALID_IWMMXT_REG_MODE (mode);
    }

  /* We allow almost any value to be stored in the general registers.
     Restrict doubleword quantities to even register pairs so that we can
     use ldrd.  Do not allow very large Neon structure opaque modes in
     general registers; they would use too many.  */
  if (regno <= LAST_ARM_REGNUM)
    return !(TARGET_LDRD && GET_MODE_SIZE (mode) > 4 && (regno & 1) != 0)
      && ARM_NUM_REGS (mode) <= 4;

  if (regno == FRAME_POINTER_REGNUM
      || regno == ARG_POINTER_REGNUM)
    /* We only allow integers in the fake hard registers.  */
    return GET_MODE_CLASS (mode) == MODE_INT;

  /* The only registers left are the FPA registers
     which we only allow to hold FP values.  */
  return (TARGET_HARD_FLOAT && TARGET_FPA
          && GET_MODE_CLASS (mode) == MODE_FLOAT
          && regno >= FIRST_FPA_REGNUM
          && regno <= LAST_FPA_REGNUM);
}
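
/* For example (illustrative only): on Thumb-1 a DImode value occupies two
   registers, so it is accepted in r0-r6 (a second low register is always
   available for the upper word) but rejected in r7 and above.  */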

/* For efficiency and historical reasons LO_REGS, HI_REGS and CC_REGS are
   not used in arm mode.  */

enum reg_class
arm_regno_class (int regno)
{
  if (TARGET_THUMB1)
    {
      if (regno == STACK_POINTER_REGNUM)
        return STACK_REG;
      if (regno == CC_REGNUM)
        return CC_REG;
      if (regno < 8)
        return LO_REGS;
      return HI_REGS;
    }

  if (TARGET_THUMB2 && regno < 8)
    return LO_REGS;

  if (   regno <= LAST_ARM_REGNUM
      || regno == FRAME_POINTER_REGNUM
      || regno == ARG_POINTER_REGNUM)
    return TARGET_THUMB2 ? HI_REGS : GENERAL_REGS;

  if (regno == CC_REGNUM || regno == VFPCC_REGNUM)
    return TARGET_THUMB2 ? CC_REG : NO_REGS;

  if (IS_CIRRUS_REGNUM (regno))
    return CIRRUS_REGS;

  if (IS_VFP_REGNUM (regno))
    {
      if (regno <= D7_VFP_REGNUM)
        return VFP_D0_D7_REGS;
      else if (regno <= LAST_LO_VFP_REGNUM)
        return VFP_LO_REGS;
      else
        return VFP_HI_REGS;
    }

  if (IS_IWMMXT_REGNUM (regno))
    return IWMMXT_REGS;

  if (IS_IWMMXT_GR_REGNUM (regno))
    return IWMMXT_GR_REGS;

  return FPA_REGS;
}

/* Handle a special case when computing the offset
   of an argument from the frame pointer.  */
int
arm_debugger_arg_offset (int value, rtx addr)
{
  rtx insn;

  /* We are only interested if dbxout_parms() failed to compute the offset.  */
  if (value != 0)
    return 0;

  /* We can only cope with the case where the address is held in a register.  */
  if (GET_CODE (addr) != REG)
    return 0;

  /* If we are using the frame pointer to point at the argument, then
     an offset of 0 is correct.  */
  if (REGNO (addr) == (unsigned) HARD_FRAME_POINTER_REGNUM)
    return 0;

  /* If we are using the stack pointer to point at the
     argument, then an offset of 0 is correct.  */
  /* ??? Check this is consistent with thumb2 frame layout.  */
  if ((TARGET_THUMB || !frame_pointer_needed)
      && REGNO (addr) == SP_REGNUM)
    return 0;

  /* Oh dear.  The argument is pointed to by a register rather
     than being held in a register, or being stored at a known
     offset from the frame pointer.  Since GDB only understands
     those two kinds of argument we must translate the address
     held in the register into an offset from the frame pointer.
     We do this by searching through the insns for the function
     looking to see where this register gets its value.  If the
     register is initialized from the frame pointer plus an offset
     then we are in luck and we can continue, otherwise we give up.

     This code is exercised by producing debugging information
     for a function with arguments like this:

           double func (double a, double b, int c, double d) {return d;}

     Without this code the stab for parameter 'd' will be set to
     an offset of 0 from the frame pointer, rather than 8.  */

  /* The if() statement says:

     If the insn is a normal instruction
     and if the insn is setting the value in a register
     and if the register being set is the register holding the address of the argument
     and if the address is computed by an addition
     that involves adding to a register
     which is the frame pointer
     a constant integer

     then...  */

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      if (   GET_CODE (insn) == INSN
          && GET_CODE (PATTERN (insn)) == SET
          && REGNO    (XEXP (PATTERN (insn), 0)) == REGNO (addr)
          && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
          && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
          && REGNO    (XEXP (XEXP (PATTERN (insn), 1), 0)) == (unsigned) HARD_FRAME_POINTER_REGNUM
          && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
             )
        {
          value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));

          break;
        }
    }

  if (value == 0)
    {
      debug_rtx (addr);
      warning (0, "unable to compute real location of stacked parameter");
      value = 8; /* XXX magic hack */
    }

  return value;
}

#define def_mbuiltin(MASK, NAME, TYPE, CODE)                            \
  do                                                                    \
    {                                                                   \
      if ((MASK) & insn_flags)                                          \
        add_builtin_function ((NAME), (TYPE), (CODE),                   \
                             BUILT_IN_MD, NULL, NULL_TREE);             \
    }                                                                   \
  while (0)
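
/* An illustrative call (names as used by the iWMMXt setup code):

     def_mbuiltin (FL_IWMMXT, "__builtin_arm_setwcx",
                   void_ftype_int_int, ARM_BUILTIN_SETWCX);

   The builtin is only registered when FL_IWMMXT is present in
   insn_flags.  */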

struct builtin_description
{
  const unsigned int       mask;
  const enum insn_code     icode;
  const char * const       name;
  const enum arm_builtins  code;
  const enum rtx_code      comparison;
  const unsigned int       flag;
};

static const struct builtin_description bdesc_2arg[] =
{
#define IWMMXT_BUILTIN(code, string, builtin) \
  { FL_IWMMXT, CODE_FOR_##code, "__builtin_arm_" string, \
    ARM_BUILTIN_##builtin, UNKNOWN, 0 },

  IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB)
  IWMMXT_BUILTIN (addv4hi3, "waddh", WADDH)
  IWMMXT_BUILTIN (addv2si3, "waddw", WADDW)
  IWMMXT_BUILTIN (subv8qi3, "wsubb", WSUBB)
  IWMMXT_BUILTIN (subv4hi3, "wsubh", WSUBH)
  IWMMXT_BUILTIN (subv2si3, "wsubw", WSUBW)
  IWMMXT_BUILTIN (ssaddv8qi3, "waddbss", WADDSSB)
  IWMMXT_BUILTIN (ssaddv4hi3, "waddhss", WADDSSH)
  IWMMXT_BUILTIN (ssaddv2si3, "waddwss", WADDSSW)
  IWMMXT_BUILTIN (sssubv8qi3, "wsubbss", WSUBSSB)
  IWMMXT_BUILTIN (sssubv4hi3, "wsubhss", WSUBSSH)
  IWMMXT_BUILTIN (sssubv2si3, "wsubwss", WSUBSSW)
  IWMMXT_BUILTIN (usaddv8qi3, "waddbus", WADDUSB)
  IWMMXT_BUILTIN (usaddv4hi3, "waddhus", WADDUSH)
  IWMMXT_BUILTIN (usaddv2si3, "waddwus", WADDUSW)
  IWMMXT_BUILTIN (ussubv8qi3, "wsubbus", WSUBUSB)
  IWMMXT_BUILTIN (ussubv4hi3, "wsubhus", WSUBUSH)
  IWMMXT_BUILTIN (ussubv2si3, "wsubwus", WSUBUSW)
  IWMMXT_BUILTIN (mulv4hi3, "wmulul", WMULUL)
  IWMMXT_BUILTIN (smulv4hi3_highpart, "wmulsm", WMULSM)
  IWMMXT_BUILTIN (umulv4hi3_highpart, "wmulum", WMULUM)
  IWMMXT_BUILTIN (eqv8qi3, "wcmpeqb", WCMPEQB)
  IWMMXT_BUILTIN (eqv4hi3, "wcmpeqh", WCMPEQH)
  IWMMXT_BUILTIN (eqv2si3, "wcmpeqw", WCMPEQW)
  IWMMXT_BUILTIN (gtuv8qi3, "wcmpgtub", WCMPGTUB)
  IWMMXT_BUILTIN (gtuv4hi3, "wcmpgtuh", WCMPGTUH)
  IWMMXT_BUILTIN (gtuv2si3, "wcmpgtuw", WCMPGTUW)
  IWMMXT_BUILTIN (gtv8qi3, "wcmpgtsb", WCMPGTSB)
  IWMMXT_BUILTIN (gtv4hi3, "wcmpgtsh", WCMPGTSH)
  IWMMXT_BUILTIN (gtv2si3, "wcmpgtsw", WCMPGTSW)
  IWMMXT_BUILTIN (umaxv8qi3, "wmaxub", WMAXUB)
  IWMMXT_BUILTIN (smaxv8qi3, "wmaxsb", WMAXSB)
  IWMMXT_BUILTIN (umaxv4hi3, "wmaxuh", WMAXUH)
  IWMMXT_BUILTIN (smaxv4hi3, "wmaxsh", WMAXSH)
  IWMMXT_BUILTIN (umaxv2si3, "wmaxuw", WMAXUW)
  IWMMXT_BUILTIN (smaxv2si3, "wmaxsw", WMAXSW)
  IWMMXT_BUILTIN (uminv8qi3, "wminub", WMINUB)
  IWMMXT_BUILTIN (sminv8qi3, "wminsb", WMINSB)
  IWMMXT_BUILTIN (uminv4hi3, "wminuh", WMINUH)
  IWMMXT_BUILTIN (sminv4hi3, "wminsh", WMINSH)
  IWMMXT_BUILTIN (uminv2si3, "wminuw", WMINUW)
  IWMMXT_BUILTIN (sminv2si3, "wminsw", WMINSW)
  IWMMXT_BUILTIN (iwmmxt_anddi3, "wand", WAND)
  IWMMXT_BUILTIN (iwmmxt_nanddi3, "wandn", WANDN)
  IWMMXT_BUILTIN (iwmmxt_iordi3, "wor", WOR)
  IWMMXT_BUILTIN (iwmmxt_xordi3, "wxor", WXOR)
  IWMMXT_BUILTIN (iwmmxt_uavgv8qi3, "wavg2b", WAVG2B)
  IWMMXT_BUILTIN (iwmmxt_uavgv4hi3, "wavg2h", WAVG2H)
  IWMMXT_BUILTIN (iwmmxt_uavgrndv8qi3, "wavg2br", WAVG2BR)
  IWMMXT_BUILTIN (iwmmxt_uavgrndv4hi3, "wavg2hr", WAVG2HR)
  IWMMXT_BUILTIN (iwmmxt_wunpckilb, "wunpckilb", WUNPCKILB)
  IWMMXT_BUILTIN (iwmmxt_wunpckilh, "wunpckilh", WUNPCKILH)
  IWMMXT_BUILTIN (iwmmxt_wunpckilw, "wunpckilw", WUNPCKILW)
  IWMMXT_BUILTIN (iwmmxt_wunpckihb, "wunpckihb", WUNPCKIHB)
  IWMMXT_BUILTIN (iwmmxt_wunpckihh, "wunpckihh", WUNPCKIHH)
  IWMMXT_BUILTIN (iwmmxt_wunpckihw, "wunpckihw", WUNPCKIHW)
  IWMMXT_BUILTIN (iwmmxt_wmadds, "wmadds", WMADDS)
  IWMMXT_BUILTIN (iwmmxt_wmaddu, "wmaddu", WMADDU)

#define IWMMXT_BUILTIN2(code, builtin) \
  { FL_IWMMXT, CODE_FOR_##code, NULL, ARM_BUILTIN_##builtin, UNKNOWN, 0 },

  IWMMXT_BUILTIN2 (iwmmxt_wpackhss, WPACKHSS)
  IWMMXT_BUILTIN2 (iwmmxt_wpackwss, WPACKWSS)
  IWMMXT_BUILTIN2 (iwmmxt_wpackdss, WPACKDSS)
  IWMMXT_BUILTIN2 (iwmmxt_wpackhus, WPACKHUS)
  IWMMXT_BUILTIN2 (iwmmxt_wpackwus, WPACKWUS)
  IWMMXT_BUILTIN2 (iwmmxt_wpackdus, WPACKDUS)
  IWMMXT_BUILTIN2 (ashlv4hi3_di,    WSLLH)
  IWMMXT_BUILTIN2 (ashlv4hi3_iwmmxt, WSLLHI)
  IWMMXT_BUILTIN2 (ashlv2si3_di,    WSLLW)
  IWMMXT_BUILTIN2 (ashlv2si3_iwmmxt, WSLLWI)
  IWMMXT_BUILTIN2 (ashldi3_di,      WSLLD)
  IWMMXT_BUILTIN2 (ashldi3_iwmmxt,  WSLLDI)
  IWMMXT_BUILTIN2 (lshrv4hi3_di,    WSRLH)
  IWMMXT_BUILTIN2 (lshrv4hi3_iwmmxt, WSRLHI)
  IWMMXT_BUILTIN2 (lshrv2si3_di,    WSRLW)
  IWMMXT_BUILTIN2 (lshrv2si3_iwmmxt, WSRLWI)
  IWMMXT_BUILTIN2 (lshrdi3_di,      WSRLD)
  IWMMXT_BUILTIN2 (lshrdi3_iwmmxt,  WSRLDI)
  IWMMXT_BUILTIN2 (ashrv4hi3_di,    WSRAH)
  IWMMXT_BUILTIN2 (ashrv4hi3_iwmmxt, WSRAHI)
  IWMMXT_BUILTIN2 (ashrv2si3_di,    WSRAW)
  IWMMXT_BUILTIN2 (ashrv2si3_iwmmxt, WSRAWI)
  IWMMXT_BUILTIN2 (ashrdi3_di,      WSRAD)
  IWMMXT_BUILTIN2 (ashrdi3_iwmmxt,  WSRADI)
  IWMMXT_BUILTIN2 (rorv4hi3_di,     WRORH)
  IWMMXT_BUILTIN2 (rorv4hi3,        WRORHI)
  IWMMXT_BUILTIN2 (rorv2si3_di,     WRORW)
  IWMMXT_BUILTIN2 (rorv2si3,        WRORWI)
  IWMMXT_BUILTIN2 (rordi3_di,       WRORD)
  IWMMXT_BUILTIN2 (rordi3,          WRORDI)
  IWMMXT_BUILTIN2 (iwmmxt_wmacuz,   WMACUZ)
  IWMMXT_BUILTIN2 (iwmmxt_wmacsz,   WMACSZ)
};

static const struct builtin_description bdesc_1arg[] =
{
  IWMMXT_BUILTIN (iwmmxt_tmovmskb, "tmovmskb", TMOVMSKB)
  IWMMXT_BUILTIN (iwmmxt_tmovmskh, "tmovmskh", TMOVMSKH)
  IWMMXT_BUILTIN (iwmmxt_tmovmskw, "tmovmskw", TMOVMSKW)
  IWMMXT_BUILTIN (iwmmxt_waccb, "waccb", WACCB)
  IWMMXT_BUILTIN (iwmmxt_wacch, "wacch", WACCH)
  IWMMXT_BUILTIN (iwmmxt_waccw, "waccw", WACCW)
  IWMMXT_BUILTIN (iwmmxt_wunpckehub, "wunpckehub", WUNPCKEHUB)
  IWMMXT_BUILTIN (iwmmxt_wunpckehuh, "wunpckehuh", WUNPCKEHUH)
  IWMMXT_BUILTIN (iwmmxt_wunpckehuw, "wunpckehuw", WUNPCKEHUW)
  IWMMXT_BUILTIN (iwmmxt_wunpckehsb, "wunpckehsb", WUNPCKEHSB)
  IWMMXT_BUILTIN (iwmmxt_wunpckehsh, "wunpckehsh", WUNPCKEHSH)
  IWMMXT_BUILTIN (iwmmxt_wunpckehsw, "wunpckehsw", WUNPCKEHSW)
  IWMMXT_BUILTIN (iwmmxt_wunpckelub, "wunpckelub", WUNPCKELUB)
  IWMMXT_BUILTIN (iwmmxt_wunpckeluh, "wunpckeluh", WUNPCKELUH)
  IWMMXT_BUILTIN (iwmmxt_wunpckeluw, "wunpckeluw", WUNPCKELUW)
  IWMMXT_BUILTIN (iwmmxt_wunpckelsb, "wunpckelsb", WUNPCKELSB)
  IWMMXT_BUILTIN (iwmmxt_wunpckelsh, "wunpckelsh", WUNPCKELSH)
  IWMMXT_BUILTIN (iwmmxt_wunpckelsw, "wunpckelsw", WUNPCKELSW)
};

/* Set up all the iWMMXt builtins.  This is
   not called if TARGET_IWMMXT is zero.  */

static void
arm_init_iwmmxt_builtins (void)
{
  const struct builtin_description * d;
  size_t i;
  tree endlink = void_list_node;

  tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
  tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
  tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);

  tree int_ftype_int
    = build_function_type (integer_type_node,
                           tree_cons (NULL_TREE, integer_type_node, endlink));
  tree v8qi_ftype_v8qi_v8qi_int
    = build_function_type (V8QI_type_node,
                           tree_cons (NULL_TREE, V8QI_type_node,
                                      tree_cons (NULL_TREE, V8QI_type_node,
                                                 tree_cons (NULL_TREE,
                                                            integer_type_node,
                                                            endlink))));
  tree v4hi_ftype_v4hi_int
    = build_function_type (V4HI_type_node,
                           tree_cons (NULL_TREE, V4HI_type_node,
                                      tree_cons (NULL_TREE, integer_type_node,
                                                 endlink)));
  tree v2si_ftype_v2si_int
    = build_function_type (V2SI_type_node,
                           tree_cons (NULL_TREE, V2SI_type_node,
                                      tree_cons (NULL_TREE, integer_type_node,
                                                 endlink)));
  tree v2si_ftype_di_di
    = build_function_type (V2SI_type_node,
                           tree_cons (NULL_TREE, long_long_integer_type_node,
                                      tree_cons (NULL_TREE, long_long_integer_type_node,
                                                 endlink)));
  tree di_ftype_di_int
    = build_function_type (long_long_integer_type_node,
                           tree_cons (NULL_TREE, long_long_integer_type_node,
                                      tree_cons (NULL_TREE, integer_type_node,
                                                 endlink)));
  tree di_ftype_di_int_int
    = build_function_type (long_long_integer_type_node,
                           tree_cons (NULL_TREE, long_long_integer_type_node,
                                      tree_cons (NULL_TREE, integer_type_node,
                                                 tree_cons (NULL_TREE,
                                                            integer_type_node,
                                                            endlink))));
  tree int_ftype_v8qi
    = build_function_type (integer_type_node,
                           tree_cons (NULL_TREE, V8QI_type_node,
                                      endlink));
  tree int_ftype_v4hi
    = build_function_type (integer_type_node,
                           tree_cons (NULL_TREE, V4HI_type_node,
                                      endlink));
  tree int_ftype_v2si
    = build_function_type (integer_type_node,
                           tree_cons (NULL_TREE, V2SI_type_node,
                                      endlink));
  tree int_ftype_v8qi_int
    = build_function_type (integer_type_node,
                           tree_cons (NULL_TREE, V8QI_type_node,
                                      tree_cons (NULL_TREE, integer_type_node,
                                                 endlink)));
  tree int_ftype_v4hi_int
    = build_function_type (integer_type_node,
                           tree_cons (NULL_TREE, V4HI_type_node,
                                      tree_cons (NULL_TREE, integer_type_node,
                                                 endlink)));
  tree int_ftype_v2si_int
    = build_function_type (integer_type_node,
                           tree_cons (NULL_TREE, V2SI_type_node,
                                      tree_cons (NULL_TREE, integer_type_node,
                                                 endlink)));
  tree v8qi_ftype_v8qi_int_int
    = build_function_type (V8QI_type_node,
                           tree_cons (NULL_TREE, V8QI_type_node,
                                      tree_cons (NULL_TREE, integer_type_node,
                                                 tree_cons (NULL_TREE,
                                                            integer_type_node,
                                                            endlink))));
  tree v4hi_ftype_v4hi_int_int
    = build_function_type (V4HI_type_node,
                           tree_cons (NULL_TREE, V4HI_type_node,
                                      tree_cons (NULL_TREE, integer_type_node,
                                                 tree_cons (NULL_TREE,
                                                            integer_type_node,
                                                            endlink))));
  tree v2si_ftype_v2si_int_int
    = build_function_type (V2SI_type_node,
                           tree_cons (NULL_TREE, V2SI_type_node,
                                      tree_cons (NULL_TREE, integer_type_node,
                                                 tree_cons (NULL_TREE,
                                                            integer_type_node,
                                                            endlink))));
  /* Miscellaneous.  */
  tree v8qi_ftype_v4hi_v4hi
    = build_function_type (V8QI_type_node,
                           tree_cons (NULL_TREE, V4HI_type_node,
                                      tree_cons (NULL_TREE, V4HI_type_node,
                                                 endlink)));
  tree v4hi_ftype_v2si_v2si
    = build_function_type (V4HI_type_node,
                           tree_cons (NULL_TREE, V2SI_type_node,
                                      tree_cons (NULL_TREE, V2SI_type_node,
                                                 endlink)));
  tree v2si_ftype_v4hi_v4hi
    = build_function_type (V2SI_type_node,
                           tree_cons (NULL_TREE, V4HI_type_node,
                                      tree_cons (NULL_TREE, V4HI_type_node,
                                                 endlink)));
  tree v2si_ftype_v8qi_v8qi
    = build_function_type (V2SI_type_node,
                           tree_cons (NULL_TREE, V8QI_type_node,
                                      tree_cons (NULL_TREE, V8QI_type_node,
                                                 endlink)));
  tree v4hi_ftype_v4hi_di
    = build_function_type (V4HI_type_node,
                           tree_cons (NULL_TREE, V4HI_type_node,
                                      tree_cons (NULL_TREE,
                                                 long_long_integer_type_node,
                                                 endlink)));
  tree v2si_ftype_v2si_di
    = build_function_type (V2SI_type_node,
                           tree_cons (NULL_TREE, V2SI_type_node,
                                      tree_cons (NULL_TREE,
                                                 long_long_integer_type_node,
                                                 endlink)));
  tree void_ftype_int_int
    = build_function_type (void_type_node,
                           tree_cons (NULL_TREE, integer_type_node,
                                      tree_cons (NULL_TREE, integer_type_node,
                                                 endlink)));
  tree di_ftype_void
    = build_function_type (long_long_unsigned_type_node, endlink);
  tree di_ftype_v8qi
    = build_function_type (long_long_integer_type_node,
                           tree_cons (NULL_TREE, V8QI_type_node,
                                      endlink));
  tree di_ftype_v4hi
    = build_function_type (long_long_integer_type_node,
                           tree_cons (NULL_TREE, V4HI_type_node,
                                      endlink));
  tree di_ftype_v2si
    = build_function_type (long_long_integer_type_node,
                           tree_cons (NULL_TREE, V2SI_type_node,
                                      endlink));
  tree v2si_ftype_v4hi
    = build_function_type (V2SI_type_node,
                           tree_cons (NULL_TREE, V4HI_type_node,
                                      endlink));
  tree v4hi_ftype_v8qi
    = build_function_type (V4HI_type_node,
                           tree_cons (NULL_TREE, V8QI_type_node,
                                      endlink));

  tree di_ftype_di_v4hi_v4hi
    = build_function_type (long_long_unsigned_type_node,
                           tree_cons (NULL_TREE,
                                      long_long_unsigned_type_node,
                                      tree_cons (NULL_TREE, V4HI_type_node,
                                                 tree_cons (NULL_TREE,
                                                            V4HI_type_node,
                                                            endlink))));

  tree di_ftype_v4hi_v4hi
    = build_function_type (long_long_unsigned_type_node,
                           tree_cons (NULL_TREE, V4HI_type_node,
                                      tree_cons (NULL_TREE, V4HI_type_node,
                                                 endlink)));

  /* Normal vector binops.  */
  tree v8qi_ftype_v8qi_v8qi
    = build_function_type (V8QI_type_node,
                           tree_cons (NULL_TREE, V8QI_type_node,
                                      tree_cons (NULL_TREE, V8QI_type_node,
                                                 endlink)));
  tree v4hi_ftype_v4hi_v4hi
    = build_function_type (V4HI_type_node,
                           tree_cons (NULL_TREE, V4HI_type_node,
                                      tree_cons (NULL_TREE, V4HI_type_node,
                                                 endlink)));
  tree v2si_ftype_v2si_v2si
    = build_function_type (V2SI_type_node,
                           tree_cons (NULL_TREE, V2SI_type_node,
                                      tree_cons (NULL_TREE, V2SI_type_node,
                                                 endlink)));
  tree di_ftype_di_di
    = build_function_type (long_long_unsigned_type_node,
                           tree_cons (NULL_TREE, long_long_unsigned_type_node,
                                                 long_long_unsigned_type_node,
16798
                                                 endlink)));
16799
 
16800
  /* Add all builtins that are more or less simple operations on two
16801
     operands.  */
16802
  for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
16803
    {
16804
      /* Use one of the operands; the target can have a different mode for
16805
         mask-generating compares.  */
16806
      enum machine_mode mode;
16807
      tree type;
16808
 
16809
      if (d->name == 0)
16810
        continue;
16811
 
16812
      mode = insn_data[d->icode].operand[1].mode;
16813
 
16814
      switch (mode)
16815
        {
16816
        case V8QImode:
16817
          type = v8qi_ftype_v8qi_v8qi;
16818
          break;
16819
        case V4HImode:
16820
          type = v4hi_ftype_v4hi_v4hi;
16821
          break;
16822
        case V2SImode:
16823
          type = v2si_ftype_v2si_v2si;
16824
          break;
16825
        case DImode:
16826
          type = di_ftype_di_di;
16827
          break;
16828
 
16829
        default:
16830
          gcc_unreachable ();
16831
        }
16832
 
16833
      def_mbuiltin (d->mask, d->name, type, d->code);
16834
    }
16835
 
16836
  /* Add the remaining MMX insns with somewhat more complicated types.  */
16837
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void, ARM_BUILTIN_WZERO);
16838
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_setwcx", void_ftype_int_int, ARM_BUILTIN_SETWCX);
16839
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_getwcx", int_ftype_int, ARM_BUILTIN_GETWCX);
16840
 
16841
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSLLH);
16842
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllw", v2si_ftype_v2si_di, ARM_BUILTIN_WSLLW);
16843
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslld", di_ftype_di_di, ARM_BUILTIN_WSLLD);
16844
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSLLHI);
16845
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSLLWI);
16846
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslldi", di_ftype_di_int, ARM_BUILTIN_WSLLDI);
16847
 
16848
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRLH);
16849
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRLW);
16850
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrld", di_ftype_di_di, ARM_BUILTIN_WSRLD);
16851
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRLHI);
16852
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRLWI);
16853
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrldi", di_ftype_di_int, ARM_BUILTIN_WSRLDI);
16854
 
16855
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrah", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRAH);
16856
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsraw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRAW);
16857
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrad", di_ftype_di_di, ARM_BUILTIN_WSRAD);
16858
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrahi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRAHI);
16859
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrawi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRAWI);
16860
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsradi", di_ftype_di_int, ARM_BUILTIN_WSRADI);
16861
 
16862
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WRORH);
16863
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorw", v2si_ftype_v2si_di, ARM_BUILTIN_WRORW);
16864
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrord", di_ftype_di_di, ARM_BUILTIN_WRORD);
16865
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WRORHI);
16866
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorwi", v2si_ftype_v2si_int, ARM_BUILTIN_WRORWI);
16867
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrordi", di_ftype_di_int, ARM_BUILTIN_WRORDI);
16868
 
16869
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wshufh", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSHUFH);
16870
 
16871
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadb", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADB);
16872
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadh", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADH);
16873
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadbz", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADBZ);
16874
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadhz", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADHZ);
16875
 
16876
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsb", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMSB);
16877
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMSH);
16878
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMSW);
16879
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmub", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMUB);
16880
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMUH);
16881
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMUW);
16882
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrb", v8qi_ftype_v8qi_int_int, ARM_BUILTIN_TINSRB);
16883
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrh", v4hi_ftype_v4hi_int_int, ARM_BUILTIN_TINSRH);
16884
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrw", v2si_ftype_v2si_int_int, ARM_BUILTIN_TINSRW);
16885
 
16886
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccb", di_ftype_v8qi, ARM_BUILTIN_WACCB);
16887
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wacch", di_ftype_v4hi, ARM_BUILTIN_WACCH);
16888
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccw", di_ftype_v2si, ARM_BUILTIN_WACCW);
16889
 
16890
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskb", int_ftype_v8qi, ARM_BUILTIN_TMOVMSKB);
16891
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskh", int_ftype_v4hi, ARM_BUILTIN_TMOVMSKH);
16892
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskw", int_ftype_v2si, ARM_BUILTIN_TMOVMSKW);
16893
 
16894
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhss", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHSS);
16895
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhus", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHUS);
16896
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwus", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWUS);
16897
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwss", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWSS);
16898
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdus", v2si_ftype_di_di, ARM_BUILTIN_WPACKDUS);
16899
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdss", v2si_ftype_di_di, ARM_BUILTIN_WPACKDSS);
16900
 
16901
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHUB);
16902
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHUH);
16903
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHUW);
16904
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHSB);
16905
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHSH);
16906
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHSW);
16907
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELUB);
16908
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELUH);
16909
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELUW);
16910
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELSB);
16911
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELSH);
16912
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELSW);
16913
 
16914
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacs", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACS);
16915
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacsz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACSZ);
16916
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacu", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACU);
16917
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacuz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACUZ);
16918
 
16919
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_walign", v8qi_ftype_v8qi_v8qi_int, ARM_BUILTIN_WALIGN);
16920
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmia", di_ftype_di_int_int, ARM_BUILTIN_TMIA);
16921
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiaph", di_ftype_di_int_int, ARM_BUILTIN_TMIAPH);
16922
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabb", di_ftype_di_int_int, ARM_BUILTIN_TMIABB);
16923
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabt", di_ftype_di_int_int, ARM_BUILTIN_TMIABT);
16924
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatb", di_ftype_di_int_int, ARM_BUILTIN_TMIATB);
16925
  def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatt", di_ftype_di_int_int, ARM_BUILTIN_TMIATT);
16926
}
16927
 
16928
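/* Editorial illustration (not part of the original source): once the
   registrations above have run, the iWMMXt builtins are directly callable
   from C when compiling for an iWMMXt target.  A minimal sketch using only
   builtins and signatures visible above, where __builtin_arm_wzero has
   type di_ftype_void and __builtin_arm_tmia has type di_ftype_di_int_int:

     long long
     mac_example (int a, int b)
     {
       long long acc = __builtin_arm_wzero ();
       return __builtin_arm_tmia (acc, a, b);
     }

   The vector-typed entries registered by the bdesc_2arg loop are normally
   reached through the mmintrin.h wrappers rather than called directly.  */
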
static void
arm_init_tls_builtins (void)
{
  tree ftype, decl;

  ftype = build_function_type (ptr_type_node, void_list_node);
  decl = add_builtin_function ("__builtin_thread_pointer", ftype,
                               ARM_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
                               NULL, NULL_TREE);
  TREE_NOTHROW (decl) = 1;
  TREE_READONLY (decl) = 1;
}

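/* Editorial illustration: the builtin registered above returns the TLS
   thread pointer as a void *; because the decl is marked TREE_NOTHROW and
   TREE_READONLY, repeated calls can be combined by the optimizers.  E.g.:

     void *tp = __builtin_thread_pointer ();  */
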
enum neon_builtin_type_bits {
  T_V8QI  = 0x0001,
  T_V4HI  = 0x0002,
  T_V2SI  = 0x0004,
  T_V2SF  = 0x0008,
  T_DI    = 0x0010,
  T_V16QI = 0x0020,
  T_V8HI  = 0x0040,
  T_V4SI  = 0x0080,
  T_V4SF  = 0x0100,
  T_V2DI  = 0x0200,
  T_TI    = 0x0400,
  T_EI    = 0x0800,
  T_OI    = 0x1000
};

#define v8qi_UP  T_V8QI
#define v4hi_UP  T_V4HI
#define v2si_UP  T_V2SI
#define v2sf_UP  T_V2SF
#define di_UP    T_DI
#define v16qi_UP T_V16QI
#define v8hi_UP  T_V8HI
#define v4si_UP  T_V4SI
#define v4sf_UP  T_V4SF
#define v2di_UP  T_V2DI
#define ti_UP    T_TI
#define ei_UP    T_EI
#define oi_UP    T_OI

#define UP(X) X##_UP

#define T_MAX 13

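/* Editorial note: T_MAX is simply the number of T_* flags defined above
   (T_V8QI through T_OI), and hence an upper bound on the number of
   per-mode insn codes a single neon_builtin_datum can carry.  */
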
typedef enum {
  NEON_BINOP,
  NEON_TERNOP,
  NEON_UNOP,
  NEON_GETLANE,
  NEON_SETLANE,
  NEON_CREATE,
  NEON_DUP,
  NEON_DUPLANE,
  NEON_COMBINE,
  NEON_SPLIT,
  NEON_LANEMUL,
  NEON_LANEMULL,
  NEON_LANEMULH,
  NEON_LANEMAC,
  NEON_SCALARMUL,
  NEON_SCALARMULL,
  NEON_SCALARMULH,
  NEON_SCALARMAC,
  NEON_CONVERT,
  NEON_FIXCONV,
  NEON_SELECT,
  NEON_RESULTPAIR,
  NEON_REINTERP,
  NEON_VTBL,
  NEON_VTBX,
  NEON_LOAD1,
  NEON_LOAD1LANE,
  NEON_STORE1,
  NEON_STORE1LANE,
  NEON_LOADSTRUCT,
  NEON_LOADSTRUCTLANE,
  NEON_STORESTRUCT,
  NEON_STORESTRUCTLANE,
  NEON_LOGICBINOP,
  NEON_SHIFTINSERT,
  NEON_SHIFTIMM,
  NEON_SHIFTACC
} neon_itype;

typedef struct {
  const char *name;
  const neon_itype itype;
  const int bits;
  const enum insn_code codes[T_MAX];
  const unsigned int num_vars;
  unsigned int base_fcode;
} neon_builtin_datum;

#define CF(N,X) CODE_FOR_neon_##N##X

#define VAR1(T, N, A) \
  #N, NEON_##T, UP (A), { CF (N, A) }, 1, 0
#define VAR2(T, N, A, B) \
  #N, NEON_##T, UP (A) | UP (B), { CF (N, A), CF (N, B) }, 2, 0
#define VAR3(T, N, A, B, C) \
  #N, NEON_##T, UP (A) | UP (B) | UP (C), \
  { CF (N, A), CF (N, B), CF (N, C) }, 3, 0
#define VAR4(T, N, A, B, C, D) \
  #N, NEON_##T, UP (A) | UP (B) | UP (C) | UP (D), \
  { CF (N, A), CF (N, B), CF (N, C), CF (N, D) }, 4, 0
#define VAR5(T, N, A, B, C, D, E) \
  #N, NEON_##T, UP (A) | UP (B) | UP (C) | UP (D) | UP (E), \
  { CF (N, A), CF (N, B), CF (N, C), CF (N, D), CF (N, E) }, 5, 0
#define VAR6(T, N, A, B, C, D, E, F) \
  #N, NEON_##T, UP (A) | UP (B) | UP (C) | UP (D) | UP (E) | UP (F), \
  { CF (N, A), CF (N, B), CF (N, C), CF (N, D), CF (N, E), CF (N, F) }, 6, 0
#define VAR7(T, N, A, B, C, D, E, F, G) \
  #N, NEON_##T, UP (A) | UP (B) | UP (C) | UP (D) | UP (E) | UP (F) | UP (G), \
  { CF (N, A), CF (N, B), CF (N, C), CF (N, D), CF (N, E), CF (N, F), \
    CF (N, G) }, 7, 0
#define VAR8(T, N, A, B, C, D, E, F, G, H) \
  #N, NEON_##T, UP (A) | UP (B) | UP (C) | UP (D) | UP (E) | UP (F) | UP (G) \
                | UP (H), \
  { CF (N, A), CF (N, B), CF (N, C), CF (N, D), CF (N, E), CF (N, F), \
    CF (N, G), CF (N, H) }, 8, 0
#define VAR9(T, N, A, B, C, D, E, F, G, H, I) \
  #N, NEON_##T, UP (A) | UP (B) | UP (C) | UP (D) | UP (E) | UP (F) | UP (G) \
                | UP (H) | UP (I), \
  { CF (N, A), CF (N, B), CF (N, C), CF (N, D), CF (N, E), CF (N, F), \
    CF (N, G), CF (N, H), CF (N, I) }, 9, 0
#define VAR10(T, N, A, B, C, D, E, F, G, H, I, J) \
  #N, NEON_##T, UP (A) | UP (B) | UP (C) | UP (D) | UP (E) | UP (F) | UP (G) \
                | UP (H) | UP (I) | UP (J), \
  { CF (N, A), CF (N, B), CF (N, C), CF (N, D), CF (N, E), CF (N, F), \
    CF (N, G), CF (N, H), CF (N, I), CF (N, J) }, 10, 0

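/* Editorial illustration: each VARn macro expands to the initializer of
   one neon_builtin_datum.  For instance

     VAR2 (BINOP, vaddl, v8qi, v4hi)

   expands to

     "vaddl", NEON_BINOP, T_V8QI | T_V4HI,
     { CODE_FOR_neon_vaddlv8qi, CODE_FOR_neon_vaddlv4hi }, 2, 0

   i.e. the builtin's name, its shape, a bitmask of supported key modes,
   the matching insn codes from neon.md, the number of variants, and a
   base_fcode to be filled in later.  */
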
/* The mode entries in the following table correspond to the "key" type of the
   instruction variant, i.e. equivalent to that which would be specified after
   the assembler mnemonic, which usually refers to the last vector operand.
   (Signed, unsigned and polynomial types are not distinguished, though; they
   are all mapped onto the same mode for a given element size.)  The modes
   listed per instruction should be the same as those defined for that
   instruction's pattern in neon.md.
   WARNING: Variants should be listed in the same increasing order as
   neon_builtin_type_bits.  */

static neon_builtin_datum neon_builtin_data[] =
{
  { VAR10 (BINOP, vadd,
           v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
  { VAR3 (BINOP, vaddl, v8qi, v4hi, v2si) },
  { VAR3 (BINOP, vaddw, v8qi, v4hi, v2si) },
  { VAR6 (BINOP, vhadd, v8qi, v4hi, v2si, v16qi, v8hi, v4si) },
  { VAR8 (BINOP, vqadd, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
  { VAR3 (BINOP, vaddhn, v8hi, v4si, v2di) },
  { VAR8 (BINOP, vmul, v8qi, v4hi, v2si, v2sf, v16qi, v8hi, v4si, v4sf) },
  { VAR8 (TERNOP, vmla, v8qi, v4hi, v2si, v2sf, v16qi, v8hi, v4si, v4sf) },
  { VAR3 (TERNOP, vmlal, v8qi, v4hi, v2si) },
  { VAR8 (TERNOP, vmls, v8qi, v4hi, v2si, v2sf, v16qi, v8hi, v4si, v4sf) },
  { VAR3 (TERNOP, vmlsl, v8qi, v4hi, v2si) },
  { VAR4 (BINOP, vqdmulh, v4hi, v2si, v8hi, v4si) },
  { VAR2 (TERNOP, vqdmlal, v4hi, v2si) },
  { VAR2 (TERNOP, vqdmlsl, v4hi, v2si) },
  { VAR3 (BINOP, vmull, v8qi, v4hi, v2si) },
  { VAR2 (SCALARMULL, vmull_n, v4hi, v2si) },
  { VAR2 (LANEMULL, vmull_lane, v4hi, v2si) },
  { VAR2 (SCALARMULL, vqdmull_n, v4hi, v2si) },
  { VAR2 (LANEMULL, vqdmull_lane, v4hi, v2si) },
  { VAR4 (SCALARMULH, vqdmulh_n, v4hi, v2si, v8hi, v4si) },
  { VAR4 (LANEMULH, vqdmulh_lane, v4hi, v2si, v8hi, v4si) },
  { VAR2 (BINOP, vqdmull, v4hi, v2si) },
  { VAR8 (BINOP, vshl, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
  { VAR8 (BINOP, vqshl, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
  { VAR8 (SHIFTIMM, vshr_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
  { VAR3 (SHIFTIMM, vshrn_n, v8hi, v4si, v2di) },
  { VAR3 (SHIFTIMM, vqshrn_n, v8hi, v4si, v2di) },
  { VAR3 (SHIFTIMM, vqshrun_n, v8hi, v4si, v2di) },
  { VAR8 (SHIFTIMM, vshl_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
  { VAR8 (SHIFTIMM, vqshl_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
  { VAR8 (SHIFTIMM, vqshlu_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
  { VAR3 (SHIFTIMM, vshll_n, v8qi, v4hi, v2si) },
  { VAR8 (SHIFTACC, vsra_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
  { VAR10 (BINOP, vsub,
           v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
  { VAR3 (BINOP, vsubl, v8qi, v4hi, v2si) },
  { VAR3 (BINOP, vsubw, v8qi, v4hi, v2si) },
  { VAR8 (BINOP, vqsub, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
  { VAR6 (BINOP, vhsub, v8qi, v4hi, v2si, v16qi, v8hi, v4si) },
  { VAR3 (BINOP, vsubhn, v8hi, v4si, v2di) },
  { VAR8 (BINOP, vceq, v8qi, v4hi, v2si, v2sf, v16qi, v8hi, v4si, v4sf) },
  { VAR8 (BINOP, vcge, v8qi, v4hi, v2si, v2sf, v16qi, v8hi, v4si, v4sf) },
  { VAR8 (BINOP, vcgt, v8qi, v4hi, v2si, v2sf, v16qi, v8hi, v4si, v4sf) },
  { VAR2 (BINOP, vcage, v2sf, v4sf) },
  { VAR2 (BINOP, vcagt, v2sf, v4sf) },
  { VAR6 (BINOP, vtst, v8qi, v4hi, v2si, v16qi, v8hi, v4si) },
  { VAR8 (BINOP, vabd, v8qi, v4hi, v2si, v2sf, v16qi, v8hi, v4si, v4sf) },
  { VAR3 (BINOP, vabdl, v8qi, v4hi, v2si) },
  { VAR6 (TERNOP, vaba, v8qi, v4hi, v2si, v16qi, v8hi, v4si) },
  { VAR3 (TERNOP, vabal, v8qi, v4hi, v2si) },
  { VAR8 (BINOP, vmax, v8qi, v4hi, v2si, v2sf, v16qi, v8hi, v4si, v4sf) },
  { VAR8 (BINOP, vmin, v8qi, v4hi, v2si, v2sf, v16qi, v8hi, v4si, v4sf) },
  { VAR4 (BINOP, vpadd, v8qi, v4hi, v2si, v2sf) },
  { VAR6 (UNOP, vpaddl, v8qi, v4hi, v2si, v16qi, v8hi, v4si) },
  { VAR6 (BINOP, vpadal, v8qi, v4hi, v2si, v16qi, v8hi, v4si) },
  { VAR4 (BINOP, vpmax, v8qi, v4hi, v2si, v2sf) },
  { VAR4 (BINOP, vpmin, v8qi, v4hi, v2si, v2sf) },
  { VAR2 (BINOP, vrecps, v2sf, v4sf) },
  { VAR2 (BINOP, vrsqrts, v2sf, v4sf) },
  { VAR8 (SHIFTINSERT, vsri_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
  { VAR8 (SHIFTINSERT, vsli_n, v8qi, v4hi, v2si, di, v16qi, v8hi, v4si, v2di) },
  { VAR8 (UNOP, vabs, v8qi, v4hi, v2si, v2sf, v16qi, v8hi, v4si, v4sf) },
  { VAR6 (UNOP, vqabs, v8qi, v4hi, v2si, v16qi, v8hi, v4si) },
  { VAR8 (UNOP, vneg, v8qi, v4hi, v2si, v2sf, v16qi, v8hi, v4si, v4sf) },
  { VAR6 (UNOP, vqneg, v8qi, v4hi, v2si, v16qi, v8hi, v4si) },
  { VAR6 (UNOP, vcls, v8qi, v4hi, v2si, v16qi, v8hi, v4si) },
  { VAR6 (UNOP, vclz, v8qi, v4hi, v2si, v16qi, v8hi, v4si) },
  { VAR2 (UNOP, vcnt, v8qi, v16qi) },
  { VAR4 (UNOP, vrecpe, v2si, v2sf, v4si, v4sf) },
  { VAR4 (UNOP, vrsqrte, v2si, v2sf, v4si, v4sf) },
  { VAR6 (UNOP, vmvn, v8qi, v4hi, v2si, v16qi, v8hi, v4si) },
  /* FIXME: vget_lane supports more variants than this!  */
  { VAR10 (GETLANE, vget_lane,
           v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
  { VAR10 (SETLANE, vset_lane,
           v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
  { VAR5 (CREATE, vcreate, v8qi, v4hi, v2si, v2sf, di) },
  { VAR10 (DUP, vdup_n,
           v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
  { VAR10 (DUPLANE, vdup_lane,
           v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
  { VAR5 (COMBINE, vcombine, v8qi, v4hi, v2si, v2sf, di) },
  { VAR5 (SPLIT, vget_high, v16qi, v8hi, v4si, v4sf, v2di) },
  { VAR5 (SPLIT, vget_low, v16qi, v8hi, v4si, v4sf, v2di) },
  { VAR3 (UNOP, vmovn, v8hi, v4si, v2di) },
  { VAR3 (UNOP, vqmovn, v8hi, v4si, v2di) },
  { VAR3 (UNOP, vqmovun, v8hi, v4si, v2di) },
  { VAR3 (UNOP, vmovl, v8qi, v4hi, v2si) },
  { VAR6 (LANEMUL, vmul_lane, v4hi, v2si, v2sf, v8hi, v4si, v4sf) },
  { VAR6 (LANEMAC, vmla_lane, v4hi, v2si, v2sf, v8hi, v4si, v4sf) },
  { VAR2 (LANEMAC, vmlal_lane, v4hi, v2si) },
  { VAR2 (LANEMAC, vqdmlal_lane, v4hi, v2si) },
  { VAR6 (LANEMAC, vmls_lane, v4hi, v2si, v2sf, v8hi, v4si, v4sf) },
  { VAR2 (LANEMAC, vmlsl_lane, v4hi, v2si) },
  { VAR2 (LANEMAC, vqdmlsl_lane, v4hi, v2si) },
  { VAR6 (SCALARMUL, vmul_n, v4hi, v2si, v2sf, v8hi, v4si, v4sf) },
  { VAR6 (SCALARMAC, vmla_n, v4hi, v2si, v2sf, v8hi, v4si, v4sf) },
  { VAR2 (SCALARMAC, vmlal_n, v4hi, v2si) },
  { VAR2 (SCALARMAC, vqdmlal_n, v4hi, v2si) },
  { VAR6 (SCALARMAC, vmls_n, v4hi, v2si, v2sf, v8hi, v4si, v4sf) },
  { VAR2 (SCALARMAC, vmlsl_n, v4hi, v2si) },
  { VAR2 (SCALARMAC, vqdmlsl_n, v4hi, v2si) },
  { VAR10 (BINOP, vext,
           v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
  { VAR8 (UNOP, vrev64, v8qi, v4hi, v2si, v2sf, v16qi, v8hi, v4si, v4sf) },
  { VAR4 (UNOP, vrev32, v8qi, v4hi, v16qi, v8hi) },
  { VAR2 (UNOP, vrev16, v8qi, v16qi) },
  { VAR4 (CONVERT, vcvt, v2si, v2sf, v4si, v4sf) },
  { VAR4 (FIXCONV, vcvt_n, v2si, v2sf, v4si, v4sf) },
  { VAR10 (SELECT, vbsl,
           v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
  { VAR1 (VTBL, vtbl1, v8qi) },
  { VAR1 (VTBL, vtbl2, v8qi) },
  { VAR1 (VTBL, vtbl3, v8qi) },
  { VAR1 (VTBL, vtbl4, v8qi) },
  { VAR1 (VTBX, vtbx1, v8qi) },
  { VAR1 (VTBX, vtbx2, v8qi) },
  { VAR1 (VTBX, vtbx3, v8qi) },
  { VAR1 (VTBX, vtbx4, v8qi) },
  { VAR8 (RESULTPAIR, vtrn, v8qi, v4hi, v2si, v2sf, v16qi, v8hi, v4si, v4sf) },
  { VAR8 (RESULTPAIR, vzip, v8qi, v4hi, v2si, v2sf, v16qi, v8hi, v4si, v4sf) },
  { VAR8 (RESULTPAIR, vuzp, v8qi, v4hi, v2si, v2sf, v16qi, v8hi, v4si, v4sf) },
  { VAR5 (REINTERP, vreinterpretv8qi, v8qi, v4hi, v2si, v2sf, di) },
  { VAR5 (REINTERP, vreinterpretv4hi, v8qi, v4hi, v2si, v2sf, di) },
  { VAR5 (REINTERP, vreinterpretv2si, v8qi, v4hi, v2si, v2sf, di) },
  { VAR5 (REINTERP, vreinterpretv2sf, v8qi, v4hi, v2si, v2sf, di) },
  { VAR5 (REINTERP, vreinterpretdi, v8qi, v4hi, v2si, v2sf, di) },
  { VAR5 (REINTERP, vreinterpretv16qi, v16qi, v8hi, v4si, v4sf, v2di) },
  { VAR5 (REINTERP, vreinterpretv8hi, v16qi, v8hi, v4si, v4sf, v2di) },
  { VAR5 (REINTERP, vreinterpretv4si, v16qi, v8hi, v4si, v4sf, v2di) },
  { VAR5 (REINTERP, vreinterpretv4sf, v16qi, v8hi, v4si, v4sf, v2di) },
  { VAR5 (REINTERP, vreinterpretv2di, v16qi, v8hi, v4si, v4sf, v2di) },
  { VAR10 (LOAD1, vld1,
           v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
  { VAR10 (LOAD1LANE, vld1_lane,
           v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
  { VAR10 (LOAD1, vld1_dup,
           v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
  { VAR10 (STORE1, vst1,
           v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
  { VAR10 (STORE1LANE, vst1_lane,
           v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
  { VAR9 (LOADSTRUCT,
          vld2, v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf) },
  { VAR7 (LOADSTRUCTLANE, vld2_lane,
          v8qi, v4hi, v2si, v2sf, v8hi, v4si, v4sf) },
  { VAR5 (LOADSTRUCT, vld2_dup, v8qi, v4hi, v2si, v2sf, di) },
  { VAR9 (STORESTRUCT, vst2,
          v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf) },
  { VAR7 (STORESTRUCTLANE, vst2_lane,
          v8qi, v4hi, v2si, v2sf, v8hi, v4si, v4sf) },
  { VAR9 (LOADSTRUCT,
          vld3, v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf) },
  { VAR7 (LOADSTRUCTLANE, vld3_lane,
          v8qi, v4hi, v2si, v2sf, v8hi, v4si, v4sf) },
  { VAR5 (LOADSTRUCT, vld3_dup, v8qi, v4hi, v2si, v2sf, di) },
  { VAR9 (STORESTRUCT, vst3,
          v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf) },
  { VAR7 (STORESTRUCTLANE, vst3_lane,
          v8qi, v4hi, v2si, v2sf, v8hi, v4si, v4sf) },
  { VAR9 (LOADSTRUCT, vld4,
          v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf) },
  { VAR7 (LOADSTRUCTLANE, vld4_lane,
          v8qi, v4hi, v2si, v2sf, v8hi, v4si, v4sf) },
  { VAR5 (LOADSTRUCT, vld4_dup, v8qi, v4hi, v2si, v2sf, di) },
  { VAR9 (STORESTRUCT, vst4,
          v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf) },
  { VAR7 (STORESTRUCTLANE, vst4_lane,
          v8qi, v4hi, v2si, v2sf, v8hi, v4si, v4sf) },
  { VAR10 (LOGICBINOP, vand,
           v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
  { VAR10 (LOGICBINOP, vorr,
           v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
  { VAR10 (BINOP, veor,
           v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
  { VAR10 (LOGICBINOP, vbic,
           v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) },
  { VAR10 (LOGICBINOP, vorn,
           v8qi, v4hi, v2si, v2sf, di, v16qi, v8hi, v4si, v4sf, v2di) }
};

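/* Editorial illustration: reading one entry of the table above,

     { VAR3 (BINOP, vaddl, v8qi, v4hi, v2si) }

   declares a two-operand builtin available with V8QI, V4HI and V2SI key
   modes, backed by CODE_FOR_neon_vaddlv8qi, CODE_FOR_neon_vaddlv4hi and
   CODE_FOR_neon_vaddlv2si.  The ordering WARNING matters because
   arm_init_neon_builtins below walks the T_* bits in increasing order and
   pairs bit j with the next unused slot of codes[]; modes listed out of
   order would pair a mode with the wrong insn code.  */
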
#undef CF
#undef VAR1
#undef VAR2
#undef VAR3
#undef VAR4
#undef VAR5
#undef VAR6
#undef VAR7
#undef VAR8
#undef VAR9
#undef VAR10

static void
arm_init_neon_builtins (void)
{
  unsigned int i, fcode = ARM_BUILTIN_NEON_BASE;

  tree neon_intQI_type_node;
  tree neon_intHI_type_node;
  tree neon_polyQI_type_node;
  tree neon_polyHI_type_node;
  tree neon_intSI_type_node;
  tree neon_intDI_type_node;
  tree neon_float_type_node;

  tree intQI_pointer_node;
  tree intHI_pointer_node;
  tree intSI_pointer_node;
  tree intDI_pointer_node;
  tree float_pointer_node;

  tree const_intQI_node;
  tree const_intHI_node;
  tree const_intSI_node;
  tree const_intDI_node;
  tree const_float_node;

  tree const_intQI_pointer_node;
  tree const_intHI_pointer_node;
  tree const_intSI_pointer_node;
  tree const_intDI_pointer_node;
  tree const_float_pointer_node;

  tree V8QI_type_node;
  tree V4HI_type_node;
  tree V2SI_type_node;
  tree V2SF_type_node;
  tree V16QI_type_node;
  tree V8HI_type_node;
  tree V4SI_type_node;
  tree V4SF_type_node;
  tree V2DI_type_node;

  tree intUQI_type_node;
  tree intUHI_type_node;
  tree intUSI_type_node;
  tree intUDI_type_node;

  tree intEI_type_node;
  tree intOI_type_node;
  tree intCI_type_node;
  tree intXI_type_node;

  tree V8QI_pointer_node;
  tree V4HI_pointer_node;
  tree V2SI_pointer_node;
  tree V2SF_pointer_node;
  tree V16QI_pointer_node;
  tree V8HI_pointer_node;
  tree V4SI_pointer_node;
  tree V4SF_pointer_node;
  tree V2DI_pointer_node;

  tree void_ftype_pv8qi_v8qi_v8qi;
  tree void_ftype_pv4hi_v4hi_v4hi;
  tree void_ftype_pv2si_v2si_v2si;
  tree void_ftype_pv2sf_v2sf_v2sf;
  tree void_ftype_pdi_di_di;
  tree void_ftype_pv16qi_v16qi_v16qi;
  tree void_ftype_pv8hi_v8hi_v8hi;
  tree void_ftype_pv4si_v4si_v4si;
  tree void_ftype_pv4sf_v4sf_v4sf;
  tree void_ftype_pv2di_v2di_v2di;

  tree reinterp_ftype_dreg[5][5];
  tree reinterp_ftype_qreg[5][5];
  tree dreg_types[5], qreg_types[5];

  /* Create distinguished type nodes for NEON vector element types,
     and pointers to values of such types, so we can detect them later.  */
  neon_intQI_type_node = make_signed_type (GET_MODE_PRECISION (QImode));
  neon_intHI_type_node = make_signed_type (GET_MODE_PRECISION (HImode));
  neon_polyQI_type_node = make_signed_type (GET_MODE_PRECISION (QImode));
  neon_polyHI_type_node = make_signed_type (GET_MODE_PRECISION (HImode));
  neon_intSI_type_node = make_signed_type (GET_MODE_PRECISION (SImode));
  neon_intDI_type_node = make_signed_type (GET_MODE_PRECISION (DImode));
  neon_float_type_node = make_node (REAL_TYPE);
  TYPE_PRECISION (neon_float_type_node) = FLOAT_TYPE_SIZE;
  layout_type (neon_float_type_node);

  /* Define typedefs which exactly correspond to the modes we are basing vector
     types on.  If you change these names you'll need to change
     the table used by arm_mangle_type too.  */
  (*lang_hooks.types.register_builtin_type) (neon_intQI_type_node,
                                             "__builtin_neon_qi");
  (*lang_hooks.types.register_builtin_type) (neon_intHI_type_node,
                                             "__builtin_neon_hi");
  (*lang_hooks.types.register_builtin_type) (neon_intSI_type_node,
                                             "__builtin_neon_si");
  (*lang_hooks.types.register_builtin_type) (neon_float_type_node,
                                             "__builtin_neon_sf");
  (*lang_hooks.types.register_builtin_type) (neon_intDI_type_node,
                                             "__builtin_neon_di");
  (*lang_hooks.types.register_builtin_type) (neon_polyQI_type_node,
                                             "__builtin_neon_poly8");
  (*lang_hooks.types.register_builtin_type) (neon_polyHI_type_node,
                                             "__builtin_neon_poly16");

  intQI_pointer_node = build_pointer_type (neon_intQI_type_node);
  intHI_pointer_node = build_pointer_type (neon_intHI_type_node);
  intSI_pointer_node = build_pointer_type (neon_intSI_type_node);
  intDI_pointer_node = build_pointer_type (neon_intDI_type_node);
  float_pointer_node = build_pointer_type (neon_float_type_node);

  /* Next create constant-qualified versions of the above types.  */
  const_intQI_node = build_qualified_type (neon_intQI_type_node,
                                           TYPE_QUAL_CONST);
  const_intHI_node = build_qualified_type (neon_intHI_type_node,
                                           TYPE_QUAL_CONST);
  const_intSI_node = build_qualified_type (neon_intSI_type_node,
                                           TYPE_QUAL_CONST);
  const_intDI_node = build_qualified_type (neon_intDI_type_node,
                                           TYPE_QUAL_CONST);
  const_float_node = build_qualified_type (neon_float_type_node,
                                           TYPE_QUAL_CONST);

  const_intQI_pointer_node = build_pointer_type (const_intQI_node);
  const_intHI_pointer_node = build_pointer_type (const_intHI_node);
  const_intSI_pointer_node = build_pointer_type (const_intSI_node);
  const_intDI_pointer_node = build_pointer_type (const_intDI_node);
  const_float_pointer_node = build_pointer_type (const_float_node);

  /* Now create vector types based on our NEON element types.  */
  /* 64-bit vectors.  */
  V8QI_type_node =
    build_vector_type_for_mode (neon_intQI_type_node, V8QImode);
  V4HI_type_node =
    build_vector_type_for_mode (neon_intHI_type_node, V4HImode);
  V2SI_type_node =
    build_vector_type_for_mode (neon_intSI_type_node, V2SImode);
  V2SF_type_node =
    build_vector_type_for_mode (neon_float_type_node, V2SFmode);
  /* 128-bit vectors.  */
  V16QI_type_node =
    build_vector_type_for_mode (neon_intQI_type_node, V16QImode);
  V8HI_type_node =
    build_vector_type_for_mode (neon_intHI_type_node, V8HImode);
  V4SI_type_node =
    build_vector_type_for_mode (neon_intSI_type_node, V4SImode);
  V4SF_type_node =
    build_vector_type_for_mode (neon_float_type_node, V4SFmode);
  V2DI_type_node =
    build_vector_type_for_mode (neon_intDI_type_node, V2DImode);

  /* Unsigned integer types for various mode sizes.  */
  intUQI_type_node = make_unsigned_type (GET_MODE_PRECISION (QImode));
  intUHI_type_node = make_unsigned_type (GET_MODE_PRECISION (HImode));
  intUSI_type_node = make_unsigned_type (GET_MODE_PRECISION (SImode));
  intUDI_type_node = make_unsigned_type (GET_MODE_PRECISION (DImode));

  (*lang_hooks.types.register_builtin_type) (intUQI_type_node,
                                             "__builtin_neon_uqi");
  (*lang_hooks.types.register_builtin_type) (intUHI_type_node,
                                             "__builtin_neon_uhi");
  (*lang_hooks.types.register_builtin_type) (intUSI_type_node,
                                             "__builtin_neon_usi");
  (*lang_hooks.types.register_builtin_type) (intUDI_type_node,
                                             "__builtin_neon_udi");

  /* Opaque integer types for structures of vectors.  */
  intEI_type_node = make_signed_type (GET_MODE_PRECISION (EImode));
  intOI_type_node = make_signed_type (GET_MODE_PRECISION (OImode));
  intCI_type_node = make_signed_type (GET_MODE_PRECISION (CImode));
  intXI_type_node = make_signed_type (GET_MODE_PRECISION (XImode));

  (*lang_hooks.types.register_builtin_type) (intTI_type_node,
                                             "__builtin_neon_ti");
  (*lang_hooks.types.register_builtin_type) (intEI_type_node,
                                             "__builtin_neon_ei");
  (*lang_hooks.types.register_builtin_type) (intOI_type_node,
                                             "__builtin_neon_oi");
  (*lang_hooks.types.register_builtin_type) (intCI_type_node,
                                             "__builtin_neon_ci");
  (*lang_hooks.types.register_builtin_type) (intXI_type_node,
                                             "__builtin_neon_xi");

  /* Pointers to vector types.  */
  V8QI_pointer_node = build_pointer_type (V8QI_type_node);
  V4HI_pointer_node = build_pointer_type (V4HI_type_node);
  V2SI_pointer_node = build_pointer_type (V2SI_type_node);
  V2SF_pointer_node = build_pointer_type (V2SF_type_node);
  V16QI_pointer_node = build_pointer_type (V16QI_type_node);
  V8HI_pointer_node = build_pointer_type (V8HI_type_node);
  V4SI_pointer_node = build_pointer_type (V4SI_type_node);
  V4SF_pointer_node = build_pointer_type (V4SF_type_node);
  V2DI_pointer_node = build_pointer_type (V2DI_type_node);

  /* Operations which return results as pairs.  */
  void_ftype_pv8qi_v8qi_v8qi =
    build_function_type_list (void_type_node, V8QI_pointer_node, V8QI_type_node,
                              V8QI_type_node, NULL);
  void_ftype_pv4hi_v4hi_v4hi =
    build_function_type_list (void_type_node, V4HI_pointer_node, V4HI_type_node,
                              V4HI_type_node, NULL);
  void_ftype_pv2si_v2si_v2si =
    build_function_type_list (void_type_node, V2SI_pointer_node, V2SI_type_node,
                              V2SI_type_node, NULL);
  void_ftype_pv2sf_v2sf_v2sf =
    build_function_type_list (void_type_node, V2SF_pointer_node, V2SF_type_node,
                              V2SF_type_node, NULL);
  void_ftype_pdi_di_di =
    build_function_type_list (void_type_node, intDI_pointer_node,
                              neon_intDI_type_node, neon_intDI_type_node, NULL);
  void_ftype_pv16qi_v16qi_v16qi =
    build_function_type_list (void_type_node, V16QI_pointer_node,
                              V16QI_type_node, V16QI_type_node, NULL);
  void_ftype_pv8hi_v8hi_v8hi =
    build_function_type_list (void_type_node, V8HI_pointer_node, V8HI_type_node,
                              V8HI_type_node, NULL);
  void_ftype_pv4si_v4si_v4si =
    build_function_type_list (void_type_node, V4SI_pointer_node, V4SI_type_node,
                              V4SI_type_node, NULL);
  void_ftype_pv4sf_v4sf_v4sf =
    build_function_type_list (void_type_node, V4SF_pointer_node, V4SF_type_node,
                              V4SF_type_node, NULL);
  void_ftype_pv2di_v2di_v2di =
    build_function_type_list (void_type_node, V2DI_pointer_node, V2DI_type_node,
                              V2DI_type_node, NULL);

  dreg_types[0] = V8QI_type_node;
  dreg_types[1] = V4HI_type_node;
  dreg_types[2] = V2SI_type_node;
  dreg_types[3] = V2SF_type_node;
  dreg_types[4] = neon_intDI_type_node;

  qreg_types[0] = V16QI_type_node;
  qreg_types[1] = V8HI_type_node;
  qreg_types[2] = V4SI_type_node;
  qreg_types[3] = V4SF_type_node;
  qreg_types[4] = V2DI_type_node;

  for (i = 0; i < 5; i++)
    {
      int j;
      for (j = 0; j < 5; j++)
        {
          reinterp_ftype_dreg[i][j]
            = build_function_type_list (dreg_types[i], dreg_types[j], NULL);
          reinterp_ftype_qreg[i][j]
            = build_function_type_list (qreg_types[i], qreg_types[j], NULL);
        }
    }

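  /* Editorial illustration: after the loops above, reinterp_ftype_dreg[i][j]
     is the type of a function taking dreg_types[j] and returning
     dreg_types[i]; e.g. reinterp_ftype_dreg[0][1] maps a V4HI argument to a
     V8QI result, which is exactly what the v4hi variant of the
     vreinterpretv8qi builtin selects below.  */
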
  for (i = 0; i < ARRAY_SIZE (neon_builtin_data); i++)
17520
    {
17521
      neon_builtin_datum *d = &neon_builtin_data[i];
17522
      unsigned int j, codeidx = 0;
17523
 
17524
      d->base_fcode = fcode;
17525
 
17526
      for (j = 0; j < T_MAX; j++)
17527
        {
17528
          const char* const modenames[] = {
17529
            "v8qi", "v4hi", "v2si", "v2sf", "di",
17530
            "v16qi", "v8hi", "v4si", "v4sf", "v2di"
17531
          };
17532
          char namebuf[60];
17533
          tree ftype = NULL;
17534
          enum insn_code icode;
17535
          int is_load = 0, is_store = 0;
17536
 
17537
          if ((d->bits & (1 << j)) == 0)
17538
            continue;
17539
 
17540
          icode = d->codes[codeidx++];
17541
 
17542
          switch (d->itype)
17543
            {
17544
            case NEON_LOAD1:
17545
            case NEON_LOAD1LANE:
17546
            case NEON_LOADSTRUCT:
17547
            case NEON_LOADSTRUCTLANE:
17548
              is_load = 1;
17549
              /* Fall through.  */
17550
            case NEON_STORE1:
17551
            case NEON_STORE1LANE:
17552
            case NEON_STORESTRUCT:
17553
            case NEON_STORESTRUCTLANE:
17554
              if (!is_load)
17555
                is_store = 1;
17556
              /* Fall through.  */
17557
            case NEON_UNOP:
17558
            case NEON_BINOP:
17559
            case NEON_LOGICBINOP:
17560
            case NEON_SHIFTINSERT:
17561
            case NEON_TERNOP:
17562
            case NEON_GETLANE:
17563
            case NEON_SETLANE:
17564
            case NEON_CREATE:
17565
            case NEON_DUP:
17566
            case NEON_DUPLANE:
17567
            case NEON_SHIFTIMM:
17568
            case NEON_SHIFTACC:
17569
            case NEON_COMBINE:
17570
            case NEON_SPLIT:
17571
            case NEON_CONVERT:
17572
            case NEON_FIXCONV:
17573
            case NEON_LANEMUL:
17574
            case NEON_LANEMULL:
17575
            case NEON_LANEMULH:
17576
            case NEON_LANEMAC:
17577
            case NEON_SCALARMUL:
17578
            case NEON_SCALARMULL:
17579
            case NEON_SCALARMULH:
17580
            case NEON_SCALARMAC:
17581
            case NEON_SELECT:
17582
            case NEON_VTBL:
17583
            case NEON_VTBX:
17584
              {
17585
                int k;
17586
                tree return_type = void_type_node, args = void_list_node;
17587
 
17588
                /* Build a function type directly from the insn_data for this
17589
                   builtin.  The build_function_type() function takes care of
17590
                   removing duplicates for us.  */
17591
                for (k = insn_data[icode].n_operands - 1; k >= 0; k--)
17592
                  {
17593
                    tree eltype;
17594
 
17595
                    if (is_load && k == 1)
17596
                      {
17597
                        /* Neon load patterns always have the memory operand
17598
                           (a SImode pointer) in the operand 1 position.  We
17599
                           want a const pointer to the element type in that
17600
                           position.  */
17601
                        gcc_assert (insn_data[icode].operand[k].mode == SImode);
17602
 
17603
                        switch (1 << j)
17604
                          {
17605
                          case T_V8QI:
17606
                          case T_V16QI:
17607
                            eltype = const_intQI_pointer_node;
17608
                            break;
17609
 
17610
                          case T_V4HI:
17611
                          case T_V8HI:
17612
                            eltype = const_intHI_pointer_node;
17613
                            break;
17614
 
17615
                          case T_V2SI:
17616
                          case T_V4SI:
17617
                            eltype = const_intSI_pointer_node;
17618
                            break;
17619
 
17620
                          case T_V2SF:
17621
                          case T_V4SF:
17622
                            eltype = const_float_pointer_node;
17623
                            break;
17624
 
17625
                          case T_DI:
17626
                          case T_V2DI:
17627
                            eltype = const_intDI_pointer_node;
17628
                            break;
17629
 
17630
                          default: gcc_unreachable ();
17631
                          }
17632
                      }
17633
                    else if (is_store && k == 0)
17634
                      {
17635
                        /* Similarly, Neon store patterns use operand 0 as
17636
                           the memory location to store to (a SImode pointer).
17637
                           Use a pointer to the element type of the store in
17638
                           that position.  */
17639
                        gcc_assert (insn_data[icode].operand[k].mode == SImode);
17640
 
17641
                        switch (1 << j)
17642
                          {
17643
                          case T_V8QI:
17644
                          case T_V16QI:
17645
                            eltype = intQI_pointer_node;
17646
                            break;
17647
 
17648
                          case T_V4HI:
17649
                          case T_V8HI:
17650
                            eltype = intHI_pointer_node;
17651
                            break;
17652
 
17653
                          case T_V2SI:
17654
                          case T_V4SI:
17655
                            eltype = intSI_pointer_node;
17656
                            break;
17657
 
17658
                          case T_V2SF:
17659
                          case T_V4SF:
17660
                            eltype = float_pointer_node;
17661
                            break;
17662
 
17663
                          case T_DI:
17664
                          case T_V2DI:
17665
                            eltype = intDI_pointer_node;
17666
                            break;
17667
 
17668
                          default: gcc_unreachable ();
17669
                          }
17670
                      }
17671
                    else
17672
                      {
17673
                        switch (insn_data[icode].operand[k].mode)
17674
                          {
17675
                          case VOIDmode: eltype = void_type_node; break;
17676
                          /* Scalars.  */
17677
                          case QImode: eltype = neon_intQI_type_node; break;
17678
                          case HImode: eltype = neon_intHI_type_node; break;
17679
                          case SImode: eltype = neon_intSI_type_node; break;
17680
                          case SFmode: eltype = neon_float_type_node; break;
17681
                          case DImode: eltype = neon_intDI_type_node; break;
17682
                          case TImode: eltype = intTI_type_node; break;
17683
                          case EImode: eltype = intEI_type_node; break;
17684
                          case OImode: eltype = intOI_type_node; break;
17685
                          case CImode: eltype = intCI_type_node; break;
17686
                          case XImode: eltype = intXI_type_node; break;
17687
                          /* 64-bit vectors.  */
17688
                          case V8QImode: eltype = V8QI_type_node; break;
17689
                          case V4HImode: eltype = V4HI_type_node; break;
17690
                          case V2SImode: eltype = V2SI_type_node; break;
17691
                          case V2SFmode: eltype = V2SF_type_node; break;
17692
                          /* 128-bit vectors.  */
17693
                          case V16QImode: eltype = V16QI_type_node; break;
17694
                          case V8HImode: eltype = V8HI_type_node; break;
17695
                          case V4SImode: eltype = V4SI_type_node; break;
17696
                          case V4SFmode: eltype = V4SF_type_node; break;
17697
                          case V2DImode: eltype = V2DI_type_node; break;
17698
                          default: gcc_unreachable ();
17699
                          }
17700
                      }
17701
 
17702
                    if (k == 0 && !is_store)
17703
                      return_type = eltype;
17704
                    else
17705
                      args = tree_cons (NULL_TREE, eltype, args);
17706
                  }
17707
 
17708
                ftype = build_function_type (return_type, args);
17709
              }
17710
              break;
17711
 
17712
            case NEON_RESULTPAIR:
17713
              {
17714
                switch (insn_data[icode].operand[1].mode)
17715
                  {
17716
                  case V8QImode: ftype = void_ftype_pv8qi_v8qi_v8qi; break;
17717
                  case V4HImode: ftype = void_ftype_pv4hi_v4hi_v4hi; break;
17718
                  case V2SImode: ftype = void_ftype_pv2si_v2si_v2si; break;
17719
                  case V2SFmode: ftype = void_ftype_pv2sf_v2sf_v2sf; break;
17720
                  case DImode: ftype = void_ftype_pdi_di_di; break;
17721
                  case V16QImode: ftype = void_ftype_pv16qi_v16qi_v16qi; break;
17722
                  case V8HImode: ftype = void_ftype_pv8hi_v8hi_v8hi; break;
17723
                  case V4SImode: ftype = void_ftype_pv4si_v4si_v4si; break;
17724
                  case V4SFmode: ftype = void_ftype_pv4sf_v4sf_v4sf; break;
17725
                  case V2DImode: ftype = void_ftype_pv2di_v2di_v2di; break;
17726
                  default: gcc_unreachable ();
17727
                  }
17728
              }
17729
              break;
17730
 
17731
            case NEON_REINTERP:
17732
              {
17733
                /* We iterate over 5 doubleword types, then 5 quadword
17734
                   types.  */
17735
                int rhs = j % 5;
17736
                switch (insn_data[icode].operand[0].mode)
17737
                  {
17738
                  case V8QImode: ftype = reinterp_ftype_dreg[0][rhs]; break;
17739
                  case V4HImode: ftype = reinterp_ftype_dreg[1][rhs]; break;
17740
                  case V2SImode: ftype = reinterp_ftype_dreg[2][rhs]; break;
17741
                  case V2SFmode: ftype = reinterp_ftype_dreg[3][rhs]; break;
17742
                  case DImode: ftype = reinterp_ftype_dreg[4][rhs]; break;
17743
                  case V16QImode: ftype = reinterp_ftype_qreg[0][rhs]; break;
17744
                  case V8HImode: ftype = reinterp_ftype_qreg[1][rhs]; break;
17745
                  case V4SImode: ftype = reinterp_ftype_qreg[2][rhs]; break;
17746
                  case V4SFmode: ftype = reinterp_ftype_qreg[3][rhs]; break;
17747
                  case V2DImode: ftype = reinterp_ftype_qreg[4][rhs]; break;
17748
                  default: gcc_unreachable ();
17749
                  }
17750
              }
17751
              break;
17752
 
17753
            default:
17754
              gcc_unreachable ();
17755
            }
17756
 
17757
          gcc_assert (ftype != NULL);
17758
 
17759
          sprintf (namebuf, "__builtin_neon_%s%s", d->name, modenames[j]);
17760
 
17761
          add_builtin_function (namebuf, ftype, fcode++, BUILT_IN_MD, NULL,
17762
                                NULL_TREE);
17763
        }
17764
    }
17765
}
17766
 
17767
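/* Editorial illustration: the sprintf above composes each builtin's name
   from the table entry's name and the key-mode suffix, so the V8QI variant
   of vaddl is registered as __builtin_neon_vaddlv8qi, with an fcode of
   ARM_BUILTIN_NEON_BASE plus its position in the walk.  The user-level
   arm_neon.h intrinsics (vaddl_s8 and friends) are implemented on top of
   these builtins.  */
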
static void
arm_init_fp16_builtins (void)
{
  tree fp16_type = make_node (REAL_TYPE);
  TYPE_PRECISION (fp16_type) = 16;
  layout_type (fp16_type);
  (*lang_hooks.types.register_builtin_type) (fp16_type, "__fp16");
}

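/* Editorial illustration: once a half-precision format has been selected
   (e.g. with -mfp16-format=ieee), the type registered above is usable
   directly:

     __fp16 h = 1.0;
     float f = h + 1.0f;

   __fp16 is storage-only; arm_promoted_type and arm_convert_to_type below
   arrange for arithmetic and conversions to go via float.  */
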
static void
arm_init_builtins (void)
{
  arm_init_tls_builtins ();

  if (TARGET_REALLY_IWMMXT)
    arm_init_iwmmxt_builtins ();

  if (TARGET_NEON)
    arm_init_neon_builtins ();

  if (arm_fp16_format)
    arm_init_fp16_builtins ();
}

/* Implement TARGET_INVALID_PARAMETER_TYPE.  */

static const char *
arm_invalid_parameter_type (const_tree t)
{
  if (SCALAR_FLOAT_TYPE_P (t) && TYPE_PRECISION (t) == 16)
    return N_("function parameters cannot have __fp16 type");
  return NULL;
}

/* Implement TARGET_INVALID_RETURN_TYPE.  */

static const char *
arm_invalid_return_type (const_tree t)
{
  if (SCALAR_FLOAT_TYPE_P (t) && TYPE_PRECISION (t) == 16)
    return N_("functions cannot return __fp16 type");
  return NULL;
}

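/* Editorial illustration: taken together, the two hooks above reject
   __fp16 anywhere in a prototype, so a declaration such as

     __fp16 half_id (__fp16 x);

   draws both diagnostics; half-precision values must be passed and
   returned as float instead.  */
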
/* Implement TARGET_PROMOTED_TYPE.  */

static tree
arm_promoted_type (const_tree t)
{
  if (SCALAR_FLOAT_TYPE_P (t) && TYPE_PRECISION (t) == 16)
    return float_type_node;
  return NULL_TREE;
}

/* Implement TARGET_CONVERT_TO_TYPE.
   Specifically, this hook implements the peculiarity of the ARM
   half-precision floating-point C semantics that requires conversions
   between __fp16 and double to go via an intermediate conversion to
   float.  */

static tree
arm_convert_to_type (tree type, tree expr)
{
  tree fromtype = TREE_TYPE (expr);
  if (!SCALAR_FLOAT_TYPE_P (fromtype) || !SCALAR_FLOAT_TYPE_P (type))
    return NULL_TREE;
  if ((TYPE_PRECISION (fromtype) == 16 && TYPE_PRECISION (type) > 32)
      || (TYPE_PRECISION (type) == 16 && TYPE_PRECISION (fromtype) > 32))
    return convert (type, convert (float_type_node, expr));
  return NULL_TREE;
}

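/* Editorial illustration: the effect of the hook above is that a
   double-to-__fp16 conversion is performed as if written

     __fp16 h = (__fp16) (float) d;

   i.e. with an explicit intermediate rounding to float, rather than as a
   single direct truncation.  */
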
/* Implement TARGET_SCALAR_MODE_SUPPORTED_P.
   This simply adds HFmode as a supported mode; even though we don't
   implement arithmetic on this type directly, it's supported by
   optabs conversions, much the way the double-word arithmetic is
   special-cased in the default hook.  */

static bool
arm_scalar_mode_supported_p (enum machine_mode mode)
{
  if (mode == HFmode)
    return (arm_fp16_format != ARM_FP16_FORMAT_NONE);
  else
    return default_scalar_mode_supported_p (mode);
}

/* Errors in the source file can cause expand_expr to return const0_rtx
   where we expect a vector.  To avoid crashing, use one of the vector
   clear instructions.  */

static rtx
safe_vector_operand (rtx x, enum machine_mode mode)
{
  if (x != const0_rtx)
    return x;
  x = gen_reg_rtx (mode);

  emit_insn (gen_iwmmxt_clrdi (mode == DImode ? x
                               : gen_rtx_SUBREG (DImode, x, 0)));
  return x;
}

/* Subroutine of arm_expand_builtin to take care of binop insns.  */

static rtx
arm_expand_binop_builtin (enum insn_code icode,
                          tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;

  if (VECTOR_MODE_P (mode0))
    op0 = safe_vector_operand (op0, mode0);
  if (VECTOR_MODE_P (mode1))
    op1 = safe_vector_operand (op1, mode1);

  if (! target
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  gcc_assert (GET_MODE (op0) == mode0 && GET_MODE (op1) == mode1);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (icode) (target, op0, op1);
  if (! pat)
    return 0;
  emit_insn (pat);
  return target;
}

/* Subroutine of arm_expand_builtin to take care of unop insns.  */

static rtx
arm_expand_unop_builtin (enum insn_code icode,
                         tree exp, rtx target, int do_load)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  rtx op0 = expand_normal (arg0);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;

  if (! target
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);
  if (do_load)
    op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
  else
    {
      if (VECTOR_MODE_P (mode0))
        op0 = safe_vector_operand (op0, mode0);

      if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
        op0 = copy_to_mode_reg (mode0, op0);
    }

  pat = GEN_FCN (icode) (target, op0);
  if (! pat)
    return 0;
  emit_insn (pat);
  return target;
}

static int
neon_builtin_compare (const void *a, const void *b)
{
  const neon_builtin_datum *const key = (const neon_builtin_datum *) a;
  const neon_builtin_datum *const memb = (const neon_builtin_datum *) b;
  unsigned int soughtcode = key->base_fcode;

  if (soughtcode >= memb->base_fcode
      && soughtcode < memb->base_fcode + memb->num_vars)
    return 0;
  else if (soughtcode < memb->base_fcode)
    return -1;
  else
    return 1;
}

static enum insn_code
locate_neon_builtin_icode (int fcode, neon_itype *itype)
{
  neon_builtin_datum key, *found;
  int idx;

  key.base_fcode = fcode;
  found = (neon_builtin_datum *)
    bsearch (&key, &neon_builtin_data[0], ARRAY_SIZE (neon_builtin_data),
                   sizeof (neon_builtin_data[0]), neon_builtin_compare);
  gcc_assert (found);
  idx = fcode - (int) found->base_fcode;
  gcc_assert (idx >= 0 && idx < T_MAX && idx < (int)found->num_vars);

  if (itype)
    *itype = found->itype;

  return found->codes[idx];
}

typedef enum {
  NEON_ARG_COPY_TO_REG,
  NEON_ARG_CONSTANT,
  NEON_ARG_STOP
} builtin_arg;

#define NEON_MAX_BUILTIN_ARGS 5

/* Expand a Neon builtin.  */
static rtx
arm_expand_neon_args (rtx target, int icode, int have_retval,
                      tree exp, ...)
{
  va_list ap;
  rtx pat;
  tree arg[NEON_MAX_BUILTIN_ARGS];
  rtx op[NEON_MAX_BUILTIN_ARGS];
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode[NEON_MAX_BUILTIN_ARGS];
  int argc = 0;

  if (have_retval
      && (!target
          || GET_MODE (target) != tmode
          || !(*insn_data[icode].operand[0].predicate) (target, tmode)))
    target = gen_reg_rtx (tmode);

  va_start (ap, exp);

  for (;;)
    {
      builtin_arg thisarg = (builtin_arg) va_arg (ap, int);

      if (thisarg == NEON_ARG_STOP)
        break;
      else
        {
          arg[argc] = CALL_EXPR_ARG (exp, argc);
          op[argc] = expand_normal (arg[argc]);
          mode[argc] = insn_data[icode].operand[argc + have_retval].mode;

          switch (thisarg)
            {
            case NEON_ARG_COPY_TO_REG:
              /*gcc_assert (GET_MODE (op[argc]) == mode[argc]);*/
              if (!(*insn_data[icode].operand[argc + have_retval].predicate)
                     (op[argc], mode[argc]))
                op[argc] = copy_to_mode_reg (mode[argc], op[argc]);
              break;

            case NEON_ARG_CONSTANT:
              /* FIXME: This error message is somewhat unhelpful.  */
              if (!(*insn_data[icode].operand[argc + have_retval].predicate)
                    (op[argc], mode[argc]))
                error ("argument must be a constant");
              break;

            case NEON_ARG_STOP:
              gcc_unreachable ();
            }

          argc++;
        }
    }

  va_end (ap);

  if (have_retval)
    switch (argc)
      {
      case 1:
        pat = GEN_FCN (icode) (target, op[0]);
        break;

      case 2:
        pat = GEN_FCN (icode) (target, op[0], op[1]);
        break;

      case 3:
        pat = GEN_FCN (icode) (target, op[0], op[1], op[2]);
        break;

      case 4:
        pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3]);
        break;

      case 5:
        pat = GEN_FCN (icode) (target, op[0], op[1], op[2], op[3], op[4]);
        break;

      default:
        gcc_unreachable ();
      }
  else
    switch (argc)
      {
      case 1:
        pat = GEN_FCN (icode) (op[0]);
        break;

      case 2:
        pat = GEN_FCN (icode) (op[0], op[1]);
        break;

      case 3:
        pat = GEN_FCN (icode) (op[0], op[1], op[2]);
        break;

      case 4:
        pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
        break;

      case 5:
        pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3], op[4]);
        break;

      default:
        gcc_unreachable ();
      }

  if (!pat)
    return 0;

  emit_insn (pat);

  return target;
}

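/* For example, arm_expand_neon_builtin below expands a one-operand
   builtin that produces a result (such as the NEON_CREATE case) with:

     arm_expand_neon_args (target, icode, 1, exp,
       NEON_ARG_COPY_TO_REG, NEON_ARG_STOP);  */
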
/* Expand a Neon builtin. These are "special" because they don't have symbolic
   constants defined per-instruction or per instruction-variant. Instead, the
   required info is looked up in the table neon_builtin_data.  */
static rtx
arm_expand_neon_builtin (int fcode, tree exp, rtx target)
{
  neon_itype itype;
  enum insn_code icode = locate_neon_builtin_icode (fcode, &itype);

  switch (itype)
    {
    case NEON_UNOP:
    case NEON_CONVERT:
    case NEON_DUPLANE:
      return arm_expand_neon_args (target, icode, 1, exp,
        NEON_ARG_COPY_TO_REG, NEON_ARG_CONSTANT, NEON_ARG_STOP);

    case NEON_BINOP:
    case NEON_SETLANE:
    case NEON_SCALARMUL:
    case NEON_SCALARMULL:
    case NEON_SCALARMULH:
    case NEON_SHIFTINSERT:
    case NEON_LOGICBINOP:
      return arm_expand_neon_args (target, icode, 1, exp,
        NEON_ARG_COPY_TO_REG, NEON_ARG_COPY_TO_REG, NEON_ARG_CONSTANT,
        NEON_ARG_STOP);

    case NEON_TERNOP:
      return arm_expand_neon_args (target, icode, 1, exp,
        NEON_ARG_COPY_TO_REG, NEON_ARG_COPY_TO_REG, NEON_ARG_COPY_TO_REG,
        NEON_ARG_CONSTANT, NEON_ARG_STOP);

    case NEON_GETLANE:
    case NEON_FIXCONV:
    case NEON_SHIFTIMM:
      return arm_expand_neon_args (target, icode, 1, exp,
        NEON_ARG_COPY_TO_REG, NEON_ARG_CONSTANT, NEON_ARG_CONSTANT,
        NEON_ARG_STOP);

    case NEON_CREATE:
      return arm_expand_neon_args (target, icode, 1, exp,
        NEON_ARG_COPY_TO_REG, NEON_ARG_STOP);

    case NEON_DUP:
    case NEON_SPLIT:
    case NEON_REINTERP:
      return arm_expand_neon_args (target, icode, 1, exp,
        NEON_ARG_COPY_TO_REG, NEON_ARG_STOP);

    case NEON_COMBINE:
    case NEON_VTBL:
      return arm_expand_neon_args (target, icode, 1, exp,
        NEON_ARG_COPY_TO_REG, NEON_ARG_COPY_TO_REG, NEON_ARG_STOP);

    case NEON_RESULTPAIR:
      return arm_expand_neon_args (target, icode, 0, exp,
        NEON_ARG_COPY_TO_REG, NEON_ARG_COPY_TO_REG, NEON_ARG_COPY_TO_REG,
        NEON_ARG_STOP);

    case NEON_LANEMUL:
    case NEON_LANEMULL:
    case NEON_LANEMULH:
      return arm_expand_neon_args (target, icode, 1, exp,
        NEON_ARG_COPY_TO_REG, NEON_ARG_COPY_TO_REG, NEON_ARG_CONSTANT,
        NEON_ARG_CONSTANT, NEON_ARG_STOP);

    case NEON_LANEMAC:
      return arm_expand_neon_args (target, icode, 1, exp,
        NEON_ARG_COPY_TO_REG, NEON_ARG_COPY_TO_REG, NEON_ARG_COPY_TO_REG,
        NEON_ARG_CONSTANT, NEON_ARG_CONSTANT, NEON_ARG_STOP);

    case NEON_SHIFTACC:
      return arm_expand_neon_args (target, icode, 1, exp,
        NEON_ARG_COPY_TO_REG, NEON_ARG_COPY_TO_REG, NEON_ARG_CONSTANT,
        NEON_ARG_CONSTANT, NEON_ARG_STOP);

    case NEON_SCALARMAC:
      return arm_expand_neon_args (target, icode, 1, exp,
        NEON_ARG_COPY_TO_REG, NEON_ARG_COPY_TO_REG, NEON_ARG_COPY_TO_REG,
        NEON_ARG_CONSTANT, NEON_ARG_STOP);

    case NEON_SELECT:
    case NEON_VTBX:
      return arm_expand_neon_args (target, icode, 1, exp,
        NEON_ARG_COPY_TO_REG, NEON_ARG_COPY_TO_REG, NEON_ARG_COPY_TO_REG,
        NEON_ARG_STOP);

    case NEON_LOAD1:
    case NEON_LOADSTRUCT:
      return arm_expand_neon_args (target, icode, 1, exp,
        NEON_ARG_COPY_TO_REG, NEON_ARG_STOP);

    case NEON_LOAD1LANE:
    case NEON_LOADSTRUCTLANE:
      return arm_expand_neon_args (target, icode, 1, exp,
        NEON_ARG_COPY_TO_REG, NEON_ARG_COPY_TO_REG, NEON_ARG_CONSTANT,
        NEON_ARG_STOP);

    case NEON_STORE1:
    case NEON_STORESTRUCT:
      return arm_expand_neon_args (target, icode, 0, exp,
        NEON_ARG_COPY_TO_REG, NEON_ARG_COPY_TO_REG, NEON_ARG_STOP);

    case NEON_STORE1LANE:
    case NEON_STORESTRUCTLANE:
      return arm_expand_neon_args (target, icode, 0, exp,
        NEON_ARG_COPY_TO_REG, NEON_ARG_COPY_TO_REG, NEON_ARG_CONSTANT,
        NEON_ARG_STOP);
    }

  gcc_unreachable ();
}

/* Emit code to reinterpret one Neon type as another, without altering bits.  */
void
neon_reinterpret (rtx dest, rtx src)
{
  emit_move_insn (dest, gen_lowpart (GET_MODE (dest), src));
}

/* Emit code to place a Neon pair result in memory locations (with equal
   registers).  */
void
neon_emit_pair_result_insn (enum machine_mode mode,
                            rtx (*intfn) (rtx, rtx, rtx, rtx), rtx destaddr,
                            rtx op1, rtx op2)
{
  rtx mem = gen_rtx_MEM (mode, destaddr);
  rtx tmp1 = gen_reg_rtx (mode);
  rtx tmp2 = gen_reg_rtx (mode);

  emit_insn (intfn (tmp1, op1, tmp2, op2));

  emit_move_insn (mem, tmp1);
  mem = adjust_address (mem, mode, GET_MODE_SIZE (mode));
  emit_move_insn (mem, tmp2);
}

/* Set up operands for a register copy from src to dest, taking care not to
   clobber registers in the process.
   FIXME: This has rather high polynomial complexity (O(n^3)?) but shouldn't
   be called with a large COUNT, so that should be OK.  */

void
neon_disambiguate_copy (rtx *operands, rtx *dest, rtx *src, unsigned int count)
{
  unsigned int copied = 0, opctr = 0;
  unsigned int done = (1 << count) - 1;
  unsigned int i, j;

  while (copied != done)
    {
      for (i = 0; i < count; i++)
        {
          int good = 1;

          for (j = 0; good && j < count; j++)
            if (i != j && (copied & (1 << j)) == 0
                && reg_overlap_mentioned_p (src[j], dest[i]))
              good = 0;

          if (good)
            {
              operands[opctr++] = dest[i];
              operands[opctr++] = src[i];
              copied |= 1 << i;
            }
        }
    }

  gcc_assert (opctr == count * 2);
}

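/* For example, calling neon_disambiguate_copy with COUNT == 2,
   DEST = {d1, d2} and SRC = {d0, d1} orders the operands so that the
   copy d2 = d1 comes before d1 = d0, ensuring the old value of d1 is
   read before it is overwritten.  */
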
/* Expand an expression EXP that calls a built-in function,
   with result going to TARGET if that's convenient
   (and in mode MODE if that's convenient).
   SUBTARGET may be used as the target for computing one of EXP's operands.
   IGNORE is nonzero if the value is to be ignored.  */

static rtx
arm_expand_builtin (tree exp,
                    rtx target,
                    rtx subtarget ATTRIBUTE_UNUSED,
                    enum machine_mode mode ATTRIBUTE_UNUSED,
                    int ignore ATTRIBUTE_UNUSED)
{
  const struct builtin_description * d;
  enum insn_code    icode;
  tree              fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  tree              arg0;
  tree              arg1;
  tree              arg2;
  rtx               op0;
  rtx               op1;
  rtx               op2;
  rtx               pat;
  int               fcode = DECL_FUNCTION_CODE (fndecl);
  size_t            i;
  enum machine_mode tmode;
  enum machine_mode mode0;
  enum machine_mode mode1;
  enum machine_mode mode2;

  if (fcode >= ARM_BUILTIN_NEON_BASE)
    return arm_expand_neon_builtin (fcode, exp, target);

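  /* The remaining codes are iWMMXt builtins.  The irregular ones are
     handled explicitly in the switch below; the regular one- and
     two-operand ones fall through to the bdesc_1arg and bdesc_2arg
     table scans at the end of this function.  */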
  switch (fcode)
    {
    case ARM_BUILTIN_TEXTRMSB:
    case ARM_BUILTIN_TEXTRMUB:
    case ARM_BUILTIN_TEXTRMSH:
    case ARM_BUILTIN_TEXTRMUH:
    case ARM_BUILTIN_TEXTRMSW:
    case ARM_BUILTIN_TEXTRMUW:
      icode = (fcode == ARM_BUILTIN_TEXTRMSB ? CODE_FOR_iwmmxt_textrmsb
               : fcode == ARM_BUILTIN_TEXTRMUB ? CODE_FOR_iwmmxt_textrmub
               : fcode == ARM_BUILTIN_TEXTRMSH ? CODE_FOR_iwmmxt_textrmsh
               : fcode == ARM_BUILTIN_TEXTRMUH ? CODE_FOR_iwmmxt_textrmuh
               : CODE_FOR_iwmmxt_textrmw);

      arg0 = CALL_EXPR_ARG (exp, 0);
      arg1 = CALL_EXPR_ARG (exp, 1);
      op0 = expand_normal (arg0);
      op1 = expand_normal (arg1);
      tmode = insn_data[icode].operand[0].mode;
      mode0 = insn_data[icode].operand[1].mode;
      mode1 = insn_data[icode].operand[2].mode;

      if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
        op0 = copy_to_mode_reg (mode0, op0);
      if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
        {
          /* @@@ better error message */
          error ("selector must be an immediate");
          return gen_reg_rtx (tmode);
        }
      if (target == 0
          || GET_MODE (target) != tmode
          || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
        target = gen_reg_rtx (tmode);
      pat = GEN_FCN (icode) (target, op0, op1);
      if (! pat)
        return 0;
      emit_insn (pat);
      return target;

    case ARM_BUILTIN_TINSRB:
    case ARM_BUILTIN_TINSRH:
    case ARM_BUILTIN_TINSRW:
      icode = (fcode == ARM_BUILTIN_TINSRB ? CODE_FOR_iwmmxt_tinsrb
               : fcode == ARM_BUILTIN_TINSRH ? CODE_FOR_iwmmxt_tinsrh
               : CODE_FOR_iwmmxt_tinsrw);
      arg0 = CALL_EXPR_ARG (exp, 0);
      arg1 = CALL_EXPR_ARG (exp, 1);
      arg2 = CALL_EXPR_ARG (exp, 2);
      op0 = expand_normal (arg0);
      op1 = expand_normal (arg1);
      op2 = expand_normal (arg2);
      tmode = insn_data[icode].operand[0].mode;
      mode0 = insn_data[icode].operand[1].mode;
      mode1 = insn_data[icode].operand[2].mode;
      mode2 = insn_data[icode].operand[3].mode;

      if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
        op0 = copy_to_mode_reg (mode0, op0);
      if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
        op1 = copy_to_mode_reg (mode1, op1);
      if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
        {
          /* @@@ better error message */
          error ("selector must be an immediate");
          return const0_rtx;
        }
      if (target == 0
          || GET_MODE (target) != tmode
          || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
        target = gen_reg_rtx (tmode);
      pat = GEN_FCN (icode) (target, op0, op1, op2);
      if (! pat)
        return 0;
      emit_insn (pat);
      return target;

    case ARM_BUILTIN_SETWCX:
      arg0 = CALL_EXPR_ARG (exp, 0);
      arg1 = CALL_EXPR_ARG (exp, 1);
      op0 = force_reg (SImode, expand_normal (arg0));
      op1 = expand_normal (arg1);
      emit_insn (gen_iwmmxt_tmcr (op1, op0));
      return 0;

    case ARM_BUILTIN_GETWCX:
      arg0 = CALL_EXPR_ARG (exp, 0);
      op0 = expand_normal (arg0);
      target = gen_reg_rtx (SImode);
      emit_insn (gen_iwmmxt_tmrc (target, op0));
      return target;

    case ARM_BUILTIN_WSHUFH:
      icode = CODE_FOR_iwmmxt_wshufh;
      arg0 = CALL_EXPR_ARG (exp, 0);
      arg1 = CALL_EXPR_ARG (exp, 1);
      op0 = expand_normal (arg0);
      op1 = expand_normal (arg1);
      tmode = insn_data[icode].operand[0].mode;
      mode1 = insn_data[icode].operand[1].mode;
      mode2 = insn_data[icode].operand[2].mode;

      if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
        op0 = copy_to_mode_reg (mode1, op0);
      if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
        {
          /* @@@ better error message */
          error ("mask must be an immediate");
          return const0_rtx;
        }
      if (target == 0
          || GET_MODE (target) != tmode
          || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
        target = gen_reg_rtx (tmode);
      pat = GEN_FCN (icode) (target, op0, op1);
      if (! pat)
        return 0;
      emit_insn (pat);
      return target;

    case ARM_BUILTIN_WSADB:
      return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadb, exp, target);
    case ARM_BUILTIN_WSADH:
      return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadh, exp, target);
    case ARM_BUILTIN_WSADBZ:
      return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadbz, exp, target);
    case ARM_BUILTIN_WSADHZ:
      return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadhz, exp, target);

      /* Several three-argument builtins.  */
    case ARM_BUILTIN_WMACS:
    case ARM_BUILTIN_WMACU:
    case ARM_BUILTIN_WALIGN:
    case ARM_BUILTIN_TMIA:
    case ARM_BUILTIN_TMIAPH:
    case ARM_BUILTIN_TMIATT:
    case ARM_BUILTIN_TMIATB:
    case ARM_BUILTIN_TMIABT:
    case ARM_BUILTIN_TMIABB:
      icode = (fcode == ARM_BUILTIN_WMACS ? CODE_FOR_iwmmxt_wmacs
               : fcode == ARM_BUILTIN_WMACU ? CODE_FOR_iwmmxt_wmacu
               : fcode == ARM_BUILTIN_TMIA ? CODE_FOR_iwmmxt_tmia
               : fcode == ARM_BUILTIN_TMIAPH ? CODE_FOR_iwmmxt_tmiaph
               : fcode == ARM_BUILTIN_TMIABB ? CODE_FOR_iwmmxt_tmiabb
               : fcode == ARM_BUILTIN_TMIABT ? CODE_FOR_iwmmxt_tmiabt
               : fcode == ARM_BUILTIN_TMIATB ? CODE_FOR_iwmmxt_tmiatb
               : fcode == ARM_BUILTIN_TMIATT ? CODE_FOR_iwmmxt_tmiatt
               : CODE_FOR_iwmmxt_walign);
      arg0 = CALL_EXPR_ARG (exp, 0);
      arg1 = CALL_EXPR_ARG (exp, 1);
      arg2 = CALL_EXPR_ARG (exp, 2);
      op0 = expand_normal (arg0);
      op1 = expand_normal (arg1);
      op2 = expand_normal (arg2);
      tmode = insn_data[icode].operand[0].mode;
      mode0 = insn_data[icode].operand[1].mode;
      mode1 = insn_data[icode].operand[2].mode;
      mode2 = insn_data[icode].operand[3].mode;

      if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
        op0 = copy_to_mode_reg (mode0, op0);
      if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
        op1 = copy_to_mode_reg (mode1, op1);
      if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
        op2 = copy_to_mode_reg (mode2, op2);
      if (target == 0
          || GET_MODE (target) != tmode
          || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
        target = gen_reg_rtx (tmode);
      pat = GEN_FCN (icode) (target, op0, op1, op2);
      if (! pat)
        return 0;
      emit_insn (pat);
      return target;

    case ARM_BUILTIN_WZERO:
      target = gen_reg_rtx (DImode);
      emit_insn (gen_iwmmxt_clrdi (target));
      return target;

    case ARM_BUILTIN_THREAD_POINTER:
      return arm_load_tp (target);

    default:
      break;
    }

  for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
    if (d->code == (const enum arm_builtins) fcode)
      return arm_expand_binop_builtin (d->icode, exp, target);

  for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
    if (d->code == (const enum arm_builtins) fcode)
      return arm_expand_unop_builtin (d->icode, exp, target, 0);

  /* @@@ Should really do something sensible here.  */
  return NULL_RTX;
}

/* Return the number (counting from 0) of
   the least significant set bit in MASK.  MASK must be nonzero, or the
   loop below will not terminate.  */

inline static int
number_of_first_bit_set (unsigned mask)
{
  int bit;

  for (bit = 0;
       (mask & (1 << bit)) == 0;
       ++bit)
    continue;

  return bit;
}

/* Emit code to push or pop registers to or from the stack.  F is the
   assembly file.  MASK is the registers to push or pop.  PUSH is
   nonzero if we should push, and zero if we should pop.  For debugging
   output, if pushing, adjust CFA_OFFSET by the amount of space added
   to the stack.  REAL_REGS should have the same number of bits set as
   MASK, and will be used instead (in the same order) to describe which
   registers were saved - this is used to mark the save slots when we
   push high registers after moving them to low registers.  */
static void
thumb_pushpop (FILE *f, unsigned long mask, int push, int *cfa_offset,
               unsigned long real_regs)
{
  int regno;
  int lo_mask = mask & 0xFF;
  int pushed_words = 0;

  gcc_assert (mask);

  if (lo_mask == 0 && !push && (mask & (1 << PC_REGNUM)))
    {
      /* Special case.  Do not generate a POP PC statement here;
         do it in thumb_exit ().  */
      thumb_exit (f, -1);
      return;
    }

  if (ARM_EABI_UNWIND_TABLES && push)
    {
      fprintf (f, "\t.save\t{");
      for (regno = 0; regno < 15; regno++)
        {
          if (real_regs & (1 << regno))
            {
              if (real_regs & ((1 << regno) - 1))
                fprintf (f, ", ");
              asm_fprintf (f, "%r", regno);
            }
        }
      fprintf (f, "}\n");
    }

  fprintf (f, "\t%s\t{", push ? "push" : "pop");

  /* Look at the low registers first.  */
  for (regno = 0; regno <= LAST_LO_REGNUM; regno++, lo_mask >>= 1)
    {
      if (lo_mask & 1)
        {
          asm_fprintf (f, "%r", regno);

          if ((lo_mask & ~1) != 0)
            fprintf (f, ", ");

          pushed_words++;
        }
    }

  if (push && (mask & (1 << LR_REGNUM)))
    {
      /* Catch pushing the LR.  */
      if (mask & 0xFF)
        fprintf (f, ", ");

      asm_fprintf (f, "%r", LR_REGNUM);

      pushed_words++;
    }
  else if (!push && (mask & (1 << PC_REGNUM)))
    {
      /* Catch popping the PC.  */
      if (TARGET_INTERWORK || TARGET_BACKTRACE
          || crtl->calls_eh_return)
        {
          /* The PC is never popped directly; instead
             it is popped into r3 and then BX is used.  */
          fprintf (f, "}\n");

          thumb_exit (f, -1);

          return;
        }
      else
        {
          if (mask & 0xFF)
            fprintf (f, ", ");

          asm_fprintf (f, "%r", PC_REGNUM);
        }
    }

  fprintf (f, "}\n");

  if (push && pushed_words && dwarf2out_do_frame ())
    {
      char *l = dwarf2out_cfi_label (false);
      int pushed_mask = real_regs;

      *cfa_offset += pushed_words * 4;
      dwarf2out_def_cfa (l, SP_REGNUM, *cfa_offset);

      pushed_words = 0;
      pushed_mask = real_regs;
      for (regno = 0; regno <= 14; regno++, pushed_mask >>= 1)
        {
          if (pushed_mask & 1)
            dwarf2out_reg_save (l, regno, 4 * pushed_words++ - *cfa_offset);
        }
    }
}

/* Generate code to return from a thumb function.
   If 'reg_containing_return_addr' is -1, then the return address is
   actually on the stack, at the stack pointer.  */
static void
thumb_exit (FILE *f, int reg_containing_return_addr)
{
  unsigned regs_available_for_popping;
  unsigned regs_to_pop;
  int pops_needed;
  unsigned available;
  unsigned required;
  int mode;
  int size;
  int restore_a4 = FALSE;

  /* Compute the registers we need to pop.  */
  regs_to_pop = 0;
  pops_needed = 0;

  if (reg_containing_return_addr == -1)
    {
      regs_to_pop |= 1 << LR_REGNUM;
      ++pops_needed;
    }

  if (TARGET_BACKTRACE)
    {
      /* Restore the (ARM) frame pointer and stack pointer.  */
      regs_to_pop |= (1 << ARM_HARD_FRAME_POINTER_REGNUM) | (1 << SP_REGNUM);
      pops_needed += 2;
    }

  /* If there is nothing to pop then just emit the BX instruction and
     return.  */
  if (pops_needed == 0)
    {
      if (crtl->calls_eh_return)
        asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);

      asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
      return;
    }
  /* Otherwise if we are not supporting interworking and we have not created
     a backtrace structure and the function was not entered in ARM mode then
     just pop the return address straight into the PC.  */
  else if (!TARGET_INTERWORK
           && !TARGET_BACKTRACE
           && !is_called_in_ARM_mode (current_function_decl)
           && !crtl->calls_eh_return)
    {
      asm_fprintf (f, "\tpop\t{%r}\n", PC_REGNUM);
      return;
    }

  /* Find out how many of the (return) argument registers we can corrupt.  */
  regs_available_for_popping = 0;

  /* If returning via __builtin_eh_return, the bottom three registers
     all contain information needed for the return.  */
  if (crtl->calls_eh_return)
    size = 12;
  else
    {
      /* Try to deduce the registers used from the function's
         return value.  This is more reliable than examining
         df_regs_ever_live_p () because that will be set if the register is
         ever used in the function, not just if the register is used
         to hold a return value.  */

      if (crtl->return_rtx != 0)
        mode = GET_MODE (crtl->return_rtx);
      else
        mode = DECL_MODE (DECL_RESULT (current_function_decl));

      size = GET_MODE_SIZE (mode);

      if (size == 0)
        {
          /* In a void function we can use any argument register.
             In a function that returns a structure on the stack
             we can use the second and third argument registers.  */
          if (mode == VOIDmode)
            regs_available_for_popping =
              (1 << ARG_REGISTER (1))
              | (1 << ARG_REGISTER (2))
              | (1 << ARG_REGISTER (3));
          else
            regs_available_for_popping =
              (1 << ARG_REGISTER (2))
              | (1 << ARG_REGISTER (3));
        }
      else if (size <= 4)
        regs_available_for_popping =
          (1 << ARG_REGISTER (2))
          | (1 << ARG_REGISTER (3));
      else if (size <= 8)
        regs_available_for_popping =
          (1 << ARG_REGISTER (3));
    }

  /* Match registers to be popped with registers into which we pop them.  */
  for (available = regs_available_for_popping,
       required  = regs_to_pop;
       required != 0 && available != 0;
       available &= ~(available & - available),
       required  &= ~(required  & - required))
    -- pops_needed;

  /* If we have any popping registers left over, remove them.  */
  if (available > 0)
    regs_available_for_popping &= ~available;

  /* Otherwise if we need another popping register we can use
     the fourth argument register.  */
  else if (pops_needed)
    {
      /* If we have not found any free argument registers and
         reg a4 contains the return address, we must move it.  */
      if (regs_available_for_popping == 0
          && reg_containing_return_addr == LAST_ARG_REGNUM)
        {
          asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
          reg_containing_return_addr = LR_REGNUM;
        }
      else if (size > 12)
        {
          /* Register a4 is being used to hold part of the return value,
             but we have dire need of a free, low register.  */
          restore_a4 = TRUE;

          asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, LAST_ARG_REGNUM);
        }

      if (reg_containing_return_addr != LAST_ARG_REGNUM)
        {
          /* The fourth argument register is available.  */
          regs_available_for_popping |= 1 << LAST_ARG_REGNUM;

          --pops_needed;
        }
    }

  /* Pop as many registers as we can.  */
  thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
                 regs_available_for_popping);

  /* Process the registers we popped.  */
  if (reg_containing_return_addr == -1)
    {
      /* The return address was popped into the lowest numbered register.  */
      regs_to_pop &= ~(1 << LR_REGNUM);

      reg_containing_return_addr =
        number_of_first_bit_set (regs_available_for_popping);

      /* Remove this register from the mask of available registers, so that
         the return address will not be corrupted by further pops.  */
      regs_available_for_popping &= ~(1 << reg_containing_return_addr);
    }

  /* If we popped other registers then handle them here.  */
  if (regs_available_for_popping)
    {
      int frame_pointer;

      /* Work out which register currently contains the frame pointer.  */
      frame_pointer = number_of_first_bit_set (regs_available_for_popping);

      /* Move it into the correct place.  */
      asm_fprintf (f, "\tmov\t%r, %r\n",
                   ARM_HARD_FRAME_POINTER_REGNUM, frame_pointer);

      /* (Temporarily) remove it from the mask of popped registers.  */
      regs_available_for_popping &= ~(1 << frame_pointer);
      regs_to_pop &= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM);

      if (regs_available_for_popping)
        {
          int stack_pointer;

          /* We popped the stack pointer as well,
             find the register that contains it.  */
          stack_pointer = number_of_first_bit_set (regs_available_for_popping);

          /* Move it into the stack register.  */
          asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, stack_pointer);

          /* At this point we have popped all necessary registers, so
             do not worry about restoring regs_available_for_popping
             to its correct value:

             assert (pops_needed == 0)
             assert (regs_available_for_popping == (1 << frame_pointer))
             assert (regs_to_pop == (1 << STACK_POINTER))  */
        }
      else
        {
          /* Since we have just moved the popped value into the frame
             pointer, the popping register is available for reuse, and
             we know that we still have the stack pointer left to pop.  */
          regs_available_for_popping |= (1 << frame_pointer);
        }
    }

  /* If we still have registers left on the stack, but we no longer have
     any registers into which we can pop them, then we must move the return
     address into the link register and make available the register that
     contained it.  */
  if (regs_available_for_popping == 0 && pops_needed > 0)
    {
      regs_available_for_popping |= 1 << reg_containing_return_addr;

      asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM,
                   reg_containing_return_addr);

      reg_containing_return_addr = LR_REGNUM;
    }

  /* If we have registers left on the stack then pop some more.
     We know that at most we will want to pop FP and SP.  */
  if (pops_needed > 0)
    {
      int  popped_into;
      int  move_to;

      thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
                     regs_available_for_popping);

      /* We have popped either FP or SP.
         Move whichever one it is into the correct register.  */
      popped_into = number_of_first_bit_set (regs_available_for_popping);
      move_to     = number_of_first_bit_set (regs_to_pop);

      asm_fprintf (f, "\tmov\t%r, %r\n", move_to, popped_into);

      regs_to_pop &= ~(1 << move_to);

      --pops_needed;
    }

  /* If we still have not popped everything then we must have only
     had one register available to us and we are now popping the SP.  */
  if (pops_needed > 0)
    {
      int  popped_into;

      thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
                     regs_available_for_popping);

      popped_into = number_of_first_bit_set (regs_available_for_popping);

      asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, popped_into);
      /*
        assert (regs_to_pop == (1 << STACK_POINTER))
        assert (pops_needed == 1)
      */
    }

  /* If necessary restore the a4 register.  */
  if (restore_a4)
    {
      if (reg_containing_return_addr != LR_REGNUM)
        {
          asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
          reg_containing_return_addr = LR_REGNUM;
        }

      asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
    }

  if (crtl->calls_eh_return)
    asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);

  /* Return to caller.  */
  asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
}


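/* Annotate the assembly output with the address of each insn when
   asm name printing is enabled.  */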
void
thumb1_final_prescan_insn (rtx insn)
{
  if (flag_print_asm_name)
    asm_fprintf (asm_out_file, "%@ 0x%04x\n",
                 INSN_ADDRESSES (INSN_UID (insn)));
}

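/* Return 1 if the low 32 bits of VAL equal an 8-bit constant shifted
   left by between 0 and 24 bits; return 0 otherwise (including for
   VAL == 0).  */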
int
thumb_shiftable_const (unsigned HOST_WIDE_INT val)
{
  unsigned HOST_WIDE_INT mask = 0xff;
  int i;

  val = val & (unsigned HOST_WIDE_INT)0xffffffffu;
  if (val == 0) /* XXX */
    return 0;

  for (i = 0; i < 25; i++)
    if ((val & (mask << i)) == val)
      return 1;

  return 0;
}

/* Returns nonzero if the current function contains,
   or might contain, a far jump.  */
static int
thumb_far_jump_used_p (void)
{
  rtx insn;

  /* This test is only important for leaf functions.  */
  /* assert (!leaf_function_p ()); */

  /* If we have already decided that far jumps may be used,
     do not bother checking again, and always return true even if
     it turns out that they are not being used.  Once we have made
     the decision that far jumps are present (and that hence the link
     register will be pushed onto the stack) we cannot go back on it.  */
  if (cfun->machine->far_jump_used)
    return 1;

  /* If this function is not being called from the prologue/epilogue
     generation code then it must be being called from the
     INITIAL_ELIMINATION_OFFSET macro.  */
  if (!(ARM_DOUBLEWORD_ALIGN || reload_completed))
    {
      /* In this case we know that we are being asked about the elimination
         of the arg pointer register.  If that register is not being used,
         then there are no arguments on the stack, and we do not have to
         worry that a far jump might force the prologue to push the link
         register, changing the stack offsets.  In this case we can just
         return false, since the presence of far jumps in the function will
         not affect stack offsets.

         If the arg pointer is live (or if it was live, but has now been
         eliminated and so set to dead) then we do have to test to see if
         the function might contain a far jump.  This test can lead to some
         false negatives, since before reload is completed the length of
         branch instructions is not known, so gcc defaults to returning their
         longest length, which in turn sets the far jump attribute to true.

         A false negative will not result in bad code being generated, but it
         will result in a needless push and pop of the link register.  We
         hope that this does not occur too often.

         If we need doubleword stack alignment this could affect the other
         elimination offsets so we can't risk getting it wrong.  */
      if (df_regs_ever_live_p (ARG_POINTER_REGNUM))
        cfun->machine->arg_pointer_live = 1;
      else if (!cfun->machine->arg_pointer_live)
        return 0;
    }

  /* Check to see if the function contains a branch
     insn with the far jump attribute set.  */
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == JUMP_INSN
          /* Ignore tablejump patterns.  */
          && GET_CODE (PATTERN (insn)) != ADDR_VEC
          && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
          && get_attr_far_jump (insn) == FAR_JUMP_YES
          )
        {
          /* Record the fact that we have decided that
             the function does use far jumps.  */
          cfun->machine->far_jump_used = 1;
          return 1;
        }
    }

  return 0;
}

/* Return nonzero if FUNC must be entered in ARM mode.  */
int
is_called_in_ARM_mode (tree func)
{
  gcc_assert (TREE_CODE (func) == FUNCTION_DECL);

  /* Ignore the problem about functions whose address is taken.  */
  if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
    return TRUE;

#ifdef ARM_PE
  return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func)) != NULL_TREE;
#else
  return FALSE;
#endif
}

/* The bits which aren't usefully expanded as rtl.  */
const char *
thumb_unexpanded_epilogue (void)
{
  arm_stack_offsets *offsets;
  int regno;
  unsigned long live_regs_mask = 0;
  int high_regs_pushed = 0;
  int had_to_push_lr;
  int size;

  if (cfun->machine->return_used_this_function != 0)
    return "";

  if (IS_NAKED (arm_current_func_type ()))
    return "";

  offsets = arm_get_frame_offsets ();
  live_regs_mask = offsets->saved_regs_mask;
  high_regs_pushed = bit_count (live_regs_mask & 0x0f00);

  /* Try to deduce the registers used from the function's return value.
     This is more reliable than examining df_regs_ever_live_p () because that
     will be set if the register is ever used in the function, not just if
     the register is used to hold a return value.  */
  size = arm_size_return_regs ();

  /* The prolog may have pushed some high registers to use as
     work registers.  e.g. the testsuite file:
     gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
     compiles to produce:
        push    {r4, r5, r6, r7, lr}
        mov     r7, r9
        mov     r6, r8
        push    {r6, r7}
     as part of the prolog.  We have to undo that pushing here.  */

  if (high_regs_pushed)
    {
      unsigned long mask = live_regs_mask & 0xff;
      int next_hi_reg;

      /* The available low registers depend on the size of the value we are
         returning.  */
      if (size <= 12)
        mask |= 1 << 3;
      if (size <= 8)
        mask |= 1 << 2;

      if (mask == 0)
        /* Oh dear!  We have no low registers into which we can pop
           high registers!  */
        internal_error
          ("no low registers available for popping high registers");

      for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
        if (live_regs_mask & (1 << next_hi_reg))
          break;

      while (high_regs_pushed)
        {
          /* Find lo register(s) into which the high register(s) can
             be popped.  */
          for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
            {
              if (mask & (1 << regno))
                high_regs_pushed--;
              if (high_regs_pushed == 0)
                break;
            }

          mask &= (2 << regno) - 1;     /* A noop if regno == 8 */

          /* Pop the values into the low register(s).  */
          thumb_pushpop (asm_out_file, mask, 0, NULL, mask);

          /* Move the value(s) into the high registers.  */
          for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
            {
              if (mask & (1 << regno))
                {
                  asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", next_hi_reg,
                               regno);

                  for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
                    if (live_regs_mask & (1 << next_hi_reg))
                      break;
                }
            }
        }
      live_regs_mask &= ~0x0f00;
    }

  had_to_push_lr = (live_regs_mask & (1 << LR_REGNUM)) != 0;
  live_regs_mask &= 0xff;

  if (crtl->args.pretend_args_size == 0 || TARGET_BACKTRACE)
    {
      /* Pop the return address into the PC.  */
      if (had_to_push_lr)
        live_regs_mask |= 1 << PC_REGNUM;

      /* Either no argument registers were pushed or a backtrace
         structure was created which includes an adjusted stack
         pointer, so just pop everything.  */
      if (live_regs_mask)
        thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
                       live_regs_mask);

      /* We have either just popped the return address into the
         PC or it was kept in LR for the entire function.  */
      if (!had_to_push_lr)
        thumb_exit (asm_out_file, LR_REGNUM);
    }
  else
    {
      /* Pop everything but the return address.  */
      if (live_regs_mask)
        thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
                       live_regs_mask);

      if (had_to_push_lr)
        {
          if (size > 12)
            {
              /* We have no free low regs, so save one.  */
              asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", IP_REGNUM,
                           LAST_ARG_REGNUM);
            }

          /* Get the return address into a temporary register.  */
          thumb_pushpop (asm_out_file, 1 << LAST_ARG_REGNUM, 0, NULL,
                         1 << LAST_ARG_REGNUM);

          if (size > 12)
            {
              /* Move the return address to lr.  */
              asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LR_REGNUM,
                           LAST_ARG_REGNUM);
              /* Restore the low register.  */
              asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LAST_ARG_REGNUM,
                           IP_REGNUM);
              regno = LR_REGNUM;
            }
          else
            regno = LAST_ARG_REGNUM;
        }
      else
        regno = LR_REGNUM;

      /* Remove the argument registers that were pushed onto the stack.  */
      asm_fprintf (asm_out_file, "\tadd\t%r, %r, #%d\n",
                   SP_REGNUM, SP_REGNUM,
                   crtl->args.pretend_args_size);

      thumb_exit (asm_out_file, regno);
    }

  return "";
}

/* Functions to save and restore machine-specific function data.  */
static struct machine_function *
arm_init_machine_status (void)
{
  struct machine_function *machine;
  machine = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));

#if ARM_FT_UNKNOWN != 0
  machine->func_type = ARM_FT_UNKNOWN;
#endif
  return machine;
}

/* Return an RTX indicating where the return address to the
   calling function can be found.  */
rtx
arm_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
{
  if (count != 0)
    return NULL_RTX;

  return get_hard_reg_initial_val (Pmode, LR_REGNUM);
}

/* Do anything needed before RTL is emitted for each function.  */
void
arm_init_expanders (void)
{
  /* Arrange to initialize and mark the machine per-function status.  */
  init_machine_status = arm_init_machine_status;

  /* This is to stop the combine pass optimizing away the alignment
     adjustment of va_arg.  */
  /* ??? It is claimed that this should not be necessary.  */
  if (cfun)
    mark_reg_pointer (arg_pointer_rtx, PARM_BOUNDARY);
}


/* Like arm_compute_initial_elimination_offset.  Simpler because there
   isn't an ABI specified frame pointer for Thumb.  Instead, we set it
   to point at the base of the local variables after static stack
   space for a function has been allocated.  */

HOST_WIDE_INT
thumb_compute_initial_elimination_offset (unsigned int from, unsigned int to)
{
  arm_stack_offsets *offsets;

  offsets = arm_get_frame_offsets ();

  switch (from)
    {
    case ARG_POINTER_REGNUM:
      switch (to)
        {
        case STACK_POINTER_REGNUM:
          return offsets->outgoing_args - offsets->saved_args;

        case FRAME_POINTER_REGNUM:
          return offsets->soft_frame - offsets->saved_args;

        case ARM_HARD_FRAME_POINTER_REGNUM:
          return offsets->saved_regs - offsets->saved_args;

        case THUMB_HARD_FRAME_POINTER_REGNUM:
          return offsets->locals_base - offsets->saved_args;

        default:
          gcc_unreachable ();
        }
      break;

    case FRAME_POINTER_REGNUM:
      switch (to)
        {
        case STACK_POINTER_REGNUM:
          return offsets->outgoing_args - offsets->soft_frame;

        case ARM_HARD_FRAME_POINTER_REGNUM:
          return offsets->saved_regs - offsets->soft_frame;

        case THUMB_HARD_FRAME_POINTER_REGNUM:
          return offsets->locals_base - offsets->soft_frame;

        default:
          gcc_unreachable ();
        }
      break;

    default:
      gcc_unreachable ();
    }
}

/* Generate the rest of a function's prologue.  */
void
thumb1_expand_prologue (void)
{
  rtx insn, dwarf;

  HOST_WIDE_INT amount;
  arm_stack_offsets *offsets;
  unsigned long func_type;
  int regno;
  unsigned long live_regs_mask;

  func_type = arm_current_func_type ();

  /* Naked functions don't have prologues.  */
  if (IS_NAKED (func_type))
    return;

  if (IS_INTERRUPT (func_type))
    {
      error ("interrupt Service Routines cannot be coded in Thumb mode");
      return;
    }

  offsets = arm_get_frame_offsets ();
  live_regs_mask = offsets->saved_regs_mask;
  /* Load the pic register before setting the frame pointer,
     so we can use r7 as a temporary work register.  */
  if (flag_pic && arm_pic_register != INVALID_REGNUM)
    arm_load_pic_register (live_regs_mask);

  if (!frame_pointer_needed && CALLER_INTERWORKING_SLOT_SIZE > 0)
    emit_move_insn (gen_rtx_REG (Pmode, ARM_HARD_FRAME_POINTER_REGNUM),
                    stack_pointer_rtx);

  amount = offsets->outgoing_args - offsets->saved_regs;
  if (amount)
    {
      if (amount < 512)
        {
          insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
                                        GEN_INT (- amount)));
          RTX_FRAME_RELATED_P (insn) = 1;
        }
      else
        {
          rtx reg;

          /* The stack decrement is too big for an immediate value in a single
             insn.  In theory we could issue multiple subtracts, but after
             three of them it becomes more space efficient to place the full
             value in the constant pool and load into a register.  (Also the
             ARM debugger really likes to see only one stack decrement per
             function).  So instead we look for a scratch register into which
             we can load the decrement, and then we subtract this from the
             stack pointer.  Unfortunately on the thumb the only available
             scratch registers are the argument registers, and we cannot use
             these as they may hold arguments to the function.  Instead we
             attempt to locate a call preserved register which is used by this
             function.  If we can find one, then we know that it will have
             been pushed at the start of the prologue and so we can corrupt
             it now.  */
          for (regno = LAST_ARG_REGNUM + 1; regno <= LAST_LO_REGNUM; regno++)
            if (live_regs_mask & (1 << regno))
              break;

          gcc_assert (regno <= LAST_LO_REGNUM);

          reg = gen_rtx_REG (SImode, regno);

          emit_insn (gen_movsi (reg, GEN_INT (- amount)));

          insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
                                        stack_pointer_rtx, reg));
          RTX_FRAME_RELATED_P (insn) = 1;
          dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
                               plus_constant (stack_pointer_rtx,
                                              -amount));
          RTX_FRAME_RELATED_P (dwarf) = 1;
          add_reg_note (insn, REG_FRAME_RELATED_EXPR, dwarf);
        }
    }

  if (frame_pointer_needed)
    thumb_set_frame_pointer (offsets);

  /* If we are profiling, make sure no instructions are scheduled before
     the call to mcount.  Similarly if the user has requested no
     scheduling in the prolog.  Similarly if we want non-call exceptions
     using the EABI unwinder, to prevent faulting instructions from being
     swapped with a stack adjustment.  */
  if (crtl->profile || !TARGET_SCHED_PROLOG
      || (ARM_EABI_UNWIND_TABLES && flag_non_call_exceptions))
    emit_insn (gen_blockage ());

  cfun->machine->lr_save_eliminated = !thumb_force_lr_save ();
  if (live_regs_mask & 0xff)
    cfun->machine->lr_save_eliminated = 0;
}


void
thumb1_expand_epilogue (void)
{
  HOST_WIDE_INT amount;
  arm_stack_offsets *offsets;
  int regno;

  /* Naked functions don't have epilogues.  */
  if (IS_NAKED (arm_current_func_type ()))
    return;

  offsets = arm_get_frame_offsets ();
  amount = offsets->outgoing_args - offsets->saved_regs;

  if (frame_pointer_needed)
    {
      emit_insn (gen_movsi (stack_pointer_rtx, hard_frame_pointer_rtx));
      amount = offsets->locals_base - offsets->saved_regs;
    }

  gcc_assert (amount >= 0);
  if (amount)
    {
      if (amount < 512)
        emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
                               GEN_INT (amount)));
      else
        {
          /* r3 is always free in the epilogue.  */
          rtx reg = gen_rtx_REG (SImode, LAST_ARG_REGNUM);

          emit_insn (gen_movsi (reg, GEN_INT (amount)));
          emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
        }
    }

  /* Emit a USE (stack_pointer_rtx), so that
     the stack adjustment will not be deleted.  */
  emit_insn (gen_prologue_use (stack_pointer_rtx));

  if (crtl->profile || !TARGET_SCHED_PROLOG)
    emit_insn (gen_blockage ());

  /* Emit a clobber for each register that will be restored in the epilogue,
     so that flow2 will get register lifetimes correct.  */
  for (regno = 0; regno < 13; regno++)
    if (df_regs_ever_live_p (regno) && !call_used_regs[regno])
      emit_clobber (gen_rtx_REG (SImode, regno));

  if (! df_regs_ever_live_p (LR_REGNUM))
    emit_use (gen_rtx_REG (SImode, LR_REGNUM));
}

static void
thumb1_output_function_prologue (FILE *f, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  arm_stack_offsets *offsets;
  unsigned long live_regs_mask = 0;
  unsigned long l_mask;
  unsigned high_regs_pushed = 0;
  int cfa_offset = 0;
  int regno;

  if (IS_NAKED (arm_current_func_type ()))
    return;

  if (is_called_in_ARM_mode (current_function_decl))
    {
      const char * name;

      gcc_assert (GET_CODE (DECL_RTL (current_function_decl)) == MEM);
      gcc_assert (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0))
                  == SYMBOL_REF);
      name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);

      /* Generate code sequence to switch us into Thumb mode.  */
      /* The .code 32 directive has already been emitted by
         ASM_DECLARE_FUNCTION_NAME.  */
      asm_fprintf (f, "\torr\t%r, %r, #1\n", IP_REGNUM, PC_REGNUM);
      asm_fprintf (f, "\tbx\t%r\n", IP_REGNUM);

      /* Generate a label, so that the debugger will notice the
         change in instruction sets.  This label is also used by
         the assembler to bypass the ARM code when this function
         is called from a Thumb encoded function elsewhere in the
         same file.  Hence the definition of STUB_NAME here must
         agree with the definition in gas/config/tc-arm.c.  */

#define STUB_NAME ".real_start_of"

      fprintf (f, "\t.code\t16\n");
#ifdef ARM_PE
      if (arm_dllexport_name_p (name))
        name = arm_strip_name_encoding (name);
#endif
      asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
      fprintf (f, "\t.thumb_func\n");
      asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
    }

  if (crtl->args.pretend_args_size)
    {
      /* Output unwind directive for the stack adjustment.  */
      if (ARM_EABI_UNWIND_TABLES)
        fprintf (f, "\t.pad #%d\n",
                 crtl->args.pretend_args_size);

      if (cfun->machine->uses_anonymous_args)
        {
          int num_pushes;

          fprintf (f, "\tpush\t{");

          num_pushes = ARM_NUM_INTS (crtl->args.pretend_args_size);

          for (regno = LAST_ARG_REGNUM + 1 - num_pushes;
               regno <= LAST_ARG_REGNUM;
               regno++)
            asm_fprintf (f, "%r%s", regno,
                         regno == LAST_ARG_REGNUM ? "" : ", ");

          fprintf (f, "}\n");
        }
      else
        asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
                     SP_REGNUM, SP_REGNUM,
                     crtl->args.pretend_args_size);

      /* We don't need to record the stores for unwinding (would it
         help the debugger any if we did?), but record the change in
         the stack pointer.  */
      if (dwarf2out_do_frame ())
        {
          char *l = dwarf2out_cfi_label (false);

          cfa_offset = cfa_offset + crtl->args.pretend_args_size;
          dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
        }
    }

  /* Get the registers we are going to push.  */
  offsets = arm_get_frame_offsets ();
  live_regs_mask = offsets->saved_regs_mask;
  /* Extract a mask of the ones we can give to the Thumb's push instruction.  */
  l_mask = live_regs_mask & 0x40ff;
  /* Then count how many other high registers will need to be pushed.  */
  high_regs_pushed = bit_count (live_regs_mask & 0x0f00);

  if (TARGET_BACKTRACE)
    {
      unsigned offset;
      unsigned work_register;

      /* We have been asked to create a stack backtrace structure.
         The code looks like this:

         0   .align 2
         0   func:
         0     sub   SP, #16         Reserve space for 4 registers.
         2     push  {R7}            Push low registers.
         4     add   R7, SP, #20     Get the stack pointer before the push.
         6     str   R7, [SP, #8]    Store the stack pointer (before reserving the space).
         8     mov   R7, PC          Get hold of the start of this code plus 12.
        10     str   R7, [SP, #16]   Store it.
        12     mov   R7, FP          Get hold of the current frame pointer.
        14     str   R7, [SP, #4]    Store it.
        16     mov   R7, LR          Get hold of the current return address.
        18     str   R7, [SP, #12]   Store it.
        20     add   R7, SP, #16     Point at the start of the backtrace structure.
        22     mov   FP, R7          Put this value into the frame pointer.  */
 
19557
      work_register = thumb_find_work_register (live_regs_mask);
19558
 
19559
      if (ARM_EABI_UNWIND_TABLES)
19560
        asm_fprintf (f, "\t.pad #16\n");
19561
 
19562
      asm_fprintf
19563
        (f, "\tsub\t%r, %r, #16\t%@ Create stack backtrace structure\n",
19564
         SP_REGNUM, SP_REGNUM);
19565
 
19566
      if (dwarf2out_do_frame ())
19567
        {
19568
          char *l = dwarf2out_cfi_label (false);
19569
 
19570
          cfa_offset = cfa_offset + 16;
19571
          dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
19572
        }
19573
 
19574
      if (l_mask)
19575
        {
19576
          thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
19577
          offset = bit_count (l_mask) * UNITS_PER_WORD;
19578
        }
19579
      else
19580
        offset = 0;
19581
 
19582
      asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
19583
                   offset + 16 + crtl->args.pretend_args_size);
19584
 
19585
      asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
19586
                   offset + 4);
19587
 
19588
      /* Make sure that the instruction fetching the PC is in the right place
19589
         to calculate "start of backtrace creation code + 12".  */
19590
      if (l_mask)
19591
        {
19592
          asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
19593
          asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
19594
                       offset + 12);
19595
          asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
19596
                       ARM_HARD_FRAME_POINTER_REGNUM);
19597
          asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
19598
                       offset);
19599
        }
19600
      else
19601
        {
19602
          asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
19603
                       ARM_HARD_FRAME_POINTER_REGNUM);
19604
          asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
19605
                       offset);
19606
          asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
19607
          asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
19608
                       offset + 12);
19609
        }
19610
 
19611
      asm_fprintf (f, "\tmov\t%r, %r\n", work_register, LR_REGNUM);
19612
      asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
19613
                   offset + 8);
19614
      asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
19615
                   offset + 12);
19616
      asm_fprintf (f, "\tmov\t%r, %r\t\t%@ Backtrace structure created\n",
19617
                   ARM_HARD_FRAME_POINTER_REGNUM, work_register);
19618
    }
19619
  /* Optimization:  If we are not pushing any low registers but we are going
19620
     to push some high registers then delay our first push.  This will just
19621
     be a push of LR and we can combine it with the push of the first high
19622
     register.  */
19623
  else if ((l_mask & 0xff) != 0
19624
           || (high_regs_pushed == 0 && l_mask))
19625
    thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
19626
 
19627
  if (high_regs_pushed)
19628
    {
19629
      unsigned pushable_regs;
19630
      unsigned next_hi_reg;
19631
 
19632
      for (next_hi_reg = 12; next_hi_reg > LAST_LO_REGNUM; next_hi_reg--)
19633
        if (live_regs_mask & (1 << next_hi_reg))
19634
          break;
19635
 
19636
      pushable_regs = l_mask & 0xff;
19637
 
19638
      if (pushable_regs == 0)
19639
        pushable_regs = 1 << thumb_find_work_register (live_regs_mask);
19640
 
19641
      while (high_regs_pushed > 0)
19642
        {
19643
          unsigned long real_regs_mask = 0;
19644
 
19645
          for (regno = LAST_LO_REGNUM; regno >= 0; regno --)
19646
            {
19647
              if (pushable_regs & (1 << regno))
19648
                {
19649
                  asm_fprintf (f, "\tmov\t%r, %r\n", regno, next_hi_reg);
19650
 
19651
                  high_regs_pushed --;
19652
                  real_regs_mask |= (1 << next_hi_reg);
19653
 
19654
                  if (high_regs_pushed)
19655
                    {
19656
                      for (next_hi_reg --; next_hi_reg > LAST_LO_REGNUM;
19657
                           next_hi_reg --)
19658
                        if (live_regs_mask & (1 << next_hi_reg))
19659
                          break;
19660
                    }
19661
                  else
19662
                    {
19663
                      pushable_regs &= ~((1 << regno) - 1);
19664
                      break;
19665
                    }
19666
                }
19667
            }
19668
 
19669
          /* If we had to find a work register and we have not yet
19670
             saved the LR then add it to the list of regs to push.  */
19671
          if (l_mask == (1 << LR_REGNUM))
19672
            {
19673
              thumb_pushpop (f, pushable_regs | (1 << LR_REGNUM),
19674
                             1, &cfa_offset,
19675
                             real_regs_mask | (1 << LR_REGNUM));
19676
              l_mask = 0;
19677
            }
19678
          else
19679
            thumb_pushpop (f, pushable_regs, 1, &cfa_offset, real_regs_mask);
19680
        }
19681
    }
19682
}
19683
 
19684
/* Handle the case of a double word load into a low register from
   a computed memory address.  The computed address may involve a
   register which is overwritten by the load.  */
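/* For example, when loading r0/r1 from the address held in r0, the high
   word must be loaded (into r1) first; loading r0 first would destroy
   the base address before the second word could be fetched.  */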
const char *
thumb_load_double_from_address (rtx *operands)
{
  rtx addr;
  rtx base;
  rtx offset;
  rtx arg1;
  rtx arg2;

  gcc_assert (GET_CODE (operands[0]) == REG);
  gcc_assert (GET_CODE (operands[1]) == MEM);

  /* Get the memory address.  */
  addr = XEXP (operands[1], 0);

  /* Work out how the memory address is computed.  */
  switch (GET_CODE (addr))
    {
    case REG:
      operands[2] = adjust_address (operands[1], SImode, 4);

      if (REGNO (operands[0]) == REGNO (addr))
        {
          output_asm_insn ("ldr\t%H0, %2", operands);
          output_asm_insn ("ldr\t%0, %1", operands);
        }
      else
        {
          output_asm_insn ("ldr\t%0, %1", operands);
          output_asm_insn ("ldr\t%H0, %2", operands);
        }
      break;

    case CONST:
      /* Compute <address> + 4 for the high order load.  */
      operands[2] = adjust_address (operands[1], SImode, 4);

      output_asm_insn ("ldr\t%0, %1", operands);
      output_asm_insn ("ldr\t%H0, %2", operands);
      break;

    case PLUS:
      arg1   = XEXP (addr, 0);
      arg2   = XEXP (addr, 1);

      if (CONSTANT_P (arg1))
        base = arg2, offset = arg1;
      else
        base = arg1, offset = arg2;

      gcc_assert (GET_CODE (base) == REG);

      /* Catch the case of <address> = <reg> + <reg> */
      if (GET_CODE (offset) == REG)
        {
          int reg_offset = REGNO (offset);
          int reg_base   = REGNO (base);
          int reg_dest   = REGNO (operands[0]);

          /* Add the base and offset registers together into the
             higher destination register.  */
          asm_fprintf (asm_out_file, "\tadd\t%r, %r, %r",
                       reg_dest + 1, reg_base, reg_offset);

          /* Load the lower destination register from the address in
             the higher destination register.  */
          asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #0]",
                       reg_dest, reg_dest + 1);

          /* Load the higher destination register from its own address
             plus 4.  */
          asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #4]",
                       reg_dest + 1, reg_dest + 1);
        }
      else
        {
          /* Compute <address> + 4 for the high order load.  */
          operands[2] = adjust_address (operands[1], SImode, 4);

          /* If the computed address is held in the low order register
             then load the high order register first, otherwise always
             load the low order register first.  */
          if (REGNO (operands[0]) == REGNO (base))
            {
              output_asm_insn ("ldr\t%H0, %2", operands);
              output_asm_insn ("ldr\t%0, %1", operands);
            }
          else
            {
              output_asm_insn ("ldr\t%0, %1", operands);
              output_asm_insn ("ldr\t%H0, %2", operands);
            }
        }
      break;

    case LABEL_REF:
      /* With no registers to worry about we can just load the value
         directly.  */
      operands[2] = adjust_address (operands[1], SImode, 4);

      output_asm_insn ("ldr\t%H0, %2", operands);
      output_asm_insn ("ldr\t%0, %1", operands);
      break;

    default:
      gcc_unreachable ();
    }

  return "";
}

const char *
thumb_output_move_mem_multiple (int n, rtx *operands)
{
  rtx tmp;

  switch (n)
    {
    case 2:
      if (REGNO (operands[4]) > REGNO (operands[5]))
        {
          tmp = operands[4];
          operands[4] = operands[5];
          operands[5] = tmp;
        }
      output_asm_insn ("ldmia\t%1!, {%4, %5}", operands);
      output_asm_insn ("stmia\t%0!, {%4, %5}", operands);
      break;

    case 3:
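      /* Sort the three transfer registers into ascending order, since
         ldmia/stmia register lists must name registers in ascending
         order.  The three compare-and-swap steps below form a minimal
         sorting network for three elements.  */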
      if (REGNO (operands[4]) > REGNO (operands[5]))
        {
          tmp = operands[4];
          operands[4] = operands[5];
          operands[5] = tmp;
        }
      if (REGNO (operands[5]) > REGNO (operands[6]))
        {
          tmp = operands[5];
          operands[5] = operands[6];
          operands[6] = tmp;
        }
      if (REGNO (operands[4]) > REGNO (operands[5]))
        {
          tmp = operands[4];
          operands[4] = operands[5];
          operands[5] = tmp;
        }

      output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands);
      output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands);
      break;

    default:
      gcc_unreachable ();
    }

  return "";
}

/* Output a call-via instruction for thumb state.  */
const char *
thumb_call_via_reg (rtx reg)
{
  int regno = REGNO (reg);
  rtx *labelp;

  gcc_assert (regno < LR_REGNUM);

  /* If we are in the normal text section we can use a single instance
     per compilation unit.  If we are doing function sections, then we need
     an entry per section, since we can't rely on reachability.  */
  if (in_section == text_section)
    {
      thumb_call_reg_needed = 1;

      if (thumb_call_via_label[regno] == NULL)
        thumb_call_via_label[regno] = gen_label_rtx ();
      labelp = thumb_call_via_label + regno;
    }
  else
    {
      if (cfun->machine->call_via[regno] == NULL)
        cfun->machine->call_via[regno] = gen_label_rtx ();
      labelp = cfun->machine->call_via + regno;
    }

  output_asm_insn ("bl\t%a0", labelp);
  return "";
}

/* Routines for generating rtl.  */
void
thumb_expand_movmemqi (rtx *operands)
{
  rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
  rtx in  = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
  HOST_WIDE_INT len = INTVAL (operands[2]);
  HOST_WIDE_INT offset = 0;
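  /* Copy in descending chunk sizes: 12-byte and 8-byte ldmia/stmia
     groups, then word, halfword and byte moves.  A 30-byte copy, for
     instance, comes out as 12 + 12 + 4 + 2 byte transfers.  */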

  while (len >= 12)
    {
      emit_insn (gen_movmem12b (out, in, out, in));
      len -= 12;
    }

  if (len >= 8)
    {
      emit_insn (gen_movmem8b (out, in, out, in));
      len -= 8;
    }

  if (len >= 4)
    {
      rtx reg = gen_reg_rtx (SImode);
      emit_insn (gen_movsi (reg, gen_rtx_MEM (SImode, in)));
      emit_insn (gen_movsi (gen_rtx_MEM (SImode, out), reg));
      len -= 4;
      offset += 4;
    }

  if (len >= 2)
    {
      rtx reg = gen_reg_rtx (HImode);
      emit_insn (gen_movhi (reg, gen_rtx_MEM (HImode,
                                              plus_constant (in, offset))));
      emit_insn (gen_movhi (gen_rtx_MEM (HImode, plus_constant (out, offset)),
                            reg));
      len -= 2;
      offset += 2;
    }

  if (len)
    {
      rtx reg = gen_reg_rtx (QImode);
      emit_insn (gen_movqi (reg, gen_rtx_MEM (QImode,
                                              plus_constant (in, offset))));
      emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (out, offset)),
                            reg));
    }
}

void
thumb_reload_out_hi (rtx *operands)
{
  emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
}

/* Handle reading a half-word from memory during reload.  */
void
thumb_reload_in_hi (rtx *operands ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}

/* Return the length of a function name prefix
    that starts with the character 'c'.  */
static int
arm_get_strip_length (int c)
{
  switch (c)
    {
    ARM_NAME_ENCODING_LENGTHS
      default: return 0;
    }
}

/* Return a pointer to a function's name with any
   and all prefix encodings stripped from it.  */
const char *
arm_strip_name_encoding (const char *name)
{
  int skip;

  while ((skip = arm_get_strip_length (* name)))
    name += skip;

  return name;
}

/* If there is a '*' anywhere in the name's prefix, then
   emit the stripped name verbatim, otherwise prepend an
   underscore if leading underscores are being used.  */
void
arm_asm_output_labelref (FILE *stream, const char *name)
{
  int skip;
  int verbatim = 0;

  while ((skip = arm_get_strip_length (* name)))
    {
      verbatim |= (*name == '*');
      name += skip;
    }

  if (verbatim)
    fputs (name, stream);
  else
    asm_fprintf (stream, "%U%s", name);
}

static void
arm_file_start (void)
{
  int val;

  if (TARGET_UNIFIED_ASM)
    asm_fprintf (asm_out_file, "\t.syntax unified\n");

  if (TARGET_BPABI)
    {
      const char *fpu_name;
      if (arm_select[0].string)
        asm_fprintf (asm_out_file, "\t.cpu %s\n", arm_select[0].string);
      else if (arm_select[1].string)
        asm_fprintf (asm_out_file, "\t.arch %s\n", arm_select[1].string);
      else
        asm_fprintf (asm_out_file, "\t.cpu %s\n",
                     all_cores[arm_default_cpu].name);

      if (TARGET_SOFT_FLOAT)
        {
          if (TARGET_VFP)
            fpu_name = "softvfp";
          else
            fpu_name = "softfpa";
        }
      else
        {
          fpu_name = arm_fpu_desc->name;
          if (arm_fpu_desc->model == ARM_FP_MODEL_VFP)
            {
              if (TARGET_HARD_FLOAT)
                asm_fprintf (asm_out_file, "\t.eabi_attribute 27, 3\n");
              if (TARGET_HARD_FLOAT_ABI)
                asm_fprintf (asm_out_file, "\t.eabi_attribute 28, 1\n");
            }
        }
      asm_fprintf (asm_out_file, "\t.fpu %s\n", fpu_name);

      /* Some of these attributes only apply when the corresponding features
         are used.  However we don't have any easy way of figuring this out.
         Conservatively record the setting that would have been used.  */

      /* Tag_ABI_FP_rounding.  */
      if (flag_rounding_math)
        asm_fprintf (asm_out_file, "\t.eabi_attribute 19, 1\n");
      if (!flag_unsafe_math_optimizations)
        {
          /* Tag_ABI_FP_denormal.  */
          asm_fprintf (asm_out_file, "\t.eabi_attribute 20, 1\n");
          /* Tag_ABI_FP_exceptions.  */
          asm_fprintf (asm_out_file, "\t.eabi_attribute 21, 1\n");
        }
      /* Tag_ABI_FP_user_exceptions.  */
      if (flag_signaling_nans)
        asm_fprintf (asm_out_file, "\t.eabi_attribute 22, 1\n");
      /* Tag_ABI_FP_number_model.  */
      asm_fprintf (asm_out_file, "\t.eabi_attribute 23, %d\n",
                   flag_finite_math_only ? 1 : 3);

      /* Tag_ABI_align8_needed.  */
      asm_fprintf (asm_out_file, "\t.eabi_attribute 24, 1\n");
      /* Tag_ABI_align8_preserved.  */
      asm_fprintf (asm_out_file, "\t.eabi_attribute 25, 1\n");
      /* Tag_ABI_enum_size.  */
      asm_fprintf (asm_out_file, "\t.eabi_attribute 26, %d\n",
                   flag_short_enums ? 1 : 2);

      /* Tag_ABI_optimization_goals.  */
      if (optimize_size)
        val = 4;
      else if (optimize >= 2)
        val = 2;
      else if (optimize)
        val = 1;
      else
        val = 6;
      asm_fprintf (asm_out_file, "\t.eabi_attribute 30, %d\n", val);

      /* Tag_ABI_FP_16bit_format.  */
      if (arm_fp16_format)
        asm_fprintf (asm_out_file, "\t.eabi_attribute 38, %d\n",
                     (int) arm_fp16_format);

      if (arm_lang_output_object_attributes_hook)
        arm_lang_output_object_attributes_hook ();
    }
  default_file_start ();
}

static void
arm_file_end (void)
{
  int regno;

  if (NEED_INDICATE_EXEC_STACK)
    /* Add .note.GNU-stack.  */
    file_end_indicate_exec_stack ();

  if (! thumb_call_reg_needed)
    return;

  switch_to_section (text_section);
  asm_fprintf (asm_out_file, "\t.code 16\n");
  ASM_OUTPUT_ALIGN (asm_out_file, 1);

  for (regno = 0; regno < LR_REGNUM; regno++)
    {
      rtx label = thumb_call_via_label[regno];

      if (label != 0)
        {
          targetm.asm_out.internal_label (asm_out_file, "L",
                                          CODE_LABEL_NUMBER (label));
          asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
        }
    }
}

#ifndef ARM_PE
/* Symbols in the text segment can be accessed without indirecting via the
   constant pool; it may take an extra binary operation, but this is still
   faster than indirecting via memory.  Don't do this when not optimizing,
   since we won't be calculating all of the offsets necessary to do this
   simplification.  */

static void
arm_encode_section_info (tree decl, rtx rtl, int first)
{
  if (optimize > 0 && TREE_CONSTANT (decl))
    SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;

  default_encode_section_info (decl, rtl, first);
}
#endif /* !ARM_PE */

static void
arm_internal_label (FILE *stream, const char *prefix, unsigned long labelno)
{
  if (arm_ccfsm_state == 3 && (unsigned) arm_target_label == labelno
      && !strcmp (prefix, "L"))
    {
      arm_ccfsm_state = 0;
      arm_target_insn = NULL;
    }
  default_internal_label (stream, prefix, labelno);
}

/* Output code to add DELTA to the first argument, and then jump
   to FUNCTION.  Used for C++ multiple inheritance.  */
static void
arm_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
                     HOST_WIDE_INT delta,
                     HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
                     tree function)
{
  static int thunk_label = 0;
  char label[256];
  char labelpc[256];
  int mi_delta = delta;
  const char *const mi_op = mi_delta < 0 ? "sub" : "add";
  int shift = 0;
  int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
                    ? 1 : 0);
  if (mi_delta < 0)
    mi_delta = - mi_delta;

  if (TARGET_THUMB1)
    {
      int labelno = thunk_label++;
      ASM_GENERATE_INTERNAL_LABEL (label, "LTHUMBFUNC", labelno);
      /* Thunks are entered in arm mode when available.  */
      if (TARGET_THUMB1_ONLY)
        {
          /* push r3 so we can use it as a temporary.  */
          /* TODO: Omit this save if r3 is not used.  */
          fputs ("\tpush {r3}\n", file);
          fputs ("\tldr\tr3, ", file);
        }
      else
        {
          fputs ("\tldr\tr12, ", file);
        }
      assemble_name (file, label);
      fputc ('\n', file);
      if (flag_pic)
        {
          /* If we are generating PIC, the ldr instruction below loads
             "(target - 7) - .LTHUNKPCn" into r12.  The pc reads as
             the address of the add + 8, so we have:

             r12 = (target - 7) - .LTHUNKPCn + (.LTHUNKPCn + 8)
                 = target + 1.

             Note that we have "+ 1" because some versions of GNU ld
             don't set the low bit of the result for R_ARM_REL32
             relocations against thumb function symbols.
             On ARMv6M this is +4, not +8.  */
          ASM_GENERATE_INTERNAL_LABEL (labelpc, "LTHUNKPC", labelno);
          assemble_name (file, labelpc);
          fputs (":\n", file);
          if (TARGET_THUMB1_ONLY)
            {
              /* This is 2 insns after the start of the thunk, so we know it
                 is 4-byte aligned.  */
              fputs ("\tadd\tr3, pc, r3\n", file);
              fputs ("\tmov r12, r3\n", file);
            }
          else
            fputs ("\tadd\tr12, pc, r12\n", file);
        }
      else if (TARGET_THUMB1_ONLY)
        fputs ("\tmov r12, r3\n", file);
    }
  if (TARGET_THUMB1_ONLY)
    {
      if (mi_delta > 255)
        {
          fputs ("\tldr\tr3, ", file);
          assemble_name (file, label);
          fputs ("+4\n", file);
          asm_fprintf (file, "\t%s\t%r, %r, r3\n",
                       mi_op, this_regno, this_regno);
        }
      else if (mi_delta != 0)
        {
          asm_fprintf (file, "\t%s\t%r, %r, #%d\n",
                       mi_op, this_regno, this_regno,
                       mi_delta);
        }
    }
  else
    {
      /* TODO: Use movw/movt for large constants when available.  */
      while (mi_delta != 0)
        {
          if ((mi_delta & (3 << shift)) == 0)
            shift += 2;
          else
            {
              asm_fprintf (file, "\t%s\t%r, %r, #%d\n",
                           mi_op, this_regno, this_regno,
                           mi_delta & (0xff << shift));
              mi_delta &= ~(0xff << shift);
              shift += 8;
            }
        }
    }
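  /* The loop above peels the absolute delta into 8-bit fields at even
     bit positions, each of which is a valid ARM rotated immediate.  With
     the `this' pointer in r0, for instance, a delta of 0x1234 is emitted
     as "add r0, r0, #0x234" followed by "add r0, r0, #0x1000".  */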
  if (TARGET_THUMB1)
    {
      if (TARGET_THUMB1_ONLY)
        fputs ("\tpop\t{r3}\n", file);

      fprintf (file, "\tbx\tr12\n");
      ASM_OUTPUT_ALIGN (file, 2);
      assemble_name (file, label);
      fputs (":\n", file);
      if (flag_pic)
        {
          /* Output ".word .LTHUNKn-7-.LTHUNKPCn".  */
          rtx tem = XEXP (DECL_RTL (function), 0);
          tem = gen_rtx_PLUS (GET_MODE (tem), tem, GEN_INT (-7));
          tem = gen_rtx_MINUS (GET_MODE (tem),
                               tem,
                               gen_rtx_SYMBOL_REF (Pmode,
                                                   ggc_strdup (labelpc)));
          assemble_integer (tem, 4, BITS_PER_WORD, 1);
        }
      else
        /* Output ".word .LTHUNKn".  */
        assemble_integer (XEXP (DECL_RTL (function), 0), 4, BITS_PER_WORD, 1);

      if (TARGET_THUMB1_ONLY && mi_delta > 255)
        assemble_integer (GEN_INT (mi_delta), 4, BITS_PER_WORD, 1);
    }
  else
    {
      fputs ("\tb\t", file);
      assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
      if (NEED_PLT_RELOC)
        fputs ("(PLT)", file);
      fputc ('\n', file);
    }
}

int
arm_emit_vector_const (FILE *file, rtx x)
{
  int i;
  const char * pattern;

  gcc_assert (GET_CODE (x) == CONST_VECTOR);

  switch (GET_MODE (x))
    {
    case V2SImode: pattern = "%08x"; break;
    case V4HImode: pattern = "%04x"; break;
    case V8QImode: pattern = "%02x"; break;
    default:       gcc_unreachable ();
    }

  fprintf (file, "0x");
  for (i = CONST_VECTOR_NUNITS (x); i--;)
    {
      rtx element;

      element = CONST_VECTOR_ELT (x, i);
      fprintf (file, pattern, INTVAL (element));
    }

  return 1;
}

/* Emit a fp16 constant appropriately padded to occupy a 4-byte word.
   HFmode constant pool entries are actually loaded with ldr.  */
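/* The two bytes are placed in the half of the word that a 4-byte ldr
   will read them from: on a little-endian target, for instance, the
   fp16 constant 1.0 (bit pattern 0x3c00) is emitted as the bytes
   00 3c 00 00.  */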
void
arm_emit_fp16_const (rtx c)
{
  REAL_VALUE_TYPE r;
  long bits;

  REAL_VALUE_FROM_CONST_DOUBLE (r, c);
  bits = real_to_target (NULL, &r, HFmode);
  if (WORDS_BIG_ENDIAN)
    assemble_zeros (2);
  assemble_integer (GEN_INT (bits), 2, BITS_PER_WORD, 1);
  if (!WORDS_BIG_ENDIAN)
    assemble_zeros (2);
}

const char *
arm_output_load_gr (rtx *operands)
{
  rtx reg;
  rtx offset;
  rtx wcgr;
  rtx sum;

  if (GET_CODE (operands [1]) != MEM
      || GET_CODE (sum = XEXP (operands [1], 0)) != PLUS
      || GET_CODE (reg = XEXP (sum, 0)) != REG
      || GET_CODE (offset = XEXP (sum, 1)) != CONST_INT
      || ((INTVAL (offset) < 1024) && (INTVAL (offset) > -1024)))
    return "wldrw%?\t%0, %1";

  /* Fix up an out-of-range load of a GR register.  */
  output_asm_insn ("str%?\t%0, [sp, #-4]!\t@ Start of GR load expansion", & reg);
  wcgr = operands[0];
  operands[0] = reg;
  output_asm_insn ("ldr%?\t%0, %1", operands);

  operands[0] = wcgr;
  operands[1] = reg;
  output_asm_insn ("tmcr%?\t%0, %1", operands);
  output_asm_insn ("ldr%?\t%0, [sp], #4\t@ End of GR load expansion", & reg);

  return "";
}

/* Worker function for TARGET_SETUP_INCOMING_VARARGS.

   On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
   named arg and all anonymous args onto the stack.
   XXX I know the prologue shouldn't be pushing registers, but it is faster
   that way.  */

static void
arm_setup_incoming_varargs (CUMULATIVE_ARGS *pcum,
                            enum machine_mode mode,
                            tree type,
                            int *pretend_size,
                            int second_time ATTRIBUTE_UNUSED)
{
  int nregs;

  cfun->machine->uses_anonymous_args = 1;
  if (pcum->pcs_variant <= ARM_PCS_AAPCS_LOCAL)
    {
      nregs = pcum->aapcs_ncrn;
      if ((nregs & 1) && arm_needs_doubleword_align (mode, type))
        nregs++;
    }
  else
    nregs = pcum->nregs;
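
  /* For instance, if the named arguments occupy r0 and r1, nregs is 2
     and we request 8 bytes of pretend space so that r2 and r3 are
     stored where va_arg can find them.  */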

  if (nregs < NUM_ARG_REGS)
    *pretend_size = (NUM_ARG_REGS - nregs) * UNITS_PER_WORD;
}

/* Return nonzero if the CONSUMER instruction (a store) does not need
   PRODUCER's value to calculate the address.  */

int
arm_no_early_store_addr_dep (rtx producer, rtx consumer)
{
  rtx value = PATTERN (producer);
  rtx addr = PATTERN (consumer);

  if (GET_CODE (value) == COND_EXEC)
    value = COND_EXEC_CODE (value);
  if (GET_CODE (value) == PARALLEL)
    value = XVECEXP (value, 0, 0);
  value = XEXP (value, 0);
  if (GET_CODE (addr) == COND_EXEC)
    addr = COND_EXEC_CODE (addr);
  if (GET_CODE (addr) == PARALLEL)
    addr = XVECEXP (addr, 0, 0);
  addr = XEXP (addr, 0);

  return !reg_overlap_mentioned_p (value, addr);
}

/* Return nonzero if the CONSUMER instruction (an ALU op) does not
   have an early register shift value or amount dependency on the
   result of PRODUCER.  */

int
arm_no_early_alu_shift_dep (rtx producer, rtx consumer)
{
  rtx value = PATTERN (producer);
  rtx op = PATTERN (consumer);
  rtx early_op;

  if (GET_CODE (value) == COND_EXEC)
    value = COND_EXEC_CODE (value);
  if (GET_CODE (value) == PARALLEL)
    value = XVECEXP (value, 0, 0);
  value = XEXP (value, 0);
  if (GET_CODE (op) == COND_EXEC)
    op = COND_EXEC_CODE (op);
  if (GET_CODE (op) == PARALLEL)
    op = XVECEXP (op, 0, 0);
  op = XEXP (op, 1);

  early_op = XEXP (op, 0);
  /* This is either an actual independent shift, or a shift applied to
     the first operand of another operation.  We want the whole shift
     operation.  */
  if (GET_CODE (early_op) == REG)
    early_op = op;

  return !reg_overlap_mentioned_p (value, early_op);
}

/* Return nonzero if the CONSUMER instruction (an ALU op) does not
   have an early register shift value dependency on the result of
   PRODUCER.  */

int
arm_no_early_alu_shift_value_dep (rtx producer, rtx consumer)
{
  rtx value = PATTERN (producer);
  rtx op = PATTERN (consumer);
  rtx early_op;

  if (GET_CODE (value) == COND_EXEC)
    value = COND_EXEC_CODE (value);
  if (GET_CODE (value) == PARALLEL)
    value = XVECEXP (value, 0, 0);
  value = XEXP (value, 0);
  if (GET_CODE (op) == COND_EXEC)
    op = COND_EXEC_CODE (op);
  if (GET_CODE (op) == PARALLEL)
    op = XVECEXP (op, 0, 0);
  op = XEXP (op, 1);

  early_op = XEXP (op, 0);

  /* This is either an actual independent shift, or a shift applied to
     the first operand of another operation.  We want the value being
     shifted, in either case.  */
  if (GET_CODE (early_op) != REG)
    early_op = XEXP (early_op, 0);

  return !reg_overlap_mentioned_p (value, early_op);
}

/* Return nonzero if the CONSUMER (a mul or mac op) does not
   have an early register mult dependency on the result of
   PRODUCER.  */

int
arm_no_early_mul_dep (rtx producer, rtx consumer)
{
  rtx value = PATTERN (producer);
  rtx op = PATTERN (consumer);

  if (GET_CODE (value) == COND_EXEC)
    value = COND_EXEC_CODE (value);
  if (GET_CODE (value) == PARALLEL)
    value = XVECEXP (value, 0, 0);
  value = XEXP (value, 0);
  if (GET_CODE (op) == COND_EXEC)
    op = COND_EXEC_CODE (op);
  if (GET_CODE (op) == PARALLEL)
    op = XVECEXP (op, 0, 0);
  op = XEXP (op, 1);

  if (GET_CODE (op) == PLUS || GET_CODE (op) == MINUS)
    {
      if (GET_CODE (XEXP (op, 0)) == MULT)
        return !reg_overlap_mentioned_p (value, XEXP (op, 0));
      else
        return !reg_overlap_mentioned_p (value, XEXP (op, 1));
    }

  return 0;
}

/* We can't rely on the caller doing the proper promotion when
   using APCS or ATPCS.  */

static bool
arm_promote_prototypes (const_tree t ATTRIBUTE_UNUSED)
{
    return !TARGET_AAPCS_BASED;
}

static enum machine_mode
arm_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
                           enum machine_mode mode,
                           int *punsignedp ATTRIBUTE_UNUSED,
                           const_tree fntype ATTRIBUTE_UNUSED,
                           int for_return ATTRIBUTE_UNUSED)
{
  if (GET_MODE_CLASS (mode) == MODE_INT
      && GET_MODE_SIZE (mode) < 4)
    return SImode;

  return mode;
}

/* AAPCS based ABIs use short enums by default.  */

static bool
arm_default_short_enums (void)
{
  return TARGET_AAPCS_BASED && arm_abi != ARM_ABI_AAPCS_LINUX;
}


/* AAPCS requires that anonymous bitfields affect structure alignment.  */

static bool
arm_align_anon_bitfield (void)
{
  return TARGET_AAPCS_BASED;
}


/* The generic C++ ABI says 64-bit (long long).  The EABI says 32-bit.  */

static tree
arm_cxx_guard_type (void)
{
  return TARGET_AAPCS_BASED ? integer_type_node : long_long_integer_type_node;
}

/* Return non-zero if the consumer (a multiply-accumulate instruction)
   has an accumulator dependency on the result of the producer (a
   multiplication instruction) and no other dependency on that result.  */
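/* For instance, "mul r1, r2, r3" followed by "mla r4, r5, r6, r1"
   qualifies, since r1 feeds only the accumulator operand; it would not
   qualify if r1 also appeared as one of the multiplicands.  */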
int
arm_mac_accumulator_is_mul_result (rtx producer, rtx consumer)
{
  rtx mul = PATTERN (producer);
  rtx mac = PATTERN (consumer);
  rtx mul_result;
  rtx mac_op0, mac_op1, mac_acc;

  if (GET_CODE (mul) == COND_EXEC)
    mul = COND_EXEC_CODE (mul);
  if (GET_CODE (mac) == COND_EXEC)
    mac = COND_EXEC_CODE (mac);

  /* Check that mul is of the form (set (...) (mult ...))
     and mla is of the form (set (...) (plus (mult ...) (...))).  */
  if ((GET_CODE (mul) != SET || GET_CODE (XEXP (mul, 1)) != MULT)
      || (GET_CODE (mac) != SET || GET_CODE (XEXP (mac, 1)) != PLUS
          || GET_CODE (XEXP (XEXP (mac, 1), 0)) != MULT))
    return 0;

  mul_result = XEXP (mul, 0);
  mac_op0 = XEXP (XEXP (XEXP (mac, 1), 0), 0);
  mac_op1 = XEXP (XEXP (XEXP (mac, 1), 0), 1);
  mac_acc = XEXP (XEXP (mac, 1), 1);

  return (reg_overlap_mentioned_p (mul_result, mac_acc)
          && !reg_overlap_mentioned_p (mul_result, mac_op0)
          && !reg_overlap_mentioned_p (mul_result, mac_op1));
}


/* The EABI says test the least significant bit of a guard variable.  */

static bool
arm_cxx_guard_mask_bit (void)
{
  return TARGET_AAPCS_BASED;
}


/* The EABI specifies that all array cookies are 8 bytes long.  */

static tree
arm_get_cookie_size (tree type)
{
  tree size;

  if (!TARGET_AAPCS_BASED)
    return default_cxx_get_cookie_size (type);

  size = build_int_cst (sizetype, 8);
  return size;
}


/* The EABI says that array cookies should also contain the element size.  */

static bool
arm_cookie_has_size (void)
{
  return TARGET_AAPCS_BASED;
}


/* The EABI says constructors and destructors should return a pointer to
   the object constructed/destroyed.  */

static bool
arm_cxx_cdtor_returns_this (void)
{
  return TARGET_AAPCS_BASED;
}

/* The EABI says that an inline function may never be the key
   method.  */

static bool
arm_cxx_key_method_may_be_inline (void)
{
  return !TARGET_AAPCS_BASED;
}

static void
arm_cxx_determine_class_data_visibility (tree decl)
{
  if (!TARGET_AAPCS_BASED
      || !TARGET_DLLIMPORT_DECL_ATTRIBUTES)
    return;

  /* In general, \S 3.2.5.5 of the ARM EABI requires that class data
     is exported.  However, on systems without dynamic vague linkage,
     \S 3.2.5.6 says that COMDAT class data has hidden linkage.  */
  if (!TARGET_ARM_DYNAMIC_VAGUE_LINKAGE_P && DECL_COMDAT (decl))
    DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
  else
    DECL_VISIBILITY (decl) = VISIBILITY_DEFAULT;
  DECL_VISIBILITY_SPECIFIED (decl) = 1;
}

static bool
arm_cxx_class_data_always_comdat (void)
{
  /* \S 3.2.5.4 of the ARM C++ ABI says that class data only have
     vague linkage if the class has no key function.  */
  return !TARGET_AAPCS_BASED;
}


/* The EABI says __aeabi_atexit should be used to register static
   destructors.  */

static bool
arm_cxx_use_aeabi_atexit (void)
{
  return TARGET_AAPCS_BASED;
}


void
arm_set_return_address (rtx source, rtx scratch)
{
  arm_stack_offsets *offsets;
  HOST_WIDE_INT delta;
  rtx addr;
  unsigned long saved_regs;

  offsets = arm_get_frame_offsets ();
  saved_regs = offsets->saved_regs_mask;

  if ((saved_regs & (1 << LR_REGNUM)) == 0)
    emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
  else
    {
      if (frame_pointer_needed)
        addr = plus_constant (hard_frame_pointer_rtx, -4);
      else
        {
          /* LR will be the first saved register.  */
          delta = offsets->outgoing_args - (offsets->frame + 4);
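          /* ARM load/store word offsets are limited to +/-4095, so a
             larger delta is split into a 4K-aligned add plus a small
             residue.  */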

          if (delta >= 4096)
            {
              emit_insn (gen_addsi3 (scratch, stack_pointer_rtx,
                                     GEN_INT (delta & ~4095)));
              addr = scratch;
              delta &= 4095;
            }
          else
            addr = stack_pointer_rtx;

          addr = plus_constant (addr, delta);
        }
      emit_move_insn (gen_frame_mem (Pmode, addr), source);
    }
}


void
thumb_set_return_address (rtx source, rtx scratch)
{
  arm_stack_offsets *offsets;
  HOST_WIDE_INT delta;
  HOST_WIDE_INT limit;
  int reg;
  rtx addr;
  unsigned long mask;

  emit_use (source);

  offsets = arm_get_frame_offsets ();
  mask = offsets->saved_regs_mask;
  if (mask & (1 << LR_REGNUM))
    {
      limit = 1024;
      /* Find the saved regs.  */
      if (frame_pointer_needed)
        {
          delta = offsets->soft_frame - offsets->saved_args;
          reg = THUMB_HARD_FRAME_POINTER_REGNUM;
          if (TARGET_THUMB1)
            limit = 128;
        }
      else
        {
          delta = offsets->outgoing_args - offsets->saved_args;
          reg = SP_REGNUM;
        }
      /* Allow for the stack frame.  */
      if (TARGET_THUMB1 && TARGET_BACKTRACE)
        delta -= 16;
      /* The link register is always the first saved register.  */
      delta -= 4;

      /* Construct the address.  */
      addr = gen_rtx_REG (SImode, reg);
      if (delta > limit)
        {
          emit_insn (gen_movsi (scratch, GEN_INT (delta)));
          emit_insn (gen_addsi3 (scratch, scratch, stack_pointer_rtx));
          addr = scratch;
        }
      else
        addr = plus_constant (addr, delta);

      emit_move_insn (gen_frame_mem (Pmode, addr), source);
    }
  else
    emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
}

/* Implements target hook vector_mode_supported_p.  */
bool
arm_vector_mode_supported_p (enum machine_mode mode)
{
  /* Neon also supports V2SImode, etc. listed in the clause below.  */
  if (TARGET_NEON && (mode == V2SFmode || mode == V4SImode || mode == V8HImode
      || mode == V16QImode || mode == V4SFmode || mode == V2DImode))
    return true;

  if ((TARGET_NEON || TARGET_IWMMXT)
      && ((mode == V2SImode)
          || (mode == V4HImode)
          || (mode == V8QImode)))
    return true;

  return false;
}

/* Implement TARGET_SHIFT_TRUNCATION_MASK.  SImode shifts use normal
   ARM insns and therefore guarantee that the shift count is modulo 256.
   DImode shifts (those implemented by lib1funcs.asm or by optabs.c)
   guarantee no particular behavior for out-of-range counts.  */
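/* Returning 255 for SImode lets the optimizers delete a redundant
   "count & 255" ahead of an SImode shift; returning 0 for other modes
   makes no such promise, so explicit masking is preserved there.  */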
static unsigned HOST_WIDE_INT
arm_shift_truncation_mask (enum machine_mode mode)
{
  return mode == SImode ? 255 : 0;
}


/* Map internal gcc register numbers to DWARF2 register numbers.  */

unsigned int
arm_dbx_register_number (unsigned int regno)
{
  if (regno < 16)
    return regno;

  /* TODO: Legacy targets output FPA regs as registers 16-23 for backwards
     compatibility.  The EABI defines them as registers 96-103.  */
  if (IS_FPA_REGNUM (regno))
    return (TARGET_AAPCS_BASED ? 96 : 16) + regno - FIRST_FPA_REGNUM;

  if (IS_VFP_REGNUM (regno))
    {
      /* See comment in arm_dwarf_register_span.  */
      if (VFP_REGNO_OK_FOR_SINGLE (regno))
        return 64 + regno - FIRST_VFP_REGNUM;
      else
        return 256 + (regno - FIRST_VFP_REGNUM) / 2;
    }
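  /* Thus s5 maps to DWARF register 69, while d16, which has no
     single-precision alias, maps to 256 + 16 = 272.  */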

  if (IS_IWMMXT_GR_REGNUM (regno))
    return 104 + regno - FIRST_IWMMXT_GR_REGNUM;

  if (IS_IWMMXT_REGNUM (regno))
    return 112 + regno - FIRST_IWMMXT_REGNUM;

  gcc_unreachable ();
}

/* Dwarf models VFPv3 registers as 32 64-bit registers.
   GCC models them as 64 32-bit registers, so we need to describe this to
   the DWARF generation code.  Other registers can use the default.  */
static rtx
arm_dwarf_register_span (rtx rtl)
{
  unsigned regno;
  int nregs;
  int i;
  rtx p;

  regno = REGNO (rtl);
  if (!IS_VFP_REGNUM (regno))
    return NULL_RTX;

  /* XXX FIXME: The EABI defines two VFP register ranges:
        64-95: Legacy VFPv2 numbering for S0-S31 (obsolescent)
        256-287: D0-D31
     The recommended encoding for S0-S31 is a DW_OP_bit_piece of the
     corresponding D register.  Until GDB supports this, we shall use the
     legacy encodings.  We also use these encodings for D0-D15 for
     compatibility with older debuggers.  */
  if (VFP_REGNO_OK_FOR_SINGLE (regno))
    return NULL_RTX;

  nregs = GET_MODE_SIZE (GET_MODE (rtl)) / 8;
  p = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nregs));
  regno = (regno - FIRST_VFP_REGNUM) / 2;
  for (i = 0; i < nregs; i++)
    XVECEXP (p, 0, i) = gen_rtx_REG (DImode, 256 + regno + i);

  return p;
}

#ifdef TARGET_UNWIND_INFO
/* Emit unwind directives for a store-multiple instruction or stack pointer
   push during alignment.
   These should only ever be generated by the function prologue code, so
   expect them to have a particular form.  */
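/* For instance, a prologue "push {r4, r5, lr}" appears as a PARALLEL
   whose first element sets sp to sp - 12 and whose remaining elements
   store the three registers; it is emitted as ".save {r4, r5, lr}".  */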

static void
arm_unwind_emit_sequence (FILE * asm_out_file, rtx p)
{
  int i;
  HOST_WIDE_INT offset;
  HOST_WIDE_INT nregs;
  int reg_size;
  unsigned reg;
  unsigned lastreg;
  rtx e;

  e = XVECEXP (p, 0, 0);
  if (GET_CODE (e) != SET)
    abort ();

  /* First insn will adjust the stack pointer.  */
  if (GET_CODE (e) != SET
      || GET_CODE (XEXP (e, 0)) != REG
      || REGNO (XEXP (e, 0)) != SP_REGNUM
      || GET_CODE (XEXP (e, 1)) != PLUS)
    abort ();

  offset = -INTVAL (XEXP (XEXP (e, 1), 1));
  nregs = XVECLEN (p, 0) - 1;

  reg = REGNO (XEXP (XVECEXP (p, 0, 1), 1));
  if (reg < 16)
    {
      /* The function prologue may also push pc, but not annotate it as it is
         never restored.  We turn this into a stack pointer adjustment.  */
      if (nregs * 4 == offset - 4)
        {
          fprintf (asm_out_file, "\t.pad #4\n");
          offset -= 4;
        }
      reg_size = 4;
      fprintf (asm_out_file, "\t.save {");
    }
  else if (IS_VFP_REGNUM (reg))
    {
      reg_size = 8;
      fprintf (asm_out_file, "\t.vsave {");
    }
  else if (reg >= FIRST_FPA_REGNUM && reg <= LAST_FPA_REGNUM)
    {
      /* FPA registers are done differently.  */
      asm_fprintf (asm_out_file, "\t.save %r, %wd\n", reg, nregs);
      return;
    }
  else
    /* Unknown register type.  */
    abort ();

  /* If the stack increment doesn't match the size of the saved registers,
     something has gone horribly wrong.  */
  if (offset != nregs * reg_size)
    abort ();

  offset = 0;
  lastreg = 0;
  /* The remaining insns will describe the stores.  */
  for (i = 1; i <= nregs; i++)
    {
      /* Expect (set (mem <addr>) (reg)).
         Where <addr> is (reg:SP) or (plus (reg:SP) (const_int)).  */
      e = XVECEXP (p, 0, i);
      if (GET_CODE (e) != SET
          || GET_CODE (XEXP (e, 0)) != MEM
          || GET_CODE (XEXP (e, 1)) != REG)
        abort ();

      reg = REGNO (XEXP (e, 1));
      if (reg < lastreg)
        abort ();

      if (i != 1)
        fprintf (asm_out_file, ", ");
      /* We can't use %r for vfp because we need to use the
         double precision register names.  */
      if (IS_VFP_REGNUM (reg))
        asm_fprintf (asm_out_file, "d%d", (reg - FIRST_VFP_REGNUM) / 2);
      else
        asm_fprintf (asm_out_file, "%r", reg);

#ifdef ENABLE_CHECKING
      /* Check that the addresses are consecutive.  */
      e = XEXP (XEXP (e, 0), 0);
      if (GET_CODE (e) == PLUS)
        {
          offset += reg_size;
          if (GET_CODE (XEXP (e, 0)) != REG
              || REGNO (XEXP (e, 0)) != SP_REGNUM
              || GET_CODE (XEXP (e, 1)) != CONST_INT
              || offset != INTVAL (XEXP (e, 1)))
            abort ();
        }
      else if (i != 1
               || GET_CODE (e) != REG
               || REGNO (e) != SP_REGNUM)
        abort ();
#endif
    }
  fprintf (asm_out_file, "}\n");
}

/*  Emit unwind directives for a SET.  */
20967
 
20968
static void
20969
arm_unwind_emit_set (FILE * asm_out_file, rtx p)
20970
{
20971
  rtx e0;
20972
  rtx e1;
20973
  unsigned reg;
20974
 
20975
  e0 = XEXP (p, 0);
20976
  e1 = XEXP (p, 1);
20977
  switch (GET_CODE (e0))
20978
    {
20979
    case MEM:
20980
      /* Pushing a single register.  */
20981
      if (GET_CODE (XEXP (e0, 0)) != PRE_DEC
20982
          || GET_CODE (XEXP (XEXP (e0, 0), 0)) != REG
20983
          || REGNO (XEXP (XEXP (e0, 0), 0)) != SP_REGNUM)
20984
        abort ();
20985
 
20986
      asm_fprintf (asm_out_file, "\t.save ");
20987
      if (IS_VFP_REGNUM (REGNO (e1)))
20988
        asm_fprintf(asm_out_file, "{d%d}\n",
20989
                    (REGNO (e1) - FIRST_VFP_REGNUM) / 2);
20990
      else
20991
        asm_fprintf(asm_out_file, "{%r}\n", REGNO (e1));
20992
      break;
20993
 
20994
    case REG:
20995
      if (REGNO (e0) == SP_REGNUM)
20996
        {
20997
          /* A stack increment.  */
20998
          if (GET_CODE (e1) != PLUS
20999
              || GET_CODE (XEXP (e1, 0)) != REG
21000
              || REGNO (XEXP (e1, 0)) != SP_REGNUM
21001
              || GET_CODE (XEXP (e1, 1)) != CONST_INT)
21002
            abort ();
21003
 
21004
          asm_fprintf (asm_out_file, "\t.pad #%wd\n",
21005
                       -INTVAL (XEXP (e1, 1)));
21006
        }
21007
      else if (REGNO (e0) == HARD_FRAME_POINTER_REGNUM)
21008
        {
21009
          HOST_WIDE_INT offset;
21010
 
21011
          if (GET_CODE (e1) == PLUS)
21012
            {
21013
              if (GET_CODE (XEXP (e1, 0)) != REG
21014
                  || GET_CODE (XEXP (e1, 1)) != CONST_INT)
21015
                abort ();
21016
              reg = REGNO (XEXP (e1, 0));
21017
              offset = INTVAL (XEXP (e1, 1));
21018
              asm_fprintf (asm_out_file, "\t.setfp %r, %r, #%wd\n",
21019
                           HARD_FRAME_POINTER_REGNUM, reg,
21020
                           INTVAL (XEXP (e1, 1)));
21021
            }
21022
          else if (GET_CODE (e1) == REG)
21023
            {
21024
              reg = REGNO (e1);
21025
              asm_fprintf (asm_out_file, "\t.setfp %r, %r\n",
21026
                           HARD_FRAME_POINTER_REGNUM, reg);
21027
            }
21028
          else
21029
            abort ();
21030
        }
21031
      else if (GET_CODE (e1) == REG && REGNO (e1) == SP_REGNUM)
21032
        {
21033
          /* Move from sp to reg.  */
21034
          asm_fprintf (asm_out_file, "\t.movsp %r\n", REGNO (e0));
21035
        }
21036
     else if (GET_CODE (e1) == PLUS
21037
              && GET_CODE (XEXP (e1, 0)) == REG
21038
              && REGNO (XEXP (e1, 0)) == SP_REGNUM
21039
              && GET_CODE (XEXP (e1, 1)) == CONST_INT)
21040
        {
21041
          /* Set reg to offset from sp.  */
21042
          asm_fprintf (asm_out_file, "\t.movsp %r, #%d\n",
21043
                       REGNO (e0), (int)INTVAL(XEXP (e1, 1)));
21044
        }
21045
      else if (GET_CODE (e1) == UNSPEC && XINT (e1, 1) == UNSPEC_STACK_ALIGN)
21046
        {
21047
          /* Stack pointer save before alignment.  */
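          /* EHABI unwind opcode 0x90+N encodes "vsp = r[N]", telling the
             unwinder which register holds the pre-alignment stack
             pointer.  */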
          reg = REGNO (e0);
          asm_fprintf (asm_out_file, "\t.unwind_raw 0, 0x%x @ vsp = r%d\n",
                       reg + 0x90, reg);
        }
      else
        abort ();
      break;

    default:
      abort ();
    }
}


/* Emit unwind directives for the given insn.  */

static void
arm_unwind_emit (FILE * asm_out_file, rtx insn)
{
  rtx pat;

  if (!ARM_EABI_UNWIND_TABLES)
    return;

  if (!(flag_unwind_tables || crtl->uses_eh_lsda)
      && (TREE_NOTHROW (current_function_decl)
          || crtl->all_throwers_are_sibcalls))
    return;

  if (GET_CODE (insn) == NOTE || !RTX_FRAME_RELATED_P (insn))
    return;

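  /* Prefer a REG_FRAME_RELATED_EXPR note when present: it carries the
     canonical description of the insn's frame effect, which may be
     simpler than the raw pattern.  */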
  pat = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
  if (pat)
    pat = XEXP (pat, 0);
  else
    pat = PATTERN (insn);

  switch (GET_CODE (pat))
    {
    case SET:
      arm_unwind_emit_set (asm_out_file, pat);
      break;

    case SEQUENCE:
      /* Store multiple.  */
      arm_unwind_emit_sequence (asm_out_file, pat);
      break;

    default:
      abort ();
    }
}


/* Output a reference from a function exception table to the type_info
   object X.  The EABI specifies that the symbol should be relocated by
   an R_ARM_TARGET2 relocation.  */

static bool
arm_output_ttype (rtx x)
{
  fputs ("\t.word\t", asm_out_file);
  output_addr_const (asm_out_file, x);
  /* Use special relocations for symbol references.  */
  if (GET_CODE (x) != CONST_INT)
    fputs ("(TARGET2)", asm_out_file);
  fputc ('\n', asm_out_file);

  return TRUE;
}
#endif /* TARGET_UNWIND_INFO */


/* Handle UNSPEC DWARF call frame instructions.  These are needed for dynamic
   stack alignment.  */

static void
arm_dwarf_handle_frame_unspec (const char *label, rtx pattern, int index)
{
  rtx unspec = SET_SRC (pattern);
  gcc_assert (GET_CODE (unspec) == UNSPEC);

  switch (index)
    {
    case UNSPEC_STACK_ALIGN:
      /* ??? We should set the CFA = (SP & ~7).  At this point we haven't
         put anything on the stack, so hopefully it won't matter.
         CFA = SP will be correct after alignment.  */
      dwarf2out_reg_save_reg (label, stack_pointer_rtx,
                              SET_DEST (pattern));
      break;
    default:
      gcc_unreachable ();
    }
}


/* Output unwind directives for the start/end of a function.  */

void
arm_output_fn_unwind (FILE * f, bool prologue)
{
  if (!ARM_EABI_UNWIND_TABLES)
    return;

  if (prologue)
    fputs ("\t.fnstart\n", f);
  else
    {
      /* If this function will never be unwound, then mark it as such.
         The same condition is used in arm_unwind_emit to suppress
         the frame annotations.  */
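      /* ".cantunwind" tells the EABI unwinder to stop here: any attempt
         to unwind through this frame fails rather than proceeding with
         bogus state.  */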
      if (!(flag_unwind_tables || crtl->uses_eh_lsda)
          && (TREE_NOTHROW (current_function_decl)
              || crtl->all_throwers_are_sibcalls))
        fputs ("\t.cantunwind\n", f);

      fputs ("\t.fnend\n", f);
    }
}

static bool
arm_emit_tls_decoration (FILE *fp, rtx x)
{
  enum tls_reloc reloc;
  rtx val;

  val = XVECEXP (x, 0, 0);
  reloc = (enum tls_reloc) INTVAL (XVECEXP (x, 0, 1));

  output_addr_const (fp, val);

  switch (reloc)
    {
    case TLS_GD32:
      fputs ("(tlsgd)", fp);
      break;
    case TLS_LDM32:
      fputs ("(tlsldm)", fp);
      break;
    case TLS_LDO32:
      fputs ("(tlsldo)", fp);
      break;
    case TLS_IE32:
      fputs ("(gottpoff)", fp);
      break;
    case TLS_LE32:
      fputs ("(tpoff)", fp);
      break;
    default:
      gcc_unreachable ();
    }

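  /* The GD, LDM and IE sequences use the value PC-relatively, so follow
     the relocation with the "(. - anchor)" addend emitted by the switch
     below.  */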
  switch (reloc)
    {
    case TLS_GD32:
    case TLS_LDM32:
    case TLS_IE32:
      fputs (" + (. - ", fp);
      output_addr_const (fp, XVECEXP (x, 0, 2));
      fputs (" - ", fp);
      output_addr_const (fp, XVECEXP (x, 0, 3));
      fputc (')', fp);
      break;
    default:
      break;
    }

  return TRUE;
}

/* ARM implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL.  */

static void
arm_output_dwarf_dtprel (FILE *file, int size, rtx x)
{
  gcc_assert (size == 4);
  fputs ("\t.word\t", file);
  output_addr_const (file, x);
  fputs ("(tlsldo)", file);
}

bool
arm_output_addr_const_extra (FILE *fp, rtx x)
{
  if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
    return arm_emit_tls_decoration (fp, x);
  else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PIC_LABEL)
    {
      char label[256];
      int labelno = INTVAL (XVECEXP (x, 0, 0));

      ASM_GENERATE_INTERNAL_LABEL (label, "LPIC", labelno);
      assemble_name_raw (fp, label);

      return TRUE;
    }
  else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_GOTSYM_OFF)
    {
      assemble_name (fp, "_GLOBAL_OFFSET_TABLE_");
      if (GOT_PCREL)
        fputs ("+.", fp);
      fputs ("-(", fp);
      output_addr_const (fp, XVECEXP (x, 0, 0));
      fputc (')', fp);
      return TRUE;
    }
  else if (GET_CODE (x) == CONST_VECTOR)
    return arm_emit_vector_const (fp, x);

  return FALSE;
}

/* Output assembly for a shift instruction.
   SET_FLAGS determines how the instruction modifies the condition codes.

   0 - Do not set condition codes.
   1 - Set condition codes.
   2 - Use smallest instruction.  */
const char *
arm_output_shift (rtx * operands, int set_flags)
{
  char pattern[100];
  static const char flag_chars[3] = {'?', '.', '!'};
  const char *shift;
  HOST_WIDE_INT val;
  char c;

  c = flag_chars[set_flags];
  if (TARGET_UNIFIED_ASM)
    {
      shift = shift_op (operands[3], &val);
      if (shift)
        {
          if (val != -1)
            operands[2] = GEN_INT (val);
          sprintf (pattern, "%s%%%c\t%%0, %%1, %%2", shift, c);
        }
      else
        sprintf (pattern, "mov%%%c\t%%0, %%1", c);
    }
  else
    sprintf (pattern, "mov%%%c\t%%0, %%1%%S3", c);
  output_asm_insn (pattern, operands);
  return "";
}

/* Output a Thumb-1 casesi dispatch sequence.  */
const char *
thumb1_output_casesi (rtx *operands)
{
  rtx diff_vec = PATTERN (next_real_insn (operands[0]));
  addr_diff_vec_flags flags;

  gcc_assert (GET_CODE (diff_vec) == ADDR_DIFF_VEC);

  flags = ADDR_DIFF_VEC_FLAGS (diff_vec);

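  /* The __gnu_thumb1_case_* helpers are libgcc routines: they use the
     return address left by the BL to locate the dispatch table that
     immediately follows the call, then branch by the selected entry.  */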
  switch (GET_MODE (diff_vec))
    {
    case QImode:
      return (flags.offset_unsigned ?
              "bl\t%___gnu_thumb1_case_uqi" : "bl\t%___gnu_thumb1_case_sqi");
    case HImode:
      return (flags.offset_unsigned ?
              "bl\t%___gnu_thumb1_case_uhi" : "bl\t%___gnu_thumb1_case_shi");
    case SImode:
      return "bl\t%___gnu_thumb1_case_si";
    default:
      gcc_unreachable ();
    }
}

/* Output a Thumb-2 casesi instruction.  */
const char *
thumb2_output_casesi (rtx *operands)
{
  rtx diff_vec = PATTERN (next_real_insn (operands[2]));

  gcc_assert (GET_CODE (diff_vec) == ADDR_DIFF_VEC);

  output_asm_insn ("cmp\t%0, %1", operands);
  output_asm_insn ("bhi\t%l3", operands);
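  /* TBB/TBH are the Thumb-2 table-branch instructions: they fetch a byte
     (halfword) entry from the table at PC and branch forward by twice its
     value, so the QImode and HImode tables need no PIC fixup.  */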
  switch (GET_MODE (diff_vec))
    {
    case QImode:
      return "tbb\t[%|pc, %0]";
    case HImode:
      return "tbh\t[%|pc, %0, lsl #1]";
    case SImode:
      if (flag_pic)
        {
          output_asm_insn ("adr\t%4, %l2", operands);
          output_asm_insn ("ldr\t%5, [%4, %0, lsl #2]", operands);
          output_asm_insn ("add\t%4, %4, %5", operands);
          return "bx\t%4";
        }
      else
        {
          output_asm_insn ("adr\t%4, %l2", operands);
          return "ldr\t%|pc, [%4, %0, lsl #2]";
        }
    default:
      gcc_unreachable ();
    }
}

/* Most ARM cores are single issue, but some newer ones can dual issue.
   The scheduler descriptions rely on this being correct.  */
static int
arm_issue_rate (void)
{
  switch (arm_tune)
    {
    case cortexr4:
    case cortexr4f:
    case cortexa8:
    case cortexa9:
      return 2;

    default:
      return 1;
    }
}

/* A table and a function to perform ARM-specific name mangling for
   NEON vector types in order to conform to the AAPCS (see "Procedure
   Call Standard for the ARM Architecture", Appendix A).  To qualify
   for emission with the mangled names defined in that document, a
   vector type must not only be of the correct mode but also be
   composed of NEON vector element types (e.g. __builtin_neon_qi).  */
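/* For example, an int8x8_t (a V8QImode vector of __builtin_neon_qi
   elements) is emitted as "15__simd64_int8_t": the AAPCS type name
   preceded by its length, as for any C++ source name.  */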
typedef struct
{
  enum machine_mode mode;
  const char *element_type_name;
  const char *aapcs_name;
} arm_mangle_map_entry;

static arm_mangle_map_entry arm_mangle_map[] = {
  /* 64-bit containerized types.  */
  { V8QImode,  "__builtin_neon_qi",     "15__simd64_int8_t" },
  { V8QImode,  "__builtin_neon_uqi",    "16__simd64_uint8_t" },
  { V4HImode,  "__builtin_neon_hi",     "16__simd64_int16_t" },
  { V4HImode,  "__builtin_neon_uhi",    "17__simd64_uint16_t" },
  { V2SImode,  "__builtin_neon_si",     "16__simd64_int32_t" },
  { V2SImode,  "__builtin_neon_usi",    "17__simd64_uint32_t" },
  { V2SFmode,  "__builtin_neon_sf",     "18__simd64_float32_t" },
  { V8QImode,  "__builtin_neon_poly8",  "16__simd64_poly8_t" },
  { V4HImode,  "__builtin_neon_poly16", "17__simd64_poly16_t" },
  /* 128-bit containerized types.  */
  { V16QImode, "__builtin_neon_qi",     "16__simd128_int8_t" },
  { V16QImode, "__builtin_neon_uqi",    "17__simd128_uint8_t" },
  { V8HImode,  "__builtin_neon_hi",     "17__simd128_int16_t" },
  { V8HImode,  "__builtin_neon_uhi",    "18__simd128_uint16_t" },
  { V4SImode,  "__builtin_neon_si",     "17__simd128_int32_t" },
  { V4SImode,  "__builtin_neon_usi",    "18__simd128_uint32_t" },
  { V4SFmode,  "__builtin_neon_sf",     "19__simd128_float32_t" },
  { V16QImode, "__builtin_neon_poly8",  "17__simd128_poly8_t" },
  { V8HImode,  "__builtin_neon_poly16", "18__simd128_poly16_t" },
  { VOIDmode, NULL, NULL }
};
21409
 
21410
const char *
21411
arm_mangle_type (const_tree type)
21412
{
21413
  arm_mangle_map_entry *pos = arm_mangle_map;
21414
 
21415
  /* The ARM ABI documents (10th October 2008) say that "__va_list"
21416
     has to be managled as if it is in the "std" namespace.  */
21417
  if (TARGET_AAPCS_BASED
21418
      && lang_hooks.types_compatible_p (CONST_CAST_TREE (type), va_list_type))
21419
    {
21420
      static bool warned;
21421
      if (!warned && warn_psabi && !in_system_header)
21422
        {
21423
          warned = true;
21424
          inform (input_location,
21425
                  "the mangling of %<va_list%> has changed in GCC 4.4");
21426
        }
21427
      return "St9__va_list";
21428
    }
21429
 
21430
  /* Half-precision float.  */
21431
  if (TREE_CODE (type) == REAL_TYPE && TYPE_PRECISION (type) == 16)
21432
    return "Dh";
21433
 
21434
  if (TREE_CODE (type) != VECTOR_TYPE)
21435
    return NULL;
21436
 
21437
  /* Check the mode of the vector type, and the name of the vector
21438
     element type, against the table.  */
21439
  while (pos->mode != VOIDmode)
21440
    {
21441
      tree elt_type = TREE_TYPE (type);
21442
 
21443
      if (pos->mode == TYPE_MODE (type)
21444
          && TREE_CODE (TYPE_NAME (elt_type)) == TYPE_DECL
21445
          && !strcmp (IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (elt_type))),
21446
                      pos->element_type_name))
21447
        return pos->aapcs_name;
21448
 
21449
      pos++;
21450
    }
21451
 
21452
  /* Use the default mangling for unrecognized (possibly user-defined)
21453
     vector types.  */
21454
  return NULL;
21455
}
21456
 
21457
/* Order of allocation of core registers for Thumb: this allocation is
21458
   written over the corresponding initial entries of the array
21459
   initialized with REG_ALLOC_ORDER.  We allocate all low registers
21460
   first.  Saving and restoring a low register is usually cheaper than
21461
   using a call-clobbered high register.  */
21462
 
21463
static const int thumb_core_reg_alloc_order[] =
21464
{
21465
   3,  2,  1,  0,  4,  5,  6,  7,
21466
  14, 12,  8,  9, 10, 11, 13, 15
21467
};
21468
 
21469
/* Adjust register allocation order when compiling for Thumb.  */
21470
 
21471
void
21472
arm_order_regs_for_local_alloc (void)
21473
{
21474
  const int arm_reg_alloc_order[] = REG_ALLOC_ORDER;
21475
  memcpy(reg_alloc_order, arm_reg_alloc_order, sizeof (reg_alloc_order));
21476
  if (TARGET_THUMB)
21477
    memcpy (reg_alloc_order, thumb_core_reg_alloc_order,
21478
            sizeof (thumb_core_reg_alloc_order));
21479
}
21480
 
21481
/* Set default optimization options.  */
21482
void
21483
arm_optimization_options (int level, int size ATTRIBUTE_UNUSED)
21484
{
21485
  /* Enable section anchors by default at -O1 or higher.
21486
     Use 2 to distinguish from an explicit -fsection-anchors
21487
     given on the command line.  */
21488
  if (level > 0)
21489
    flag_section_anchors = 2;
21490
}

/* Implement TARGET_FRAME_POINTER_REQUIRED.  */

bool
arm_frame_pointer_required (void)
{
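  /* A frame pointer is needed when the function contains nonlocal labels,
     when the subtarget demands one, or for an APCS frame in ARM-state
     non-leaf functions.  */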
  return (cfun->has_nonlocal_label
          || SUBTARGET_FRAME_POINTER_REQUIRED
          || (TARGET_ARM && TARGET_APCS_FRAME && ! leaf_function_p ()));
}

/* Only Thumb-1 lacks support for conditional execution, so return true
   unless the target is Thumb-1.  */
static bool
arm_have_conditional_execution (void)
{
  return !TARGET_THUMB1;
}

#include "gt-arm.h"
