OpenCores
URL https://opencores.org/ocsvn/openrisc_me/openrisc_me/trunk

Subversion Repositories openrisc_me

[/] [openrisc/] [trunk/] [gnu-src/] [gcc-4.5.1/] [gcc/] [config/] [sh/] [sh.c] - Blame information for rev 294

Go to most recent revision | Details | Compare with Previous | View Log

Line No. Rev Author Line
1 282 jeremybenn
/* Output routines for GCC for Renesas / SuperH SH.
2
   Copyright (C) 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
3
   2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4
   Free Software Foundation, Inc.
5
   Contributed by Steve Chamberlain (sac@cygnus.com).
6
   Improved by Jim Wilson (wilson@cygnus.com).
7
 
8
This file is part of GCC.
9
 
10
GCC is free software; you can redistribute it and/or modify
11
it under the terms of the GNU General Public License as published by
12
the Free Software Foundation; either version 3, or (at your option)
13
any later version.
14
 
15
GCC is distributed in the hope that it will be useful,
16
but WITHOUT ANY WARRANTY; without even the implied warranty of
17
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18
GNU General Public License for more details.
19
 
20
You should have received a copy of the GNU General Public License
21
along with GCC; see the file COPYING3.  If not see
22
<http://www.gnu.org/licenses/>.  */
23
 
24
#include "config.h"
25
#include "system.h"
26
#include "coretypes.h"
27
#include "tm.h"
28
#include "insn-config.h"
29
#include "rtl.h"
30
#include "tree.h"
31
#include "flags.h"
32
#include "expr.h"
33
#include "optabs.h"
34
#include "function.h"
35
#include "regs.h"
36
#include "hard-reg-set.h"
37
#include "output.h"
38
#include "insn-attr.h"
39
#include "toplev.h"
40
#include "recog.h"
41
#include "integrate.h"
42
#include "dwarf2.h"
43
#include "tm_p.h"
44
#include "target.h"
45
#include "target-def.h"
46
#include "real.h"
47
#include "langhooks.h"
48
#include "basic-block.h"
49
#include "df.h"
50
#include "cfglayout.h"
51
#include "intl.h"
52
#include "sched-int.h"
53
#include "params.h"
54
#include "ggc.h"
55
#include "gimple.h"
56
#include "cfgloop.h"
57
#include "alloc-pool.h"
58
#include "tm-constrs.h"
59
 
60
 
61
int code_for_indirect_jump_scratch = CODE_FOR_indirect_jump_scratch;
62
 
63
#define MSW (TARGET_LITTLE_ENDIAN ? 1 : 0)
64
#define LSW (TARGET_LITTLE_ENDIAN ? 0 : 1)
65
 
66
/* These are some macros to abstract register modes.  */
67
#define CONST_OK_FOR_ADD(size) \
68
  (TARGET_SHMEDIA ? CONST_OK_FOR_I10 (size) : CONST_OK_FOR_I08 (size))
69
#define GEN_MOV (*(TARGET_SHMEDIA64 ? gen_movdi : gen_movsi))
70
#define GEN_ADD3 (*(TARGET_SHMEDIA64 ? gen_adddi3 : gen_addsi3))
71
#define GEN_SUB3 (*(TARGET_SHMEDIA64 ? gen_subdi3 : gen_subsi3))
72
 
73
/* Used to simplify the logic below.  Find the attributes wherever
74
   they may be.  */
75
#define SH_ATTRIBUTES(decl) \
76
  (TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
77
                  : DECL_ATTRIBUTES (decl) \
78
                  ? (DECL_ATTRIBUTES (decl)) \
79
                  : TYPE_ATTRIBUTES (TREE_TYPE (decl))
80
 
81
/* Set to 1 by expand_prologue() when the function is an interrupt handler.  */
82
int current_function_interrupt;
83
 
84
tree sh_deferred_function_attributes;
85
tree *sh_deferred_function_attributes_tail = &sh_deferred_function_attributes;
86
 
87
/* Global variables for machine-dependent things.  */
88
 
89
/* Which cpu are we scheduling for.  */
90
enum processor_type sh_cpu;
91
 
92
/* Definitions used in ready queue reordering for first scheduling pass.  */
93
 
94
/* Reg weights arrays for modes SFmode and SImode, indexed by insn LUID.  */
95
static short *regmode_weight[2];
96
 
97
/* Total SFmode and SImode weights of scheduled insns.  */
98
static int curr_regmode_pressure[2];
99
 
100
/* Number of r0 life regions.  */
101
static int r0_life_regions;
102
 
103
/* If true, skip cycles for Q -> R movement.  */
104
static int skip_cycles = 0;
105
 
106
/* Cached value of can_issue_more. This is cached in sh_variable_issue hook
107
   and returned from sh_reorder2.  */
108
static short cached_can_issue_more;
109
 
110
/* Unique number for UNSPEC_BBR pattern.  */
111
static unsigned int unspec_bbr_uid = 1;
112
 
113
/* Provides the class number of the smallest class containing
114
   reg number.  */
115
 
116
enum reg_class regno_reg_class[FIRST_PSEUDO_REGISTER] =
117
{
118
  R0_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
119
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
120
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
121
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
122
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
123
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
124
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
125
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
126
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
127
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
128
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
129
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
130
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
131
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
132
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
133
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
134
  FP0_REGS,FP_REGS, FP_REGS, FP_REGS,
135
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
136
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
137
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
138
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
139
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
140
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
141
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
142
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
143
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
144
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
145
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
146
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
147
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
148
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
149
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
150
  TARGET_REGS, TARGET_REGS, TARGET_REGS, TARGET_REGS,
151
  TARGET_REGS, TARGET_REGS, TARGET_REGS, TARGET_REGS,
152
  DF_REGS, DF_REGS, DF_REGS, DF_REGS,
153
  DF_REGS, DF_REGS, DF_REGS, DF_REGS,
154
  NO_REGS, GENERAL_REGS, PR_REGS, T_REGS,
155
  MAC_REGS, MAC_REGS, FPUL_REGS, FPSCR_REGS,
156
  GENERAL_REGS, GENERAL_REGS,
157
};
158
 
159
char sh_register_names[FIRST_PSEUDO_REGISTER] \
160
  [MAX_REGISTER_NAME_LENGTH + 1] = SH_REGISTER_NAMES_INITIALIZER;
161
 
162
char sh_additional_register_names[ADDREGNAMES_SIZE] \
163
  [MAX_ADDITIONAL_REGISTER_NAME_LENGTH + 1]
164
  = SH_ADDITIONAL_REGISTER_NAMES_INITIALIZER;
165
 
166
int assembler_dialect;
167
 
168
static bool shmedia_space_reserved_for_target_registers;
169
 
170
static bool sh_handle_option (size_t, const char *, int);
171
static void split_branches (rtx);
172
static int branch_dest (rtx);
173
static void force_into (rtx, rtx);
174
static void print_slot (rtx);
175
static rtx add_constant (rtx, enum machine_mode, rtx);
176
static void dump_table (rtx, rtx);
177
static int hi_const (rtx);
178
static int broken_move (rtx);
179
static int mova_p (rtx);
180
static rtx find_barrier (int, rtx, rtx);
181
static int noncall_uses_reg (rtx, rtx, rtx *);
182
static rtx gen_block_redirect (rtx, int, int);
183
static void sh_reorg (void);
184
static void output_stack_adjust (int, rtx, int, HARD_REG_SET *, bool);
185
static rtx frame_insn (rtx);
186
static rtx push (int);
187
static void pop (int);
188
static void push_regs (HARD_REG_SET *, int);
189
static int calc_live_regs (HARD_REG_SET *);
190
static HOST_WIDE_INT rounded_frame_size (int);
191
static rtx mark_constant_pool_use (rtx);
192
static tree sh_handle_interrupt_handler_attribute (tree *, tree, tree, int, bool *);
193
static tree sh_handle_resbank_handler_attribute (tree *, tree,
194
                                                 tree, int, bool *);
195
static tree sh2a_handle_function_vector_handler_attribute (tree *, tree,
196
                                                           tree, int, bool *);
197
static tree sh_handle_sp_switch_attribute (tree *, tree, tree, int, bool *);
198
static tree sh_handle_trap_exit_attribute (tree *, tree, tree, int, bool *);
199
static tree sh_handle_renesas_attribute (tree *, tree, tree, int, bool *);
200
static void sh_output_function_epilogue (FILE *, HOST_WIDE_INT);
201
static void sh_insert_attributes (tree, tree *);
202
static const char *sh_check_pch_target_flags (int);
203
static int sh_adjust_cost (rtx, rtx, rtx, int);
204
static int sh_issue_rate (void);
205
static int sh_dfa_new_cycle (FILE *, int, rtx, int, int, int *sort_p);
206
static short find_set_regmode_weight (rtx, enum machine_mode);
207
static short find_insn_regmode_weight (rtx, enum machine_mode);
208
static void find_regmode_weight (basic_block, enum machine_mode);
209
static int find_r0_life_regions (basic_block);
210
static void  sh_md_init_global (FILE *, int, int);
211
static void  sh_md_finish_global (FILE *, int);
212
static int rank_for_reorder (const void *, const void *);
213
static void swap_reorder (rtx *, int);
214
static void ready_reorder (rtx *, int);
215
static short high_pressure (enum machine_mode);
216
static int sh_reorder (FILE *, int, rtx *, int *, int);
217
static int sh_reorder2 (FILE *, int, rtx *, int *, int);
218
static void sh_md_init (FILE *, int, int);
219
static int sh_variable_issue (FILE *, int, rtx, int);
220
 
221
static bool sh_function_ok_for_sibcall (tree, tree);
222
 
223
static bool sh_cannot_modify_jumps_p (void);
224
static enum reg_class sh_target_reg_class (void);
225
static bool sh_optimize_target_register_callee_saved (bool);
226
static bool sh_ms_bitfield_layout_p (const_tree);
227
 
228
static void sh_init_builtins (void);
229
static tree sh_builtin_decl (unsigned, bool);
230
static void sh_media_init_builtins (void);
231
static tree sh_media_builtin_decl (unsigned, bool);
232
static rtx sh_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
233
static void sh_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT, tree);
234
static void sh_file_start (void);
235
static int flow_dependent_p (rtx, rtx);
236
static void flow_dependent_p_1 (rtx, const_rtx, void *);
237
static int shiftcosts (rtx);
238
static int andcosts (rtx);
239
static int addsubcosts (rtx);
240
static int multcosts (rtx);
241
static bool unspec_caller_rtx_p (rtx);
242
static bool sh_cannot_copy_insn_p (rtx);
243
static bool sh_rtx_costs (rtx, int, int, int *, bool);
244
static int sh_address_cost (rtx, bool);
245
static int sh_pr_n_sets (void);
246
static rtx sh_allocate_initial_value (rtx);
247
static bool sh_legitimate_address_p (enum machine_mode, rtx, bool);
248
static rtx sh_legitimize_address (rtx, rtx, enum machine_mode);
249
static int shmedia_target_regs_stack_space (HARD_REG_SET *);
250
static int shmedia_reserve_space_for_target_registers_p (int, HARD_REG_SET *);
251
static int shmedia_target_regs_stack_adjust (HARD_REG_SET *);
252
static int scavenge_reg (HARD_REG_SET *s);
253
struct save_schedule_s;
254
static struct save_entry_s *sh5_schedule_saves (HARD_REG_SET *,
255
                                                struct save_schedule_s *, int);
256
 
257
static rtx sh_struct_value_rtx (tree, int);
258
static rtx sh_function_value (const_tree, const_tree, bool);
259
static rtx sh_libcall_value (enum machine_mode, const_rtx);
260
static bool sh_return_in_memory (const_tree, const_tree);
261
static rtx sh_builtin_saveregs (void);
262
static void sh_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode, tree, int *, int);
263
static bool sh_strict_argument_naming (CUMULATIVE_ARGS *);
264
static bool sh_pretend_outgoing_varargs_named (CUMULATIVE_ARGS *);
265
static tree sh_build_builtin_va_list (void);
266
static void sh_va_start (tree, rtx);
267
static tree sh_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
268
static bool sh_promote_prototypes (const_tree);
269
static enum machine_mode sh_promote_function_mode (const_tree type,
270
                                                   enum machine_mode,
271
                                                   int *punsignedp,
272
                                                   const_tree funtype,
273
                                                   int for_return);
274
static bool sh_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
275
                                  const_tree, bool);
276
static bool sh_callee_copies (CUMULATIVE_ARGS *, enum machine_mode,
277
                              const_tree, bool);
278
static int sh_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
279
                                 tree, bool);
280
static bool sh_scalar_mode_supported_p (enum machine_mode);
281
static int sh_dwarf_calling_convention (const_tree);
282
static void sh_encode_section_info (tree, rtx, int);
283
static int sh2a_function_vector_p (tree);
284
static void sh_trampoline_init (rtx, tree, rtx);
285
static rtx sh_trampoline_adjust_address (rtx);
286
 
287
static const struct attribute_spec sh_attribute_table[] =
288
{
289
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
290
  { "interrupt_handler", 0, 0, true,  false, false, sh_handle_interrupt_handler_attribute },
291
  { "sp_switch",         1, 1, true,  false, false, sh_handle_sp_switch_attribute },
292
  { "trap_exit",         1, 1, true,  false, false, sh_handle_trap_exit_attribute },
293
  { "renesas",           0, 0, false, true, false, sh_handle_renesas_attribute },
294
  { "trapa_handler",     0, 0, true,  false, false, sh_handle_interrupt_handler_attribute },
295
  { "nosave_low_regs",   0, 0, true,  false, false, sh_handle_interrupt_handler_attribute },
296
  { "resbank",           0, 0, true,  false, false, sh_handle_resbank_handler_attribute },
297
  { "function_vector",   1, 1, true,  false, false, sh2a_handle_function_vector_handler_attribute },
298
#ifdef SYMBIAN
299
  /* Symbian support adds three new attributes:
300
     dllexport - for exporting a function/variable that will live in a dll
301
     dllimport - for importing a function/variable from a dll
302
 
303
     Microsoft allows multiple declspecs in one __declspec, separating
304
     them with spaces.  We do NOT support this.  Instead, use __declspec
305
     multiple times.  */
306
  { "dllimport",         0, 0, true,  false, false, sh_symbian_handle_dll_attribute },
307
  { "dllexport",         0, 0, true,  false, false, sh_symbian_handle_dll_attribute },
308
#endif
309
  { NULL,                0, 0, false, false, false, NULL }
310
};
311
 
312
/* Initialize the GCC target structure.  */
313
#undef TARGET_ATTRIBUTE_TABLE
314
#define TARGET_ATTRIBUTE_TABLE sh_attribute_table
315
 
316
/* The next two are used for debug info when compiling with -gdwarf.  */
317
#undef TARGET_ASM_UNALIGNED_HI_OP
318
#define TARGET_ASM_UNALIGNED_HI_OP "\t.uaword\t"
319
#undef TARGET_ASM_UNALIGNED_SI_OP
320
#define TARGET_ASM_UNALIGNED_SI_OP "\t.ualong\t"
321
 
322
/* These are NULLed out on non-SH5 in OVERRIDE_OPTIONS.  */
323
#undef TARGET_ASM_UNALIGNED_DI_OP
324
#define TARGET_ASM_UNALIGNED_DI_OP "\t.uaquad\t"
325
#undef TARGET_ASM_ALIGNED_DI_OP
326
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
327
 
328
#undef TARGET_ASM_FUNCTION_EPILOGUE
329
#define TARGET_ASM_FUNCTION_EPILOGUE sh_output_function_epilogue
330
 
331
#undef TARGET_ASM_OUTPUT_MI_THUNK
332
#define TARGET_ASM_OUTPUT_MI_THUNK sh_output_mi_thunk
333
 
334
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
335
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
336
 
337
#undef TARGET_ASM_FILE_START
338
#define TARGET_ASM_FILE_START sh_file_start
339
#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
340
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
341
 
342
#undef TARGET_DEFAULT_TARGET_FLAGS
343
#define TARGET_DEFAULT_TARGET_FLAGS TARGET_DEFAULT
344
#undef TARGET_HANDLE_OPTION
345
#define TARGET_HANDLE_OPTION sh_handle_option
346
 
347
#undef TARGET_INSERT_ATTRIBUTES
348
#define TARGET_INSERT_ATTRIBUTES sh_insert_attributes
349
 
350
#undef TARGET_SCHED_ADJUST_COST
351
#define TARGET_SCHED_ADJUST_COST sh_adjust_cost
352
 
353
#undef TARGET_SCHED_ISSUE_RATE
354
#define TARGET_SCHED_ISSUE_RATE sh_issue_rate
355
 
356
/* The next 5 hooks have been implemented for reenabling sched1.  With the
357
   help of these macros we are limiting the movement of insns in sched1 to
358
   reduce the register pressure.  The overall idea is to keep count of SImode
359
   and SFmode regs required by already scheduled insns. When these counts
360
   cross some threshold values; give priority to insns that free registers.
361
   The insn that frees registers is most likely to be the insn with lowest
362
   LUID (original insn order); but such an insn might be there in the stalled
363
   queue (Q) instead of the ready queue (R).  To solve this, we skip cycles
364
   upto a max of 8 cycles so that such insns may move from Q -> R.
365
 
366
   The description of the hooks are as below:
367
 
368
   TARGET_SCHED_INIT_GLOBAL: Added a new target hook in the generic
369
   scheduler; it is called inside the sched_init function just after
370
   find_insn_reg_weights function call. It is used to calculate the SImode
371
   and SFmode weights of insns of basic blocks; much similar to what
372
   find_insn_reg_weights does.
373
   TARGET_SCHED_FINISH_GLOBAL: Corresponding cleanup hook.
374
 
375
   TARGET_SCHED_DFA_NEW_CYCLE: Skip cycles if high register pressure is
376
   indicated by TARGET_SCHED_REORDER2; doing this may move insns from
377
   (Q)->(R).
378
 
379
   TARGET_SCHED_REORDER: If the register pressure for SImode or SFmode is
380
   high; reorder the ready queue so that the insn with lowest LUID will be
381
   issued next.
382
 
383
   TARGET_SCHED_REORDER2: If the register pressure is high, indicate to
384
   TARGET_SCHED_DFA_NEW_CYCLE to skip cycles.
385
 
386
   TARGET_SCHED_VARIABLE_ISSUE: Cache the value of can_issue_more so that it
387
   can be returned from TARGET_SCHED_REORDER2.
388
 
389
   TARGET_SCHED_INIT: Reset the register pressure counting variables.  */
390
 
391
#undef TARGET_SCHED_DFA_NEW_CYCLE
392
#define TARGET_SCHED_DFA_NEW_CYCLE sh_dfa_new_cycle
393
 
394
#undef TARGET_SCHED_INIT_GLOBAL
395
#define TARGET_SCHED_INIT_GLOBAL sh_md_init_global
396
 
397
#undef TARGET_SCHED_FINISH_GLOBAL
398
#define TARGET_SCHED_FINISH_GLOBAL sh_md_finish_global
399
 
400
#undef TARGET_SCHED_VARIABLE_ISSUE
401
#define TARGET_SCHED_VARIABLE_ISSUE sh_variable_issue
402
 
403
#undef TARGET_SCHED_REORDER
404
#define TARGET_SCHED_REORDER sh_reorder
405
 
406
#undef TARGET_SCHED_REORDER2
407
#define TARGET_SCHED_REORDER2 sh_reorder2
408
 
409
#undef TARGET_SCHED_INIT
410
#define TARGET_SCHED_INIT sh_md_init
411
 
412
#undef TARGET_LEGITIMIZE_ADDRESS
413
#define TARGET_LEGITIMIZE_ADDRESS sh_legitimize_address
414
 
415
#undef TARGET_CANNOT_MODIFY_JUMPS_P
416
#define TARGET_CANNOT_MODIFY_JUMPS_P sh_cannot_modify_jumps_p
417
#undef TARGET_BRANCH_TARGET_REGISTER_CLASS
418
#define TARGET_BRANCH_TARGET_REGISTER_CLASS sh_target_reg_class
419
#undef TARGET_BRANCH_TARGET_REGISTER_CALLEE_SAVED
420
#define TARGET_BRANCH_TARGET_REGISTER_CALLEE_SAVED \
421
 sh_optimize_target_register_callee_saved
422
 
423
#undef TARGET_MS_BITFIELD_LAYOUT_P
424
#define TARGET_MS_BITFIELD_LAYOUT_P sh_ms_bitfield_layout_p
425
 
426
#undef TARGET_INIT_BUILTINS
427
#define TARGET_INIT_BUILTINS sh_init_builtins
428
#undef TARGET_BUILTIN_DECL
429
#define TARGET_BUILTIN_DECL sh_builtin_decl
430
#undef TARGET_EXPAND_BUILTIN
431
#define TARGET_EXPAND_BUILTIN sh_expand_builtin
432
 
433
#undef TARGET_FUNCTION_OK_FOR_SIBCALL
434
#define TARGET_FUNCTION_OK_FOR_SIBCALL sh_function_ok_for_sibcall
435
 
436
#undef TARGET_CANNOT_COPY_INSN_P
437
#define TARGET_CANNOT_COPY_INSN_P sh_cannot_copy_insn_p
438
#undef TARGET_RTX_COSTS
439
#define TARGET_RTX_COSTS sh_rtx_costs
440
#undef TARGET_ADDRESS_COST
441
#define TARGET_ADDRESS_COST sh_address_cost
442
#undef TARGET_ALLOCATE_INITIAL_VALUE
443
#define TARGET_ALLOCATE_INITIAL_VALUE sh_allocate_initial_value
444
 
445
#undef TARGET_MACHINE_DEPENDENT_REORG
446
#define TARGET_MACHINE_DEPENDENT_REORG sh_reorg
447
 
448
#undef TARGET_DWARF_REGISTER_SPAN
449
#define TARGET_DWARF_REGISTER_SPAN sh_dwarf_register_span
450
 
451
#ifdef HAVE_AS_TLS
452
#undef TARGET_HAVE_TLS
453
#define TARGET_HAVE_TLS true
454
#endif
455
 
456
#undef TARGET_PROMOTE_PROTOTYPES
457
#define TARGET_PROMOTE_PROTOTYPES sh_promote_prototypes
458
#undef TARGET_PROMOTE_FUNCTION_MODE
459
#define TARGET_PROMOTE_FUNCTION_MODE sh_promote_function_mode
460
 
461
#undef TARGET_FUNCTION_VALUE
462
#define TARGET_FUNCTION_VALUE sh_function_value
463
#undef TARGET_LIBCALL_VALUE
464
#define TARGET_LIBCALL_VALUE sh_libcall_value
465
#undef TARGET_STRUCT_VALUE_RTX
466
#define TARGET_STRUCT_VALUE_RTX sh_struct_value_rtx
467
#undef TARGET_RETURN_IN_MEMORY
468
#define TARGET_RETURN_IN_MEMORY sh_return_in_memory
469
 
470
#undef TARGET_EXPAND_BUILTIN_SAVEREGS
471
#define TARGET_EXPAND_BUILTIN_SAVEREGS sh_builtin_saveregs
472
#undef TARGET_SETUP_INCOMING_VARARGS
473
#define TARGET_SETUP_INCOMING_VARARGS sh_setup_incoming_varargs
474
#undef TARGET_STRICT_ARGUMENT_NAMING
475
#define TARGET_STRICT_ARGUMENT_NAMING sh_strict_argument_naming
476
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
477
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED sh_pretend_outgoing_varargs_named
478
#undef TARGET_MUST_PASS_IN_STACK
479
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
480
#undef TARGET_PASS_BY_REFERENCE
481
#define TARGET_PASS_BY_REFERENCE sh_pass_by_reference
482
#undef TARGET_CALLEE_COPIES
483
#define TARGET_CALLEE_COPIES sh_callee_copies
484
#undef TARGET_ARG_PARTIAL_BYTES
485
#define TARGET_ARG_PARTIAL_BYTES sh_arg_partial_bytes
486
 
487
#undef TARGET_BUILD_BUILTIN_VA_LIST
488
#define TARGET_BUILD_BUILTIN_VA_LIST sh_build_builtin_va_list
489
#undef TARGET_EXPAND_BUILTIN_VA_START
490
#define TARGET_EXPAND_BUILTIN_VA_START sh_va_start
491
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
492
#define TARGET_GIMPLIFY_VA_ARG_EXPR sh_gimplify_va_arg_expr
493
 
494
#undef TARGET_SCALAR_MODE_SUPPORTED_P
495
#define TARGET_SCALAR_MODE_SUPPORTED_P sh_scalar_mode_supported_p
496
#undef TARGET_VECTOR_MODE_SUPPORTED_P
497
#define TARGET_VECTOR_MODE_SUPPORTED_P sh_vector_mode_supported_p
498
 
499
#undef TARGET_CHECK_PCH_TARGET_FLAGS
500
#define TARGET_CHECK_PCH_TARGET_FLAGS sh_check_pch_target_flags
501
 
502
#undef TARGET_DWARF_CALLING_CONVENTION
503
#define TARGET_DWARF_CALLING_CONVENTION sh_dwarf_calling_convention
504
 
505
/* Return regmode weight for insn.  */
506
#define INSN_REGMODE_WEIGHT(INSN, MODE)  regmode_weight[((MODE) == SImode) ? 0 : 1][INSN_UID (INSN)]
507
 
508
/* Return current register pressure for regmode.  */
509
#define CURR_REGMODE_PRESSURE(MODE)     curr_regmode_pressure[((MODE) == SImode) ? 0 : 1]
510
 
511
#undef  TARGET_ENCODE_SECTION_INFO
512
#define TARGET_ENCODE_SECTION_INFO      sh_encode_section_info
513
 
514
#ifdef SYMBIAN
515
 
516
#undef  TARGET_ENCODE_SECTION_INFO
517
#define TARGET_ENCODE_SECTION_INFO      sh_symbian_encode_section_info
518
#undef  TARGET_STRIP_NAME_ENCODING
519
#define TARGET_STRIP_NAME_ENCODING      sh_symbian_strip_name_encoding
520
#undef  TARGET_CXX_IMPORT_EXPORT_CLASS
521
#define TARGET_CXX_IMPORT_EXPORT_CLASS  sh_symbian_import_export_class
522
 
523
#endif /* SYMBIAN */
524
 
525
#undef TARGET_SECONDARY_RELOAD
526
#define TARGET_SECONDARY_RELOAD sh_secondary_reload
527
 
528
#undef TARGET_LEGITIMATE_ADDRESS_P
529
#define TARGET_LEGITIMATE_ADDRESS_P     sh_legitimate_address_p
530
 
531
#undef TARGET_TRAMPOLINE_INIT
532
#define TARGET_TRAMPOLINE_INIT          sh_trampoline_init
533
#undef TARGET_TRAMPOLINE_ADJUST_ADDRESS
534
#define TARGET_TRAMPOLINE_ADJUST_ADDRESS sh_trampoline_adjust_address
535
 
536
/* Machine-specific symbol_ref flags.  */
537
#define SYMBOL_FLAG_FUNCVEC_FUNCTION    (SYMBOL_FLAG_MACH_DEP << 0)
538
 
539
struct gcc_target targetm = TARGET_INITIALIZER;
540
 
541
/* Implement TARGET_HANDLE_OPTION.  */
542
 
543
static bool
544
sh_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED,
545
                  int value ATTRIBUTE_UNUSED)
546
{
547
  switch (code)
548
    {
549
    case OPT_m1:
550
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH1;
551
      return true;
552
 
553
    case OPT_m2:
554
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2;
555
      return true;
556
 
557
    case OPT_m2a:
558
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2A;
559
      return true;
560
 
561
    case OPT_m2a_nofpu:
562
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2A_NOFPU;
563
      return true;
564
 
565
    case OPT_m2a_single:
566
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2A_SINGLE;
567
      return true;
568
 
569
    case OPT_m2a_single_only:
570
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2A_SINGLE_ONLY;
571
      return true;
572
 
573
    case OPT_m2e:
574
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2E;
575
      return true;
576
 
577
    case OPT_m3:
578
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH3;
579
      return true;
580
 
581
    case OPT_m3e:
582
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH3E;
583
      return true;
584
 
585
    case OPT_m4:
586
    case OPT_m4_100:
587
    case OPT_m4_200:
588
    case OPT_m4_300:
589
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4;
590
      return true;
591
 
592
    case OPT_m4_nofpu:
593
    case OPT_m4_100_nofpu:
594
    case OPT_m4_200_nofpu:
595
    case OPT_m4_300_nofpu:
596
    case OPT_m4_340:
597
    case OPT_m4_400:
598
    case OPT_m4_500:
599
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4_NOFPU;
600
      return true;
601
 
602
    case OPT_m4_single:
603
    case OPT_m4_100_single:
604
    case OPT_m4_200_single:
605
    case OPT_m4_300_single:
606
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4_SINGLE;
607
      return true;
608
 
609
    case OPT_m4_single_only:
610
    case OPT_m4_100_single_only:
611
    case OPT_m4_200_single_only:
612
    case OPT_m4_300_single_only:
613
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4_SINGLE_ONLY;
614
      return true;
615
 
616
    case OPT_m4a:
617
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4A;
618
      return true;
619
 
620
    case OPT_m4a_nofpu:
621
    case OPT_m4al:
622
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4A_NOFPU;
623
      return true;
624
 
625
    case OPT_m4a_single:
626
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4A_SINGLE;
627
      return true;
628
 
629
    case OPT_m4a_single_only:
630
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4A_SINGLE_ONLY;
631
      return true;
632
 
633
    case OPT_m5_32media:
634
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_32MEDIA;
635
      return true;
636
 
637
    case OPT_m5_32media_nofpu:
638
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_32MEDIA_NOFPU;
639
      return true;
640
 
641
    case OPT_m5_64media:
642
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_64MEDIA;
643
      return true;
644
 
645
    case OPT_m5_64media_nofpu:
646
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_64MEDIA_NOFPU;
647
      return true;
648
 
649
    case OPT_m5_compact:
650
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_COMPACT;
651
      return true;
652
 
653
    case OPT_m5_compact_nofpu:
654
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_COMPACT_NOFPU;
655
      return true;
656
 
657
    default:
658
      return true;
659
    }
660
}
661
 
662
/* Set default optimization options.  */
663
void
664
sh_optimization_options (int level ATTRIBUTE_UNUSED, int size ATTRIBUTE_UNUSED)
665
{
666
  if (level)
667
    {
668
      flag_omit_frame_pointer = 2;
669
      if (!size)
670
        sh_div_str = "inv:minlat";
671
    }
672
  if (size)
673
    {
674
      target_flags |= MASK_SMALLCODE;
675
      sh_div_str = SH_DIV_STR_FOR_SIZE ;
676
    }
677
  else
678
    TARGET_CBRANCHDI4 = 1;
679
  /* We can't meaningfully test TARGET_SHMEDIA here, because -m options
680
     haven't been parsed yet, hence we'd read only the default.
681
     sh_target_reg_class will return NO_REGS if this is not SHMEDIA, so
682
     it's OK to always set flag_branch_target_load_optimize.  */
683
  if (level > 1)
684
    {
685
      flag_branch_target_load_optimize = 1;
686
      if (!size)
687
        target_flags |= MASK_SAVE_ALL_TARGET_REGS;
688
    }
689
  /* Likewise, we can't meaningfully test TARGET_SH2E / TARGET_IEEE
690
     here, so leave it to OVERRIDE_OPTIONS to set
691
    flag_finite_math_only.  We set it to 2 here so we know if the user
692
    explicitly requested this to be on or off.  */
693
  flag_finite_math_only = 2;
694
  /* If flag_schedule_insns is 1, we set it to 2 here so we know if
695
     the user explicitly requested this to be on or off.  */
696
  if (flag_schedule_insns > 0)
697
    flag_schedule_insns = 2;
698
 
699
  set_param_value ("simultaneous-prefetches", 2);
700
}
701
 
702
/* Implement OVERRIDE_OPTIONS macro.  Validate and override various
703
   options, and do some machine dependent initialization.  */
704
void
705
sh_override_options (void)
706
{
707
  int regno;
708
 
709
  SUBTARGET_OVERRIDE_OPTIONS;
710
  if (flag_finite_math_only == 2)
711
    flag_finite_math_only
712
      = !flag_signaling_nans && TARGET_SH2E && ! TARGET_IEEE;
713
  if (TARGET_SH2E && !flag_finite_math_only)
714
    target_flags |= MASK_IEEE;
715
  sh_cpu = PROCESSOR_SH1;
716
  assembler_dialect = 0;
717
  if (TARGET_SH2)
718
    sh_cpu = PROCESSOR_SH2;
719
  if (TARGET_SH2E)
720
    sh_cpu = PROCESSOR_SH2E;
721
  if (TARGET_SH2A)
722
    sh_cpu = PROCESSOR_SH2A;
723
  if (TARGET_SH3)
724
    sh_cpu = PROCESSOR_SH3;
725
  if (TARGET_SH3E)
726
    sh_cpu = PROCESSOR_SH3E;
727
  if (TARGET_SH4)
728
    {
729
      assembler_dialect = 1;
730
      sh_cpu = PROCESSOR_SH4;
731
    }
732
  if (TARGET_SH4A_ARCH)
733
    {
734
      assembler_dialect = 1;
735
      sh_cpu = PROCESSOR_SH4A;
736
    }
737
  if (TARGET_SH5)
738
    {
739
      sh_cpu = PROCESSOR_SH5;
740
      target_flags |= MASK_ALIGN_DOUBLE;
741
      if (TARGET_SHMEDIA_FPU)
742
        target_flags |= MASK_FMOVD;
743
      if (TARGET_SHMEDIA)
744
        {
745
          /* There are no delay slots on SHmedia.  */
746
          flag_delayed_branch = 0;
747
          /* Relaxation isn't yet supported for SHmedia */
748
          target_flags &= ~MASK_RELAX;
749
          /* After reload, if conversion does little good but can cause
750
             ICEs:
751
             - find_if_block doesn't do anything for SH because we don't
752
               have conditional execution patterns.  (We use conditional
753
               move patterns, which are handled differently, and only
754
               before reload).
755
             - find_cond_trap doesn't do anything for the SH because we
756
               don't have conditional traps.
757
             - find_if_case_1 uses redirect_edge_and_branch_force in
758
               the only path that does an optimization, and this causes
759
               an ICE when branch targets are in registers.
760
             - find_if_case_2 doesn't do anything for the SHmedia after
761
               reload except when it can redirect a tablejump - and
762
               that's rather rare.  */
763
          flag_if_conversion2 = 0;
764
          if (! strcmp (sh_div_str, "call"))
765
            sh_div_strategy = SH_DIV_CALL;
766
          else if (! strcmp (sh_div_str, "call2"))
767
            sh_div_strategy = SH_DIV_CALL2;
768
          if (! strcmp (sh_div_str, "fp") && TARGET_FPU_ANY)
769
            sh_div_strategy = SH_DIV_FP;
770
          else if (! strcmp (sh_div_str, "inv"))
771
            sh_div_strategy = SH_DIV_INV;
772
          else if (! strcmp (sh_div_str, "inv:minlat"))
773
            sh_div_strategy = SH_DIV_INV_MINLAT;
774
          else if (! strcmp (sh_div_str, "inv20u"))
775
            sh_div_strategy = SH_DIV_INV20U;
776
          else if (! strcmp (sh_div_str, "inv20l"))
777
            sh_div_strategy = SH_DIV_INV20L;
778
          else if (! strcmp (sh_div_str, "inv:call2"))
779
            sh_div_strategy = SH_DIV_INV_CALL2;
780
          else if (! strcmp (sh_div_str, "inv:call"))
781
            sh_div_strategy = SH_DIV_INV_CALL;
782
          else if (! strcmp (sh_div_str, "inv:fp"))
783
            {
784
              if (TARGET_FPU_ANY)
785
                sh_div_strategy = SH_DIV_INV_FP;
786
              else
787
                sh_div_strategy = SH_DIV_INV;
788
            }
789
          TARGET_CBRANCHDI4 = 0;
790
          /* Assembler CFI isn't yet fully supported for SHmedia.  */
791
          flag_dwarf2_cfi_asm = 0;
792
        }
793
    }
794
  else
795
    {
796
       /* Only the sh64-elf assembler fully supports .quad properly.  */
797
       targetm.asm_out.aligned_op.di = NULL;
798
       targetm.asm_out.unaligned_op.di = NULL;
799
    }
800
  if (TARGET_SH1)
801
    {
802
      if (! strcmp (sh_div_str, "call-div1"))
803
        sh_div_strategy = SH_DIV_CALL_DIV1;
804
      else if (! strcmp (sh_div_str, "call-fp")
805
               && (TARGET_FPU_DOUBLE
806
                   || (TARGET_HARD_SH4 && TARGET_SH2E)
807
                   || (TARGET_SHCOMPACT && TARGET_FPU_ANY)))
808
        sh_div_strategy = SH_DIV_CALL_FP;
809
      else if (! strcmp (sh_div_str, "call-table") && TARGET_SH2)
810
        sh_div_strategy = SH_DIV_CALL_TABLE;
811
      else
812
        /* Pick one that makes most sense for the target in general.
813
           It is not much good to use different functions depending
814
           on -Os, since then we'll end up with two different functions
815
           when some of the code is compiled for size, and some for
816
           speed.  */
817
 
818
        /* SH4 tends to emphasize speed.  */
819
        if (TARGET_HARD_SH4)
820
          sh_div_strategy = SH_DIV_CALL_TABLE;
821
        /* These have their own way of doing things.  */
822
        else if (TARGET_SH2A)
823
          sh_div_strategy = SH_DIV_INTRINSIC;
824
        /* ??? Should we use the integer SHmedia function instead?  */
825
        else if (TARGET_SHCOMPACT && TARGET_FPU_ANY)
826
          sh_div_strategy = SH_DIV_CALL_FP;
827
        /* SH1 .. SH3 cores often go into small-footprint systems, so
828
           default to the smallest implementation available.  */
829
        else if (TARGET_SH2)    /* ??? EXPERIMENTAL */
830
          sh_div_strategy = SH_DIV_CALL_TABLE;
831
        else
832
          sh_div_strategy = SH_DIV_CALL_DIV1;
833
    }
834
  if (!TARGET_SH1)
835
    TARGET_PRETEND_CMOVE = 0;
836
  if (sh_divsi3_libfunc[0])
837
    ; /* User supplied - leave it alone.  */
838
  else if (TARGET_DIVIDE_CALL_FP)
839
    sh_divsi3_libfunc = "__sdivsi3_i4";
840
  else if (TARGET_DIVIDE_CALL_TABLE)
841
    sh_divsi3_libfunc = "__sdivsi3_i4i";
842
  else if (TARGET_SH5)
843
    sh_divsi3_libfunc = "__sdivsi3_1";
844
  else
845
    sh_divsi3_libfunc = "__sdivsi3";
846
  if (sh_branch_cost == -1)
847
    sh_branch_cost
848
      = TARGET_SH5 ? 1 : ! TARGET_SH2 || TARGET_HARD_SH4 ? 2 : 1;
849
 
850
  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
851
    if (! VALID_REGISTER_P (regno))
852
      sh_register_names[regno][0] = '\0';
853
 
854
  for (regno = 0; regno < ADDREGNAMES_SIZE; regno++)
855
    if (! VALID_REGISTER_P (ADDREGNAMES_REGNO (regno)))
856
      sh_additional_register_names[regno][0] = '\0';
857
 
858
  if (flag_omit_frame_pointer == 2)
859
   {
860
     /* The debugging information is sufficient,
861
        but gdb doesn't implement this yet */
862
     if (0)
863
      flag_omit_frame_pointer
864
        = (PREFERRED_DEBUGGING_TYPE == DWARF2_DEBUG);
865
     else
866
      flag_omit_frame_pointer = 0;
867
   }
868
 
869
  if ((flag_pic && ! TARGET_PREFERGOT)
870
      || (TARGET_SHMEDIA && !TARGET_PT_FIXED))
871
    flag_no_function_cse = 1;
872
 
873
  if (SMALL_REGISTER_CLASSES)
874
    {
875
      /* Never run scheduling before reload, since that can
876
         break global alloc, and generates slower code anyway due
877
         to the pressure on R0.  */
878
      /* Enable sched1 for SH4 if the user explicitly requests.
879
         When sched1 is enabled, the ready queue will be reordered by
880
         the target hooks if pressure is high.  We can not do this for
881
         PIC, SH3 and lower as they give spill failures for R0.  */
882
      if (!TARGET_HARD_SH4 || flag_pic)
883
        flag_schedule_insns = 0;
884
      /* ??? Current exception handling places basic block boundaries
885
         after call_insns.  It causes the high pressure on R0 and gives
886
         spill failures for R0 in reload.  See PR 22553 and the thread
887
         on gcc-patches
888
         <http://gcc.gnu.org/ml/gcc-patches/2005-10/msg00816.html>.  */
889
      else if (flag_exceptions)
890
        {
891
          if (flag_schedule_insns == 1)
892
            warning (0, "ignoring -fschedule-insns because of exception handling bug");
893
          flag_schedule_insns = 0;
894
        }
895
      else if (flag_schedule_insns == 2)
896
        flag_schedule_insns = 0;
897
    }
898
 
899
  /* Unwinding with -freorder-blocks-and-partition does not work on this
900
     architecture, because it requires far jumps to label crossing between
901
     hot/cold sections which are rejected on this architecture.  */
902
  if (flag_reorder_blocks_and_partition)
903
    {
904
      if (flag_exceptions)
905
        {
906
          inform (input_location,
907
                  "-freorder-blocks-and-partition does not work with "
908
                  "exceptions on this architecture");
909
          flag_reorder_blocks_and_partition = 0;
910
          flag_reorder_blocks = 1;
911
        }
912
      else if (flag_unwind_tables)
913
        {
914
          inform (input_location,
915
                  "-freorder-blocks-and-partition does not support unwind "
916
                  "info on this architecture");
917
          flag_reorder_blocks_and_partition = 0;
918
          flag_reorder_blocks = 1;
919
        }
920
    }
921
 
922
  if (align_loops == 0)
923
    align_loops =  1 << (TARGET_SH5 ? 3 : 2);
924
  if (align_jumps == 0)
925
    align_jumps = 1 << CACHE_LOG;
926
  else if (align_jumps < (TARGET_SHMEDIA ? 4 : 2))
927
    align_jumps = TARGET_SHMEDIA ? 4 : 2;
928
 
929
  /* Allocation boundary (in *bytes*) for the code of a function.
930
     SH1: 32 bit alignment is faster, because instructions are always
931
     fetched as a pair from a longword boundary.
932
     SH2 .. SH5 : align to cache line start.  */
933
  if (align_functions == 0)
934
    align_functions
935
      = TARGET_SMALLCODE ? FUNCTION_BOUNDARY/8 : (1 << CACHE_LOG);
936
  /* The linker relaxation code breaks when a function contains
937
     alignments that are larger than that at the start of a
938
     compilation unit.  */
939
  if (TARGET_RELAX)
940
    {
941
      int min_align
942
        = align_loops > align_jumps ? align_loops : align_jumps;
943
 
944
      /* Also take possible .long constants / mova tables int account.  */
945
      if (min_align < 4)
946
        min_align = 4;
947
      if (align_functions < min_align)
948
        align_functions = min_align;
949
    }
950
 
951
  if (sh_fixed_range_str)
952
    sh_fix_range (sh_fixed_range_str);
953
}
954
 
955
/* Print the operand address in x to the stream.  */
956
 
957
void
958
print_operand_address (FILE *stream, rtx x)
959
{
960
  switch (GET_CODE (x))
961
    {
962
    case REG:
963
    case SUBREG:
964
      fprintf (stream, "@%s", reg_names[true_regnum (x)]);
965
      break;
966
 
967
    case PLUS:
968
      {
969
        rtx base = XEXP (x, 0);
970
        rtx index = XEXP (x, 1);
971
 
972
        switch (GET_CODE (index))
973
          {
974
          case CONST_INT:
975
            fprintf (stream, "@(%d,%s)", (int) INTVAL (index),
976
                     reg_names[true_regnum (base)]);
977
            break;
978
 
979
          case REG:
980
          case SUBREG:
981
            {
982
              int base_num = true_regnum (base);
983
              int index_num = true_regnum (index);
984
 
985
              fprintf (stream, "@(r0,%s)",
986
                       reg_names[MAX (base_num, index_num)]);
987
              break;
988
            }
989
 
990
          default:
991
            gcc_unreachable ();
992
          }
993
      }
994
      break;
995
 
996
    case PRE_DEC:
997
      fprintf (stream, "@-%s", reg_names[true_regnum (XEXP (x, 0))]);
998
      break;
999
 
1000
    case POST_INC:
1001
      fprintf (stream, "@%s+", reg_names[true_regnum (XEXP (x, 0))]);
1002
      break;
1003
 
1004
    default:
1005
      x = mark_constant_pool_use (x);
1006
      output_addr_const (stream, x);
1007
      break;
1008
    }
1009
}
1010
 
1011
/* Print operand x (an rtx) in assembler syntax to file stream
1012
   according to modifier code.
1013
 
1014
   '.'  print a .s if insn needs delay slot
1015
   ','  print LOCAL_LABEL_PREFIX
1016
   '@'  print trap, rte or rts depending upon pragma interruptness
1017
   '#'  output a nop if there is nothing to put in the delay slot
1018
   '''  print likelihood suffix (/u for unlikely).
1019
   '>'  print branch target if -fverbose-asm
1020
   'O'  print a constant without the #
1021
   'R'  print the LSW of a dp value - changes if in little endian
1022
   'S'  print the MSW of a dp value - changes if in little endian
1023
   'T'  print the next word of a dp value - same as 'R' in big endian mode.
1024
   'M'  SHMEDIA: print an `x' if `m' will print `base,index'.
1025
        otherwise: print .b / .w / .l / .s / .d suffix if operand is a MEM.
1026
   'N'  print 'r63' if the operand is (const_int 0).
1027
   'd'  print a V2SF reg as dN instead of fpN.
1028
   'm'  print a pair `base,offset' or `base,index', for LD and ST.
1029
   'U'  Likewise for {LD,ST}{HI,LO}.
1030
   'V'  print the position of a single bit set.
1031
   'W'  print the position of a single bit cleared.
1032
   't'  print a memory address which is a register.
1033
   'u'  prints the lowest 16 bits of CONST_INT, as an unsigned value.
1034
   'o'  output an operator.  */
1035
 
1036
void
1037
print_operand (FILE *stream, rtx x, int code)
1038
{
1039
  int regno;
1040
  enum machine_mode mode;
1041
 
1042
  switch (code)
1043
    {
1044
      tree trapa_attr;
1045
 
1046
    case '.':
1047
      if (final_sequence
1048
          && ! INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))
1049
          && get_attr_length (XVECEXP (final_sequence, 0, 1)))
1050
        fprintf (stream, ASSEMBLER_DIALECT ? "/s" : ".s");
1051
      break;
1052
    case ',':
1053
      fprintf (stream, "%s", LOCAL_LABEL_PREFIX);
1054
      break;
1055
    case '@':
1056
      trapa_attr = lookup_attribute ("trap_exit",
1057
                                      DECL_ATTRIBUTES (current_function_decl));
1058
      if (trapa_attr)
1059
        fprintf (stream, "trapa #%ld",
1060
                 (long) TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (trapa_attr))));
1061
      else if (sh_cfun_interrupt_handler_p ())
1062
        {
1063
          if (sh_cfun_resbank_handler_p ())
1064
            fprintf (stream, "resbank\n");
1065
          fprintf (stream, "rte");
1066
        }
1067
      else
1068
        fprintf (stream, "rts");
1069
      break;
1070
    case '#':
1071
      /* Output a nop if there's nothing in the delay slot.  */
1072
      if (dbr_sequence_length () == 0)
1073
        fprintf (stream, "\n\tnop");
1074
      break;
1075
    case '\'':
1076
      {
1077
        rtx note = find_reg_note (current_output_insn, REG_BR_PROB, 0);
1078
 
1079
        if (note && INTVAL (XEXP (note, 0)) * 2 < REG_BR_PROB_BASE)
1080
          fputs ("/u", stream);
1081
        break;
1082
      }
1083
    case '>':
1084
      if (flag_verbose_asm && JUMP_LABEL (current_output_insn))
1085
        {
1086
          fputs ("\t! target: ", stream);
1087
          output_addr_const (stream, JUMP_LABEL (current_output_insn));
1088
        }
1089
      break;
1090
    case 'O':
1091
      x = mark_constant_pool_use (x);
1092
      output_addr_const (stream, x);
1093
      break;
1094
    /* N.B.: %R / %S / %T adjust memory addresses by four.
1095
       For SHMEDIA, that means they can be used to access the first and
1096
       second 32 bit part of a 64 bit (or larger) value that
1097
       might be held in floating point registers or memory.
1098
       While they can be used to access 64 bit parts of a larger value
1099
       held in general purpose registers, that won't work with memory -
1100
       neither for fp registers, since the frxx names are used.  */
1101
    case 'R':
1102
      if (REG_P (x) || GET_CODE (x) == SUBREG)
1103
        {
1104
          regno = true_regnum (x);
1105
          regno += FP_REGISTER_P (regno) ? 1 : LSW;
1106
          fputs (reg_names[regno], (stream));
1107
        }
1108
      else if (MEM_P (x))
1109
        {
1110
          x = adjust_address (x, SImode, 4 * LSW);
1111
          print_operand_address (stream, XEXP (x, 0));
1112
        }
1113
      else
1114
        {
1115
          rtx sub = NULL_RTX;
1116
 
1117
          mode = GET_MODE (x);
1118
          if (mode == VOIDmode)
1119
            mode = DImode;
1120
          if (GET_MODE_SIZE (mode) >= 8)
1121
            sub = simplify_subreg (SImode, x, mode, 4 * LSW);
1122
          if (sub)
1123
            print_operand (stream, sub, 0);
1124
          else
1125
            output_operand_lossage ("invalid operand to %%R");
1126
        }
1127
      break;
1128
    case 'S':
1129
      if (REG_P (x) || GET_CODE (x) == SUBREG)
1130
        {
1131
          regno = true_regnum (x);
1132
          regno += FP_REGISTER_P (regno) ? 0 : MSW;
1133
          fputs (reg_names[regno], (stream));
1134
        }
1135
      else if (MEM_P (x))
1136
        {
1137
          x = adjust_address (x, SImode, 4 * MSW);
1138
          print_operand_address (stream, XEXP (x, 0));
1139
        }
1140
      else
1141
        {
1142
          rtx sub = NULL_RTX;
1143
 
1144
          mode = GET_MODE (x);
1145
          if (mode == VOIDmode)
1146
            mode = DImode;
1147
          if (GET_MODE_SIZE (mode) >= 8)
1148
            sub = simplify_subreg (SImode, x, mode, 4 * MSW);
1149
          if (sub)
1150
            print_operand (stream, sub, 0);
1151
          else
1152
            output_operand_lossage ("invalid operand to %%S");
1153
        }
1154
      break;
1155
    case 'T':
1156
      /* Next word of a double.  */
1157
      switch (GET_CODE (x))
1158
        {
1159
        case REG:
1160
          fputs (reg_names[REGNO (x) + 1], (stream));
1161
          break;
1162
        case MEM:
1163
          if (GET_CODE (XEXP (x, 0)) != PRE_DEC
1164
              && GET_CODE (XEXP (x, 0)) != POST_INC)
1165
            x = adjust_address (x, SImode, 4);
1166
          print_operand_address (stream, XEXP (x, 0));
1167
          break;
1168
        default:
1169
          break;
1170
        }
1171
      break;
1172
 
1173
    case 't':
1174
      gcc_assert (MEM_P (x));
1175
      x = XEXP (x, 0);
1176
      switch (GET_CODE (x))
1177
        {
1178
        case REG:
1179
        case SUBREG:
1180
          print_operand (stream, x, 0);
1181
          break;
1182
        default:
1183
          break;
1184
        }
1185
      break;
1186
 
1187
    case 'o':
1188
      switch (GET_CODE (x))
1189
        {
1190
        case PLUS:  fputs ("add", stream); break;
1191
        case MINUS: fputs ("sub", stream); break;
1192
        case MULT:  fputs ("mul", stream); break;
1193
        case DIV:   fputs ("div", stream); break;
1194
        case EQ:    fputs ("eq",  stream); break;
1195
        case NE:    fputs ("ne",  stream); break;
1196
        case GT:  case LT:  fputs ("gt",  stream); break;
1197
        case GE:  case LE:  fputs ("ge",  stream); break;
1198
        case GTU: case LTU: fputs ("gtu", stream); break;
1199
        case GEU: case LEU: fputs ("geu", stream); break;
1200
        default:
1201
          break;
1202
        }
1203
      break;
1204
    case 'M':
1205
      if (TARGET_SHMEDIA)
1206
        {
1207
          if (MEM_P (x)
1208
              && GET_CODE (XEXP (x, 0)) == PLUS
1209
              && (REG_P (XEXP (XEXP (x, 0), 1))
1210
                  || GET_CODE (XEXP (XEXP (x, 0), 1)) == SUBREG))
1211
            fputc ('x', stream);
1212
        }
1213
      else
1214
        {
1215
          if (MEM_P (x))
1216
            {
1217
              switch (GET_MODE (x))
1218
                {
1219
                case QImode: fputs (".b", stream); break;
1220
                case HImode: fputs (".w", stream); break;
1221
                case SImode: fputs (".l", stream); break;
1222
                case SFmode: fputs (".s", stream); break;
1223
                case DFmode: fputs (".d", stream); break;
1224
                default: gcc_unreachable ();
1225
                }
1226
            }
1227
        }
1228
      break;
1229
 
1230
    case 'm':
1231
      gcc_assert (MEM_P (x));
1232
      x = XEXP (x, 0);
1233
      /* Fall through.  */
1234
    case 'U':
1235
      switch (GET_CODE (x))
1236
        {
1237
        case REG:
1238
        case SUBREG:
1239
          print_operand (stream, x, 0);
1240
          fputs (", 0", stream);
1241
          break;
1242
 
1243
        case PLUS:
1244
          print_operand (stream, XEXP (x, 0), 0);
1245
          fputs (", ", stream);
1246
          print_operand (stream, XEXP (x, 1), 0);
1247
          break;
1248
 
1249
        default:
1250
          gcc_unreachable ();
1251
        }
1252
      break;
1253
 
1254
    case 'V':
1255
      {
1256
        int num = exact_log2 (INTVAL (x));
1257
        gcc_assert (num >= 0);
1258
        fprintf (stream, "#%d", num);
1259
      }
1260
      break;
1261
 
1262
    case 'W':
1263
      {
1264
        int num = exact_log2 (~INTVAL (x));
1265
        gcc_assert (num >= 0);
1266
        fprintf (stream, "#%d", num);
1267
      }
1268
      break;
1269
 
1270
    case 'd':
1271
      gcc_assert (REG_P (x) && GET_MODE (x) == V2SFmode);
1272
 
1273
      fprintf ((stream), "d%s", reg_names[REGNO (x)] + 1);
1274
      break;
1275
 
1276
    case 'N':
1277
      if (x == CONST0_RTX (GET_MODE (x)))
1278
        {
1279
          fprintf ((stream), "r63");
1280
          break;
1281
        }
1282
      goto default_output;
1283
    case 'u':
1284
      if (CONST_INT_P (x))
1285
        {
1286
          fprintf ((stream), "%u", (unsigned) INTVAL (x) & (0x10000 - 1));
1287
          break;
1288
        }
1289
      /* Fall through.  */
1290
 
1291
    default_output:
1292
    default:
1293
      regno = 0;
1294
      mode = GET_MODE (x);
1295
 
1296
      switch (GET_CODE (x))
1297
        {
1298
        case TRUNCATE:
1299
          {
1300
            rtx inner = XEXP (x, 0);
1301
            int offset = 0;
1302
            enum machine_mode inner_mode;
1303
 
1304
            /* We might see SUBREGs with vector mode registers inside.  */
1305
            if (GET_CODE (inner) == SUBREG
1306
                && (GET_MODE_SIZE (GET_MODE (inner))
1307
                    == GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
1308
                && subreg_lowpart_p (inner))
1309
              inner = SUBREG_REG (inner);
1310
            if (CONST_INT_P (inner))
1311
              {
1312
                x = GEN_INT (trunc_int_for_mode (INTVAL (inner), GET_MODE (x)));
1313
                goto default_output;
1314
              }
1315
            inner_mode = GET_MODE (inner);
1316
            if (GET_CODE (inner) == SUBREG
1317
                && (GET_MODE_SIZE (GET_MODE (inner))
1318
                    < GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
1319
                && REG_P (SUBREG_REG (inner)))
1320
              {
1321
                offset = subreg_regno_offset (REGNO (SUBREG_REG (inner)),
1322
                                              GET_MODE (SUBREG_REG (inner)),
1323
                                              SUBREG_BYTE (inner),
1324
                                              GET_MODE (inner));
1325
                inner = SUBREG_REG (inner);
1326
              }
1327
            if (!REG_P (inner) || GET_MODE_SIZE (inner_mode) > 8)
1328
              abort ();
1329
            /* Floating point register pairs are always big endian;
1330
               general purpose registers are 64 bit wide.  */
1331
            regno = REGNO (inner);
1332
            regno = (HARD_REGNO_NREGS (regno, inner_mode)
1333
                     - HARD_REGNO_NREGS (regno, mode))
1334
                     + offset;
1335
            x = inner;
1336
            goto reg;
1337
          }
1338
        case SIGN_EXTEND:
1339
          x = XEXP (x, 0);
1340
          goto reg;
1341
          /* FIXME: We need this on SHmedia32 because reload generates
1342
             some sign-extended HI or QI loads into DImode registers
1343
             but, because Pmode is SImode, the address ends up with a
1344
             subreg:SI of the DImode register.  Maybe reload should be
1345
             fixed so as to apply alter_subreg to such loads?  */
1346
        case IF_THEN_ELSE:
1347
          gcc_assert (trapping_target_operand (x, VOIDmode));
1348
          x = XEXP (XEXP (x, 2), 0);
1349
          goto default_output;
1350
        case SUBREG:
1351
          gcc_assert (SUBREG_BYTE (x) == 0
1352
                      && REG_P (SUBREG_REG (x)));
1353
 
1354
          x = SUBREG_REG (x);
1355
          /* Fall through.  */
1356
 
1357
        reg:
1358
        case REG:
1359
          regno += REGNO (x);
1360
          if (FP_REGISTER_P (regno)
1361
              && mode == V16SFmode)
1362
            fprintf ((stream), "mtrx%s", reg_names[regno] + 2);
1363
          else if (FP_REGISTER_P (REGNO (x))
1364
                   && mode == V4SFmode)
1365
            fprintf ((stream), "fv%s", reg_names[regno] + 2);
1366
          else if (REG_P (x)
1367
                   && mode == V2SFmode)
1368
            fprintf ((stream), "fp%s", reg_names[regno] + 2);
1369
          else if (FP_REGISTER_P (REGNO (x))
1370
                   && GET_MODE_SIZE (mode) > 4)
1371
            fprintf ((stream), "d%s", reg_names[regno] + 1);
1372
          else
1373
            fputs (reg_names[regno], (stream));
1374
          break;
1375
 
1376
        case MEM:
1377
          output_address (XEXP (x, 0));
1378
          break;
1379
 
1380
        default:
1381
          if (TARGET_SH1)
1382
            fputc ('#', stream);
1383
          output_addr_const (stream, x);
1384
          break;
1385
        }
1386
      break;
1387
    }
1388
}
1389
 
1390
 
1391
/* Encode symbol attributes of a SYMBOL_REF into its
1392
   SYMBOL_REF_FLAGS.  */
1393
static void
1394
sh_encode_section_info (tree decl, rtx rtl, int first)
1395
{
1396
  default_encode_section_info (decl, rtl, first);
1397
 
1398
  if (TREE_CODE (decl) == FUNCTION_DECL
1399
      && sh2a_function_vector_p (decl) && TARGET_SH2A)
1400
    SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_FUNCVEC_FUNCTION;
1401
}
1402
 
1403
/* Like force_operand, but guarantees that VALUE ends up in TARGET.  */
1404
static void
1405
force_into (rtx value, rtx target)
1406
{
1407
  value = force_operand (value, target);
1408
  if (! rtx_equal_p (value, target))
1409
    emit_insn (gen_move_insn (target, value));
1410
}
1411
 
1412
/* Emit code to perform a block move.  Choose the best method.
1413
 
1414
   OPERANDS[0] is the destination.
1415
   OPERANDS[1] is the source.
1416
   OPERANDS[2] is the size.
1417
   OPERANDS[3] is the alignment safe to use.  */
1418
 
1419
int
1420
expand_block_move (rtx *operands)
1421
{
1422
  int align = INTVAL (operands[3]);
1423
  int constp = (CONST_INT_P (operands[2]));
1424
  int bytes = (constp ? INTVAL (operands[2]) : 0);
1425
 
1426
  if (! constp)
1427
    return 0;
1428
 
1429
  /* If we could use mov.l to move words and dest is word-aligned, we
1430
     can use movua.l for loads and still generate a relatively short
1431
     and efficient sequence.  */
1432
  if (TARGET_SH4A_ARCH && align < 4
1433
      && MEM_ALIGN (operands[0]) >= 32
1434
      && can_move_by_pieces (bytes, 32))
1435
    {
1436
      rtx dest = copy_rtx (operands[0]);
1437
      rtx src = copy_rtx (operands[1]);
1438
      /* We could use different pseudos for each copied word, but
1439
         since movua can only load into r0, it's kind of
1440
         pointless.  */
1441
      rtx temp = gen_reg_rtx (SImode);
1442
      rtx src_addr = copy_addr_to_reg (XEXP (src, 0));
1443
      int copied = 0;
1444
 
1445
      while (copied + 4 <= bytes)
1446
        {
1447
          rtx to = adjust_address (dest, SImode, copied);
1448
          rtx from = adjust_automodify_address (src, BLKmode,
1449
                                                src_addr, copied);
1450
 
1451
          set_mem_size (from, GEN_INT (4));
1452
          emit_insn (gen_movua (temp, from));
1453
          emit_move_insn (src_addr, plus_constant (src_addr, 4));
1454
          emit_move_insn (to, temp);
1455
          copied += 4;
1456
        }
1457
 
1458
      if (copied < bytes)
1459
        move_by_pieces (adjust_address (dest, BLKmode, copied),
1460
                        adjust_automodify_address (src, BLKmode,
1461
                                                   src_addr, copied),
1462
                        bytes - copied, align, 0);
1463
 
1464
      return 1;
1465
    }
1466
 
1467
  /* If it isn't a constant number of bytes, or if it doesn't have 4 byte
1468
     alignment, or if it isn't a multiple of 4 bytes, then fail.  */
1469
  if (align < 4 || (bytes % 4 != 0))
1470
    return 0;
1471
 
1472
  if (TARGET_HARD_SH4)
1473
    {
1474
      if (bytes < 12)
1475
        return 0;
1476
      else if (bytes == 12)
1477
        {
1478
          rtx func_addr_rtx = gen_reg_rtx (Pmode);
1479
          rtx r4 = gen_rtx_REG (SImode, 4);
1480
          rtx r5 = gen_rtx_REG (SImode, 5);
1481
 
1482
          function_symbol (func_addr_rtx, "__movmemSI12_i4", SFUNC_STATIC);
1483
          force_into (XEXP (operands[0], 0), r4);
1484
          force_into (XEXP (operands[1], 0), r5);
1485
          emit_insn (gen_block_move_real_i4 (func_addr_rtx));
1486
          return 1;
1487
        }
1488
      else if (! TARGET_SMALLCODE)
1489
        {
1490
          const char *entry_name;
1491
          rtx func_addr_rtx = gen_reg_rtx (Pmode);
1492
          int dwords;
1493
          rtx r4 = gen_rtx_REG (SImode, 4);
1494
          rtx r5 = gen_rtx_REG (SImode, 5);
1495
          rtx r6 = gen_rtx_REG (SImode, 6);
1496
 
1497
          entry_name = (bytes & 4 ? "__movmem_i4_odd" : "__movmem_i4_even");
1498
          function_symbol (func_addr_rtx, entry_name, SFUNC_STATIC);
1499
          force_into (XEXP (operands[0], 0), r4);
1500
          force_into (XEXP (operands[1], 0), r5);
1501
 
1502
          dwords = bytes >> 3;
1503
          emit_insn (gen_move_insn (r6, GEN_INT (dwords - 1)));
1504
          emit_insn (gen_block_lump_real_i4 (func_addr_rtx));
1505
          return 1;
1506
        }
1507
      else
1508
        return 0;
1509
    }
1510
  if (bytes < 64)
1511
    {
1512
      char entry[30];
1513
      rtx func_addr_rtx = gen_reg_rtx (Pmode);
1514
      rtx r4 = gen_rtx_REG (SImode, 4);
1515
      rtx r5 = gen_rtx_REG (SImode, 5);
1516
 
1517
      sprintf (entry, "__movmemSI%d", bytes);
1518
      function_symbol (func_addr_rtx, entry, SFUNC_STATIC);
1519
      force_into (XEXP (operands[0], 0), r4);
1520
      force_into (XEXP (operands[1], 0), r5);
1521
      emit_insn (gen_block_move_real (func_addr_rtx));
1522
      return 1;
1523
    }
1524
 
1525
  /* This is the same number of bytes as a memcpy call, but to a different
1526
     less common function name, so this will occasionally use more space.  */
1527
  if (! TARGET_SMALLCODE)
1528
    {
1529
      rtx func_addr_rtx = gen_reg_rtx (Pmode);
1530
      int final_switch, while_loop;
1531
      rtx r4 = gen_rtx_REG (SImode, 4);
1532
      rtx r5 = gen_rtx_REG (SImode, 5);
1533
      rtx r6 = gen_rtx_REG (SImode, 6);
1534
 
1535
      function_symbol (func_addr_rtx, "__movmem", SFUNC_STATIC);
1536
      force_into (XEXP (operands[0], 0), r4);
1537
      force_into (XEXP (operands[1], 0), r5);
1538
 
1539
      /* r6 controls the size of the move.  16 is decremented from it
1540
         for each 64 bytes moved.  Then the negative bit left over is used
1541
         as an index into a list of move instructions.  e.g., a 72 byte move
1542
         would be set up with size(r6) = 14, for one iteration through the
1543
         big while loop, and a switch of -2 for the last part.  */
1544
 
1545
      final_switch = 16 - ((bytes / 4) % 16);
1546
      while_loop = ((bytes / 4) / 16 - 1) * 16;
1547
      emit_insn (gen_move_insn (r6, GEN_INT (while_loop + final_switch)));
1548
      emit_insn (gen_block_lump_real (func_addr_rtx));
1549
      return 1;
1550
    }
1551
 
1552
  return 0;
1553
}
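/* Editorial sketch, not part of the original sh.c: two minimal,
   self-contained illustrations of expand_block_move above.  The first
   mirrors the SH4A movua.l loop (one unaligned 4-byte load per word,
   then an aligned store); the second reproduces the r6 sizing
   arithmetic for the __movmem entry, worked for the 72-byte example in
   the comment.  The helper names are hypothetical and exist only here.  */

static void ATTRIBUTE_UNUSED
movua_style_copy_sketch (unsigned char *dst, const unsigned char *src,
                         int bytes)
{
  int copied = 0;
  unsigned int word;

  while (copied + 4 <= bytes)
    {
      __builtin_memcpy (&word, src + copied, 4); /* unaligned load (movua.l) */
      __builtin_memcpy (dst + copied, &word, 4); /* aligned store (mov.l)    */
      copied += 4;
    }
  /* The remaining bytes - copied tail is what move_by_pieces handles.  */
}

static int ATTRIBUTE_UNUSED
movmem_r6_sizing_sketch (void)
{
  int bytes = 72;
  int final_switch = 16 - ((bytes / 4) % 16);    /* 16 - (18 % 16) = 14 */
  int while_loop = ((bytes / 4) / 16 - 1) * 16;  /* (1 - 1) * 16   = 0  */
  /* r6 = 14: one trip through the 64-byte loop leaves 14 - 16 = -2,
     which indexes the tail switch for the last 8 bytes.  */
  return while_loop + final_switch;
}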
1554
 
1555
/* Prepare operands for a move define_expand; specifically, one of the
1556
   operands must be in a register.  */
1557
 
1558
int
1559
prepare_move_operands (rtx operands[], enum machine_mode mode)
1560
{
1561
  if ((mode == SImode || mode == DImode)
1562
      && flag_pic
1563
      && ! ((mode == Pmode || mode == ptr_mode)
1564
            && tls_symbolic_operand (operands[1], Pmode) != TLS_MODEL_NONE))
1565
    {
1566
      rtx temp;
1567
      if (SYMBOLIC_CONST_P (operands[1]))
1568
        {
1569
          if (MEM_P (operands[0]))
1570
            operands[1] = force_reg (Pmode, operands[1]);
1571
          else if (TARGET_SHMEDIA
1572
                   && GET_CODE (operands[1]) == LABEL_REF
1573
                   && target_reg_operand (operands[0], mode))
1574
            /* It's ok.  */;
1575
          else
1576
            {
1577
              temp = (!can_create_pseudo_p ()
1578
                      ? operands[0]
1579
                      : gen_reg_rtx (Pmode));
1580
              operands[1] = legitimize_pic_address (operands[1], mode, temp);
1581
            }
1582
        }
1583
      else if (GET_CODE (operands[1]) == CONST
1584
               && GET_CODE (XEXP (operands[1], 0)) == PLUS
1585
               && SYMBOLIC_CONST_P (XEXP (XEXP (operands[1], 0), 0)))
1586
        {
1587
          temp = !can_create_pseudo_p () ? operands[0] : gen_reg_rtx (Pmode);
1588
          temp = legitimize_pic_address (XEXP (XEXP (operands[1], 0), 0),
1589
                                         mode, temp);
1590
          operands[1] = expand_binop (mode, add_optab, temp,
1591
                                      XEXP (XEXP (operands[1], 0), 1),
1592
                                      (!can_create_pseudo_p ()
1593
                                       ? temp
1594
                                       : gen_reg_rtx (Pmode)),
1595
                                      0, OPTAB_LIB_WIDEN);
1596
        }
1597
    }
1598
 
1599
  if (! reload_in_progress && ! reload_completed)
1600
    {
1601
      /* Copy the source to a register if both operands aren't registers.  */
1602
      if (! register_operand (operands[0], mode)
1603
          && ! sh_register_operand (operands[1], mode))
1604
        operands[1] = copy_to_mode_reg (mode, operands[1]);
1605
 
1606
      if (MEM_P (operands[0]) && ! memory_operand (operands[0], mode))
1607
        {
1608
          /* This is like change_address_1 (operands[0], mode, 0, 1) ,
1609
             except that we can't use that function because it is static.  */
1610
          rtx new_rtx = change_address (operands[0], mode, 0);
1611
          MEM_COPY_ATTRIBUTES (new_rtx, operands[0]);
1612
          operands[0] = new_rtx;
1613
        }
1614
 
1615
      /* This case can happen while generating code to move the result
1616
         of a library call to the target.  Reject `st r0,@(rX,rY)' because
1617
         reload will fail to find a spill register for rX, since r0 is already
1618
         being used for the source.  */
1619
      else if (TARGET_SH1
1620
               && refers_to_regno_p (R0_REG, R0_REG + 1, operands[1], (rtx *)0)
1621
               && MEM_P (operands[0])
1622
               && GET_CODE (XEXP (operands[0], 0)) == PLUS
1623
               && REG_P (XEXP (XEXP (operands[0], 0), 1)))
1624
        operands[1] = copy_to_mode_reg (mode, operands[1]);
1625
    }
1626
 
1627
  if (mode == Pmode || mode == ptr_mode)
1628
    {
1629
      rtx op0, op1, opc;
1630
      enum tls_model tls_kind;
1631
 
1632
      op0 = operands[0];
1633
      op1 = operands[1];
1634
      if (GET_CODE (op1) == CONST
1635
          && GET_CODE (XEXP (op1, 0)) == PLUS
1636
          && (tls_symbolic_operand (XEXP (XEXP (op1, 0), 0), Pmode)
1637
              != TLS_MODEL_NONE))
1638
        {
1639
          opc = XEXP (XEXP (op1, 0), 1);
1640
          op1 = XEXP (XEXP (op1, 0), 0);
1641
        }
1642
      else
1643
        opc = NULL_RTX;
1644
 
1645
      if ((tls_kind = tls_symbolic_operand (op1, Pmode)) != TLS_MODEL_NONE)
1646
        {
1647
          rtx tga_op1, tga_ret, tmp, tmp2;
1648
 
1649
          switch (tls_kind)
1650
            {
1651
            case TLS_MODEL_GLOBAL_DYNAMIC:
1652
              tga_ret = gen_rtx_REG (Pmode, R0_REG);
1653
              emit_call_insn (gen_tls_global_dynamic (tga_ret, op1));
1654
              op1 = tga_ret;
1655
              break;
1656
 
1657
            case TLS_MODEL_LOCAL_DYNAMIC:
1658
              tga_ret = gen_rtx_REG (Pmode, R0_REG);
1659
              emit_call_insn (gen_tls_local_dynamic (tga_ret, op1));
1660
 
1661
              tmp = gen_reg_rtx (Pmode);
1662
              emit_move_insn (tmp, tga_ret);
1663
 
1664
              if (register_operand (op0, Pmode))
1665
                tmp2 = op0;
1666
              else
1667
                tmp2 = gen_reg_rtx (Pmode);
1668
 
1669
              emit_insn (gen_symDTPOFF2reg (tmp2, op1, tmp));
1670
              op1 = tmp2;
1671
              break;
1672
 
1673
            case TLS_MODEL_INITIAL_EXEC:
1674
              if (! flag_pic)
1675
                {
1676
                  /* Don't schedule insns for getting GOT address when
1677
                     the first scheduling is enabled, to avoid spill
1678
                     failures for R0.  */
1679
                  if (flag_schedule_insns)
1680
                    emit_insn (gen_blockage ());
1681
                  emit_insn (gen_GOTaddr2picreg ());
1682
                  emit_use (gen_rtx_REG (SImode, PIC_REG));
1683
                  if (flag_schedule_insns)
1684
                    emit_insn (gen_blockage ());
1685
                }
1686
              tga_op1 = !can_create_pseudo_p () ? op0 : gen_reg_rtx (Pmode);
1687
              tmp = gen_sym2GOTTPOFF (op1);
1688
              emit_insn (gen_tls_initial_exec (tga_op1, tmp));
1689
              op1 = tga_op1;
1690
              break;
1691
 
1692
            case TLS_MODEL_LOCAL_EXEC:
1693
              tmp2 = gen_reg_rtx (Pmode);
1694
              emit_insn (gen_load_gbr (tmp2));
1695
              tmp = gen_reg_rtx (Pmode);
1696
              emit_insn (gen_symTPOFF2reg (tmp, op1));
1697
 
1698
              if (register_operand (op0, Pmode))
1699
                op1 = op0;
1700
              else
1701
                op1 = gen_reg_rtx (Pmode);
1702
 
1703
              emit_insn (gen_addsi3 (op1, tmp, tmp2));
1704
              break;
1705
 
1706
            default:
1707
              gcc_unreachable ();
1708
            }
1709
          if (opc)
1710
            emit_insn (gen_addsi3 (op1, op1, force_reg (SImode, opc)));
1711
          operands[1] = op1;
1712
        }
1713
    }
1714
 
1715
  return 0;
1716
}
1717
 
1718
enum rtx_code
1719
prepare_cbranch_operands (rtx *operands, enum machine_mode mode,
1720
                          enum rtx_code comparison)
1721
{
1722
  rtx op1;
1723
  rtx scratch = NULL_RTX;
1724
 
1725
  if (comparison == LAST_AND_UNUSED_RTX_CODE)
1726
    comparison = GET_CODE (operands[0]);
1727
  else
1728
    scratch = operands[4];
1729
  if (CONST_INT_P (operands[1])
1730
      && !CONST_INT_P (operands[2]))
1731
    {
1732
      rtx tmp = operands[1];
1733
 
1734
      operands[1] = operands[2];
1735
      operands[2] = tmp;
1736
      comparison = swap_condition (comparison);
1737
    }
1738
  if (CONST_INT_P (operands[2]))
1739
    {
1740
      HOST_WIDE_INT val = INTVAL (operands[2]);
1741
      if ((val == -1 || val == -0x81)
1742
          && (comparison == GT || comparison == LE))
1743
        {
1744
          comparison = (comparison == GT) ? GE : LT;
1745
          operands[2] = gen_int_mode (val + 1, mode);
1746
        }
1747
      else if ((val == 1 || val == 0x80)
1748
               && (comparison == GE || comparison == LT))
1749
        {
1750
          comparison = (comparison == GE) ? GT : LE;
1751
          operands[2] = gen_int_mode (val - 1, mode);
1752
        }
1753
      else if (val == 1 && (comparison == GEU || comparison == LTU))
1754
        {
1755
          comparison = (comparison == GEU) ? NE : EQ;
1756
          operands[2] = CONST0_RTX (mode);
1757
        }
1758
      else if (val == 0x80 && (comparison == GEU || comparison == LTU))
1759
        {
1760
          comparison = (comparison == GEU) ? GTU : LEU;
1761
          operands[2] = gen_int_mode (val - 1, mode);
1762
        }
1763
      else if (val == 0 && (comparison == GTU || comparison == LEU))
1764
        comparison = (comparison == GTU) ? NE : EQ;
1765
      else if (mode == SImode
1766
               && ((val == 0x7fffffff
1767
                    && (comparison == GTU || comparison == LEU))
1768
                   || ((unsigned HOST_WIDE_INT) val
1769
                        == (unsigned HOST_WIDE_INT) 0x7fffffff + 1
1770
                       && (comparison == GEU || comparison == LTU))))
1771
        {
1772
          comparison = (comparison == GTU || comparison == GEU) ? LT : GE;
1773
          operands[2] = CONST0_RTX (mode);
1774
        }
1775
    }
1776
  op1 = operands[1];
1777
  if (can_create_pseudo_p ())
1778
    operands[1] = force_reg (mode, op1);
1779
  /* When we are handling DImode comparisons, we want to keep constants so
1780
     that we can optimize the component comparisons; however, memory loads
1781
     are better issued as a whole so that they can be scheduled well.
1782
     SImode equality comparisons allow I08 constants, but only when they
1783
     compare r0.  Hence, if operands[1] has to be loaded from somewhere else
1784
     into a register, that register might as well be r0, and we allow the
1785
     constant.  If it is already in a register, this is likely to be
1786
     allocated to a different hard register, thus we load the constant into
1787
     a register unless it is zero.  */
1788
  if (!REG_P (operands[2])
1789
      && (!CONST_INT_P (operands[2])
1790
          || (mode == SImode && operands[2] != CONST0_RTX (SImode)
1791
              && ((comparison != EQ && comparison != NE)
1792
                  || (REG_P (op1) && REGNO (op1) != R0_REG)
1793
                  || !satisfies_constraint_I08 (operands[2])))))
1794
    {
1795
      if (scratch && GET_MODE (scratch) == mode)
1796
        {
1797
          emit_move_insn (scratch, operands[2]);
1798
          operands[2] = scratch;
1799
        }
1800
      else if (can_create_pseudo_p ())
1801
        operands[2] = force_reg (mode, operands[2]);
1802
    }
1803
  return comparison;
1804
}
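/* Editorial sketch, not part of the original sh.c: the constant
   adjustments made by prepare_cbranch_operands above rely on the
   following equivalences; this hypothetical helper just spells them out
   in plain C (it always returns 1).  */

static int ATTRIBUTE_UNUSED
cbranch_canonicalization_sketch (int x, unsigned int u)
{
  return ((x > -1) == (x >= 0)            /* GT -1    ->  GE 0  (val + 1) */
          && (x >= 1) == (x > 0)          /* GE 1     ->  GT 0  (val - 1) */
          && (u >= 1u) == (u != 0u)       /* GEU 1    ->  NE 0            */
          && (u < 1u) == (u == 0u)        /* LTU 1    ->  EQ 0            */
          && (u >= 0x80u) == (u > 0x7fu)  /* GEU 0x80 ->  GTU 0x7f        */
          && (u > 0u) == (u != 0u));      /* GTU 0    ->  NE 0            */
}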
1805
 
1806
void
1807
expand_cbranchsi4 (rtx *operands, enum rtx_code comparison, int probability)
1808
{
1809
  rtx (*branch_expander) (rtx) = gen_branch_true;
1810
  rtx jump;
1811
 
1812
  comparison = prepare_cbranch_operands (operands, SImode, comparison);
1813
  switch (comparison)
1814
    {
1815
    case NE: case LT: case LE: case LTU: case LEU:
1816
      comparison = reverse_condition (comparison);
1817
      branch_expander = gen_branch_false;
1818
    default: ;
1819
    }
1820
  emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, T_REG),
1821
                          gen_rtx_fmt_ee (comparison, SImode,
1822
                                          operands[1], operands[2])));
1823
  jump = emit_jump_insn (branch_expander (operands[3]));
1824
  if (probability >= 0)
1825
    add_reg_note (jump, REG_BR_PROB, GEN_INT (probability));
1826
 
1827
}
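/* Editorial sketch, not part of the original sh.c: for NE/LT/LE/LTU/LEU
   the comparison above is emitted in reversed form and paired with
   gen_branch_false; this hypothetical helper shows why the branch
   outcome is unchanged (it always returns 1).  */

static int ATTRIBUTE_UNUSED
branch_false_sketch (int a, int b)
{
  int t = (a >= b);         /* T bit set by the reversed comparison GE    */
  int taken = !t;           /* branch_false: jump when the T bit is clear */
  return taken == (a < b);  /* identical to the original LT branch        */
}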
1828
 
1829
/* ??? How should we distribute probabilities when more than one branch
1830
   is generated?  So far we only have some ad-hoc observations:
1831
   - If the operands are random, they are likely to differ in both parts.
1832
   - If comparing items in a hash chain, the operands are random or equal;
1833
     operation should be EQ or NE.
1834
   - If items are searched in an ordered tree from the root, we can expect
1835
     the highpart to be unequal about half of the time; operation should be
1836
     an inequality comparison, operands non-constant, and overall probability
1837
     about 50%.  Likewise for quicksort.
1838
   - Range checks will be often made against constants.  Even if we assume for
1839
     simplicity an even distribution of the non-constant operand over a
1840
     sub-range here, the same probability could be generated with differently
1841
     wide sub-ranges - as long as the ratio of the part of the subrange that
1842
     is before the threshold to the part that comes after the threshold stays
1843
     the same.  Thus, we can't really tell anything here;
1844
     assuming random distribution is at least simple.
1845
 */
1846
 
1847
bool
1848
expand_cbranchdi4 (rtx *operands, enum rtx_code comparison)
1849
{
1850
  enum rtx_code msw_taken, msw_skip, lsw_taken;
1851
  rtx skip_label = NULL_RTX;
1852
  rtx op1h, op1l, op2h, op2l;
1853
  int num_branches;
1854
  int prob, rev_prob;
1855
  int msw_taken_prob = -1, msw_skip_prob = -1, lsw_taken_prob = -1;
1856
  rtx scratch = operands[4];
1857
 
1858
  comparison = prepare_cbranch_operands (operands, DImode, comparison);
1859
  op1h = gen_highpart_mode (SImode, DImode, operands[1]);
1860
  op2h = gen_highpart_mode (SImode, DImode, operands[2]);
1861
  op1l = gen_lowpart (SImode, operands[1]);
1862
  op2l = gen_lowpart (SImode, operands[2]);
1863
  msw_taken = msw_skip = lsw_taken = LAST_AND_UNUSED_RTX_CODE;
1864
  prob = split_branch_probability;
1865
  rev_prob = REG_BR_PROB_BASE - prob;
1866
  switch (comparison)
1867
    {
1868
    /* ??? Should we use the cmpeqdi_t pattern for equality comparisons?
1869
       That costs 1 cycle more when the first branch can be predicted taken,
1870
       but saves us mispredicts because only one branch needs prediction.
1871
       It also enables generating the cmpeqdi_t-1 pattern.  */
1872
    case EQ:
1873
      if (TARGET_CMPEQDI_T)
1874
        {
1875
          emit_insn (gen_cmpeqdi_t (operands[1], operands[2]));
1876
          emit_jump_insn (gen_branch_true (operands[3]));
1877
          return true;
1878
        }
1879
      msw_skip = NE;
1880
      lsw_taken = EQ;
1881
      if (prob >= 0)
1882
        {
1883
          /* If we had more precision, we'd use rev_prob - (rev_prob >> 32).
1884
           */
1885
          msw_skip_prob = rev_prob;
1886
          if (REG_BR_PROB_BASE <= 65535)
1887
            lsw_taken_prob = prob ? REG_BR_PROB_BASE : 0;
1888
          else
1889
            {
1890
              gcc_assert (HOST_BITS_PER_WIDEST_INT >= 64);
1891
              lsw_taken_prob
1892
                = (prob
1893
                   ? (REG_BR_PROB_BASE
1894
                      - ((HOST_WIDEST_INT) REG_BR_PROB_BASE * rev_prob
1895
                         / ((HOST_WIDEST_INT) prob << 32)))
1896
                   : 0);
1897
            }
1898
        }
1899
      break;
1900
    case NE:
1901
      if (TARGET_CMPEQDI_T)
1902
        {
1903
          emit_insn (gen_cmpeqdi_t (operands[1], operands[2]));
1904
          emit_jump_insn (gen_branch_false (operands[3]));
1905
          return true;
1906
        }
1907
      msw_taken = NE;
1908
      msw_taken_prob = prob;
1909
      lsw_taken = NE;
1910
      lsw_taken_prob = 0;
1911
      break;
1912
    case GTU: case GT:
1913
      msw_taken = comparison;
1914
      if (CONST_INT_P (op2l) && INTVAL (op2l) == -1)
1915
        break;
1916
      if (comparison != GTU || op2h != CONST0_RTX (SImode))
1917
        msw_skip = swap_condition (msw_taken);
1918
      lsw_taken = GTU;
1919
      break;
1920
    case GEU: case GE:
1921
      if (op2l == CONST0_RTX (SImode))
1922
        msw_taken = comparison;
1923
      else
1924
        {
1925
          msw_taken = comparison == GE ? GT : GTU;
1926
          msw_skip = swap_condition (msw_taken);
1927
          lsw_taken = GEU;
1928
        }
1929
      break;
1930
    case LTU: case LT:
1931
      msw_taken = comparison;
1932
      if (op2l == CONST0_RTX (SImode))
1933
        break;
1934
      msw_skip = swap_condition (msw_taken);
1935
      lsw_taken = LTU;
1936
      break;
1937
    case LEU: case LE:
1938
      if (CONST_INT_P (op2l) && INTVAL (op2l) == -1)
1939
        msw_taken = comparison;
1940
      else
1941
        {
1942
          lsw_taken = LEU;
1943
          if (comparison == LE)
1944
            msw_taken = LT;
1945
          else if (op2h != CONST0_RTX (SImode))
1946
            msw_taken = LTU;
1947
          else
1948
            break;
1949
          msw_skip = swap_condition (msw_taken);
1950
        }
1951
      break;
1952
    default: return false;
1953
    }
1954
  num_branches = ((msw_taken != LAST_AND_UNUSED_RTX_CODE)
1955
                  + (msw_skip != LAST_AND_UNUSED_RTX_CODE)
1956
                  + (lsw_taken != LAST_AND_UNUSED_RTX_CODE));
1957
  if (comparison != EQ && comparison != NE && num_branches > 1)
1958
    {
1959
      if (!CONSTANT_P (operands[2])
1960
          && prob >= (int) (REG_BR_PROB_BASE * 3 / 8U)
1961
          && prob <= (int) (REG_BR_PROB_BASE * 5 / 8U))
1962
        {
1963
          msw_taken_prob = prob / 2U;
1964
          msw_skip_prob
1965
            = REG_BR_PROB_BASE * rev_prob / (REG_BR_PROB_BASE + rev_prob);
1966
          lsw_taken_prob = prob;
1967
        }
1968
      else
1969
        {
1970
          msw_taken_prob = prob;
1971
          msw_skip_prob = REG_BR_PROB_BASE;
1972
          /* ??? If we have a constant op2h, should we use that when
1973
             calculating lsw_taken_prob?  */
1974
          lsw_taken_prob = prob;
1975
        }
1976
    }
1977
  operands[1] = op1h;
1978
  operands[2] = op2h;
1979
  operands[4] = NULL_RTX;
1980
  if (reload_completed
1981
      && ! arith_reg_or_0_operand (op2h, SImode)
1982
      && (true_regnum (op1h) || (comparison != EQ && comparison != NE))
1983
      && (msw_taken != LAST_AND_UNUSED_RTX_CODE
1984
          || msw_skip != LAST_AND_UNUSED_RTX_CODE))
1985
    {
1986
      emit_move_insn (scratch, operands[2]);
1987
      operands[2] = scratch;
1988
    }
1989
  if (msw_taken != LAST_AND_UNUSED_RTX_CODE)
1990
    expand_cbranchsi4 (operands, msw_taken, msw_taken_prob);
1991
  if (msw_skip != LAST_AND_UNUSED_RTX_CODE)
1992
    {
1993
      rtx taken_label = operands[3];
1994
 
1995
      /* Operands were possibly modified, but msw_skip doesn't expect this.
1996
         Always use the original ones.  */
1997
      if (msw_taken != LAST_AND_UNUSED_RTX_CODE)
1998
        {
1999
          operands[1] = op1h;
2000
          operands[2] = op2h;
2001
        }
2002
 
2003
      operands[3] = skip_label = gen_label_rtx ();
2004
      expand_cbranchsi4 (operands, msw_skip, msw_skip_prob);
2005
      operands[3] = taken_label;
2006
    }
2007
  operands[1] = op1l;
2008
  operands[2] = op2l;
2009
  if (lsw_taken != LAST_AND_UNUSED_RTX_CODE)
2010
    {
2011
      if (reload_completed
2012
          && ! arith_reg_or_0_operand (op2l, SImode)
2013
          && (true_regnum (op1l) || (lsw_taken != EQ && lsw_taken != NE)))
2014
        {
2015
          emit_move_insn (scratch, operands[2]);
2016
          operands[2] = scratch;
2017
        }
2018
      expand_cbranchsi4 (operands, lsw_taken, lsw_taken_prob);
2019
    }
2020
  if (msw_skip != LAST_AND_UNUSED_RTX_CODE)
2021
    emit_label (skip_label);
2022
  return true;
2023
}
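/* Editorial sketch, not part of the original sh.c: the word-wise split
   performed by expand_cbranchdi4 for a signed GT, written out in plain C
   over a hypothetical 64-bit value given as (high, low) word pairs.
   The high words decide the result unless they are equal (msw_taken = GT,
   msw_skip = LT); only then are the low words compared, and as unsigned
   (lsw_taken = GTU).  */

static int ATTRIBUTE_UNUSED
cbranchdi_gt_sketch (int ah, unsigned int al, int bh, unsigned int bl)
{
  if (ah > bh)      /* msw_taken: branch straight to the taken label */
    return 1;
  if (ah < bh)      /* msw_skip: branch past the low-word test       */
    return 0;
  return al > bl;   /* lsw_taken: low words compared as unsigned     */
}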
2024
 
2025
/* Emit INSN, possibly in a PARALLEL with an USE of fpscr for SH4.  */
2026
 
2027
static void
2028
sh_emit_set_t_insn (rtx insn, enum machine_mode mode)
2029
{
2030
  if ((TARGET_SH4 || TARGET_SH2A) && GET_MODE_CLASS (mode) == MODE_FLOAT)
2031
    {
2032
      insn = gen_rtx_PARALLEL (VOIDmode,
2033
                       gen_rtvec (2, insn,
2034
                                  gen_rtx_USE (VOIDmode, get_fpscr_rtx ())));
2035
      (mode == SFmode ? emit_sf_insn : emit_df_insn) (insn);
2036
    }
2037
  else
2038
    emit_insn (insn);
2039
}
2040
 
2041
/* Prepare the operands for an scc instruction; make sure that the
2042
   compare has been done and the result is in T_REG.  */
2043
void
2044
sh_emit_scc_to_t (enum rtx_code code, rtx op0, rtx op1)
2045
{
2046
  rtx t_reg = gen_rtx_REG (SImode, T_REG);
2047
  enum rtx_code oldcode = code;
2048
  enum machine_mode mode;
2049
 
2050
  /* First need a compare insn.  */
2051
  switch (code)
2052
    {
2053
    case NE:
2054
      /* It isn't possible to handle this case.  */
2055
      gcc_unreachable ();
2056
    case LT:
2057
      code = GT;
2058
      break;
2059
    case LE:
2060
      code = GE;
2061
      break;
2062
    case LTU:
2063
      code = GTU;
2064
      break;
2065
    case LEU:
2066
      code = GEU;
2067
      break;
2068
    default:
2069
      break;
2070
    }
2071
  if (code != oldcode)
2072
    {
2073
      rtx tmp = op0;
2074
      op0 = op1;
2075
      op1 = tmp;
2076
    }
2077
 
2078
  mode = GET_MODE (op0);
2079
  if (mode == VOIDmode)
2080
    mode = GET_MODE (op1);
2081
 
2082
  op0 = force_reg (mode, op0);
2083
  if ((code != EQ && code != NE
2084
       && (op1 != const0_rtx
2085
           || code == GTU  || code == GEU || code == LTU || code == LEU))
2086
      || (mode == DImode && op1 != const0_rtx)
2087
      || (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT))
2088
    op1 = force_reg (mode, op1);
2089
 
2090
  sh_emit_set_t_insn (gen_rtx_SET (VOIDmode, t_reg,
2091
                                   gen_rtx_fmt_ee (code, SImode, op0, op1)),
2092
                      mode);
2093
}
2094
 
2095
rtx
2096
sh_emit_cheap_store_flag (enum machine_mode mode, enum rtx_code code,
2097
                          rtx op0, rtx op1)
2098
{
2099
  rtx target = gen_reg_rtx (SImode);
2100
  rtx tmp;
2101
 
2102
  gcc_assert (TARGET_SHMEDIA);
2103
  switch (code)
2104
    {
2105
    case EQ:
2106
    case GT:
2107
    case LT:
2108
    case UNORDERED:
2109
    case GTU:
2110
    case LTU:
2111
      tmp = gen_rtx_fmt_ee (code, SImode, op0, op1);
2112
      emit_insn (gen_cstore4_media (target, tmp, op0, op1));
2113
      code = NE;
2114
      break;
2115
 
2116
    case NE:
2117
    case GE:
2118
    case LE:
2119
    case ORDERED:
2120
    case GEU:
2121
    case LEU:
2122
      tmp = gen_rtx_fmt_ee (reverse_condition (code), mode, op0, op1);
2123
      emit_insn (gen_cstore4_media (target, tmp, op0, op1));
2124
      code = EQ;
2125
      break;
2126
 
2127
    case UNEQ:
2128
    case UNGE:
2129
    case UNGT:
2130
    case UNLE:
2131
    case UNLT:
2132
    case LTGT:
2133
      return NULL_RTX;
2134
 
2135
    default:
2136
      gcc_unreachable ();
2137
    }
2138
 
2139
  if (mode == DImode)
2140
    {
2141
      rtx t2 = gen_reg_rtx (DImode);
2142
      emit_insn (gen_extendsidi2 (t2, target));
2143
      target = t2;
2144
    }
2145
 
2146
  return gen_rtx_fmt_ee (code, VOIDmode, target, const0_rtx);
2147
}
2148
 
2149
/* Called from the md file, set up the operands of a compare instruction.  */
2150
 
2151
void
2152
sh_emit_compare_and_branch (rtx *operands, enum machine_mode mode)
2153
{
2154
  enum rtx_code code = GET_CODE (operands[0]);
2155
  enum rtx_code branch_code;
2156
  rtx op0 = operands[1];
2157
  rtx op1 = operands[2];
2158
  rtx insn, tem;
2159
  bool need_ccmpeq = false;
2160
 
2161
  if (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT)
2162
    {
2163
      op0 = force_reg (mode, op0);
2164
      op1 = force_reg (mode, op1);
2165
    }
2166
  else
2167
    {
2168
      if (code != EQ || mode == DImode)
2169
        {
2170
          /* Force args into regs, since we can't use constants here.  */
2171
          op0 = force_reg (mode, op0);
2172
          if (op1 != const0_rtx || code == GTU  || code == GEU)
2173
            op1 = force_reg (mode, op1);
2174
        }
2175
    }
2176
 
2177
  if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2178
    {
2179
      if (code == LT
2180
          || (code == LE && TARGET_IEEE && TARGET_SH2E)
2181
          || (code == GE && !(TARGET_IEEE && TARGET_SH2E)))
2182
        {
2183
          tem = op0, op0 = op1, op1 = tem;
2184
          code = swap_condition (code);
2185
        }
2186
 
2187
      /* GE becomes fcmp/gt+fcmp/eq, for SH2E and TARGET_IEEE only.  */
2188
      if (code == GE)
2189
        {
2190
          gcc_assert (TARGET_IEEE && TARGET_SH2E);
2191
          need_ccmpeq = true;
2192
          code = GT;
2193
        }
2194
 
2195
      /* Now we can have EQ, NE, GT, LE.  NE and LE are then transformed
2196
         to EQ/GT respectively.  */
2197
      gcc_assert (code == EQ || code == GT || code == NE || code == LE);
2198
    }
2199
 
2200
  switch (code)
2201
    {
2202
    case EQ:
2203
    case GT:
2204
    case GE:
2205
    case GTU:
2206
    case GEU:
2207
      branch_code = code;
2208
      break;
2209
    case NE:
2210
    case LT:
2211
    case LE:
2212
    case LTU:
2213
    case LEU:
2214
      branch_code = reverse_condition (code);
2215
      break;
2216
    default:
2217
      gcc_unreachable ();
2218
    }
2219
 
2220
  insn = gen_rtx_SET (VOIDmode,
2221
                      gen_rtx_REG (SImode, T_REG),
2222
                      gen_rtx_fmt_ee (branch_code, SImode, op0, op1));
2223
 
2224
  sh_emit_set_t_insn (insn, mode);
2225
  if (need_ccmpeq)
2226
    sh_emit_set_t_insn (gen_ieee_ccmpeqsf_t (op0, op1), mode);
2227
 
2228
  if (branch_code == code)
2229
    emit_jump_insn (gen_branch_true (operands[3]));
2230
  else
2231
    emit_jump_insn (gen_branch_false (operands[3]));
2232
}
2233
 
2234
void
2235
sh_emit_compare_and_set (rtx *operands, enum machine_mode mode)
2236
{
2237
  enum rtx_code code = GET_CODE (operands[1]);
2238
  rtx op0 = operands[2];
2239
  rtx op1 = operands[3];
2240
  rtx lab = NULL_RTX;
2241
  bool invert = false;
2242
  rtx tem;
2243
 
2244
  op0 = force_reg (mode, op0);
2245
  if ((code != EQ && code != NE
2246
       && (op1 != const0_rtx
2247
           || code == GTU  || code == GEU || code == LTU || code == LEU))
2248
      || (mode == DImode && op1 != const0_rtx)
2249
      || (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT))
2250
    op1 = force_reg (mode, op1);
2251
 
2252
  if (GET_MODE_CLASS (mode) == MODE_FLOAT)
2253
    {
2254
      if (code == LT || code == LE)
2255
        {
2256
          code = swap_condition (code);
2257
          tem = op0, op0 = op1, op1 = tem;
2258
        }
2259
      if (code == GE)
2260
        {
2261
          if (TARGET_IEEE)
2262
            {
2263
              lab = gen_label_rtx ();
2264
              sh_emit_scc_to_t (EQ, op0, op1);
2265
              emit_jump_insn (gen_branch_true (lab));
2266
              code = GT;
2267
           }
2268
          else
2269
            {
2270
              code = LT;
2271
              invert = true;
2272
            }
2273
        }
2274
    }
2275
 
2276
  if (code == NE)
2277
    {
2278
      code = EQ;
2279
      invert = true;
2280
    }
2281
 
2282
  sh_emit_scc_to_t (code, op0, op1);
2283
  if (lab)
2284
    emit_label (lab);
2285
  if (invert)
2286
    emit_insn (gen_movnegt (operands[0]));
2287
  else
2288
    emit_move_insn (operands[0], gen_rtx_REG (SImode, T_REG));
2289
}
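/* Editorial sketch, not part of the original sh.c: NE has no direct
   compare instruction, so sh_emit_compare_and_set above computes EQ and
   negates the T bit (gen_movnegt).  A hypothetical plain-C rendering:  */

static int ATTRIBUTE_UNUSED
store_flag_invert_sketch (int a, int b)
{
  int t = (a == b);   /* the comparison actually emitted  */
  return !t;          /* movnegt: same value as (a != b)  */
}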
2290
 
2291
/* Functions to output assembly code.  */
2292
 
2293
/* Return a sequence of instructions to perform DI or DF move.
2294
 
2295
   Since the SH cannot move a DI or DF in one instruction, we have
2296
   to take care when we see overlapping source and dest registers.  */
2297
 
2298
const char *
2299
output_movedouble (rtx insn ATTRIBUTE_UNUSED, rtx operands[],
2300
                   enum machine_mode mode)
2301
{
2302
  rtx dst = operands[0];
2303
  rtx src = operands[1];
2304
 
2305
  if (MEM_P (dst)
2306
      && GET_CODE (XEXP (dst, 0)) == PRE_DEC)
2307
    return "mov.l       %T1,%0\n\tmov.l %1,%0";
2308
 
2309
  if (register_operand (dst, mode)
2310
      && register_operand (src, mode))
2311
    {
2312
      if (REGNO (src) == MACH_REG)
2313
        return "sts     mach,%S0\n\tsts macl,%R0";
2314
 
2315
      /* When mov.d r1,r2 do r2->r3 then r1->r2;
2316
         when mov.d r1,r0 do r1->r0 then r2->r1.  */
2317
 
2318
      if (REGNO (src) + 1 == REGNO (dst))
2319
        return "mov     %T1,%T0\n\tmov  %1,%0";
2320
      else
2321
        return "mov     %1,%0\n\tmov    %T1,%T0";
2322
    }
2323
  else if (CONST_INT_P (src))
2324
    {
2325
      if (INTVAL (src) < 0)
2326
        output_asm_insn ("mov   #-1,%S0", operands);
2327
      else
2328
        output_asm_insn ("mov   #0,%S0", operands);
2329
 
2330
      return "mov       %1,%R0";
2331
    }
2332
  else if (MEM_P (src))
2333
    {
2334
      int ptrreg = -1;
2335
      int dreg = REGNO (dst);
2336
      rtx inside = XEXP (src, 0);
2337
 
2338
      switch (GET_CODE (inside))
2339
        {
2340
        case REG:
2341
          ptrreg = REGNO (inside);
2342
          break;
2343
 
2344
        case SUBREG:
2345
          ptrreg = subreg_regno (inside);
2346
          break;
2347
 
2348
        case PLUS:
2349
          ptrreg = REGNO (XEXP (inside, 0));
2350
          /* ??? A r0+REG address shouldn't be possible here, because it isn't
2351
             an offsettable address.  Unfortunately, offsettable addresses use
2352
             QImode to check the offset, and a QImode offsettable address
2353
             requires r0 for the other operand, which is not currently
2354
             supported, so we can't use the 'o' constraint.
2355
             Thus we must check for and handle r0+REG addresses here.
2356
             We punt for now, since this is likely very rare.  */
2357
          gcc_assert (!REG_P (XEXP (inside, 1)));
2358
          break;
2359
 
2360
        case LABEL_REF:
2361
          return "mov.l %1,%0\n\tmov.l  %1+4,%T0";
2362
        case POST_INC:
2363
          return "mov.l %1,%0\n\tmov.l  %1,%T0";
2364
        default:
2365
          gcc_unreachable ();
2366
        }
2367
 
2368
      /* Work out the safe way to copy.  Copy into the second half first.  */
2369
      if (dreg == ptrreg)
2370
        return "mov.l   %T1,%T0\n\tmov.l        %1,%0";
2371
    }
2372
 
2373
  return "mov.l %1,%0\n\tmov.l  %T1,%T0";
2374
}
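/* Editorial sketch, not part of the original sh.c: the register-register
   case of output_movedouble above orders the two word moves so the first
   write never clobbers the second source word.  A hypothetical rendering
   over an array standing in for consecutive hard registers:  */

static void ATTRIBUTE_UNUSED
movedouble_order_sketch (unsigned int *regs, int src, int dst)
{
  if (src + 1 == dst)
    {
      /* Writing regs[dst] first would destroy regs[src + 1];
         copy the second word first ("mov %T1,%T0; mov %1,%0").  */
      regs[dst + 1] = regs[src + 1];
      regs[dst] = regs[src];
    }
  else
    {
      /* Safe in the other order ("mov %1,%0; mov %T1,%T0").  */
      regs[dst] = regs[src];
      regs[dst + 1] = regs[src + 1];
    }
}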
2375
 
2376
/* Print an instruction which would have gone into a delay slot after
2377
   another instruction, but couldn't because the other instruction expanded
2378
   into a sequence where putting the slot insn at the end wouldn't work.  */
2379
 
2380
static void
2381
print_slot (rtx insn)
2382
{
2383
  final_scan_insn (XVECEXP (insn, 0, 1), asm_out_file, optimize, 1, NULL);
2384
 
2385
  INSN_DELETED_P (XVECEXP (insn, 0, 1)) = 1;
2386
}
2387
 
2388
const char *
2389
output_far_jump (rtx insn, rtx op)
2390
{
2391
  struct { rtx lab, reg, op; } this_jmp;
2392
  rtx braf_base_lab = NULL_RTX;
2393
  const char *jump;
2394
  int far;
2395
  int offset = branch_dest (insn) - INSN_ADDRESSES (INSN_UID (insn));
2396
  rtx prev;
2397
 
2398
  this_jmp.lab = gen_label_rtx ();
2399
 
2400
  if (TARGET_SH2
2401
      && offset >= -32764
2402
      && offset - get_attr_length (insn) <= 32766)
2403
    {
2404
      far = 0;
2405
      jump = "mov.w     %O0,%1; braf    %1";
2406
    }
2407
  else
2408
    {
2409
      far = 1;
2410
      if (flag_pic)
2411
        {
2412
          if (TARGET_SH2)
2413
            jump = "mov.l       %O0,%1; braf    %1";
2414
          else
2415
            jump = "mov.l       r0,@-r15; mova  %O0,r0; mov.l   @r0,%1; add     r0,%1; mov.l    @r15+,r0; jmp   @%1";
2416
        }
2417
      else
2418
        jump = "mov.l   %O0,%1; jmp     @%1";
2419
    }
2420
  /* If we have a scratch register available, use it.  */
2421
  if (NONJUMP_INSN_P ((prev = prev_nonnote_insn (insn)))
2422
      && INSN_CODE (prev) == CODE_FOR_indirect_jump_scratch)
2423
    {
2424
      this_jmp.reg = SET_DEST (XVECEXP (PATTERN (prev), 0, 0));
2425
      if (REGNO (this_jmp.reg) == R0_REG && flag_pic && ! TARGET_SH2)
2426
        jump = "mov.l   r1,@-r15; mova  %O0,r0; mov.l   @r0,r1; add     r1,r0; mov.l    @r15+,r1; jmp   @%1";
2427
      output_asm_insn (jump, &this_jmp.lab);
2428
      if (dbr_sequence_length ())
2429
        print_slot (final_sequence);
2430
      else
2431
        output_asm_insn ("nop", 0);
2432
    }
2433
  else
2434
    {
2435
      /* Output the delay slot insn first if any.  */
2436
      if (dbr_sequence_length ())
2437
        print_slot (final_sequence);
2438
 
2439
      this_jmp.reg = gen_rtx_REG (SImode, 13);
2440
      /* We must keep the stack aligned to 8-byte boundaries on SH5.
2441
         Fortunately, MACL is fixed and call-clobbered, and we never
2442
         need its value across jumps, so save r13 in it instead of in
2443
         the stack.  */
2444
      if (TARGET_SH5)
2445
        output_asm_insn ("lds   r13, macl", 0);
2446
      else
2447
        output_asm_insn ("mov.l r13,@-r15", 0);
2448
      output_asm_insn (jump, &this_jmp.lab);
2449
      if (TARGET_SH5)
2450
        output_asm_insn ("sts   macl, r13", 0);
2451
      else
2452
        output_asm_insn ("mov.l @r15+,r13", 0);
2453
    }
2454
  if (far && flag_pic && TARGET_SH2)
2455
    {
2456
      braf_base_lab = gen_label_rtx ();
2457
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
2458
                                 CODE_LABEL_NUMBER (braf_base_lab));
2459
    }
2460
  if (far)
2461
    output_asm_insn (".align    2", 0);
2462
  (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (this_jmp.lab));
2463
  this_jmp.op = op;
2464
  if (far && flag_pic)
2465
    {
2466
      if (TARGET_SH2)
2467
        this_jmp.lab = braf_base_lab;
2468
      output_asm_insn (".long   %O2-%O0", &this_jmp.lab);
2469
    }
2470
  else
2471
    output_asm_insn (far ? ".long       %O2" : ".word %O2-%O0", &this_jmp.lab);
2472
  return "";
2473
}
2474
 
2475
/* Local label counter, used for constants in the pool and inside
2476
   pattern branches.  */
2477
 
2478
static int lf = 100;
2479
 
2480
/* Output code for ordinary branches.  */
2481
 
2482
const char *
2483
output_branch (int logic, rtx insn, rtx *operands)
2484
{
2485
  switch (get_attr_length (insn))
2486
    {
2487
    case 6:
2488
      /* This can happen if filling the delay slot has caused a forward
2489
         branch to exceed its range (we could reverse it, but only
2490
         when we know we won't overextend other branches; this should
2491
         best be handled by relaxation).
2492
         It can also happen when other condbranches hoist delay slot insn
2493
         from their destination, thus leading to code size increase.
2494
         But the branch will still be in the range -4092..+4098 bytes.  */
2495
 
2496
      if (! TARGET_RELAX)
2497
        {
2498
          int label = lf++;
2499
          /* The call to print_slot will clobber the operands.  */
2500
          rtx op0 = operands[0];
2501
 
2502
          /* If the instruction in the delay slot is annulled (true), then
2503
             there is no delay slot where we can put it now.  The only safe
2504
             place for it is after the label.  final will do that by default.  */
2505
 
2506
          if (final_sequence
2507
              && ! INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))
2508
              && get_attr_length (XVECEXP (final_sequence, 0, 1)))
2509
            {
2510
              asm_fprintf (asm_out_file, "\tb%s%ss\t%LLF%d\n", logic ? "f" : "t",
2511
                           ASSEMBLER_DIALECT ? "/" : ".", label);
2512
              print_slot (final_sequence);
2513
            }
2514
          else
2515
            asm_fprintf (asm_out_file, "\tb%s\t%LLF%d\n", logic ? "f" : "t", label);
2516
 
2517
          output_asm_insn ("bra\t%l0", &op0);
2518
          fprintf (asm_out_file, "\tnop\n");
2519
          (*targetm.asm_out.internal_label) (asm_out_file, "LF", label);
2520
 
2521
          return "";
2522
        }
2523
      /* When relaxing, handle this like a short branch.  The linker
2524
         will fix it up if it still doesn't fit after relaxation.  */
2525
    case 2:
2526
      return logic ? "bt%.\t%l0" : "bf%.\t%l0";
2527
 
2528
      /* These are for SH2e, in which we have to account for the
2529
         extra nop because of the hardware bug in annulled branches.  */
2530
    case 8:
2531
      if (! TARGET_RELAX)
2532
        {
2533
          int label = lf++;
2534
 
2535
          gcc_assert (!final_sequence
2536
                      || !(INSN_ANNULLED_BRANCH_P
2537
                           (XVECEXP (final_sequence, 0, 0))));
2538
          asm_fprintf (asm_out_file, "b%s%ss\t%LLF%d\n",
2539
                       logic ? "f" : "t",
2540
                       ASSEMBLER_DIALECT ? "/" : ".", label);
2541
          fprintf (asm_out_file, "\tnop\n");
2542
          output_asm_insn ("bra\t%l0", operands);
2543
          fprintf (asm_out_file, "\tnop\n");
2544
          (*targetm.asm_out.internal_label) (asm_out_file, "LF", label);
2545
 
2546
          return "";
2547
        }
2548
      /* When relaxing, fall through.  */
2549
    case 4:
2550
      {
2551
        char buffer[10];
2552
 
2553
        sprintf (buffer, "b%s%ss\t%%l0",
2554
                 logic ? "t" : "f",
2555
                 ASSEMBLER_DIALECT ? "/" : ".");
2556
        output_asm_insn (buffer, &operands[0]);
2557
        return "nop";
2558
      }
2559
 
2560
    default:
2561
      /* There should be no longer branches now - that would
2562
         indicate that something has destroyed the branches set
2563
         up in machine_dependent_reorg.  */
2564
      gcc_unreachable ();
2565
    }
2566
}
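/* Editorial sketch, not part of the original sh.c: when a conditional
   branch no longer reaches its target (the length-6 case of output_branch
   above), the condition is inverted and used to hop over an unconditional
   jump.  A hypothetical control-flow rendering in plain C:  */

static void ATTRIBUTE_UNUSED
long_branch_sketch (int cond, void (*far_target) (void))
{
  /* Emitted pattern for an out-of-range "bt target":
         bf      .LFnn        ! inverted condition, short range
         bra     target       ! unconditional, longer range
         nop
     .LFnn:                                                      */
  if (!cond)
    goto fallthrough;
  far_target ();
 fallthrough:
  ;
}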
2567
 
2568
/* Output a code sequence for INSN using TEMPL with OPERANDS; but before,
2569
   fill in operand 9 as a label to the successor insn.
2570
   We try to use jump threading where possible.
2571
   If CODE matches the comparison in the IF_THEN_ELSE of a following jump,
2572
   we assume the jump is taken.  I.e. EQ means follow jmp and bf, NE means
2573
   follow jmp and bt, if the address is in range.  */
2574
const char *
2575
output_branchy_insn (enum rtx_code code, const char *templ,
2576
                     rtx insn, rtx *operands)
2577
{
2578
  rtx next_insn = NEXT_INSN (insn);
2579
 
2580
  if (next_insn && JUMP_P (next_insn) && condjump_p (next_insn))
2581
    {
2582
      rtx src = SET_SRC (PATTERN (next_insn));
2583
      if (GET_CODE (src) == IF_THEN_ELSE && GET_CODE (XEXP (src, 0)) != code)
2584
        {
2585
          /* Following branch not taken */
2586
          operands[9] = gen_label_rtx ();
2587
          emit_label_after (operands[9], next_insn);
2588
          INSN_ADDRESSES_NEW (operands[9],
2589
                              INSN_ADDRESSES (INSN_UID (next_insn))
2590
                              + get_attr_length (next_insn));
2591
          return templ;
2592
        }
2593
      else
2594
        {
2595
          int offset = (branch_dest (next_insn)
2596
                        - INSN_ADDRESSES (INSN_UID (next_insn)) + 4);
2597
          if (offset >= -252 && offset <= 258)
2598
            {
2599
              if (GET_CODE (src) == IF_THEN_ELSE)
2600
                /* branch_true */
2601
                src = XEXP (src, 1);
2602
              operands[9] = src;
2603
              return templ;
2604
            }
2605
        }
2606
    }
2607
  operands[9] = gen_label_rtx ();
2608
  emit_label_after (operands[9], insn);
2609
  INSN_ADDRESSES_NEW (operands[9],
2610
                      INSN_ADDRESSES (INSN_UID (insn))
2611
                      + get_attr_length (insn));
2612
  return templ;
2613
}
2614
 
2615
const char *
2616
output_ieee_ccmpeq (rtx insn, rtx *operands)
2617
{
2618
  return output_branchy_insn (NE, "bt\t%l9\n\tfcmp/eq\t%1,%0",
2619
                              insn, operands);
2620
}
2621
 
2622
/* Output the start of the assembler file.  */
2623
 
2624
static void
2625
sh_file_start (void)
2626
{
2627
  default_file_start ();
2628
 
2629
#ifdef SYMBIAN
2630
  /* Declare the .directive section before it is used.  */
2631
  fputs ("\t.section .directive, \"SM\", @progbits, 1\n", asm_out_file);
2632
  fputs ("\t.asciz \"#<SYMEDIT>#\\n\"\n", asm_out_file);
2633
#endif
2634
 
2635
  if (TARGET_ELF)
2636
    /* We need to show the text section with the proper
2637
       attributes as in TEXT_SECTION_ASM_OP, before dwarf2out
2638
       emits it without attributes in TEXT_SECTION_ASM_OP, else GAS
2639
       will complain.  We can teach GAS specifically about the
2640
       default attributes for our choice of text section, but
2641
       then we would have to change GAS again if/when we change
2642
       the text section name.  */
2643
    fprintf (asm_out_file, "%s\n", TEXT_SECTION_ASM_OP);
2644
  else
2645
    /* Switch to the data section so that the coffsem symbol
2646
       isn't in the text section.  */
2647
    switch_to_section (data_section);
2648
 
2649
  if (TARGET_LITTLE_ENDIAN)
2650
    fputs ("\t.little\n", asm_out_file);
2651
 
2652
  if (!TARGET_ELF)
2653
    {
2654
      if (TARGET_SHCOMPACT)
2655
        fputs ("\t.mode\tSHcompact\n", asm_out_file);
2656
      else if (TARGET_SHMEDIA)
2657
        fprintf (asm_out_file, "\t.mode\tSHmedia\n\t.abi\t%i\n",
2658
                 TARGET_SHMEDIA64 ? 64 : 32);
2659
    }
2660
}
2661
 
2662
/* Check if PAT includes UNSPEC_CALLER unspec pattern.  */
2663
 
2664
static bool
2665
unspec_caller_rtx_p (rtx pat)
2666
{
2667
  rtx base, offset;
2668
  int i;
2669
 
2670
  split_const (pat, &base, &offset);
2671
  if (GET_CODE (base) == UNSPEC)
2672
    {
2673
      if (XINT (base, 1) == UNSPEC_CALLER)
2674
        return true;
2675
      for (i = 0; i < XVECLEN (base, 0); i++)
2676
        if (unspec_caller_rtx_p (XVECEXP (base, 0, i)))
2677
          return true;
2678
    }
2679
  return false;
2680
}
2681
 
2682
/* Indicate that INSN cannot be duplicated.  This is true for insn
2683
   that generates a unique label.  */
2684
 
2685
static bool
2686
sh_cannot_copy_insn_p (rtx insn)
2687
{
2688
  rtx pat;
2689
 
2690
  if (!reload_completed || !flag_pic)
2691
    return false;
2692
 
2693
  if (!NONJUMP_INSN_P (insn))
2694
    return false;
2695
  if (asm_noperands (insn) >= 0)
2696
    return false;
2697
 
2698
  pat = PATTERN (insn);
2699
  if (GET_CODE (pat) != SET)
2700
    return false;
2701
  pat = SET_SRC (pat);
2702
 
2703
  if (unspec_caller_rtx_p (pat))
2704
    return true;
2705
 
2706
  return false;
2707
}
2708
 
2709
/* Actual number of instructions used to make a shift by N.  */
2710
static const char ashiftrt_insns[] =
2711
  { 0,1,2,3,4,5,8,8,8,8,8,8,8,8,8,8,2,3,4,5,8,8,8,8,8,8,8,8,8,8,8,2};
2712
 
2713
/* Left shift and logical right shift are the same.  */
2714
static const char shift_insns[]    =
2715
  { 0,1,1,2,2,3,3,4,1,2,2,3,3,4,3,3,1,2,2,3,3,4,3,3,2,3,3,4,4,4,3,3};
2716
 
2717
/* Individual shift amounts needed to get the above length sequences.
2718
   One bit right shifts clobber the T bit, so when possible, put one bit
2719
   shifts in the middle of the sequence, so the ends are eligible for
2720
   branch delay slots.  */
2721
static const short shift_amounts[32][5] = {
2722
  {0}, {1}, {2}, {2, 1},
2723
  {2, 2}, {2, 1, 2}, {2, 2, 2}, {2, 2, 1, 2},
2724
  {8}, {8, 1}, {8, 2}, {8, 1, 2},
2725
  {8, 2, 2}, {8, 2, 1, 2}, {8, -2, 8}, {8, -1, 8},
2726
  {16}, {16, 1}, {16, 2}, {16, 1, 2},
2727
  {16, 2, 2}, {16, 2, 1, 2}, {16, -2, 8}, {16, -1, 8},
2728
  {16, 8}, {16, 1, 8}, {16, 8, 2}, {16, 8, 1, 2},
2729
  {16, 8, 2, 2}, {16, -1, -2, 16}, {16, -2, 16}, {16, -1, 16}};
2730
 
2731
/* Likewise, but for shift amounts < 16, up to three highmost bits
2732
   might be clobbered.  This is typically used when combined with some
2733
   kind of sign or zero extension.  */
2734
 
2735
static const char ext_shift_insns[]    =
2736
  { 0,1,1,2,2,3,2,2,1,2,2,3,3,3,2,2,1,2,2,3,3,4,3,3,2,3,3,4,4,4,3,3};
2737
 
2738
static const short ext_shift_amounts[32][4] = {
2739
  {0}, {1}, {2}, {2, 1},
2740
  {2, 2}, {2, 1, 2}, {8, -2}, {8, -1},
2741
  {8}, {8, 1}, {8, 2}, {8, 1, 2},
2742
  {8, 2, 2}, {16, -2, -1}, {16, -2}, {16, -1},
2743
  {16}, {16, 1}, {16, 2}, {16, 1, 2},
2744
  {16, 2, 2}, {16, 2, 1, 2}, {16, -2, 8}, {16, -1, 8},
2745
  {16, 8}, {16, 1, 8}, {16, 8, 2}, {16, 8, 1, 2},
2746
  {16, 8, 2, 2}, {16, -1, -2, 16}, {16, -2, 16}, {16, -1, 16}};
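/* Editorial sketch, not part of the original sh.c: each row of the
   tables above lists the constant shifts that are chained to build one
   shift; a negative entry means "shift the other way", exactly as
   gen_ashift below interprets it.  A hypothetical check of the
   shift_amounts[30] = {16, -2, 16} row, assuming 32-bit unsigned int
   (always returns 1):  */

static int ATTRIBUTE_UNUSED
shift_sequence_sketch (unsigned int x)
{
  unsigned int composed = ((x << 16) >> 2) << 16;   /* <<16, >>2, <<16 */
  return composed == (x << 30);
}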
2747
 
2748
/* Assuming we have a value that has been sign-extended by at least one bit,
2749
   can we use ext_shift_amounts, with the last shift turned into an arithmetic shift,
2750
   to shift it by N without data loss, and more quickly than by other means?  */
2751
#define EXT_SHIFT_SIGNED(n) (((n) | 8) == 15)
2752
 
2753
/* This is used in length attributes in sh.md to help compute the length
2754
   of arbitrary constant shift instructions.  */
2755
 
2756
int
2757
shift_insns_rtx (rtx insn)
2758
{
2759
  rtx set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
2760
  int shift_count = INTVAL (XEXP (set_src, 1)) & 31;
2761
  enum rtx_code shift_code = GET_CODE (set_src);
2762
 
2763
  switch (shift_code)
2764
    {
2765
    case ASHIFTRT:
2766
      return ashiftrt_insns[shift_count];
2767
    case LSHIFTRT:
2768
    case ASHIFT:
2769
      return shift_insns[shift_count];
2770
    default:
2771
      gcc_unreachable ();
2772
    }
2773
}
2774
 
2775
/* Return the cost of a shift.  */
2776
 
2777
static inline int
2778
shiftcosts (rtx x)
2779
{
2780
  int value;
2781
 
2782
  if (TARGET_SHMEDIA)
2783
    return 1;
2784
 
2785
  if (GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
2786
    {
2787
      if (GET_MODE (x) == DImode
2788
          && CONST_INT_P (XEXP (x, 1))
2789
          && INTVAL (XEXP (x, 1)) == 1)
2790
        return 2;
2791
 
2792
      /* Everything else is invalid, because there is no pattern for it.  */
2793
      return MAX_COST;
2794
    }
2795
  /* If the shift amount is not a constant, the shift will be expensive.  */
2796
  if (!CONST_INT_P (XEXP (x, 1)))
2797
    return SH_DYNAMIC_SHIFT_COST;
2798
 
2799
  /* Otherwise, return the true cost in instructions.  Cope with out of range
2800
     shift counts more or less arbitrarily.  */
2801
  value = INTVAL (XEXP (x, 1)) & 31;
2802
 
2803
  if (GET_CODE (x) == ASHIFTRT)
2804
    {
2805
      int cost = ashiftrt_insns[value];
2806
      /* If SH3, then we put the constant in a reg and use shad.  */
2807
      if (cost > 1 + SH_DYNAMIC_SHIFT_COST)
2808
        cost = 1 + SH_DYNAMIC_SHIFT_COST;
2809
      return cost;
2810
    }
2811
  else
2812
    return shift_insns[value];
2813
}
2814
 
2815
/* Return the cost of an AND operation.  */
2816
 
2817
static inline int
2818
andcosts (rtx x)
2819
{
2820
  int i;
2821
 
2822
  /* ANDing with a register is a single-cycle AND instruction.  */
2823
  if (!CONST_INT_P (XEXP (x, 1)))
2824
    return 1;
2825
 
2826
  i = INTVAL (XEXP (x, 1));
2827
 
2828
  if (TARGET_SHMEDIA)
2829
    {
2830
      if (satisfies_constraint_I10 (XEXP (x, 1))
2831
          || satisfies_constraint_J16 (XEXP (x, 1)))
2832
        return 1;
2833
      else
2834
        return 1 + rtx_cost (XEXP (x, 1), AND, !optimize_size);
2835
    }
2836
 
2837
  /* These constants are single cycle extu.[bw] instructions.  */
2838
  if (i == 0xff || i == 0xffff)
2839
    return 1;
2840
  /* Constants that can be used in an and immediate instruction in a single
2841
     cycle, but this requires r0, so make it a little more expensive.  */
2842
  if (CONST_OK_FOR_K08 (i))
2843
    return 2;
2844
  /* Constants that can be loaded with a mov immediate and an and.
2845
     This case is probably unnecessary.  */
2846
  if (CONST_OK_FOR_I08 (i))
2847
    return 2;
2848
  /* Any other constant requires a 2 cycle pc-relative load plus an and.
2849
     This case is probably unnecessary.  */
2850
  return 3;
2851
}
2852
 
2853
/* Return the cost of an addition or a subtraction.  */
2854
 
2855
static inline int
2856
addsubcosts (rtx x)
2857
{
2858
  /* Adding a register is a single cycle insn.  */
2859
  if (REG_P (XEXP (x, 1))
2860
      || GET_CODE (XEXP (x, 1)) == SUBREG)
2861
    return 1;
2862
 
2863
  /* Likewise for small constants.  */
2864
  if (CONST_INT_P (XEXP (x, 1))
2865
      && CONST_OK_FOR_ADD (INTVAL (XEXP (x, 1))))
2866
    return 1;
2867
 
2868
  if (TARGET_SHMEDIA)
2869
    switch (GET_CODE (XEXP (x, 1)))
2870
      {
2871
      case CONST:
2872
      case LABEL_REF:
2873
      case SYMBOL_REF:
2874
        return TARGET_SHMEDIA64 ? 5 : 3;
2875
 
2876
      case CONST_INT:
2877
        if (CONST_OK_FOR_I16 (INTVAL (XEXP (x, 1))))
2878
          return 2;
2879
        else if (CONST_OK_FOR_I16 (INTVAL (XEXP (x, 1)) >> 16))
2880
          return 3;
2881
        else if (CONST_OK_FOR_I16 ((INTVAL (XEXP (x, 1)) >> 16) >> 16))
2882
          return 4;
2883
 
2884
        /* Fall through.  */
2885
      default:
2886
        return 5;
2887
      }
2888
 
2889
  /* Any other constant requires a 2 cycle pc-relative load plus an
2890
     addition.  */
2891
  return 3;
2892
}
2893
 
2894
/* Return the cost of a multiply.  */
2895
static inline int
2896
multcosts (rtx x ATTRIBUTE_UNUSED)
2897
{
2898
  if (sh_multcost >= 0)
2899
    return sh_multcost;
2900
  if (TARGET_SHMEDIA)
2901
    /* ??? We have a mul insn, but it has a latency of three, and doesn't
2902
       accept constants.  Ideally, we would use a cost of one or two and
2903
       add the cost of the operand, but disregard the latter when inside loops
2904
       and loop invariant code motion is still to follow.
2905
       Using a multiply first and splitting it later if it's a loss
2906
       doesn't work because of different sign / zero extension semantics
2907
       of multiplies vs. shifts.  */
2908
    return TARGET_SMALLCODE ? 2 : 3;
2909
 
2910
  if (TARGET_SH2)
2911
    {
2912
      /* We have a mul insn, so we can never take more than the mul and the
2913
         read of the mac reg, but count more because of the latency and extra
2914
         reg usage.  */
2915
      if (TARGET_SMALLCODE)
2916
        return 2;
2917
      return 3;
2918
    }
2919
 
2920
  /* If we're aiming at small code, then just count the number of
2921
     insns in a multiply call sequence.  */
2922
  if (TARGET_SMALLCODE)
2923
    return 5;
2924
 
2925
  /* Otherwise count all the insns in the routine we'd be calling too.  */
2926
  return 20;
2927
}
2928
 
2929
/* Compute a (partial) cost for rtx X.  Return true if the complete
2930
   cost has been computed, and false if subexpressions should be
2931
   scanned.  In either case, *TOTAL contains the cost result.  */
2932
 
2933
static bool
2934
sh_rtx_costs (rtx x, int code, int outer_code, int *total,
2935
              bool speed ATTRIBUTE_UNUSED)
2936
{
2937
  switch (code)
2938
    {
2939
    case CONST_INT:
2940
      if (TARGET_SHMEDIA)
2941
        {
2942
          if (INTVAL (x) == 0)
2943
            *total = 0;
2944
          else if (outer_code == AND && and_operand ((x), DImode))
2945
            *total = 0;
2946
          else if ((outer_code == IOR || outer_code == XOR
2947
                    || outer_code == PLUS)
2948
                   && CONST_OK_FOR_I10 (INTVAL (x)))
2949
            *total = 0;
2950
          else if (CONST_OK_FOR_I16 (INTVAL (x)))
2951
            *total = COSTS_N_INSNS (outer_code != SET);
2952
          else if (CONST_OK_FOR_I16 (INTVAL (x) >> 16))
2953
            *total = COSTS_N_INSNS ((outer_code != SET) + 1);
2954
          else if (CONST_OK_FOR_I16 ((INTVAL (x) >> 16) >> 16))
2955
            *total = COSTS_N_INSNS ((outer_code != SET) + 2);
2956
          else
2957
            *total = COSTS_N_INSNS ((outer_code != SET) + 3);
2958
          return true;
2959
        }
2960
      if (CONST_OK_FOR_I08 (INTVAL (x)))
2961
        *total = 0;
2962
      else if ((outer_code == AND || outer_code == IOR || outer_code == XOR)
2963
               && CONST_OK_FOR_K08 (INTVAL (x)))
2964
        *total = 1;
2965
      /* prepare_cmp_insn will force costly constants into registers before
2966
         the cbranch[sd]i4 patterns can see them, so preserve potentially
2967
         interesting ones not covered by I08 above.  */
2968
      else if (outer_code == COMPARE
2969
               && ((unsigned HOST_WIDE_INT) INTVAL (x)
2970
                    == (unsigned HOST_WIDE_INT) 0x7fffffff + 1
2971
                    || INTVAL (x) == 0x7fffffff
2972
                   || INTVAL (x) == 0x80 || INTVAL (x) == -0x81))
2973
        *total = 1;
2974
      else
2975
        *total = 8;
2976
      return true;
2977
 
2978
    case CONST:
2979
    case LABEL_REF:
2980
    case SYMBOL_REF:
2981
      if (TARGET_SHMEDIA64)
2982
        *total = COSTS_N_INSNS (4);
2983
      else if (TARGET_SHMEDIA32)
2984
        *total = COSTS_N_INSNS (2);
2985
      else
2986
        *total = 5;
2987
      return true;
2988
 
2989
    case CONST_DOUBLE:
2990
      if (TARGET_SHMEDIA)
2991
        *total = COSTS_N_INSNS (4);
2992
      /* prepare_cmp_insn will force costly constants into registers before
2993
         the cbranchdi4 pattern can see them, so preserve potentially
2994
         interesting ones.  */
2995
      else if (outer_code == COMPARE && GET_MODE (x) == DImode)
2996
        *total = 1;
2997
      else
2998
        *total = 10;
2999
      return true;
3000
    case CONST_VECTOR:
3001
      if (x == CONST0_RTX (GET_MODE (x)))
3002
        *total = 0;
3003
      else if (sh_1el_vec (x, VOIDmode))
3004
        *total = outer_code != SET;
3005
      if (sh_rep_vec (x, VOIDmode))
3006
        *total = ((GET_MODE_UNIT_SIZE (GET_MODE (x)) + 3) / 4
3007
                  + (outer_code != SET));
3008
      *total = COSTS_N_INSNS (3) + (outer_code != SET);
3009
      return true;
3010
 
3011
    case PLUS:
3012
    case MINUS:
3013
      *total = COSTS_N_INSNS (addsubcosts (x));
3014
      return true;
3015
 
3016
    case AND:
3017
      *total = COSTS_N_INSNS (andcosts (x));
3018
      return true;
3019
 
3020
    case MULT:
3021
      *total = COSTS_N_INSNS (multcosts (x));
3022
      return true;
3023
 
3024
    case ASHIFT:
3025
    case ASHIFTRT:
3026
    case LSHIFTRT:
3027
      *total = COSTS_N_INSNS (shiftcosts (x));
3028
      return true;
3029
 
3030
    case DIV:
3031
    case UDIV:
3032
    case MOD:
3033
    case UMOD:
3034
      *total = COSTS_N_INSNS (20);
3035
      return true;
3036
 
3037
    case PARALLEL:
3038
      if (sh_1el_vec (x, VOIDmode))
3039
        *total = outer_code != SET;
3040
      if (sh_rep_vec (x, VOIDmode))
3041
        *total = ((GET_MODE_UNIT_SIZE (GET_MODE (x)) + 3) / 4
3042
                  + (outer_code != SET));
3043
      *total = COSTS_N_INSNS (3) + (outer_code != SET);
3044
      return true;
3045
 
3046
    case FLOAT:
3047
    case FIX:
3048
      *total = 100;
3049
      return true;
3050
 
3051
    default:
3052
      return false;
3053
    }
3054
}
3055
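 
/* Illustrative sketch, not part of this file's build: a minimal,
   self-contained model of the non-SHmedia CONST_INT branch of sh_rtx_costs
   above.  The constraint ranges used here (I08 as signed 8-bit, K08 as
   unsigned 8-bit) and the sketch_* helper name are assumptions made only
   for this example.  */
#if 0
#include <stdio.h>

static int
sketch_const_int_cost (long val, int is_logical_op)
{
  if (val >= -128 && val <= 127)                /* like CONST_OK_FOR_I08 */
    return 0;                                   /* free as an immediate */
  if (is_logical_op && val >= 0 && val <= 255)  /* like CONST_OK_FOR_K08 */
    return 1;                                   /* and/or/xor #imm8,r0 form */
  return 8;                                     /* needs a constant-pool load */
}

int
main (void)
{
  printf ("%d %d %d\n",
          sketch_const_int_cost (100, 0),       /* prints 0 */
          sketch_const_int_cost (200, 1),       /* prints 1 */
          sketch_const_int_cost (0x12345, 0));  /* prints 8 */
  return 0;
}
#endif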
 
3056
/* Compute the cost of an address.  For the SH, all valid addresses are
3057
   the same cost.  Use a slightly higher cost for reg + reg addressing,
3058
   since it increases pressure on r0.  */
3059
 
3060
static int
3061
sh_address_cost (rtx X,
3062
                 bool speed ATTRIBUTE_UNUSED)
3063
{
3064
  return (GET_CODE (X) == PLUS
3065
          && ! CONSTANT_P (XEXP (X, 1))
3066
          && ! TARGET_SHMEDIA ? 1 : 0);
3067
}
3068
 
3069
/* Code to expand a shift.  */
3070
 
3071
void
3072
gen_ashift (int type, int n, rtx reg)
3073
{
3074
  /* Negative values here come from the shift_amounts array.  */
3075
  if (n < 0)
3076
    {
3077
      if (type == ASHIFT)
3078
        type = LSHIFTRT;
3079
      else
3080
        type = ASHIFT;
3081
      n = -n;
3082
    }
3083
 
3084
  switch (type)
3085
    {
3086
    case ASHIFTRT:
3087
      emit_insn (gen_ashrsi3_k (reg, reg, GEN_INT (n)));
3088
      break;
3089
    case LSHIFTRT:
3090
      if (n == 1)
3091
        emit_insn (gen_lshrsi3_m (reg, reg, GEN_INT (n)));
3092
      else
3093
        emit_insn (gen_lshrsi3_k (reg, reg, GEN_INT (n)));
3094
      break;
3095
    case ASHIFT:
3096
      emit_insn (gen_ashlsi3_std (reg, reg, GEN_INT (n)));
3097
      break;
3098
    }
3099
}
3100
 
3101
/* Same for HImode */
3102
 
3103
void
3104
gen_ashift_hi (int type, int n, rtx reg)
3105
{
3106
  /* Negative values here come from the shift_amounts array.  */
3107
  if (n < 0)
3108
    {
3109
      if (type == ASHIFT)
3110
        type = LSHIFTRT;
3111
      else
3112
        type = ASHIFT;
3113
      n = -n;
3114
    }
3115
 
3116
  switch (type)
3117
    {
3118
    case ASHIFTRT:
3119
    case LSHIFTRT:
3120
      /* We don't have HImode right shift operations because using the
3121
         ordinary 32 bit shift instructions for that doesn't generate proper
3122
         zero/sign extension.
3123
         gen_ashift_hi is only called in contexts where we know that the
3124
         sign extension works out correctly.  */
3125
      {
3126
        int offset = 0;
3127
        if (GET_CODE (reg) == SUBREG)
3128
          {
3129
            offset = SUBREG_BYTE (reg);
3130
            reg = SUBREG_REG (reg);
3131
          }
3132
        gen_ashift (type, n, gen_rtx_SUBREG (SImode, reg, offset));
3133
        break;
3134
      }
3135
    case ASHIFT:
3136
      emit_insn (gen_ashlhi3_k (reg, reg, GEN_INT (n)));
3137
      break;
3138
    }
3139
}
3140
 
3141
/* Output RTL to split a constant shift into its component SH constant
3142
   shift instructions.  */
3143
 
3144
void
3145
gen_shifty_op (int code, rtx *operands)
3146
{
3147
  int value = INTVAL (operands[2]);
3148
  int max, i;
3149
 
3150
  /* Truncate the shift count in case it is out of bounds.  */
3151
  value = value & 31;
3152
 
3153
  if (value == 31)
3154
    {
3155
      if (code == LSHIFTRT)
3156
        {
3157
          emit_insn (gen_rotlsi3_1 (operands[0], operands[0]));
3158
          emit_insn (gen_movt (operands[0]));
3159
          return;
3160
        }
3161
      else if (code == ASHIFT)
3162
        {
3163
          /* There is a two instruction sequence for 31 bit left shifts,
3164
             but it requires r0.  */
3165
          if (REG_P (operands[0]) && REGNO (operands[0]) == 0)
3166
            {
3167
              emit_insn (gen_andsi3 (operands[0], operands[0], const1_rtx));
3168
              emit_insn (gen_rotlsi3_31 (operands[0], operands[0]));
3169
              return;
3170
            }
3171
        }
3172
    }
3173
  else if (value == 0)
3174
    {
3175
      /* This can happen even when optimizing, if there were subregs before
3176
         reload.  Don't output a nop here, as this is never optimized away;
3177
         use a no-op move instead.  */
3178
      emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[0]));
3179
      return;
3180
    }
3181
 
3182
  max = shift_insns[value];
3183
  for (i = 0; i < max; i++)
3184
    gen_ashift (code, shift_amounts[value][i], operands[0]);
3185
}
3186
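 
/* Illustrative sketch, not part of this file's build: SH1 only has
   single-instruction constant shifts by 1, 2, 8 and 16, and gen_shifty_op
   above splits any other count into a short sequence taken from the
   precomputed shift_amounts[] table.  This greedy decomposition shows the
   idea only; the real table may also contain negative (corrective) amounts,
   which gen_ashift turns into shifts the other way, sometimes giving
   shorter sequences than the greedy split below.  */
#if 0
#include <stdio.h>

static void
sketch_split_shift (int count)
{
  static const int parts[] = { 16, 8, 2, 1 };
  int i;

  count &= 31;
  printf ("shift by %d =", count);
  for (i = 0; i < 4; i++)
    while (count >= parts[i])
      {
        printf (" %d", parts[i]);
        count -= parts[i];
      }
  printf ("\n");
}

int
main (void)
{
  sketch_split_shift (13);   /* shift by 13 = 8 2 2 1 */
  sketch_split_shift (24);   /* shift by 24 = 16 8 */
  return 0;
}
#endif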
 
3187
/* Same as above, but optimized for values where the topmost bits don't
3188
   matter.  */
3189
 
3190
void
3191
gen_shifty_hi_op (int code, rtx *operands)
3192
{
3193
  int value = INTVAL (operands[2]);
3194
  int max, i;
3195
  void (*gen_fun) (int, int, rtx);
3196
 
3197
  /* This operation is used by and_shl for SImode values with a few
3198
     high bits known to be cleared.  */
3199
  value &= 31;
3200
  if (value == 0)
3201
    {
3202
      emit_insn (gen_nop ());
3203
      return;
3204
    }
3205
 
3206
  gen_fun = GET_MODE (operands[0]) == HImode ? gen_ashift_hi : gen_ashift;
3207
  if (code == ASHIFT)
3208
    {
3209
      max = ext_shift_insns[value];
3210
      for (i = 0; i < max; i++)
3211
        gen_fun (code, ext_shift_amounts[value][i], operands[0]);
3212
    }
3213
  else
3214
    /* When shifting right, emit the shifts in reverse order, so that
3215
       solitary negative values come first.  */
3216
    for (i = ext_shift_insns[value] - 1; i >= 0; i--)
3217
      gen_fun (code, ext_shift_amounts[value][i], operands[0]);
3218
}
3219
 
3220
/* Output RTL for an arithmetic right shift.  */
3221
 
3222
/* ??? Rewrite to use super-optimizer sequences.  */
3223
 
3224
int
3225
expand_ashiftrt (rtx *operands)
3226
{
3227
  rtx wrk;
3228
  char func[18];
3229
  int value;
3230
 
3231
  if (TARGET_SH3)
3232
    {
3233
      if (!CONST_INT_P (operands[2]))
3234
        {
3235
          rtx count = copy_to_mode_reg (SImode, operands[2]);
3236
          emit_insn (gen_negsi2 (count, count));
3237
          emit_insn (gen_ashrsi3_d (operands[0], operands[1], count));
3238
          return 1;
3239
        }
3240
      else if (ashiftrt_insns[INTVAL (operands[2]) & 31]
3241
               > 1 + SH_DYNAMIC_SHIFT_COST)
3242
        {
3243
          rtx count
3244
            = force_reg (SImode, GEN_INT (- (INTVAL (operands[2]) & 31)));
3245
          emit_insn (gen_ashrsi3_d (operands[0], operands[1], count));
3246
          return 1;
3247
        }
3248
    }
3249
  if (!CONST_INT_P (operands[2]))
3250
    return 0;
3251
 
3252
  value = INTVAL (operands[2]) & 31;
3253
 
3254
  if (value == 31)
3255
    {
3256
      /* If we are called from abs expansion, arrange things so that
3257
         we can use a single MT instruction that doesn't clobber the source,
3258
         if LICM can hoist out the load of the constant zero.  */
3259
      if (currently_expanding_to_rtl)
3260
        {
3261
          emit_insn (gen_cmpgtsi_t (force_reg (SImode, CONST0_RTX (SImode)),
3262
                                    operands[1]));
3263
          emit_insn (gen_mov_neg_si_t (operands[0]));
3264
          return 1;
3265
        }
3266
      emit_insn (gen_ashrsi2_31 (operands[0], operands[1]));
3267
      return 1;
3268
    }
3269
  else if (value >= 16 && value <= 19)
3270
    {
3271
      wrk = gen_reg_rtx (SImode);
3272
      emit_insn (gen_ashrsi2_16 (wrk, operands[1]));
3273
      value -= 16;
3274
      while (value--)
3275
        gen_ashift (ASHIFTRT, 1, wrk);
3276
      emit_move_insn (operands[0], wrk);
3277
      return 1;
3278
    }
3279
  /* Expand a short sequence inline; for longer counts, call a magic routine.  */
3280
  else if (value <= 5)
3281
    {
3282
      wrk = gen_reg_rtx (SImode);
3283
      emit_move_insn (wrk, operands[1]);
3284
      while (value--)
3285
        gen_ashift (ASHIFTRT, 1, wrk);
3286
      emit_move_insn (operands[0], wrk);
3287
      return 1;
3288
    }
3289
 
3290
  wrk = gen_reg_rtx (Pmode);
3291
 
3292
  /* Load the value into an arg reg and call a helper.  */
3293
  emit_move_insn (gen_rtx_REG (SImode, 4), operands[1]);
3294
  sprintf (func, "__ashiftrt_r4_%d", value);
3295
  function_symbol (wrk, func, SFUNC_STATIC);
3296
  emit_insn (gen_ashrsi3_n (GEN_INT (value), wrk));
3297
  emit_move_insn (operands[0], gen_rtx_REG (SImode, 4));
3298
  return 1;
3299
}
3300
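 
/* Illustrative sketch, not part of this file's build: the value == 31 case
   above relies on the fact that an arithmetic right shift by 31 merely
   replicates the sign bit, so the result is -1 for negative inputs and 0
   otherwise; that is why a compare-greater-than plus a "move negated T bit"
   sequence can stand in for the shift.  The check below assumes >> on a
   negative int32_t is an arithmetic shift, which is implementation-defined
   in C but holds for the compilers GCC supports.  */
#if 0
#include <assert.h>
#include <stdint.h>

int
main (void)
{
  int32_t vals[] = { 5, 0, -5, INT32_MIN, INT32_MAX };
  int i;

  for (i = 0; i < 5; i++)
    assert ((vals[i] >> 31) == (vals[i] < 0 ? -1 : 0));
  return 0;
}
#endif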
 
3301
int
3302
sh_dynamicalize_shift_p (rtx count)
3303
{
3304
  return shift_insns[INTVAL (count) & 31] > 1 + SH_DYNAMIC_SHIFT_COST;
3305
}
3306
 
3307
/* Try to find a good way to implement the combiner pattern
3308
  [(set (match_operand:SI 0 "register_operand" "r")
3309
        (and:SI (ashift:SI (match_operand:SI 1 "register_operand" "r")
3310
                           (match_operand:SI 2 "const_int_operand" "n"))
3311
                (match_operand:SI 3 "const_int_operand" "n"))) .
3312
  LEFT_RTX is operand 2 in the above pattern, and MASK_RTX is operand 3.
3313
  return 0 for simple right / left or left/right shift combination.
3314
  return 1 for a combination of shifts with zero_extend.
3315
  return 2 for a combination of shifts with an AND that needs r0.
3316
  return 3 for a combination of shifts with an AND that needs an extra
3317
    scratch register, when the three highmost bits of the AND mask are clear.
3318
  return 4 for a combination of shifts with an AND that needs an extra
3319
    scratch register, when any of the three highmost bits of the AND mask
3320
    is set.
3321
  If ATTRP is set, store an initial right shift width in ATTRP[0],
3322
  and the instruction length in ATTRP[1] .  These values are not valid
3323
  when returning 0.
3324
  When ATTRP is set and returning 1, ATTRP[2] gets set to the index into
3325
  shift_amounts for the last shift value that is to be used before the
3326
  sign extend.  */
3327
int
3328
shl_and_kind (rtx left_rtx, rtx mask_rtx, int *attrp)
3329
{
3330
  unsigned HOST_WIDE_INT mask, lsb, mask2, lsb2;
3331
  int left = INTVAL (left_rtx), right;
3332
  int best = 0;
3333
  int cost, best_cost = 10000;
3334
  int best_right = 0, best_len = 0;
3335
  int i;
3336
  int can_ext;
3337
 
3338
  if (left < 0 || left > 31)
3339
    return 0;
3340
  if (CONST_INT_P (mask_rtx))
3341
    mask = (unsigned HOST_WIDE_INT) INTVAL (mask_rtx) >> left;
3342
  else
3343
    mask = (unsigned HOST_WIDE_INT) GET_MODE_MASK (SImode) >> left;
3344
  /* Can this be expressed as a right shift / left shift pair?  */
3345
  lsb = ((mask ^ (mask - 1)) >> 1) + 1;
3346
  right = exact_log2 (lsb);
3347
  mask2 = ~(mask + lsb - 1);
3348
  lsb2 = ((mask2 ^ (mask2 - 1)) >> 1) + 1;
3349
  /* mask has no zeroes but trailing zeroes <==> ! mask2 */
3350
  if (! mask2)
3351
    best_cost = shift_insns[right] + shift_insns[right + left];
3352
  /* mask has no trailing zeroes <==> ! right */
3353
  else if (! right && mask2 == ~(lsb2 - 1))
3354
    {
3355
      int late_right = exact_log2 (lsb2);
3356
      best_cost = shift_insns[left + late_right] + shift_insns[late_right];
3357
    }
3358
  /* Try to use zero extend.  */
3359
  if (mask2 == ~(lsb2 - 1))
3360
    {
3361
      int width, first;
3362
 
3363
      for (width = 8; width <= 16; width += 8)
3364
        {
3365
          /* Can we zero-extend right away?  */
3366
          if (lsb2 == (unsigned HOST_WIDE_INT) 1 << width)
3367
            {
3368
              cost
3369
                = 1 + ext_shift_insns[right] + ext_shift_insns[left + right];
3370
              if (cost < best_cost)
3371
                {
3372
                  best = 1;
3373
                  best_cost = cost;
3374
                  best_right = right;
3375
                  best_len = cost;
3376
                  if (attrp)
3377
                    attrp[2] = -1;
3378
                }
3379
              continue;
3380
            }
3381
          /* ??? Could try to put zero extend into initial right shift,
3382
             or even shift a bit left before the right shift.  */
3383
          /* Determine value of first part of left shift, to get to the
3384
             zero extend cut-off point.  */
3385
          first = width - exact_log2 (lsb2) + right;
3386
          if (first >= 0 && right + left - first >= 0)
3387
            {
3388
              cost = ext_shift_insns[right] + ext_shift_insns[first] + 1
3389
                + ext_shift_insns[right + left - first];
3390
              if (cost < best_cost)
3391
                {
3392
                  best = 1;
3393
                  best_cost = cost;
3394
                  best_right = right;
3395
                  best_len = cost;
3396
                  if (attrp)
3397
                    attrp[2] = first;
3398
                }
3399
            }
3400
        }
3401
    }
3402
  /* Try to use r0 AND pattern */
3403
  for (i = 0; i <= 2; i++)
3404
    {
3405
      if (i > right)
3406
        break;
3407
      if (! CONST_OK_FOR_K08 (mask >> i))
3408
        continue;
3409
      cost = (i != 0) + 2 + ext_shift_insns[left + i];
3410
      if (cost < best_cost)
3411
        {
3412
          best = 2;
3413
          best_cost = cost;
3414
          best_right = i;
3415
          best_len = cost - 1;
3416
        }
3417
    }
3418
  /* Try to use a scratch register to hold the AND operand.  */
3419
  can_ext = ((mask << left) & ((unsigned HOST_WIDE_INT) 3 << 30)) == 0;
3420
  for (i = 0; i <= 2; i++)
3421
    {
3422
      if (i > right)
3423
        break;
3424
      cost = (i != 0) + (CONST_OK_FOR_I08 (mask >> i) ? 2 : 3)
3425
        + (can_ext ? ext_shift_insns : shift_insns)[left + i];
3426
      if (cost < best_cost)
3427
        {
3428
          best = 4 - can_ext;
3429
          best_cost = cost;
3430
          best_right = i;
3431
          best_len = cost - 1 - ! CONST_OK_FOR_I08 (mask >> i);
3432
        }
3433
    }
3434
 
3435
  if (attrp)
3436
    {
3437
      attrp[0] = best_right;
3438
      attrp[1] = best_len;
3439
    }
3440
  return best;
3441
}
3442
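 
/* Illustrative sketch, not part of this file's build: shl_and_kind above
   uses the classic lowest-set-bit identity.  ((mask ^ (mask - 1)) >> 1) + 1
   isolates the least significant 1 bit of MASK, and exact_log2 of that bit
   is the right-shift count needed before the mask's trailing zeroes can be
   discarded.  */
#if 0
#include <assert.h>
#include <stdio.h>

int
main (void)
{
  unsigned int mask = 0x0ff0;
  unsigned int lsb = ((mask ^ (mask - 1)) >> 1) + 1;

  assert (lsb == 0x10);            /* lowest set bit of 0x0ff0 */
  assert (lsb == (mask & -mask));  /* same as the more familiar form */
  printf ("mask %#x: lsb %#x, so shift right by 4 before masking\n",
          mask, lsb);
  return 0;
}
#endif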
 
3443
/* This is used in length attributes of the unnamed instructions
3444
   corresponding to shl_and_kind return values of 1 and 2.  */
3445
int
3446
shl_and_length (rtx insn)
3447
{
3448
  rtx set_src, left_rtx, mask_rtx;
3449
  int attributes[3];
3450
 
3451
  set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
3452
  left_rtx = XEXP (XEXP (set_src, 0), 1);
3453
  mask_rtx = XEXP (set_src, 1);
3454
  shl_and_kind (left_rtx, mask_rtx, attributes);
3455
  return attributes[1];
3456
}
3457
 
3458
/* This is used in length attribute of the and_shl_scratch instruction.  */
3459
 
3460
int
3461
shl_and_scr_length (rtx insn)
3462
{
3463
  rtx set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
3464
  int len = shift_insns[INTVAL (XEXP (set_src, 1)) & 31];
3465
  rtx op = XEXP (set_src, 0);
3466
  len += shift_insns[INTVAL (XEXP (op, 1)) & 31] + 1;
3467
  op = XEXP (XEXP (op, 0), 0);
3468
  return len + shift_insns[INTVAL (XEXP (op, 1)) & 31];
3469
}
3470
 
3471
/* Generate rtl for instructions for which shl_and_kind advised a particular
3472
   method of generating them, i.e. returned zero.  */
3473
 
3474
int
3475
gen_shl_and (rtx dest, rtx left_rtx, rtx mask_rtx, rtx source)
3476
{
3477
  int attributes[3];
3478
  unsigned HOST_WIDE_INT mask;
3479
  int kind = shl_and_kind (left_rtx, mask_rtx, attributes);
3480
  int right, total_shift;
3481
  void (*shift_gen_fun) (int, rtx *) = gen_shifty_hi_op;
3482
 
3483
  right = attributes[0];
3484
  total_shift = INTVAL (left_rtx) + right;
3485
  mask = (unsigned HOST_WIDE_INT) INTVAL (mask_rtx) >> total_shift;
3486
  switch (kind)
3487
    {
3488
    default:
3489
      return -1;
3490
    case 1:
3491
      {
3492
        int first = attributes[2];
3493
        rtx operands[3];
3494
 
3495
        if (first < 0)
3496
          {
3497
            emit_insn ((mask << right) <= 0xff
3498
                       ? gen_zero_extendqisi2 (dest,
3499
                                               gen_lowpart (QImode, source))
3500
                       : gen_zero_extendhisi2 (dest,
3501
                                               gen_lowpart (HImode, source)));
3502
            source = dest;
3503
          }
3504
        if (source != dest)
3505
          emit_insn (gen_movsi (dest, source));
3506
        operands[0] = dest;
3507
        if (right)
3508
          {
3509
            operands[2] = GEN_INT (right);
3510
            gen_shifty_hi_op (LSHIFTRT, operands);
3511
          }
3512
        if (first > 0)
3513
          {
3514
            operands[2] = GEN_INT (first);
3515
            gen_shifty_hi_op (ASHIFT, operands);
3516
            total_shift -= first;
3517
            mask <<= first;
3518
          }
3519
        if (first >= 0)
3520
          emit_insn (mask <= 0xff
3521
                     ? gen_zero_extendqisi2 (dest, gen_lowpart (QImode, dest))
3522
                     : gen_zero_extendhisi2 (dest, gen_lowpart (HImode, dest)));
3523
        if (total_shift > 0)
3524
          {
3525
            operands[2] = GEN_INT (total_shift);
3526
            gen_shifty_hi_op (ASHIFT, operands);
3527
          }
3528
        break;
3529
      }
3530
    case 4:
3531
      shift_gen_fun = gen_shifty_op;
3532
    case 3:
3533
      /* If the topmost bit that matters is set, set the topmost bits
3534
         that don't matter.  This way, we might be able to get a shorter
3535
         signed constant.  */
3536
      if (mask & ((HOST_WIDE_INT) 1 << (31 - total_shift)))
3537
        mask |= (HOST_WIDE_INT) ~0 << (31 - total_shift);
3538
    case 2:
3539
      /* Don't expand fine-grained when combining, because that will
3540
         make the pattern fail.  */
3541
      if (currently_expanding_to_rtl
3542
          || reload_in_progress || reload_completed)
3543
        {
3544
          rtx operands[3];
3545
 
3546
          /* Cases 3 and 4 should be handled by this split
3547
             only while combining.  */
3548
          gcc_assert (kind <= 2);
3549
          if (right)
3550
            {
3551
              emit_insn (gen_lshrsi3 (dest, source, GEN_INT (right)));
3552
              source = dest;
3553
            }
3554
          emit_insn (gen_andsi3 (dest, source, GEN_INT (mask)));
3555
          if (total_shift)
3556
            {
3557
              operands[0] = dest;
3558
              operands[1] = dest;
3559
              operands[2] = GEN_INT (total_shift);
3560
              shift_gen_fun (ASHIFT, operands);
3561
            }
3562
          break;
3563
        }
3564
      else
3565
        {
3566
          int neg = 0;
3567
          if (kind != 4 && total_shift < 16)
3568
            {
3569
              neg = -ext_shift_amounts[total_shift][1];
3570
              if (neg > 0)
3571
                neg -= ext_shift_amounts[total_shift][2];
3572
              else
3573
                neg = 0;
3574
            }
3575
          emit_insn (gen_and_shl_scratch (dest, source,
3576
                                          GEN_INT (right),
3577
                                          GEN_INT (mask),
3578
                                          GEN_INT (total_shift + neg),
3579
                                          GEN_INT (neg)));
3580
          emit_insn (gen_movsi (dest, dest));
3581
          break;
3582
        }
3583
    }
3584
  return 0;
3585
}
3586
 
3587
/* Try to find a good way to implement the combiner pattern
3588
  [(set (match_operand:SI 0 "register_operand" "=r")
3589
        (sign_extract:SI (ashift:SI (match_operand:SI 1 "register_operand" "r")
3590
                                    (match_operand:SI 2 "const_int_operand" "n")
3591
                         (match_operand:SI 3 "const_int_operand" "n")
3592
                         (const_int 0)))
3593
   (clobber (reg:SI T_REG))]
3594
  LEFT_RTX is operand 2 in the above pattern, and SIZE_RTX is operand 3.
3595
  return 0 for simple left / right shift combination.
3596
  return 1 for left shift / 8 bit sign extend / left shift.
3597
  return 2 for left shift / 16 bit sign extend / left shift.
3598
  return 3 for left shift / 8 bit sign extend / shift / sign extend.
3599
  return 4 for left shift / 16 bit sign extend / shift / sign extend.
3600
  return 5 for left shift / 16 bit sign extend / right shift
3601
  return 6 for < 8 bit sign extend / left shift.
3602
  return 7 for < 8 bit sign extend / left shift / single right shift.
3603
  If COSTP is nonzero, assign the calculated cost to *COSTP.  */
3604
 
3605
int
3606
shl_sext_kind (rtx left_rtx, rtx size_rtx, int *costp)
3607
{
3608
  int left, size, insize, ext;
3609
  int cost = 0, best_cost;
3610
  int kind;
3611
 
3612
  left = INTVAL (left_rtx);
3613
  size = INTVAL (size_rtx);
3614
  insize = size - left;
3615
  gcc_assert (insize > 0);
3616
  /* Default to left / right shift.  */
3617
  kind = 0;
3618
  best_cost = shift_insns[32 - insize] + ashiftrt_insns[32 - size];
3619
  if (size <= 16)
3620
    {
3621
      /* 16 bit shift / sign extend / 16 bit shift */
3622
      cost = shift_insns[16 - insize] + 1 + ashiftrt_insns[16 - size];
3623
      /* If ashiftrt_insns[16 - size] is 8, this choice will be overridden
3624
         below, by alternative 3 or something even better.  */
3625
      if (cost < best_cost)
3626
        {
3627
          kind = 5;
3628
          best_cost = cost;
3629
        }
3630
    }
3631
  /* Try a plain sign extend between two shifts.  */
3632
  for (ext = 16; ext >= insize; ext -= 8)
3633
    {
3634
      if (ext <= size)
3635
        {
3636
          cost = ext_shift_insns[ext - insize] + 1 + shift_insns[size - ext];
3637
          if (cost < best_cost)
3638
            {
3639
              kind = ext / (unsigned) 8;
3640
              best_cost = cost;
3641
            }
3642
        }
3643
      /* Check if we can do a sloppy shift with a final signed shift
3644
         restoring the sign.  */
3645
      if (EXT_SHIFT_SIGNED (size - ext))
3646
        cost = ext_shift_insns[ext - insize] + ext_shift_insns[size - ext] + 1;
3647
      /* If not, maybe it's still cheaper to do the second shift sloppy,
3648
         and do a final sign extend?  */
3649
      else if (size <= 16)
3650
        cost = ext_shift_insns[ext - insize] + 1
3651
          + ext_shift_insns[size > ext ? size - ext : ext - size] + 1;
3652
      else
3653
        continue;
3654
      if (cost < best_cost)
3655
        {
3656
          kind = ext / (unsigned) 8 + 2;
3657
          best_cost = cost;
3658
        }
3659
    }
3660
  /* Check if we can sign extend in r0 */
3661
  if (insize < 8)
3662
    {
3663
      cost = 3 + shift_insns[left];
3664
      if (cost < best_cost)
3665
        {
3666
          kind = 6;
3667
          best_cost = cost;
3668
        }
3669
      /* Try the same with a final signed shift.  */
3670
      if (left < 31)
3671
        {
3672
          cost = 3 + ext_shift_insns[left + 1] + 1;
3673
          if (cost < best_cost)
3674
            {
3675
              kind = 7;
3676
              best_cost = cost;
3677
            }
3678
        }
3679
    }
3680
  if (TARGET_SH3)
3681
    {
3682
      /* Try to use a dynamic shift.  */
3683
      cost = shift_insns[32 - insize] + 1 + SH_DYNAMIC_SHIFT_COST;
3684
      if (cost < best_cost)
3685
        {
3686
          kind = 0;
3687
          best_cost = cost;
3688
        }
3689
    }
3690
  if (costp)
3691
    *costp = cost;
3692
  return kind;
3693
}
3694
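 
/* Illustrative sketch, not part of this file's build: the default strategy
   (kind 0) chosen by shl_sext_kind above implements
   (sign_extract:SI (ashift:SI x left) size 0) as a left shift by
   32 - insize followed by an arithmetic right shift by 32 - size, with
   insize = size - left.  The check below verifies that identity for one
   example; it assumes >> on a negative int32_t is an arithmetic shift, as
   it is for the compilers and targets GCC supports.  */
#if 0
#include <assert.h>
#include <stdint.h>

/* Sign-extend the low SIZE bits of V.  */
static int32_t
sign_extract_low (int32_t v, int size)
{
  uint32_t u = (uint32_t) v << (32 - size);
  return (int32_t) u >> (32 - size);
}

int
main (void)
{
  int32_t x = 0x1234;
  int left = 4, size = 12, insize = size - left;
  int32_t expected = sign_extract_low (x << left, size);
  int32_t got = (int32_t) ((uint32_t) x << (32 - insize)) >> (32 - size);

  assert (got == expected);        /* both are 0x340 for this input */
  return 0;
}
#endif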
 
3695
/* Function to be used in the length attribute of the instructions
3696
   implementing this pattern.  */
3697
 
3698
int
3699
shl_sext_length (rtx insn)
3700
{
3701
  rtx set_src, left_rtx, size_rtx;
3702
  int cost;
3703
 
3704
  set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
3705
  left_rtx = XEXP (XEXP (set_src, 0), 1);
3706
  size_rtx = XEXP (set_src, 1);
3707
  shl_sext_kind (left_rtx, size_rtx, &cost);
3708
  return cost;
3709
}
3710
 
3711
/* Generate rtl for this pattern */
3712
 
3713
int
3714
gen_shl_sext (rtx dest, rtx left_rtx, rtx size_rtx, rtx source)
3715
{
3716
  int kind;
3717
  int left, size, insize, cost;
3718
  rtx operands[3];
3719
 
3720
  kind = shl_sext_kind (left_rtx, size_rtx, &cost);
3721
  left = INTVAL (left_rtx);
3722
  size = INTVAL (size_rtx);
3723
  insize = size - left;
3724
  switch (kind)
3725
    {
3726
    case 1:
3727
    case 2:
3728
    case 3:
3729
    case 4:
3730
      {
3731
        int ext = kind & 1 ? 8 : 16;
3732
        int shift2 = size - ext;
3733
 
3734
        /* Don't expand fine-grained when combining, because that will
3735
           make the pattern fail.  */
3736
        if (! currently_expanding_to_rtl
3737
            && ! reload_in_progress && ! reload_completed)
3738
          {
3739
            emit_insn (gen_shl_sext_ext (dest, source, left_rtx, size_rtx));
3740
            emit_insn (gen_movsi (dest, source));
3741
            break;
3742
          }
3743
        if (dest != source)
3744
          emit_insn (gen_movsi (dest, source));
3745
        operands[0] = dest;
3746
        if (ext - insize)
3747
          {
3748
            operands[2] = GEN_INT (ext - insize);
3749
            gen_shifty_hi_op (ASHIFT, operands);
3750
          }
3751
        emit_insn (kind & 1
3752
                   ? gen_extendqisi2 (dest, gen_lowpart (QImode, dest))
3753
                   : gen_extendhisi2 (dest, gen_lowpart (HImode, dest)));
3754
        if (kind <= 2)
3755
          {
3756
            if (shift2)
3757
              {
3758
                operands[2] = GEN_INT (shift2);
3759
                gen_shifty_op (ASHIFT, operands);
3760
              }
3761
          }
3762
        else
3763
          {
3764
            if (shift2 > 0)
3765
              {
3766
                if (EXT_SHIFT_SIGNED (shift2))
3767
                  {
3768
                    operands[2] = GEN_INT (shift2 + 1);
3769
                    gen_shifty_op (ASHIFT, operands);
3770
                    operands[2] = const1_rtx;
3771
                    gen_shifty_op (ASHIFTRT, operands);
3772
                    break;
3773
                  }
3774
                operands[2] = GEN_INT (shift2);
3775
                gen_shifty_hi_op (ASHIFT, operands);
3776
              }
3777
            else if (shift2)
3778
              {
3779
                operands[2] = GEN_INT (-shift2);
3780
                gen_shifty_hi_op (LSHIFTRT, operands);
3781
              }
3782
            emit_insn (size <= 8
3783
                       ? gen_extendqisi2 (dest, gen_lowpart (QImode, dest))
3784
                       : gen_extendhisi2 (dest, gen_lowpart (HImode, dest)));
3785
          }
3786
        break;
3787
      }
3788
    case 5:
3789
      {
3790
        int i = 16 - size;
3791
        if (! currently_expanding_to_rtl
3792
            && ! reload_in_progress && ! reload_completed)
3793
          emit_insn (gen_shl_sext_ext (dest, source, left_rtx, size_rtx));
3794
        else
3795
          {
3796
            operands[0] = dest;
3797
            operands[2] = GEN_INT (16 - insize);
3798
            gen_shifty_hi_op (ASHIFT, operands);
3799
            emit_insn (gen_extendhisi2 (dest, gen_lowpart (HImode, dest)));
3800
          }
3801
        /* Don't use gen_ashrsi3 because it generates new pseudos.  */
3802
        while (--i >= 0)
3803
          gen_ashift (ASHIFTRT, 1, dest);
3804
        break;
3805
      }
3806
    case 6:
3807
    case 7:
3808
      /* Don't expand fine-grained when combining, because that will
3809
         make the pattern fail.  */
3810
      if (! currently_expanding_to_rtl
3811
          && ! reload_in_progress && ! reload_completed)
3812
        {
3813
          emit_insn (gen_shl_sext_ext (dest, source, left_rtx, size_rtx));
3814
          emit_insn (gen_movsi (dest, source));
3815
          break;
3816
        }
3817
      emit_insn (gen_andsi3 (dest, source, GEN_INT ((1 << insize) - 1)));
3818
      emit_insn (gen_xorsi3 (dest, dest, GEN_INT (1 << (insize - 1))));
3819
      emit_insn (gen_addsi3 (dest, dest, GEN_INT (-1 << (insize - 1))));
3820
      operands[0] = dest;
3821
      operands[2] = kind == 7 ? GEN_INT (left + 1) : left_rtx;
3822
      gen_shifty_op (ASHIFT, operands);
3823
      if (kind == 7)
3824
        emit_insn (gen_ashrsi3_k (dest, dest, const1_rtx));
3825
      break;
3826
    default:
3827
      return -1;
3828
    }
3829
  return 0;
3830
}
3831
 
3832
/* Prefix a symbol_ref name with "datalabel".  */
3833
 
3834
rtx
3835
gen_datalabel_ref (rtx sym)
3836
{
3837
  const char *str;
3838
 
3839
  if (GET_CODE (sym) == LABEL_REF)
3840
    return gen_rtx_CONST (GET_MODE (sym),
3841
                          gen_rtx_UNSPEC (GET_MODE (sym),
3842
                                          gen_rtvec (1, sym),
3843
                                          UNSPEC_DATALABEL));
3844
 
3845
  gcc_assert (GET_CODE (sym) == SYMBOL_REF);
3846
 
3847
  str = XSTR (sym, 0);
3848
  /* Share all SYMBOL_REF strings with the same value - that is important
3849
     for cse.  */
3850
  str = IDENTIFIER_POINTER (get_identifier (str));
3851
  XSTR (sym, 0) = str;
3852
 
3853
  return sym;
3854
}
3855
 
3856
 
3857
static alloc_pool label_ref_list_pool;
3858
 
3859
typedef struct label_ref_list_d
3860
{
3861
  rtx label;
3862
  struct label_ref_list_d *next;
3863
} *label_ref_list_t;
3864
 
3865
/* The SH cannot load a large constant into a register; constants have to
3866
   come from a pc relative load.  The reference of a pc relative load
3867
   instruction must be less than 1k in front of the instruction.  This
3868
   means that we often have to dump a constant inside a function, and
3869
   generate code to branch around it.
3870
 
3871
   It is important to minimize this, since the branches will slow things
3872
   down and make things bigger.
3873
 
3874
   Worst case code looks like:
3875
 
3876
   mov.l L1,rn
3877
   bra   L2
3878
   nop
3879
   align
3880
   L1:   .long value
3881
   L2:
3882
   ..
3883
 
3884
   mov.l L3,rn
3885
   bra   L4
3886
   nop
3887
   align
3888
   L3:   .long value
3889
   L4:
3890
   ..
3891
 
3892
   We fix this by performing a scan before scheduling, which notices which
3893
   instructions need to have their operands fetched from the constant table
3894
   and builds the table.
3895
 
3896
   The algorithm is:
3897
 
3898
   scan, find an instruction which needs a pcrel move.  Look forward, find the
3899
   last barrier which is within MAX_COUNT bytes of the requirement.
3900
   If there isn't one, make one.  Process all the instructions between
3901
   the find and the barrier.
3902
 
3903
   In the above example, we can tell that L3 is within 1k of L1, so
3904
   the first move can be shrunk from the 3 insn+constant sequence into
3905
   just 1 insn, and the constant moved to L3 to make:
3906
 
3907
   mov.l        L1,rn
3908
   ..
3909
   mov.l        L3,rn
3910
   bra          L4
3911
   nop
3912
   align
3913
   L3:.long value
3914
   L4:.long value
3915
 
3916
   Then the second move becomes the target for the shortening process.  */
3917
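 
/* Illustrative sketch, not part of this file's build: a toy model of the
   scan described above.  Walk forward from an instruction that needs a
   pc-relative constant, remember the last barrier seen while the running
   byte count is still within the addressing range, and dump the table
   there.  The real code (find_barrier / dump_table below) additionally
   creates a barrier when none is in range and tracks separate limits for
   mov.w and mov.l loads; the toy_* names here are invented for this
   example only.  */
#if 0
#include <stdio.h>

struct toy_insn { int length; int is_barrier; };

static int
toy_place_pool (const struct toy_insn *insns, int n, int max_count)
{
  int bytes = 0, last_barrier = -1, i;

  for (i = 0; i < n && bytes < max_count; i++)
    {
      if (insns[i].is_barrier)
        last_barrier = i;
      bytes += insns[i].length;
    }
  return last_barrier;   /* -1 means no barrier in range: one must be made.  */
}

int
main (void)
{
  struct toy_insn prog[] = { {2, 0}, {2, 0}, {0, 1}, {4, 0}, {2, 0} };

  printf ("pool goes after insn %d\n", toy_place_pool (prog, 5, 1018));
  return 0;
}
#endif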
 
3918
typedef struct
3919
{
3920
  rtx value;                    /* Value in table.  */
3921
  rtx label;                    /* Label of value.  */
3922
  label_ref_list_t wend;        /* End of window.  */
3923
  enum machine_mode mode;       /* Mode of value.  */
3924
 
3925
  /* True if this constant is accessed as part of a post-increment
3926
     sequence.  Note that HImode constants are never accessed in this way.  */
3927
  bool part_of_sequence_p;
3928
} pool_node;
3929
 
3930
/* The maximum number of constants that can fit into one pool, since
3931
   constants in the range 0..510 are at least 2 bytes long, and in the
3932
   range from there to 1018 at least 4 bytes.  */
3933
 
3934
#define MAX_POOL_SIZE 372
3935
static pool_node pool_vector[MAX_POOL_SIZE];
3936
static int pool_size;
3937
static rtx pool_window_label;
3938
static int pool_window_last;
3939
 
3940
static int max_labelno_before_reorg;
3941
 
3942
/* ??? If we need a constant in HImode which is the truncated value of a
3943
   constant we need in SImode, we could combine the two entries thus saving
3944
   two bytes.  Is this common enough to be worth the effort of implementing
3945
   it?  */
3946
 
3947
/* ??? This stuff should be done at the same time that we shorten branches.
3948
   As it is now, we must assume that all branches are the maximum size, and
3949
   this causes us to almost always output constant pools sooner than
3950
   necessary.  */
3951
 
3952
/* Add a constant to the pool and return its label.  */
3953
 
3954
static rtx
3955
add_constant (rtx x, enum machine_mode mode, rtx last_value)
3956
{
3957
  int i;
3958
  rtx lab, new_rtx;
3959
  label_ref_list_t ref, newref;
3960
 
3961
  /* First see if we've already got it.  */
3962
  for (i = 0; i < pool_size; i++)
3963
    {
3964
      if (x->code == pool_vector[i].value->code
3965
          && mode == pool_vector[i].mode)
3966
        {
3967
          if (x->code == CODE_LABEL)
3968
            {
3969
              if (XINT (x, 3) != XINT (pool_vector[i].value, 3))
3970
                continue;
3971
            }
3972
          if (rtx_equal_p (x, pool_vector[i].value))
3973
            {
3974
              lab = new_rtx = 0;
3975
              if (! last_value
3976
                  || ! i
3977
                  || ! rtx_equal_p (last_value, pool_vector[i-1].value))
3978
                {
3979
                  new_rtx = gen_label_rtx ();
3980
                  LABEL_REFS (new_rtx) = pool_vector[i].label;
3981
                  pool_vector[i].label = lab = new_rtx;
3982
                }
3983
              if (lab && pool_window_label)
3984
                {
3985
                  newref = (label_ref_list_t) pool_alloc (label_ref_list_pool);
3986
                  newref->label = pool_window_label;
3987
                  ref = pool_vector[pool_window_last].wend;
3988
                  newref->next = ref;
3989
                  pool_vector[pool_window_last].wend = newref;
3990
                }
3991
              if (new_rtx)
3992
                pool_window_label = new_rtx;
3993
              pool_window_last = i;
3994
              return lab;
3995
            }
3996
        }
3997
    }
3998
 
3999
  /* Need a new one.  */
4000
  pool_vector[pool_size].value = x;
4001
  if (last_value && rtx_equal_p (last_value, pool_vector[pool_size - 1].value))
4002
    {
4003
      lab = 0;
4004
      pool_vector[pool_size - 1].part_of_sequence_p = true;
4005
    }
4006
  else
4007
    lab = gen_label_rtx ();
4008
  pool_vector[pool_size].mode = mode;
4009
  pool_vector[pool_size].label = lab;
4010
  pool_vector[pool_size].wend = NULL;
4011
  pool_vector[pool_size].part_of_sequence_p = (lab == 0);
4012
  if (lab && pool_window_label)
4013
    {
4014
      newref = (label_ref_list_t) pool_alloc (label_ref_list_pool);
4015
      newref->label = pool_window_label;
4016
      ref = pool_vector[pool_window_last].wend;
4017
      newref->next = ref;
4018
      pool_vector[pool_window_last].wend = newref;
4019
    }
4020
  if (lab)
4021
    pool_window_label = lab;
4022
  pool_window_last = pool_size;
4023
  pool_size++;
4024
  return lab;
4025
}
4026
 
4027
/* Output the literal table.  START, if nonzero, is the first instruction
4028
   this table is needed for, and also indicates that there is at least one
4029
   casesi_worker_2 instruction; we have to emit the operand3 labels from
4030
   these insns at a 4-byte aligned position.  BARRIER is the barrier
4031
   after which we are to place the table.  */
4032
 
4033
static void
4034
dump_table (rtx start, rtx barrier)
4035
{
4036
  rtx scan = barrier;
4037
  int i;
4038
  int need_align = 1;
4039
  rtx lab;
4040
  label_ref_list_t ref;
4041
  int have_df = 0;
4042
 
4043
  /* Do two passes, first time dump out the HI sized constants.  */
4044
 
4045
  for (i = 0; i < pool_size; i++)
4046
    {
4047
      pool_node *p = &pool_vector[i];
4048
 
4049
      if (p->mode == HImode)
4050
        {
4051
          if (need_align)
4052
            {
4053
              scan = emit_insn_after (gen_align_2 (), scan);
4054
              need_align = 0;
4055
            }
4056
          for (lab = p->label; lab; lab = LABEL_REFS (lab))
4057
            scan = emit_label_after (lab, scan);
4058
          scan = emit_insn_after (gen_consttable_2 (p->value, const0_rtx),
4059
                                  scan);
4060
          for (ref = p->wend; ref; ref = ref->next)
4061
            {
4062
              lab = ref->label;
4063
              scan = emit_insn_after (gen_consttable_window_end (lab), scan);
4064
            }
4065
        }
4066
      else if (p->mode == DFmode)
4067
        have_df = 1;
4068
    }
4069
 
4070
  need_align = 1;
4071
 
4072
  if (start)
4073
    {
4074
      scan = emit_insn_after (gen_align_4 (), scan);
4075
      need_align = 0;
4076
      for (; start != barrier; start = NEXT_INSN (start))
4077
        if (NONJUMP_INSN_P (start)
4078
            && recog_memoized (start) == CODE_FOR_casesi_worker_2)
4079
          {
4080
            rtx src = SET_SRC (XVECEXP (PATTERN (start), 0, 0));
4081
            rtx lab = XEXP (XVECEXP (src, 0, 3), 0);
4082
 
4083
            scan = emit_label_after (lab, scan);
4084
          }
4085
    }
4086
  if (TARGET_FMOVD && TARGET_ALIGN_DOUBLE && have_df)
4087
    {
4088
      rtx align_insn = NULL_RTX;
4089
 
4090
      scan = emit_label_after (gen_label_rtx (), scan);
4091
      scan = emit_insn_after (gen_align_log (GEN_INT (3)), scan);
4092
      need_align = 0;
4093
 
4094
      for (i = 0; i < pool_size; i++)
4095
        {
4096
          pool_node *p = &pool_vector[i];
4097
 
4098
          switch (p->mode)
4099
            {
4100
            case HImode:
4101
              break;
4102
            case SImode:
4103
            case SFmode:
4104
              if (align_insn && !p->part_of_sequence_p)
4105
                {
4106
                  for (lab = p->label; lab; lab = LABEL_REFS (lab))
4107
                    emit_label_before (lab, align_insn);
4108
                  emit_insn_before (gen_consttable_4 (p->value, const0_rtx),
4109
                                    align_insn);
4110
                  for (ref = p->wend; ref; ref = ref->next)
4111
                    {
4112
                      lab = ref->label;
4113
                      emit_insn_before (gen_consttable_window_end (lab),
4114
                                        align_insn);
4115
                    }
4116
                  delete_insn (align_insn);
4117
                  align_insn = NULL_RTX;
4118
                  continue;
4119
                }
4120
              else
4121
                {
4122
                  for (lab = p->label; lab; lab = LABEL_REFS (lab))
4123
                    scan = emit_label_after (lab, scan);
4124
                  scan = emit_insn_after (gen_consttable_4 (p->value,
4125
                                                            const0_rtx), scan);
4126
                  need_align = ! need_align;
4127
                }
4128
              break;
4129
            case DFmode:
4130
              if (need_align)
4131
                {
4132
                  scan = emit_insn_after (gen_align_log (GEN_INT (3)), scan);
4133
                  align_insn = scan;
4134
                  need_align = 0;
4135
                }
4136
            case DImode:
4137
              for (lab = p->label; lab; lab = LABEL_REFS (lab))
4138
                scan = emit_label_after (lab, scan);
4139
              scan = emit_insn_after (gen_consttable_8 (p->value, const0_rtx),
4140
                                      scan);
4141
              break;
4142
            default:
4143
              gcc_unreachable ();
4144
            }
4145
 
4146
          if (p->mode != HImode)
4147
            {
4148
              for (ref = p->wend; ref; ref = ref->next)
4149
                {
4150
                  lab = ref->label;
4151
                  scan = emit_insn_after (gen_consttable_window_end (lab),
4152
                                          scan);
4153
                }
4154
            }
4155
        }
4156
 
4157
      pool_size = 0;
4158
    }
4159
 
4160
  for (i = 0; i < pool_size; i++)
4161
    {
4162
      pool_node *p = &pool_vector[i];
4163
 
4164
      switch (p->mode)
4165
        {
4166
        case HImode:
4167
          break;
4168
        case SImode:
4169
        case SFmode:
4170
          if (need_align)
4171
            {
4172
              need_align = 0;
4173
              scan = emit_label_after (gen_label_rtx (), scan);
4174
              scan = emit_insn_after (gen_align_4 (), scan);
4175
            }
4176
          for (lab = p->label; lab; lab = LABEL_REFS (lab))
4177
            scan = emit_label_after (lab, scan);
4178
          scan = emit_insn_after (gen_consttable_4 (p->value, const0_rtx),
4179
                                  scan);
4180
          break;
4181
        case DFmode:
4182
        case DImode:
4183
          if (need_align)
4184
            {
4185
              need_align = 0;
4186
              scan = emit_label_after (gen_label_rtx (), scan);
4187
              scan = emit_insn_after (gen_align_4 (), scan);
4188
            }
4189
          for (lab = p->label; lab; lab = LABEL_REFS (lab))
4190
            scan = emit_label_after (lab, scan);
4191
          scan = emit_insn_after (gen_consttable_8 (p->value, const0_rtx),
4192
                                  scan);
4193
          break;
4194
        default:
4195
          gcc_unreachable ();
4196
        }
4197
 
4198
      if (p->mode != HImode)
4199
        {
4200
          for (ref = p->wend; ref; ref = ref->next)
4201
            {
4202
              lab = ref->label;
4203
              scan = emit_insn_after (gen_consttable_window_end (lab), scan);
4204
            }
4205
        }
4206
    }
4207
 
4208
  scan = emit_insn_after (gen_consttable_end (), scan);
4209
  scan = emit_barrier_after (scan);
4210
  pool_size = 0;
4211
  pool_window_label = NULL_RTX;
4212
  pool_window_last = 0;
4213
}
4214
 
4215
/* Return nonzero if constant would be an ok source for a
4216
   mov.w instead of a mov.l.  */
4217
 
4218
static int
4219
hi_const (rtx src)
4220
{
4221
  return (CONST_INT_P (src)
4222
          && INTVAL (src) >= -32768
4223
          && INTVAL (src) <= 32767);
4224
}
4225
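 
/* Illustrative sketch, not part of this file's build: hi_const above
   accepts exactly the signed 16-bit range, i.e. the values a pc-relative
   mov.w can supply, letting such constants occupy a 2-byte pool slot
   instead of a 4-byte one.  The helper name is invented for this
   example.  */
#if 0
#include <assert.h>

static int
sketch_hi_const (long v)
{
  return v >= -32768 && v <= 32767;
}

int
main (void)
{
  assert (sketch_hi_const (32767));
  assert (sketch_hi_const (-32768));
  assert (!sketch_hi_const (65535));
  return 0;
}
#endif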
 
4226
#define MOVA_LABELREF(mova) XVECEXP (SET_SRC (PATTERN (mova)), 0, 0)
4227
 
4228
/* Nonzero if the insn is a move instruction which needs to be fixed.  */
4229
 
4230
/* ??? For DImode/DFmode moves, we don't need to fix it if each half of the
4231
   CONST_DOUBLE input value is CONST_OK_FOR_I08.  For a SFmode move, we don't
4232
   need to fix it if the input value is CONST_OK_FOR_I08.  */
4233
 
4234
static int
4235
broken_move (rtx insn)
4236
{
4237
  if (NONJUMP_INSN_P (insn))
4238
    {
4239
      rtx pat = PATTERN (insn);
4240
      if (GET_CODE (pat) == PARALLEL)
4241
        pat = XVECEXP (pat, 0, 0);
4242
      if (GET_CODE (pat) == SET
4243
          /* We can load any 8-bit value if we don't care what the high
4244
             order bits end up as.  */
4245
          && GET_MODE (SET_DEST (pat)) != QImode
4246
          && (CONSTANT_P (SET_SRC (pat))
4247
              /* Match mova_const.  */
4248
              || (GET_CODE (SET_SRC (pat)) == UNSPEC
4249
                  && XINT (SET_SRC (pat), 1) == UNSPEC_MOVA
4250
                  && GET_CODE (XVECEXP (SET_SRC (pat), 0, 0)) == CONST))
4251
          && ! (TARGET_SH2E
4252
                && GET_CODE (SET_SRC (pat)) == CONST_DOUBLE
4253
                && (fp_zero_operand (SET_SRC (pat))
4254
                    || fp_one_operand (SET_SRC (pat)))
4255
                /* In general we don't know the current setting of fpscr, so disable fldi.
4256
                   There is an exception if this was a register-register move
4257
                   before reload - and hence it was ascertained that we have
4258
                   single precision setting - and in a post-reload optimization
4259
                   we changed this to do a constant load.  In that case
4260
                   we don't have an r0 clobber, hence we must use fldi.  */
4261
                && (TARGET_FMOVD
4262
                    || (GET_CODE (XEXP (XVECEXP (PATTERN (insn), 0, 2), 0))
4263
                        == SCRATCH))
4264
                && REG_P (SET_DEST (pat))
4265
                && FP_REGISTER_P (REGNO (SET_DEST (pat))))
4266
          && ! (TARGET_SH2A
4267
                && GET_MODE (SET_DEST (pat)) == SImode
4268
                && (satisfies_constraint_I20 (SET_SRC (pat))
4269
                   || satisfies_constraint_I28 (SET_SRC (pat))))
4270
          && ! satisfies_constraint_I08 (SET_SRC (pat)))
4271
        return 1;
4272
    }
4273
 
4274
  return 0;
4275
}
4276
 
4277
static int
4278
mova_p (rtx insn)
4279
{
4280
  return (NONJUMP_INSN_P (insn)
4281
          && GET_CODE (PATTERN (insn)) == SET
4282
          && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
4283
          && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_MOVA
4284
          /* Don't match mova_const.  */
4285
          && GET_CODE (MOVA_LABELREF (insn)) == LABEL_REF);
4286
}
4287
 
4288
/* Fix up a mova from a switch that went out of range.  */
4289
static void
4290
fixup_mova (rtx mova)
4291
{
4292
  PUT_MODE (XEXP (MOVA_LABELREF (mova), 0), QImode);
4293
  if (! flag_pic)
4294
    {
4295
      SET_SRC (PATTERN (mova)) = MOVA_LABELREF (mova);
4296
      INSN_CODE (mova) = -1;
4297
    }
4298
  else
4299
    {
4300
      rtx worker = mova;
4301
      rtx lab = gen_label_rtx ();
4302
      rtx wpat, wpat0, wpat1, wsrc, target, base, diff;
4303
 
4304
      do
4305
        {
4306
          worker = NEXT_INSN (worker);
4307
          gcc_assert (worker
4308
                      && !LABEL_P (worker)
4309
                      && !JUMP_P (worker));
4310
        } while (NOTE_P (worker)
4311
                 || recog_memoized (worker) != CODE_FOR_casesi_worker_1);
4312
      wpat = PATTERN (worker);
4313
      wpat0 = XVECEXP (wpat, 0, 0);
4314
      wpat1 = XVECEXP (wpat, 0, 1);
4315
      wsrc = SET_SRC (wpat0);
4316
      PATTERN (worker) = (gen_casesi_worker_2
4317
                          (SET_DEST (wpat0), XVECEXP (wsrc, 0, 1),
4318
                           XEXP (XVECEXP (wsrc, 0, 2), 0), lab,
4319
                           XEXP (wpat1, 0)));
4320
      INSN_CODE (worker) = -1;
4321
      target = XVECEXP (SET_SRC (PATTERN (mova)), 0, 0);
4322
      base = gen_rtx_LABEL_REF (Pmode, lab);
4323
      diff = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, target, base), UNSPEC_SYMOFF);
4324
      SET_SRC (PATTERN (mova)) = gen_rtx_CONST (Pmode, diff);
4325
      INSN_CODE (mova) = -1;
4326
    }
4327
}
4328
 
4329
/* NEW_MOVA is a mova we've just encountered while scanning forward.  Update
4330
   *num_mova, and check if the new mova is not nested within the first one.
4331
   return 0 if *first_mova was replaced, 1 if new_mova was replaced,
4332
   2 if new_mova has been assigned to *first_mova, -1 otherwise.  */
4333
static int
4334
untangle_mova (int *num_mova, rtx *first_mova, rtx new_mova)
4335
{
4336
  int n_addr = 0; /* Initialization to shut up spurious warning.  */
4337
  int f_target, n_target = 0; /* Likewise.  */
4338
 
4339
  if (optimize)
4340
    {
4341
      /* If NEW_MOVA has no address yet, it will be handled later.  */
4342
      if (INSN_ADDRESSES_SIZE() <= (unsigned) INSN_UID (new_mova))
4343
        return -1;
4344
 
4345
      n_addr = INSN_ADDRESSES (INSN_UID (new_mova));
4346
      n_target = INSN_ADDRESSES (INSN_UID (XEXP (MOVA_LABELREF (new_mova), 0)));
4347
      if (n_addr > n_target || n_addr + 1022 < n_target)
4348
        {
4349
          /* Change the mova into a load.
4350
             broken_move will then return true for it.  */
4351
          fixup_mova (new_mova);
4352
          return 1;
4353
        }
4354
    }
4355
  if (!(*num_mova)++)
4356
    {
4357
      *first_mova = new_mova;
4358
      return 2;
4359
    }
4360
  if (!optimize
4361
      || ((f_target
4362
           = INSN_ADDRESSES (INSN_UID (XEXP (MOVA_LABELREF (*first_mova), 0))))
4363
          >= n_target))
4364
    return -1;
4365
 
4366
  (*num_mova)--;
4367
  if (f_target - INSN_ADDRESSES (INSN_UID (*first_mova))
4368
      > n_target - n_addr)
4369
    {
4370
      fixup_mova (*first_mova);
4371
      return 0;
4372
    }
4373
  else
4374
    {
4375
      fixup_mova (new_mova);
4376
      return 1;
4377
    }
4378
}
4379
 
4380
/* Find the last barrier from insn FROM which is close enough to hold the
4381
   constant pool.  If we can't find one, then create one near the end of
4382
   the range.  */
4383
 
4384
static rtx
4385
find_barrier (int num_mova, rtx mova, rtx from)
4386
{
4387
  int count_si = 0;
4388
  int count_hi = 0;
4389
  int found_hi = 0;
4390
  int found_si = 0;
4391
  int found_di = 0;
4392
  int hi_align = 2;
4393
  int si_align = 2;
4394
  int leading_mova = num_mova;
4395
  rtx barrier_before_mova = 0, found_barrier = 0, good_barrier = 0;
4396
  int si_limit;
4397
  int hi_limit;
4398
  rtx orig = from;
4399
  rtx last_got = NULL_RTX;
4400
  rtx last_symoff = NULL_RTX;
4401
 
4402
  /* For HImode: range is 510, add 4 because pc counts from address of
4403
     second instruction after this one, subtract 2 for the jump instruction
4404
     that we may need to emit before the table, subtract 2 for the instruction
4405
     that fills the jump delay slot (in very rare cases, reorg will take an
4406
     instruction from after the constant pool or will leave the delay slot
4407
     empty).  This gives 510.
4408
     For SImode: range is 1020, add 4 because pc counts from address of
4409
     second instruction after this one, subtract 2 in case pc is 2 byte
4410
     aligned, subtract 2 for the jump instruction that we may need to emit
4411
     before the table, subtract 2 for the instruction that fills the jump
4412
     delay slot.  This gives 1018.  */
4413
 
4414
  /* The branch will always be shortened now that the reference address for
4415
     forward branches is the successor address, thus we need no longer make
4416
     adjustments to the [sh]i_limit for -O0.  */
4417
 
4418
  si_limit = 1018;
4419
  hi_limit = 510;
4420
 
4421
  while (from && count_si < si_limit && count_hi < hi_limit)
4422
    {
4423
      int inc = get_attr_length (from);
4424
      int new_align = 1;
4425
 
4426
      /* If this is a label that existed at the time of the compute_alignments
4427
         call, determine the alignment.  N.B.  When find_barrier recurses for
4428
         an out-of-reach mova, we might see labels at the start of previously
4429
         inserted constant tables.  */
4430
      if (LABEL_P (from)
4431
          && CODE_LABEL_NUMBER (from) <= max_labelno_before_reorg)
4432
        {
4433
          if (optimize)
4434
            new_align = 1 << label_to_alignment (from);
4435
          else if (BARRIER_P (prev_nonnote_insn (from)))
4436
            new_align = 1 << barrier_align (from);
4437
          else
4438
            new_align = 1;
4439
          inc = 0;
4440
        }
4441
      /* In case we are scanning a constant table because of recursion, check
4442
         for explicit alignments.  If the table is long, we might be forced
4443
         to emit the new table in front of it; the length of the alignment
4444
         might be the last straw.  */
4445
      else if (NONJUMP_INSN_P (from)
4446
               && GET_CODE (PATTERN (from)) == UNSPEC_VOLATILE
4447
               && XINT (PATTERN (from), 1) == UNSPECV_ALIGN)
4448
        new_align = INTVAL (XVECEXP (PATTERN (from), 0, 0));
4449
      /* When we find the end of a constant table, paste the new constant
4450
         at the end.  That is better than putting it in front because
4451
         this way, we don't need extra alignment for adding a 4-byte-aligned
4452
         mov(a) label to a 2/4 or 8/4 byte aligned table.  */
4453
      else if (NONJUMP_INSN_P (from)
4454
               && GET_CODE (PATTERN (from)) == UNSPEC_VOLATILE
4455
               && XINT (PATTERN (from), 1) == UNSPECV_CONST_END)
4456
        return from;
4457
 
4458
      if (BARRIER_P (from))
4459
        {
4460
          rtx next;
4461
 
4462
          found_barrier = from;
4463
 
4464
          /* If we are at the end of the function, or in front of an alignment
4465
             instruction, we need not insert an extra alignment.  We prefer
4466
             this kind of barrier.  */
4467
          if (barrier_align (from) > 2)
4468
            good_barrier = from;
4469
 
4470
          /* If we are at the end of a hot/cold block, dump the constants
4471
             here.  */
4472
          next = NEXT_INSN (from);
4473
          if (next
4474
              && NOTE_P (next)
4475
              && NOTE_KIND (next) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
4476
            break;
4477
        }
4478
 
4479
      if (broken_move (from))
4480
        {
4481
          rtx pat, src, dst;
4482
          enum machine_mode mode;
4483
 
4484
          pat = PATTERN (from);
4485
          if (GET_CODE (pat) == PARALLEL)
4486
            pat = XVECEXP (pat, 0, 0);
4487
          src = SET_SRC (pat);
4488
          dst = SET_DEST (pat);
4489
          mode = GET_MODE (dst);
4490
 
4491
          /* A GOT pc-relative setting comes in a pair of
4492
             mova       .L8,r0
4493
             mov.l      .L8,r12
4494
             instructions.  (plus add r0,r12).
4495
             Remember if we see one without the other.  */
4496
          if (GET_CODE (src) == UNSPEC && PIC_ADDR_P (XVECEXP (src, 0, 0)))
4497
            last_got = last_got ? NULL_RTX : from;
4498
          else if (PIC_ADDR_P (src))
4499
            last_got = last_got ? NULL_RTX : from;
4500
 
4501
          /* We must explicitly check the mode, because sometimes the
4502
             front end will generate code to load unsigned constants into
4503
             HImode targets without properly sign extending them.  */
4504
          if (mode == HImode
4505
              || (mode == SImode && hi_const (src) && REGNO (dst) != FPUL_REG))
4506
            {
4507
              found_hi += 2;
4508
              /* We put the short constants before the long constants, so
4509
                 we must count the length of short constants in the range
4510
                 for the long constants.  */
4511
              /* ??? This isn't optimal, but is easy to do.  */
4512
              si_limit -= 2;
4513
            }
4514
          else
4515
            {
4516
              /* We dump DF/DI constants before SF/SI ones, because
4517
                 the limit is the same, but the alignment requirements
4518
                 are higher.  We may waste up to 4 additional bytes
4519
                 for alignment, and the DF/DI constant may have
4520
                 another SF/SI constant placed before it.  */
4521
              if (TARGET_SHCOMPACT
4522
                  && ! found_di
4523
                  && (mode == DFmode || mode == DImode))
4524
                {
4525
                  found_di = 1;
4526
                  si_limit -= 8;
4527
                }
4528
              while (si_align > 2 && found_si + si_align - 2 > count_si)
4529
                si_align >>= 1;
4530
              if (found_si > count_si)
4531
                count_si = found_si;
4532
              found_si += GET_MODE_SIZE (mode);
4533
              if (num_mova)
4534
                si_limit -= GET_MODE_SIZE (mode);
4535
            }
4536
        }
4537
 
4538
      if (mova_p (from))
4539
        {
4540
          switch (untangle_mova (&num_mova, &mova, from))
4541
            {
4542
              case 1:
4543
                if (flag_pic)
4544
                  {
4545
                    rtx src = SET_SRC (PATTERN (from));
4546
                    if (GET_CODE (src) == CONST
4547
                        && GET_CODE (XEXP (src, 0)) == UNSPEC
4548
                        && XINT (XEXP (src, 0), 1) == UNSPEC_SYMOFF)
4549
                      last_symoff = from;
4550
                  }
4551
                break;
4552
              case 0:    return find_barrier (0, 0, mova);
4553
              case 2:
4554
                {
4555
                  leading_mova = 0;
4556
                  barrier_before_mova
4557
                    = good_barrier ? good_barrier : found_barrier;
4558
                }
4559
              default:  break;
4560
            }
4561
          if (found_si > count_si)
4562
            count_si = found_si;
4563
        }
4564
      else if (JUMP_TABLE_DATA_P (from))
4565
        {
4566
          if ((num_mova > 1 && GET_MODE (prev_nonnote_insn (from)) == VOIDmode)
4567
              || (num_mova
4568
                  && (prev_nonnote_insn (from)
4569
                      == XEXP (MOVA_LABELREF (mova), 0))))
4570
            num_mova--;
4571
          if (barrier_align (next_real_insn (from)) == align_jumps_log)
4572
            {
4573
              /* We have just passed the barrier in front of the
4574
                 ADDR_DIFF_VEC, which is stored in found_barrier.  Since
4575
                 the ADDR_DIFF_VEC is accessed as data, just like our pool
4576
                 constants, this is a good opportunity to accommodate what
4577
                 we have gathered so far.
4578
                 If we waited any longer, we could end up at a barrier in
4579
                 front of code, which gives worse cache usage for separated
4580
                 instruction / data caches.  */
4581
              good_barrier = found_barrier;
4582
              break;
4583
            }
4584
          else
4585
            {
4586
              rtx body = PATTERN (from);
4587
              inc = XVECLEN (body, 1) * GET_MODE_SIZE (GET_MODE (body));
4588
            }
4589
        }
4590
      /* For the SH1, we generate alignments even after jumps-around-jumps.  */
4591
      else if (JUMP_P (from)
4592
               && ! TARGET_SH2
4593
               && ! TARGET_SMALLCODE)
4594
        new_align = 4;
4595
 
4596
      /* There is a possibility that a bf is transformed into a bf/s by the
4597
         delay slot scheduler.  */
4598
      if (JUMP_P (from) && !JUMP_TABLE_DATA_P (from)
4599
          && get_attr_type (from) == TYPE_CBRANCH
4600
          && GET_CODE (PATTERN (NEXT_INSN (PREV_INSN (from)))) != SEQUENCE)
4601
        inc += 2;
4602
 
4603
      if (found_si)
4604
        {
4605
          count_si += inc;
4606
          if (new_align > si_align)
4607
            {
4608
              si_limit -= (count_si - 1) & (new_align - si_align);
4609
              si_align = new_align;
4610
            }
4611
          count_si = (count_si + new_align - 1) & -new_align;
4612
        }
4613
      if (found_hi)
4614
        {
4615
          count_hi += inc;
4616
          if (new_align > hi_align)
4617
            {
4618
              hi_limit -= (count_hi - 1) & (new_align - hi_align);
4619
              hi_align = new_align;
4620
            }
4621
          count_hi = (count_hi + new_align - 1) & -new_align;
4622
        }
4623
      from = NEXT_INSN (from);
4624
    }
4625
 
4626
  if (num_mova)
4627
    {
4628
      if (leading_mova)
4629
        {
4630
          /* Try as we might, the leading mova is out of range.  Change
4631
             it into a load (which will become a pcload) and retry.  */
4632
          fixup_mova (mova);
4633
          return find_barrier (0, 0, mova);
4634
        }
4635
      else
4636
        {
4637
          /* Insert the constant pool table before the mova instruction,
4638
             to prevent the mova label reference from going out of range.  */
4639
          from = mova;
4640
          good_barrier = found_barrier = barrier_before_mova;
4641
        }
4642
    }
4643
 
4644
  if (found_barrier)
4645
    {
4646
      if (good_barrier && next_real_insn (found_barrier))
4647
        found_barrier = good_barrier;
4648
    }
4649
  else
4650
    {
4651
      /* We didn't find a barrier in time to dump our stuff,
4652
         so we'll make one.  */
4653
      rtx label = gen_label_rtx ();
4654
 
4655
      /* Don't emit a constant table in the middle of insns for
4656
         casesi_worker_2.  This is a bit of overkill, but it is enough
4657
         because casesi_worker_2 does not appear very frequently.  */
4658
      if (last_symoff)
4659
        from = last_symoff;
4660
 
4661
      /* If we exceeded the range, then we must back up over the last
4662
         instruction we looked at.  Otherwise, we just need to undo the
4663
         NEXT_INSN at the end of the loop.  */
4664
      if (PREV_INSN (from) != orig
4665
          && (count_hi > hi_limit || count_si > si_limit))
4666
        from = PREV_INSN (PREV_INSN (from));
4667
      else
4668
        from = PREV_INSN (from);
4669
 
4670
      /* Don't emit a constant table in the middle of global pointer setting,
4671
         since that would move the addressing base GOT into another table.
4672
         We need the first mov instruction before the _GLOBAL_OFFSET_TABLE_
4673
         in the pool anyway, so just move up the whole constant pool.  */
4674
      if (last_got)
4675
        from = PREV_INSN (last_got);
4676
 
4677
      /* Don't insert the constant pool table at the position which
4678
         may be the landing pad.  */
4679
      if (flag_exceptions
4680
          && CALL_P (from)
4681
          && find_reg_note (from, REG_EH_REGION, NULL_RTX))
4682
        from = PREV_INSN (from);
4683
 
4684
      /* Walk back to be just before any jump or label.
4685
         Putting it before a label reduces the number of times the branch
4686
         around the constant pool table will be hit.  Putting it before
4687
         a jump makes it more likely that the bra delay slot will be
4688
         filled.  */
4689
      while (NOTE_P (from) || JUMP_P (from)
4690
             || LABEL_P (from))
4691
        from = PREV_INSN (from);
4692
 
4693
      from = emit_jump_insn_after (gen_jump (label), from);
4694
      JUMP_LABEL (from) = label;
4695
      LABEL_NUSES (label) = 1;
4696
      found_barrier = emit_barrier_after (from);
4697
      emit_label_after (label, found_barrier);
4698
    }
4699
 
4700
  return found_barrier;
4701
}
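/* For orientation, a rough sketch of the code layout that the pc-load
   fixup aims for; the label names, the constant value and the alignment
   directive are assumptions made up for this illustration:

       mov.l   .L100,r1        ! pc-relative load created for the constant
       ...
       bra     .L101           ! jump around the table; the barrier after
       nop                     !   it is what find_barrier returns
       .align  2
   .L100:
       .long   0x12345678      ! literal emitted when the table is dumped
   .L101:
       ...                                                                 */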
4702
 
4703
/* If the instruction INSN is implemented by a special function, and we can
4704
   positively find the register that is used to call the sfunc, and this
4705
   register is not used anywhere else in this instruction - except as the
4706
   destination of a set, return this register; else, return 0.  */
4707
rtx
4708
sfunc_uses_reg (rtx insn)
4709
{
4710
  int i;
4711
  rtx pattern, part, reg_part, reg;
4712
 
4713
  if (!NONJUMP_INSN_P (insn))
4714
    return 0;
4715
  pattern = PATTERN (insn);
4716
  if (GET_CODE (pattern) != PARALLEL || get_attr_type (insn) != TYPE_SFUNC)
4717
    return 0;
4718
 
4719
  for (reg_part = 0, i = XVECLEN (pattern, 0) - 1; i >= 1; i--)
4720
    {
4721
      part = XVECEXP (pattern, 0, i);
4722
      if (GET_CODE (part) == USE && GET_MODE (XEXP (part, 0)) == SImode)
4723
        reg_part = part;
4724
    }
4725
  if (! reg_part)
4726
    return 0;
4727
  reg = XEXP (reg_part, 0);
4728
  for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
4729
    {
4730
      part = XVECEXP (pattern, 0, i);
4731
      if (part == reg_part || GET_CODE (part) == CLOBBER)
4732
        continue;
4733
      if (reg_mentioned_p (reg, ((GET_CODE (part) == SET
4734
                                  && REG_P (SET_DEST (part)))
4735
                                 ? SET_SRC (part) : part)))
4736
        return 0;
4737
    }
4738
  return reg;
4739
}
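/* Illustration: the sfunc call patterns this routine matches are
   PARALLELs roughly of the shape (the register numbers and the exact
   side effects are schematic assumptions, not quoted from sh.md):

       (parallel [(set (reg:SI 0) ( ... ))
                  (use (reg:SI 2))             ; register holding the
                  (clobber (reg:SI PR_REG))])  ;   sfunc's address

   The lone SImode USE identifies the address register, provided that
   register is not mentioned anywhere else in the pattern except as the
   destination of a SET.  */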
4740
 
4741
/* See if the only way in which INSN uses REG is by calling it, or by
4742
   setting it while calling it.  Set *SET to a SET rtx if the register
4743
   is set by INSN.  */
4744
 
4745
static int
4746
noncall_uses_reg (rtx reg, rtx insn, rtx *set)
4747
{
4748
  rtx pattern, reg2;
4749
 
4750
  *set = NULL_RTX;
4751
 
4752
  reg2 = sfunc_uses_reg (insn);
4753
  if (reg2 && REGNO (reg2) == REGNO (reg))
4754
    {
4755
      pattern = single_set (insn);
4756
      if (pattern
4757
          && REG_P (SET_DEST (pattern))
4758
          && REGNO (reg) == REGNO (SET_DEST (pattern)))
4759
        *set = pattern;
4760
      return 0;
4761
    }
4762
  if (!CALL_P (insn))
4763
    {
4764
      /* We don't use rtx_equal_p because we don't care if the mode is
4765
         different.  */
4766
      pattern = single_set (insn);
4767
      if (pattern
4768
          && REG_P (SET_DEST (pattern))
4769
          && REGNO (reg) == REGNO (SET_DEST (pattern)))
4770
        {
4771
          rtx par, part;
4772
          int i;
4773
 
4774
          *set = pattern;
4775
          par = PATTERN (insn);
4776
          if (GET_CODE (par) == PARALLEL)
4777
            for (i = XVECLEN (par, 0) - 1; i >= 0; i--)
4778
              {
4779
                part = XVECEXP (par, 0, i);
4780
                if (GET_CODE (part) != SET && reg_mentioned_p (reg, part))
4781
                  return 1;
4782
              }
4783
          return reg_mentioned_p (reg, SET_SRC (pattern));
4784
        }
4785
 
4786
      return 1;
4787
    }
4788
 
4789
  pattern = PATTERN (insn);
4790
 
4791
  if (GET_CODE (pattern) == PARALLEL)
4792
    {
4793
      int i;
4794
 
4795
      for (i = XVECLEN (pattern, 0) - 1; i >= 1; i--)
4796
        if (reg_mentioned_p (reg, XVECEXP (pattern, 0, i)))
4797
          return 1;
4798
      pattern = XVECEXP (pattern, 0, 0);
4799
    }
4800
 
4801
  if (GET_CODE (pattern) == SET)
4802
    {
4803
      if (reg_mentioned_p (reg, SET_DEST (pattern)))
4804
        {
4805
          /* We don't use rtx_equal_p, because we don't care if the
4806
             mode is different.  */
4807
          if (!REG_P (SET_DEST (pattern))
4808
              || REGNO (reg) != REGNO (SET_DEST (pattern)))
4809
            return 1;
4810
 
4811
          *set = pattern;
4812
        }
4813
 
4814
      pattern = SET_SRC (pattern);
4815
    }
4816
 
4817
  if (GET_CODE (pattern) != CALL
4818
      || !MEM_P (XEXP (pattern, 0))
4819
      || ! rtx_equal_p (reg, XEXP (XEXP (pattern, 0), 0)))
4820
    return 1;
4821
 
4822
  return 0;
4823
}
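/* Two assumed examples of the distinction this makes:  for a call insn
   whose body is

       (call (mem:SI (reg:SI 2)) ...)

   noncall_uses_reg (r2, insn, &set) yields 0, since r2 is only used as
   the call address; for an ordinary arithmetic insn such as

       (set (reg:SI 1) (plus:SI (reg:SI 2) (const_int 4)))

   it yields 1, because there r2 is used other than as a call target.  */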
4824
 
4825
/* Given X, a pattern of an insn or a part of it, return a mask of the used
4826
   general registers.  Bits 0..15 mean that the respective registers
4827
   are used as inputs in the instruction.  Bits 16..31 mean that the
4828
   registers 0..15, respectively, are used as outputs, or are clobbered.
4829
   IS_DEST should be set to 16 if X is the destination of a SET, else to 0.  */
4830
int
4831
regs_used (rtx x, int is_dest)
4832
{
4833
  enum rtx_code code;
4834
  const char *fmt;
4835
  int i, used = 0;
4836
 
4837
  if (! x)
4838
    return used;
4839
  code = GET_CODE (x);
4840
  switch (code)
4841
    {
4842
    case REG:
4843
      if (REGNO (x) < 16)
4844
        return (((1 << HARD_REGNO_NREGS (0, GET_MODE (x))) - 1)
4845
                << (REGNO (x) + is_dest));
4846
      return 0;
4847
    case SUBREG:
4848
      {
4849
        rtx y = SUBREG_REG (x);
4850
 
4851
        if (!REG_P (y))
4852
          break;
4853
        if (REGNO (y) < 16)
4854
          return (((1 << HARD_REGNO_NREGS (0, GET_MODE (x))) - 1)
4855
                  << (REGNO (y) +
4856
                      subreg_regno_offset (REGNO (y),
4857
                                           GET_MODE (y),
4858
                                           SUBREG_BYTE (x),
4859
                                           GET_MODE (x)) + is_dest));
4860
        return 0;
4861
      }
4862
    case SET:
4863
      return regs_used (SET_SRC (x), 0) | regs_used (SET_DEST (x), 16);
4864
    case RETURN:
4865
      /* If there was a return value, it must have been indicated with USE.  */
4866
      return 0x00ffff00;
4867
    case CLOBBER:
4868
      is_dest = 1;
4869
      break;
4870
    case MEM:
4871
      is_dest = 0;
4872
      break;
4873
    case CALL:
4874
      used |= 0x00ff00f0;
4875
      break;
4876
    default:
4877
      break;
4878
    }
4879
 
4880
  fmt = GET_RTX_FORMAT (code);
4881
 
4882
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4883
    {
4884
      if (fmt[i] == 'E')
4885
        {
4886
          register int j;
4887
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4888
            used |= regs_used (XVECEXP (x, i, j), is_dest);
4889
        }
4890
      else if (fmt[i] == 'e')
4891
        used |= regs_used (XEXP (x, i), is_dest);
4892
    }
4893
  return used;
4894
}
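/* A worked example of the encoding (assuming SImode occupies a single
   hard register, as it does on the 32-bit targets):  for

       (set (reg:SI 1) (plus:SI (reg:SI 2) (reg:SI 3)))

   regs_used returns (1 << 2) | (1 << 3) for the two inputs plus
   (1 << (1 + 16)) for the output, i.e. 0x2000c.  */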
4895
 
4896
/* Create an instruction that prevents redirection of a conditional branch
4897
   to the destination of the JUMP with address ADDR.
4898
   If the branch needs to be implemented as an indirect jump, try to find
4899
   a scratch register for it.
4900
   If NEED_BLOCK is 0, don't do anything unless we need a scratch register.
4901
   If any preceding insn that doesn't fit into a delay slot is good enough,
4902
   pass 1.  Pass 2 if a definite blocking insn is needed.
4903
   -1 is used internally to avoid deep recursion.
4904
   If a blocking instruction is made or recognized, return it.  */
4905
 
4906
static rtx
4907
gen_block_redirect (rtx jump, int addr, int need_block)
4908
{
4909
  int dead = 0;
4910
  rtx prev = prev_nonnote_insn (jump);
4911
  rtx dest;
4912
 
4913
  /* First, check if we already have an instruction that satisfies our need.  */
4914
  if (prev && NONJUMP_INSN_P (prev) && ! INSN_DELETED_P (prev))
4915
    {
4916
      if (INSN_CODE (prev) == CODE_FOR_indirect_jump_scratch)
4917
        return prev;
4918
      if (GET_CODE (PATTERN (prev)) == USE
4919
          || GET_CODE (PATTERN (prev)) == CLOBBER
4920
          || get_attr_in_delay_slot (prev) == IN_DELAY_SLOT_YES)
4921
        prev = jump;
4922
      else if ((need_block &= ~1) < 0)
4923
        return prev;
4924
      else if (recog_memoized (prev) == CODE_FOR_block_branch_redirect)
4925
        need_block = 0;
4926
    }
4927
  if (GET_CODE (PATTERN (jump)) == RETURN)
4928
    {
4929
      if (! need_block)
4930
        return prev;
4931
      /* Reorg even does nasty things with return insns that cause branches
4932
         to go out of range - see find_end_label and callers.  */
4933
      return emit_insn_before (gen_block_branch_redirect (const0_rtx) , jump);
4934
    }
4935
  /* We can't use JUMP_LABEL here because it might be undefined
4936
     when not optimizing.  */
4937
  dest = XEXP (SET_SRC (PATTERN (jump)), 0);
4938
  /* If the branch is out of range, try to find a scratch register for it.  */
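  /* A reading of the magic numbers below, offered as an assumption: the
     SH "bra" displacement is a signed 12-bit count of 2-byte units taken
     relative to the branch address + 4, so

         target = branch_address + 4 + 2 * disp,   disp in [-2048, 2047]

     reaches offsets of roughly -4092 .. +4098 bytes, and the unsigned
     comparison rejects destinations outside that window.  */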
4939
  if (optimize
4940
      && (INSN_ADDRESSES (INSN_UID (dest)) - addr + (unsigned) 4092
4941
          > 4092 + 4098))
4942
    {
4943
      rtx scan;
4944
      /* Don't look for the stack pointer as a scratch register,
4945
         it would cause trouble if an interrupt occurred.  */
4946
      unsigned attempt = 0x7fff, used;
4947
      int jump_left = flag_expensive_optimizations + 1;
4948
 
4949
      /* It is likely that the most recent eligible instruction is wanted for
4950
         the delay slot.  Therefore, find out which registers it uses, and
4951
         try to avoid using them.  */
4952
 
4953
      for (scan = jump; (scan = PREV_INSN (scan)); )
4954
        {
4955
          enum rtx_code code;
4956
 
4957
          if (INSN_DELETED_P (scan))
4958
            continue;
4959
          code = GET_CODE (scan);
4960
          if (code == CODE_LABEL || code == JUMP_INSN)
4961
            break;
4962
          if (code == INSN
4963
              && GET_CODE (PATTERN (scan)) != USE
4964
              && GET_CODE (PATTERN (scan)) != CLOBBER
4965
              && get_attr_in_delay_slot (scan) == IN_DELAY_SLOT_YES)
4966
            {
4967
              attempt &= ~regs_used (PATTERN (scan), 0);
4968
              break;
4969
            }
4970
        }
4971
      for (used = dead = 0, scan = JUMP_LABEL (jump);
4972
           (scan = NEXT_INSN (scan)); )
4973
        {
4974
          enum rtx_code code;
4975
 
4976
          if (INSN_DELETED_P (scan))
4977
            continue;
4978
          code = GET_CODE (scan);
4979
          if (INSN_P (scan))
4980
            {
4981
              used |= regs_used (PATTERN (scan), 0);
4982
              if (code == CALL_INSN)
4983
                used |= regs_used (CALL_INSN_FUNCTION_USAGE (scan), 0);
4984
              dead |= (used >> 16) & ~used;
4985
              if (dead & attempt)
4986
                {
4987
                  dead &= attempt;
4988
                  break;
4989
                }
4990
              if (code == JUMP_INSN)
4991
                {
4992
                  if (jump_left-- && simplejump_p (scan))
4993
                    scan = JUMP_LABEL (scan);
4994
                  else
4995
                    break;
4996
                }
4997
            }
4998
        }
4999
      /* Mask out the stack pointer again, in case it was
5000
         the only 'free' register we have found.  */
5001
      dead &= 0x7fff;
5002
    }
5003
  /* If the immediate destination is still in range, check for possible
5004
     threading with a jump beyond the delay slot insn.
5005
     Don't check if we are called recursively; the jump has been or will be
5006
     checked in a different invocation then.  */
5007
 
5008
  else if (optimize && need_block >= 0)
5009
    {
5010
      rtx next = next_active_insn (next_active_insn (dest));
5011
      if (next && JUMP_P (next)
5012
          && GET_CODE (PATTERN (next)) == SET
5013
          && recog_memoized (next) == CODE_FOR_jump_compact)
5014
        {
5015
          dest = JUMP_LABEL (next);
5016
          if (dest
5017
              && (INSN_ADDRESSES (INSN_UID (dest)) - addr + (unsigned) 4092
5018
                  > 4092 + 4098))
5019
            gen_block_redirect (next, INSN_ADDRESSES (INSN_UID (next)), -1);
5020
        }
5021
    }
5022
 
5023
  if (dead)
5024
    {
5025
      rtx reg = gen_rtx_REG (SImode, exact_log2 (dead & -dead));
5026
 
5027
      /* It would be nice if we could convert the jump into an indirect
5028
         jump / far branch right now, and thus expose all constituent
5029
         instructions to further optimization.  However, reorg uses
5030
         simplejump_p to determine if there is an unconditional jump where
5031
         it should try to schedule instructions from the target of the
5032
         branch; simplejump_p fails for indirect jumps even if they have
5033
         a JUMP_LABEL.  */
5034
      rtx insn = emit_insn_before (gen_indirect_jump_scratch
5035
                                   (reg, GEN_INT (unspec_bbr_uid++)),
5036
                                   jump);
5037
      /* ??? We would like this to have the scope of the jump, but that
5038
         scope will change when a delay slot insn of an inner scope is added.
5039
         Hence, after delay slot scheduling, we'll have to expect
5040
         NOTE_INSN_BLOCK_END notes between the indirect_jump_scratch and
5041
         the jump.  */
5042
 
5043
      INSN_LOCATOR (insn) = INSN_LOCATOR (jump);
5044
      INSN_CODE (insn) = CODE_FOR_indirect_jump_scratch;
5045
      return insn;
5046
    }
5047
  else if (need_block)
5048
    /* We can't use JUMP_LABEL here because it might be undefined
5049
       when not optimizing.  */
5050
    return emit_insn_before (gen_block_branch_redirect
5051
                             (GEN_INT (unspec_bbr_uid++)),
5052
                             jump);
5053
  return prev;
5054
}
5055
 
5056
#define CONDJUMP_MIN -252
5057
#define CONDJUMP_MAX 262
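/* These bounds appear to reflect the reach of the SH conditional
   branches (bt/bf), whose displacement is a signed 8-bit count of
   2-byte units relative to the branch address + 4:

       target = branch_address + 4 + 2 * disp,   disp in [-128, 127]

   i.e. offsets of roughly -252 .. +258 bytes, with a little slack folded
   into CONDJUMP_MAX; this derivation is an assumption.  */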
5058
struct far_branch
5059
{
5060
  /* A label (to be placed) in front of the jump
5061
     that jumps to our ultimate destination.  */
5062
  rtx near_label;
5063
  /* Where we are going to insert it if we cannot move the jump any farther,
5064
     or the jump itself if we have picked up an existing jump.  */
5065
  rtx insert_place;
5066
  /* The ultimate destination.  */
5067
  rtx far_label;
5068
  struct far_branch *prev;
5069
  /* If the branch has already been created, its address;
5070
     else the address of its first prospective user.  */
5071
  int address;
5072
};
5073
 
5074
static void gen_far_branch (struct far_branch *);
5075
enum mdep_reorg_phase_e mdep_reorg_phase;
5076
static void
5077
gen_far_branch (struct far_branch *bp)
5078
{
5079
  rtx insn = bp->insert_place;
5080
  rtx jump;
5081
  rtx label = gen_label_rtx ();
5082
  int ok;
5083
 
5084
  emit_label_after (label, insn);
5085
  if (bp->far_label)
5086
    {
5087
      jump = emit_jump_insn_after (gen_jump (bp->far_label), insn);
5088
      LABEL_NUSES (bp->far_label)++;
5089
    }
5090
  else
5091
    jump = emit_jump_insn_after (gen_return (), insn);
5092
  /* Emit a barrier so that reorg knows that any following instructions
5093
     are not reachable via a fall-through path.
5094
     But don't do this when not optimizing, since we wouldn't suppress the
5095
     alignment for the barrier then, and could end up with out-of-range
5096
     pc-relative loads.  */
5097
  if (optimize)
5098
    emit_barrier_after (jump);
5099
  emit_label_after (bp->near_label, insn);
5100
  JUMP_LABEL (jump) = bp->far_label;
5101
  ok = invert_jump (insn, label, 1);
5102
  gcc_assert (ok);
5103
 
5104
  /* If we are branching around a jump (rather than a return), prevent
5105
     reorg from using an insn from the jump target as the delay slot insn -
5106
     when reorg did this, it pessimized code (we'd rather hide the delay slot)
5107
     and it could cause branches to go out of range.  */
5108
  if (bp->far_label)
5109
    (emit_insn_after
5110
     (gen_stuff_delay_slot
5111
      (GEN_INT (unspec_bbr_uid++),
5112
       GEN_INT (recog_memoized (insn) == CODE_FOR_branch_false)),
5113
      insn));
5114
  /* Prevent reorg from undoing our splits.  */
5115
  gen_block_redirect (jump, bp->address += 2, 2);
5116
}
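/* Schematically, the transformation performed here turns an out-of-range
   conditional branch into an inverted branch around an unconditional far
   jump (the label names are made up for the illustration):

       before:                        after:
           bt      .Lfar                  bf      .Lskip    ! inverted test
           ...                        .Lnear:               ! reusable by other
                                          bra     .Lfar     !   nearby branches
                                          nop
                                      .Lskip:
                                          ...

   together with the barrier and delay-slot bookkeeping done above.  */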
5117
 
5118
/* Fix up ADDR_DIFF_VECs.  */
5119
void
5120
fixup_addr_diff_vecs (rtx first)
5121
{
5122
  rtx insn;
5123
 
5124
  for (insn = first; insn; insn = NEXT_INSN (insn))
5125
    {
5126
      rtx vec_lab, pat, prev, prevpat, x, braf_label;
5127
 
5128
      if (!JUMP_P (insn)
5129
          || GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
5130
        continue;
5131
      pat = PATTERN (insn);
5132
      vec_lab = XEXP (XEXP (pat, 0), 0);
5133
 
5134
      /* Search the matching casesi_jump_2.  */
5135
      for (prev = vec_lab; ; prev = PREV_INSN (prev))
5136
        {
5137
          if (!JUMP_P (prev))
5138
            continue;
5139
          prevpat = PATTERN (prev);
5140
          if (GET_CODE (prevpat) != PARALLEL || XVECLEN (prevpat, 0) != 2)
5141
            continue;
5142
          x = XVECEXP (prevpat, 0, 1);
5143
          if (GET_CODE (x) != USE)
5144
            continue;
5145
          x = XEXP (x, 0);
5146
          if (GET_CODE (x) == LABEL_REF && XEXP (x, 0) == vec_lab)
5147
            break;
5148
        }
5149
      /* FIXME: This is a bug in the optimizer, but it seems harmless
5150
         to just avoid panicking.  */
5151
      if (!prev)
5152
        continue;
5153
 
5154
      /* Emit the reference label of the braf where it belongs, right after
5155
         the casesi_jump_2 (i.e. braf).  */
5156
      braf_label = XEXP (XEXP (SET_SRC (XVECEXP (prevpat, 0, 0)), 1), 0);
5157
      emit_label_after (braf_label, prev);
5158
 
5159
      /* Fix up the ADDR_DIFF_VEC to be relative
5160
         to the reference address of the braf.  */
5161
      XEXP (XEXP (pat, 0), 0) = braf_label;
5162
    }
5163
}
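/* For orientation, the braf-based dispatch being fixed up looks roughly
   like this (the label names and the exact load width are assumptions):

           mova    .Ltable,r0      ! r0 = address of the offset table
           mov.w   @(r0,r1),r1     ! fetch the offset for this case
           braf    r1              ! casesi_jump_2: branch to base + offset
           nop
       .Lref:                      ! reference label emitted just above
       .Ltable:
           .word   .Lcase0 - .Lref
           .word   .Lcase1 - .Lref

   The loop above rewrites the ADDR_DIFF_VEC's base to be the braf's
   reference label, so the table entries become offsets from .Lref.  */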
5164
 
5165
/* BARRIER_OR_LABEL is either a BARRIER or a CODE_LABEL immediately following
5166
   a barrier.  Return the base 2 logarithm of the desired alignment.  */
5167
int
5168
barrier_align (rtx barrier_or_label)
5169
{
5170
  rtx next = next_real_insn (barrier_or_label), pat, prev;
5171
  int slot, credit, jump_to_next = 0;
5172
 
5173
  if (! next)
5174
    return 0;
5175
 
5176
  pat = PATTERN (next);
5177
 
5178
  if (GET_CODE (pat) == ADDR_DIFF_VEC)
5179
    return 2;
5180
 
5181
  if (GET_CODE (pat) == UNSPEC_VOLATILE && XINT (pat, 1) == UNSPECV_ALIGN)
5182
    /* This is a barrier in front of a constant table.  */
5183
    return 0;
5184
 
5185
  prev = prev_real_insn (barrier_or_label);
5186
  if (GET_CODE (PATTERN (prev)) == ADDR_DIFF_VEC)
5187
    {
5188
      pat = PATTERN (prev);
5189
      /* If this is a very small table, we want to keep the alignment after
5190
         the table to the minimum for proper code alignment.  */
5191
      return ((TARGET_SMALLCODE
5192
               || ((unsigned) XVECLEN (pat, 1) * GET_MODE_SIZE (GET_MODE (pat))
5193
                   <= (unsigned) 1 << (CACHE_LOG - 2)))
5194
              ? 1 << TARGET_SHMEDIA : align_jumps_log);
5195
    }
5196
 
5197
  if (TARGET_SMALLCODE)
5198
    return 0;
5199
 
5200
  if (! TARGET_SH2 || ! optimize)
5201
    return align_jumps_log;
5202
 
5203
  /* When fixing up pcloads, a constant table might be inserted just before
5204
     the basic block that ends with the barrier.  Thus, we can't trust the
5205
     instruction lengths before that.  */
5206
  if (mdep_reorg_phase > SH_FIXUP_PCLOAD)
5207
    {
5208
      /* Check if there is an immediately preceding branch to the insn beyond
5209
         the barrier.  We must weigh the cost of discarding useful information
5210
         from the current cache line when executing this branch and there is
5211
         an alignment, against that of fetching unneeded insns in front of the
5212
         branch target when there is no alignment.  */
5213
 
5214
      /* There are two delay_slot cases to consider.  One is the simple case
5215
         where the preceding branch is to the insn beyond the barrier (simple
5216
         delay slot filling), and the other is where the preceding branch has
5217
         a delay slot that is a duplicate of the insn after the barrier
5218
         (fill_eager_delay_slots) and the branch is to the insn after the insn
5219
         after the barrier.  */
5220
 
5221
      /* PREV is presumed to be the JUMP_INSN for the barrier under
5222
         investigation.  Skip to the insn before it.  */
5223
      prev = prev_real_insn (prev);
5224
 
5225
      for (slot = 2, credit = (1 << (CACHE_LOG - 2)) + 2;
5226
           credit >= 0 && prev && NONJUMP_INSN_P (prev);
5227
           prev = prev_real_insn (prev))
5228
        {
5229
          jump_to_next = 0;
5230
          if (GET_CODE (PATTERN (prev)) == USE
5231
              || GET_CODE (PATTERN (prev)) == CLOBBER)
5232
            continue;
5233
          if (GET_CODE (PATTERN (prev)) == SEQUENCE)
5234
            {
5235
              prev = XVECEXP (PATTERN (prev), 0, 1);
5236
              if (INSN_UID (prev) == INSN_UID (next))
5237
                {
5238
                  /* Delay slot was filled with insn at jump target.  */
5239
                  jump_to_next = 1;
5240
                  continue;
5241
                }
5242
            }
5243
 
5244
          if (slot &&
5245
              get_attr_in_delay_slot (prev) == IN_DELAY_SLOT_YES)
5246
            slot = 0;
5247
          credit -= get_attr_length (prev);
5248
        }
5249
      if (prev
5250
          && JUMP_P (prev)
5251
          && JUMP_LABEL (prev))
5252
        {
5253
          rtx x;
5254
          if (jump_to_next
5255
              || next_real_insn (JUMP_LABEL (prev)) == next
5256
              /* If relax_delay_slots() decides NEXT was redundant
5257
                 with some previous instruction, it will have
5258
                 redirected PREV's jump to the following insn.  */
5259
              || JUMP_LABEL (prev) == next_nonnote_insn (next)
5260
              /* There is no upper bound on redundant instructions
5261
                 that might have been skipped, but we must not put an
5262
                 alignment where none had been before.  */
5263
              || (x = (NEXT_INSN (NEXT_INSN (PREV_INSN (prev)))),
5264
                  (INSN_P (x)
5265
                   && (INSN_CODE (x) == CODE_FOR_block_branch_redirect
5266
                       || INSN_CODE (x) == CODE_FOR_indirect_jump_scratch
5267
                       || INSN_CODE (x) == CODE_FOR_stuff_delay_slot))))
5268
            {
5269
              rtx pat = PATTERN (prev);
5270
              if (GET_CODE (pat) == PARALLEL)
5271
                pat = XVECEXP (pat, 0, 0);
5272
              if (credit - slot >= (GET_CODE (SET_SRC (pat)) == PC ? 2 : 0))
5273
                return 0;
5274
            }
5275
        }
5276
    }
5277
 
5278
  return align_jumps_log;
5279
}
5280
 
5281
/* If we are inside a phony loop, almost any kind of label can turn up as the
5282
   first one in the loop.  Aligning a braf label causes incorrect switch
5283
   destination addresses; we can detect braf labels because they are
5284
   followed by a BARRIER.
5285
   Applying loop alignment to small constant or switch tables is a waste
5286
   of space, so we suppress this too.  */
5287
int
5288
sh_loop_align (rtx label)
5289
{
5290
  rtx next = label;
5291
 
5292
  do
5293
    next = next_nonnote_insn (next);
5294
  while (next && LABEL_P (next));
5295
 
5296
  if (! next
5297
      || ! INSN_P (next)
5298
      || GET_CODE (PATTERN (next)) == ADDR_DIFF_VEC
5299
      || recog_memoized (next) == CODE_FOR_consttable_2)
5300
    return 0;
5301
 
5302
  return align_loops_log;
5303
}
5304
 
5305
/* Do a final pass over the function, just before delayed branch
5306
   scheduling.  */
5307
 
5308
static void
5309
sh_reorg (void)
5310
{
5311
  rtx first, insn, mova = NULL_RTX;
5312
  int num_mova;
5313
  rtx r0_rtx = gen_rtx_REG (Pmode, 0);
5314
  rtx r0_inc_rtx = gen_rtx_POST_INC (Pmode, r0_rtx);
5315
 
5316
  first = get_insns ();
5317
  max_labelno_before_reorg = max_label_num ();
5318
 
5319
  /* We must split call insns before introducing `mova's.  If we're
5320
     optimizing, they'll have already been split.  Otherwise, make
5321
     sure we don't split them too late.  */
5322
  if (! optimize)
5323
    split_all_insns_noflow ();
5324
 
5325
  if (TARGET_SHMEDIA)
5326
    return;
5327
 
5328
  /* If relaxing, generate pseudo-ops to associate function calls with
5329
     the symbols they call.  It does no harm to not generate these
5330
     pseudo-ops.  However, when we can generate them, it enables the
5331
     linker to potentially relax the jsr to a bsr, and eliminate the
5332
     register load and, possibly, the constant pool entry.  */
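  /* What that relaxation looks like, schematically (the symbol and label
     names are assumed for the example):

         mov.l   .L1,r1          ! load the address of foo from the pool
         jsr     @r1             ! indirect call through r1
         nop
         ...
     .L1:    .long   foo

     may be rewritten by the linker into a direct

         bsr     foo             ! pc-relative call
         nop

     when foo turns out to be close enough, dropping the register load
     and, if nothing else references it, the literal-pool entry too.  */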
5333
 
5334
  mdep_reorg_phase = SH_INSERT_USES_LABELS;
5335
  if (TARGET_RELAX)
5336
    {
5337
      /* Remove all REG_LABEL_OPERAND notes.  We want to use them for our
5338
         own purposes.  This works because none of the remaining passes
5339
         need to look at them.
5340
 
5341
         ??? But it may break in the future.  We should use a machine
5342
         dependent REG_NOTE, or some other approach entirely.  */
5343
      for (insn = first; insn; insn = NEXT_INSN (insn))
5344
        {
5345
          if (INSN_P (insn))
5346
            {
5347
              rtx note;
5348
 
5349
              while ((note = find_reg_note (insn, REG_LABEL_OPERAND,
5350
                                            NULL_RTX)) != 0)
5351
                remove_note (insn, note);
5352
            }
5353
        }
5354
 
5355
      for (insn = first; insn; insn = NEXT_INSN (insn))
5356
        {
5357
          rtx pattern, reg, link, set, scan, dies, label;
5358
          int rescan = 0, foundinsn = 0;
5359
 
5360
          if (CALL_P (insn))
5361
            {
5362
              pattern = PATTERN (insn);
5363
 
5364
              if (GET_CODE (pattern) == PARALLEL)
5365
                pattern = XVECEXP (pattern, 0, 0);
5366
              if (GET_CODE (pattern) == SET)
5367
                pattern = SET_SRC (pattern);
5368
 
5369
              if (GET_CODE (pattern) != CALL
5370
                  || !MEM_P (XEXP (pattern, 0)))
5371
                continue;
5372
 
5373
              reg = XEXP (XEXP (pattern, 0), 0);
5374
            }
5375
          else
5376
            {
5377
              reg = sfunc_uses_reg (insn);
5378
              if (! reg)
5379
                continue;
5380
            }
5381
 
5382
          if (!REG_P (reg))
5383
            continue;
5384
 
5385
          /* Try scanning backward to find where the register is set.  */
5386
          link = NULL;
5387
          for (scan = PREV_INSN (insn);
5388
               scan && !LABEL_P (scan);
5389
               scan = PREV_INSN (scan))
5390
            {
5391
              if (! INSN_P (scan))
5392
                continue;
5393
 
5394
              if (! reg_mentioned_p (reg, scan))
5395
                continue;
5396
 
5397
              if (noncall_uses_reg (reg, scan, &set))
5398
                break;
5399
 
5400
              if (set)
5401
                {
5402
                  link = scan;
5403
                  break;
5404
                }
5405
            }
5406
 
5407
          if (! link)
5408
            continue;
5409
 
5410
          /* The register is set at LINK.  */
5411
 
5412
          /* We can only optimize the function call if the register is
5413
             being set to a symbol.  In theory, we could sometimes
5414
             optimize calls to a constant location, but the assembler
5415
             and linker do not support that at present.  */
5416
          if (GET_CODE (SET_SRC (set)) != SYMBOL_REF
5417
              && GET_CODE (SET_SRC (set)) != LABEL_REF)
5418
            continue;
5419
 
5420
          /* Scan forward from LINK to the place where REG dies, and
5421
             make sure that the only insns which use REG are
5422
             themselves function calls.  */
5423
 
5424
          /* ??? This doesn't work for call targets that were allocated
5425
             by reload, since there may not be a REG_DEAD note for the
5426
             register.  */
5427
 
5428
          dies = NULL_RTX;
5429
          for (scan = NEXT_INSN (link); scan; scan = NEXT_INSN (scan))
5430
            {
5431
              rtx scanset;
5432
 
5433
              /* Don't try to trace forward past a CODE_LABEL if we haven't
5434
                 seen INSN yet.  Ordinarily, we will only find the setting insn
5435
                 if it is in the same basic block.  However,
5436
                 cross-jumping can insert code labels in between the load and
5437
                 the call, and can result in situations where a single call
5438
                 insn may have two targets depending on where we came from.  */
5439
 
5440
              if (LABEL_P (scan) && ! foundinsn)
5441
                break;
5442
 
5443
              if (! INSN_P (scan))
5444
                continue;
5445
 
5446
              /* Don't try to trace forward past a JUMP.  To optimize
5447
                 safely, we would have to check that all the
5448
                 instructions at the jump destination did not use REG.  */
5449
 
5450
              if (JUMP_P (scan))
5451
                break;
5452
 
5453
              if (! reg_mentioned_p (reg, scan))
5454
                continue;
5455
 
5456
              if (noncall_uses_reg (reg, scan, &scanset))
5457
                break;
5458
 
5459
              if (scan == insn)
5460
                foundinsn = 1;
5461
 
5462
              if (scan != insn
5463
                  && (CALL_P (scan) || sfunc_uses_reg (scan)))
5464
                {
5465
                  /* There is a function call to this register other
5466
                     than the one we are checking.  If we optimize
5467
                     this call, we need to rescan again below.  */
5468
                  rescan = 1;
5469
                }
5470
 
5471
              /* ??? We shouldn't have to worry about SCANSET here.
5472
                 We should just be able to check for a REG_DEAD note
5473
                 on a function call.  However, the REG_DEAD notes are
5474
                 apparently not dependable around libcalls; c-torture
5475
                 execute/920501-2 is a test case.  If SCANSET is set,
5476
                 then this insn sets the register, so it must have
5477
                 died earlier.  Unfortunately, this will only handle
5478
                 the cases in which the register is, in fact, set in a
5479
                 later insn.  */
5480
 
5481
              /* ??? We shouldn't have to use FOUNDINSN here.
5482
                 This dates back to when we used LOG_LINKS to find
5483
                 the most recent insn which sets the register.  */
5484
 
5485
              if (foundinsn
5486
                  && (scanset
5487
                      || find_reg_note (scan, REG_DEAD, reg)))
5488
                {
5489
                  dies = scan;
5490
                  break;
5491
                }
5492
            }
5493
 
5494
          if (! dies)
5495
            {
5496
              /* Either there was a branch, or some insn used REG
5497
                 other than as a function call address.  */
5498
              continue;
5499
            }
5500
 
5501
          /* Create a code label, and put it in a REG_LABEL_OPERAND note
5502
             on the insn which sets the register, and on each call insn
5503
             which uses the register.  In final_prescan_insn we look for
5504
             the REG_LABEL_OPERAND notes, and output the appropriate label
5505
             or pseudo-op.  */
5506
 
5507
          label = gen_label_rtx ();
5508
          add_reg_note (link, REG_LABEL_OPERAND, label);
5509
          add_reg_note (insn, REG_LABEL_OPERAND, label);
5510
          if (rescan)
5511
            {
5512
              scan = link;
5513
              do
5514
                {
5515
                  rtx reg2;
5516
 
5517
                  scan = NEXT_INSN (scan);
5518
                  if (scan != insn
5519
                      && ((CALL_P (scan)
5520
                           && reg_mentioned_p (reg, scan))
5521
                          || ((reg2 = sfunc_uses_reg (scan))
5522
                              && REGNO (reg2) == REGNO (reg))))
5523
                    add_reg_note (scan, REG_LABEL_OPERAND, label);
5524
                }
5525
              while (scan != dies);
5526
            }
5527
        }
5528
    }
5529
 
5530
  if (TARGET_SH2)
5531
    fixup_addr_diff_vecs (first);
5532
 
5533
  if (optimize)
5534
    {
5535
      mdep_reorg_phase = SH_SHORTEN_BRANCHES0;
5536
      shorten_branches (first);
5537
    }
5538
 
5539
  /* Scan the function looking for move instructions which have to be
5540
     changed to pc-relative loads and insert the literal tables.  */
5541
  label_ref_list_pool = create_alloc_pool ("label references list",
5542
                                           sizeof (struct label_ref_list_d),
5543
                                           30);
5544
  mdep_reorg_phase = SH_FIXUP_PCLOAD;
5545
  for (insn = first, num_mova = 0; insn; insn = NEXT_INSN (insn))
5546
    {
5547
      if (mova_p (insn))
5548
        {
5549
          /* ??? basic block reordering can move a switch table dispatch
5550
             below the switch table.  Check if that has happened.
5551
             We only have the addresses available when optimizing; but then,
5552
             this check shouldn't be needed when not optimizing.  */
5553
          if (!untangle_mova (&num_mova, &mova, insn))
5554
            {
5555
              insn = mova;
5556
              num_mova = 0;
5557
            }
5558
        }
5559
      else if (JUMP_P (insn)
5560
               && GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
5561
               && num_mova
5562
               /* ??? loop invariant motion can also move a mova out of a
5563
                  loop.  Since loop does this code motion anyway, maybe we
5564
                  should wrap UNSPEC_MOVA into a CONST, so that reload can
5565
                  move it back.  */
5566
               && ((num_mova > 1
5567
                    && GET_MODE (prev_nonnote_insn (insn)) == VOIDmode)
5568
                   || (prev_nonnote_insn (insn)
5569
                       == XEXP (MOVA_LABELREF (mova), 0))))
5570
        {
5571
          rtx scan;
5572
          int total;
5573
 
5574
          num_mova--;
5575
 
5576
          /* Some code might have been inserted between the mova and
5577
             its ADDR_DIFF_VEC.  Check if the mova is still in range.  */
5578
          for (scan = mova, total = 0; scan != insn; scan = NEXT_INSN (scan))
5579
            total += get_attr_length (scan);
5580
 
5581
          /* The range of mova is 1020; add 4 because the pc counts from the address of
5582
             the second instruction after this one, and subtract 2 in case the pc is only
5583
             2-byte aligned.  The possible alignment needed for the ADDR_DIFF_VEC
5584
             cancels out with alignment effects of the mova itself.  */
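          /* That is, reading the constants above: 1020 bytes of mova
             reach, plus 4 for the pc bias, minus 2 for possible pc
             misalignment, gives the 1022-byte threshold below.  */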
5585
          if (total > 1022)
5586
            {
5587
              /* Change the mova into a load, and restart scanning
5588
                 there.  broken_move will then return true for mova.  */
5589
              fixup_mova (mova);
5590
              insn = mova;
5591
            }
5592
        }
5593
      if (broken_move (insn)
5594
          || (NONJUMP_INSN_P (insn)
5595
              && recog_memoized (insn) == CODE_FOR_casesi_worker_2))
5596
        {
5597
          rtx scan;
5598
          /* Scan ahead looking for a barrier to stick the constant table
5599
             behind.  */
5600
          rtx barrier = find_barrier (num_mova, mova, insn);
5601
          rtx last_float_move = NULL_RTX, last_float = 0, *last_float_addr = NULL;
5602
          int need_aligned_label = 0;
5603
 
5604
          if (num_mova && ! mova_p (mova))
5605
            {
5606
              /* find_barrier had to change the first mova into a
5607
                 pcload; thus, we have to start with this new pcload.  */
5608
              insn = mova;
5609
              num_mova = 0;
5610
            }
5611
          /* Now find all the moves between the points and modify them.  */
5612
          for (scan = insn; scan != barrier; scan = NEXT_INSN (scan))
5613
            {
5614
              if (LABEL_P (scan))
5615
                last_float = 0;
5616
              if (NONJUMP_INSN_P (scan)
5617
                  && recog_memoized (scan) == CODE_FOR_casesi_worker_2)
5618
                need_aligned_label = 1;
5619
              if (broken_move (scan))
5620
                {
5621
                  rtx *patp = &PATTERN (scan), pat = *patp;
5622
                  rtx src, dst;
5623
                  rtx lab;
5624
                  rtx newsrc;
5625
                  enum machine_mode mode;
5626
 
5627
                  if (GET_CODE (pat) == PARALLEL)
5628
                    patp = &XVECEXP (pat, 0, 0), pat = *patp;
5629
                  src = SET_SRC (pat);
5630
                  dst = SET_DEST (pat);
5631
                  mode = GET_MODE (dst);
5632
 
5633
                  if (mode == SImode && hi_const (src)
5634
                      && REGNO (dst) != FPUL_REG)
5635
                    {
5636
                      int offset = 0;
5637
 
5638
                      mode = HImode;
5639
                      while (GET_CODE (dst) == SUBREG)
5640
                        {
5641
                          offset += subreg_regno_offset (REGNO (SUBREG_REG (dst)),
5642
                                                         GET_MODE (SUBREG_REG (dst)),
5643
                                                         SUBREG_BYTE (dst),
5644
                                                         GET_MODE (dst));
5645
                          dst = SUBREG_REG (dst);
5646
                        }
5647
                      dst = gen_rtx_REG (HImode, REGNO (dst) + offset);
5648
                    }
5649
                  if (REG_P (dst) && FP_ANY_REGISTER_P (REGNO (dst)))
5650
                    {
5651
                      /* This must be an insn that clobbers r0.  */
5652
                      rtx *clobberp = &XVECEXP (PATTERN (scan), 0,
5653
                                                XVECLEN (PATTERN (scan), 0)
5654
                                                - 1);
5655
                      rtx clobber = *clobberp;
5656
 
5657
                      gcc_assert (GET_CODE (clobber) == CLOBBER
5658
                                  && rtx_equal_p (XEXP (clobber, 0), r0_rtx));
5659
 
5660
                      if (last_float
5661
                          && reg_set_between_p (r0_rtx, last_float_move, scan))
5662
                        last_float = 0;
5663
                      if (last_float
5664
                          && TARGET_SHCOMPACT
5665
                          && GET_MODE_SIZE (mode) != 4
5666
                          && GET_MODE_SIZE (GET_MODE (last_float)) == 4)
5667
                        last_float = 0;
5668
                      lab = add_constant (src, mode, last_float);
5669
                      if (lab)
5670
                        emit_insn_before (gen_mova (lab), scan);
5671
                      else
5672
                        {
5673
                          /* There will be a REG_UNUSED note for r0 on
5674
                             LAST_FLOAT_MOVE; we have to change it to REG_INC,
5675
                             otherwise reorg:mark_target_live_regs will not
5676
                             consider r0 to be used, and we end up with delay
5677
                             slot insn in front of SCAN that clobbers r0.  */
5678
                          rtx note
5679
                            = find_regno_note (last_float_move, REG_UNUSED, 0);
5680
 
5681
                          /* If we are not optimizing, then there may not be
5682
                             a note.  */
5683
                          if (note)
5684
                            PUT_REG_NOTE_KIND (note, REG_INC);
5685
 
5686
                          *last_float_addr = r0_inc_rtx;
5687
                        }
5688
                      last_float_move = scan;
5689
                      last_float = src;
5690
                      newsrc = gen_const_mem (mode,
5691
                                        (((TARGET_SH4 && ! TARGET_FMOVD)
5692
                                          || REGNO (dst) == FPUL_REG)
5693
                                         ? r0_inc_rtx
5694
                                         : r0_rtx));
5695
                      last_float_addr = &XEXP (newsrc, 0);
5696
 
5697
                      /* Remove the clobber of r0.  */
5698
                      *clobberp = gen_rtx_CLOBBER (GET_MODE (clobber),
5699
                                                   gen_rtx_SCRATCH (Pmode));
5700
                    }
5701
                  /* This is a mova needing a label.  Create it.  */
5702
                  else if (GET_CODE (src) == UNSPEC
5703
                           && XINT (src, 1) == UNSPEC_MOVA
5704
                           && GET_CODE (XVECEXP (src, 0, 0)) == CONST)
5705
                    {
5706
                      lab = add_constant (XVECEXP (src, 0, 0), mode, 0);
5707
                      newsrc = gen_rtx_LABEL_REF (VOIDmode, lab);
5708
                      newsrc = gen_rtx_UNSPEC (SImode,
5709
                                               gen_rtvec (1, newsrc),
5710
                                               UNSPEC_MOVA);
5711
                    }
5712
                  else
5713
                    {
5714
                      lab = add_constant (src, mode, 0);
5715
                      newsrc = gen_rtx_LABEL_REF (VOIDmode, lab);
5716
                      newsrc = gen_const_mem (mode, newsrc);
5717
                    }
5718
                  *patp = gen_rtx_SET (VOIDmode, dst, newsrc);
5719
                  INSN_CODE (scan) = -1;
5720
                }
5721
            }
5722
          dump_table (need_aligned_label ? insn : 0, barrier);
5723
          insn = barrier;
5724
        }
5725
    }
5726
  free_alloc_pool (label_ref_list_pool);
5727
  for (insn = first; insn; insn = NEXT_INSN (insn))
5728
    PUT_MODE (insn, VOIDmode);
5729
 
5730
  mdep_reorg_phase = SH_SHORTEN_BRANCHES1;
5731
  INSN_ADDRESSES_FREE ();
5732
  split_branches (first);
5733
 
5734
  /* The INSN_REFERENCES_ARE_DELAYED in sh.h is problematic because it
5735
     also has an effect on the register that holds the address of the sfunc.
5736
     Insert an extra dummy insn in front of each sfunc that pretends to
5737
     use this register.  */
5738
  if (flag_delayed_branch)
5739
    {
5740
      for (insn = first; insn; insn = NEXT_INSN (insn))
5741
        {
5742
          rtx reg = sfunc_uses_reg (insn);
5743
 
5744
          if (! reg)
5745
            continue;
5746
          emit_insn_before (gen_use_sfunc_addr (reg), insn);
5747
        }
5748
    }
5749
#if 0
5750
  /* fpscr is not actually a user variable, but we pretend it is for the
5751
     sake of the previous optimization passes, since we want it handled like
5752
     one.  However, we don't have any debugging information for it, so turn
5753
     it into a non-user variable now.  */
5754
  if (TARGET_SH4)
5755
    REG_USERVAR_P (get_fpscr_rtx ()) = 0;
5756
#endif
5757
  mdep_reorg_phase = SH_AFTER_MDEP_REORG;
5758
}
5759
 
5760
int
5761
get_dest_uid (rtx label, int max_uid)
5762
{
5763
  rtx dest = next_real_insn (label);
5764
  int dest_uid;
5765
  if (! dest)
5766
    /* This can happen for an undefined label.  */
5767
    return 0;
5768
  dest_uid = INSN_UID (dest);
5769
  /* If this is a newly created branch redirection blocking instruction,
5770
     we cannot index the branch_uid or insn_addresses arrays with its
5771
     uid.  But then, we won't need to, because the actual destination is
5772
     the following branch.  */
5773
  while (dest_uid >= max_uid)
5774
    {
5775
      dest = NEXT_INSN (dest);
5776
      dest_uid = INSN_UID (dest);
5777
    }
5778
  if (JUMP_P (dest) && GET_CODE (PATTERN (dest)) == RETURN)
5779
    return 0;
5780
  return dest_uid;
5781
}
5782
 
5783
/* Split condbranches that are out of range.  Also add clobbers for
5784
   scratch registers that are needed in far jumps.
5785
   We do this before delay slot scheduling, so that it can take our
5786
   newly created instructions into account.  It also allows us to
5787
   find branches with common targets more easily.  */
5788
 
5789
static void
5790
split_branches (rtx first)
5791
{
5792
  rtx insn;
5793
  struct far_branch **uid_branch, *far_branch_list = 0;
5794
  int max_uid = get_max_uid ();
5795
  int ok;
5796
 
5797
  /* Find out which branches are out of range.  */
5798
  shorten_branches (first);
5799
 
5800
  uid_branch = (struct far_branch **) alloca (max_uid * sizeof *uid_branch);
5801
  memset ((char *) uid_branch, 0, max_uid * sizeof *uid_branch);
5802
 
5803
  for (insn = first; insn; insn = NEXT_INSN (insn))
5804
    if (! INSN_P (insn))
5805
      continue;
5806
    else if (INSN_DELETED_P (insn))
5807
      {
5808
        /* Shorten_branches would split this instruction again,
5809
           so transform it into a note.  */
5810
        SET_INSN_DELETED (insn);
5811
      }
5812
    else if (JUMP_P (insn)
5813
             /* Don't mess with ADDR_DIFF_VEC.  */
5814
             && (GET_CODE (PATTERN (insn)) == SET
5815
                 || GET_CODE (PATTERN (insn)) == RETURN))
5816
      {
5817
        enum attr_type type = get_attr_type (insn);
5818
        if (type == TYPE_CBRANCH)
5819
          {
5820
            rtx next, beyond;
5821
 
5822
            if (get_attr_length (insn) > 4)
5823
              {
5824
                rtx src = SET_SRC (PATTERN (insn));
5825
                rtx olabel = XEXP (XEXP (src, 1), 0);
5826
                int addr = INSN_ADDRESSES (INSN_UID (insn));
5827
                rtx label = 0;
5828
                int dest_uid = get_dest_uid (olabel, max_uid);
5829
                struct far_branch *bp = uid_branch[dest_uid];
5830
 
5831
                /* redirect_jump needs a valid JUMP_LABEL, and it might delete
5832
                   the label if the LABEL_NUSES count drops to zero.  There is
5833
                   always a jump_optimize pass that sets these values, but it
5834
                   proceeds to delete unreferenced code, and then if not
5835
                   optimizing, to un-delete the deleted instructions, thus
5836
                   leaving labels with use counts that are too low.  */
5837
                if (! optimize)
5838
                  {
5839
                    JUMP_LABEL (insn) = olabel;
5840
                    LABEL_NUSES (olabel)++;
5841
                  }
5842
                if (! bp)
5843
                  {
5844
                    bp = (struct far_branch *) alloca (sizeof *bp);
5845
                    uid_branch[dest_uid] = bp;
5846
                    bp->prev = far_branch_list;
5847
                    far_branch_list = bp;
5848
                    bp->far_label
5849
                      = XEXP (XEXP (SET_SRC (PATTERN (insn)), 1), 0);
5850
                    LABEL_NUSES (bp->far_label)++;
5851
                  }
5852
                else
5853
                  {
5854
                    label = bp->near_label;
5855
                    if (! label && bp->address - addr >= CONDJUMP_MIN)
5856
                      {
5857
                        rtx block = bp->insert_place;
5858
 
5859
                        if (GET_CODE (PATTERN (block)) == RETURN)
5860
                          block = PREV_INSN (block);
5861
                        else
5862
                          block = gen_block_redirect (block,
5863
                                                      bp->address, 2);
5864
                        label = emit_label_after (gen_label_rtx (),
5865
                                                  PREV_INSN (block));
5866
                        bp->near_label = label;
5867
                      }
5868
                    else if (label && ! NEXT_INSN (label))
5869
                      {
5870
                        if (addr + 2 - bp->address <= CONDJUMP_MAX)
5871
                          bp->insert_place = insn;
5872
                        else
5873
                          gen_far_branch (bp);
5874
                      }
5875
                  }
5876
                if (! label
5877
                    || (NEXT_INSN (label) && bp->address - addr < CONDJUMP_MIN))
5878
                  {
5879
                    bp->near_label = label = gen_label_rtx ();
5880
                    bp->insert_place = insn;
5881
                    bp->address = addr;
5882
                  }
5883
                ok = redirect_jump (insn, label, 0);
5884
                gcc_assert (ok);
5885
              }
5886
            else
5887
              {
5888
                /* get_attr_length (insn) == 2 */
5889
                /* Check if we have a pattern where reorg wants to redirect
5890
                   the branch to a label from an unconditional branch that
5891
                   is too far away.  */
5892
                /* We can't use JUMP_LABEL here because it might be undefined
5893
                   when not optimizing.  */
5894
                /* A syntax error might cause beyond to be NULL_RTX.  */
5895
                beyond
5896
                  = next_active_insn (XEXP (XEXP (SET_SRC (PATTERN (insn)), 1),
5897
                                            0));
5898
 
5899
                if (beyond
5900
                    && (JUMP_P (beyond)
5901
                        || ((beyond = next_active_insn (beyond))
5902
                            && JUMP_P (beyond)))
5903
                    && GET_CODE (PATTERN (beyond)) == SET
5904
                    && recog_memoized (beyond) == CODE_FOR_jump_compact
5905
                    && ((INSN_ADDRESSES
5906
                         (INSN_UID (XEXP (SET_SRC (PATTERN (beyond)), 0)))
5907
                         - INSN_ADDRESSES (INSN_UID (insn)) + (unsigned) 252)
5908
                        > 252 + 258 + 2))
5909
                  gen_block_redirect (beyond,
5910
                                      INSN_ADDRESSES (INSN_UID (beyond)), 1);
5911
              }
5912
 
5913
            next = next_active_insn (insn);
5914
 
5915
            if (next
5916
                && (JUMP_P (next)
5917
                    || ((next = next_active_insn (next))
5918
                        && JUMP_P (next)))
5919
                && GET_CODE (PATTERN (next)) == SET
5920
                && recog_memoized (next) == CODE_FOR_jump_compact
5921
                && ((INSN_ADDRESSES
5922
                     (INSN_UID (XEXP (SET_SRC (PATTERN (next)), 0)))
5923
                     - INSN_ADDRESSES (INSN_UID (insn)) + (unsigned) 252)
5924
                    > 252 + 258 + 2))
5925
              gen_block_redirect (next, INSN_ADDRESSES (INSN_UID (next)), 1);
5926
          }
5927
        else if (type == TYPE_JUMP || type == TYPE_RETURN)
5928
          {
5929
            int addr = INSN_ADDRESSES (INSN_UID (insn));
5930
            rtx far_label = 0;
5931
            int dest_uid = 0;
5932
            struct far_branch *bp;
5933
 
5934
            if (type == TYPE_JUMP)
5935
              {
5936
                far_label = XEXP (SET_SRC (PATTERN (insn)), 0);
5937
                dest_uid = get_dest_uid (far_label, max_uid);
5938
                if (! dest_uid)
5939
                  {
5940
                    /* Parse errors can lead to labels outside
5941
                      the insn stream.  */
5942
                    if (! NEXT_INSN (far_label))
5943
                      continue;
5944
 
5945
                    if (! optimize)
5946
                      {
5947
                        JUMP_LABEL (insn) = far_label;
5948
                        LABEL_NUSES (far_label)++;
5949
                      }
5950
                    redirect_jump (insn, NULL_RTX, 1);
5951
                    far_label = 0;
5952
                  }
5953
              }
5954
            bp = uid_branch[dest_uid];
5955
            if (! bp)
5956
              {
5957
                bp = (struct far_branch *) alloca (sizeof *bp);
5958
                uid_branch[dest_uid] = bp;
5959
                bp->prev = far_branch_list;
5960
                far_branch_list = bp;
5961
                bp->near_label = 0;
5962
                bp->far_label = far_label;
5963
                if (far_label)
5964
                  LABEL_NUSES (far_label)++;
5965
              }
5966
            else if (bp->near_label && ! NEXT_INSN (bp->near_label))
5967
              if (addr - bp->address <= CONDJUMP_MAX)
5968
                emit_label_after (bp->near_label, PREV_INSN (insn));
5969
              else
5970
                {
5971
                  gen_far_branch (bp);
5972
                  bp->near_label = 0;
5973
                }
5974
            else
5975
              bp->near_label = 0;
5976
            bp->address = addr;
5977
            bp->insert_place = insn;
5978
            if (! far_label)
5979
              emit_insn_before (gen_block_branch_redirect (const0_rtx), insn);
5980
            else
5981
              gen_block_redirect (insn, addr, bp->near_label ? 2 : 0);
5982
          }
5983
      }
5984
  /* Generate all pending far branches,
5985
     and free our references to the far labels.  */
5986
  while (far_branch_list)
5987
    {
5988
      if (far_branch_list->near_label
5989
          && ! NEXT_INSN (far_branch_list->near_label))
5990
        gen_far_branch (far_branch_list);
5991
      if (optimize
5992
          && far_branch_list->far_label
5993
          && ! --LABEL_NUSES (far_branch_list->far_label))
5994
        delete_insn (far_branch_list->far_label);
5995
      far_branch_list = far_branch_list->prev;
5996
    }
5997
 
5998
  /* Instruction length information is no longer valid due to the new
5999
     instructions that have been generated.  */
6000
  init_insn_lengths ();
6001
}
6002
 
6003
/* Dump out instruction addresses, which is useful for debugging the
6004
   constant pool table stuff.
6005
 
6006
   If relaxing, output the label and pseudo-ops used to link together
6007
   calls and the instruction which set the registers.  */
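/* A rough sketch of the -mrelax annotation this produces (the label number
   and its exact spelling are hypothetical and depend on the target's label
   conventions): the insn that loads a called function's address gets an
   internal label, and the call that uses it gets a matching directive,
   roughly

       L42:                   ! emitted before the address-load insn
               ...
               .uses   L42    ! emitted before the call insn

   so the assembler/linker can associate the two when relaxing.  */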
6008
 
6009
/* ??? The addresses printed by this routine for insns are nonsense for
6010
   insns which are inside of a sequence where none of the inner insns have
6011
   variable length.  This is because the second pass of shorten_branches
6012
   does not bother to update them.  */
6013
 
6014
void
6015
final_prescan_insn (rtx insn, rtx *opvec ATTRIBUTE_UNUSED,
6016
                    int noperands ATTRIBUTE_UNUSED)
6017
{
6018
  if (TARGET_DUMPISIZE)
6019
    fprintf (asm_out_file, "\n! at %04x\n", INSN_ADDRESSES (INSN_UID (insn)));
6020
 
6021
  if (TARGET_RELAX)
6022
    {
6023
      rtx note;
6024
 
6025
      note = find_reg_note (insn, REG_LABEL_OPERAND, NULL_RTX);
6026
      if (note)
6027
        {
6028
          rtx pattern;
6029
 
6030
          pattern = PATTERN (insn);
6031
          if (GET_CODE (pattern) == PARALLEL)
6032
            pattern = XVECEXP (pattern, 0, 0);
6033
          switch (GET_CODE (pattern))
6034
            {
6035
            case SET:
6036
              if (GET_CODE (SET_SRC (pattern)) != CALL
6037
                  && get_attr_type (insn) != TYPE_SFUNC)
6038
                {
6039
                  targetm.asm_out.internal_label
6040
                    (asm_out_file, "L", CODE_LABEL_NUMBER (XEXP (note, 0)));
6041
                  break;
6042
                }
6043
              /* else FALLTHROUGH */
6044
            case CALL:
6045
              asm_fprintf (asm_out_file, "\t.uses %LL%d\n",
6046
                           CODE_LABEL_NUMBER (XEXP (note, 0)));
6047
              break;
6048
 
6049
            default:
6050
              gcc_unreachable ();
6051
            }
6052
        }
6053
    }
6054
}
6055
 
6056
/* Dump out any constants accumulated in the final pass.  These will
6057
   only be labels.  */
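/* For instance (label numbers hypothetical, exact label spelling depends on
   the target's conventions), a single pooled label comes out roughly as:

           .align 2
       L57:
           .long   L42

   i.e. an aligned word holding the address of the pooled label.  */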
6058
 
6059
const char *
6060
output_jump_label_table (void)
6061
{
6062
  int i;
6063
 
6064
  if (pool_size)
6065
    {
6066
      fprintf (asm_out_file, "\t.align 2\n");
6067
      for (i = 0; i < pool_size; i++)
6068
        {
6069
          pool_node *p = &pool_vector[i];
6070
 
6071
          (*targetm.asm_out.internal_label) (asm_out_file, "L",
6072
                                             CODE_LABEL_NUMBER (p->label));
6073
          output_asm_insn (".long       %O0", &p->value);
6074
        }
6075
      pool_size = 0;
6076
    }
6077
 
6078
  return "";
6079
}
6080
 
6081
/* A full frame looks like:
6082
 
6083
   arg-5
6084
   arg-4
6085
   [ if current_function_anonymous_args
6086
   arg-3
6087
   arg-2
6088
   arg-1
6089
   arg-0 ]
6090
   saved-fp
6091
   saved-r10
6092
   saved-r11
6093
   saved-r12
6094
   saved-pr
6095
   local-n
6096
   ..
6097
   local-1
6098
   local-0        <- fp points here.  */
6099
 
6100
/* Number of bytes pushed for anonymous args, used to pass information
6101
   between expand_prologue and expand_epilogue.  */
6102
 
6103
/* Adjust the stack by SIZE bytes.  REG holds the rtl of the register to be
6104
   adjusted.  If epilogue_p is zero, this is for a prologue; otherwise, it's
6105
   for an epilogue and a negative value means that it's for a sibcall
6106
   epilogue.  If LIVE_REGS_MASK is nonzero, it points to a HARD_REG_SET of
6107
   all the registers that are about to be restored, and hence dead.  */
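/* A hedged usage sketch (argument values are illustrative): the prologue
   below allocates frame space with something like

       output_stack_adjust (-frame_bytes, stack_pointer_rtx, 0, NULL, true);

   while the epilogue releases it with a positive size, a nonzero EPILOGUE_P
   and a LIVE_REGS_MASK so that a scratch register can be scavenged.  */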
6108
 
6109
static void
6110
output_stack_adjust (int size, rtx reg, int epilogue_p,
6111
                     HARD_REG_SET *live_regs_mask, bool frame_p)
6112
{
6113
  rtx (*emit_fn) (rtx) = frame_p ? &frame_insn : &emit_insn;
6114
  if (size)
6115
    {
6116
      HOST_WIDE_INT align = STACK_BOUNDARY / BITS_PER_UNIT;
6117
 
6118
/* This test is bogus, as output_stack_adjust is used to re-align the
6119
   stack.  */
6120
#if 0
6121
      gcc_assert (!(size % align));
6122
#endif
6123
 
6124
      if (CONST_OK_FOR_ADD (size))
6125
        emit_fn (GEN_ADD3 (reg, reg, GEN_INT (size)));
6126
      /* Try to do it with two partial adjustments; however, we must make
6127
         sure that the stack is properly aligned at all times, in case
6128
         an interrupt occurs between the two partial adjustments.  */
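      /* Worked example (hypothetical, for the non-SHmedia case, assuming
         CONST_OK_FOR_ADD accepts 8-bit signed immediates and ALIGN is 4):
         SIZE == -192 does not fit a single immediate add, but it splits into
         -96 (== -192 / 2 & -4) followed by another -96, and the stack stays
         4-byte aligned between the two adds.  */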
6129
      else if (CONST_OK_FOR_ADD (size / 2 & -align)
6130
               && CONST_OK_FOR_ADD (size - (size / 2 & -align)))
6131
        {
6132
          emit_fn (GEN_ADD3 (reg, reg, GEN_INT (size / 2 & -align)));
6133
          emit_fn (GEN_ADD3 (reg, reg, GEN_INT (size - (size / 2 & -align))));
6134
        }
6135
      else
6136
        {
6137
          rtx const_reg;
6138
          rtx insn;
6139
          int temp = epilogue_p ? 7 : (TARGET_SH5 ? 0 : 1);
6140
          int i;
6141
 
6142
          /* If TEMP is invalid, we could temporarily save a general
6143
             register to MACL.  However, there is currently no need
6144
             to handle this case, so just die when we see it.  */
6145
          if (epilogue_p < 0
6146
              || current_function_interrupt
6147
              || ! call_really_used_regs[temp] || fixed_regs[temp])
6148
            temp = -1;
6149
          if (temp < 0 && ! current_function_interrupt
6150
              && (TARGET_SHMEDIA || epilogue_p >= 0))
6151
            {
6152
              HARD_REG_SET temps;
6153
              COPY_HARD_REG_SET (temps, call_used_reg_set);
6154
              AND_COMPL_HARD_REG_SET (temps, call_fixed_reg_set);
6155
              if (epilogue_p > 0)
6156
                {
6157
                  int nreg = 0;
6158
                  if (crtl->return_rtx)
6159
                    {
6160
                      enum machine_mode mode;
6161
                      mode = GET_MODE (crtl->return_rtx);
6162
                      if (BASE_RETURN_VALUE_REG (mode) == FIRST_RET_REG)
6163
                        nreg = HARD_REGNO_NREGS (FIRST_RET_REG, mode);
6164
                    }
6165
                  for (i = 0; i < nreg; i++)
6166
                    CLEAR_HARD_REG_BIT (temps, FIRST_RET_REG + i);
6167
                  if (crtl->calls_eh_return)
6168
                    {
6169
                      CLEAR_HARD_REG_BIT (temps, EH_RETURN_STACKADJ_REGNO);
6170
                      for (i = 0; i <= 3; i++)
6171
                        CLEAR_HARD_REG_BIT (temps, EH_RETURN_DATA_REGNO (i));
6172
                    }
6173
                }
6174
              if (TARGET_SHMEDIA && epilogue_p < 0)
6175
                for (i = FIRST_TARGET_REG; i <= LAST_TARGET_REG; i++)
6176
                  CLEAR_HARD_REG_BIT (temps, i);
6177
              if (epilogue_p <= 0)
6178
                {
6179
                  for (i = FIRST_PARM_REG;
6180
                       i < FIRST_PARM_REG + NPARM_REGS (SImode); i++)
6181
                    CLEAR_HARD_REG_BIT (temps, i);
6182
                  if (cfun->static_chain_decl != NULL)
6183
                    CLEAR_HARD_REG_BIT (temps, STATIC_CHAIN_REGNUM);
6184
                }
6185
              temp = scavenge_reg (&temps);
6186
            }
6187
          if (temp < 0 && live_regs_mask)
6188
            {
6189
              HARD_REG_SET temps;
6190
 
6191
              COPY_HARD_REG_SET (temps, *live_regs_mask);
6192
              CLEAR_HARD_REG_BIT (temps, REGNO (reg));
6193
              temp = scavenge_reg (&temps);
6194
            }
6195
          if (temp < 0)
6196
            {
6197
              rtx adj_reg, tmp_reg, mem;
6198
 
6199
              /* If we reached here, the most likely case is the (sibcall)
6200
                 epilogue for non-SHmedia.  Put a special push/pop sequence
6201
                 for such a case as a last resort.  This looks lengthy but
6202
                 would not be a problem because it seems to be very
6203
                 rare.  */
6204
 
6205
              gcc_assert (!TARGET_SHMEDIA && epilogue_p);
6206
 
6207
 
6208
               /* ??? There is still the slight possibility that r4 or
6209
                  r5 have been reserved as fixed registers or assigned
6210
                  as global registers, and they change during an
6211
                  interrupt.  There are possible ways to handle this:
6212
 
6213
                  - If we are adjusting the frame pointer (r14), we can do
6214
                    with a single temp register and an ordinary push / pop
6215
                    on the stack.
6216
                  - Grab any call-used or call-saved registers (i.e. not
6217
                    fixed or globals) for the temps we need.  We might
6218
                    also grab r14 if we are adjusting the stack pointer.
6219
                    If we can't find enough available registers, issue
6220
                    a diagnostic and die - the user must have reserved
6221
                    way too many registers.
6222
                 But since all this is rather unlikely to happen and
6223
                 would require extra testing, we just die if r4 / r5
6224
                 are not available.  */
6225
              gcc_assert (!fixed_regs[4] && !fixed_regs[5]
6226
                          && !global_regs[4] && !global_regs[5]);
6227
 
6228
              adj_reg = gen_rtx_REG (GET_MODE (reg), 4);
6229
              tmp_reg = gen_rtx_REG (GET_MODE (reg), 5);
6230
              emit_move_insn (gen_tmp_stack_mem (Pmode, reg), adj_reg);
6231
              emit_insn (GEN_MOV (adj_reg, GEN_INT (size)));
6232
              emit_insn (GEN_ADD3 (adj_reg, adj_reg, reg));
6233
              mem = gen_tmp_stack_mem (Pmode, gen_rtx_PRE_DEC (Pmode, adj_reg));
6234
              emit_move_insn (mem, tmp_reg);
6235
              emit_move_insn (tmp_reg, gen_tmp_stack_mem (Pmode, reg));
6236
              mem = gen_tmp_stack_mem (Pmode, gen_rtx_PRE_DEC (Pmode, adj_reg));
6237
              emit_move_insn (mem, tmp_reg);
6238
              emit_move_insn (reg, adj_reg);
6239
              mem = gen_tmp_stack_mem (Pmode, gen_rtx_POST_INC (Pmode, reg));
6240
              emit_move_insn (adj_reg, mem);
6241
              mem = gen_tmp_stack_mem (Pmode, gen_rtx_POST_INC (Pmode, reg));
6242
              emit_move_insn (tmp_reg, mem);
6243
              /* Tell flow the insns that pop r4/r5 aren't dead.  */
6244
              emit_use (tmp_reg);
6245
              emit_use (adj_reg);
6246
              return;
6247
            }
6248
          const_reg = gen_rtx_REG (GET_MODE (reg), temp);
6249
 
6250
          /* If SIZE is negative, subtract the positive value.
6251
             This sometimes allows a constant pool entry to be shared
6252
             between prologue and epilogue code.  */
6253
          if (size < 0)
6254
            {
6255
              emit_insn (GEN_MOV (const_reg, GEN_INT (-size)));
6256
              insn = emit_fn (GEN_SUB3 (reg, reg, const_reg));
6257
            }
6258
          else
6259
            {
6260
              emit_insn (GEN_MOV (const_reg, GEN_INT (size)));
6261
              insn = emit_fn (GEN_ADD3 (reg, reg, const_reg));
6262
            }
6263
          if (! epilogue_p)
6264
            add_reg_note (insn, REG_FRAME_RELATED_EXPR,
6265
                          gen_rtx_SET (VOIDmode, reg,
6266
                                       gen_rtx_PLUS (SImode, reg,
6267
                                                     GEN_INT (size))));
6268
        }
6269
    }
6270
}
6271
 
6272
static rtx
6273
frame_insn (rtx x)
6274
{
6275
  x = emit_insn (x);
6276
  RTX_FRAME_RELATED_P (x) = 1;
6277
  return x;
6278
}
6279
 
6280
/* Output RTL to push register RN onto the stack.  */
6281
 
6282
static rtx
6283
push (int rn)
6284
{
6285
  rtx x;
6286
  if (rn == FPUL_REG)
6287
    x = gen_push_fpul ();
6288
  else if (rn == FPSCR_REG)
6289
    x = gen_push_fpscr ();
6290
  else if ((TARGET_SH4 || TARGET_SH2A_DOUBLE) && TARGET_FMOVD && ! TARGET_FPU_SINGLE
6291
           && FP_OR_XD_REGISTER_P (rn))
6292
    {
6293
      if (FP_REGISTER_P (rn) && (rn - FIRST_FP_REG) & 1)
6294
        return NULL_RTX;
6295
      x = gen_push_4 (gen_rtx_REG (DFmode, rn));
6296
    }
6297
  else if (TARGET_SH2E && FP_REGISTER_P (rn))
6298
    x = gen_push_e (gen_rtx_REG (SFmode, rn));
6299
  else
6300
    x = gen_push (gen_rtx_REG (SImode, rn));
6301
 
6302
  x = frame_insn (x);
6303
  add_reg_note (x, REG_INC, gen_rtx_REG (SImode, STACK_POINTER_REGNUM));
6304
  return x;
6305
}
6306
 
6307
/* Output RTL to pop register RN from the stack.  */
6308
 
6309
static void
6310
pop (int rn)
6311
{
6312
  rtx x;
6313
  if (rn == FPUL_REG)
6314
    x = gen_pop_fpul ();
6315
  else if (rn == FPSCR_REG)
6316
    x = gen_pop_fpscr ();
6317
  else if ((TARGET_SH4 || TARGET_SH2A_DOUBLE) && TARGET_FMOVD && ! TARGET_FPU_SINGLE
6318
           && FP_OR_XD_REGISTER_P (rn))
6319
    {
6320
      if (FP_REGISTER_P (rn) && (rn - FIRST_FP_REG) & 1)
6321
        return;
6322
      x = gen_pop_4 (gen_rtx_REG (DFmode, rn));
6323
    }
6324
  else if (TARGET_SH2E && FP_REGISTER_P (rn))
6325
    x = gen_pop_e (gen_rtx_REG (SFmode, rn));
6326
  else
6327
    x = gen_pop (gen_rtx_REG (SImode, rn));
6328
 
6329
  x = emit_insn (x);
6330
  add_reg_note (x, REG_INC, gen_rtx_REG (SImode, STACK_POINTER_REGNUM));
6331
}
6332
 
6333
/* Generate code to push the regs specified in the mask.  */
6334
 
6335
static void
6336
push_regs (HARD_REG_SET *mask, int interrupt_handler)
6337
{
6338
  int i = interrupt_handler ? LAST_BANKED_REG + 1 : 0;
6339
  int skip_fpscr = 0;
6340
 
6341
  /* Push PR last; this gives better latencies after the prologue, and
6342
     makes it a candidate for the return delay slot when there are no general
6343
     registers pushed.  */
6344
  for (; i < FIRST_PSEUDO_REGISTER; i++)
6345
    {
6346
      /* If this is an interrupt handler, and the SZ bit varies,
6347
         and we have to push any floating point register, we need
6348
         to switch to the correct precision first.  */
6349
      if (i == FIRST_FP_REG && interrupt_handler && TARGET_FMOVD
6350
          && hard_reg_set_intersect_p (*mask, reg_class_contents[DF_REGS]))
6351
        {
6352
          HARD_REG_SET unsaved;
6353
 
6354
          push (FPSCR_REG);
6355
          COMPL_HARD_REG_SET (unsaved, *mask);
6356
          fpscr_set_from_mem (NORMAL_MODE (FP_MODE), unsaved);
6357
          skip_fpscr = 1;
6358
        }
6359
      if (i != PR_REG
6360
          && (i != FPSCR_REG || ! skip_fpscr)
6361
          && TEST_HARD_REG_BIT (*mask, i))
6362
        {
6363
          /* If the ISR has the RESBANK attribute assigned, don't push any of
6364
             the following registers: R0-R14, MACH, MACL and GBR.  */
6365
          if (! (sh_cfun_resbank_handler_p ()
6366
                 && ((i >= FIRST_GENERAL_REG && i < LAST_GENERAL_REG)
6367
                     || i == MACH_REG
6368
                     || i == MACL_REG
6369
                     || i == GBR_REG)))
6370
            push (i);
6371
        }
6372
    }
6373
 
6374
  /* Push banked registers last to improve delay slot opportunities.  */
6375
  if (interrupt_handler)
6376
    for (i = FIRST_BANKED_REG; i <= LAST_BANKED_REG; i++)
6377
      if (TEST_HARD_REG_BIT (*mask, i))
6378
        push (i);
6379
 
6380
  /* Don't push PR register for an ISR with RESBANK attribute assigned.  */
6381
  if (TEST_HARD_REG_BIT (*mask, PR_REG) && !sh_cfun_resbank_handler_p ())
6382
    push (PR_REG);
6383
}
6384
 
6385
/* Calculate how much extra space is needed to save all callee-saved
6386
   target registers.
6387
   LIVE_REGS_MASK is the register mask calculated by calc_live_regs.  */
6388
 
6389
static int
6390
shmedia_target_regs_stack_space (HARD_REG_SET *live_regs_mask)
6391
{
6392
  int reg;
6393
  int stack_space = 0;
6394
  int interrupt_handler = sh_cfun_interrupt_handler_p ();
6395
 
6396
  for (reg = LAST_TARGET_REG; reg >= FIRST_TARGET_REG; reg--)
6397
    if ((! call_really_used_regs[reg] || interrupt_handler)
6398
        && ! TEST_HARD_REG_BIT (*live_regs_mask, reg))
6399
      /* Leave space to save this target register on the stack,
6400
         in case target register allocation wants to use it.  */
6401
      stack_space += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg));
6402
  return stack_space;
6403
}
6404
 
6405
/* Decide whether we should reserve space for callee-save target registers,
6406
   in case target register allocation wants to use them.  REGS_SAVED is
6407
   the space, in bytes, that is already required for register saves.
6408
   LIVE_REGS_MASK is the register mask calculated by calc_live_regs.  */
6409
 
6410
static int
6411
shmedia_reserve_space_for_target_registers_p (int regs_saved,
6412
                                              HARD_REG_SET *live_regs_mask)
6413
{
6414
  if (optimize_size)
6415
    return 0;
6416
  return shmedia_target_regs_stack_space (live_regs_mask) <= regs_saved;
6417
}
6418
 
6419
/* Decide how much space to reserve for callee-save target registers
6420
   in case target register allocation wants to use them.
6421
   LIVE_REGS_MASK is the register mask calculated by calc_live_regs.  */
6422
 
6423
static int
6424
shmedia_target_regs_stack_adjust (HARD_REG_SET *live_regs_mask)
6425
{
6426
  if (shmedia_space_reserved_for_target_registers)
6427
    return shmedia_target_regs_stack_space (live_regs_mask);
6428
  else
6429
    return 0;
6430
}
6431
 
6432
/* Work out the registers which need to be saved, both as a mask and a
6433
   count of saved words.  Return the count.
6434
 
6435
   If doing a pragma interrupt function, then push all regs used by the
6436
   function, and if we call another function (we can tell by looking at PR),
6437
   make sure that all the regs it clobbers are safe too.  */
6438
 
6439
static int
6440
calc_live_regs (HARD_REG_SET *live_regs_mask)
6441
{
6442
  unsigned int reg;
6443
  int count;
6444
  tree attrs;
6445
  bool interrupt_or_trapa_handler, trapa_handler, interrupt_handler;
6446
  bool nosave_low_regs;
6447
  int pr_live, has_call;
6448
 
6449
  attrs = DECL_ATTRIBUTES (current_function_decl);
6450
  interrupt_or_trapa_handler = sh_cfun_interrupt_handler_p ();
6451
  trapa_handler = lookup_attribute ("trapa_handler", attrs) != NULL_TREE;
6452
  interrupt_handler = interrupt_or_trapa_handler && ! trapa_handler;
6453
  nosave_low_regs = lookup_attribute ("nosave_low_regs", attrs) != NULL_TREE;
6454
 
6455
  CLEAR_HARD_REG_SET (*live_regs_mask);
6456
  if ((TARGET_SH4 || TARGET_SH2A_DOUBLE) && TARGET_FMOVD && interrupt_handler
6457
      && df_regs_ever_live_p (FPSCR_REG))
6458
    target_flags &= ~MASK_FPU_SINGLE;
6459
  /* If we can save a lot of saves by switching to double mode, do that.  */
6460
  else if ((TARGET_SH4 || TARGET_SH2A_DOUBLE) && TARGET_FMOVD && TARGET_FPU_SINGLE)
6461
    for (count = 0, reg = FIRST_FP_REG; reg <= LAST_FP_REG; reg += 2)
6462
      if (df_regs_ever_live_p (reg) && df_regs_ever_live_p (reg+1)
6463
          && (! call_really_used_regs[reg]
6464
              || interrupt_handler)
6465
          && ++count > 2)
6466
        {
6467
          target_flags &= ~MASK_FPU_SINGLE;
6468
          break;
6469
        }
6470
  /* PR_MEDIA_REG is a general purpose register, thus global_alloc already
6471
     knows how to use it.  That means the pseudo originally allocated for
6472
     the initial value can become the PR_MEDIA_REG hard register, as seen for
6473
     execute/20010122-1.c:test9.  */
6474
  if (TARGET_SHMEDIA)
6475
    /* ??? this function is called from initial_elimination_offset, hence we
6476
       can't use the result of sh_media_register_for_return here.  */
6477
    pr_live = sh_pr_n_sets ();
6478
  else
6479
    {
6480
      rtx pr_initial = has_hard_reg_initial_val (Pmode, PR_REG);
6481
      pr_live = (pr_initial
6482
                 ? (!REG_P (pr_initial)
6483
                    || REGNO (pr_initial) != (PR_REG))
6484
                 : df_regs_ever_live_p (PR_REG));
6485
      /* For SHcompact, if not optimizing, we end up with a memory reference
6486
         using the return address pointer for __builtin_return_address even
6487
         though there is no actual need to put the PR register on the stack.  */
6488
      pr_live |= df_regs_ever_live_p (RETURN_ADDRESS_POINTER_REGNUM);
6489
    }
6490
  /* Force PR to be live if the prologue has to call the SHmedia
6491
     argument decoder or register saver.  */
6492
  if (TARGET_SHCOMPACT
6493
      && ((crtl->args.info.call_cookie
6494
           & ~ CALL_COOKIE_RET_TRAMP (1))
6495
          || crtl->saves_all_registers))
6496
    pr_live = 1;
6497
  has_call = TARGET_SHMEDIA ? ! leaf_function_p () : pr_live;
6498
  for (count = 0, reg = FIRST_PSEUDO_REGISTER; reg-- != 0; )
6499
    {
6500
      if (reg == (TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG)
6501
          ? pr_live
6502
          : interrupt_handler
6503
          ? (/* Need to save all the regs ever live.  */
6504
             (df_regs_ever_live_p (reg)
6505
              || (call_really_used_regs[reg]
6506
                  && (! fixed_regs[reg] || reg == MACH_REG || reg == MACL_REG
6507
                      || reg == PIC_OFFSET_TABLE_REGNUM)
6508
                  && has_call)
6509
              || (TARGET_SHMEDIA && has_call
6510
                  && REGISTER_NATURAL_MODE (reg) == SImode
6511
                  && (GENERAL_REGISTER_P (reg) || TARGET_REGISTER_P (reg))))
6512
             && reg != STACK_POINTER_REGNUM && reg != ARG_POINTER_REGNUM
6513
             && reg != RETURN_ADDRESS_POINTER_REGNUM
6514
             && reg != T_REG && reg != GBR_REG
6515
             /* Push fpscr only on targets which have an FPU.  */
6516
             && (reg != FPSCR_REG || TARGET_FPU_ANY))
6517
          : (/* Only push those regs which are used and need to be saved.  */
6518
             (TARGET_SHCOMPACT
6519
              && flag_pic
6520
              && crtl->args.info.call_cookie
6521
              && reg == PIC_OFFSET_TABLE_REGNUM)
6522
             || (df_regs_ever_live_p (reg)
6523
                 && ((!call_really_used_regs[reg]
6524
                      && !(reg != PIC_OFFSET_TABLE_REGNUM
6525
                           && fixed_regs[reg] && call_used_regs[reg]))
6526
                     || (trapa_handler && reg == FPSCR_REG && TARGET_FPU_ANY)))
6527
             || (crtl->calls_eh_return
6528
                 && (reg == EH_RETURN_DATA_REGNO (0)
6529
                     || reg == EH_RETURN_DATA_REGNO (1)
6530
                     || reg == EH_RETURN_DATA_REGNO (2)
6531
                     || reg == EH_RETURN_DATA_REGNO (3)))
6532
             || ((reg == MACL_REG || reg == MACH_REG)
6533
                 && df_regs_ever_live_p (reg)
6534
                 && sh_cfun_attr_renesas_p ())
6535
             ))
6536
        {
6537
          SET_HARD_REG_BIT (*live_regs_mask, reg);
6538
          count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg));
6539
 
6540
          if ((TARGET_SH4 || TARGET_SH2A_DOUBLE || TARGET_SH5) && TARGET_FMOVD
6541
              && GET_MODE_CLASS (REGISTER_NATURAL_MODE (reg)) == MODE_FLOAT)
6542
            {
6543
              if (FP_REGISTER_P (reg))
6544
                {
6545
                  if (! TARGET_FPU_SINGLE && ! df_regs_ever_live_p (reg ^ 1))
6546
                    {
6547
                      SET_HARD_REG_BIT (*live_regs_mask, (reg ^ 1));
6548
                      count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg ^ 1));
6549
                    }
6550
                }
6551
              else if (XD_REGISTER_P (reg))
6552
                {
6553
                  /* Must switch to double mode to access these registers.  */
6554
                  target_flags &= ~MASK_FPU_SINGLE;
6555
                }
6556
            }
6557
        }
6558
      if (nosave_low_regs && reg == R8_REG)
6559
        break;
6560
    }
6561
  /* If we have a target register optimization pass after prologue / epilogue
6562
     threading, we need to assume all target registers will be live even if
6563
     they aren't now.  */
6564
  if (flag_branch_target_load_optimize2
6565
      && TARGET_SAVE_ALL_TARGET_REGS
6566
      && shmedia_space_reserved_for_target_registers)
6567
    for (reg = LAST_TARGET_REG; reg >= FIRST_TARGET_REG; reg--)
6568
      if ((! call_really_used_regs[reg] || interrupt_handler)
6569
          && ! TEST_HARD_REG_BIT (*live_regs_mask, reg))
6570
        {
6571
          SET_HARD_REG_BIT (*live_regs_mask, reg);
6572
          count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg));
6573
        }
6574
  /* If this is an interrupt handler, we don't have any call-clobbered
6575
     registers we can conveniently use for target register save/restore.
6576
     Make sure we save at least one general purpose register when we need
6577
     to save target registers.  */
6578
  if (interrupt_handler
6579
      && hard_reg_set_intersect_p (*live_regs_mask,
6580
                                   reg_class_contents[TARGET_REGS])
6581
      && ! hard_reg_set_intersect_p (*live_regs_mask,
6582
                                     reg_class_contents[GENERAL_REGS]))
6583
    {
6584
      SET_HARD_REG_BIT (*live_regs_mask, R0_REG);
6585
      count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (R0_REG));
6586
    }
6587
 
6588
  return count;
6589
}
6590
 
6591
/* Code to generate prologue and epilogue sequences */
6592
 
6593
/* PUSHED is the number of bytes that are being pushed on the
6594
   stack for register saves.  Return the frame size, padded
6595
   appropriately so that the stack stays properly aligned.  */
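/* Worked example with hypothetical numbers: if get_frame_size () is 22,
   PUSHED is 12 and the alignment is 4 bytes, then
   ((22 + 12 + 4 - 1) & -4) - 12 == 36 - 12 == 24, i.e. the 22-byte frame is
   padded to 24 so that the pushed bytes plus the frame (36) stay a multiple
   of 4.  */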
6596
static HOST_WIDE_INT
6597
rounded_frame_size (int pushed)
6598
{
6599
  HOST_WIDE_INT size = get_frame_size ();
6600
  HOST_WIDE_INT align = STACK_BOUNDARY / BITS_PER_UNIT;
6601
 
6602
  return ((size + pushed + align - 1) & -align) - pushed;
6603
}
6604
 
6605
/* Choose a call-clobbered target-branch register that remains
6606
   unchanged along the whole function.  We set it up as the return
6607
   value in the prologue.  */
6608
int
6609
sh_media_register_for_return (void)
6610
{
6611
  int regno;
6612
  int tr0_used;
6613
 
6614
  if (! current_function_is_leaf)
6615
    return -1;
6616
  if (lookup_attribute ("interrupt_handler",
6617
                        DECL_ATTRIBUTES (current_function_decl)))
6618
    return -1;
6619
  if (sh_cfun_interrupt_handler_p ())
6620
    return -1;
6621
 
6622
  tr0_used = flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);
6623
 
6624
  for (regno = FIRST_TARGET_REG + tr0_used; regno <= LAST_TARGET_REG; regno++)
6625
    if (call_really_used_regs[regno] && ! df_regs_ever_live_p (regno))
6626
      return regno;
6627
 
6628
  return -1;
6629
}
6630
 
6631
/* The maximum registers we need to save are:
6632
   - 62 general purpose registers (r15 is stack pointer, r63 is zero)
6633
   - 32 floating point registers (for each pair, we save none,
6634
         one single precision value, or a double precision value).
6635
   -  8 target registers
6636
   -  add 1 entry for a delimiter.  */
6637
#define MAX_SAVED_REGS (62+32+8)
6638
 
6639
typedef struct save_entry_s
6640
{
6641
  unsigned char reg;
6642
  unsigned char mode;
6643
  short offset;
6644
} save_entry;
6645
 
6646
#define MAX_TEMPS 4
6647
 
6648
/* There will be a delimiter entry with VOIDmode both at the start and the
6649
   end of a filled in schedule.  The end delimiter has the offset of the
6650
   save with the smallest (i.e. most negative) offset.  */
6651
typedef struct save_schedule_s
6652
{
6653
  save_entry entries[MAX_SAVED_REGS + 2];
6654
  int temps[MAX_TEMPS+1];
6655
} save_schedule;
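/* A hypothetical filled-in schedule for one 8-byte save followed by one
   4-byte save, with OFFSET_BASE == 0 (register choices are illustrative):

     entries[0]: reg = -1, mode = VOIDmode, offset =   0   (start delimiter)
     entries[1]: reg = rA, mode = DImode,   offset =  -8
     entries[2]: reg = rB, mode = SImode,   offset = -12
     entries[3]: reg = -1, mode = VOIDmode, offset = -12   (end delimiter)

   matching the comment above: the end delimiter carries the most negative
   offset.  */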
6656
 
6657
/* Fill in SCHEDULE according to LIVE_REGS_MASK; the epilogue walks the
6658
   filled-in entries in reverse order.  Returns the last entry written to
6659
   (not counting the delimiter).  OFFSET_BASE is a number to be added to all
6660
   offset entries.  */
6661
 
6662
static save_entry *
6663
sh5_schedule_saves (HARD_REG_SET *live_regs_mask, save_schedule *schedule,
6664
                    int offset_base)
6665
{
6666
  int align, i;
6667
  save_entry *entry = schedule->entries;
6668
  int tmpx = 0;
6669
  int offset;
6670
 
6671
  if (! current_function_interrupt)
6672
    for (i = FIRST_GENERAL_REG; tmpx < MAX_TEMPS && i <= LAST_GENERAL_REG; i++)
6673
      if (call_really_used_regs[i] && ! fixed_regs[i] && i != PR_MEDIA_REG
6674
          && ! FUNCTION_ARG_REGNO_P (i)
6675
          && i != FIRST_RET_REG
6676
          && ! (cfun->static_chain_decl != NULL && i == STATIC_CHAIN_REGNUM)
6677
          && ! (crtl->calls_eh_return
6678
                && (i == EH_RETURN_STACKADJ_REGNO
6679
                    || ((unsigned) i >= EH_RETURN_DATA_REGNO (0)
6680
                        && (unsigned) i <= EH_RETURN_DATA_REGNO (3)))))
6681
        schedule->temps[tmpx++] = i;
6682
  entry->reg = -1;
6683
  entry->mode = VOIDmode;
6684
  entry->offset = offset_base;
6685
  entry++;
6686
  /* We loop twice: first, we save 8-byte aligned registers in the
6687
     higher addresses, that are known to be aligned.  Then, we
6688
     proceed to saving 32-bit registers that don't need 8-byte
6689
     alignment.
6690
     If this is an interrupt function, all registers that need saving
6691
     need to be saved in full.  Moreover, we need to postpone saving
6692
     target registers till we have saved some general purpose registers
6693
     we can then use as scratch registers.  */
6694
  offset = offset_base;
6695
  for (align = 1; align >= 0; align--)
6696
    {
6697
      for (i = FIRST_PSEUDO_REGISTER - 1; i >= 0; i--)
6698
        if (TEST_HARD_REG_BIT (*live_regs_mask, i))
6699
          {
6700
            enum machine_mode mode = REGISTER_NATURAL_MODE (i);
6701
            int reg = i;
6702
 
6703
            if (current_function_interrupt)
6704
              {
6705
                if (TARGET_REGISTER_P (i))
6706
                  continue;
6707
                if (GENERAL_REGISTER_P (i))
6708
                  mode = DImode;
6709
              }
6710
            if (mode == SFmode && (i % 2) == 1
6711
                && ! TARGET_FPU_SINGLE && FP_REGISTER_P (i)
6712
                && (TEST_HARD_REG_BIT (*live_regs_mask, (i ^ 1))))
6713
              {
6714
                mode = DFmode;
6715
                i--;
6716
                reg--;
6717
              }
6718
 
6719
            /* If we're doing the aligned pass and this is not aligned,
6720
               or we're doing the unaligned pass and this is aligned,
6721
               skip it.  */
6722
            if ((GET_MODE_SIZE (mode) % (STACK_BOUNDARY / BITS_PER_UNIT) == 0)
6723
                != align)
6724
              continue;
6725
 
6726
            if (current_function_interrupt
6727
                && GENERAL_REGISTER_P (i)
6728
                && tmpx < MAX_TEMPS)
6729
              schedule->temps[tmpx++] = i;
6730
 
6731
            offset -= GET_MODE_SIZE (mode);
6732
            entry->reg = i;
6733
            entry->mode = mode;
6734
            entry->offset = offset;
6735
            entry++;
6736
          }
6737
      if (align && current_function_interrupt)
6738
        for (i = LAST_TARGET_REG; i >= FIRST_TARGET_REG; i--)
6739
          if (TEST_HARD_REG_BIT (*live_regs_mask, i))
6740
            {
6741
              offset -= GET_MODE_SIZE (DImode);
6742
              entry->reg = i;
6743
              entry->mode = DImode;
6744
              entry->offset = offset;
6745
              entry++;
6746
            }
6747
    }
6748
  entry->reg = -1;
6749
  entry->mode = VOIDmode;
6750
  entry->offset = offset;
6751
  schedule->temps[tmpx] = -1;
6752
  return entry - 1;
6753
}
6754
 
6755
void
6756
sh_expand_prologue (void)
6757
{
6758
  HARD_REG_SET live_regs_mask;
6759
  int d, i;
6760
  int d_rounding = 0;
6761
  int save_flags = target_flags;
6762
  int pretend_args;
6763
  tree sp_switch_attr
6764
    = lookup_attribute ("sp_switch", DECL_ATTRIBUTES (current_function_decl));
6765
 
6766
  current_function_interrupt = sh_cfun_interrupt_handler_p ();
6767
 
6768
  /* We have pretend args if we had an object sent partially in registers
6769
     and partially on the stack, e.g. a large structure.  */
6770
  pretend_args = crtl->args.pretend_args_size;
6771
  if (TARGET_VARARGS_PRETEND_ARGS (current_function_decl)
6772
      && (NPARM_REGS(SImode)
6773
          > crtl->args.info.arg_count[(int) SH_ARG_INT]))
6774
    pretend_args = 0;
6775
  /* Dwarf2 module doesn't expect frame related insns here.  */
6776
  output_stack_adjust (-pretend_args
6777
                       - crtl->args.info.stack_regs * 8,
6778
                       stack_pointer_rtx, 0, NULL, false);
6779
 
6780
  if (TARGET_SHCOMPACT && flag_pic && crtl->args.info.call_cookie)
6781
    /* We're going to use the PIC register to load the address of the
6782
       incoming-argument decoder and/or of the return trampoline from
6783
       the GOT, so make sure the PIC register is preserved and
6784
       initialized.  */
6785
    df_set_regs_ever_live (PIC_OFFSET_TABLE_REGNUM, true);
6786
 
6787
  if (TARGET_SHCOMPACT
6788
      && (crtl->args.info.call_cookie & ~ CALL_COOKIE_RET_TRAMP(1)))
6789
    {
6790
      int reg;
6791
 
6792
      /* First, make all registers with incoming arguments that will
6793
         be pushed onto the stack live, so that register renaming
6794
         doesn't overwrite them.  */
6795
      for (reg = 0; reg < NPARM_REGS (SImode); reg++)
6796
        if (CALL_COOKIE_STACKSEQ_GET (crtl->args.info.call_cookie)
6797
            >= NPARM_REGS (SImode) - reg)
6798
          for (; reg < NPARM_REGS (SImode); reg++)
6799
            emit_insn (gen_shcompact_preserve_incoming_args
6800
                       (gen_rtx_REG (SImode, FIRST_PARM_REG + reg)));
6801
        else if (CALL_COOKIE_INT_REG_GET
6802
                 (crtl->args.info.call_cookie, reg) == 1)
6803
          emit_insn (gen_shcompact_preserve_incoming_args
6804
                     (gen_rtx_REG (SImode, FIRST_PARM_REG + reg)));
6805
 
6806
      emit_move_insn (gen_rtx_REG (Pmode, MACL_REG),
6807
                      stack_pointer_rtx);
6808
      emit_move_insn (gen_rtx_REG (SImode, R0_REG),
6809
                      GEN_INT (crtl->args.info.call_cookie));
6810
      emit_move_insn (gen_rtx_REG (SImode, MACH_REG),
6811
                      gen_rtx_REG (SImode, R0_REG));
6812
    }
6813
  else if (TARGET_SHMEDIA)
6814
    {
6815
      int tr = sh_media_register_for_return ();
6816
 
6817
      if (tr >= 0)
6818
        emit_move_insn (gen_rtx_REG (DImode, tr),
6819
                        gen_rtx_REG (DImode, PR_MEDIA_REG));
6820
    }
6821
 
6822
  /* Emit the code for SETUP_VARARGS.  */
6823
  if (cfun->stdarg)
6824
    {
6825
      if (TARGET_VARARGS_PRETEND_ARGS (current_function_decl))
6826
        {
6827
          /* Push arg regs as if they'd been provided by the caller on the stack.  */
6828
          for (i = 0; i < NPARM_REGS(SImode); i++)
6829
            {
6830
              int rn = NPARM_REGS(SImode) + FIRST_PARM_REG - i - 1;
6831
              rtx insn;
6832
 
6833
              if (i >= (NPARM_REGS(SImode)
6834
                        - crtl->args.info.arg_count[(int) SH_ARG_INT]
6835
                        ))
6836
                break;
6837
              insn = push (rn);
6838
            }
6839
        }
6840
    }
6841
 
6842
  /* If we're supposed to switch stacks at function entry, do so now.  */
6843
  if (sp_switch_attr)
6844
    {
6845
      rtx lab, newsrc;
6846
      /* The argument specifies a variable holding the address of the
6847
         stack the interrupt function should switch to/from at entry/exit.  */
6848
      tree arg = TREE_VALUE ( TREE_VALUE (sp_switch_attr));
6849
      const char *s
6850
        = ggc_strdup (TREE_STRING_POINTER (arg));
6851
      rtx sp_switch = gen_rtx_SYMBOL_REF (Pmode, s);
6852
 
6853
      lab = add_constant (sp_switch, SImode, 0);
6854
      newsrc = gen_rtx_LABEL_REF (VOIDmode, lab);
6855
      newsrc = gen_const_mem (SImode, newsrc);
6856
 
6857
      emit_insn (gen_sp_switch_1 (newsrc));
6858
    }
6859
 
6860
  d = calc_live_regs (&live_regs_mask);
6861
  /* ??? Maybe we could save some switching if we can move a mode switch
6862
     that already happens to be at the function start into the prologue.  */
6863
  if (target_flags != save_flags && ! current_function_interrupt)
6864
    emit_insn (gen_toggle_sz ());
6865
 
6866
  if (TARGET_SH5)
6867
    {
6868
      int offset_base, offset;
6869
      rtx r0 = NULL_RTX;
6870
      int offset_in_r0 = -1;
6871
      int sp_in_r0 = 0;
6872
      int tregs_space = shmedia_target_regs_stack_adjust (&live_regs_mask);
6873
      int total_size, save_size;
6874
      save_schedule schedule;
6875
      save_entry *entry;
6876
      int *tmp_pnt;
6877
 
6878
      if (call_really_used_regs[R0_REG] && ! fixed_regs[R0_REG]
6879
          && ! current_function_interrupt)
6880
        r0 = gen_rtx_REG (Pmode, R0_REG);
6881
 
6882
      /* D is the actual number of bytes that we need for saving registers;
6883
         however, in initial_elimination_offset we have committed to using
6884
         an additional TREGS_SPACE amount of bytes - in order to keep both
6885
         addresses to arguments supplied by the caller and local variables
6886
         valid, we must keep this gap.  Place it between the incoming
6887
         arguments and the actually saved registers in a bid to optimize
6888
         locality of reference.  */
6889
      total_size = d + tregs_space;
6890
      total_size += rounded_frame_size (total_size);
6891
      save_size = total_size - rounded_frame_size (d);
6892
      if (save_size % (STACK_BOUNDARY / BITS_PER_UNIT))
6893
        d_rounding = ((STACK_BOUNDARY / BITS_PER_UNIT)
6894
                        - save_size % (STACK_BOUNDARY / BITS_PER_UNIT));
6895
 
6896
      /* If adjusting the stack in a single step costs nothing extra, do so.
6897
         I.e. either if a single addi is enough, or we need a movi anyway,
6898
         and we don't exceed the maximum offset range (the test for the
6899
         latter is conservative for simplicity).  */
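      /* Hypothetical numbers, assuming CONST_OK_FOR_I10 covers 10-bit signed
         immediates (-512 .. 511): with total_size == 400 the whole -400 fits
         a single addi; with total_size == 1200 and save_size + d_rounding ==
         700, the partial step of -700 does not fit an addi either, so since
         1200 <= 2044 the movi we need anyway covers the full -1200 in one
         step.  */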
6900
      if (TARGET_SHMEDIA
6901
          && (CONST_OK_FOR_I10 (-total_size)
6902
              || (! CONST_OK_FOR_I10 (-(save_size + d_rounding))
6903
                  && total_size <= 2044)))
6904
        d_rounding = total_size - save_size;
6905
 
6906
      offset_base = d + d_rounding;
6907
 
6908
      output_stack_adjust (-(save_size + d_rounding), stack_pointer_rtx,
6909
                           0, NULL, true);
6910
 
6911
      sh5_schedule_saves (&live_regs_mask, &schedule, offset_base);
6912
      tmp_pnt = schedule.temps;
6913
      for (entry = &schedule.entries[1]; entry->mode != VOIDmode; entry++)
6914
        {
6915
          enum machine_mode mode = (enum machine_mode) entry->mode;
6916
          unsigned int reg = entry->reg;
6917
          rtx reg_rtx, mem_rtx, pre_dec = NULL_RTX;
6918
          rtx orig_reg_rtx;
6919
 
6920
          offset = entry->offset;
6921
 
6922
          reg_rtx = gen_rtx_REG (mode, reg);
6923
 
6924
          mem_rtx = gen_frame_mem (mode,
6925
                                   gen_rtx_PLUS (Pmode,
6926
                                                 stack_pointer_rtx,
6927
                                                 GEN_INT (offset)));
6928
 
6929
          if (!memory_address_p (mode, XEXP (mem_rtx, 0)))
6930
            {
6931
              gcc_assert (r0);
6932
              mem_rtx = NULL_RTX;
6933
            }
6934
 
6935
          if (HAVE_PRE_DECREMENT
6936
              && (offset_in_r0 - offset == GET_MODE_SIZE (mode)
6937
                  || mem_rtx == NULL_RTX
6938
                  || reg == PR_REG || SPECIAL_REGISTER_P (reg)))
6939
            {
6940
              pre_dec = gen_frame_mem (mode, gen_rtx_PRE_DEC (Pmode, r0));
6941
 
6942
              if (!memory_address_p (mode, XEXP (pre_dec, 0)))
6943
                pre_dec = NULL_RTX;
6944
              else
6945
                {
6946
                  mem_rtx = NULL_RTX;
6947
                  offset += GET_MODE_SIZE (mode);
6948
                }
6949
            }
6950
 
6951
          if (mem_rtx != NULL_RTX)
6952
            goto addr_ok;
6953
 
6954
          if (offset_in_r0 == -1)
6955
            {
6956
              emit_move_insn (r0, GEN_INT (offset));
6957
              offset_in_r0 = offset;
6958
            }
6959
          else if (offset != offset_in_r0)
6960
            {
6961
              emit_move_insn (r0,
6962
                              gen_rtx_PLUS
6963
                              (Pmode, r0,
6964
                               GEN_INT (offset - offset_in_r0)));
6965
              offset_in_r0 += offset - offset_in_r0;
6966
            }
6967
 
6968
          if (pre_dec != NULL_RTX)
6969
            {
6970
              if (! sp_in_r0)
6971
                {
6972
                  emit_move_insn (r0,
6973
                                  gen_rtx_PLUS
6974
                                  (Pmode, r0, stack_pointer_rtx));
6975
                  sp_in_r0 = 1;
6976
                }
6977
 
6978
              offset -= GET_MODE_SIZE (mode);
6979
              offset_in_r0 -= GET_MODE_SIZE (mode);
6980
 
6981
              mem_rtx = pre_dec;
6982
            }
6983
          else if (sp_in_r0)
6984
            mem_rtx = gen_frame_mem (mode, r0);
6985
          else
6986
            mem_rtx = gen_frame_mem (mode,
6987
                                     gen_rtx_PLUS (Pmode,
6988
                                                   stack_pointer_rtx,
6989
                                                   r0));
6990
 
6991
          /* We must not use an r0-based address for target-branch
6992
             registers or for special registers without pre-dec
6993
             memory addresses, since we store their values in r0
6994
             first.  */
6995
          gcc_assert (!TARGET_REGISTER_P (reg)
6996
                      && ((reg != PR_REG && !SPECIAL_REGISTER_P (reg))
6997
                          || mem_rtx == pre_dec));
6998
 
6999
        addr_ok:
7000
          orig_reg_rtx = reg_rtx;
7001
          if (TARGET_REGISTER_P (reg)
7002
              || ((reg == PR_REG || SPECIAL_REGISTER_P (reg))
7003
                  && mem_rtx != pre_dec))
7004
            {
7005
              rtx tmp_reg = gen_rtx_REG (GET_MODE (reg_rtx), *tmp_pnt);
7006
 
7007
              emit_move_insn (tmp_reg, reg_rtx);
7008
 
7009
              if (REGNO (tmp_reg) == R0_REG)
7010
                {
7011
                  offset_in_r0 = -1;
7012
                  sp_in_r0 = 0;
7013
                  gcc_assert (!refers_to_regno_p
7014
                              (R0_REG, R0_REG+1, mem_rtx, (rtx *) 0));
7015
                }
7016
 
7017
              if (*++tmp_pnt <= 0)
7018
                tmp_pnt = schedule.temps;
7019
 
7020
              reg_rtx = tmp_reg;
7021
            }
7022
          {
7023
            rtx insn;
7024
 
7025
            /* Mark as interesting for the DWARF CFI generator.  */
7026
            insn = emit_move_insn (mem_rtx, reg_rtx);
7027
            RTX_FRAME_RELATED_P (insn) = 1;
7028
            /* If we use an intermediate register for the save, we can't
7029
               describe this exactly in cfi as a copy of the to-be-saved
7030
                register into the temporary register and then a store of the
7031
                temporary register to the stack, because the temporary register can
7032
               have a different natural size than the to-be-saved register.
7033
               Thus, we gloss over the intermediate copy and pretend we do
7034
               a direct save from the to-be-saved register.  */
7035
            if (REGNO (reg_rtx) != reg)
7036
              {
7037
                rtx set;
7038
 
7039
                set = gen_rtx_SET (VOIDmode, mem_rtx, orig_reg_rtx);
7040
                add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
7041
              }
7042
 
7043
            if (TARGET_SHCOMPACT && (offset_in_r0 != -1))
7044
              {
7045
                rtx reg_rtx = gen_rtx_REG (mode, reg);
7046
                rtx set;
7047
                rtx mem_rtx = gen_frame_mem (mode,
7048
                                             gen_rtx_PLUS (Pmode,
7049
                                                           stack_pointer_rtx,
7050
                                                           GEN_INT (offset)));
7051
 
7052
                set = gen_rtx_SET (VOIDmode, mem_rtx, reg_rtx);
7053
                add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
7054
              }
7055
          }
7056
        }
7057
 
7058
      gcc_assert (entry->offset == d_rounding);
7059
    }
7060
  else
7061
    push_regs (&live_regs_mask, current_function_interrupt);
7062
 
7063
  if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
7064
    emit_insn (gen_GOTaddr2picreg ());
7065
 
7066
  if (SHMEDIA_REGS_STACK_ADJUST ())
7067
    {
7068
      /* This must NOT go through the PLT, otherwise mach and macl
7069
         may be clobbered.  */
7070
      function_symbol (gen_rtx_REG (Pmode, R0_REG),
7071
                       (TARGET_FPU_ANY
7072
                        ? "__GCC_push_shmedia_regs"
7073
                        : "__GCC_push_shmedia_regs_nofpu"), SFUNC_GOT);
7074
      emit_insn (gen_shmedia_save_restore_regs_compact
7075
                 (GEN_INT (-SHMEDIA_REGS_STACK_ADJUST ())));
7076
    }
7077
 
7078
  if (target_flags != save_flags && ! current_function_interrupt)
7079
    emit_insn (gen_toggle_sz ());
7080
 
7081
  target_flags = save_flags;
7082
 
7083
  output_stack_adjust (-rounded_frame_size (d) + d_rounding,
7084
                       stack_pointer_rtx, 0, NULL, true);
7085
 
7086
  if (frame_pointer_needed)
7087
    frame_insn (GEN_MOV (hard_frame_pointer_rtx, stack_pointer_rtx));
7088
 
7089
  if (TARGET_SHCOMPACT
7090
      && (crtl->args.info.call_cookie & ~ CALL_COOKIE_RET_TRAMP(1)))
7091
    {
7092
      /* This must NOT go through the PLT, otherwise mach and macl
7093
         may be clobbered.  */
7094
      function_symbol (gen_rtx_REG (Pmode, R0_REG),
7095
                      "__GCC_shcompact_incoming_args", SFUNC_GOT);
7096
      emit_insn (gen_shcompact_incoming_args ());
7097
    }
7098
}
7099
 
7100
void
7101
sh_expand_epilogue (bool sibcall_p)
7102
{
7103
  HARD_REG_SET live_regs_mask;
7104
  int d, i;
7105
  int d_rounding = 0;
7106
 
7107
  int save_flags = target_flags;
7108
  int frame_size, save_size;
7109
  int fpscr_deferred = 0;
7110
  int e = sibcall_p ? -1 : 1;
7111
 
7112
  d = calc_live_regs (&live_regs_mask);
7113
 
7114
  save_size = d;
7115
  frame_size = rounded_frame_size (d);
7116
 
7117
  if (TARGET_SH5)
7118
    {
7119
      int tregs_space = shmedia_target_regs_stack_adjust (&live_regs_mask);
7120
      int total_size;
7121
      if (d % (STACK_BOUNDARY / BITS_PER_UNIT))
7122
        d_rounding = ((STACK_BOUNDARY / BITS_PER_UNIT)
7123
                      - d % (STACK_BOUNDARY / BITS_PER_UNIT));
7124
 
7125
      total_size = d + tregs_space;
7126
      total_size += rounded_frame_size (total_size);
7127
      save_size = total_size - frame_size;
7128
 
7129
      /* If adjusting the stack in a single step costs nothing extra, do so.
7130
         I.e. either if a single addi is enough, or we need a movi anyway,
7131
         and we don't exceed the maximum offset range (the test for the
7132
         latter is conservative for simplicity).  */
7133
      if (TARGET_SHMEDIA
7134
          && ! frame_pointer_needed
7135
          && (CONST_OK_FOR_I10 (total_size)
7136
              || (! CONST_OK_FOR_I10 (save_size + d_rounding)
7137
                  && total_size <= 2044)))
7138
        d_rounding = frame_size;
7139
 
7140
      frame_size -= d_rounding;
7141
    }
7142
 
7143
  if (frame_pointer_needed)
7144
    {
7145
      /* We must avoid scheduling the epilogue with previous basic blocks.
7146
         See PR/18032 and PR/40313.  */
7147
      emit_insn (gen_blockage ());
7148
      output_stack_adjust (frame_size, hard_frame_pointer_rtx, e,
7149
                           &live_regs_mask, false);
7150
 
7151
      /* We must avoid moving the stack pointer adjustment past code
7152
         which reads from the local frame, else an interrupt could
7153
         occur after the SP adjustment and clobber data in the local
7154
         frame.  */
7155
      emit_insn (gen_blockage ());
7156
      emit_insn (GEN_MOV (stack_pointer_rtx, hard_frame_pointer_rtx));
7157
    }
7158
  else if (frame_size)
7159
    {
7160
      /* We must avoid moving the stack pointer adjustment past code
7161
         which reads from the local frame, else an interrupt could
7162
         occur after the SP adjustment and clobber data in the local
7163
         frame.  */
7164
      emit_insn (gen_blockage ());
7165
      output_stack_adjust (frame_size, stack_pointer_rtx, e,
7166
                           &live_regs_mask, false);
7167
    }
7168
 
7169
  if (SHMEDIA_REGS_STACK_ADJUST ())
7170
    {
7171
      function_symbol (gen_rtx_REG (Pmode, R0_REG),
7172
                       (TARGET_FPU_ANY
7173
                        ? "__GCC_pop_shmedia_regs"
7174
                        : "__GCC_pop_shmedia_regs_nofpu"), SFUNC_GOT);
7175
      /* This must NOT go through the PLT, otherwise mach and macl
7176
         may be clobbered.  */
7177
      emit_insn (gen_shmedia_save_restore_regs_compact
7178
                 (GEN_INT (SHMEDIA_REGS_STACK_ADJUST ())));
7179
    }
7180
 
7181
  /* Pop all the registers.  */
7182
 
7183
  if (target_flags != save_flags && ! current_function_interrupt)
7184
    emit_insn (gen_toggle_sz ());
7185
  if (TARGET_SH5)
7186
    {
7187
      int offset_base, offset;
7188
      int offset_in_r0 = -1;
7189
      int sp_in_r0 = 0;
7190
      rtx r0 = gen_rtx_REG (Pmode, R0_REG);
7191
      save_schedule schedule;
7192
      save_entry *entry;
7193
      int *tmp_pnt;
7194
 
7195
      entry = sh5_schedule_saves (&live_regs_mask, &schedule, d_rounding);
7196
      offset_base = -entry[1].offset + d_rounding;
7197
      tmp_pnt = schedule.temps;
7198
      for (; entry->mode != VOIDmode; entry--)
7199
        {
7200
          enum machine_mode mode = (enum machine_mode) entry->mode;
7201
          int reg = entry->reg;
7202
          rtx reg_rtx, mem_rtx, post_inc = NULL_RTX, insn;
7203
 
7204
          offset = offset_base + entry->offset;
7205
          reg_rtx = gen_rtx_REG (mode, reg);
7206
 
7207
          mem_rtx = gen_frame_mem (mode,
7208
                                   gen_rtx_PLUS (Pmode,
7209
                                                 stack_pointer_rtx,
7210
                                                 GEN_INT (offset)));
7211
 
7212
          if (!memory_address_p (mode, XEXP (mem_rtx, 0)))
7213
            mem_rtx = NULL_RTX;
7214
 
7215
          if (HAVE_POST_INCREMENT
7216
              && (offset == offset_in_r0
7217
                  || (offset + GET_MODE_SIZE (mode) != d + d_rounding
7218
                      && mem_rtx == NULL_RTX)
7219
                  || reg == PR_REG || SPECIAL_REGISTER_P (reg)))
7220
            {
7221
              post_inc = gen_frame_mem (mode, gen_rtx_POST_INC (Pmode, r0));
7222
 
7223
              if (!memory_address_p (mode, XEXP (post_inc, 0)))
7224
                post_inc = NULL_RTX;
7225
              else
7226
                mem_rtx = NULL_RTX;
7227
            }
7228
 
7229
          if (mem_rtx != NULL_RTX)
7230
            goto addr_ok;
7231
 
7232
          if (offset_in_r0 == -1)
7233
            {
7234
              emit_move_insn (r0, GEN_INT (offset));
7235
              offset_in_r0 = offset;
7236
            }
7237
          else if (offset != offset_in_r0)
7238
            {
7239
              emit_move_insn (r0,
7240
                              gen_rtx_PLUS
7241
                              (Pmode, r0,
7242
                               GEN_INT (offset - offset_in_r0)));
7243
              offset_in_r0 += offset - offset_in_r0;
7244
            }
7245
 
7246
          if (post_inc != NULL_RTX)
7247
            {
7248
              if (! sp_in_r0)
7249
                {
7250
                  emit_move_insn (r0,
7251
                                  gen_rtx_PLUS
7252
                                  (Pmode, r0, stack_pointer_rtx));
7253
                  sp_in_r0 = 1;
7254
                }
7255
 
7256
              mem_rtx = post_inc;
7257
 
7258
              offset_in_r0 += GET_MODE_SIZE (mode);
7259
            }
7260
          else if (sp_in_r0)
7261
            mem_rtx = gen_frame_mem (mode, r0);
7262
          else
7263
            mem_rtx = gen_frame_mem (mode,
7264
                                     gen_rtx_PLUS (Pmode,
7265
                                                   stack_pointer_rtx,
7266
                                                   r0));
7267
 
7268
          gcc_assert ((reg != PR_REG && !SPECIAL_REGISTER_P (reg))
7269
                      || mem_rtx == post_inc);
7270
 
7271
        addr_ok:
7272
          if ((reg == PR_REG || SPECIAL_REGISTER_P (reg))
7273
              && mem_rtx != post_inc)
7274
            {
7275
              insn = emit_move_insn (r0, mem_rtx);
7276
              mem_rtx = r0;
7277
            }
7278
          else if (TARGET_REGISTER_P (reg))
7279
            {
7280
              rtx tmp_reg = gen_rtx_REG (mode, *tmp_pnt);
7281
 
7282
              /* Give the scheduler a bit of freedom by using up to
7283
                 MAX_TEMPS registers in a round-robin fashion.  */
7284
              insn = emit_move_insn (tmp_reg, mem_rtx);
7285
              mem_rtx = tmp_reg;
7286
              if (*++tmp_pnt < 0)
7287
                tmp_pnt = schedule.temps;
7288
            }
7289
 
7290
          insn = emit_move_insn (reg_rtx, mem_rtx);
7291
        }
7292
 
7293
      gcc_assert (entry->offset + offset_base == d + d_rounding);
7294
    }
7295
  else /* ! TARGET_SH5 */
7296
    {
7297
      int last_reg;
7298
 
7299
      save_size = 0;
7300
        /* For an ISR with RESBANK attribute assigned, don't pop the PR
7301
           register.  */
7302
      if (TEST_HARD_REG_BIT (live_regs_mask, PR_REG)
7303
          && !sh_cfun_resbank_handler_p ())
7304
        {
7305
          if (!frame_pointer_needed)
7306
            emit_insn (gen_blockage ());
7307
          pop (PR_REG);
7308
        }
7309
 
7310
      /* Banked registers are popped first to avoid being scheduled in the
7311
         delay slot.  RTE switches banks before its delay slot instruction executes.  */
7312
      if (current_function_interrupt)
7313
        {
7314
          for (i = LAST_BANKED_REG; i >= FIRST_BANKED_REG; i--)
7315
            if (TEST_HARD_REG_BIT (live_regs_mask, i))
7316
              pop (i);
7317
 
7318
          last_reg = FIRST_PSEUDO_REGISTER - LAST_BANKED_REG - 1;
7319
        }
7320
      else
7321
        last_reg = FIRST_PSEUDO_REGISTER;
7322
 
7323
      for (i = 0; i < last_reg; i++)
7324
        {
7325
          int j = (FIRST_PSEUDO_REGISTER - 1) - i;
7326
 
7327
          if (j == FPSCR_REG && current_function_interrupt && TARGET_FMOVD
7328
              && hard_reg_set_intersect_p (live_regs_mask,
7329
                                          reg_class_contents[DF_REGS]))
7330
            fpscr_deferred = 1;
7331
          /* For an ISR with RESBANK attribute assigned, don't pop
7332
             the following registers: R0-R14, MACH, MACL and GBR.  */
7333
          else if (j != PR_REG && TEST_HARD_REG_BIT (live_regs_mask, j)
7334
                   && ! (sh_cfun_resbank_handler_p ()
7335
                         && ((j >= FIRST_GENERAL_REG
7336
                              && j < LAST_GENERAL_REG)
7337
                              || j == MACH_REG
7338
                              || j == MACL_REG
7339
                              || j == GBR_REG)))
7340
            pop (j);
7341
 
7342
          if (j == FIRST_FP_REG && fpscr_deferred)
7343
            pop (FPSCR_REG);
7344
        }
7345
    }
7346
  if (target_flags != save_flags && ! current_function_interrupt)
7347
    emit_insn (gen_toggle_sz ());
7348
  target_flags = save_flags;
7349
 
7350
  output_stack_adjust (crtl->args.pretend_args_size
7351
                       + save_size + d_rounding
7352
                       + crtl->args.info.stack_regs * 8,
7353
                       stack_pointer_rtx, e, NULL, false);
7354
 
7355
  if (crtl->calls_eh_return)
7356
    emit_insn (GEN_ADD3 (stack_pointer_rtx, stack_pointer_rtx,
7357
                         EH_RETURN_STACKADJ_RTX));
7358
 
7359
  /* Switch back to the normal stack if necessary.  */
7360
  if (lookup_attribute ("sp_switch", DECL_ATTRIBUTES (current_function_decl)))
7361
    emit_insn (gen_sp_switch_2 ());
7362
 
7363
  /* Tell flow the insn that pops PR isn't dead.  */
7364
  /* PR_REG will never be live in SHmedia mode, and we don't need to
7365
     USE PR_MEDIA_REG, since it will be explicitly copied to TR0_REG
7366
     by the return pattern.  */
7367
  if (TEST_HARD_REG_BIT (live_regs_mask, PR_REG))
7368
    emit_use (gen_rtx_REG (SImode, PR_REG));
7369
}
7370
 
7371
static int sh_need_epilogue_known = 0;
7372
 
7373
int
7374
sh_need_epilogue (void)
7375
{
7376
  if (! sh_need_epilogue_known)
7377
    {
7378
      rtx epilogue;
7379
 
7380
      start_sequence ();
7381
      sh_expand_epilogue (0);
7382
      epilogue = get_insns ();
7383
      end_sequence ();
7384
      sh_need_epilogue_known = (epilogue == NULL ? -1 : 1);
7385
    }
7386
  return sh_need_epilogue_known > 0;
7387
}
7388
 
7389
/* Emit code to change the current function's return address to RA.
7390
   TEMP is available as a scratch register, if needed.  */
7391
 
7392
void
7393
sh_set_return_address (rtx ra, rtx tmp)
7394
{
7395
  HARD_REG_SET live_regs_mask;
7396
  int d;
7397
  int pr_reg = TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG;
7398
  int pr_offset;
7399
 
7400
  d = calc_live_regs (&live_regs_mask);
7401
 
7402
  /* If pr_reg isn't live, we can set it (or the register given in
7403
     sh_media_register_for_return) directly.  */
7404
  if (! TEST_HARD_REG_BIT (live_regs_mask, pr_reg))
7405
    {
7406
      rtx rr;
7407
 
7408
      if (TARGET_SHMEDIA)
7409
        {
7410
          int rr_regno = sh_media_register_for_return ();
7411
 
7412
          if (rr_regno < 0)
7413
            rr_regno = pr_reg;
7414
 
7415
          rr = gen_rtx_REG (DImode, rr_regno);
7416
        }
7417
      else
7418
        rr = gen_rtx_REG (SImode, pr_reg);
7419
 
7420
      emit_insn (GEN_MOV (rr, ra));
7421
      /* Tell flow the register for return isn't dead.  */
7422
      emit_use (rr);
7423
      return;
7424
    }
7425
 
7426
  if (TARGET_SH5)
7427
    {
7428
      int offset;
7429
      save_schedule schedule;
7430
      save_entry *entry;
7431
 
7432
      entry = sh5_schedule_saves (&live_regs_mask, &schedule, 0);
7433
      offset = entry[1].offset;
7434
      for (; entry->mode != VOIDmode; entry--)
7435
        if (entry->reg == pr_reg)
7436
          goto found;
7437
 
7438
      /* We couldn't find the PR register in the save schedule.  */
7439
      gcc_unreachable ();
7440
 
7441
    found:
7442
      offset = entry->offset - offset;
7443
      pr_offset = (rounded_frame_size (d) + offset
7444
                   + SHMEDIA_REGS_STACK_ADJUST ());
7445
    }
7446
  else
7447
    pr_offset = rounded_frame_size (d);
7448
 
7449
  emit_insn (GEN_MOV (tmp, GEN_INT (pr_offset)));
7450
  emit_insn (GEN_ADD3 (tmp, tmp, hard_frame_pointer_rtx));
7451
 
7452
  tmp = gen_frame_mem (Pmode, tmp);
7453
  emit_insn (GEN_MOV (tmp, ra));
7454
  /* Tell flow that this store isn't dead.  */
7455
  emit_use (tmp);
7456
}
7457
 
7458
/* Clear variables at function end.  */
7459
 
7460
static void
7461
sh_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
7462
                             HOST_WIDE_INT size ATTRIBUTE_UNUSED)
7463
{
7464
  sh_need_epilogue_known = 0;
7465
}
7466
 
7467
static rtx
7468
sh_builtin_saveregs (void)
7469
{
7470
  /* First unnamed integer register.  */
7471
  int first_intreg = crtl->args.info.arg_count[(int) SH_ARG_INT];
7472
  /* Number of integer registers we need to save.  */
7473
  int n_intregs = MAX (0, NPARM_REGS (SImode) - first_intreg);
7474
  /* First unnamed SFmode float reg.  */
7475
  int first_floatreg = crtl->args.info.arg_count[(int) SH_ARG_FLOAT];
7476
  /* Number of SFmode float regs to save.  */
7477
  int n_floatregs = MAX (0, NPARM_REGS (SFmode) - first_floatreg);
7478
  rtx regbuf, fpregs;
7479
  int bufsize, regno;
7480
  alias_set_type alias_set;
7481
 
7482
  if (TARGET_SH5)
7483
    {
7484
      if (n_intregs)
7485
        {
7486
          int pushregs = n_intregs;
7487
 
7488
          while (pushregs < NPARM_REGS (SImode) - 1
7489
                 && (CALL_COOKIE_INT_REG_GET
7490
                        (crtl->args.info.call_cookie,
7491
                         NPARM_REGS (SImode) - pushregs)
7492
                     == 1))
7493
            {
7494
              crtl->args.info.call_cookie
7495
                &= ~ CALL_COOKIE_INT_REG (NPARM_REGS (SImode)
7496
                                          - pushregs, 1);
7497
              pushregs++;
7498
            }
7499
 
7500
          if (pushregs == NPARM_REGS (SImode))
7501
            crtl->args.info.call_cookie
7502
              |= (CALL_COOKIE_INT_REG (0, 1)
7503
                  | CALL_COOKIE_STACKSEQ (pushregs - 1));
7504
          else
7505
            crtl->args.info.call_cookie
7506
              |= CALL_COOKIE_STACKSEQ (pushregs);
7507
 
7508
          crtl->args.pretend_args_size += 8 * n_intregs;
7509
        }
7510
      if (TARGET_SHCOMPACT)
7511
        return const0_rtx;
7512
    }
7513
 
7514
  if (! TARGET_SH2E && ! TARGET_SH4 && ! TARGET_SH5)
7515
    {
7516
      error ("__builtin_saveregs not supported by this subtarget");
7517
      return const0_rtx;
7518
    }
7519
 
7520
  if (TARGET_SHMEDIA)
7521
    n_floatregs = 0;
7522
 
7523
  /* Allocate block of memory for the regs.  */
7524
  /* ??? If n_intregs + n_floatregs == 0, should we allocate at least 1 byte?
7525
     Or can assign_stack_local accept a 0 SIZE argument?  */
7526
  bufsize = (n_intregs * UNITS_PER_WORD) + (n_floatregs * UNITS_PER_WORD);
7527
 
7528
  if (TARGET_SHMEDIA)
7529
    regbuf = gen_frame_mem (BLKmode, gen_rtx_REG (Pmode, ARG_POINTER_REGNUM));
7530
  else if (n_floatregs & 1)
7531
    {
7532
      rtx addr;
7533
 
7534
      regbuf = assign_stack_local (BLKmode, bufsize + UNITS_PER_WORD, 0);
7535
      addr = copy_to_mode_reg (Pmode, XEXP (regbuf, 0));
7536
      emit_insn (gen_iorsi3 (addr, addr, GEN_INT (UNITS_PER_WORD)));
7537
      regbuf = change_address (regbuf, BLKmode, addr);
7538
    }
7539
  else if (STACK_BOUNDARY < 64 && TARGET_FPU_DOUBLE && n_floatregs)
7540
    {
7541
      rtx addr, mask;
7542
 
7543
      regbuf = assign_stack_local (BLKmode, bufsize + UNITS_PER_WORD, 0);
7544
      addr = copy_to_mode_reg (Pmode, plus_constant (XEXP (regbuf, 0), 4));
7545
      mask = copy_to_mode_reg (Pmode, GEN_INT (-8));
7546
      emit_insn (gen_andsi3 (addr, addr, mask));
7547
      regbuf = change_address (regbuf, BLKmode, addr);
7548
    }
7549
  else
7550
    regbuf = assign_stack_local (BLKmode, bufsize, TARGET_FPU_DOUBLE ? 64 : 0);
7551
  alias_set = get_varargs_alias_set ();
7552
  set_mem_alias_set (regbuf, alias_set);
7553
 
7554
  /* Save int args.
7555
     This is optimized to only save the regs that are necessary.  Explicitly
7556
     named args need not be saved.  */
7557
  if (n_intregs > 0)
7558
    move_block_from_reg (BASE_ARG_REG (SImode) + first_intreg,
7559
                         adjust_address (regbuf, BLKmode,
7560
                                         n_floatregs * UNITS_PER_WORD),
7561
                         n_intregs);
7562
 
7563
  if (TARGET_SHMEDIA)
7564
    /* Return the address of the regbuf.  */
7565
    return XEXP (regbuf, 0);
7566
 
7567
  /* Save float args.
7568
     This is optimized to only save the regs that are necessary.  Explicitly
7569
     named args need not be saved.
7570
     We explicitly build a pointer to the buffer because it halves the insn
7571
     count when not optimizing (otherwise the pointer is built for each reg
7572
     saved).
7573
     We emit the moves in reverse order so that we can use predecrement.  */
7574
 
7575
  fpregs = copy_to_mode_reg (Pmode,
7576
                             plus_constant (XEXP (regbuf, 0),
7577
                                            n_floatregs * UNITS_PER_WORD));
7578
  if (TARGET_SH4 || TARGET_SH2A_DOUBLE)
7579
    {
7580
      rtx mem;
7581
      for (regno = NPARM_REGS (DFmode) - 2; regno >= first_floatreg; regno -= 2)
7582
        {
7583
          emit_insn (gen_addsi3 (fpregs, fpregs,
7584
                                 GEN_INT (-2 * UNITS_PER_WORD)));
7585
          mem = change_address (regbuf, DFmode, fpregs);
7586
          emit_move_insn (mem,
7587
                          gen_rtx_REG (DFmode, BASE_ARG_REG (DFmode) + regno));
7588
        }
7589
      regno = first_floatreg;
7590
      if (regno & 1)
7591
        {
7592
          emit_insn (gen_addsi3 (fpregs, fpregs, GEN_INT (-UNITS_PER_WORD)));
7593
          mem = change_address (regbuf, SFmode, fpregs);
7594
          emit_move_insn (mem,
7595
                          gen_rtx_REG (SFmode, BASE_ARG_REG (SFmode) + regno
7596
                                                - (TARGET_LITTLE_ENDIAN != 0)));
7597
        }
7598
    }
7599
  else
7600
    for (regno = NPARM_REGS (SFmode) - 1; regno >= first_floatreg; regno--)
7601
      {
7602
        rtx mem;
7603
 
7604
        emit_insn (gen_addsi3 (fpregs, fpregs, GEN_INT (-UNITS_PER_WORD)));
7605
        mem = change_address (regbuf, SFmode, fpregs);
7606
        emit_move_insn (mem,
7607
                        gen_rtx_REG (SFmode, BASE_ARG_REG (SFmode) + regno));
7608
      }
7609
 
7610
  /* Return the address of the regbuf.  */
7611
  return XEXP (regbuf, 0);
7612
}
7613
 
7614
/* Define the `__builtin_va_list' type for the ABI.  */
7615
 
7616
static tree
7617
sh_build_builtin_va_list (void)
7618
{
7619
  tree f_next_o, f_next_o_limit, f_next_fp, f_next_fp_limit, f_next_stack;
7620
  tree record;
7621
 
7622
  if (TARGET_SH5 || (! TARGET_SH2E && ! TARGET_SH4)
7623
      || TARGET_HITACHI || sh_cfun_attr_renesas_p ())
7624
    return ptr_type_node;
7625
 
7626
  record = (*lang_hooks.types.make_type) (RECORD_TYPE);
7627
 
7628
  f_next_o = build_decl (BUILTINS_LOCATION,
7629
                         FIELD_DECL, get_identifier ("__va_next_o"),
7630
                         ptr_type_node);
7631
  f_next_o_limit = build_decl (BUILTINS_LOCATION,
7632
                               FIELD_DECL,
7633
                               get_identifier ("__va_next_o_limit"),
7634
                               ptr_type_node);
7635
  f_next_fp = build_decl (BUILTINS_LOCATION,
7636
                          FIELD_DECL, get_identifier ("__va_next_fp"),
7637
                          ptr_type_node);
7638
  f_next_fp_limit = build_decl (BUILTINS_LOCATION,
7639
                                FIELD_DECL,
7640
                                get_identifier ("__va_next_fp_limit"),
7641
                                ptr_type_node);
7642
  f_next_stack = build_decl (BUILTINS_LOCATION,
7643
                             FIELD_DECL, get_identifier ("__va_next_stack"),
7644
                             ptr_type_node);
7645
 
7646
  DECL_FIELD_CONTEXT (f_next_o) = record;
7647
  DECL_FIELD_CONTEXT (f_next_o_limit) = record;
7648
  DECL_FIELD_CONTEXT (f_next_fp) = record;
7649
  DECL_FIELD_CONTEXT (f_next_fp_limit) = record;
7650
  DECL_FIELD_CONTEXT (f_next_stack) = record;
7651
 
7652
  TYPE_FIELDS (record) = f_next_o;
7653
  TREE_CHAIN (f_next_o) = f_next_o_limit;
7654
  TREE_CHAIN (f_next_o_limit) = f_next_fp;
7655
  TREE_CHAIN (f_next_fp) = f_next_fp_limit;
7656
  TREE_CHAIN (f_next_fp_limit) = f_next_stack;
7657
 
7658
  layout_type (record);
7659
 
7660
  return record;
7661
}
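 
/* For illustration only (not used by the compiler): the record built by
   sh_build_builtin_va_list corresponds roughly to the following C type.
   This is a sketch; only the field names and their order are taken from
   the code above.
 
     typedef struct {
       void *__va_next_o;         -- next slot in the saved integer arg area
       void *__va_next_o_limit;   -- end of the saved integer arg area
       void *__va_next_fp;        -- next slot in the saved FP arg area
       void *__va_next_fp_limit;  -- end of the saved FP arg area
       void *__va_next_stack;     -- next argument passed on the stack
     } __builtin_va_list;  */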
7662
 
7663
/* Implement `va_start' for varargs and stdarg.  */
7664
 
7665
static void
7666
sh_va_start (tree valist, rtx nextarg)
7667
{
7668
  tree f_next_o, f_next_o_limit, f_next_fp, f_next_fp_limit, f_next_stack;
7669
  tree next_o, next_o_limit, next_fp, next_fp_limit, next_stack;
7670
  tree t, u;
7671
  int nfp, nint;
7672
 
7673
  if (TARGET_SH5)
7674
    {
7675
      expand_builtin_saveregs ();
7676
      std_expand_builtin_va_start (valist, nextarg);
7677
      return;
7678
    }
7679
 
7680
  if ((! TARGET_SH2E && ! TARGET_SH4)
7681
      || TARGET_HITACHI || sh_cfun_attr_renesas_p ())
7682
    {
7683
      std_expand_builtin_va_start (valist, nextarg);
7684
      return;
7685
    }
7686
 
7687
  f_next_o = TYPE_FIELDS (va_list_type_node);
7688
  f_next_o_limit = TREE_CHAIN (f_next_o);
7689
  f_next_fp = TREE_CHAIN (f_next_o_limit);
7690
  f_next_fp_limit = TREE_CHAIN (f_next_fp);
7691
  f_next_stack = TREE_CHAIN (f_next_fp_limit);
7692
 
7693
  next_o = build3 (COMPONENT_REF, TREE_TYPE (f_next_o), valist, f_next_o,
7694
                   NULL_TREE);
7695
  next_o_limit = build3 (COMPONENT_REF, TREE_TYPE (f_next_o_limit),
7696
                         valist, f_next_o_limit, NULL_TREE);
7697
  next_fp = build3 (COMPONENT_REF, TREE_TYPE (f_next_fp), valist, f_next_fp,
7698
                    NULL_TREE);
7699
  next_fp_limit = build3 (COMPONENT_REF, TREE_TYPE (f_next_fp_limit),
7700
                          valist, f_next_fp_limit, NULL_TREE);
7701
  next_stack = build3 (COMPONENT_REF, TREE_TYPE (f_next_stack),
7702
                       valist, f_next_stack, NULL_TREE);
7703
 
7704
  /* Call __builtin_saveregs.  */
7705
  u = make_tree (sizetype, expand_builtin_saveregs ());
7706
  u = fold_convert (ptr_type_node, u);
7707
  t = build2 (MODIFY_EXPR, ptr_type_node, next_fp, u);
7708
  TREE_SIDE_EFFECTS (t) = 1;
7709
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7710
 
7711
  nfp = crtl->args.info.arg_count[SH_ARG_FLOAT];
7712
  if (nfp < 8)
7713
    nfp = 8 - nfp;
7714
  else
7715
    nfp = 0;
7716
  u = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, u,
7717
                   size_int (UNITS_PER_WORD * nfp));
7718
  t = build2 (MODIFY_EXPR, ptr_type_node, next_fp_limit, u);
7719
  TREE_SIDE_EFFECTS (t) = 1;
7720
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7721
 
7722
  t = build2 (MODIFY_EXPR, ptr_type_node, next_o, u);
7723
  TREE_SIDE_EFFECTS (t) = 1;
7724
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7725
 
7726
  nint = crtl->args.info.arg_count[SH_ARG_INT];
7727
  if (nint < 4)
7728
    nint = 4 - nint;
7729
  else
7730
    nint = 0;
7731
  u = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, u,
7732
                   size_int (UNITS_PER_WORD * nint));
7733
  t = build2 (MODIFY_EXPR, ptr_type_node, next_o_limit, u);
7734
  TREE_SIDE_EFFECTS (t) = 1;
7735
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7736
 
7737
  u = make_tree (ptr_type_node, nextarg);
7738
  t = build2 (MODIFY_EXPR, ptr_type_node, next_stack, u);
7739
  TREE_SIDE_EFFECTS (t) = 1;
7740
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
7741
}
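 
/* Worked example for the code above (a sketch; it assumes UNITS_PER_WORD
   is 4 and the default, non-Renesas SH-2E/SH-4 ABI): for
 
     void f (int a, ...)
 
   one integer argument is named, so nint = 4 - 1 = 3 and nfp = 8 - 0 = 8.
   __builtin_saveregs returns the start of the register save area;
   next_fp points there, next_fp_limit = next_fp + 4 * 8, next_o starts
   at next_fp_limit, and next_o_limit = next_o + 4 * 3.  next_stack
   points at the first argument that was passed on the stack.  */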
7742
 
7743
/* TYPE is a RECORD_TYPE.  If there is only a single nonzero-sized
7744
   member, return it.  */
7745
static tree
7746
find_sole_member (tree type)
7747
{
7748
  tree field, member = NULL_TREE;
7749
 
7750
  for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
7751
    {
7752
      if (TREE_CODE (field) != FIELD_DECL)
7753
        continue;
7754
      if (!DECL_SIZE (field))
7755
        return NULL_TREE;
7756
      if (integer_zerop (DECL_SIZE (field)))
7757
        continue;
7758
      if (member)
7759
        return NULL_TREE;
7760
      member = field;
7761
    }
7762
  return member;
7763
}
7764
/* Implement `va_arg'.  */
7765
 
7766
static tree
7767
sh_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
7768
                         gimple_seq *post_p ATTRIBUTE_UNUSED)
7769
{
7770
  HOST_WIDE_INT size, rsize;
7771
  tree tmp, pptr_type_node;
7772
  tree addr, lab_over = NULL, result = NULL;
7773
  int pass_by_ref = targetm.calls.must_pass_in_stack (TYPE_MODE (type), type);
7774
  tree eff_type;
7775
 
7776
  if (pass_by_ref)
7777
    type = build_pointer_type (type);
7778
 
7779
  size = int_size_in_bytes (type);
7780
  rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
7781
  pptr_type_node = build_pointer_type (ptr_type_node);
7782
 
7783
  if (! TARGET_SH5 && (TARGET_SH2E || TARGET_SH4)
7784
      && ! (TARGET_HITACHI || sh_cfun_attr_renesas_p ()))
7785
    {
7786
      tree f_next_o, f_next_o_limit, f_next_fp, f_next_fp_limit, f_next_stack;
7787
      tree next_o, next_o_limit, next_fp, next_fp_limit, next_stack;
7788
      int pass_as_float;
7789
      tree lab_false;
7790
      tree member;
7791
 
7792
      f_next_o = TYPE_FIELDS (va_list_type_node);
7793
      f_next_o_limit = TREE_CHAIN (f_next_o);
7794
      f_next_fp = TREE_CHAIN (f_next_o_limit);
7795
      f_next_fp_limit = TREE_CHAIN (f_next_fp);
7796
      f_next_stack = TREE_CHAIN (f_next_fp_limit);
7797
 
7798
      next_o = build3 (COMPONENT_REF, TREE_TYPE (f_next_o), valist, f_next_o,
7799
                       NULL_TREE);
7800
      next_o_limit = build3 (COMPONENT_REF, TREE_TYPE (f_next_o_limit),
7801
                             valist, f_next_o_limit, NULL_TREE);
7802
      next_fp = build3 (COMPONENT_REF, TREE_TYPE (f_next_fp),
7803
                        valist, f_next_fp, NULL_TREE);
7804
      next_fp_limit = build3 (COMPONENT_REF, TREE_TYPE (f_next_fp_limit),
7805
                              valist, f_next_fp_limit, NULL_TREE);
7806
      next_stack = build3 (COMPONENT_REF, TREE_TYPE (f_next_stack),
7807
                           valist, f_next_stack, NULL_TREE);
7808
 
7809
      /* Structures with a single member with a distinct mode are passed
7810
         like their member.  This is relevant if the latter has a REAL_TYPE
7811
         or COMPLEX_TYPE type.  */
7812
      eff_type = type;
7813
      while (TREE_CODE (eff_type) == RECORD_TYPE
7814
             && (member = find_sole_member (eff_type))
7815
             && (TREE_CODE (TREE_TYPE (member)) == REAL_TYPE
7816
                 || TREE_CODE (TREE_TYPE (member)) == COMPLEX_TYPE
7817
                 || TREE_CODE (TREE_TYPE (member)) == RECORD_TYPE))
7818
        {
7819
          tree field_type = TREE_TYPE (member);
7820
 
7821
          if (TYPE_MODE (eff_type) == TYPE_MODE (field_type))
7822
            eff_type = field_type;
7823
          else
7824
            {
7825
              gcc_assert ((TYPE_ALIGN (eff_type)
7826
                           < GET_MODE_ALIGNMENT (TYPE_MODE (field_type)))
7827
                          || (TYPE_ALIGN (eff_type)
7828
                              > GET_MODE_BITSIZE (TYPE_MODE (field_type))));
7829
              break;
7830
            }
7831
        }
7832
 
7833
      if (TARGET_SH4 || TARGET_SH2A_DOUBLE)
7834
        {
7835
          pass_as_float = ((TREE_CODE (eff_type) == REAL_TYPE && size <= 8)
7836
                           || (TREE_CODE (eff_type) == COMPLEX_TYPE
7837
                               && TREE_CODE (TREE_TYPE (eff_type)) == REAL_TYPE
7838
                               && size <= 16));
7839
        }
7840
      else
7841
        {
7842
          pass_as_float = (TREE_CODE (eff_type) == REAL_TYPE && size == 4);
7843
        }
7844
 
7845
      addr = create_tmp_var (pptr_type_node, NULL);
7846
      lab_false = create_artificial_label (UNKNOWN_LOCATION);
7847
      lab_over = create_artificial_label (UNKNOWN_LOCATION);
7848
 
7849
      valist = build1 (INDIRECT_REF, ptr_type_node, addr);
7850
 
7851
      if (pass_as_float)
7852
        {
7853
          tree next_fp_tmp = create_tmp_var (TREE_TYPE (f_next_fp), NULL);
7854
          tree cmp;
7855
          bool is_double = size == 8 && TREE_CODE (eff_type) == REAL_TYPE;
7856
 
7857
          tmp = build1 (ADDR_EXPR, pptr_type_node, unshare_expr (next_fp));
7858
          gimplify_assign (unshare_expr (addr), tmp, pre_p);
7859
 
7860
          gimplify_assign (unshare_expr (next_fp_tmp), valist, pre_p);
7861
          tmp = next_fp_limit;
7862
          if (size > 4 && !is_double)
7863
            tmp = build2 (POINTER_PLUS_EXPR, TREE_TYPE (tmp),
7864
                          unshare_expr (tmp), size_int (4 - size));
7865
          tmp = build2 (GE_EXPR, boolean_type_node,
7866
                        unshare_expr (next_fp_tmp), unshare_expr (tmp));
7867
          cmp = build3 (COND_EXPR, void_type_node, tmp,
7868
                        build1 (GOTO_EXPR, void_type_node,
7869
                                unshare_expr (lab_false)), NULL_TREE);
7870
          if (!is_double)
7871
            gimplify_and_add (cmp, pre_p);
7872
 
7873
          if (TYPE_ALIGN (eff_type) > BITS_PER_WORD
7874
              || (is_double || size == 16))
7875
            {
7876
              tmp = fold_convert (sizetype, next_fp_tmp);
7877
              tmp = build2 (BIT_AND_EXPR, sizetype, tmp,
7878
                            size_int (UNITS_PER_WORD));
7879
              tmp = build2 (POINTER_PLUS_EXPR, ptr_type_node,
7880
                            unshare_expr (next_fp_tmp), tmp);
7881
              gimplify_assign (unshare_expr (next_fp_tmp), tmp, pre_p);
7882
            }
7883
          if (is_double)
7884
            gimplify_and_add (cmp, pre_p);
7885
 
7886
#ifdef FUNCTION_ARG_SCmode_WART
7887
          if (TYPE_MODE (eff_type) == SCmode
7888
              && TARGET_SH4 && TARGET_LITTLE_ENDIAN)
7889
            {
7890
              tree subtype = TREE_TYPE (eff_type);
7891
              tree real, imag;
7892
 
7893
              imag
7894
                = std_gimplify_va_arg_expr (next_fp_tmp, subtype, pre_p, NULL);
7895
              imag = get_initialized_tmp_var (imag, pre_p, NULL);
7896
 
7897
              real
7898
                = std_gimplify_va_arg_expr (next_fp_tmp, subtype, pre_p, NULL);
7899
              real = get_initialized_tmp_var (real, pre_p, NULL);
7900
 
7901
              result = build2 (COMPLEX_EXPR, eff_type, real, imag);
7902
              if (type != eff_type)
7903
                result = build1 (VIEW_CONVERT_EXPR, type, result);
7904
              result = get_initialized_tmp_var (result, pre_p, NULL);
7905
            }
7906
#endif /* FUNCTION_ARG_SCmode_WART */
7907
 
7908
          tmp = build1 (GOTO_EXPR, void_type_node, unshare_expr (lab_over));
7909
          gimplify_and_add (tmp, pre_p);
7910
 
7911
          tmp = build1 (LABEL_EXPR, void_type_node, unshare_expr (lab_false));
7912
          gimplify_and_add (tmp, pre_p);
7913
 
7914
          tmp = build1 (ADDR_EXPR, pptr_type_node, unshare_expr (next_stack));
7915
          gimplify_assign (unshare_expr (addr), tmp, pre_p);
7916
          gimplify_assign (unshare_expr (next_fp_tmp),
7917
                           unshare_expr (valist), pre_p);
7918
 
7919
          gimplify_assign (unshare_expr (valist),
7920
                           unshare_expr (next_fp_tmp), post_p);
7921
          valist = next_fp_tmp;
7922
        }
7923
      else
7924
        {
7925
          tmp = build2 (POINTER_PLUS_EXPR, ptr_type_node,
7926
                        unshare_expr (next_o), size_int (rsize));
7927
          tmp = build2 (GT_EXPR, boolean_type_node, tmp,
7928
                        unshare_expr (next_o_limit));
7929
          tmp = build3 (COND_EXPR, void_type_node, tmp,
7930
                        build1 (GOTO_EXPR, void_type_node,
7931
                                unshare_expr (lab_false)),
7932
                        NULL_TREE);
7933
          gimplify_and_add (tmp, pre_p);
7934
 
7935
          tmp = build1 (ADDR_EXPR, pptr_type_node, unshare_expr (next_o));
7936
          gimplify_assign (unshare_expr (addr), tmp, pre_p);
7937
 
7938
          tmp = build1 (GOTO_EXPR, void_type_node, unshare_expr (lab_over));
7939
          gimplify_and_add (tmp, pre_p);
7940
 
7941
          tmp = build1 (LABEL_EXPR, void_type_node, unshare_expr (lab_false));
7942
          gimplify_and_add (tmp, pre_p);
7943
 
7944
          if (size > 4 && ! (TARGET_SH4 || TARGET_SH2A))
7945
            gimplify_assign (unshare_expr (next_o),
7946
                             unshare_expr (next_o_limit), pre_p);
7947
 
7948
          tmp = build1 (ADDR_EXPR, pptr_type_node, unshare_expr (next_stack));
7949
          gimplify_assign (unshare_expr (addr), tmp, pre_p);
7950
        }
7951
 
7952
      if (!result)
7953
        {
7954
          tmp = build1 (LABEL_EXPR, void_type_node, unshare_expr (lab_over));
7955
          gimplify_and_add (tmp, pre_p);
7956
        }
7957
    }
7958
 
7959
  /* ??? In va-sh.h, there had been code to make values larger than
7960
     size 8 indirect.  This does not match the FUNCTION_ARG macros.  */
7961
 
7962
  tmp = std_gimplify_va_arg_expr (valist, type, pre_p, NULL);
7963
  if (result)
7964
    {
7965
      gimplify_assign (result, tmp, pre_p);
7966
      result = build1 (NOP_EXPR, TREE_TYPE (result), result);
7967
      tmp = build1 (LABEL_EXPR, void_type_node, unshare_expr (lab_over));
7968
      gimplify_and_add (tmp, pre_p);
7969
    }
7970
  else
7971
    result = tmp;
7972
 
7973
  if (pass_by_ref)
7974
    result = build_va_arg_indirect_ref (result);
7975
 
7976
  return result;
7977
}
7978
 
7979
/* 64-bit floating point memory transfers are paired single precision loads
7980
   or stores.  So DWARF information needs fixing in little endian (unless
7981
   PR=SZ=1 in FPSCR).  */
7982
rtx
7983
sh_dwarf_register_span (rtx reg)
7984
{
7985
  unsigned regno = REGNO (reg);
7986
 
7987
  if (WORDS_BIG_ENDIAN || GET_MODE (reg) != DFmode)
7988
    return NULL_RTX;
7989
 
7990
  return
7991
    gen_rtx_PARALLEL (VOIDmode,
7992
                      gen_rtvec (2,
7993
                                 gen_rtx_REG (SFmode,
7994
                                              DBX_REGISTER_NUMBER (regno+1)),
7995
                                 gen_rtx_REG (SFmode,
7996
                                              DBX_REGISTER_NUMBER (regno))));
7997
}
7998
 
7999
static enum machine_mode
8000
sh_promote_function_mode (const_tree type, enum machine_mode mode,
8001
                          int *punsignedp, const_tree funtype,
8002
                          int for_return ATTRIBUTE_UNUSED)
8003
{
8004
  if (sh_promote_prototypes (funtype))
8005
    return promote_mode (type, mode, punsignedp);
8006
  else
8007
    return mode;
8008
}
8009
 
8010
static bool
8011
sh_promote_prototypes (const_tree type)
8012
{
8013
  if (TARGET_HITACHI)
8014
    return 0;
8015
  if (! type)
8016
    return 1;
8017
  return ! sh_attr_renesas_p (type);
8018
}
8019
 
8020
/* Whether an argument must be passed by reference.  On SHcompact, we
8021
   pretend arguments wider than 32 bits that would have been passed in
8022
   registers are passed by reference, so that an SHmedia trampoline
8023
   loads them into the full 64-bit registers.  */
8024
 
8025
static int
8026
shcompact_byref (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
8027
                 const_tree type, bool named)
8028
{
8029
  unsigned HOST_WIDE_INT size;
8030
 
8031
  if (type)
8032
    size = int_size_in_bytes (type);
8033
  else
8034
    size = GET_MODE_SIZE (mode);
8035
 
8036
  if (cum->arg_count[SH_ARG_INT] < NPARM_REGS (SImode)
8037
      && (!named
8038
          || GET_SH_ARG_CLASS (mode) == SH_ARG_INT
8039
          || (GET_SH_ARG_CLASS (mode) == SH_ARG_FLOAT
8040
              && cum->arg_count[SH_ARG_FLOAT] >= NPARM_REGS (SFmode)))
8041
      && size > 4
8042
      && !SHCOMPACT_FORCE_ON_STACK (mode, type)
8043
      && !SH5_WOULD_BE_PARTIAL_NREGS (*cum, mode, type, named))
8044
    return size;
8045
  else
8046
    return 0;
8047
}
8048
 
8049
static bool
8050
sh_pass_by_reference (CUMULATIVE_ARGS *cum, enum machine_mode mode,
8051
                      const_tree type, bool named)
8052
{
8053
  if (targetm.calls.must_pass_in_stack (mode, type))
8054
    return true;
8055
 
8056
  /* ??? std_gimplify_va_arg_expr passes NULL for cum.  That function
8057
     wants to know about pass-by-reference semantics for incoming
8058
     arguments.  */
8059
  if (! cum)
8060
    return false;
8061
 
8062
  if (TARGET_SHCOMPACT)
8063
    {
8064
      cum->byref = shcompact_byref (cum, mode, type, named);
8065
      return cum->byref != 0;
8066
    }
8067
 
8068
  return false;
8069
}
8070
 
8071
static bool
8072
sh_callee_copies (CUMULATIVE_ARGS *cum, enum machine_mode mode,
8073
                  const_tree type, bool named ATTRIBUTE_UNUSED)
8074
{
8075
  /* ??? How can it possibly be correct to return true only on the
8076
     caller side of the equation?  Is there someplace else in the
8077
     sh backend that's magically producing the copies?  */
8078
  return (cum->outgoing
8079
          && ((mode == BLKmode ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode))
8080
              % SH_MIN_ALIGN_FOR_CALLEE_COPY == 0));
8081
}
8082
 
8083
static int
8084
sh_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
8085
                      tree type, bool named ATTRIBUTE_UNUSED)
8086
{
8087
  int words = 0;
8088
 
8089
  if (!TARGET_SH5
8090
      && PASS_IN_REG_P (*cum, mode, type)
8091
      && !(TARGET_SH4 || TARGET_SH2A_DOUBLE)
8092
      && (ROUND_REG (*cum, mode)
8093
          + (mode != BLKmode
8094
             ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
8095
             : ROUND_ADVANCE (int_size_in_bytes (type)))
8096
          > NPARM_REGS (mode)))
8097
    words = NPARM_REGS (mode) - ROUND_REG (*cum, mode);
8098
 
8099
  else if (!TARGET_SHCOMPACT
8100
           && SH5_WOULD_BE_PARTIAL_NREGS (*cum, mode, type, named))
8101
    words = NPARM_REGS (SImode) - cum->arg_count[SH_ARG_INT];
8102
 
8103
  return words * UNITS_PER_WORD;
8104
}
8105
 
8106
 
8107
/* Define where to put the arguments to a function.
8108
   Value is zero to push the argument on the stack,
8109
   or a hard register in which to store the argument.
8110
 
8111
   MODE is the argument's machine mode.
8112
   TYPE is the data type of the argument (as a tree).
8113
    This is null for libcalls where that information may
8114
    not be available.
8115
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
8116
    the preceding args and about the function being called.
8117
   NAMED is nonzero if this argument is a named parameter
8118
    (otherwise it is an extra parameter matching an ellipsis).
8119
 
8120
   On SH the first args are normally in registers
8121
   and the rest are pushed.  Any arg that starts within the first
8122
   NPARM_REGS words is at least partially passed in a register unless
8123
   its data type forbids.  */
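 
/* A rough illustration of the above (a sketch only; it assumes the
   default little-endian, non-Renesas SH-4 ABI):
 
     int f (int a, int b, double c, int d);
 
   Here A, B and D are passed in r4, r5 and r6, while C goes into the
   first free floating-point register pair (fr4/fr5), since the integer
   and floating-point argument registers are counted separately.  */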
8124
 
8125
 
8126
rtx
8127
sh_function_arg (CUMULATIVE_ARGS *ca, enum machine_mode mode,
8128
                 tree type, int named)
8129
{
8130
  if (! TARGET_SH5 && mode == VOIDmode)
8131
    return GEN_INT (ca->renesas_abi ? 1 : 0);
8132
 
8133
  if (! TARGET_SH5
8134
      && PASS_IN_REG_P (*ca, mode, type)
8135
      && (named || ! (TARGET_HITACHI || ca->renesas_abi)))
8136
    {
8137
      int regno;
8138
 
8139
      if (mode == SCmode && TARGET_SH4 && TARGET_LITTLE_ENDIAN
8140
          && (! FUNCTION_ARG_SCmode_WART || (ROUND_REG (*ca, mode) & 1)))
8141
        {
8142
          rtx r1 = gen_rtx_EXPR_LIST (VOIDmode,
8143
                                      gen_rtx_REG (SFmode,
8144
                                                   BASE_ARG_REG (mode)
8145
                                                   + (ROUND_REG (*ca, mode) ^ 1)),
8146
                                      const0_rtx);
8147
          rtx r2 = gen_rtx_EXPR_LIST (VOIDmode,
8148
                                      gen_rtx_REG (SFmode,
8149
                                                   BASE_ARG_REG (mode)
8150
                                                   + ((ROUND_REG (*ca, mode) + 1) ^ 1)),
8151
                                      GEN_INT (4));
8152
          return gen_rtx_PARALLEL(SCmode, gen_rtvec(2, r1, r2));
8153
        }
8154
 
8155
     /* If the alignment of a DF value causes an SF register to be
8156
        skipped, we will use that skipped register for the next SF
8157
        value.  */
8158
      if ((TARGET_HITACHI || ca->renesas_abi)
8159
          && ca->free_single_fp_reg
8160
          && mode == SFmode)
8161
        return gen_rtx_REG (mode, ca->free_single_fp_reg);
8162
 
8163
      regno = (BASE_ARG_REG (mode) + ROUND_REG (*ca, mode))
8164
               ^ (mode == SFmode && TARGET_SH4
8165
                  && TARGET_LITTLE_ENDIAN != 0
8166
                  && ! TARGET_HITACHI && ! ca->renesas_abi);
8167
      return gen_rtx_REG (mode, regno);
8168
 
8169
    }
8170
 
8171
  if (TARGET_SH5)
8172
    {
8173
      if (mode == VOIDmode && TARGET_SHCOMPACT)
8174
        return GEN_INT (ca->call_cookie);
8175
 
8176
      /* The following test assumes unnamed arguments are promoted to
8177
         DFmode.  */
8178
      if (mode == SFmode && ca->free_single_fp_reg)
8179
        return SH5_PROTOTYPED_FLOAT_ARG (*ca, mode, ca->free_single_fp_reg);
8180
 
8181
      if ((GET_SH_ARG_CLASS (mode) == SH_ARG_FLOAT)
8182
          && (named || ! ca->prototype_p)
8183
          && ca->arg_count[(int) SH_ARG_FLOAT] < NPARM_REGS (SFmode))
8184
        {
8185
          if (! ca->prototype_p && TARGET_SHMEDIA)
8186
            return SH5_PROTOTYPELESS_FLOAT_ARG (*ca, mode);
8187
 
8188
          return SH5_PROTOTYPED_FLOAT_ARG (*ca, mode,
8189
                                           FIRST_FP_PARM_REG
8190
                                           + ca->arg_count[(int) SH_ARG_FLOAT]);
8191
        }
8192
 
8193
      if (ca->arg_count[(int) SH_ARG_INT] < NPARM_REGS (SImode)
8194
          && (! TARGET_SHCOMPACT
8195
              || (! SHCOMPACT_FORCE_ON_STACK (mode, type)
8196
                  && ! SH5_WOULD_BE_PARTIAL_NREGS (*ca, mode,
8197
                                                   type, named))))
8198
        {
8199
          return gen_rtx_REG (mode, (FIRST_PARM_REG
8200
                                       + ca->arg_count[(int) SH_ARG_INT]));
8201
        }
8202
 
8203
      return 0;
8204
    }
8205
 
8206
  return 0;
8207
}
8208
 
8209
/* Update the data in CUM to advance over an argument
8210
   of mode MODE and data type TYPE.
8211
   (TYPE is null for libcalls where that information may not be
8212
   available.)  */
8213
 
8214
void
8215
sh_function_arg_advance (CUMULATIVE_ARGS *ca, enum machine_mode mode,
8216
                         tree type, int named)
8217
{
8218
  if (ca->force_mem)
8219
    ca->force_mem = 0;
8220
  else if (TARGET_SH5)
8221
    {
8222
      tree type2 = (ca->byref && type
8223
                    ? TREE_TYPE (type)
8224
                    : type);
8225
      enum machine_mode mode2 = (ca->byref && type
8226
                                 ? TYPE_MODE (type2)
8227
                                 : mode);
8228
      int dwords = ((ca->byref
8229
                     ? ca->byref
8230
                     : mode2 == BLKmode
8231
                     ? int_size_in_bytes (type2)
8232
                     : GET_MODE_SIZE (mode2)) + 7) / 8;
8233
      int numregs = MIN (dwords, NPARM_REGS (SImode)
8234
                         - ca->arg_count[(int) SH_ARG_INT]);
8235
 
8236
      if (numregs)
8237
        {
8238
          ca->arg_count[(int) SH_ARG_INT] += numregs;
8239
          if (TARGET_SHCOMPACT
8240
              && SHCOMPACT_FORCE_ON_STACK (mode2, type2))
8241
            {
8242
              ca->call_cookie
8243
                |= CALL_COOKIE_INT_REG (ca->arg_count[(int) SH_ARG_INT]
8244
                                        - numregs, 1);
8245
              /* N.B. We want this also for outgoing.  */
8246
              ca->stack_regs += numregs;
8247
            }
8248
          else if (ca->byref)
8249
            {
8250
              if (! ca->outgoing)
8251
                ca->stack_regs += numregs;
8252
              ca->byref_regs += numregs;
8253
              ca->byref = 0;
8254
              do
8255
                ca->call_cookie
8256
                  |= CALL_COOKIE_INT_REG (ca->arg_count[(int) SH_ARG_INT]
8257
                                          - numregs, 2);
8258
              while (--numregs);
8259
              ca->call_cookie
8260
                |= CALL_COOKIE_INT_REG (ca->arg_count[(int) SH_ARG_INT]
8261
                                        - 1, 1);
8262
            }
8263
          else if (dwords > numregs)
8264
            {
8265
              int pushregs = numregs;
8266
 
8267
              if (TARGET_SHCOMPACT)
8268
                ca->stack_regs += numregs;
8269
              while (pushregs < NPARM_REGS (SImode) - 1
8270
                     && (CALL_COOKIE_INT_REG_GET
8271
                         (ca->call_cookie,
8272
                          NPARM_REGS (SImode) - pushregs)
8273
                         == 1))
8274
                {
8275
                  ca->call_cookie
8276
                    &= ~ CALL_COOKIE_INT_REG (NPARM_REGS (SImode)
8277
                                              - pushregs, 1);
8278
                  pushregs++;
8279
                }
8280
              if (numregs == NPARM_REGS (SImode))
8281
                ca->call_cookie
8282
                  |= CALL_COOKIE_INT_REG (0, 1)
8283
                  | CALL_COOKIE_STACKSEQ (numregs - 1);
8284
              else
8285
                ca->call_cookie
8286
                  |= CALL_COOKIE_STACKSEQ (numregs);
8287
            }
8288
        }
8289
      if (GET_SH_ARG_CLASS (mode2) == SH_ARG_FLOAT
8290
          && (named || ! ca->prototype_p))
8291
        {
8292
          if (mode2 == SFmode && ca->free_single_fp_reg)
8293
            ca->free_single_fp_reg = 0;
8294
          else if (ca->arg_count[(int) SH_ARG_FLOAT]
8295
                   < NPARM_REGS (SFmode))
8296
            {
8297
              int numfpregs
8298
                = MIN ((GET_MODE_SIZE (mode2) + 7) / 8 * 2,
8299
                       NPARM_REGS (SFmode)
8300
                       - ca->arg_count[(int) SH_ARG_FLOAT]);
8301
 
8302
              ca->arg_count[(int) SH_ARG_FLOAT] += numfpregs;
8303
 
8304
              if (TARGET_SHCOMPACT && ! ca->prototype_p)
8305
                {
8306
                  if (ca->outgoing && numregs > 0)
8307
                    do
8308
                      {
8309
                        ca->call_cookie
8310
                          |= (CALL_COOKIE_INT_REG
8311
                              (ca->arg_count[(int) SH_ARG_INT]
8312
                               - numregs + ((numfpregs - 2) / 2),
8313
                               4 + (ca->arg_count[(int) SH_ARG_FLOAT]
8314
                                    - numfpregs) / 2));
8315
                      }
8316
                    while (numfpregs -= 2);
8317
                }
8318
              else if (mode2 == SFmode && (named)
8319
                       && (ca->arg_count[(int) SH_ARG_FLOAT]
8320
                           < NPARM_REGS (SFmode)))
8321
                ca->free_single_fp_reg
8322
                  = FIRST_FP_PARM_REG - numfpregs
8323
                  + ca->arg_count[(int) SH_ARG_FLOAT] + 1;
8324
            }
8325
        }
8326
      return;
8327
    }
8328
 
8329
  if ((TARGET_HITACHI || ca->renesas_abi) && TARGET_FPU_DOUBLE)
8330
    {
8331
      /* Note that we've used the skipped register.  */
8332
      if (mode == SFmode && ca->free_single_fp_reg)
8333
        {
8334
          ca->free_single_fp_reg = 0;
8335
          return;
8336
        }
8337
      /* When we have a DF after an SF, there's an SF register that gets
8338
         skipped in order to align the DF value.  We note this skipped
8339
         register, because the next SF value will use it, and not the
8340
         SF that follows the DF.  */
8341
      if (mode == DFmode
8342
          && ROUND_REG (*ca, DFmode) != ROUND_REG (*ca, SFmode))
8343
        {
8344
          ca->free_single_fp_reg = (ROUND_REG (*ca, SFmode)
8345
                                    + BASE_ARG_REG (mode));
8346
        }
8347
    }
8348
 
8349
  if (! ((TARGET_SH4 || TARGET_SH2A) || ca->renesas_abi)
8350
      || PASS_IN_REG_P (*ca, mode, type))
8351
    (ca->arg_count[(int) GET_SH_ARG_CLASS (mode)]
8352
     = (ROUND_REG (*ca, mode)
8353
        + (mode == BLKmode
8354
           ? ROUND_ADVANCE (int_size_in_bytes (type))
8355
           : ROUND_ADVANCE (GET_MODE_SIZE (mode)))));
8356
}
8357
 
8358
/* The Renesas calling convention doesn't quite fit into this scheme since
8359
   the address is passed like an invisible argument, but one that is always
8360
   passed in memory.  */
8361
static rtx
8362
sh_struct_value_rtx (tree fndecl, int incoming ATTRIBUTE_UNUSED)
8363
{
8364
  if (TARGET_HITACHI || sh_attr_renesas_p (fndecl))
8365
    return 0;
8366
  return gen_rtx_REG (Pmode, 2);
8367
}
8368
 
8369
/* Worker function for TARGET_FUNCTION_VALUE.
8370
 
8371
   For the SH, this is like LIBCALL_VALUE, except that we must change the
8372
   mode like PROMOTE_MODE does.
8373
   ??? PROMOTE_MODE is ignored for non-scalar types.  The set of types
8374
   tested here has to be kept in sync with the one in explow.c:promote_mode.
8375
*/
8376
 
8377
static rtx
8378
sh_function_value (const_tree valtype,
8379
                   const_tree fn_decl_or_type,
8380
                   bool outgoing ATTRIBUTE_UNUSED)
8381
{
8382
  if (fn_decl_or_type
8383
      && !DECL_P (fn_decl_or_type))
8384
    fn_decl_or_type = NULL;
8385
 
8386
  return gen_rtx_REG (
8387
           ((GET_MODE_CLASS (TYPE_MODE (valtype)) == MODE_INT
8388
             && GET_MODE_SIZE (TYPE_MODE (valtype)) < 4
8389
             && (TREE_CODE (valtype) == INTEGER_TYPE
8390
                 || TREE_CODE (valtype) == ENUMERAL_TYPE
8391
                 || TREE_CODE (valtype) == BOOLEAN_TYPE
8392
                 || TREE_CODE (valtype) == REAL_TYPE
8393
                 || TREE_CODE (valtype) == OFFSET_TYPE))
8394
            && sh_promote_prototypes (fn_decl_or_type)
8395
            ? (TARGET_SHMEDIA64 ? DImode : SImode) : TYPE_MODE (valtype)),
8396
           BASE_RETURN_VALUE_REG (TYPE_MODE (valtype)));
8397
}
8398
 
8399
/* Worker function for TARGET_LIBCALL_VALUE.  */
8400
 
8401
static rtx
8402
sh_libcall_value (enum machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
8403
{
8404
  return gen_rtx_REG (mode, BASE_RETURN_VALUE_REG (mode));
8405
}
8406
 
8407
/* Worker function for FUNCTION_VALUE_REGNO_P.  */
8408
 
8409
bool
8410
sh_function_value_regno_p (const unsigned int regno)
8411
{
8412
  return ((regno) == FIRST_RET_REG
8413
          || (TARGET_SH2E && (regno) == FIRST_FP_RET_REG)
8414
          || (TARGET_SHMEDIA_FPU && (regno) == FIRST_FP_RET_REG));
8415
}
8416
 
8417
/* Worker function for TARGET_RETURN_IN_MEMORY.  */
8418
 
8419
static bool
8420
sh_return_in_memory (const_tree type, const_tree fndecl)
8421
{
8422
  if (TARGET_SH5)
8423
    {
8424
      if (TYPE_MODE (type) == BLKmode)
8425
        return ((unsigned HOST_WIDE_INT) int_size_in_bytes (type)) > 8;
8426
      else
8427
        return GET_MODE_SIZE (TYPE_MODE (type)) > 8;
8428
    }
8429
  else
8430
    {
8431
      return (TYPE_MODE (type) == BLKmode
8432
              || ((TARGET_HITACHI || sh_attr_renesas_p (fndecl))
8433
                  && TREE_CODE (type) == RECORD_TYPE));
8434
    }
8435
}
8436
 
8437
/* We actually emit the code in sh_expand_prologue.  We used to use
8438
   a static variable to flag that we need to emit this code, but that
8439
   doesn't work when inlining, when functions are deferred and then emitted
8440
   later.  Fortunately, we already have two flags that are part of struct
8441
   function that tell if a function uses varargs or stdarg.  */
8442
static void
8443
sh_setup_incoming_varargs (CUMULATIVE_ARGS *ca,
8444
                           enum machine_mode mode,
8445
                           tree type,
8446
                           int *pretend_arg_size,
8447
                           int second_time ATTRIBUTE_UNUSED)
8448
{
8449
  gcc_assert (cfun->stdarg);
8450
  if (TARGET_VARARGS_PRETEND_ARGS (current_function_decl))
8451
    {
8452
      int named_parm_regs, anon_parm_regs;
8453
 
8454
      named_parm_regs = (ROUND_REG (*ca, mode)
8455
                         + (mode == BLKmode
8456
                            ? ROUND_ADVANCE (int_size_in_bytes (type))
8457
                            : ROUND_ADVANCE (GET_MODE_SIZE (mode))));
8458
      anon_parm_regs = NPARM_REGS (SImode) - named_parm_regs;
8459
      if (anon_parm_regs > 0)
8460
        *pretend_arg_size = anon_parm_regs * 4;
8461
    }
8462
}
8463
 
8464
static bool
8465
sh_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
8466
{
8467
  return TARGET_SH5;
8468
}
8469
 
8470
static bool
8471
sh_pretend_outgoing_varargs_named (CUMULATIVE_ARGS *ca)
8472
{
8473
  return ! (TARGET_HITACHI || ca->renesas_abi) && ! TARGET_SH5;
8474
}
8475
 
8476
 
8477
/* Define the offset between two registers, one to be eliminated, and
8478
   the other its replacement, at the start of a routine.  */
8479
 
8480
int
8481
initial_elimination_offset (int from, int to)
8482
{
8483
  int regs_saved;
8484
  int regs_saved_rounding = 0;
8485
  int total_saved_regs_space;
8486
  int total_auto_space;
8487
  int save_flags = target_flags;
8488
  int copy_flags;
8489
  HARD_REG_SET live_regs_mask;
8490
 
8491
  shmedia_space_reserved_for_target_registers = false;
8492
  regs_saved = calc_live_regs (&live_regs_mask);
8493
  regs_saved += SHMEDIA_REGS_STACK_ADJUST ();
8494
 
8495
  if (shmedia_reserve_space_for_target_registers_p (regs_saved, &live_regs_mask))
8496
    {
8497
      shmedia_space_reserved_for_target_registers = true;
8498
      regs_saved += shmedia_target_regs_stack_adjust (&live_regs_mask);
8499
    }
8500
 
8501
  if (TARGET_SH5 && regs_saved % (STACK_BOUNDARY / BITS_PER_UNIT))
8502
    regs_saved_rounding = ((STACK_BOUNDARY / BITS_PER_UNIT)
8503
                           - regs_saved % (STACK_BOUNDARY / BITS_PER_UNIT));
8504
 
8505
  total_auto_space = rounded_frame_size (regs_saved) - regs_saved_rounding;
8506
  copy_flags = target_flags;
8507
  target_flags = save_flags;
8508
 
8509
  total_saved_regs_space = regs_saved + regs_saved_rounding;
8510
 
8511
  if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
8512
    return total_saved_regs_space + total_auto_space
8513
      + crtl->args.info.byref_regs * 8;
8514
 
8515
  if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
8516
    return total_saved_regs_space + total_auto_space
8517
      + crtl->args.info.byref_regs * 8;
8518
 
8519
  /* Initial gap between fp and sp is 0.  */
8520
  if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
8521
    return 0;
8522
 
8523
  if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
8524
    return rounded_frame_size (0);
8525
 
8526
  if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
8527
    return rounded_frame_size (0);
8528
 
8529
  gcc_assert (from == RETURN_ADDRESS_POINTER_REGNUM
8530
              && (to == HARD_FRAME_POINTER_REGNUM
8531
                  || to == STACK_POINTER_REGNUM));
8532
  if (TARGET_SH5)
8533
    {
8534
      int n = total_saved_regs_space;
8535
      int pr_reg = TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG;
8536
      save_schedule schedule;
8537
      save_entry *entry;
8538
 
8539
      n += total_auto_space;
8540
 
8541
      /* If it wasn't saved, there's not much we can do.  */
8542
      if (! TEST_HARD_REG_BIT (live_regs_mask, pr_reg))
8543
        return n;
8544
 
8545
      target_flags = copy_flags;
8546
 
8547
      sh5_schedule_saves (&live_regs_mask, &schedule, n);
8548
      for (entry = &schedule.entries[1]; entry->mode != VOIDmode; entry++)
8549
        if (entry->reg == pr_reg)
8550
          {
8551
            target_flags = save_flags;
8552
            return entry->offset;
8553
          }
8554
      gcc_unreachable ();
8555
    }
8556
  else
8557
    return total_auto_space;
8558
}
8559
 
8560
/* Parse the -mfixed-range= option string.  */
8561
void
8562
sh_fix_range (const char *const_str)
8563
{
8564
  int i, first, last;
8565
  char *str, *dash, *comma;
8566
 
8567
  /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
8568
     REG2 are either register names or register numbers.  The effect
8569
     of this option is to mark the registers in the range from REG1 to
8570
     REG2 as ``fixed'' so they won't be used by the compiler.  */
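 
  /* For example (an illustrative option string, not a recommendation):
 
       -mfixed-range=r10-r13
 
     marks r10, r11, r12 and r13 as fixed, and several ranges may be
     given separated by commas, e.g. -mfixed-range=r10-r11,fr12-fr15.  */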
8571
 
8572
  i = strlen (const_str);
8573
  str = (char *) alloca (i + 1);
8574
  memcpy (str, const_str, i + 1);
8575
 
8576
  while (1)
8577
    {
8578
      dash = strchr (str, '-');
8579
      if (!dash)
8580
        {
8581
          warning (0, "value of -mfixed-range must have form REG1-REG2");
8582
          return;
8583
        }
8584
      *dash = '\0';
8585
      comma = strchr (dash + 1, ',');
8586
      if (comma)
8587
        *comma = '\0';
8588
 
8589
      first = decode_reg_name (str);
8590
      if (first < 0)
8591
        {
8592
          warning (0, "unknown register name: %s", str);
8593
          return;
8594
        }
8595
 
8596
      last = decode_reg_name (dash + 1);
8597
      if (last < 0)
8598
        {
8599
          warning (0, "unknown register name: %s", dash + 1);
8600
          return;
8601
        }
8602
 
8603
      *dash = '-';
8604
 
8605
      if (first > last)
8606
        {
8607
          warning (0, "%s-%s is an empty range", str, dash + 1);
8608
          return;
8609
        }
8610
 
8611
      for (i = first; i <= last; ++i)
8612
        fixed_regs[i] = call_used_regs[i] = 1;
8613
 
8614
      if (!comma)
8615
        break;
8616
 
8617
      *comma = ',';
8618
      str = comma + 1;
8619
    }
8620
}
8621
 
8622
/* Insert any deferred function attributes from earlier pragmas.  */
8623
static void
8624
sh_insert_attributes (tree node, tree *attributes)
8625
{
8626
  tree attrs;
8627
 
8628
  if (TREE_CODE (node) != FUNCTION_DECL)
8629
    return;
8630
 
8631
  /* We are only interested in declarations.  */
8632
  if (!DECL_P (node))
8633
    return;
8634
 
8635
  /* Append the attributes to the deferred attributes.  */
8636
  *sh_deferred_function_attributes_tail = *attributes;
8637
  attrs = sh_deferred_function_attributes;
8638
  if (!attrs)
8639
    return;
8640
 
8641
  /* Some attributes imply or require the interrupt attribute.  */
8642
  if (!lookup_attribute ("interrupt_handler", attrs)
8643
      && !lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (node)))
8644
    {
8645
      /* If we have a trapa_handler, but no interrupt_handler attribute,
8646
         insert an interrupt_handler attribute.  */
8647
      if (lookup_attribute ("trapa_handler", attrs) != NULL_TREE)
8648
        /* We can't use sh_pr_interrupt here because that's not in the
8649
           java frontend.  */
8650
        attrs
8651
          = tree_cons (get_identifier("interrupt_handler"), NULL_TREE, attrs);
8652
      /* However, for sp_switch, trap_exit, nosave_low_regs and resbank,
8653
         if the interrupt attribute is missing, we ignore the attribute
8654
         and warn.  */
8655
      else if (lookup_attribute ("sp_switch", attrs)
8656
               || lookup_attribute ("trap_exit", attrs)
8657
               || lookup_attribute ("nosave_low_regs", attrs)
8658
               || lookup_attribute ("resbank", attrs))
8659
        {
8660
          tree *tail;
8661
 
8662
          for (tail = attributes; attrs; attrs = TREE_CHAIN (attrs))
8663
            {
8664
              if (is_attribute_p ("sp_switch", TREE_PURPOSE (attrs))
8665
                  || is_attribute_p ("trap_exit", TREE_PURPOSE (attrs))
8666
                  || is_attribute_p ("nosave_low_regs", TREE_PURPOSE (attrs))
8667
                  || is_attribute_p ("resbank", TREE_PURPOSE (attrs)))
8668
                warning (OPT_Wattributes,
8669
                         "%qE attribute only applies to interrupt functions",
8670
                         TREE_PURPOSE (attrs));
8671
              else
8672
                {
8673
                  *tail = tree_cons (TREE_PURPOSE (attrs), NULL_TREE,
8674
                                     NULL_TREE);
8675
                  tail = &TREE_CHAIN (*tail);
8676
                }
8677
            }
8678
          attrs = *attributes;
8679
        }
8680
    }
8681
 
8682
  /* Install the processed list.  */
8683
  *attributes = attrs;
8684
 
8685
  /* Clear deferred attributes.  */
8686
  sh_deferred_function_attributes = NULL_TREE;
8687
  sh_deferred_function_attributes_tail = &sh_deferred_function_attributes;
8688
 
8689
  return;
8690
}
8691
 
8692
/* Supported attributes:
8693
 
8694
   interrupt_handler -- specifies this function is an interrupt handler.
8695
 
8696
   trapa_handler -- like above, but don't save all registers.
8697
 
8698
   sp_switch -- specifies an alternate stack for an interrupt handler
8699
   to run on.
8700
 
8701
   trap_exit -- use a trapa to exit an interrupt function instead of
8702
   an rte instruction.
8703
 
8704
   nosave_low_regs -- don't save r0..r7 in an interrupt handler.
8705
     This is useful on the SH3 and upwards,
8706
     which has a separate set of low regs for User and Supervisor modes.
8707
     This should only be used for the lowest level of interrupts.  Higher levels
8708
     of interrupts must save the registers in case they themselves are
8709
     interrupted.
8710
 
8711
   renesas -- use Renesas calling/layout conventions (functions and
8712
   structures).
8713
 
8714
   resbank -- In case of an ISR, use a register bank to save registers
8715
   R0-R14, MACH, MACL, GBR and PR.  This is useful only on SH2A targets.
8716
*/
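 
/* Usage sketch (illustrative only; the function and variable names below
   are made up and the trap number is arbitrary):
 
     void *alt_stack;
 
     void isr (void)
       __attribute__ ((interrupt_handler, sp_switch ("alt_stack"),
                       trap_exit (11)));
 
     void sh2a_isr (void)
       __attribute__ ((interrupt_handler, resbank));  */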
8717
 
8718
/* Handle a 'resbank' attribute.  */
8719
static tree
8720
sh_handle_resbank_handler_attribute (tree * node, tree name,
8721
                                     tree args ATTRIBUTE_UNUSED,
8722
                                     int flags ATTRIBUTE_UNUSED,
8723
                                     bool * no_add_attrs)
8724
{
8725
  if (!TARGET_SH2A)
8726
    {
8727
      warning (OPT_Wattributes, "%qE attribute is supported only for SH2A",
8728
               name);
8729
      *no_add_attrs = true;
8730
    }
8731
  if (TREE_CODE (*node) != FUNCTION_DECL)
8732
    {
8733
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
8734
               name);
8735
      *no_add_attrs = true;
8736
    }
8737
 
8738
  return NULL_TREE;
8739
}
8740
 
8741
/* Handle an "interrupt_handler" attribute; arguments as in
8742
   struct attribute_spec.handler.  */
8743
static tree
8744
sh_handle_interrupt_handler_attribute (tree *node, tree name,
8745
                                       tree args ATTRIBUTE_UNUSED,
8746
                                       int flags ATTRIBUTE_UNUSED,
8747
                                       bool *no_add_attrs)
8748
{
8749
  if (TREE_CODE (*node) != FUNCTION_DECL)
8750
    {
8751
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
8752
               name);
8753
      *no_add_attrs = true;
8754
    }
8755
  else if (TARGET_SHCOMPACT)
8756
    {
8757
      error ("attribute interrupt_handler is not compatible with -m5-compact");
8758
      *no_add_attrs = true;
8759
    }
8760
 
8761
  return NULL_TREE;
8762
}
8763
 
8764
/* Handle a 'function_vector' attribute; arguments as in
8765
   struct attribute_spec.handler.  */
8766
static tree
8767
sh2a_handle_function_vector_handler_attribute (tree * node, tree name,
8768
                                               tree args ATTRIBUTE_UNUSED,
8769
                                               int flags ATTRIBUTE_UNUSED,
8770
                                               bool * no_add_attrs)
8771
{
8772
  if (!TARGET_SH2A)
8773
    {
8774
      warning (OPT_Wattributes, "%qE attribute only applies to SH2A",
8775
               name);
8776
      *no_add_attrs = true;
8777
    }
8778
  else if (TREE_CODE (*node) != FUNCTION_DECL)
8779
    {
8780
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
8781
               name);
8782
      *no_add_attrs = true;
8783
    }
8784
  else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
8785
    {
8786
      /* The argument must be a constant integer.  */
8787
      warning (OPT_Wattributes,
8788
               "%qE attribute argument not an integer constant",
8789
               name);
8790
      *no_add_attrs = true;
8791
    }
8792
  else if (TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
8793
    {
8794
      /* The argument value must be between 0 and 255.  */
8795
      warning (OPT_Wattributes,
8796
               "%qE attribute argument should be between 0 to 255",
8797
               name);
8798
      *no_add_attrs = true;
8799
    }
8800
  return NULL_TREE;
8801
}
8802
 
8803
/* Returns 1 if X refers to a function that has been assigned the attribute
8804
   'function_vector'.  */
8805
int
8806
sh2a_is_function_vector_call (rtx x)
8807
{
8808
  if (GET_CODE (x) == SYMBOL_REF
8809
      && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
8810
    {
8811
      tree tr = SYMBOL_REF_DECL (x);
8812
 
8813
      if (sh2a_function_vector_p (tr))
8814
        return 1;
8815
    }
8816
 
8817
  return 0;
8818
}
8819
 
8820
/* Returns the function vector number, if the attribute
8821
   'function_vector' is assigned, otherwise returns zero.  */
8822
int
8823
sh2a_get_function_vector_number (rtx x)
8824
{
8825
  int num;
8826
  tree list, t;
8827
 
8828
  if ((GET_CODE (x) == SYMBOL_REF)
8829
      && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
8830
    {
8831
      t = SYMBOL_REF_DECL (x);
8832
 
8833
      if (TREE_CODE (t) != FUNCTION_DECL)
8834
        return 0;
8835
 
8836
      list = SH_ATTRIBUTES (t);
8837
      while (list)
8838
        {
8839
          if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
8840
            {
8841
              num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
8842
              return num;
8843
            }
8844
 
8845
          list = TREE_CHAIN (list);
8846
        }
8847
 
8848
      return 0;
8849
    }
8850
  else
8851
    return 0;
8852
}
8853
 
8854
/* Handle an "sp_switch" attribute; arguments as in
8855
   struct attribute_spec.handler.  */
8856
static tree
8857
sh_handle_sp_switch_attribute (tree *node, tree name, tree args,
8858
                               int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
8859
{
8860
  if (TREE_CODE (*node) != FUNCTION_DECL)
8861
    {
8862
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
8863
               name);
8864
      *no_add_attrs = true;
8865
    }
8866
  else if (TREE_CODE (TREE_VALUE (args)) != STRING_CST)
8867
    {
8868
      /* The argument must be a constant string.  */
8869
      warning (OPT_Wattributes, "%qE attribute argument not a string constant",
8870
               name);
8871
      *no_add_attrs = true;
8872
    }
8873
 
8874
  return NULL_TREE;
8875
}
8876
 
8877
/* Handle an "trap_exit" attribute; arguments as in
8878
   struct attribute_spec.handler.  */
8879
static tree
8880
sh_handle_trap_exit_attribute (tree *node, tree name, tree args,
8881
                               int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
8882
{
8883
  if (TREE_CODE (*node) != FUNCTION_DECL)
8884
    {
8885
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
8886
               name);
8887
      *no_add_attrs = true;
8888
    }
8889
  /* The argument specifies a trap number to be used in a trapa instruction
8890
     at function exit (instead of an rte instruction).  */
8891
  else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
8892
    {
8893
      /* The argument must be a constant integer.  */
8894
      warning (OPT_Wattributes, "%qE attribute argument not an "
8895
               "integer constant", name);
8896
      *no_add_attrs = true;
8897
    }
8898
 
8899
  return NULL_TREE;
8900
}
8901
 
8902
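/* Handle a "renesas" attribute; arguments as in
   struct attribute_spec.handler.  The attribute is accepted as-is;
   no checking is required here.  */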
static tree
8903
sh_handle_renesas_attribute (tree *node ATTRIBUTE_UNUSED,
8904
                             tree name ATTRIBUTE_UNUSED,
8905
                             tree args ATTRIBUTE_UNUSED,
8906
                             int flags ATTRIBUTE_UNUSED,
8907
                             bool *no_add_attrs ATTRIBUTE_UNUSED)
8908
{
8909
  return NULL_TREE;
8910
}
8911
 
8912
/* True if __attribute__((renesas)) or -mrenesas.  */
8913
int
8914
sh_attr_renesas_p (const_tree td)
8915
{
8916
  if (TARGET_HITACHI)
8917
    return 1;
8918
  if (td == 0)
8919
    return 0;
8920
  if (DECL_P (td))
8921
    td = TREE_TYPE (td);
8922
  if (td == error_mark_node)
8923
    return 0;
8924
  return (lookup_attribute ("renesas", TYPE_ATTRIBUTES (td))
8925
          != NULL_TREE);
8926
}
8927
 
8928
/* True if __attribute__((renesas)) or -mrenesas, for the current
8929
   function.  */
8930
int
8931
sh_cfun_attr_renesas_p (void)
8932
{
8933
  return sh_attr_renesas_p (current_function_decl);
8934
}
8935
 
8936
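/* Return nonzero if the current function has the "interrupt_handler"
   attribute.  */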
int
8937
sh_cfun_interrupt_handler_p (void)
8938
{
8939
  return (lookup_attribute ("interrupt_handler",
8940
                            DECL_ATTRIBUTES (current_function_decl))
8941
          != NULL_TREE);
8942
}
8943
 
8944
/* Returns 1 if FUNC has been assigned the attribute
8945
   "function_vector".  */
8946
int
8947
sh2a_function_vector_p (tree func)
8948
{
8949
  tree list;
8950
  if (TREE_CODE (func) != FUNCTION_DECL)
8951
    return 0;
8952
 
8953
  list = SH_ATTRIBUTES (func);
8954
  while (list)
8955
    {
8956
      if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
8957
        return 1;
8958
 
8959
      list = TREE_CHAIN (list);
8960
    }
8961
  return 0;
8962
}
8963
 
8964
/* Returns TRUE if the current function has the "resbank" attribute.  */
8965
 
8966
int
8967
sh_cfun_resbank_handler_p (void)
8968
{
8969
  return ((lookup_attribute ("resbank",
8970
                             DECL_ATTRIBUTES (current_function_decl))
8971
           != NULL_TREE)
8972
          && (lookup_attribute ("interrupt_handler",
8973
                                DECL_ATTRIBUTES (current_function_decl))
8974
              != NULL_TREE) && TARGET_SH2A);
8975
}
8976
 
8977
/* Implement TARGET_CHECK_PCH_TARGET_FLAGS.  */
8978
 
8979
static const char *
8980
sh_check_pch_target_flags (int old_flags)
8981
{
8982
  if ((old_flags ^ target_flags) & (MASK_SH1 | MASK_SH2 | MASK_SH3
8983
                                    | MASK_SH_E | MASK_HARD_SH4
8984
                                    | MASK_FPU_SINGLE | MASK_SH4))
8985
    return _("created and used with different architectures / ABIs");
8986
  if ((old_flags ^ target_flags) & MASK_HITACHI)
8987
    return _("created and used with different ABIs");
8988
  if ((old_flags ^ target_flags) & MASK_LITTLE_ENDIAN)
8989
    return _("created and used with different endianness");
8990
  return NULL;
8991
}
8992
 
8993
/* Predicates used by the templates.  */
8994
 
8995
/* Returns 1 if OP is MACL, MACH or PR.  The input must be a REG rtx.
8996
   Used only in general_movsrc_operand.  */
8997
 
8998
int
8999
system_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
9000
{
9001
  switch (REGNO (op))
9002
    {
9003
    case PR_REG:
9004
    case MACL_REG:
9005
    case MACH_REG:
9006
      return 1;
9007
    }
9008
  return 0;
9009
}
9010
 
9011
/* Nonzero if OP is a floating point value with value 0.0.  */
9012
 
9013
int
9014
fp_zero_operand (rtx op)
9015
{
9016
  REAL_VALUE_TYPE r;
9017
 
9018
  if (GET_MODE (op) != SFmode)
9019
    return 0;
9020
 
9021
  REAL_VALUE_FROM_CONST_DOUBLE (r, op);
9022
  return REAL_VALUES_EQUAL (r, dconst0) && ! REAL_VALUE_MINUS_ZERO (r);
9023
}
9024
 
9025
/* Nonzero if OP is a floating point value with value 1.0.  */
9026
 
9027
int
9028
fp_one_operand (rtx op)
9029
{
9030
  REAL_VALUE_TYPE r;
9031
 
9032
  if (GET_MODE (op) != SFmode)
9033
    return 0;
9034
 
9035
  REAL_VALUE_FROM_CONST_DOUBLE (r, op);
9036
  return REAL_VALUES_EQUAL (r, dconst1);
9037
}
9038
 
9039
/* In general mode switching is used.  If we are
9040
   compiling without -mfmovd, movsf_ie isn't taken into account for
9041
   mode switching.  We could check in machine_dependent_reorg for
9042
   cases where we know we are in single precision mode, but there is
9043
   no interface to find that out during reload, so we must avoid
9044
   choosing an fldi alternative during reload and thus failing to
9045
   allocate a scratch register for the constant loading.  */
9046
int
9047
fldi_ok (void)
9048
{
9049
  return 1;
9050
}
9051
 
9052
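/* Predicate for tertiary reloads: return nonzero if OP is a MEM or,
   on SH4, a CONST_DOUBLE.  */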
int
9053
tertiary_reload_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
9054
{
9055
  enum rtx_code code = GET_CODE (op);
9056
  return code == MEM || (TARGET_SH4 && code == CONST_DOUBLE);
9057
}
9058
 
9059
/* Return the TLS type for TLS symbols, TLS_MODEL_NONE otherwise.  */
9060
enum tls_model
9061
tls_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
9062
{
9063
  if (GET_CODE (op) != SYMBOL_REF)
9064
    return TLS_MODEL_NONE;
9065
  return SYMBOL_REF_TLS_MODEL (op);
9066
}
9067
 
9068
/* Return the destination address of a branch.  */
9069
 
9070
static int
9071
branch_dest (rtx branch)
9072
{
9073
  rtx dest = SET_SRC (PATTERN (branch));
9074
  int dest_uid;
9075
 
9076
  if (GET_CODE (dest) == IF_THEN_ELSE)
9077
    dest = XEXP (dest, 1);
9078
  dest = XEXP (dest, 0);
9079
  dest_uid = INSN_UID (dest);
9080
  return INSN_ADDRESSES (dest_uid);
9081
}
9082
 
9083
/* Return nonzero if REG is not used after INSN.
9084
   We assume REG is a reload reg, and therefore does
9085
   not live past labels.  It may live past calls or jumps though.  */
9086
int
9087
reg_unused_after (rtx reg, rtx insn)
9088
{
9089
  enum rtx_code code;
9090
  rtx set;
9091
 
9092
  /* If the reg is set by this instruction, then it is safe for our
9093
     case.  Disregard the case where this is a store to memory, since
9094
     we are checking a register used in the store address.  */
9095
  set = single_set (insn);
9096
  if (set && !MEM_P (SET_DEST (set))
9097
      && reg_overlap_mentioned_p (reg, SET_DEST (set)))
9098
    return 1;
9099
 
9100
  while ((insn = NEXT_INSN (insn)))
9101
    {
9102
      rtx set;
9103
      if (!INSN_P (insn))
9104
        continue;
9105
 
9106
      code = GET_CODE (insn);
9107
 
9108
#if 0
9109
      /* If this is a label that existed before reload, then the register
9110
         is dead here.  However, if this is a label added by reorg, then
9111
         the register may still be live here.  We can't tell the difference,
9112
         so we just ignore labels completely.  */
9113
      if (code == CODE_LABEL)
9114
        return 1;
9115
      /* else */
9116
#endif
9117
 
9118
      if (code == JUMP_INSN)
9119
        return 0;
9120
 
9121
      /* If this is a sequence, we must handle them all at once.
9122
         We could have for instance a call that sets the target register,
9123
         and an insn in a delay slot that uses the register.  In this case,
9124
         we must return 0.  */
9125
      else if (code == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
9126
        {
9127
          int i;
9128
          int retval = 0;
9129
 
9130
          for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
9131
            {
9132
              rtx this_insn = XVECEXP (PATTERN (insn), 0, i);
9133
              rtx set = single_set (this_insn);
9134
 
9135
              if (CALL_P (this_insn))
9136
                code = CALL_INSN;
9137
              else if (JUMP_P (this_insn))
9138
                {
9139
                  if (INSN_ANNULLED_BRANCH_P (this_insn))
9140
                    return 0;
9141
                  code = JUMP_INSN;
9142
                }
9143
 
9144
              if (set && reg_overlap_mentioned_p (reg, SET_SRC (set)))
9145
                return 0;
9146
              if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
9147
                {
9148
                  if (!MEM_P (SET_DEST (set)))
9149
                    retval = 1;
9150
                  else
9151
                    return 0;
9152
                }
9153
              if (set == 0
9154
                  && reg_overlap_mentioned_p (reg, PATTERN (this_insn)))
9155
                return 0;
9156
            }
9157
          if (retval == 1)
9158
            return 1;
9159
          else if (code == JUMP_INSN)
9160
            return 0;
9161
        }
9162
 
9163
      set = single_set (insn);
9164
      if (set && reg_overlap_mentioned_p (reg, SET_SRC (set)))
9165
        return 0;
9166
      if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
9167
        return !MEM_P (SET_DEST (set));
9168
      if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
9169
        return 0;
9170
 
9171
      if (code == CALL_INSN && call_really_used_regs[REGNO (reg)])
9172
        return 1;
9173
    }
9174
  return 1;
9175
}
9176
 
9177
#include "ggc.h"
9178
 
9179
static GTY(()) rtx fpscr_rtx;
9180
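/* Return the shared rtx for the FPSCR register, creating it on first
   use.  */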
rtx
9181
get_fpscr_rtx (void)
9182
{
9183
  if (! fpscr_rtx)
9184
    {
9185
      fpscr_rtx = gen_rtx_REG (PSImode, FPSCR_REG);
9186
      REG_USERVAR_P (fpscr_rtx) = 1;
9187
      mark_user_reg (fpscr_rtx);
9188
    }
9189
  if (! reload_completed || mdep_reorg_phase != SH_AFTER_MDEP_REORG)
9190
    mark_user_reg (fpscr_rtx);
9191
  return fpscr_rtx;
9192
}
9193
 
9194
static GTY(()) tree fpscr_values;
9195
 
9196
static void
9197
emit_fpu_switch (rtx scratch, int index)
9198
{
9199
  rtx dst, src;
9200
 
9201
  if (fpscr_values == NULL)
9202
    {
9203
      tree t;
9204
 
9205
      t = build_index_type (integer_one_node);
9206
      t = build_array_type (integer_type_node, t);
9207
      t = build_decl (BUILTINS_LOCATION,
9208
                      VAR_DECL, get_identifier ("__fpscr_values"), t);
9209
      DECL_ARTIFICIAL (t) = 1;
9210
      DECL_IGNORED_P (t) = 1;
9211
      DECL_EXTERNAL (t) = 1;
9212
      TREE_STATIC (t) = 1;
9213
      TREE_PUBLIC (t) = 1;
9214
      TREE_USED (t) = 1;
9215
 
9216
      fpscr_values = t;
9217
    }
9218
 
9219
  src = DECL_RTL (fpscr_values);
9220
  if (!can_create_pseudo_p ())
9221
    {
9222
      emit_move_insn (scratch, XEXP (src, 0));
9223
      if (index != 0)
9224
        emit_insn (gen_addsi3 (scratch, scratch, GEN_INT (index * 4)));
9225
      src = adjust_automodify_address (src, PSImode, scratch, index * 4);
9226
    }
9227
  else
9228
    src = adjust_address (src, PSImode, index * 4);
9229
 
9230
  dst = get_fpscr_rtx ();
9231
  emit_move_insn (dst, src);
9232
}
9233
 
9234
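/* Thin wrappers around emit_insn for single- and double-precision FP
   patterns; used by the expand_sf_* / expand_df_* helpers below.  */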
void
9235
emit_sf_insn (rtx pat)
9236
{
9237
  emit_insn (pat);
9238
}
9239
 
9240
void
9241
emit_df_insn (rtx pat)
9242
{
9243
  emit_insn (pat);
9244
}
9245
 
9246
void
9247
expand_sf_unop (rtx (*fun) (rtx, rtx, rtx), rtx *operands)
9248
{
9249
  emit_sf_insn ((*fun) (operands[0], operands[1], get_fpscr_rtx ()));
9250
}
9251
 
9252
void
9253
expand_sf_binop (rtx (*fun) (rtx, rtx, rtx, rtx), rtx *operands)
9254
{
9255
  emit_sf_insn ((*fun) (operands[0], operands[1], operands[2],
9256
                         get_fpscr_rtx ()));
9257
}
9258
 
9259
void
9260
expand_df_unop (rtx (*fun) (rtx, rtx, rtx), rtx *operands)
9261
{
9262
  emit_df_insn ((*fun) (operands[0], operands[1], get_fpscr_rtx ()));
9263
}
9264
 
9265
void
9266
expand_df_binop (rtx (*fun) (rtx, rtx, rtx, rtx), rtx *operands)
9267
{
9268
  emit_df_insn ((*fun) (operands[0], operands[1], operands[2],
9269
                        get_fpscr_rtx ()));
9270
}
9271
 
9272
static rtx get_free_reg (HARD_REG_SET);
9273
 
9274
/* This function returns a register to use for loading the address from
9275
   which the fpscr is loaded.  Currently it always returns r1 or r7, but when we are
9276
   able to use pseudo registers after combine, or have a better mechanism
9277
   for choosing a register, it should be done here.  */
9278
/* REGS_LIVE is the liveness information for the point for which we
9279
   need this allocation.  In some bare-bones exit blocks, r1 is live at the
9280
   start.  We can even have all of r0..r3 being live:
9281
__complex__ long long f (double d) { if (d == 0) return 2; else return 3; }
9282
   The INSN before which new insns are placed will clobber the register
9283
   we return.  If a basic block consists only of setting the return value
9284
   register to a pseudo and using that register, the return value is not
9285
   live before or after this block, yet we'll insert our insns right in
9286
   the middle.  */
9287
 
9288
static rtx
9289
get_free_reg (HARD_REG_SET regs_live)
9290
{
9291
  if (! TEST_HARD_REG_BIT (regs_live, 1))
9292
    return gen_rtx_REG (Pmode, 1);
9293
 
9294
  /* Hard reg 1 is live; since this is a SMALL_REGISTER_CLASSES target,
9295
     there shouldn't be anything but a jump before the function end.  */
9296
  gcc_assert (!TEST_HARD_REG_BIT (regs_live, 7));
9297
  return gen_rtx_REG (Pmode, 7);
9298
}
9299
 
9300
/* This function will set the fpscr from memory.
9301
   MODE is the mode we are setting it to.  */
9302
void
9303
fpscr_set_from_mem (int mode, HARD_REG_SET regs_live)
9304
{
9305
  enum attr_fp_mode fp_mode = (enum attr_fp_mode) mode;
9306
  enum attr_fp_mode norm_mode = ACTUAL_NORMAL_MODE (FP_MODE);
9307
  rtx addr_reg;
9308
 
9309
  addr_reg = !can_create_pseudo_p () ? get_free_reg (regs_live) : NULL_RTX;
9310
  emit_fpu_switch (addr_reg, fp_mode == norm_mode);
9311
}
9312
 
9313
/* Is the given character a logical line separator for the assembler?  */
9314
#ifndef IS_ASM_LOGICAL_LINE_SEPARATOR
9315
#define IS_ASM_LOGICAL_LINE_SEPARATOR(C, STR) ((C) == ';')
9316
#endif
9317
 
9318
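/* Return the number of bytes by which the length of INSN must be
   adjusted: two for an unfilled delay slot, plus the extra size of any
   sh-dsp parallel processing insns found in inline assembly.  */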
int
9319
sh_insn_length_adjustment (rtx insn)
9320
{
9321
  /* Instructions with unfilled delay slots take up an extra two bytes for
9322
     the nop in the delay slot.  */
9323
  if (((NONJUMP_INSN_P (insn)
9324
        && GET_CODE (PATTERN (insn)) != USE
9325
        && GET_CODE (PATTERN (insn)) != CLOBBER)
9326
       || CALL_P (insn)
9327
       || (JUMP_P (insn) && !JUMP_TABLE_DATA_P (insn)))
9328
      && GET_CODE (PATTERN (NEXT_INSN (PREV_INSN (insn)))) != SEQUENCE
9329
      && get_attr_needs_delay_slot (insn) == NEEDS_DELAY_SLOT_YES)
9330
    return 2;
9331
 
9332
  /* SH2e has a bug that prevents the use of annulled branches, so if
9333
     the delay slot is not filled, we'll have to put a NOP in it.  */
9334
  if (sh_cpu_attr == CPU_SH2E
9335
      && JUMP_P (insn) && !JUMP_TABLE_DATA_P (insn)
9336
      && get_attr_type (insn) == TYPE_CBRANCH
9337
      && GET_CODE (PATTERN (NEXT_INSN (PREV_INSN (insn)))) != SEQUENCE)
9338
    return 2;
9339
 
9340
  /* sh-dsp parallel processing insns take four bytes instead of two.  */
9341
 
9342
  if (NONJUMP_INSN_P (insn))
9343
    {
9344
      int sum = 0;
9345
      rtx body = PATTERN (insn);
9346
      const char *templ;
9347
      char c;
9348
      int maybe_label = 1;
9349
 
9350
      if (GET_CODE (body) == ASM_INPUT)
9351
        templ = XSTR (body, 0);
9352
      else if (asm_noperands (body) >= 0)
9353
        templ
9354
          = decode_asm_operands (body, NULL, NULL, NULL, NULL, NULL);
9355
      else
9356
        return 0;
9357
      do
9358
        {
9359
          int ppi_adjust = 0;
9360
 
9361
          do
9362
            c = *templ++;
9363
          while (c == ' ' || c == '\t');
9364
          /* all sh-dsp parallel-processing insns start with p.
9365
             The only non-ppi sh insn starting with p is pref.
9366
             The only ppi starting with pr is prnd.  */
9367
          if ((c == 'p' || c == 'P') && strncasecmp ("re", templ, 2))
9368
            ppi_adjust = 2;
9369
          /* The repeat pseudo-insn expands to three insns, a total of
9370
             six bytes in size.  */
9371
          else if ((c == 'r' || c == 'R')
9372
                   && ! strncasecmp ("epeat", templ, 5))
9373
            ppi_adjust = 4;
9374
          while (c && c != '\n'
9375
                 && ! IS_ASM_LOGICAL_LINE_SEPARATOR (c, templ))
9376
            {
9377
              /* If this is a label, it is obviously not a ppi insn.  */
9378
              if (c == ':' && maybe_label)
9379
                {
9380
                  ppi_adjust = 0;
9381
                  break;
9382
                }
9383
              else if (c == '\'' || c == '"')
9384
                maybe_label = 0;
9385
              c = *templ++;
9386
            }
9387
          sum += ppi_adjust;
9388
          maybe_label = c != ':';
9389
        }
9390
      while (c);
9391
      return sum;
9392
    }
9393
  return 0;
9394
}
9395
 
9396
/* Return TRUE for a valid displacement for the REG+disp addressing
9397
   with MODE.  */
9398
 
9399
/* ??? The SH2e does not have the REG+disp addressing mode when loading values
9400
   into the FRx registers.  We implement this by setting the maximum offset
9401
   to zero when the value is SFmode.  This also restricts loading of SFmode
9402
   values into the integer registers, but that can't be helped.  */
9403
 
9404
/* The SH allows a displacement in a QI or HI amode, but only when the
9405
   other operand is R0.  GCC doesn't handle this very well, so we forget
9406
   all of that.
9407
 
9408
   A legitimate index for a QI or HI is 0, SI can be any number 0..63,
9409
   DI can be any number 0..60.  */
9410
 
9411
bool
9412
sh_legitimate_index_p (enum machine_mode mode, rtx op)
9413
{
9414
  if (CONST_INT_P (op))
9415
    {
9416
      if (TARGET_SHMEDIA)
9417
        {
9418
          int size;
9419
 
9420
          /* Check if this is the address of an unaligned load / store.  */
9421
          if (mode == VOIDmode)
9422
            return CONST_OK_FOR_I06 (INTVAL (op));
9423
 
9424
          size = GET_MODE_SIZE (mode);
9425
          return (!(INTVAL (op) & (size - 1))
9426
                  && INTVAL (op) >= -512 * size
9427
                  && INTVAL (op) < 512 * size);
9428
        }
9429
 
9430
      if (TARGET_SH2A)
9431
        {
9432
          if (GET_MODE_SIZE (mode) == 1
9433
                && (unsigned) INTVAL (op) < 4096)
9434
            return true;
9435
        }
9436
 
9437
      if ((GET_MODE_SIZE (mode) == 4
9438
           && (unsigned) INTVAL (op) < 64
9439
           && !(INTVAL (op) & 3)
9440
           && !(TARGET_SH2E && mode == SFmode))
9441
          || (GET_MODE_SIZE (mode) == 4
9442
              && (unsigned) INTVAL (op) < 16383
9443
              && !(INTVAL (op) & 3) && TARGET_SH2A))
9444
        return true;
9445
 
9446
      if ((GET_MODE_SIZE (mode) == 8
9447
           && (unsigned) INTVAL (op) < 60
9448
           && !(INTVAL (op) & 3)
9449
           && !((TARGET_SH4 || TARGET_SH2A) && mode == DFmode))
9450
          || ((GET_MODE_SIZE (mode)==8)
9451
              && (unsigned) INTVAL (op) < 8192
9452
              && !(INTVAL (op) & (TARGET_SH2A_DOUBLE ? 7 : 3))
9453
              && (TARGET_SH2A && mode == DFmode)))
9454
        return true;
9455
    }
9456
 
9457
  return false;
9458
}
9459
 
9460
/* Recognize an RTL expression that is a valid memory address for
9461
   an instruction.
9462
   The MODE argument is the machine mode for the MEM expression
9463
   that wants to use this address.
9464
   Allow  REG
9465
          REG+disp
9466
          REG+r0
9467
          REG++
9468
          --REG  */
9469
 
9470
static bool
9471
sh_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
9472
{
9473
  if (MAYBE_BASE_REGISTER_RTX_P (x, strict))
9474
    return true;
9475
  else if ((GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_DEC)
9476
           && ! TARGET_SHMEDIA
9477
           && MAYBE_BASE_REGISTER_RTX_P (XEXP (x, 0), strict))
9478
    return true;
9479
  else if (GET_CODE (x) == PLUS
9480
           && (mode != PSImode || reload_completed))
9481
    {
9482
      rtx xop0 = XEXP (x, 0);
9483
      rtx xop1 = XEXP (x, 1);
9484
 
9485
      if (GET_MODE_SIZE (mode) <= 8
9486
          && MAYBE_BASE_REGISTER_RTX_P (xop0, strict)
9487
          && sh_legitimate_index_p (mode, xop1))
9488
        return true;
9489
 
9490
      if ((ALLOW_INDEXED_ADDRESS || GET_MODE (x) == DImode
9491
           || ((xop0 == stack_pointer_rtx
9492
                || xop0 == hard_frame_pointer_rtx)
9493
               && REG_P (xop1) && REGNO (xop1) == R0_REG)
9494
           || ((xop1 == stack_pointer_rtx
9495
                || xop1 == hard_frame_pointer_rtx)
9496
               && REG_P (xop0) && REGNO (xop0) == R0_REG))
9497
          && ((!TARGET_SHMEDIA && GET_MODE_SIZE (mode) <= 4)
9498
              || (TARGET_SHMEDIA && GET_MODE_SIZE (mode) <= 8)
9499
              || ((TARGET_SH4 || TARGET_SH2A_DOUBLE)
9500
                  && TARGET_FMOVD && mode == DFmode)))
9501
        {
9502
          if (MAYBE_BASE_REGISTER_RTX_P (xop1, strict)
9503
              && MAYBE_INDEX_REGISTER_RTX_P (xop0, strict))
9504
            return true;
9505
          if (MAYBE_INDEX_REGISTER_RTX_P (xop1, strict)
9506
              && MAYBE_BASE_REGISTER_RTX_P (xop0, strict))
9507
            return true;
9508
        }
9509
    }
9510
 
9511
  return false;
9512
}
9513
 
9514
/* Return TRUE if X references a SYMBOL_REF or LABEL_REF whose symbol
9515
   isn't protected by a PIC unspec.  */
9516
int
9517
nonpic_symbol_mentioned_p (rtx x)
9518
{
9519
  register const char *fmt;
9520
  register int i;
9521
 
9522
  if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF
9523
      || GET_CODE (x) == PC)
9524
    return 1;
9525
 
9526
  /* We don't want to look into the possible MEM location of a
9527
     CONST_DOUBLE, since we're not going to use it, in general.  */
9528
  if (GET_CODE (x) == CONST_DOUBLE)
9529
    return 0;
9530
 
9531
  if (GET_CODE (x) == UNSPEC
9532
      && (XINT (x, 1) == UNSPEC_PIC
9533
          || XINT (x, 1) == UNSPEC_GOT
9534
          || XINT (x, 1) == UNSPEC_GOTOFF
9535
          || XINT (x, 1) == UNSPEC_GOTPLT
9536
          || XINT (x, 1) == UNSPEC_GOTTPOFF
9537
          || XINT (x, 1) == UNSPEC_DTPOFF
9538
          || XINT (x, 1) == UNSPEC_TPOFF
9539
          || XINT (x, 1) == UNSPEC_PLT
9540
          || XINT (x, 1) == UNSPEC_SYMOFF
9541
          || XINT (x, 1) == UNSPEC_PCREL_SYMOFF))
9542
    return 0;
9543
 
9544
  fmt = GET_RTX_FORMAT (GET_CODE (x));
9545
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
9546
    {
9547
      if (fmt[i] == 'E')
9548
        {
9549
          register int j;
9550
 
9551
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9552
            if (nonpic_symbol_mentioned_p (XVECEXP (x, i, j)))
9553
              return 1;
9554
        }
9555
      else if (fmt[i] == 'e' && nonpic_symbol_mentioned_p (XEXP (x, i)))
9556
        return 1;
9557
    }
9558
 
9559
  return 0;
9560
}
9561
 
9562
/* Convert a non-PIC address in `orig' to a PIC address using @GOT or
9563
   @GOTOFF in `reg'.  */
9564
rtx
9565
legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
9566
                        rtx reg)
9567
{
9568
  if (tls_symbolic_operand (orig, Pmode) != TLS_MODEL_NONE)
9569
    return orig;
9570
 
9571
  if (GET_CODE (orig) == LABEL_REF
9572
      || (GET_CODE (orig) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (orig)))
9573
    {
9574
      if (reg == 0)
9575
        reg = gen_reg_rtx (Pmode);
9576
 
9577
      emit_insn (gen_symGOTOFF2reg (reg, orig));
9578
      return reg;
9579
    }
9580
  else if (GET_CODE (orig) == SYMBOL_REF)
9581
    {
9582
      if (reg == 0)
9583
        reg = gen_reg_rtx (Pmode);
9584
 
9585
      emit_insn (gen_symGOT2reg (reg, orig));
9586
      return reg;
9587
    }
9588
  return orig;
9589
}
9590
 
9591
/* Try machine-dependent ways of modifying an illegitimate address
9592
   to be legitimate.  If we find one, return the new, valid address.
9593
   Otherwise, return X.
9594
 
9595
   For the SH, if X is almost suitable for indexing, but the offset is
9596
   out of range, convert it into a normal form so that CSE has a chance
9597
   of reducing the number of address registers used.  */
9598
 
9599
static rtx
9600
sh_legitimize_address (rtx x, rtx oldx, enum machine_mode mode)
9601
{
9602
  if (flag_pic)
9603
    x = legitimize_pic_address (oldx, mode, NULL_RTX);
9604
 
9605
  if (GET_CODE (x) == PLUS
9606
      && (GET_MODE_SIZE (mode) == 4
9607
          || GET_MODE_SIZE (mode) == 8)
9608
      && CONST_INT_P (XEXP (x, 1))
9609
      && BASE_REGISTER_RTX_P (XEXP (x, 0))
9610
      && ! TARGET_SHMEDIA
9611
      && ! ((TARGET_SH4 || TARGET_SH2A_DOUBLE) && mode == DFmode)
9612
      && ! (TARGET_SH2E && mode == SFmode))
9613
    {
9614
      rtx index_rtx = XEXP (x, 1);
9615
      HOST_WIDE_INT offset = INTVAL (index_rtx), offset_base;
9616
      rtx sum;
9617
 
9618
      /* On rare occasions, we might get an unaligned pointer
9619
         that is indexed in a way to give an aligned address.
9620
         Therefore, keep the lower two bits in offset_base.  */
9621
      /* Instead of offset_base 128..131 use 124..127, so that
9622
         simple add suffices.  */
9623
      if (offset > 127)
9624
        offset_base = ((offset + 4) & ~60) - 4;
9625
      else
9626
        offset_base = offset & ~60;
9627
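      /* For example (illustrative), an SImode access at offset 70 is
         rebased to base + 66 with a residual displacement of 4; the low
         two bits of the original offset are preserved in offset_base.  */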
 
9628
      /* Sometimes the normal form does not suit DImode.  We
9629
         could avoid that by using smaller ranges, but that
9630
         would give less optimized code when SImode is
9631
         prevalent.  */
9632
      if (GET_MODE_SIZE (mode) + offset - offset_base <= 64)
9633
        {
9634
          sum = expand_binop (Pmode, add_optab, XEXP (x, 0),
9635
                              GEN_INT (offset_base), NULL_RTX, 0,
9636
                              OPTAB_LIB_WIDEN);
9637
 
9638
          return gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - offset_base));
9639
        }
9640
    }
9641
 
9642
  return x;
9643
}
9644
 
9645
/* Mark the use of a constant in the literal table. If the constant
9646
   has multiple labels, make it unique.  */
9647
static rtx
9648
mark_constant_pool_use (rtx x)
9649
{
9650
  rtx insn, lab, pattern;
9651
 
9652
  if (x == NULL)
9653
    return x;
9654
 
9655
  switch (GET_CODE (x))
9656
    {
9657
    case LABEL_REF:
9658
      x = XEXP (x, 0);
9659
    case CODE_LABEL:
9660
      break;
9661
    default:
9662
      return x;
9663
    }
9664
 
9665
  /* Get the first label in the list of labels for the same constant
9666
     and delete the other labels in the list.  */
9667
  lab = x;
9668
  for (insn = PREV_INSN (x); insn; insn = PREV_INSN (insn))
9669
    {
9670
      if (!LABEL_P (insn)
9671
          || LABEL_REFS (insn) != NEXT_INSN (insn))
9672
        break;
9673
      lab = insn;
9674
    }
9675
 
9676
  for (insn = LABEL_REFS (lab); insn; insn = LABEL_REFS (insn))
9677
    INSN_DELETED_P (insn) = 1;
9678
 
9679
  /* Mark constants in a window.  */
9680
  for (insn = NEXT_INSN (x); insn; insn = NEXT_INSN (insn))
9681
    {
9682
      if (!NONJUMP_INSN_P (insn))
9683
        continue;
9684
 
9685
      pattern = PATTERN (insn);
9686
      if (GET_CODE (pattern) != UNSPEC_VOLATILE)
9687
        continue;
9688
 
9689
      switch (XINT (pattern, 1))
9690
        {
9691
        case UNSPECV_CONST2:
9692
        case UNSPECV_CONST4:
9693
        case UNSPECV_CONST8:
9694
          XVECEXP (pattern, 0, 1) = const1_rtx;
9695
          break;
9696
        case UNSPECV_WINDOW_END:
9697
          if (XVECEXP (pattern, 0, 0) == x)
9698
            return lab;
9699
          break;
9700
        case UNSPECV_CONST_END:
9701
          return lab;
9702
        default:
9703
          break;
9704
        }
9705
    }
9706
 
9707
  return lab;
9708
}
9709
 
9710
/* Return true if it's possible to redirect BRANCH1 to the destination
9711
   of an unconditional jump BRANCH2.  We only want to do this if the
9712
   resulting branch will have a short displacement.  */
9713
int
9714
sh_can_redirect_branch (rtx branch1, rtx branch2)
9715
{
9716
  if (flag_expensive_optimizations && simplejump_p (branch2))
9717
    {
9718
      rtx dest = XEXP (SET_SRC (single_set (branch2)), 0);
9719
      rtx insn;
9720
      int distance;
9721
 
9722
      for (distance = 0, insn = NEXT_INSN (branch1);
9723
           insn && distance < 256;
9724
           insn = PREV_INSN (insn))
9725
        {
9726
          if (insn == dest)
9727
            return 1;
9728
          else
9729
            distance += get_attr_length (insn);
9730
        }
9731
      for (distance = 0, insn = NEXT_INSN (branch1);
9732
           insn && distance < 256;
9733
           insn = NEXT_INSN (insn))
9734
        {
9735
          if (insn == dest)
9736
            return 1;
9737
          else
9738
            distance += get_attr_length (insn);
9739
        }
9740
    }
9741
  return 0;
9742
}
9743
 
9744
/* Return nonzero if register old_reg can be renamed to register new_reg.  */
9745
int
9746
sh_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
9747
                         unsigned int new_reg)
9748
{
9749
  /* Interrupt functions can only use registers that have already been
9750
     saved by the prologue, even if they would normally be
9751
     call-clobbered.  */
9752
 
9753
  if (sh_cfun_interrupt_handler_p () && !df_regs_ever_live_p (new_reg))
9754
    return 0;
9755
 
9756
  return 1;
9757
}
9758
 
9759
/* Function to update the integer COST
9760
   based on the relationship between INSN that is dependent on
9761
   DEP_INSN through the dependence LINK.  The default is to make no
9762
   adjustment to COST.  This can be used for example to specify to
9763
   the scheduler that an output- or anti-dependence does not incur
9764
   the same cost as a data-dependence.  The return value should be
9765
   the new value for COST.  */
9766
static int
9767
sh_adjust_cost (rtx insn, rtx link ATTRIBUTE_UNUSED, rtx dep_insn, int cost)
9768
{
9769
  rtx reg, use_pat;
9770
 
9771
  if (TARGET_SHMEDIA)
9772
    {
9773
      /* On SHmedia, if the dependence is an anti-dependence or
9774
         output-dependence, there is no cost.  */
9775
      if (REG_NOTE_KIND (link) != 0)
9776
        {
9777
          /* However, dependencies between target register loads and
9778
             uses of the register in a subsequent block that are separated
9779
             by a conditional branch are not modelled - we have to make do with
9780
             the anti-dependency between the target register load and the
9781
             conditional branch that ends the current block.  */
9782
          if (REG_NOTE_KIND (link) == REG_DEP_ANTI
9783
              && GET_CODE (PATTERN (dep_insn)) == SET
9784
              && (get_attr_type (dep_insn) == TYPE_PT_MEDIA
9785
                  || get_attr_type (dep_insn) == TYPE_PTABS_MEDIA)
9786
              && get_attr_type (insn) == TYPE_CBRANCH_MEDIA)
9787
            {
9788
              int orig_cost = cost;
9789
              rtx note = find_reg_note (insn, REG_BR_PROB, 0);
9790
              rtx target = ((! note
9791
                             || INTVAL (XEXP (note, 0)) * 2 < REG_BR_PROB_BASE)
9792
                            ? insn : JUMP_LABEL (insn));
9793
              /* On the likely path, the branch costs 1, on the unlikely path,
9794
                 it costs 3.  */
9795
              cost--;
9796
              do
9797
                target = next_active_insn (target);
9798
              while (target && ! flow_dependent_p (target, dep_insn)
9799
                     && --cost > 0);
9800
              /* If two branches are executed in immediate succession, with the
9801
                 first branch properly predicted, this causes a stall at the
9802
                 second branch, hence we won't need the target for the
9803
                 second branch for two cycles after the launch of the first
9804
                 branch.  */
9805
              if (cost > orig_cost - 2)
9806
                cost = orig_cost - 2;
9807
            }
9808
          else
9809
            cost = 0;
9810
        }
9811
 
9812
      else if (get_attr_is_mac_media (insn)
9813
               && get_attr_is_mac_media (dep_insn))
9814
        cost = 1;
9815
 
9816
      else if (! reload_completed
9817
               && GET_CODE (PATTERN (insn)) == SET
9818
               && GET_CODE (SET_SRC (PATTERN (insn))) == FLOAT
9819
               && GET_CODE (PATTERN (dep_insn)) == SET
9820
               && fp_arith_reg_operand (SET_SRC (PATTERN (dep_insn)), VOIDmode)
9821
               && cost < 4)
9822
        cost = 4;
9823
      /* Schedule the ptabs for a casesi_jump_media in preference to stuff
9824
         that is needed at the target.  */
9825
      else if (get_attr_type (insn) == TYPE_JUMP_MEDIA
9826
               && ! flow_dependent_p (insn, dep_insn))
9827
        cost--;
9828
    }
9829
  else if (REG_NOTE_KIND (link) == 0)
9830
    {
9831
      enum attr_type type;
9832
      rtx dep_set;
9833
 
9834
      if (recog_memoized (insn) < 0
9835
          || recog_memoized (dep_insn) < 0)
9836
        return cost;
9837
 
9838
      dep_set = single_set (dep_insn);
9839
 
9840
      /* The latency that we specify in the scheduling description refers
9841
         to the actual output, not to an auto-increment register; for that,
9842
         the latency is one.  */
9843
      if (dep_set && MEM_P (SET_SRC (dep_set)) && cost > 1)
9844
        {
9845
          rtx set = single_set (insn);
9846
 
9847
          if (set
9848
              && !reg_mentioned_p (SET_DEST (dep_set), SET_SRC (set))
9849
              && (!MEM_P (SET_DEST (set))
9850
                  || !reg_mentioned_p (SET_DEST (dep_set),
9851
                                       XEXP (SET_DEST (set), 0))))
9852
            cost = 1;
9853
        }
9854
      /* The only input for a call that is timing-critical is the
9855
         function's address.  */
9856
      if (CALL_P (insn))
9857
        {
9858
          rtx call = PATTERN (insn);
9859
 
9860
          if (GET_CODE (call) == PARALLEL)
9861
            call = XVECEXP (call, 0 ,0);
9862
          if (GET_CODE (call) == SET)
9863
            call = SET_SRC (call);
9864
          if (GET_CODE (call) == CALL && MEM_P (XEXP (call, 0))
9865
                  /* sibcalli_thunk uses a symbol_ref in an unspec.  */
9866
              && (GET_CODE (XEXP (XEXP (call, 0), 0)) == UNSPEC
9867
                  || ! reg_set_p (XEXP (XEXP (call, 0), 0), dep_insn)))
9868
            cost -= TARGET_SH4_300 ? 3 : 6;
9869
        }
9870
      /* Likewise, the most timing critical input for an sfuncs call
9871
         is the function address.  However, sfuncs typically start
9872
         using their arguments pretty quickly.
9873
         Assume a four cycle delay for SH4 before they are needed.
9874
         Cached ST40-300 calls are quicker, so assume only a one
9875
         cycle delay there.
9876
         ??? Maybe we should encode the delays till input registers
9877
         are needed by sfuncs into the sfunc call insn.  */
9878
      /* All sfunc calls are parallels with at least four components.
9879
         Exploit this to avoid unnecessary calls to sfunc_uses_reg.  */
9880
      else if (GET_CODE (PATTERN (insn)) == PARALLEL
9881
               && XVECLEN (PATTERN (insn), 0) >= 4
9882
               && (reg = sfunc_uses_reg (insn)))
9883
        {
9884
          if (! reg_set_p (reg, dep_insn))
9885
            cost -= TARGET_SH4_300 ? 1 : 4;
9886
        }
9887
      if (TARGET_HARD_SH4 && !TARGET_SH4_300)
9888
        {
9889
          enum attr_type dep_type = get_attr_type (dep_insn);
9890
 
9891
          if (dep_type == TYPE_FLOAD || dep_type == TYPE_PCFLOAD)
9892
            cost--;
9893
          else if ((dep_type == TYPE_LOAD_SI || dep_type == TYPE_PCLOAD_SI)
9894
                   && (type = get_attr_type (insn)) != TYPE_CALL
9895
                   && type != TYPE_SFUNC)
9896
            cost--;
9897
          /* When the preceding instruction loads the shift amount of
9898
             the following SHAD/SHLD, the latency of the load is increased
9899
             by 1 cycle.  */
9900
          if (get_attr_type (insn) == TYPE_DYN_SHIFT
9901
              && get_attr_any_int_load (dep_insn) == ANY_INT_LOAD_YES
9902
              && reg_overlap_mentioned_p (SET_DEST (dep_set),
9903
                                          XEXP (SET_SRC (single_set (insn)),
9904
                                                1)))
9905
            cost++;
9906
          /* When an LS group instruction with a latency of less than
9907
             3 cycles is followed by a double-precision floating-point
9908
             instruction, FIPR, or FTRV, the latency of the first
9909
             instruction is increased to 3 cycles.  */
9910
          else if (cost < 3
9911
                   && get_attr_insn_class (dep_insn) == INSN_CLASS_LS_GROUP
9912
                   && get_attr_dfp_comp (insn) == DFP_COMP_YES)
9913
            cost = 3;
9914
          /* The lsw register of a double-precision computation is ready one
9915
             cycle earlier.  */
9916
          else if (reload_completed
9917
                   && get_attr_dfp_comp (dep_insn) == DFP_COMP_YES
9918
                   && (use_pat = single_set (insn))
9919
                   && ! regno_use_in (REGNO (SET_DEST (single_set (dep_insn))),
9920
                                      SET_SRC (use_pat)))
9921
            cost -= 1;
9922
 
9923
          if (get_attr_any_fp_comp (dep_insn) == ANY_FP_COMP_YES
9924
              && get_attr_late_fp_use (insn) == LATE_FP_USE_YES)
9925
            cost -= 1;
9926
        }
9927
      else if (TARGET_SH4_300)
9928
        {
9929
          /* Stores need their input register two cycles later.  */
9930
          if (dep_set && cost >= 1
9931
              && ((type = get_attr_type (insn)) == TYPE_STORE
9932
                  || type == TYPE_PSTORE
9933
                  || type == TYPE_FSTORE || type == TYPE_MAC_MEM))
9934
            {
9935
              rtx set = single_set (insn);
9936
 
9937
              if (!reg_mentioned_p (SET_SRC (set), XEXP (SET_DEST (set), 0))
9938
                  && rtx_equal_p (SET_SRC (set), SET_DEST (dep_set)))
9939
                {
9940
                  cost -= 2;
9941
                  /* But don't reduce the cost below 1 if the address depends
9942
                     on a side effect of dep_insn.  */
9943
                  if (cost < 1
9944
                      && modified_in_p (XEXP (SET_DEST (set), 0), dep_insn))
9945
                    cost = 1;
9946
                }
9947
            }
9948
        }
9949
    }
9950
  /* An anti-dependence penalty of two applies if the first insn is a double
9951
     precision fadd / fsub / fmul.  */
9952
  else if (!TARGET_SH4_300
9953
           && REG_NOTE_KIND (link) == REG_DEP_ANTI
9954
           && recog_memoized (dep_insn) >= 0
9955
           && (get_attr_type (dep_insn) == TYPE_DFP_ARITH
9956
               || get_attr_type (dep_insn) == TYPE_DFP_MUL)
9957
           /* A lot of alleged anti-flow dependences are fake,
9958
              so check this one is real.  */
9959
           && flow_dependent_p (dep_insn, insn))
9960
    cost = 2;
9961
 
9962
  return cost;
9963
}
9964
 
9965
/* Check if INSN is flow-dependent on DEP_INSN.  Can also be used to check
9966
   if DEP_INSN is anti-flow dependent on INSN.  */
9967
static int
9968
flow_dependent_p (rtx insn, rtx dep_insn)
9969
{
9970
  rtx tmp = PATTERN (insn);
9971
 
9972
  note_stores (PATTERN (dep_insn), flow_dependent_p_1, &tmp);
9973
  return tmp == NULL_RTX;
9974
}
9975
 
9976
/* A helper function for flow_dependent_p called through note_stores.  */
9977
static void
9978
flow_dependent_p_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
9979
{
9980
  rtx * pinsn = (rtx *) data;
9981
 
9982
  if (*pinsn && reg_referenced_p (x, *pinsn))
9983
    *pinsn = NULL_RTX;
9984
}
9985
 
9986
/* For use by sh_allocate_initial_value.  Note that sh.md contains some
9987
   'special function' patterns (type sfunc) that clobber pr, but that
9988
   do not look like function calls to leaf_function_p.  Hence we must
9989
   do this extra check.  */
9990
static int
9991
sh_pr_n_sets (void)
9992
{
9993
  return DF_REG_DEF_COUNT (TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG);
9994
}
9995
 
9996
/* Return where to allocate pseudo for a given hard register initial
9997
   value.  */
9998
static rtx
9999
sh_allocate_initial_value (rtx hard_reg)
10000
{
10001
  rtx x;
10002
 
10003
  if (REGNO (hard_reg) == (TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG))
10004
    {
10005
      if (current_function_is_leaf
10006
          && ! sh_pr_n_sets ()
10007
          && ! (TARGET_SHCOMPACT
10008
                && ((crtl->args.info.call_cookie
10009
                     & ~ CALL_COOKIE_RET_TRAMP (1))
10010
                    || crtl->saves_all_registers)))
10011
        x = hard_reg;
10012
      else
10013
        x = gen_frame_mem (Pmode, return_address_pointer_rtx);
10014
    }
10015
  else
10016
    x = NULL_RTX;
10017
 
10018
  return x;
10019
}
10020
 
10021
/* This function returns "2" to indicate dual issue for the SH4
10022
   processor.  To be used by the DFA pipeline description.  */
10023
static int
10024
sh_issue_rate (void)
10025
{
10026
  if (TARGET_SUPERSCALAR)
10027
    return 2;
10028
  else
10029
    return 1;
10030
}
10031
 
10032
/* Functions for ready queue reordering for sched1.  */
10033
 
10034
/* Get weight for mode for a set x.  */
10035
static short
10036
find_set_regmode_weight (rtx x, enum machine_mode mode)
10037
{
10038
  if (GET_CODE (x) == CLOBBER && register_operand (SET_DEST (x), mode))
10039
    return 1;
10040
  if (GET_CODE (x) == SET && register_operand (SET_DEST (x), mode))
10041
    {
10042
      if (REG_P (SET_DEST (x)))
10043
        {
10044
          if (!reg_mentioned_p (SET_DEST (x), SET_SRC (x)))
10045
            return 1;
10046
          else
10047
            return 0;
10048
        }
10049
      return 1;
10050
    }
10051
  return 0;
10052
}
10053
 
10054
/* Get regmode weight for insn.  */
10055
static short
10056
find_insn_regmode_weight (rtx insn, enum machine_mode mode)
10057
{
10058
  short reg_weight = 0;
10059
  rtx x;
10060
 
10061
  /* Increment weight for each register born here.  */
10062
  x = PATTERN (insn);
10063
  reg_weight += find_set_regmode_weight (x, mode);
10064
  if (GET_CODE (x) == PARALLEL)
10065
    {
10066
      int j;
10067
      for (j = XVECLEN (x, 0) - 1; j >= 0; j--)
10068
        {
10069
          x = XVECEXP (PATTERN (insn), 0, j);
10070
          reg_weight += find_set_regmode_weight (x, mode);
10071
        }
10072
    }
10073
  /* Decrement weight for each register that dies here.  */
10074
  for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
10075
    {
10076
      if (REG_NOTE_KIND (x) == REG_DEAD || REG_NOTE_KIND (x) == REG_UNUSED)
10077
        {
10078
          rtx note = XEXP (x, 0);
10079
          if (REG_P (note) && GET_MODE (note) == mode)
10080
            reg_weight--;
10081
        }
10082
    }
10083
  return reg_weight;
10084
}
10085
 
10086
/* Calculate regmode weights for all insns of a basic block.  */
10087
static void
10088
find_regmode_weight (basic_block b, enum machine_mode mode)
10089
{
10090
  rtx insn, next_tail, head, tail;
10091
 
10092
  get_ebb_head_tail (b, b, &head, &tail);
10093
  next_tail = NEXT_INSN (tail);
10094
 
10095
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
10096
    {
10097
      /* Handle register life information.  */
10098
      if (!INSN_P (insn))
10099
        continue;
10100
 
10101
      if (mode == SFmode)
10102
        INSN_REGMODE_WEIGHT (insn, mode) =
10103
          find_insn_regmode_weight (insn, mode) + 2 * find_insn_regmode_weight (insn, DFmode);
10104
      else if (mode == SImode)
10105
        INSN_REGMODE_WEIGHT (insn, mode) =
10106
          find_insn_regmode_weight (insn, mode) + 2 * find_insn_regmode_weight (insn, DImode);
10107
    }
10108
}
10109
 
10110
/* Comparison function for ready queue sorting.  */
10111
static int
10112
rank_for_reorder (const void *x, const void *y)
10113
{
10114
  rtx tmp = *(const rtx *) y;
10115
  rtx tmp2 = *(const rtx *) x;
10116
 
10117
  /* The insn in a schedule group should be issued first.  */
10118
  if (SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
10119
    return SCHED_GROUP_P (tmp2) ? 1 : -1;
10120
 
10121
  /* If insns are equally good, sort by INSN_LUID (original insn order).  This
10122
     minimizes instruction movement, thus minimizing sched's effect on
10123
     register pressure.  */
10124
  return INSN_LUID (tmp) - INSN_LUID (tmp2);
10125
}
10126
 
10127
/* Resort the array A in which only the element at index N may be out of order.  */
10128
static void
10129
swap_reorder (rtx *a, int n)
10130
{
10131
  rtx insn = a[n - 1];
10132
  int i = n - 2;
10133
 
10134
  while (i >= 0 && rank_for_reorder (a + i, &insn) >= 0)
10135
    {
10136
      a[i + 1] = a[i];
10137
      i -= 1;
10138
    }
10139
  a[i + 1] = insn;
10140
}
10141
 
10142
#define SCHED_REORDER(READY, N_READY)                                   \
10143
  do                                                                    \
10144
    {                                                                   \
10145
      if ((N_READY) == 2)                                               \
10146
        swap_reorder (READY, N_READY);                                  \
10147
      else if ((N_READY) > 2)                                           \
10148
        qsort (READY, N_READY, sizeof (rtx), rank_for_reorder);         \
10149
    }                                                                   \
10150
  while (0)
10151
 
10152
/* Sort the ready list READY by ascending priority, using the SCHED_REORDER
10153
   macro.  */
10154
static void
10155
ready_reorder (rtx *ready, int nready)
10156
{
10157
  SCHED_REORDER (ready, nready);
10158
}
10159
 
10160
/* Count life regions of r0 for a block.  */
10161
static int
10162
find_r0_life_regions (basic_block b)
10163
{
10164
  rtx end, insn;
10165
  rtx pset;
10166
  rtx r0_reg;
10167
  int live;
10168
  int set;
10169
  int death = 0;
10170
 
10171
  if (REGNO_REG_SET_P (df_get_live_in (b), R0_REG))
10172
    {
10173
      set = 1;
10174
      live = 1;
10175
    }
10176
  else
10177
    {
10178
      set = 0;
10179
      live = 0;
10180
    }
10181
 
10182
  insn = BB_HEAD (b);
10183
  end = BB_END (b);
10184
  r0_reg = gen_rtx_REG (SImode, R0_REG);
10185
  while (1)
10186
    {
10187
      if (INSN_P (insn))
10188
        {
10189
          if (find_regno_note (insn, REG_DEAD, R0_REG))
10190
            {
10191
              death++;
10192
              live = 0;
10193
            }
10194
          if (!live
10195
              && (pset = single_set (insn))
10196
              && reg_overlap_mentioned_p (r0_reg, SET_DEST (pset))
10197
              && !find_regno_note (insn, REG_UNUSED, R0_REG))
10198
            {
10199
              set++;
10200
              live = 1;
10201
            }
10202
        }
10203
      if (insn == end)
10204
        break;
10205
      insn = NEXT_INSN (insn);
10206
    }
10207
  return set - death;
10208
}
10209
 
10210
/* Calculate regmode weights for all insns of all basic blocks.  */
10211
static void
10212
sh_md_init_global (FILE *dump ATTRIBUTE_UNUSED,
10213
                   int verbose ATTRIBUTE_UNUSED,
10214
                   int old_max_uid)
10215
{
10216
  basic_block b;
10217
 
10218
  regmode_weight[0] = (short *) xcalloc (old_max_uid, sizeof (short));
10219
  regmode_weight[1] = (short *) xcalloc (old_max_uid, sizeof (short));
10220
  r0_life_regions = 0;
10221
 
10222
  FOR_EACH_BB_REVERSE (b)
10223
  {
10224
    find_regmode_weight (b, SImode);
10225
    find_regmode_weight (b, SFmode);
10226
    if (!reload_completed)
10227
      r0_life_regions += find_r0_life_regions (b);
10228
  }
10229
 
10230
  CURR_REGMODE_PRESSURE (SImode) = 0;
10231
  CURR_REGMODE_PRESSURE (SFmode) = 0;
10232
 
10233
}
10234
 
10235
/* Cleanup.  */
10236
static void
10237
sh_md_finish_global (FILE *dump ATTRIBUTE_UNUSED,
10238
                     int verbose ATTRIBUTE_UNUSED)
10239
{
10240
  if (regmode_weight[0])
10241
    {
10242
      free (regmode_weight[0]);
10243
      regmode_weight[0] = NULL;
10244
    }
10245
  if (regmode_weight[1])
10246
    {
10247
      free (regmode_weight[1]);
10248
      regmode_weight[1] = NULL;
10249
    }
10250
}
10251
 
10252
/* The set of scalar modes supported differs from the default version in TImode
10253
   for 32-bit SHMEDIA.  */
10254
static bool
10255
sh_scalar_mode_supported_p (enum machine_mode mode)
10256
{
10257
  if (TARGET_SHMEDIA32 && mode == TImode)
10258
    return false;
10259
 
10260
  return default_scalar_mode_supported_p (mode);
10261
}
10262
 
10263
/* Cache can_issue_more so that we can return it from reorder2.  Also,
10264
   keep count of register pressures on SImode and SFmode. */
10265
static int
10266
sh_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
10267
                   int sched_verbose ATTRIBUTE_UNUSED,
10268
                   rtx insn,
10269
                   int can_issue_more)
10270
{
10271
  if (GET_CODE (PATTERN (insn)) != USE
10272
      && GET_CODE (PATTERN (insn)) != CLOBBER)
10273
    cached_can_issue_more = can_issue_more - 1;
10274
  else
10275
    cached_can_issue_more = can_issue_more;
10276
 
10277
  if (reload_completed)
10278
    return cached_can_issue_more;
10279
 
10280
  CURR_REGMODE_PRESSURE (SImode) += INSN_REGMODE_WEIGHT (insn, SImode);
10281
  CURR_REGMODE_PRESSURE (SFmode) += INSN_REGMODE_WEIGHT (insn, SFmode);
10282
 
10283
  return cached_can_issue_more;
10284
}
10285
 
10286
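/* Reset the SImode and SFmode register pressure counters at the start
   of each scheduling pass.  */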
static void
10287
sh_md_init (FILE *dump ATTRIBUTE_UNUSED,
10288
            int verbose ATTRIBUTE_UNUSED,
10289
            int veclen ATTRIBUTE_UNUSED)
10290
{
10291
  CURR_REGMODE_PRESSURE (SImode) = 0;
10292
  CURR_REGMODE_PRESSURE (SFmode) = 0;
10293
}
10294
 
10295
/* Some magic numbers.  */
10296
/* Pressure on register r0 can lead to spill failures, so avoid sched1 for
10297
   functions that already have high pressure on r0. */
10298
#define R0_MAX_LIFE_REGIONS 2
10299
/* Register Pressure thresholds for SImode and SFmode registers.  */
10300
#define SIMODE_MAX_WEIGHT 5
10301
#define SFMODE_MAX_WEIGHT 10
10302
 
10303
/* Return true if the pressure is high for MODE.  */
10304
static short
10305
high_pressure (enum machine_mode mode)
10306
{
10307
  /* Pressure on register r0 can lead to spill failures, so avoid sched1 for
10308
     functions that already have high pressure on r0. */
10309
   if (r0_life_regions >= R0_MAX_LIFE_REGIONS)
10310
     return 1;
10311
 
10312
  if (mode == SFmode)
10313
    return (CURR_REGMODE_PRESSURE (SFmode) > SFMODE_MAX_WEIGHT);
10314
  else
10315
    return (CURR_REGMODE_PRESSURE (SImode) > SIMODE_MAX_WEIGHT);
10316
}
10317
 
10318
/* Reorder ready queue if register pressure is high.  */
10319
static int
10320
sh_reorder (FILE *dump ATTRIBUTE_UNUSED,
10321
            int sched_verbose ATTRIBUTE_UNUSED,
10322
            rtx *ready,
10323
            int *n_readyp,
10324
            int clock_var ATTRIBUTE_UNUSED)
10325
{
10326
  if (reload_completed)
10327
    return sh_issue_rate ();
10328
 
10329
  if (high_pressure (SFmode) || high_pressure (SImode))
10330
    {
10331
      ready_reorder (ready, *n_readyp);
10332
    }
10333
 
10334
  return sh_issue_rate ();
10335
}
10336
 
10337
/* Skip cycles if the current register pressure is high.  */
10338
static int
10339
sh_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
10340
             int sched_verbose ATTRIBUTE_UNUSED,
10341
             rtx *ready ATTRIBUTE_UNUSED,
10342
             int *n_readyp ATTRIBUTE_UNUSED,
10343
             int clock_var ATTRIBUTE_UNUSED)
10344
{
10345
  if (reload_completed)
10346
    return cached_can_issue_more;
10347
 
10348
  if (high_pressure(SFmode) || high_pressure (SImode))
10349
    skip_cycles = 1;
10350
 
10351
  return cached_can_issue_more;
10352
}
10353
 
10354
/* Skip cycles without sorting the ready queue.  This will move insns from
   Q -> R.  If this is the last cycle we are skipping, allow sorting of the
   ready queue by sh_reorder.  */
 
/* Generally, skipping this many cycles is sufficient for all insns to move
   from Q -> R.  */
10360
#define MAX_SKIPS 8
10361
 
10362
static int
10363
sh_dfa_new_cycle (FILE *sched_dump ATTRIBUTE_UNUSED,
10364
                  int sched_verbose ATTRIBUTE_UNUSED,
10365
                  rtx insn ATTRIBUTE_UNUSED,
10366
                  int last_clock_var,
10367
                  int clock_var,
10368
                  int *sort_p)
10369
{
10370
  if (reload_completed)
10371
    return 0;
10372
 
10373
  if (skip_cycles)
10374
    {
10375
      if ((clock_var - last_clock_var) < MAX_SKIPS)
10376
        {
10377
          *sort_p = 0;
10378
          return 1;
10379
        }
10380
      /* If this is the last cycle we are skipping, allow reordering of R.  */
10381
      if ((clock_var - last_clock_var) == MAX_SKIPS)
10382
        {
10383
          *sort_p = 1;
10384
          return 1;
10385
        }
10386
    }
10387
 
10388
  skip_cycles = 0;
10389
 
10390
  return 0;
10391
}
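/* In other words: once sh_reorder2 has set skip_cycles, cycles where
   clock_var - last_clock_var is still below MAX_SKIPS return 1 with
   *sort_p cleared (skip the cycle, keep the ready queue unsorted); the
   cycle where the difference reaches MAX_SKIPS returns 1 but lets
   sh_reorder sort the queue again; any later cycle clears skip_cycles
   and resumes normal scheduling.  */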
10392
 
10393
/* SHmedia requires registers for branches, so we can't generate new
10394
   branches past reload.  */
10395
static bool
10396
sh_cannot_modify_jumps_p (void)
10397
{
10398
  return (TARGET_SHMEDIA && (reload_in_progress || reload_completed));
10399
}
10400
 
10401
static enum reg_class
10402
sh_target_reg_class (void)
10403
{
10404
  return TARGET_SHMEDIA ? TARGET_REGS : NO_REGS;
10405
}
10406
 
10407
static bool
10408
sh_optimize_target_register_callee_saved (bool after_prologue_epilogue_gen)
10409
{
10410
  HARD_REG_SET dummy;
10411
#if 0
10412
  rtx insn;
10413
#endif
10414
 
10415
  if (! shmedia_space_reserved_for_target_registers)
10416
    return 0;
10417
  if (after_prologue_epilogue_gen && ! TARGET_SAVE_ALL_TARGET_REGS)
10418
    return 0;
10419
  if (calc_live_regs (&dummy) >= 6 * 8)
10420
    return 1;
10421
  return 0;
10422
}
10423
 
10424
static bool
10425
sh_ms_bitfield_layout_p (const_tree record_type ATTRIBUTE_UNUSED)
10426
{
10427
  return (TARGET_SH5 || TARGET_HITACHI || sh_attr_renesas_p (record_type));
10428
}
10429
 
10430
/*
10431
   On the SH1..SH4, the trampoline looks like
10432
   2 0002 D202                  mov.l   l2,r2
10433
   1 0000 D301                  mov.l   l1,r3
10434
   3 0004 422B                  jmp     @r2
10435
   4 0006 0009                  nop
10436
   5 0008 00000000      l1:     .long   area
10437
   6 000c 00000000      l2:     .long   function
10438
 
10439
   SH5 (compact) uses r1 instead of r3 for the static chain.  */
10440
 
10441
 
10442
/* Emit RTL insns to initialize the variable parts of a trampoline.
10443
   FNADDR is an RTX for the address of the function's pure code.
10444
   CXT is an RTX for the static chain value for the function.  */
10445
 
10446
static void
10447
sh_trampoline_init (rtx tramp_mem, tree fndecl, rtx cxt)
10448
{
10449
  rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
10450
  rtx tramp = force_reg (Pmode, XEXP (tramp_mem, 0));
10451
 
10452
  if (TARGET_SHMEDIA64)
10453
    {
10454
      rtx tramp_templ;
10455
      int fixed_len;
10456
 
10457
      rtx movi1 = GEN_INT (0xcc000010);
10458
      rtx shori1 = GEN_INT (0xc8000010);
10459
      rtx src, dst;
10460
 
10461
      /* The following trampoline works within a +- 128 KB range for cxt:
10462
         ptb/u cxt,tr1; movi fnaddr >> 48,r0; shori fnaddr >> 32,r0;
10463
         shori fnaddr >> 16,r0; shori fnaddr,r0; ptabs/l r0,tr0
10464
         gettr tr1,r1; blink tr0,r63  */
10465
      /* Address rounding makes it hard to compute the exact bounds of the
10466
         offset for this trampoline, but we have a rather generous offset
10467
         range, so frame_offset should do fine as an upper bound.  */
10468
      if (cxt == virtual_stack_vars_rtx && frame_offset < 0x20000)
10469
        {
10470
          /* ??? could optimize this trampoline initialization
10471
             by writing DImode words with two insns each.  */
10472
          rtx mask = force_reg (DImode, GEN_INT (0x3fffc00));
10473
          rtx insn = gen_rtx_MINUS (DImode, cxt, tramp);
10474
          insn = gen_rtx_ASHIFT (DImode, insn, GEN_INT (10-2));
10475
          insn = gen_rtx_AND (DImode, insn, mask);
10476
          /* Or in ptb/u .,tr1 pattern */
10477
          insn = gen_rtx_IOR (DImode, insn, gen_int_mode (0xec000010, SImode));
10478
          insn = force_operand (insn, NULL_RTX);
10479
          insn = gen_lowpart (SImode, insn);
10480
          emit_move_insn (change_address (tramp_mem, SImode, NULL_RTX), insn);
10481
          insn = gen_rtx_LSHIFTRT (DImode, fnaddr, GEN_INT (38));
10482
          insn = gen_rtx_AND (DImode, insn, mask);
10483
          insn = force_operand (gen_rtx_IOR (DImode, movi1, insn), NULL_RTX);
10484
          insn = gen_lowpart (SImode, insn);
10485
          emit_move_insn (adjust_address (tramp_mem, SImode, 4), insn);
10486
          insn = gen_rtx_LSHIFTRT (DImode, fnaddr, GEN_INT (22));
10487
          insn = gen_rtx_AND (DImode, insn, mask);
10488
          insn = force_operand (gen_rtx_IOR (DImode, shori1, insn), NULL_RTX);
10489
          insn = gen_lowpart (SImode, insn);
10490
          emit_move_insn (adjust_address (tramp_mem, SImode, 8), insn);
10491
          insn = gen_rtx_LSHIFTRT (DImode, fnaddr, GEN_INT (6));
10492
          insn = gen_rtx_AND (DImode, insn, mask);
10493
          insn = force_operand (gen_rtx_IOR (DImode, shori1, insn), NULL_RTX);
10494
          insn = gen_lowpart (SImode, insn);
10495
          emit_move_insn (adjust_address (tramp_mem, SImode, 12), insn);
10496
          insn = gen_rtx_ASHIFT (DImode, fnaddr, GEN_INT (10));
10497
          insn = gen_rtx_AND (DImode, insn, mask);
10498
          insn = force_operand (gen_rtx_IOR (DImode, shori1, insn), NULL_RTX);
10499
          insn = gen_lowpart (SImode, insn);
10500
          emit_move_insn (adjust_address (tramp_mem, SImode, 16), insn);
10501
          emit_move_insn (adjust_address (tramp_mem, SImode, 20),
10502
                          GEN_INT (0x6bf10600));
10503
          emit_move_insn (adjust_address (tramp_mem, SImode, 24),
10504
                          GEN_INT (0x4415fc10));
10505
          emit_move_insn (adjust_address (tramp_mem, SImode, 28),
10506
                          GEN_INT (0x4401fff0));
10507
          emit_insn (gen_ic_invalidate_line (tramp));
10508
          return;
10509
        }
10510
      tramp_templ = gen_rtx_SYMBOL_REF (Pmode,"__GCC_nested_trampoline");
10511
      fixed_len = TRAMPOLINE_SIZE - 2 * GET_MODE_SIZE (Pmode);
10512
 
10513
      tramp_templ = gen_datalabel_ref (tramp_templ);
10514
      dst = tramp_mem;
10515
      src = gen_const_mem (BLKmode, tramp_templ);
10516
      set_mem_align (dst, 256);
10517
      set_mem_align (src, 64);
10518
      emit_block_move (dst, src, GEN_INT (fixed_len), BLOCK_OP_NORMAL);
10519
 
10520
      emit_move_insn (adjust_address (tramp_mem, Pmode, fixed_len), fnaddr);
10521
      emit_move_insn (adjust_address (tramp_mem, Pmode,
10522
                                      fixed_len + GET_MODE_SIZE (Pmode)),
10523
                      cxt);
10524
      emit_insn (gen_ic_invalidate_line (tramp));
10525
      return;
10526
    }
10527
  else if (TARGET_SHMEDIA)
10528
    {
10529
      /* movi fnaddr >> 16,r1; shori fnaddr,r1; ptabs/l r1,tr0
10530
         movi cxt >> 16,r1; shori cxt,r1; blink tr0,r63  */
10531
      rtx quad0 = gen_reg_rtx (DImode), cxtload = gen_reg_rtx (DImode);
10532
      rtx quad1 = gen_reg_rtx (DImode), quad2 = gen_reg_rtx (DImode);
10533
      /* movi 0,r1: 0xcc000010 shori 0,r1: c8000010  concatenated,
         rotated 10 right, with the higher 16 bits of every 32 selected.  */
10535
      rtx movishori
10536
        = force_reg (V2HImode, (simplify_gen_subreg
10537
                                (V2HImode, GEN_INT (0x4330432), SImode, 0)));
10538
      rtx ptabs = force_reg (DImode, GEN_INT (0x6bf10600));
10539
      rtx blink = force_reg (DImode, GEN_INT (0x4401fff0));
10540
 
10541
      fnaddr = force_reg (SImode, fnaddr);
10542
      cxt = force_reg (SImode, cxt);
10543
      emit_insn (gen_mshflo_w_x (gen_rtx_SUBREG (V4HImode, quad0, 0),
10544
                                 gen_rtx_SUBREG (V2HImode, fnaddr, 0),
10545
                                 movishori));
10546
      emit_insn (gen_rotrdi3_mextr (quad0, quad0,
10547
                                    GEN_INT (TARGET_LITTLE_ENDIAN ? 24 : 56)));
10548
      emit_insn (gen_ashldi3_media (quad0, quad0, const2_rtx));
10549
      emit_move_insn (change_address (tramp_mem, DImode, NULL_RTX), quad0);
10550
      emit_insn (gen_mshflo_w_x (gen_rtx_SUBREG (V4HImode, cxtload, 0),
10551
                                 gen_rtx_SUBREG (V2HImode, cxt, 0),
10552
                                 movishori));
10553
      emit_insn (gen_rotrdi3_mextr (cxtload, cxtload,
10554
                                    GEN_INT (TARGET_LITTLE_ENDIAN ? 24 : 56)));
10555
      emit_insn (gen_ashldi3_media (cxtload, cxtload, const2_rtx));
10556
      if (TARGET_LITTLE_ENDIAN)
10557
        {
10558
          emit_insn (gen_mshflo_l_di (quad1, ptabs, cxtload));
10559
          emit_insn (gen_mextr4 (quad2, cxtload, blink));
10560
        }
10561
      else
10562
        {
10563
          emit_insn (gen_mextr4 (quad1, cxtload, ptabs));
10564
          emit_insn (gen_mshflo_l_di (quad2, blink, cxtload));
10565
        }
10566
      emit_move_insn (adjust_address (tramp_mem, DImode, 8), quad1);
10567
      emit_move_insn (adjust_address (tramp_mem, DImode, 16), quad2);
10568
      emit_insn (gen_ic_invalidate_line (tramp));
10569
      return;
10570
    }
10571
  else if (TARGET_SHCOMPACT)
10572
    {
10573
      emit_insn (gen_initialize_trampoline (tramp, cxt, fnaddr));
10574
      return;
10575
    }
10576
  emit_move_insn (change_address (tramp_mem, SImode, NULL_RTX),
10577
                  gen_int_mode (TARGET_LITTLE_ENDIAN ? 0xd301d202 : 0xd202d301,
10578
                                SImode));
10579
  emit_move_insn (adjust_address (tramp_mem, SImode, 4),
10580
                  gen_int_mode (TARGET_LITTLE_ENDIAN ? 0x0009422b : 0x422b0009,
10581
                                SImode));
10582
  emit_move_insn (adjust_address (tramp_mem, SImode, 8), cxt);
10583
  emit_move_insn (adjust_address (tramp_mem, SImode, 12), fnaddr);
10584
  if (TARGET_HARVARD)
10585
    {
10586
      if (!TARGET_INLINE_IC_INVALIDATE
10587
          || (!(TARGET_SH4A_ARCH || TARGET_SH4_300) && TARGET_USERMODE))
10588
        emit_library_call (function_symbol (NULL, "__ic_invalidate",
10589
                                            FUNCTION_ORDINARY),
10590
                           LCT_NORMAL, VOIDmode, 1, tramp, SImode);
10591
      else
10592
        emit_insn (gen_ic_invalidate_line (tramp));
10593
    }
10594
}
10595
 
10596
/* On SH5, trampolines are SHmedia code, so add 1 to the address.  */
10597
 
10598
static rtx
10599
sh_trampoline_adjust_address (rtx tramp)
10600
{
10601
  if (TARGET_SHMEDIA)
10602
    tramp = expand_simple_binop (Pmode, PLUS, tramp, const1_rtx,
10603
                                 gen_reg_rtx (Pmode), 0, OPTAB_LIB_WIDEN);
10604
  return tramp;
10605
}
10606
 
10607
/* FIXME: This is overly conservative.  A SHcompact function that
10608
   receives arguments ``by reference'' will have them stored in its
10609
   own stack frame, so it must not pass pointers or references to
10610
   these arguments to other functions by means of sibling calls.  */
10611
/* If PIC, we cannot make sibling calls to global functions
10612
   because the PLT requires r12 to be live.  */
10613
static bool
10614
sh_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
10615
{
10616
  return (1
10617
          && (! TARGET_SHCOMPACT
10618
              || crtl->args.info.stack_regs == 0)
10619
          && ! sh_cfun_interrupt_handler_p ()
10620
          && (! flag_pic
10621
              || (decl && ! TREE_PUBLIC (decl))
10622
              || (decl && DECL_VISIBILITY (decl) != VISIBILITY_DEFAULT)));
10623
}
10624
 
10625
/* Machine specific built-in functions.  */
10626
 
10627
struct builtin_description
10628
{
10629
  const enum insn_code icode;
10630
  const char *const name;
10631
  int signature;
10632
  tree fndecl;
10633
};
10634
 
10635
/* Describe the number and signedness of arguments; arg[0] == result
   (1: unsigned, 2: signed, 4: don't care, 8: pointer, 0: no argument).  */
/* 9: 64-bit pointer, 10: 32-bit pointer.  */
10638
static const char signature_args[][4] =
10639
{
10640
#define SH_BLTIN_V2SI2 0
10641
  { 4, 4 },
10642
#define SH_BLTIN_V4HI2 1
10643
  { 4, 4 },
10644
#define SH_BLTIN_V2SI3 2
10645
  { 4, 4, 4 },
10646
#define SH_BLTIN_V4HI3 3
10647
  { 4, 4, 4 },
10648
#define SH_BLTIN_V8QI3 4
10649
  { 4, 4, 4 },
10650
#define SH_BLTIN_MAC_HISI 5
10651
  { 1, 4, 4, 1 },
10652
#define SH_BLTIN_SH_HI 6
10653
  { 4, 4, 1 },
10654
#define SH_BLTIN_SH_SI 7
10655
  { 4, 4, 1 },
10656
#define SH_BLTIN_V4HI2V2SI 8
10657
  { 4, 4, 4 },
10658
#define SH_BLTIN_V4HI2V8QI 9
10659
  { 4, 4, 4 },
10660
#define SH_BLTIN_SISF 10
10661
  { 4, 2 },
10662
#define SH_BLTIN_LDUA_L 11
10663
  { 2, 10 },
10664
#define SH_BLTIN_LDUA_Q 12
10665
  { 1, 10 },
10666
#define SH_BLTIN_STUA_L 13
10667
  { 0, 10, 2 },
10668
#define SH_BLTIN_STUA_Q 14
10669
  { 0, 10, 1 },
10670
#define SH_BLTIN_LDUA_L64 15
10671
  { 2, 9 },
10672
#define SH_BLTIN_LDUA_Q64 16
10673
  { 1, 9 },
10674
#define SH_BLTIN_STUA_L64 17
10675
  { 0, 9, 2 },
10676
#define SH_BLTIN_STUA_Q64 18
10677
  { 0, 9, 1 },
10678
#define SH_BLTIN_NUM_SHARED_SIGNATURES 19
10679
#define SH_BLTIN_2 19
10680
#define SH_BLTIN_SU 19
10681
  { 1, 2 },
10682
#define SH_BLTIN_3 20
10683
#define SH_BLTIN_SUS 20
10684
  { 2, 2, 1 },
10685
#define SH_BLTIN_PSSV 21
10686
  { 0, 8, 2, 2 },
10687
#define SH_BLTIN_XXUU 22
10688
#define SH_BLTIN_UUUU 22
10689
  { 1, 1, 1, 1 },
10690
#define SH_BLTIN_PV 23
10691
  { 0, 8 },
10692
};
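/* As an example of the encoding, SH_BLTIN_SH_HI is { 4, 4, 1 }: the result
   and the first operand take whatever mode the insn pattern specifies
   (4: don't care), while the last operand - the shift or control count -
   is unsigned (1), matching the notes below that the mperm control value
   and the shift counts are unsigned int.  */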
10693
/* mcmv: operands considered unsigned.  */
10694
/* mmulsum_wq, msad_ubq: result considered unsigned long long.  */
10695
/* mperm: control value considered unsigned int.  */
10696
/* mshalds, mshard, mshards, mshlld, mshlrd: shift count is unsigned int.  */
10697
/* mshards_q: returns signed short.  */
10698
/* nsb: takes long long arg, returns unsigned char.  */
10699
static struct builtin_description bdesc[] =
10700
{
10701
  { CODE_FOR_absv2si2,  "__builtin_absv2si2", SH_BLTIN_V2SI2, 0 },
10702
  { CODE_FOR_absv4hi2,  "__builtin_absv4hi2", SH_BLTIN_V4HI2, 0 },
10703
  { CODE_FOR_addv2si3,  "__builtin_addv2si3", SH_BLTIN_V2SI3, 0 },
10704
  { CODE_FOR_addv4hi3,  "__builtin_addv4hi3", SH_BLTIN_V4HI3, 0 },
10705
  { CODE_FOR_ssaddv2si3,"__builtin_ssaddv2si3", SH_BLTIN_V2SI3, 0 },
10706
  { CODE_FOR_usaddv8qi3,"__builtin_usaddv8qi3", SH_BLTIN_V8QI3, 0 },
10707
  { CODE_FOR_ssaddv4hi3,"__builtin_ssaddv4hi3", SH_BLTIN_V4HI3, 0 },
10708
  { CODE_FOR_alloco_i,  "__builtin_sh_media_ALLOCO", SH_BLTIN_PV, 0 },
10709
  { CODE_FOR_negcmpeqv8qi,"__builtin_sh_media_MCMPEQ_B", SH_BLTIN_V8QI3, 0 },
10710
  { CODE_FOR_negcmpeqv2si,"__builtin_sh_media_MCMPEQ_L", SH_BLTIN_V2SI3, 0 },
10711
  { CODE_FOR_negcmpeqv4hi,"__builtin_sh_media_MCMPEQ_W", SH_BLTIN_V4HI3, 0 },
10712
  { CODE_FOR_negcmpgtuv8qi,"__builtin_sh_media_MCMPGT_UB", SH_BLTIN_V8QI3, 0 },
10713
  { CODE_FOR_negcmpgtv2si,"__builtin_sh_media_MCMPGT_L", SH_BLTIN_V2SI3, 0 },
10714
  { CODE_FOR_negcmpgtv4hi,"__builtin_sh_media_MCMPGT_W", SH_BLTIN_V4HI3, 0 },
10715
  { CODE_FOR_mcmv,      "__builtin_sh_media_MCMV", SH_BLTIN_UUUU, 0 },
10716
  { CODE_FOR_mcnvs_lw,  "__builtin_sh_media_MCNVS_LW", SH_BLTIN_3, 0 },
10717
  { CODE_FOR_mcnvs_wb,  "__builtin_sh_media_MCNVS_WB", SH_BLTIN_V4HI2V8QI, 0 },
10718
  { CODE_FOR_mcnvs_wub, "__builtin_sh_media_MCNVS_WUB", SH_BLTIN_V4HI2V8QI, 0 },
10719
  { CODE_FOR_mextr1,    "__builtin_sh_media_MEXTR1", SH_BLTIN_V8QI3, 0 },
10720
  { CODE_FOR_mextr2,    "__builtin_sh_media_MEXTR2", SH_BLTIN_V8QI3, 0 },
10721
  { CODE_FOR_mextr3,    "__builtin_sh_media_MEXTR3", SH_BLTIN_V8QI3, 0 },
10722
  { CODE_FOR_mextr4,    "__builtin_sh_media_MEXTR4", SH_BLTIN_V8QI3, 0 },
10723
  { CODE_FOR_mextr5,    "__builtin_sh_media_MEXTR5", SH_BLTIN_V8QI3, 0 },
10724
  { CODE_FOR_mextr6,    "__builtin_sh_media_MEXTR6", SH_BLTIN_V8QI3, 0 },
10725
  { CODE_FOR_mextr7,    "__builtin_sh_media_MEXTR7", SH_BLTIN_V8QI3, 0 },
10726
  { CODE_FOR_mmacfx_wl, "__builtin_sh_media_MMACFX_WL", SH_BLTIN_MAC_HISI, 0 },
10727
  { CODE_FOR_mmacnfx_wl,"__builtin_sh_media_MMACNFX_WL", SH_BLTIN_MAC_HISI, 0 },
10728
  { CODE_FOR_mulv2si3,  "__builtin_mulv2si3", SH_BLTIN_V2SI3, 0 },
10729
  { CODE_FOR_mulv4hi3,  "__builtin_mulv4hi3", SH_BLTIN_V4HI3, 0 },
10730
  { CODE_FOR_mmulfx_l,  "__builtin_sh_media_MMULFX_L", SH_BLTIN_V2SI3, 0 },
10731
  { CODE_FOR_mmulfx_w,  "__builtin_sh_media_MMULFX_W", SH_BLTIN_V4HI3, 0 },
10732
  { CODE_FOR_mmulfxrp_w,"__builtin_sh_media_MMULFXRP_W", SH_BLTIN_V4HI3, 0 },
10733
  { CODE_FOR_mmulhi_wl, "__builtin_sh_media_MMULHI_WL", SH_BLTIN_V4HI2V2SI, 0 },
10734
  { CODE_FOR_mmullo_wl, "__builtin_sh_media_MMULLO_WL", SH_BLTIN_V4HI2V2SI, 0 },
10735
  { CODE_FOR_mmulsum_wq,"__builtin_sh_media_MMULSUM_WQ", SH_BLTIN_XXUU, 0 },
10736
  { CODE_FOR_mperm_w,   "__builtin_sh_media_MPERM_W", SH_BLTIN_SH_HI, 0 },
10737
  { CODE_FOR_msad_ubq,  "__builtin_sh_media_MSAD_UBQ", SH_BLTIN_XXUU, 0 },
10738
  { CODE_FOR_mshalds_l, "__builtin_sh_media_MSHALDS_L", SH_BLTIN_SH_SI, 0 },
10739
  { CODE_FOR_mshalds_w, "__builtin_sh_media_MSHALDS_W", SH_BLTIN_SH_HI, 0 },
10740
  { CODE_FOR_ashrv2si3, "__builtin_ashrv2si3", SH_BLTIN_SH_SI, 0 },
10741
  { CODE_FOR_ashrv4hi3, "__builtin_ashrv4hi3", SH_BLTIN_SH_HI, 0 },
10742
  { CODE_FOR_mshards_q, "__builtin_sh_media_MSHARDS_Q", SH_BLTIN_SUS, 0 },
10743
  { CODE_FOR_mshfhi_b,  "__builtin_sh_media_MSHFHI_B", SH_BLTIN_V8QI3, 0 },
10744
  { CODE_FOR_mshfhi_l,  "__builtin_sh_media_MSHFHI_L", SH_BLTIN_V2SI3, 0 },
10745
  { CODE_FOR_mshfhi_w,  "__builtin_sh_media_MSHFHI_W", SH_BLTIN_V4HI3, 0 },
10746
  { CODE_FOR_mshflo_b,  "__builtin_sh_media_MSHFLO_B", SH_BLTIN_V8QI3, 0 },
10747
  { CODE_FOR_mshflo_l,  "__builtin_sh_media_MSHFLO_L", SH_BLTIN_V2SI3, 0 },
10748
  { CODE_FOR_mshflo_w,  "__builtin_sh_media_MSHFLO_W", SH_BLTIN_V4HI3, 0 },
10749
  { CODE_FOR_ashlv2si3, "__builtin_ashlv2si3", SH_BLTIN_SH_SI, 0 },
10750
  { CODE_FOR_ashlv4hi3, "__builtin_ashlv4hi3", SH_BLTIN_SH_HI, 0 },
10751
  { CODE_FOR_lshrv2si3, "__builtin_lshrv2si3", SH_BLTIN_SH_SI, 0 },
10752
  { CODE_FOR_lshrv4hi3, "__builtin_lshrv4hi3", SH_BLTIN_SH_HI, 0 },
10753
  { CODE_FOR_subv2si3,  "__builtin_subv2si3", SH_BLTIN_V2SI3, 0 },
10754
  { CODE_FOR_subv4hi3,  "__builtin_subv4hi3", SH_BLTIN_V4HI3, 0 },
10755
  { CODE_FOR_sssubv2si3,"__builtin_sssubv2si3", SH_BLTIN_V2SI3, 0 },
10756
  { CODE_FOR_ussubv8qi3,"__builtin_ussubv8qi3", SH_BLTIN_V8QI3, 0 },
10757
  { CODE_FOR_sssubv4hi3,"__builtin_sssubv4hi3", SH_BLTIN_V4HI3, 0 },
10758
  { CODE_FOR_fcosa_s,   "__builtin_sh_media_FCOSA_S", SH_BLTIN_SISF, 0 },
10759
  { CODE_FOR_fsina_s,   "__builtin_sh_media_FSINA_S", SH_BLTIN_SISF, 0 },
10760
  { CODE_FOR_fipr,      "__builtin_sh_media_FIPR_S", SH_BLTIN_3, 0 },
10761
  { CODE_FOR_ftrv,      "__builtin_sh_media_FTRV_S", SH_BLTIN_3, 0 },
10762
  { CODE_FOR_mac_media, "__builtin_sh_media_FMAC_S", SH_BLTIN_3, 0 },
10763
  { CODE_FOR_sqrtdf2,   "__builtin_sh_media_FSQRT_D", SH_BLTIN_2, 0 },
10764
  { CODE_FOR_sqrtsf2,   "__builtin_sh_media_FSQRT_S", SH_BLTIN_2, 0 },
10765
  { CODE_FOR_fsrra_s,   "__builtin_sh_media_FSRRA_S", SH_BLTIN_2, 0 },
10766
  { CODE_FOR_ldhi_l,    "__builtin_sh_media_LDHI_L", SH_BLTIN_LDUA_L, 0 },
10767
  { CODE_FOR_ldhi_q,    "__builtin_sh_media_LDHI_Q", SH_BLTIN_LDUA_Q, 0 },
10768
  { CODE_FOR_ldlo_l,    "__builtin_sh_media_LDLO_L", SH_BLTIN_LDUA_L, 0 },
10769
  { CODE_FOR_ldlo_q,    "__builtin_sh_media_LDLO_Q", SH_BLTIN_LDUA_Q, 0 },
10770
  { CODE_FOR_sthi_l,    "__builtin_sh_media_STHI_L", SH_BLTIN_STUA_L, 0 },
10771
  { CODE_FOR_sthi_q,    "__builtin_sh_media_STHI_Q", SH_BLTIN_STUA_Q, 0 },
10772
  { CODE_FOR_stlo_l,    "__builtin_sh_media_STLO_L", SH_BLTIN_STUA_L, 0 },
10773
  { CODE_FOR_stlo_q,    "__builtin_sh_media_STLO_Q", SH_BLTIN_STUA_Q, 0 },
10774
  { CODE_FOR_ldhi_l64,  "__builtin_sh_media_LDHI_L", SH_BLTIN_LDUA_L64, 0 },
10775
  { CODE_FOR_ldhi_q64,  "__builtin_sh_media_LDHI_Q", SH_BLTIN_LDUA_Q64, 0 },
10776
  { CODE_FOR_ldlo_l64,  "__builtin_sh_media_LDLO_L", SH_BLTIN_LDUA_L64, 0 },
10777
  { CODE_FOR_ldlo_q64,  "__builtin_sh_media_LDLO_Q", SH_BLTIN_LDUA_Q64, 0 },
10778
  { CODE_FOR_sthi_l64,  "__builtin_sh_media_STHI_L", SH_BLTIN_STUA_L64, 0 },
10779
  { CODE_FOR_sthi_q64,  "__builtin_sh_media_STHI_Q", SH_BLTIN_STUA_Q64, 0 },
10780
  { CODE_FOR_stlo_l64,  "__builtin_sh_media_STLO_L", SH_BLTIN_STUA_L64, 0 },
10781
  { CODE_FOR_stlo_q64,  "__builtin_sh_media_STLO_Q", SH_BLTIN_STUA_Q64, 0 },
10782
  { CODE_FOR_nsb,       "__builtin_sh_media_NSB", SH_BLTIN_SU, 0 },
10783
  { CODE_FOR_byterev,   "__builtin_sh_media_BYTEREV", SH_BLTIN_2, 0 },
10784
  { CODE_FOR_prefetch,  "__builtin_sh_media_PREFO", SH_BLTIN_PSSV, 0 },
10785
};
10786
 
10787
static void
10788
sh_media_init_builtins (void)
10789
{
10790
  tree shared[SH_BLTIN_NUM_SHARED_SIGNATURES];
10791
  struct builtin_description *d;
10792
 
10793
  memset (shared, 0, sizeof shared);
10794
  for (d = bdesc; d - bdesc < (int) ARRAY_SIZE (bdesc); d++)
10795
    {
10796
      tree type, arg_type = 0;
10797
      int signature = d->signature;
10798
      int i;
10799
 
10800
      if (signature < SH_BLTIN_NUM_SHARED_SIGNATURES && shared[signature])
10801
        type = shared[signature];
10802
      else
10803
        {
10804
          int has_result = signature_args[signature][0] != 0;
10805
 
10806
          if ((signature_args[signature][1] & 8)
10807
              && (((signature_args[signature][1] & 1) && TARGET_SHMEDIA32)
10808
                  || ((signature_args[signature][1] & 2) && TARGET_SHMEDIA64)))
10809
            continue;
10810
          if (! TARGET_FPU_ANY
10811
              && FLOAT_MODE_P (insn_data[d->icode].operand[0].mode))
10812
            continue;
10813
          type = void_list_node;
10814
          for (i = 3; ; i--)
10815
            {
10816
              int arg = signature_args[signature][i];
10817
              int opno = i - 1 + has_result;
10818
 
10819
              if (arg & 8)
10820
                arg_type = ptr_type_node;
10821
              else if (arg)
10822
                arg_type = (*lang_hooks.types.type_for_mode)
10823
                  (insn_data[d->icode].operand[opno].mode,
10824
                   (arg & 1));
10825
              else if (i)
10826
                continue;
10827
              else
10828
                arg_type = void_type_node;
10829
              if (i == 0)
10830
                break;
10831
              type = tree_cons (NULL_TREE, arg_type, type);
10832
            }
10833
          type = build_function_type (arg_type, type);
10834
          if (signature < SH_BLTIN_NUM_SHARED_SIGNATURES)
10835
            shared[signature] = type;
10836
        }
10837
      d->fndecl =
10838
        add_builtin_function (d->name, type, d - bdesc, BUILT_IN_MD,
10839
                              NULL, NULL_TREE);
10840
    }
10841
}
10842
 
10843
/* Returns the shmedia builtin decl for CODE.  */
10844
 
10845
static tree
10846
sh_media_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
10847
{
10848
  if (code >= ARRAY_SIZE (bdesc))
10849
    return error_mark_node;
10850
 
10851
  return bdesc[code].fndecl;
10852
}
10853
 
10854
/* Implements target hook vector_mode_supported_p.  */
10855
bool
10856
sh_vector_mode_supported_p (enum machine_mode mode)
10857
{
10858
  if (TARGET_FPU_ANY
10859
      && ((mode == V2SFmode)
10860
          || (mode == V4SFmode)
10861
          || (mode == V16SFmode)))
10862
    return true;
10863
 
10864
  else if (TARGET_SHMEDIA
10865
           && ((mode == V8QImode)
10866
               || (mode == V2HImode)
10867
               || (mode == V4HImode)
10868
               || (mode == V2SImode)))
10869
    return true;
10870
 
10871
  return false;
10872
}
10873
 
10874
/* Implements target hook dwarf_calling_convention.  Return an enum
10875
   of dwarf_calling_convention.  */
10876
int
10877
sh_dwarf_calling_convention (const_tree func)
10878
{
10879
  if (sh_attr_renesas_p (func))
10880
    return DW_CC_GNU_renesas_sh;
10881
 
10882
  return DW_CC_normal;
10883
}
10884
 
10885
static void
10886
sh_init_builtins (void)
10887
{
10888
  if (TARGET_SHMEDIA)
10889
    sh_media_init_builtins ();
10890
}
10891
 
10892
/* Returns the sh builtin decl for CODE.  */
10893
 
10894
static tree
10895
sh_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
10896
{
10897
  if (TARGET_SHMEDIA)
10898
    return sh_media_builtin_decl (code, initialize_p);
10899
 
10900
  return error_mark_node;
10901
}
10902
 
10903
/* Expand an expression EXP that calls a built-in function,
10904
   with result going to TARGET if that's convenient
10905
   (and in mode MODE if that's convenient).
10906
   SUBTARGET may be used as the target for computing one of EXP's operands.
10907
   IGNORE is nonzero if the value is to be ignored.  */
10908
 
10909
static rtx
10910
sh_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
10911
                   enum machine_mode mode ATTRIBUTE_UNUSED, int ignore)
10912
{
10913
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10914
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10915
  const struct builtin_description *d = &bdesc[fcode];
10916
  enum insn_code icode = d->icode;
10917
  int signature = d->signature;
10918
  enum machine_mode tmode = VOIDmode;
10919
  int nop = 0, i;
10920
  rtx op[4];
10921
  rtx pat = 0;
10922
 
10923
  if (signature_args[signature][0])
10924
    {
10925
      if (ignore)
10926
        return 0;
10927
 
10928
      tmode = insn_data[icode].operand[0].mode;
10929
      if (! target
10930
          || GET_MODE (target) != tmode
10931
          || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
10932
        target = gen_reg_rtx (tmode);
10933
      op[nop++] = target;
10934
    }
10935
  else
10936
    target = 0;
10937
 
10938
  for (i = 1; i <= 3; i++, nop++)
10939
    {
10940
      tree arg;
10941
      enum machine_mode opmode, argmode;
10942
      tree optype;
10943
 
10944
      if (! signature_args[signature][i])
10945
        break;
10946
      arg = CALL_EXPR_ARG (exp, i - 1);
10947
      if (arg == error_mark_node)
10948
        return const0_rtx;
10949
      if (signature_args[signature][i] & 8)
10950
        {
10951
          opmode = ptr_mode;
10952
          optype = ptr_type_node;
10953
        }
10954
      else
10955
        {
10956
          opmode = insn_data[icode].operand[nop].mode;
10957
          optype = (*lang_hooks.types.type_for_mode) (opmode, 0);
10958
        }
10959
      argmode = TYPE_MODE (TREE_TYPE (arg));
10960
      if (argmode != opmode)
10961
        arg = build1 (NOP_EXPR, optype, arg);
10962
      op[nop] = expand_expr (arg, NULL_RTX, opmode, EXPAND_NORMAL);
10963
      if (! (*insn_data[icode].operand[nop].predicate) (op[nop], opmode))
10964
        op[nop] = copy_to_mode_reg (opmode, op[nop]);
10965
    }
10966
 
10967
  switch (nop)
10968
    {
10969
    case 1:
10970
      pat = (*insn_data[d->icode].genfun) (op[0]);
10971
      break;
10972
    case 2:
10973
      pat = (*insn_data[d->icode].genfun) (op[0], op[1]);
10974
      break;
10975
    case 3:
10976
      pat = (*insn_data[d->icode].genfun) (op[0], op[1], op[2]);
10977
      break;
10978
    case 4:
10979
      pat = (*insn_data[d->icode].genfun) (op[0], op[1], op[2], op[3]);
10980
      break;
10981
    default:
10982
      gcc_unreachable ();
10983
    }
10984
  if (! pat)
10985
    return 0;
10986
  emit_insn (pat);
10987
  return target;
10988
}
10989
 
10990
void
10991
sh_expand_unop_v2sf (enum rtx_code code, rtx op0, rtx op1)
10992
{
10993
  rtx sel0 = const0_rtx;
10994
  rtx sel1 = const1_rtx;
10995
  rtx (*fn) (rtx, rtx, rtx, rtx, rtx) = gen_unary_sf_op;
10996
  rtx op = gen_rtx_fmt_e (code, SFmode, op1);
10997
 
10998
  emit_insn ((*fn) (op0, op1, op, sel0, sel0));
10999
  emit_insn ((*fn) (op0, op1, op, sel1, sel1));
11000
}
11001
 
11002
void
11003
sh_expand_binop_v2sf (enum rtx_code code, rtx op0, rtx op1, rtx op2)
11004
{
11005
  rtx op = gen_rtx_fmt_ee (code, SFmode, op1, op2);
11006
 
11007
  emit_insn (gen_binary_sf_op0 (op0, op1, op2, op));
11008
  emit_insn (gen_binary_sf_op1 (op0, op1, op2, op));
11009
}
11010
 
11011
/* Return true if hard register REGNO can hold a value of machine-mode MODE.
11012
   We can allow any mode in any general register.  The special registers
11013
   only allow SImode.  Don't allow any mode in the PR.
11014
 
11015
   We cannot hold DCmode values in the XD registers because alter_reg
11016
   handles subregs of them incorrectly.  We could work around this by
11017
   spacing the XD registers like the DR registers, but this would require
11018
   additional memory in every compilation to hold larger register vectors.
11019
   We could hold SFmode / SCmode values in XD registers, but that
11020
   would require a tertiary reload when reloading from / to memory,
11021
   and a secondary reload to reload from / to general regs; that
11022
   seems to be a losing proposition.
11023
 
11024
   We want to allow TImode FP regs so that when V4SFmode is loaded as TImode,
11025
   it won't be ferried through GP registers first.  */
11026
 
11027
bool
11028
sh_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
11029
{
11030
  if (SPECIAL_REGISTER_P (regno))
11031
    return mode == SImode;
11032
 
11033
  if (regno == FPUL_REG)
11034
    return (mode == SImode || mode == SFmode);
11035
 
11036
  if (FP_REGISTER_P (regno) && mode == SFmode)
11037
    return true;
11038
 
11039
  if (mode == V2SFmode)
11040
    {
11041
      if (((FP_REGISTER_P (regno) && (regno - FIRST_FP_REG) % 2 == 0)
11042
           || GENERAL_REGISTER_P (regno)))
11043
        return true;
11044
      else
11045
        return false;
11046
    }
11047
 
11048
  if (mode == V4SFmode)
11049
    {
11050
      if ((FP_REGISTER_P (regno) && (regno - FIRST_FP_REG) % 4 == 0)
11051
          || GENERAL_REGISTER_P (regno))
11052
        return true;
11053
      else
11054
        return false;
11055
    }
11056
 
11057
  if (mode == V16SFmode)
11058
    {
11059
      if (TARGET_SHMEDIA)
11060
        {
11061
          if (FP_REGISTER_P (regno) && (regno - FIRST_FP_REG) % 16 == 0)
11062
            return true;
11063
          else
11064
            return false;
11065
        }
11066
      else
11067
        return regno == FIRST_XD_REG;
11068
    }
11069
 
11070
  if (FP_REGISTER_P (regno))
11071
    {
11072
      if (mode == SFmode
11073
          || mode == SImode
11074
          || ((TARGET_SH2E || TARGET_SHMEDIA) && mode == SCmode)
11075
          || ((((TARGET_SH4 || TARGET_SH2A_DOUBLE) && mode == DFmode)
11076
               || mode == DCmode
11077
               || (TARGET_SHMEDIA
11078
                   && (mode == DFmode || mode == DImode
11079
                       || mode == V2SFmode || mode == TImode)))
11080
              && ((regno - FIRST_FP_REG) & 1) == 0)
11081
          || ((TARGET_SH4 || TARGET_SHMEDIA) && mode == TImode
11082
              && ((regno - FIRST_FP_REG) & 3) == 0))
11083
        return true;
11084
      else
11085
        return false;
11086
    }
11087
 
11088
  if (XD_REGISTER_P (regno))
11089
    return mode == DFmode;
11090
 
11091
  if (TARGET_REGISTER_P (regno))
11092
    return (mode == DImode || mode == SImode || mode == PDImode);
11093
 
11094
  if (regno == PR_REG)
11095
    return mode == SImode;
11096
 
11097
  if (regno == FPSCR_REG)
11098
    return mode == PSImode;
11099
 
11100
  /* FIXME.  This works around PR target/37633 for -O0.  */
11101
  if (!optimize && TARGET_SHMEDIA32 && GET_MODE_SIZE (mode) > 4)
11102
    {
11103
      unsigned int n = GET_MODE_SIZE (mode) / 8;
11104
 
11105
      if (regno >= FIRST_GENERAL_REG + 10 - n + 1
11106
          && regno <= FIRST_GENERAL_REG + 14)
11107
        return false;
11108
    }
11109
 
11110
  return true;
11111
}
11112
 
11113
/* Return true if a mode change from FROM to TO is invalid for registers
   in RCLASS.  */
11115
bool
11116
sh_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
11117
                             enum reg_class rclass)
11118
{
11119
  /* We want to enable the use of SUBREGs as a means to
11120
     VEC_SELECT a single element of a vector.  */
11121
  if (to == SFmode && VECTOR_MODE_P (from) && GET_MODE_INNER (from) == SFmode)
11122
    return (reg_classes_intersect_p (GENERAL_REGS, rclass));
11123
 
11124
  if (GET_MODE_SIZE (from) != GET_MODE_SIZE (to))
11125
    {
11126
      if (TARGET_LITTLE_ENDIAN)
11127
        {
11128
          if (GET_MODE_SIZE (to) < 8 || GET_MODE_SIZE (from) < 8)
11129
            return reg_classes_intersect_p (DF_REGS, rclass);
11130
        }
11131
      else
11132
        {
11133
          if (GET_MODE_SIZE (from) < 8)
11134
            return reg_classes_intersect_p (DF_HI_REGS, rclass);
11135
        }
11136
    }
11137
  return 0;
11138
}
11139
 
11140
 
11141
/* If ADDRESS refers to a CODE_LABEL, add NUSES to the number of times
11142
   that label is used.  */
11143
 
11144
void
11145
sh_mark_label (rtx address, int nuses)
11146
{
11147
  if (GOTOFF_P (address))
11148
    {
11149
      /* Extract the label or symbol.  */
11150
      address = XEXP (address, 0);
11151
      if (GET_CODE (address) == PLUS)
11152
        address = XEXP (address, 0);
11153
      address = XVECEXP (address, 0, 0);
11154
    }
11155
  if (GET_CODE (address) == LABEL_REF
11156
      && LABEL_P (XEXP (address, 0)))
11157
    LABEL_NUSES (XEXP (address, 0)) += nuses;
11158
}
11159
 
11160
/* Compute extra cost of moving data between one register class
11161
   and another.  */
11162
 
11163
/* If SECONDARY*_RELOAD_CLASS says something about the src/dst pair, regclass
11164
   uses this information.  Hence, the general register <-> floating point
11165
   register information here is not used for SFmode.  */
11166
 
11167
int
11168
sh_register_move_cost (enum machine_mode mode,
11169
                       enum reg_class srcclass, enum reg_class dstclass)
11170
{
11171
  if (dstclass == T_REGS || dstclass == PR_REGS)
11172
    return 10;
11173
 
11174
  if (dstclass == MAC_REGS && srcclass == MAC_REGS)
11175
    return 4;
11176
 
11177
  if (mode == SImode && ! TARGET_SHMEDIA && TARGET_FMOVD
11178
      && REGCLASS_HAS_FP_REG (srcclass)
11179
      && REGCLASS_HAS_FP_REG (dstclass))
11180
    return 4;
11181
 
11182
  if (REGCLASS_HAS_FP_REG (dstclass) && srcclass == T_REGS)
11183
    return ((TARGET_HARD_SH4 && !optimize_size) ? 10 : 7);
11184
 
11185
  if ((REGCLASS_HAS_FP_REG (dstclass) && srcclass == MAC_REGS)
11186
      || (dstclass == MAC_REGS && REGCLASS_HAS_FP_REG (srcclass)))
11187
    return 9;
11188
 
11189
  if ((REGCLASS_HAS_FP_REG (dstclass)
11190
       && REGCLASS_HAS_GENERAL_REG (srcclass))
11191
      || (REGCLASS_HAS_GENERAL_REG (dstclass)
11192
          && REGCLASS_HAS_FP_REG (srcclass)))
11193
    return ((TARGET_SHMEDIA ? 4 : TARGET_FMOVD ? 8 : 12)
11194
            * ((GET_MODE_SIZE (mode) + 7) / 8U));
11195
 
11196
  if ((dstclass == FPUL_REGS
11197
       && REGCLASS_HAS_GENERAL_REG (srcclass))
11198
      || (srcclass == FPUL_REGS
11199
          && REGCLASS_HAS_GENERAL_REG (dstclass)))
11200
    return 5;
11201
 
11202
  if ((dstclass == FPUL_REGS
11203
       && (srcclass == PR_REGS || srcclass == MAC_REGS || srcclass == T_REGS))
11204
      || (srcclass == FPUL_REGS
11205
          && (dstclass == PR_REGS || dstclass == MAC_REGS)))
11206
    return 7;
11207
 
11208
  if ((srcclass == TARGET_REGS && ! REGCLASS_HAS_GENERAL_REG (dstclass))
11209
      || ((dstclass) == TARGET_REGS && ! REGCLASS_HAS_GENERAL_REG (srcclass)))
11210
    return 20;
11211
 
11212
  /* ??? ptabs faults on (value & 0x3) == 0x3  */
11213
  if (TARGET_SHMEDIA
11214
      && ((srcclass) == TARGET_REGS || (srcclass) == SIBCALL_REGS))
11215
    {
11216
      if (sh_gettrcost >= 0)
11217
        return sh_gettrcost;
11218
      else if (!TARGET_PT_FIXED)
11219
        return 100;
11220
    }
11221
 
11222
  if ((srcclass == FPSCR_REGS && ! REGCLASS_HAS_GENERAL_REG (dstclass))
11223
      || (dstclass == FPSCR_REGS && ! REGCLASS_HAS_GENERAL_REG (srcclass)))
11224
  return 4;
11225
 
11226
  if (TARGET_SHMEDIA
11227
      || (TARGET_FMOVD
11228
          && ! REGCLASS_HAS_GENERAL_REG (srcclass)
11229
          && ! REGCLASS_HAS_GENERAL_REG (dstclass)))
11230
    return 2 * ((GET_MODE_SIZE (mode) + 7) / 8U);
11231
 
11232
  return 2 * ((GET_MODE_SIZE (mode) + 3) / 4U);
11233
}
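/* A worked example of the general <-> floating-point case above: moving a
   DFmode value (8 bytes) between the two register files costs
   12 * ((8 + 7) / 8) = 12 on plain SH4, 8 with TARGET_FMOVD and 4 on
   SHmedia, compared with 2 * ((8 + 3) / 4) = 4 for a move that stays
   within the general registers, so the allocator avoids such transfers
   where it can.  */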
11234
 
11235
static rtx emit_load_ptr (rtx, rtx);
11236
 
11237
static rtx
11238
emit_load_ptr (rtx reg, rtx addr)
11239
{
11240
  rtx mem = gen_const_mem (ptr_mode, addr);
11241
 
11242
  if (Pmode != ptr_mode)
11243
    mem = gen_rtx_SIGN_EXTEND (Pmode, mem);
11244
  return emit_move_insn (reg, mem);
11245
}
11246
 
11247
static void
11248
sh_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
11249
                    HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
11250
                    tree function)
11251
{
11252
  CUMULATIVE_ARGS cum;
11253
  int structure_value_byref = 0;
11254
  rtx this_rtx, this_value, sibcall, insns, funexp;
11255
  tree funtype = TREE_TYPE (function);
11256
  int simple_add = CONST_OK_FOR_ADD (delta);
11257
  int did_load = 0;
11258
  rtx scratch0, scratch1, scratch2;
11259
  unsigned i;
11260
 
11261
  reload_completed = 1;
11262
  epilogue_completed = 1;
11263
  current_function_uses_only_leaf_regs = 1;
11264
 
11265
  emit_note (NOTE_INSN_PROLOGUE_END);
11266
 
11267
  /* Find the "this" pointer.  We have such a wide range of ABIs for the
11268
     SH that it's best to do this completely machine independently.
11269
     "this" is passed as first argument, unless a structure return pointer
11270
     comes first, in which case "this" comes second.  */
11271
  INIT_CUMULATIVE_ARGS (cum, funtype, NULL_RTX, 0, 1);
11272
#ifndef PCC_STATIC_STRUCT_RETURN
11273
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
11274
    structure_value_byref = 1;
11275
#endif /* not PCC_STATIC_STRUCT_RETURN */
11276
  if (structure_value_byref && sh_struct_value_rtx (function, 0) == 0)
11277
    {
11278
      tree ptype = build_pointer_type (TREE_TYPE (funtype));
11279
 
11280
      FUNCTION_ARG_ADVANCE (cum, Pmode, ptype, 1);
11281
    }
11282
  this_rtx = FUNCTION_ARG (cum, Pmode, ptr_type_node, 1);
11283
 
11284
  /* For SHcompact, we only have r0 for a scratch register: r1 is the
11285
     static chain pointer (even if you can't have nested virtual functions
11286
     right now, someone might implement them sometime), and the rest of the
11287
     registers are used for argument passing, are callee-saved, or reserved.  */
11288
  /* We need to check call_used_regs / fixed_regs in case -fcall-saved-reg /
     -ffixed-reg has been used.  */
11290
  if (! call_used_regs[0] || fixed_regs[0])
11291
    error ("r0 needs to be available as a call-clobbered register");
11292
  scratch0 = scratch1 = scratch2 = gen_rtx_REG (Pmode, 0);
11293
  if (! TARGET_SH5)
11294
    {
11295
      if (call_used_regs[1] && ! fixed_regs[1])
11296
        scratch1 = gen_rtx_REG (ptr_mode, 1);
11297
      /* N.B., if not TARGET_HITACHI, register 2 is used to pass the pointer
         that tells where struct values are to be returned.  */
11299
      if (call_used_regs[3] && ! fixed_regs[3])
11300
        scratch2 = gen_rtx_REG (Pmode, 3);
11301
    }
11302
  else if (TARGET_SHMEDIA)
11303
    {
11304
      for (i = FIRST_GENERAL_REG; i <= LAST_GENERAL_REG; i++)
11305
        if (i != REGNO (scratch0) &&
11306
            call_used_regs[i] && ! fixed_regs[i] && ! FUNCTION_ARG_REGNO_P (i))
11307
          {
11308
            scratch1 = gen_rtx_REG (ptr_mode, i);
11309
            break;
11310
          }
11311
      if (scratch1 == scratch0)
11312
        error ("Need a second call-clobbered general purpose register");
11313
      for (i = FIRST_TARGET_REG; i <= LAST_TARGET_REG; i++)
11314
        if (call_used_regs[i] && ! fixed_regs[i])
11315
          {
11316
            scratch2 = gen_rtx_REG (Pmode, i);
11317
            break;
11318
          }
11319
      if (scratch2 == scratch0)
11320
        error ("Need a call-clobbered target register");
11321
    }
11322
 
11323
  this_value = plus_constant (this_rtx, delta);
11324
  if (vcall_offset
11325
      && (simple_add || scratch0 != scratch1)
11326
      && strict_memory_address_p (ptr_mode, this_value))
11327
    {
11328
      emit_load_ptr (scratch0, this_value);
11329
      did_load = 1;
11330
    }
11331
 
11332
  if (!delta)
11333
    ; /* Do nothing.  */
11334
  else if (simple_add)
11335
    emit_move_insn (this_rtx, this_value);
11336
  else
11337
    {
11338
      emit_move_insn (scratch1, GEN_INT (delta));
11339
      emit_insn (gen_add2_insn (this_rtx, scratch1));
11340
    }
11341
 
11342
  if (vcall_offset)
11343
    {
11344
      rtx offset_addr;
11345
 
11346
      if (!did_load)
11347
        emit_load_ptr (scratch0, this_rtx);
11348
 
11349
      offset_addr = plus_constant (scratch0, vcall_offset);
11350
      if (strict_memory_address_p (ptr_mode, offset_addr))
11351
        ; /* Do nothing.  */
11352
      else if (! TARGET_SH5 && scratch0 != scratch1)
11353
        {
11354
          /* scratch0 != scratch1, and we have indexed loads.  Get better
11355
             schedule by loading the offset into r1 and using an indexed
11356
             load - then the load of r1 can issue before the load from
11357
             (this_rtx + delta) finishes.  */
11358
          emit_move_insn (scratch1, GEN_INT (vcall_offset));
11359
          offset_addr = gen_rtx_PLUS (Pmode, scratch0, scratch1);
11360
        }
11361
      else if (CONST_OK_FOR_ADD (vcall_offset))
11362
        {
11363
          emit_insn (gen_add2_insn (scratch0, GEN_INT (vcall_offset)));
11364
          offset_addr = scratch0;
11365
        }
11366
      else if (scratch0 != scratch1)
11367
        {
11368
          emit_move_insn (scratch1, GEN_INT (vcall_offset));
11369
          emit_insn (gen_add2_insn (scratch0, scratch1));
11370
          offset_addr = scratch0;
11371
        }
11372
      else
11373
        gcc_unreachable (); /* FIXME */
11374
      emit_load_ptr (scratch0, offset_addr);
11375
 
11376
      if (Pmode != ptr_mode)
11377
        scratch0 = gen_rtx_TRUNCATE (ptr_mode, scratch0);
11378
      emit_insn (gen_add2_insn (this_rtx, scratch0));
11379
    }
11380
 
11381
  /* Generate a tail call to the target function.  */
11382
  if (! TREE_USED (function))
11383
    {
11384
      assemble_external (function);
11385
      TREE_USED (function) = 1;
11386
    }
11387
  funexp = XEXP (DECL_RTL (function), 0);
11388
  /* If the function is overridden, so is the thunk, hence we don't
11389
     need GOT addressing even if this is a public symbol.  */
11390
#if 0
11391
  if (TARGET_SH1 && ! flag_weak)
11392
    sibcall = gen_sibcalli_thunk (funexp, const0_rtx);
11393
  else
11394
#endif
11395
  if (TARGET_SH2 && flag_pic)
11396
    {
11397
      sibcall = gen_sibcall_pcrel (funexp, const0_rtx);
11398
      XEXP (XVECEXP (sibcall, 0, 2), 0) = scratch2;
11399
    }
11400
  else
11401
    {
11402
      if (TARGET_SHMEDIA && flag_pic)
11403
        {
11404
          funexp = gen_sym2PIC (funexp);
11405
          PUT_MODE (funexp, Pmode);
11406
        }
11407
      emit_move_insn (scratch2, funexp);
11408
      funexp = gen_rtx_MEM (FUNCTION_MODE, scratch2);
11409
      sibcall = gen_sibcall (funexp, const0_rtx, NULL_RTX);
11410
    }
11411
  sibcall = emit_call_insn (sibcall);
11412
  SIBLING_CALL_P (sibcall) = 1;
11413
  use_reg (&CALL_INSN_FUNCTION_USAGE (sibcall), this_rtx);
11414
  emit_barrier ();
11415
 
11416
  /* Run just enough of rest_of_compilation to do scheduling and get
11417
     the insns emitted.  Note that use_thunk calls
11418
     assemble_start_function and assemble_end_function.  */
11419
 
11420
  insn_locators_alloc ();
11421
  insns = get_insns ();
11422
 
11423
  if (optimize > 0)
11424
    {
11425
      if (! cfun->cfg)
11426
        init_flow (cfun);
11427
      split_all_insns_noflow ();
11428
    }
11429
 
11430
  sh_reorg ();
11431
 
11432
  if (optimize > 0 && flag_delayed_branch)
11433
    dbr_schedule (insns);
11434
 
11435
  shorten_branches (insns);
11436
  final_start_function (insns, file, 1);
11437
  final (insns, file, 1);
11438
  final_end_function ();
11439
 
11440
  reload_completed = 0;
11441
  epilogue_completed = 0;
11442
}
11443
 
11444
rtx
11445
function_symbol (rtx target, const char *name, enum sh_function_kind kind)
11446
{
11447
  rtx sym;
11448
 
11449
  /* If this is not an ordinary function, the name usually comes from a
11450
     string literal or an sprintf buffer.  Make sure we use the same
11451
     string consistently, so that cse will be able to unify address loads.  */
11452
  if (kind != FUNCTION_ORDINARY)
11453
    name = IDENTIFIER_POINTER (get_identifier (name));
11454
  sym = gen_rtx_SYMBOL_REF (Pmode, name);
11455
  SYMBOL_REF_FLAGS (sym) = SYMBOL_FLAG_FUNCTION;
11456
  if (flag_pic)
11457
    switch (kind)
11458
      {
11459
      case FUNCTION_ORDINARY:
11460
        break;
11461
      case SFUNC_GOT:
11462
        {
11463
          rtx reg = target ? target : gen_reg_rtx (Pmode);
11464
 
11465
          emit_insn (gen_symGOT2reg (reg, sym));
11466
          sym = reg;
11467
          break;
11468
        }
11469
      case SFUNC_STATIC:
11470
        {
11471
          /* ??? To allow cse to work, we use GOTOFF relocations.
11472
             We could add combiner patterns to transform this into
11473
             straight pc-relative calls with sym2PIC / bsrf when
11474
             label load and function call are still 1:1 and in the
11475
             same basic block during combine.  */
11476
          rtx reg = target ? target : gen_reg_rtx (Pmode);
11477
 
11478
          emit_insn (gen_symGOTOFF2reg (reg, sym));
11479
          sym = reg;
11480
          break;
11481
        }
11482
      }
11483
  if (target && sym != target)
11484
    {
11485
      emit_move_insn (target, sym);
11486
      return target;
11487
    }
11488
  return sym;
11489
}
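/* A usage example from earlier in this file: the SH1..SH4 trampoline code
   calls function_symbol (NULL, "__ic_invalidate", FUNCTION_ORDINARY) to
   get the cache invalidation routine, while SFUNC_GOT and SFUNC_STATIC
   are meant for special library functions when generating PIC, loading
   the address through the GOT or via a GOTOFF relocation respectively.  */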
11490
 
11491
/* Find the number of a general purpose register in S.  */
11492
static int
11493
scavenge_reg (HARD_REG_SET *s)
11494
{
11495
  int r;
11496
  for (r = FIRST_GENERAL_REG; r <= LAST_GENERAL_REG; r++)
11497
    if (TEST_HARD_REG_BIT (*s, r))
11498
      return r;
11499
  return -1;
11500
}
11501
 
11502
rtx
11503
sh_get_pr_initial_val (void)
11504
{
11505
  rtx val;
11506
 
11507
  /* ??? Unfortunately, get_hard_reg_initial_val doesn't always work for the
11508
     PR register on SHcompact, because it might be clobbered by the prologue.
11509
     We check first if that is known to be the case.  */
11510
  if (TARGET_SHCOMPACT
11511
      && ((crtl->args.info.call_cookie
11512
           & ~ CALL_COOKIE_RET_TRAMP (1))
11513
          || crtl->saves_all_registers))
11514
    return gen_frame_mem (SImode, return_address_pointer_rtx);
11515
 
11516
  /* If we haven't finished rtl generation, there might be a nonlocal label
11517
     that we haven't seen yet.
11518
     ??? get_hard_reg_initial_val fails if it is called after register
11519
     allocation has started, unless it has been called before for the
11520
     same register.  And even then, we end up in trouble if we didn't use
11521
     the register in the same basic block before.  So call
11522
     get_hard_reg_initial_val now and wrap it in an unspec if we might
11523
     need to replace it.  */
11524
  /* ??? We also must do this for TARGET_SH1 in general, because otherwise
11525
     combine can put the pseudo returned by get_hard_reg_initial_val into
11526
     instructions that need a general purpose register, which will fail to
11527
     be recognized when the pseudo becomes allocated to PR.  */
11528
  val
11529
    = get_hard_reg_initial_val (Pmode, TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG);
11530
  if (TARGET_SH1)
11531
    return gen_rtx_UNSPEC (SImode, gen_rtvec (1, val), UNSPEC_RA);
11532
  return val;
11533
}
11534
 
11535
int
11536
sh_expand_t_scc (rtx operands[])
11537
{
11538
  enum rtx_code code = GET_CODE (operands[1]);
11539
  rtx target = operands[0];
11540
  rtx op0 = operands[2];
11541
  rtx op1 = operands[3];
11542
  rtx result = target;
11543
  HOST_WIDE_INT val;
11544
 
11545
  if (!REG_P (op0) || REGNO (op0) != T_REG
11546
      || !CONST_INT_P (op1))
11547
    return 0;
11548
  if (!REG_P (result))
11549
    result = gen_reg_rtx (SImode);
11550
  val = INTVAL (op1);
11551
  if ((code == EQ && val == 1) || (code == NE && val == 0))
11552
    emit_insn (gen_movt (result));
11553
  else if (TARGET_SH2A && ((code == EQ && val == 0)
11554
                            || (code == NE && val == 1)))
11555
    emit_insn (gen_xorsi3_movrt (result));
11556
  else if ((code == EQ && val == 0) || (code == NE && val == 1))
11557
    {
11558
      emit_clobber (result);
11559
      emit_insn (gen_subc (result, result, result));
11560
      emit_insn (gen_addsi3 (result, result, const1_rtx));
11561
    }
11562
  else if (code == EQ || code == NE)
11563
    emit_insn (gen_move_insn (result, GEN_INT (code == NE)));
11564
  else
11565
    return 0;
11566
  if (result != target)
11567
    emit_move_insn (target, result);
11568
  return 1;
11569
}
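/* The subc sequence above relies on SH's subc computing
   result = result - result - T, i.e. -T; adding 1 afterwards gives 1 - T,
   which is exactly the logical negation of the T bit requested by the
   (EQ, 0) and (NE, 1) cases.  */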
11570
 
11571
/* INSN is an sfunc; return the rtx that describes the address used.  */
11572
static rtx
11573
extract_sfunc_addr (rtx insn)
11574
{
11575
  rtx pattern, part = NULL_RTX;
11576
  int len, i;
11577
 
11578
  pattern = PATTERN (insn);
11579
  len = XVECLEN (pattern, 0);
11580
  for (i = 0; i < len; i++)
11581
    {
11582
      part = XVECEXP (pattern, 0, i);
11583
      if (GET_CODE (part) == USE && GET_MODE (XEXP (part, 0)) == Pmode
11584
          && GENERAL_REGISTER_P (true_regnum (XEXP (part, 0))))
11585
        return XEXP (part, 0);
11586
    }
11587
  gcc_assert (GET_CODE (XVECEXP (pattern, 0, 0)) == UNSPEC_VOLATILE);
11588
  return XVECEXP (XVECEXP (pattern, 0, 0), 0, 1);
11589
}
11590
 
11591
/* Verify that the register in use_sfunc_addr still agrees with the address
11592
   used in the sfunc.  This prevents fill_slots_from_thread from changing
11593
   use_sfunc_addr.
11594
   INSN is the use_sfunc_addr instruction, and REG is the register it
11595
   guards.  */
11596
int
11597
check_use_sfunc_addr (rtx insn, rtx reg)
11598
{
11599
  /* Search for the sfunc.  It should really come right after INSN.  */
11600
  while ((insn = NEXT_INSN (insn)))
11601
    {
11602
      if (LABEL_P (insn) || JUMP_P (insn))
11603
        break;
11604
      if (! INSN_P (insn))
11605
        continue;
11606
 
11607
      if (GET_CODE (PATTERN (insn)) == SEQUENCE)
11608
        insn = XVECEXP (PATTERN (insn), 0, 0);
11609
      if (GET_CODE (PATTERN (insn)) != PARALLEL
11610
          || get_attr_type (insn) != TYPE_SFUNC)
11611
        continue;
11612
      return rtx_equal_p (extract_sfunc_addr (insn), reg);
11613
    }
11614
  gcc_unreachable ();
11615
}
11616
 
11617
/* This function returns a constant rtx that represents 2**15 / pi in
   SFmode.  It's used to scale SFmode angles, in radians, to a
   fixed-point signed 16.16-bit fraction of a full circle (i.e., 2*pi
   maps to 0x10000).  */
11621
 
11622
static GTY(()) rtx sh_fsca_sf2int_rtx;
11623
 
11624
rtx
11625
sh_fsca_sf2int (void)
11626
{
11627
  if (! sh_fsca_sf2int_rtx)
11628
    {
11629
      REAL_VALUE_TYPE rv;
11630
 
11631
      real_from_string (&rv, "10430.378350470453");
11632
      sh_fsca_sf2int_rtx = const_double_from_real_value (rv, SFmode);
11633
    }
11634
 
11635
  return sh_fsca_sf2int_rtx;
11636
}
11637
 
11638
/* This function returns a constant rtx that represents 2**15 / pi in
   DFmode.  It's used to scale DFmode angles, in radians, to a
   fixed-point signed 16.16-bit fraction of a full circle (i.e., 2*pi
   maps to 0x10000).  */
11642
 
11643
static GTY(()) rtx sh_fsca_df2int_rtx;
11644
 
11645
rtx
11646
sh_fsca_df2int (void)
11647
{
11648
  if (! sh_fsca_df2int_rtx)
11649
    {
11650
      REAL_VALUE_TYPE rv;
11651
 
11652
      real_from_string (&rv, "10430.378350470453");
11653
      sh_fsca_df2int_rtx = const_double_from_real_value (rv, DFmode);
11654
    }
11655
 
11656
  return sh_fsca_df2int_rtx;
11657
}
11658
 
11659
/* This function returns a constant rtx that represents pi / 2**15 in
   SFmode.  It's used to scale a fixed-point signed 16.16-bit fraction
   of a full circle back to an SFmode value (i.e., 0x10000 maps to
   2*pi).  */
11663
 
11664
static GTY(()) rtx sh_fsca_int2sf_rtx;
11665
 
11666
rtx
11667
sh_fsca_int2sf (void)
11668
{
11669
  if (! sh_fsca_int2sf_rtx)
11670
    {
11671
      REAL_VALUE_TYPE rv;
11672
 
11673
      real_from_string (&rv, "9.587379924285257e-5");
11674
      sh_fsca_int2sf_rtx = const_double_from_real_value (rv, SFmode);
11675
    }
11676
 
11677
  return sh_fsca_int2sf_rtx;
11678
}
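/* A minimal host-side sketch of the scaling described above; it is only an
   illustration (not part of the backend) and assumes nothing beyond the
   two constants quoted in the functions above.  */
#if 0
#include <stdio.h>

int
main (void)
{
  const float to_fixed = 10430.378350470453f;     /* 2**15 / pi */
  const float to_radians = 9.587379924285257e-5f; /* pi / 2**15 */
  float quarter_circle = 1.5707963f;              /* pi / 2 radians */
  unsigned fixed = (unsigned) (quarter_circle * to_fixed + 0.5f);

  printf ("0x%x\n", fixed);             /* prints 0x4000, a quarter of
                                           the 0x10000 full circle */
  printf ("%f\n", fixed * to_radians);  /* prints ~1.570796 */
  return 0;
}
#endif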
11679
 
11680
/* Initialize the CUMULATIVE_ARGS structure.  */
11681
 
11682
void
11683
sh_init_cumulative_args (CUMULATIVE_ARGS *  pcum,
11684
                         tree               fntype,
11685
                         rtx                libname ATTRIBUTE_UNUSED,
11686
                         tree               fndecl,
11687
                         signed int         n_named_args,
11688
                         enum machine_mode  mode)
11689
{
11690
  pcum->arg_count [(int) SH_ARG_FLOAT] = 0;
11691
  pcum->free_single_fp_reg = 0;
11692
  pcum->stack_regs = 0;
11693
  pcum->byref_regs = 0;
11694
  pcum->byref = 0;
11695
  pcum->outgoing = (n_named_args == -1) ? 0 : 1;
11696
 
11697
  /* XXX - Should we check TARGET_HITACHI here ???  */
11698
  pcum->renesas_abi = sh_attr_renesas_p (fntype) ? 1 : 0;
11699
 
11700
  if (fntype)
11701
    {
11702
      pcum->force_mem = ((TARGET_HITACHI || pcum->renesas_abi)
11703
                         && aggregate_value_p (TREE_TYPE (fntype), fndecl));
11704
      pcum->prototype_p = TYPE_ARG_TYPES (fntype) ? TRUE : FALSE;
11705
      pcum->arg_count [(int) SH_ARG_INT]
11706
        = TARGET_SH5 && aggregate_value_p (TREE_TYPE (fntype), fndecl);
11707
 
11708
      pcum->call_cookie
11709
        = CALL_COOKIE_RET_TRAMP (TARGET_SHCOMPACT
11710
                                 && pcum->arg_count [(int) SH_ARG_INT] == 0
11711
                                 && (TYPE_MODE (TREE_TYPE (fntype)) == BLKmode
11712
                                     ? int_size_in_bytes (TREE_TYPE (fntype))
11713
                                     : GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (fntype)))) > 4
11714
                                 && (BASE_RETURN_VALUE_REG (TYPE_MODE (TREE_TYPE (fntype)))
11715
                                     == FIRST_RET_REG));
11716
    }
11717
  else
11718
    {
11719
      pcum->arg_count [(int) SH_ARG_INT] = 0;
11720
      pcum->prototype_p = FALSE;
11721
      if (mode != VOIDmode)
11722
        {
11723
          pcum->call_cookie =
11724
            CALL_COOKIE_RET_TRAMP (TARGET_SHCOMPACT
11725
                                   && GET_MODE_SIZE (mode) > 4
11726
                                   && BASE_RETURN_VALUE_REG (mode) == FIRST_RET_REG);
11727
 
11728
          /* If the default ABI is the Renesas ABI then all library
11729
             calls must assume that the library will be using the
11730
             Renesas ABI.  So if the function would return its result
11731
             in memory then we must force the address of this memory
11732
             block onto the stack.  Ideally we would like to call
11733
             targetm.calls.return_in_memory() here but we do not have
11734
             the TYPE or the FNDECL available so we synthesize the
11735
             contents of that function as best we can.  */
11736
          pcum->force_mem =
11737
            (TARGET_DEFAULT & MASK_HITACHI)
11738
            && (mode == BLKmode
11739
                || (GET_MODE_SIZE (mode) > 4
11740
                    && !(mode == DFmode
11741
                         && TARGET_FPU_DOUBLE)));
11742
        }
11743
      else
11744
        {
11745
          pcum->call_cookie = 0;
11746
          pcum->force_mem = FALSE;
11747
        }
11748
    }
11749
}
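/* A note on the calling convention assumed here: the (n_named_args == -1)
   test above suggests this is reached through the INIT_CUMULATIVE_ARGS
   family of macros in sh.h, with an outgoing call passing the real
   named-argument count and the incoming-argument scan passing -1.  This
   is an inference, not verified against sh.h.  */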
11750
 
11751
/* Replace any occurrence of FROM(n) in X with TO(n).  The function does
11752
   not descend into CONST_DOUBLEs when doing the replacement.
11753
 
11754
   Note that copying is not done so X must not be shared unless all copies
11755
   are to be modified.
11756
 
11757
   This is like replace_rtx, except that we operate on N_REPLACEMENTS
11758
   replacements simultaneously - FROM(n) is replacements[n*2] and TO(n) is
11759
   replacements[n*2+1] - and that we take mode changes into account.
11760
 
11761
   If a replacement is ambiguous, return NULL_RTX.
11762
 
11763
   If MODIFY is zero, don't modify any rtl in place,
11764
   just return zero or nonzero for failure / success.  */
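/* Illustrative use of the REPLACEMENTS layout described above; the
   variable names are hypothetical, not taken from this file.  FROM(0)
   and TO(0) occupy slots 0 and 1, FROM(1) and TO(1) slots 2 and 3:

       rtx repl[4];
       repl[0] = from0; repl[1] = to0;
       repl[2] = from1; repl[3] = to1;
       if (replace_n_hard_rtx (pattern, repl, 2, 0))
         replace_n_hard_rtx (pattern, repl, 2, 1);

   The first call, with MODIFY == 0, only checks that every replacement
   is unambiguous; the second rewrites PATTERN in place.  */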
11765
 
11766
rtx
11767
replace_n_hard_rtx (rtx x, rtx *replacements, int n_replacements, int modify)
11768
{
11769
  int i, j;
11770
  const char *fmt;
11771
 
11772
  /* The following prevents loops from occurring when we change a MEM in
11773
     a CONST_DOUBLE into the same CONST_DOUBLE.  */
11774
  if (x != 0 && GET_CODE (x) == CONST_DOUBLE)
11775
    return x;
11776
 
11777
  for (i = n_replacements - 1; i >= 0 ; i--)
11778
    if (x == replacements[i*2] && GET_MODE (x) == GET_MODE (replacements[i*2+1]))
11779
      return replacements[i*2+1];
11780
 
11781
  /* Allow this function to make replacements in EXPR_LISTs.  */
11782
  if (x == 0)
11783
    return 0;
11784
 
11785
  if (GET_CODE (x) == SUBREG)
11786
    {
11787
      rtx new_rtx = replace_n_hard_rtx (SUBREG_REG (x), replacements,
11788
                                    n_replacements, modify);
11789
 
11790
      if (CONST_INT_P (new_rtx))
11791
        {
11792
          x = simplify_subreg (GET_MODE (x), new_rtx,
11793
                               GET_MODE (SUBREG_REG (x)),
11794
                               SUBREG_BYTE (x));
11795
          if (! x)
11796
            abort ();
11797
        }
11798
      else if (modify)
11799
        SUBREG_REG (x) = new_rtx;
11800
 
11801
      return x;
11802
    }
11803
  else if (REG_P (x))
11804
    {
11805
      unsigned regno = REGNO (x);
11806
      unsigned nregs = (regno < FIRST_PSEUDO_REGISTER
11807
                        ? HARD_REGNO_NREGS (regno, GET_MODE (x)) : 1);
11808
      rtx result = NULL_RTX;
11809
 
11810
      for (i = n_replacements - 1; i >= 0; i--)
11811
        {
11812
          rtx from = replacements[i*2];
11813
          rtx to = replacements[i*2+1];
11814
          unsigned from_regno, from_nregs, to_regno, new_regno;
11815
 
11816
          if (!REG_P (from))
11817
            continue;
11818
          from_regno = REGNO (from);
11819
          from_nregs = (from_regno < FIRST_PSEUDO_REGISTER
11820
                        ? HARD_REGNO_NREGS (from_regno, GET_MODE (from)) : 1);
11821
          if (regno < from_regno + from_nregs && regno + nregs > from_regno)
11822
            {
11823
              if (regno < from_regno
11824
                  || regno + nregs > from_regno + from_nregs
11825
                  || !REG_P (to)
11826
                  || result)
11827
                return NULL_RTX;
11828
              to_regno = REGNO (to);
11829
              if (to_regno < FIRST_PSEUDO_REGISTER)
11830
                {
11831
                  new_regno = regno + to_regno - from_regno;
11832
                  if ((unsigned) HARD_REGNO_NREGS (new_regno, GET_MODE (x))
11833
                      != nregs)
11834
                    return NULL_RTX;
11835
                  result = gen_rtx_REG (GET_MODE (x), new_regno);
11836
                }
11837
              else if (GET_MODE (x) <= GET_MODE (to))
11838
                result = gen_lowpart_common (GET_MODE (x), to);
11839
              else
11840
                result = gen_lowpart_SUBREG (GET_MODE (x), to);
11841
            }
11842
        }
11843
      return result ? result : x;
11844
    }
11845
  else if (GET_CODE (x) == ZERO_EXTEND)
11846
    {
11847
      rtx new_rtx = replace_n_hard_rtx (XEXP (x, 0), replacements,
11848
                                    n_replacements, modify);
11849
 
11850
      if (CONST_INT_P (new_rtx))
11851
        {
11852
          x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
11853
                                        new_rtx, GET_MODE (XEXP (x, 0)));
11854
          if (! x)
11855
            abort ();
11856
        }
11857
      else if (modify)
11858
        XEXP (x, 0) = new_rtx;
11859
 
11860
      return x;
11861
    }
11862
 
11863
  fmt = GET_RTX_FORMAT (GET_CODE (x));
11864
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
11865
    {
11866
      rtx new_rtx;
11867
 
11868
      if (fmt[i] == 'e')
11869
        {
11870
          new_rtx = replace_n_hard_rtx (XEXP (x, i), replacements,
11871
                                    n_replacements, modify);
11872
          if (!new_rtx)
11873
            return NULL_RTX;
11874
          if (modify)
11875
            XEXP (x, i) = new_rtx;
11876
        }
11877
      else if (fmt[i] == 'E')
11878
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
11879
          {
11880
            new_rtx = replace_n_hard_rtx (XVECEXP (x, i, j), replacements,
11881
                                      n_replacements, modify);
11882
            if (!new_rtx)
11883
              return NULL_RTX;
11884
            if (modify)
11885
              XVECEXP (x, i, j) = new_rtx;
11886
          }
11887
    }
11888
 
11889
  return x;
11890
}
11891
 
11892
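/* Build (truncate:MODE X).  If X is a ZERO_EXTEND or SIGN_EXTEND, look
   through it: return the inner operand unchanged if it already has MODE,
   truncate the inner operand if it is at least as wide as MODE, or
   re-extend it to MODE with the original extension code if it is
   narrower; the latter only when NEED_SIGN_EXT is zero or the original
   extension was a SIGN_EXTEND.  */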
rtx
11893
sh_gen_truncate (enum machine_mode mode, rtx x, int need_sign_ext)
11894
{
11895
  enum rtx_code code = TRUNCATE;
11896
 
11897
  if (GET_CODE (x) == ZERO_EXTEND || GET_CODE (x) == SIGN_EXTEND)
11898
    {
11899
      rtx inner = XEXP (x, 0);
11900
      enum machine_mode inner_mode = GET_MODE (inner);
11901
 
11902
      if (inner_mode == mode)
11903
        return inner;
11904
      else if (GET_MODE_SIZE (inner_mode) >= GET_MODE_SIZE (mode))
11905
        x = inner;
11906
      else if (GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (mode)
11907
               && (! need_sign_ext || GET_CODE (x) == SIGN_EXTEND))
11908
        {
11909
          code = GET_CODE (x);
11910
          x = inner;
11911
        }
11912
    }
11913
  return gen_rtx_fmt_e (code, mode, x);
11914
}
11915
 
11916
/* Called via for_each_rtx after reload, to clean up truncates of
11917
   registers that span multiple actual hard registers.  */
11918
int
11919
shmedia_cleanup_truncate (rtx *p, void *n_changes)
11920
{
11921
  rtx x = *p, reg;
11922
 
11923
  if (GET_CODE (x) != TRUNCATE)
11924
    return 0;
11925
  reg = XEXP (x, 0);
11926
  if (GET_MODE_SIZE (GET_MODE (reg)) > 8 && REG_P (reg))
11927
    {
11928
      enum machine_mode reg_mode = GET_MODE (reg);
11929
      XEXP (x, 0) = simplify_subreg (DImode, reg, reg_mode,
11930
                                     subreg_lowpart_offset (DImode, reg_mode));
11931
      *(int*) n_changes += 1;
11932
      return -1;
11933
    }
11934
  return 0;
11935
}
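/* A typical invocation would look something like this (hedged sketch;
   the actual caller is elsewhere in the port):

       int n_changes = 0;
       for_each_rtx (&PATTERN (insn), shmedia_cleanup_truncate, &n_changes);

   N_CHANGES then counts how many TRUNCATE operands were rewritten.  */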
11936
 
11937
/* Load and store depend on the highpart of the address.  However,
11938
   set_attr_alternative does not give well-defined results before reload,
11939
   so we must look at the rtl ourselves to see if any of the feeding
11940
   registers is used in a memref.  */
11941
 
11942
/* Called by sh_contains_memref_p via for_each_rtx.  */
11943
static int
11944
sh_contains_memref_p_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
11945
{
11946
  return (MEM_P (*loc));
11947
}
11948
 
11949
/* Return nonzero iff INSN contains a MEM.  */
11950
int
11951
sh_contains_memref_p (rtx insn)
11952
{
11953
  return for_each_rtx (&PATTERN (insn), &sh_contains_memref_p_1, NULL);
11954
}
11955
 
11956
/* Return nonzero iff INSN loads a banked register.  */
11957
int
11958
sh_loads_bankedreg_p (rtx insn)
11959
{
11960
  if (GET_CODE (PATTERN (insn)) == SET)
11961
    {
11962
      rtx op = SET_DEST (PATTERN (insn));
11963
      if (REG_P (op) && BANKED_REGISTER_P (REGNO (op)))
11964
        return 1;
11965
    }
11966
 
11967
  return 0;
11968
}
11969
 
11970
/* FNADDR is the MEM expression from a call expander.  Return an address
11971
   to use in an SHmedia insn pattern.  */
11972
rtx
11973
shmedia_prepare_call_address (rtx fnaddr, int is_sibcall)
11974
{
11975
  int is_sym;
11976
 
11977
  fnaddr = XEXP (fnaddr, 0);
11978
  is_sym = GET_CODE (fnaddr) == SYMBOL_REF;
11979
  if (flag_pic && is_sym)
11980
    {
11981
      if (! SYMBOL_REF_LOCAL_P (fnaddr))
11982
        {
11983
          rtx reg = gen_reg_rtx (Pmode);
11984
 
11985
          /* We must not use GOTPLT for sibcalls, because PIC_REG
11986
             must be restored before the PLT code gets to run.  */
11987
          if (is_sibcall)
11988
            emit_insn (gen_symGOT2reg (reg, fnaddr));
11989
          else
11990
            emit_insn (gen_symGOTPLT2reg (reg, fnaddr));
11991
          fnaddr = reg;
11992
        }
11993
      else
11994
        {
11995
          fnaddr = gen_sym2PIC (fnaddr);
11996
          PUT_MODE (fnaddr, Pmode);
11997
        }
11998
    }
11999
  /* If ptabs might trap, make this visible to the rest of the compiler.
12000
     We generally assume that symbols pertain to valid locations, but
12001
     it is possible to generate invalid symbols with asm or linker tricks.
12002
     In a list of functions where each returns its successor, an invalid
12003
     symbol might denote an empty list.  */
12004
  if (!TARGET_PT_FIXED
12005
      && (!is_sym || TARGET_INVALID_SYMBOLS)
12006
      && (!REG_P (fnaddr) || ! TARGET_REGISTER_P (REGNO (fnaddr))))
12007
    {
12008
      rtx tr = gen_reg_rtx (PDImode);
12009
 
12010
      emit_insn (gen_ptabs (tr, fnaddr));
12011
      fnaddr = tr;
12012
    }
12013
  else if (! target_reg_operand (fnaddr, Pmode))
12014
    fnaddr = copy_to_mode_reg (Pmode, fnaddr);
12015
  return fnaddr;
12016
}
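/* A call expander would use this roughly as follows (hedged sketch; the
   actual call sites are in the machine description, not in this file):

       operands[0] = shmedia_prepare_call_address (operands[0], 0);

   where operands[0] is the MEM mentioned in the comment above and the
   second argument is nonzero only for sibling calls.  */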
12017
 
12018
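/* Secondary-reload handling: from its signature this appears to
   implement the TARGET_SECONDARY_RELOAD hook (presumably registered
   earlier in this file).  It either returns the register class needed
   as an intermediate for the reload, or returns NO_REGS after pointing
   SRI->icode at a reload_* insn pattern that can perform the move
   directly.  */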
enum reg_class
12019
sh_secondary_reload (bool in_p, rtx x, enum reg_class rclass,
12020
                     enum machine_mode mode, secondary_reload_info *sri)
12021
{
12022
  if (in_p)
12023
    {
12024
      if (REGCLASS_HAS_FP_REG (rclass)
12025
          && ! TARGET_SHMEDIA
12026
          && immediate_operand ((x), mode)
12027
          && ! ((fp_zero_operand (x) || fp_one_operand (x))
12028
                && mode == SFmode && fldi_ok ()))
12029
        switch (mode)
12030
          {
12031
          case SFmode:
12032
            sri->icode = CODE_FOR_reload_insf__frn;
12033
            return NO_REGS;
12034
          case DFmode:
12035
            sri->icode = CODE_FOR_reload_indf__frn;
12036
            return NO_REGS;
12037
          case SImode:
12038
            /* ??? If we knew that we are in the appropriate mode -
12039
               single precision - we could use a reload pattern directly.  */
12040
            return FPUL_REGS;
12041
          default:
12042
            abort ();
12043
          }
12044
      if (rclass == FPUL_REGS
12045
          && ((REG_P (x)
12046
               && (REGNO (x) == MACL_REG || REGNO (x) == MACH_REG
12047
                   || REGNO (x) == T_REG))
12048
              || GET_CODE (x) == PLUS))
12049
        return GENERAL_REGS;
12050
      if (rclass == FPUL_REGS && immediate_operand (x, mode))
12051
        {
12052
          if (satisfies_constraint_I08 (x) || fp_zero_operand (x))
12053
            return GENERAL_REGS;
12054
          else if (mode == SFmode)
12055
            return FP_REGS;
12056
          sri->icode = CODE_FOR_reload_insi__i_fpul;
12057
          return NO_REGS;
12058
        }
12059
      if (rclass == FPSCR_REGS
12060
          && ((REG_P (x) && REGNO (x) >= FIRST_PSEUDO_REGISTER)
12061
              || (MEM_P (x) && GET_CODE (XEXP (x, 0)) == PLUS)))
12062
        return GENERAL_REGS;
12063
      if (REGCLASS_HAS_FP_REG (rclass)
12064
          && TARGET_SHMEDIA
12065
          && immediate_operand (x, mode)
12066
          && x != CONST0_RTX (GET_MODE (x))
12067
          && GET_MODE (x) != V4SFmode)
12068
        return GENERAL_REGS;
12069
      if ((mode == QImode || mode == HImode)
12070
          && TARGET_SHMEDIA && inqhi_operand (x, mode))
12071
        {
12072
          sri->icode = ((mode == QImode)
12073
                        ? CODE_FOR_reload_inqi : CODE_FOR_reload_inhi);
12074
          return NO_REGS;
12075
        }
12076
      if (TARGET_SHMEDIA && rclass == GENERAL_REGS
12077
          && (GET_CODE (x) == LABEL_REF || PIC_ADDR_P (x)))
12078
        return TARGET_REGS;
12079
    } /* end of input-only processing.  */
12080
 
12081
  if (((REGCLASS_HAS_FP_REG (rclass)
12082
        && (REG_P (x)
12083
            && (GENERAL_OR_AP_REGISTER_P (REGNO (x))
12084
                || (FP_REGISTER_P (REGNO (x)) && mode == SImode
12085
                    && TARGET_FMOVD))))
12086
       || (REGCLASS_HAS_GENERAL_REG (rclass)
12087
           && REG_P (x)
12088
           && FP_REGISTER_P (REGNO (x))))
12089
      && ! TARGET_SHMEDIA
12090
      && (mode == SFmode || mode == SImode))
12091
    return FPUL_REGS;
12092
  if ((rclass == FPUL_REGS
12093
       || (REGCLASS_HAS_FP_REG (rclass)
12094
           && ! TARGET_SHMEDIA && mode == SImode))
12095
      && (MEM_P (x)
12096
          || (REG_P (x)
12097
              && (REGNO (x) >= FIRST_PSEUDO_REGISTER
12098
                  || REGNO (x) == T_REG
12099
                  || system_reg_operand (x, VOIDmode)))))
12100
    {
12101
      if (rclass == FPUL_REGS)
12102
        return GENERAL_REGS;
12103
      return FPUL_REGS;
12104
    }
12105
  if ((rclass == TARGET_REGS
12106
       || (TARGET_SHMEDIA && rclass == SIBCALL_REGS))
12107
      && !satisfies_constraint_Csy (x)
12108
      && (!REG_P (x) || ! GENERAL_REGISTER_P (REGNO (x))))
12109
    return GENERAL_REGS;
12110
  if ((rclass == MAC_REGS || rclass == PR_REGS)
12111
      && REG_P (x) && ! GENERAL_REGISTER_P (REGNO (x))
12112
      && rclass != REGNO_REG_CLASS (REGNO (x)))
12113
    return GENERAL_REGS;
12114
  if (rclass != GENERAL_REGS && REG_P (x)
12115
      && TARGET_REGISTER_P (REGNO (x)))
12116
    return GENERAL_REGS;
12117
  return NO_REGS;
12118
}
12119
 
12120
enum sh_divide_strategy_e sh_div_strategy = SH_DIV_STRATEGY_DEFAULT;
12121
 
12122
#include "gt-sh.h"
