OpenCores
URL https://opencores.org/ocsvn/openrisc/openrisc/trunk

Subversion Repositories openrisc

[/] [openrisc/] [trunk/] [gnu-old/] [gcc-4.2.2/] [gcc/] [config/] [sh/] [sh.c] - Blame information for rev 816

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 38 julius
/* Output routines for GCC for Renesas / SuperH SH.
2
   Copyright (C) 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
3
   2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
4
   Contributed by Steve Chamberlain (sac@cygnus.com).
5
   Improved by Jim Wilson (wilson@cygnus.com).
6
 
7
This file is part of GCC.
8
 
9
GCC is free software; you can redistribute it and/or modify
10
it under the terms of the GNU General Public License as published by
11
the Free Software Foundation; either version 3, or (at your option)
12
any later version.
13
 
14
GCC is distributed in the hope that it will be useful,
15
but WITHOUT ANY WARRANTY; without even the implied warranty of
16
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17
GNU General Public License for more details.
18
 
19
You should have received a copy of the GNU General Public License
20
along with GCC; see the file COPYING3.  If not see
21
<http://www.gnu.org/licenses/>.  */
22
 
23
#include "config.h"
24
#include "system.h"
25
#include "coretypes.h"
26
#include "tm.h"
27
#include "insn-config.h"
28
#include "rtl.h"
29
#include "tree.h"
30
#include "flags.h"
31
#include "expr.h"
32
#include "optabs.h"
33
#include "function.h"
34
#include "regs.h"
35
#include "hard-reg-set.h"
36
#include "output.h"
37
#include "insn-attr.h"
38
#include "toplev.h"
39
#include "recog.h"
40
#include "c-pragma.h"
41
#include "integrate.h"
42
#include "dwarf2.h"
43
#include "tm_p.h"
44
#include "target.h"
45
#include "target-def.h"
46
#include "real.h"
47
#include "langhooks.h"
48
#include "basic-block.h"
49
#include "cfglayout.h"
50
#include "intl.h"
51
#include "sched-int.h"
52
#include "ggc.h"
53
#include "tree-gimple.h"
54
#include "cfgloop.h"
55
#include "alloc-pool.h"
56
 
57
 
58
int code_for_indirect_jump_scratch = CODE_FOR_indirect_jump_scratch;
59
 
60
#define MSW (TARGET_LITTLE_ENDIAN ? 1 : 0)
61
#define LSW (TARGET_LITTLE_ENDIAN ? 0 : 1)
62
 
63
/* These are some macros to abstract register modes.  */
64
#define CONST_OK_FOR_ADD(size) \
65
  (TARGET_SHMEDIA ? CONST_OK_FOR_I10 (size) : CONST_OK_FOR_I08 (size))
66
#define GEN_MOV (*(TARGET_SHMEDIA64 ? gen_movdi : gen_movsi))
67
#define GEN_ADD3 (*(TARGET_SHMEDIA64 ? gen_adddi3 : gen_addsi3))
68
#define GEN_SUB3 (*(TARGET_SHMEDIA64 ? gen_subdi3 : gen_subsi3))
69
 
70
/* Set to 1 by expand_prologue() when the function is an interrupt handler.  */
71
int current_function_interrupt;
72
 
73
tree sh_deferred_function_attributes;
74
tree *sh_deferred_function_attributes_tail = &sh_deferred_function_attributes;
75
 
76
/* Global variables for machine-dependent things.  */
77
 
78
/* Which cpu are we scheduling for.  */
79
enum processor_type sh_cpu;
80
 
81
/* Definitions used in ready queue reordering for first scheduling pass.  */
82
 
83
/* Reg weights arrays for modes SFmode and SImode, indexed by insn LUID.  */
84
static short *regmode_weight[2];
85
 
86
/* Total SFmode and SImode weights of scheduled insns.  */
87
static int curr_regmode_pressure[2];
88
 
89
/* If true, skip cycles for Q -> R movement.  */
90
static int skip_cycles = 0;
91
 
92
/* Cached value of can_issue_more. This is cached in sh_variable_issue hook
93
   and returned from sh_reorder2.  */
94
static short cached_can_issue_more;
95
 
96
/* Saved operands from the last compare to use when we generate an scc
97
   or bcc insn.  */
98
 
99
rtx sh_compare_op0;
100
rtx sh_compare_op1;
101
 
102
/* Provides the class number of the smallest class containing
103
   reg number.  */
104
 
105
enum reg_class regno_reg_class[FIRST_PSEUDO_REGISTER] =
106
{
107
  R0_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
108
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
109
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
110
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
111
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
112
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
113
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
114
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
115
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
116
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
117
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
118
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
119
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
120
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
121
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
122
  GENERAL_REGS, GENERAL_REGS, GENERAL_REGS, GENERAL_REGS,
123
  FP0_REGS,FP_REGS, FP_REGS, FP_REGS,
124
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
125
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
126
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
127
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
128
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
129
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
130
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
131
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
132
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
133
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
134
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
135
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
136
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
137
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
138
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
139
  TARGET_REGS, TARGET_REGS, TARGET_REGS, TARGET_REGS,
140
  TARGET_REGS, TARGET_REGS, TARGET_REGS, TARGET_REGS,
141
  DF_REGS, DF_REGS, DF_REGS, DF_REGS,
142
  DF_REGS, DF_REGS, DF_REGS, DF_REGS,
143
  NO_REGS, GENERAL_REGS, PR_REGS, T_REGS,
144
  MAC_REGS, MAC_REGS, FPUL_REGS, FPSCR_REGS,
145
  GENERAL_REGS, GENERAL_REGS,
146
};
147
 
148
char sh_register_names[FIRST_PSEUDO_REGISTER] \
149
  [MAX_REGISTER_NAME_LENGTH + 1] = SH_REGISTER_NAMES_INITIALIZER;
150
 
151
char sh_additional_register_names[ADDREGNAMES_SIZE] \
152
  [MAX_ADDITIONAL_REGISTER_NAME_LENGTH + 1]
153
  = SH_ADDITIONAL_REGISTER_NAMES_INITIALIZER;
154
 
155
/* Provide reg_class from a letter such as appears in the machine
156
   description.  *: target independently reserved letter.
157
   reg_class_from_letter['e' - 'a'] is set to NO_REGS for TARGET_FMOVD.  */
158
 
159
enum reg_class reg_class_from_letter[] =
160
{
161
  /* a */ ALL_REGS,  /* b */ TARGET_REGS, /* c */ FPSCR_REGS, /* d */ DF_REGS,
162
  /* e */ FP_REGS,   /* f */ FP_REGS,  /* g **/ NO_REGS,     /* h */ NO_REGS,
163
  /* i **/ NO_REGS,  /* j */ NO_REGS,  /* k */ SIBCALL_REGS, /* l */ PR_REGS,
164
  /* m **/ NO_REGS,  /* n **/ NO_REGS, /* o **/ NO_REGS,     /* p **/ NO_REGS,
165
  /* q */ NO_REGS,   /* r **/ NO_REGS, /* s **/ NO_REGS,     /* t */ T_REGS,
166
  /* u */ NO_REGS,   /* v */ NO_REGS,  /* w */ FP0_REGS,     /* x */ MAC_REGS,
167
  /* y */ FPUL_REGS, /* z */ R0_REGS
168
};
169
 
170
int assembler_dialect;
171
 
172
static bool shmedia_space_reserved_for_target_registers;
173
 
174
static bool sh_handle_option (size_t, const char *, int);
175
static void split_branches (rtx);
176
static int branch_dest (rtx);
177
static void force_into (rtx, rtx);
178
static void print_slot (rtx);
179
static rtx add_constant (rtx, enum machine_mode, rtx);
180
static void dump_table (rtx, rtx);
181
static int hi_const (rtx);
182
static int broken_move (rtx);
183
static int mova_p (rtx);
184
static rtx find_barrier (int, rtx, rtx);
185
static int noncall_uses_reg (rtx, rtx, rtx *);
186
static rtx gen_block_redirect (rtx, int, int);
187
static void sh_reorg (void);
188
static void output_stack_adjust (int, rtx, int, HARD_REG_SET *);
189
static rtx frame_insn (rtx);
190
static rtx push (int);
191
static void pop (int);
192
static void push_regs (HARD_REG_SET *, int);
193
static int calc_live_regs (HARD_REG_SET *);
194
static void mark_use (rtx, rtx *);
195
static HOST_WIDE_INT rounded_frame_size (int);
196
static rtx mark_constant_pool_use (rtx);
197
const struct attribute_spec sh_attribute_table[];
198
static tree sh_handle_interrupt_handler_attribute (tree *, tree, tree, int, bool *);
199
static tree sh_handle_sp_switch_attribute (tree *, tree, tree, int, bool *);
200
static tree sh_handle_trap_exit_attribute (tree *, tree, tree, int, bool *);
201
static tree sh_handle_renesas_attribute (tree *, tree, tree, int, bool *);
202
static void sh_output_function_epilogue (FILE *, HOST_WIDE_INT);
203
static void sh_insert_attributes (tree, tree *);
204
static const char *sh_check_pch_target_flags (int);
205
static int sh_adjust_cost (rtx, rtx, rtx, int);
206
static int sh_issue_rate (void);
207
static int sh_dfa_new_cycle (FILE *, int, rtx, int, int, int *sort_p);
208
static short find_set_regmode_weight (rtx, enum machine_mode);
209
static short find_insn_regmode_weight (rtx, enum machine_mode);
210
static void find_regmode_weight (basic_block, enum machine_mode);
211
static void  sh_md_init_global (FILE *, int, int);
212
static void  sh_md_finish_global (FILE *, int);
213
static int rank_for_reorder (const void *, const void *);
214
static void swap_reorder (rtx *, int);
215
static void ready_reorder (rtx *, int);
216
static short high_pressure (enum machine_mode);
217
static int sh_reorder (FILE *, int, rtx *, int *, int);
218
static int sh_reorder2 (FILE *, int, rtx *, int *, int);
219
static void sh_md_init (FILE *, int, int);
220
static int sh_variable_issue (FILE *, int, rtx, int);
221
 
222
static bool sh_function_ok_for_sibcall (tree, tree);
223
 
224
static bool sh_cannot_modify_jumps_p (void);
225
static int sh_target_reg_class (void);
226
static bool sh_optimize_target_register_callee_saved (bool);
227
static bool sh_ms_bitfield_layout_p (tree);
228
 
229
static void sh_init_builtins (void);
230
static void sh_media_init_builtins (void);
231
static rtx sh_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
232
static void sh_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT, tree);
233
static void sh_file_start (void);
234
static int flow_dependent_p (rtx, rtx);
235
static void flow_dependent_p_1 (rtx, rtx, void *);
236
static int shiftcosts (rtx);
237
static int andcosts (rtx);
238
static int addsubcosts (rtx);
239
static int multcosts (rtx);
240
static bool unspec_caller_rtx_p (rtx);
241
static bool sh_cannot_copy_insn_p (rtx);
242
static bool sh_rtx_costs (rtx, int, int, int *);
243
static int sh_address_cost (rtx);
244
#ifdef TARGET_ADJUST_UNROLL_MAX
245
static int sh_adjust_unroll_max (struct loop *, int, int, int, int);
246
#endif
247
static int sh_pr_n_sets (void);
248
static rtx sh_allocate_initial_value (rtx);
249
static int shmedia_target_regs_stack_space (HARD_REG_SET *);
250
static int shmedia_reserve_space_for_target_registers_p (int, HARD_REG_SET *);
251
static int shmedia_target_regs_stack_adjust (HARD_REG_SET *);
252
static int scavenge_reg (HARD_REG_SET *s);
253
struct save_schedule_s;
254
static struct save_entry_s *sh5_schedule_saves (HARD_REG_SET *,
255
                                                struct save_schedule_s *, int);
256
 
257
static rtx sh_struct_value_rtx (tree, int);
258
static bool sh_return_in_memory (tree, tree);
259
static rtx sh_builtin_saveregs (void);
260
static void sh_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode, tree, int *, int);
261
static bool sh_strict_argument_naming (CUMULATIVE_ARGS *);
262
static bool sh_pretend_outgoing_varargs_named (CUMULATIVE_ARGS *);
263
static tree sh_build_builtin_va_list (void);
264
static tree sh_gimplify_va_arg_expr (tree, tree, tree *, tree *);
265
static bool sh_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
266
                                  tree, bool);
267
static bool sh_callee_copies (CUMULATIVE_ARGS *, enum machine_mode,
268
                              tree, bool);
269
static int sh_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
270
                                 tree, bool);
271
static int sh_dwarf_calling_convention (tree);
272
static int hard_regs_intersect_p (HARD_REG_SET *, HARD_REG_SET *);
273
 
274
 
275
/* Initialize the GCC target structure.  */
276
#undef TARGET_ATTRIBUTE_TABLE
277
#define TARGET_ATTRIBUTE_TABLE sh_attribute_table
278
 
279
/* The next two are used for debug info when compiling with -gdwarf.  */
280
#undef TARGET_ASM_UNALIGNED_HI_OP
281
#define TARGET_ASM_UNALIGNED_HI_OP "\t.uaword\t"
282
#undef TARGET_ASM_UNALIGNED_SI_OP
283
#define TARGET_ASM_UNALIGNED_SI_OP "\t.ualong\t"
284
 
285
/* These are NULLed out on non-SH5 in OVERRIDE_OPTIONS.  */
286
#undef TARGET_ASM_UNALIGNED_DI_OP
287
#define TARGET_ASM_UNALIGNED_DI_OP "\t.uaquad\t"
288
#undef TARGET_ASM_ALIGNED_DI_OP
289
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
290
 
291
#undef TARGET_ASM_FUNCTION_EPILOGUE
292
#define TARGET_ASM_FUNCTION_EPILOGUE sh_output_function_epilogue
293
 
294
#undef TARGET_ASM_OUTPUT_MI_THUNK
295
#define TARGET_ASM_OUTPUT_MI_THUNK sh_output_mi_thunk
296
 
297
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
298
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
299
 
300
#undef TARGET_ASM_FILE_START
301
#define TARGET_ASM_FILE_START sh_file_start
302
#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
303
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
304
 
305
#undef TARGET_DEFAULT_TARGET_FLAGS
306
#define TARGET_DEFAULT_TARGET_FLAGS TARGET_DEFAULT
307
#undef TARGET_HANDLE_OPTION
308
#define TARGET_HANDLE_OPTION sh_handle_option
309
 
310
#undef TARGET_INSERT_ATTRIBUTES
311
#define TARGET_INSERT_ATTRIBUTES sh_insert_attributes
312
 
313
#undef TARGET_SCHED_ADJUST_COST
314
#define TARGET_SCHED_ADJUST_COST sh_adjust_cost
315
 
316
#undef TARGET_SCHED_ISSUE_RATE
317
#define TARGET_SCHED_ISSUE_RATE sh_issue_rate
318
 
319
/* The next 5 hooks have been implemented for reenabling sched1.  With the
320
   help of these macros we are limiting the movement of insns in sched1 to
321
   reduce the register pressure.  The overall idea is to keep count of SImode
322
   and SFmode regs required by already scheduled insns. When these counts
323
   cross some threshold values; give priority to insns that free registers.
324
   The insn that frees registers is most likely to be the insn with lowest
325
   LUID (original insn order); but such an insn might be there in the stalled
326
   queue (Q) instead of the ready queue (R).  To solve this, we skip cycles
327
   upto a max of 8 cycles so that such insns may move from Q -> R.
328
 
329
   The description of the hooks are as below:
330
 
331
   TARGET_SCHED_INIT_GLOBAL: Added a new target hook in the generic
332
   scheduler; it is called inside the sched_init function just after
333
   find_insn_reg_weights function call. It is used to calculate the SImode
334
   and SFmode weights of insns of basic blocks; much similar to what
335
   find_insn_reg_weights does.
336
   TARGET_SCHED_FINISH_GLOBAL: Corresponding cleanup hook.
337
 
338
   TARGET_SCHED_DFA_NEW_CYCLE: Skip cycles if high register pressure is
339
   indicated by TARGET_SCHED_REORDER2; doing this may move insns from
340
   (Q)->(R).
341
 
342
   TARGET_SCHED_REORDER: If the register pressure for SImode or SFmode is
343
   high; reorder the ready queue so that the insn with lowest LUID will be
344
   issued next.
345
 
346
   TARGET_SCHED_REORDER2: If the register pressure is high, indicate to
347
   TARGET_SCHED_DFA_NEW_CYCLE to skip cycles.
348
 
349
   TARGET_SCHED_VARIABLE_ISSUE: Cache the value of can_issue_more so that it
350
   can be returned from TARGET_SCHED_REORDER2.
351
 
352
   TARGET_SCHED_INIT: Reset the register pressure counting variables.  */
353
 
354
#undef TARGET_SCHED_DFA_NEW_CYCLE
355
#define TARGET_SCHED_DFA_NEW_CYCLE sh_dfa_new_cycle
356
 
357
#undef TARGET_SCHED_INIT_GLOBAL
358
#define TARGET_SCHED_INIT_GLOBAL sh_md_init_global
359
 
360
#undef TARGET_SCHED_FINISH_GLOBAL
361
#define TARGET_SCHED_FINISH_GLOBAL sh_md_finish_global
362
 
363
#undef TARGET_SCHED_VARIABLE_ISSUE
364
#define TARGET_SCHED_VARIABLE_ISSUE sh_variable_issue
365
 
366
#undef TARGET_SCHED_REORDER
367
#define TARGET_SCHED_REORDER sh_reorder
368
 
369
#undef TARGET_SCHED_REORDER2
370
#define TARGET_SCHED_REORDER2 sh_reorder2
371
 
372
#undef TARGET_SCHED_INIT
373
#define TARGET_SCHED_INIT sh_md_init
374
 
375
#undef TARGET_CANNOT_MODIFY_JUMPS_P
376
#define TARGET_CANNOT_MODIFY_JUMPS_P sh_cannot_modify_jumps_p
377
#undef TARGET_BRANCH_TARGET_REGISTER_CLASS
378
#define TARGET_BRANCH_TARGET_REGISTER_CLASS sh_target_reg_class
379
#undef TARGET_BRANCH_TARGET_REGISTER_CALLEE_SAVED
380
#define TARGET_BRANCH_TARGET_REGISTER_CALLEE_SAVED \
381
 sh_optimize_target_register_callee_saved
382
 
383
#undef TARGET_MS_BITFIELD_LAYOUT_P
384
#define TARGET_MS_BITFIELD_LAYOUT_P sh_ms_bitfield_layout_p
385
 
386
#undef TARGET_INIT_BUILTINS
387
#define TARGET_INIT_BUILTINS sh_init_builtins
388
#undef TARGET_EXPAND_BUILTIN
389
#define TARGET_EXPAND_BUILTIN sh_expand_builtin
390
 
391
#undef TARGET_FUNCTION_OK_FOR_SIBCALL
392
#define TARGET_FUNCTION_OK_FOR_SIBCALL sh_function_ok_for_sibcall
393
 
394
#undef TARGET_CANNOT_COPY_INSN_P
395
#define TARGET_CANNOT_COPY_INSN_P sh_cannot_copy_insn_p
396
#undef TARGET_RTX_COSTS
397
#define TARGET_RTX_COSTS sh_rtx_costs
398
#undef TARGET_ADDRESS_COST
399
#define TARGET_ADDRESS_COST sh_address_cost
400
#undef TARGET_ALLOCATE_INITIAL_VALUE
401
#define TARGET_ALLOCATE_INITIAL_VALUE sh_allocate_initial_value
402
 
403
#undef TARGET_MACHINE_DEPENDENT_REORG
404
#define TARGET_MACHINE_DEPENDENT_REORG sh_reorg
405
 
406
#ifdef HAVE_AS_TLS
407
#undef TARGET_HAVE_TLS
408
#define TARGET_HAVE_TLS true
409
#endif
410
 
411
#undef TARGET_PROMOTE_PROTOTYPES
412
#define TARGET_PROMOTE_PROTOTYPES sh_promote_prototypes
413
#undef TARGET_PROMOTE_FUNCTION_ARGS
414
#define TARGET_PROMOTE_FUNCTION_ARGS sh_promote_prototypes
415
#undef TARGET_PROMOTE_FUNCTION_RETURN
416
#define TARGET_PROMOTE_FUNCTION_RETURN sh_promote_prototypes
417
 
418
#undef TARGET_STRUCT_VALUE_RTX
419
#define TARGET_STRUCT_VALUE_RTX sh_struct_value_rtx
420
#undef TARGET_RETURN_IN_MEMORY
421
#define TARGET_RETURN_IN_MEMORY sh_return_in_memory
422
 
423
#undef TARGET_EXPAND_BUILTIN_SAVEREGS
424
#define TARGET_EXPAND_BUILTIN_SAVEREGS sh_builtin_saveregs
425
#undef TARGET_SETUP_INCOMING_VARARGS
426
#define TARGET_SETUP_INCOMING_VARARGS sh_setup_incoming_varargs
427
#undef TARGET_STRICT_ARGUMENT_NAMING
428
#define TARGET_STRICT_ARGUMENT_NAMING sh_strict_argument_naming
429
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
430
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED sh_pretend_outgoing_varargs_named
431
#undef TARGET_MUST_PASS_IN_STACK
432
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
433
#undef TARGET_PASS_BY_REFERENCE
434
#define TARGET_PASS_BY_REFERENCE sh_pass_by_reference
435
#undef TARGET_CALLEE_COPIES
436
#define TARGET_CALLEE_COPIES sh_callee_copies
437
#undef TARGET_ARG_PARTIAL_BYTES
438
#define TARGET_ARG_PARTIAL_BYTES sh_arg_partial_bytes
439
 
440
#undef TARGET_BUILD_BUILTIN_VA_LIST
441
#define TARGET_BUILD_BUILTIN_VA_LIST sh_build_builtin_va_list
442
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
443
#define TARGET_GIMPLIFY_VA_ARG_EXPR sh_gimplify_va_arg_expr
444
 
445
#undef TARGET_VECTOR_MODE_SUPPORTED_P
446
#define TARGET_VECTOR_MODE_SUPPORTED_P sh_vector_mode_supported_p
447
 
448
#undef TARGET_CHECK_PCH_TARGET_FLAGS
449
#define TARGET_CHECK_PCH_TARGET_FLAGS sh_check_pch_target_flags
450
 
451
#undef TARGET_DWARF_CALLING_CONVENTION
452
#define TARGET_DWARF_CALLING_CONVENTION sh_dwarf_calling_convention
453
 
454
/* Return regmode weight for insn.  */
455
#define INSN_REGMODE_WEIGHT(INSN, MODE)  regmode_weight[((MODE) == SImode) ? 0 : 1][INSN_UID (INSN)]
456
 
457
/* Return current register pressure for regmode.  */
458
#define CURR_REGMODE_PRESSURE(MODE)     curr_regmode_pressure[((MODE) == SImode) ? 0 : 1]
459
 
460
#ifdef SYMBIAN
461
 
462
#undef  TARGET_ENCODE_SECTION_INFO
463
#define TARGET_ENCODE_SECTION_INFO      sh_symbian_encode_section_info
464
#undef  TARGET_STRIP_NAME_ENCODING
465
#define TARGET_STRIP_NAME_ENCODING      sh_symbian_strip_name_encoding
466
#undef  TARGET_CXX_IMPORT_EXPORT_CLASS
467
#define TARGET_CXX_IMPORT_EXPORT_CLASS  symbian_import_export_class
468
 
469
#endif /* SYMBIAN */
470
 
471
#ifdef TARGET_ADJUST_UNROLL_MAX
472
#undef TARGET_ADJUST_UNROLL_MAX
473
#define TARGET_ADJUST_UNROLL_MAX sh_adjust_unroll_max
474
#endif
475
 
476
#undef TARGET_SECONDARY_RELOAD
477
#define TARGET_SECONDARY_RELOAD sh_secondary_reload
478
 
479
struct gcc_target targetm = TARGET_INITIALIZER;
480
 
481
/* Implement TARGET_HANDLE_OPTION.  */
482
 
483
static bool
484
sh_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED,
485
                  int value ATTRIBUTE_UNUSED)
486
{
487
  switch (code)
488
    {
489
    case OPT_m1:
490
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH1;
491
      return true;
492
 
493
    case OPT_m2:
494
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2;
495
      return true;
496
 
497
    case OPT_m2a:
498
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2A;
499
      return true;
500
 
501
    case OPT_m2a_nofpu:
502
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2A_NOFPU;
503
      return true;
504
 
505
    case OPT_m2a_single:
506
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2A_SINGLE;
507
      return true;
508
 
509
    case OPT_m2a_single_only:
510
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2A_SINGLE_ONLY;
511
      return true;
512
 
513
    case OPT_m2e:
514
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH2E;
515
      return true;
516
 
517
    case OPT_m3:
518
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH3;
519
      return true;
520
 
521
    case OPT_m3e:
522
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH3E;
523
      return true;
524
 
525
    case OPT_m4:
526
    case OPT_m4_100:
527
    case OPT_m4_200:
528
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4;
529
      return true;
530
 
531
    case OPT_m4_nofpu:
532
    case OPT_m4_400:
533
    case OPT_m4_500:
534
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4_NOFPU;
535
      return true;
536
 
537
    case OPT_m4_single:
538
    case OPT_m4_100_single:
539
    case OPT_m4_200_single:
540
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4_SINGLE;
541
      return true;
542
 
543
    case OPT_m4_single_only:
544
    case OPT_m4_100_single_only:
545
    case OPT_m4_200_single_only:
546
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4_SINGLE_ONLY;
547
      return true;
548
 
549
    case OPT_m4a:
550
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4A;
551
      return true;
552
 
553
    case OPT_m4a_nofpu:
554
    case OPT_m4al:
555
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4A_NOFPU;
556
      return true;
557
 
558
    case OPT_m4a_single:
559
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4A_SINGLE;
560
      return true;
561
 
562
    case OPT_m4a_single_only:
563
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH4A_SINGLE_ONLY;
564
      return true;
565
 
566
    case OPT_m5_32media:
567
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_32MEDIA;
568
      return true;
569
 
570
    case OPT_m5_32media_nofpu:
571
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_32MEDIA_NOFPU;
572
      return true;
573
 
574
    case OPT_m5_64media:
575
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_64MEDIA;
576
      return true;
577
 
578
    case OPT_m5_64media_nofpu:
579
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_64MEDIA_NOFPU;
580
      return true;
581
 
582
    case OPT_m5_compact:
583
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_COMPACT;
584
      return true;
585
 
586
    case OPT_m5_compact_nofpu:
587
      target_flags = (target_flags & ~MASK_ARCH) | SELECT_SH5_COMPACT_NOFPU;
588
      return true;
589
 
590
    default:
591
      return true;
592
    }
593
}
594
 
595
/* Print the operand address in x to the stream.  */
596
 
597
void
598
print_operand_address (FILE *stream, rtx x)
599
{
600
  switch (GET_CODE (x))
601
    {
602
    case REG:
603
    case SUBREG:
604
      fprintf (stream, "@%s", reg_names[true_regnum (x)]);
605
      break;
606
 
607
    case PLUS:
608
      {
609
        rtx base = XEXP (x, 0);
610
        rtx index = XEXP (x, 1);
611
 
612
        switch (GET_CODE (index))
613
          {
614
          case CONST_INT:
615
            fprintf (stream, "@(%d,%s)", (int) INTVAL (index),
616
                     reg_names[true_regnum (base)]);
617
            break;
618
 
619
          case REG:
620
          case SUBREG:
621
            {
622
              int base_num = true_regnum (base);
623
              int index_num = true_regnum (index);
624
 
625
              fprintf (stream, "@(r0,%s)",
626
                       reg_names[MAX (base_num, index_num)]);
627
              break;
628
            }
629
 
630
          default:
631
            gcc_unreachable ();
632
          }
633
      }
634
      break;
635
 
636
    case PRE_DEC:
637
      fprintf (stream, "@-%s", reg_names[true_regnum (XEXP (x, 0))]);
638
      break;
639
 
640
    case POST_INC:
641
      fprintf (stream, "@%s+", reg_names[true_regnum (XEXP (x, 0))]);
642
      break;
643
 
644
    default:
645
      x = mark_constant_pool_use (x);
646
      output_addr_const (stream, x);
647
      break;
648
    }
649
}
650
 
651
/* Print operand x (an rtx) in assembler syntax to file stream
652
   according to modifier code.
653
 
654
   '.'  print a .s if insn needs delay slot
655
   ','  print LOCAL_LABEL_PREFIX
656
   '@'  print trap, rte or rts depending upon pragma interruptness
657
   '#'  output a nop if there is nothing to put in the delay slot
658
   '''  print likelihood suffix (/u for unlikely).
659
   '>'  print branch target if -fverbose-asm
660
   'O'  print a constant without the #
661
   'R'  print the LSW of a dp value - changes if in little endian
662
   'S'  print the MSW of a dp value - changes if in little endian
663
   'T'  print the next word of a dp value - same as 'R' in big endian mode.
664
   'M'  SHMEDIA: print an `x' if `m' will print `base,index'.
665
        otherwise: print .b / .w / .l / .s / .d suffix if operand is a MEM.
666
   'N'  print 'r63' if the operand is (const_int 0).
667
   'd'  print a V2SF reg as dN instead of fpN.
668
   'm'  print a pair `base,offset' or `base,index', for LD and ST.
669
   'U'  Likewise for {LD,ST}{HI,LO}.
670
   'u'  prints the lowest 16 bits of CONST_INT, as an unsigned value.
671
   'o'  output an operator.  */
672
 
673
void
674
print_operand (FILE *stream, rtx x, int code)
675
{
676
  int regno;
677
  enum machine_mode mode;
678
 
679
  switch (code)
680
    {
681
      tree trapa_attr;
682
 
683
    case '.':
684
      if (final_sequence
685
          && ! INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))
686
          && get_attr_length (XVECEXP (final_sequence, 0, 1)))
687
        fprintf (stream, ASSEMBLER_DIALECT ? "/s" : ".s");
688
      break;
689
    case ',':
690
      fprintf (stream, "%s", LOCAL_LABEL_PREFIX);
691
      break;
692
    case '@':
693
      trapa_attr = lookup_attribute ("trap_exit",
694
                                      DECL_ATTRIBUTES (current_function_decl));
695
      if (trapa_attr)
696
        fprintf (stream, "trapa #%ld",
697
                 (long) TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (trapa_attr))));
698
      else if (sh_cfun_interrupt_handler_p ())
699
        fprintf (stream, "rte");
700
      else
701
        fprintf (stream, "rts");
702
      break;
703
    case '#':
704
      /* Output a nop if there's nothing in the delay slot.  */
705
      if (dbr_sequence_length () == 0)
706
        fprintf (stream, "\n\tnop");
707
      break;
708
    case '\'':
709
      {
710
        rtx note = find_reg_note (current_output_insn, REG_BR_PROB, 0);
711
 
712
        if (note && INTVAL (XEXP (note, 0)) * 2 < REG_BR_PROB_BASE)
713
          fputs ("/u", stream);
714
        break;
715
      }
716
    case '>':
717
      if (flag_verbose_asm && JUMP_LABEL (current_output_insn))
718
        {
719
          fputs ("\t! target: ", stream);
720
          output_addr_const (stream, JUMP_LABEL (current_output_insn));
721
        }
722
      break;
723
    case 'O':
724
      x = mark_constant_pool_use (x);
725
      output_addr_const (stream, x);
726
      break;
727
    /* N.B.: %R / %S / %T adjust memory addresses by four.
728
       For SHMEDIA, that means they can be used to access the first and
729
       second 32 bit part of a 64 bit (or larger) value that
730
       might be held in floating point registers or memory.
731
       While they can be used to access 64 bit parts of a larger value
732
       held in general purpose registers, that won't work with memory -
733
       neither for fp registers, since the frxx names are used.  */
734
    case 'R':
735
      if (REG_P (x) || GET_CODE (x) == SUBREG)
736
        {
737
          regno = true_regnum (x);
738
          regno += FP_REGISTER_P (regno) ? 1 : LSW;
739
          fputs (reg_names[regno], (stream));
740
        }
741
      else if (MEM_P (x))
742
        {
743
          x = adjust_address (x, SImode, 4 * LSW);
744
          print_operand_address (stream, XEXP (x, 0));
745
        }
746
      else
747
        {
748
          rtx sub = NULL_RTX;
749
 
750
          mode = GET_MODE (x);
751
          if (mode == VOIDmode)
752
            mode = DImode;
753
          if (GET_MODE_SIZE (mode) >= 8)
754
            sub = simplify_subreg (SImode, x, mode, 4 * LSW);
755
          if (sub)
756
            print_operand (stream, sub, 0);
757
          else
758
            output_operand_lossage ("invalid operand to %%R");
759
        }
760
      break;
761
    case 'S':
762
      if (REG_P (x) || GET_CODE (x) == SUBREG)
763
        {
764
          regno = true_regnum (x);
765
          regno += FP_REGISTER_P (regno) ? 0 : MSW;
766
          fputs (reg_names[regno], (stream));
767
        }
768
      else if (MEM_P (x))
769
        {
770
          x = adjust_address (x, SImode, 4 * MSW);
771
          print_operand_address (stream, XEXP (x, 0));
772
        }
773
      else
774
        {
775
          rtx sub = NULL_RTX;
776
 
777
          mode = GET_MODE (x);
778
          if (mode == VOIDmode)
779
            mode = DImode;
780
          if (GET_MODE_SIZE (mode) >= 8)
781
            sub = simplify_subreg (SImode, x, mode, 4 * MSW);
782
          if (sub)
783
            print_operand (stream, sub, 0);
784
          else
785
            output_operand_lossage ("invalid operand to %%S");
786
        }
787
      break;
788
    case 'T':
789
      /* Next word of a double.  */
790
      switch (GET_CODE (x))
791
        {
792
        case REG:
793
          fputs (reg_names[REGNO (x) + 1], (stream));
794
          break;
795
        case MEM:
796
          if (GET_CODE (XEXP (x, 0)) != PRE_DEC
797
              && GET_CODE (XEXP (x, 0)) != POST_INC)
798
            x = adjust_address (x, SImode, 4);
799
          print_operand_address (stream, XEXP (x, 0));
800
          break;
801
        default:
802
          break;
803
        }
804
      break;
805
    case 'o':
806
      switch (GET_CODE (x))
807
        {
808
        case PLUS:  fputs ("add", stream); break;
809
        case MINUS: fputs ("sub", stream); break;
810
        case MULT:  fputs ("mul", stream); break;
811
        case DIV:   fputs ("div", stream); break;
812
        case EQ:    fputs ("eq",  stream); break;
813
        case NE:    fputs ("ne",  stream); break;
814
        case GT:  case LT:  fputs ("gt",  stream); break;
815
        case GE:  case LE:  fputs ("ge",  stream); break;
816
        case GTU: case LTU: fputs ("gtu", stream); break;
817
        case GEU: case LEU: fputs ("geu", stream); break;
818
        default:
819
          break;
820
        }
821
      break;
822
    case 'M':
823
      if (TARGET_SHMEDIA)
824
        {
825
          if (GET_CODE (x) == MEM
826
              && GET_CODE (XEXP (x, 0)) == PLUS
827
              && (GET_CODE (XEXP (XEXP (x, 0), 1)) == REG
828
                  || GET_CODE (XEXP (XEXP (x, 0), 1)) == SUBREG))
829
            fputc ('x', stream);
830
        }
831
      else
832
        {
833
          if (GET_CODE (x) == MEM)
834
            {
835
              switch (GET_MODE (x))
836
                {
837
                case QImode: fputs (".b", stream); break;
838
                case HImode: fputs (".w", stream); break;
839
                case SImode: fputs (".l", stream); break;
840
                case SFmode: fputs (".s", stream); break;
841
                case DFmode: fputs (".d", stream); break;
842
                default: gcc_unreachable ();
843
                }
844
            }
845
        }
846
      break;
847
 
848
    case 'm':
849
      gcc_assert (GET_CODE (x) == MEM);
850
      x = XEXP (x, 0);
851
      /* Fall through.  */
852
    case 'U':
853
      switch (GET_CODE (x))
854
        {
855
        case REG:
856
        case SUBREG:
857
          print_operand (stream, x, 0);
858
          fputs (", 0", stream);
859
          break;
860
 
861
        case PLUS:
862
          print_operand (stream, XEXP (x, 0), 0);
863
          fputs (", ", stream);
864
          print_operand (stream, XEXP (x, 1), 0);
865
          break;
866
 
867
        default:
868
          gcc_unreachable ();
869
        }
870
      break;
871
 
872
    case 'd':
873
      gcc_assert (GET_CODE (x) == REG && GET_MODE (x) == V2SFmode);
874
 
875
      fprintf ((stream), "d%s", reg_names[REGNO (x)] + 1);
876
      break;
877
 
878
    case 'N':
879
      if (x == CONST0_RTX (GET_MODE (x)))
880
        {
881
          fprintf ((stream), "r63");
882
          break;
883
        }
884
      goto default_output;
885
    case 'u':
886
      if (GET_CODE (x) == CONST_INT)
887
        {
888
          fprintf ((stream), "%u", (unsigned) INTVAL (x) & (0x10000 - 1));
889
          break;
890
        }
891
      /* Fall through.  */
892
 
893
    default_output:
894
    default:
895
      regno = 0;
896
      mode = GET_MODE (x);
897
 
898
      switch (GET_CODE (x))
899
        {
900
        case TRUNCATE:
901
          {
902
            rtx inner = XEXP (x, 0);
903
            int offset = 0;
904
            enum machine_mode inner_mode;
905
 
906
            /* We might see SUBREGs with vector mode registers inside.  */
907
            if (GET_CODE (inner) == SUBREG
908
                && (GET_MODE_SIZE (GET_MODE (inner))
909
                    == GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
910
                && subreg_lowpart_p (inner))
911
              inner = SUBREG_REG (inner);
912
            if (GET_CODE (inner) == CONST_INT)
913
              {
914
                x = GEN_INT (trunc_int_for_mode (INTVAL (inner), GET_MODE (x)));
915
                goto default_output;
916
              }
917
            inner_mode = GET_MODE (inner);
918
            if (GET_CODE (inner) == SUBREG
919
                && (GET_MODE_SIZE (GET_MODE (inner))
920
                    < GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner))))
921
                && GET_CODE (SUBREG_REG (inner)) == REG)
922
              {
923
                offset = subreg_regno_offset (REGNO (SUBREG_REG (inner)),
924
                                              GET_MODE (SUBREG_REG (inner)),
925
                                              SUBREG_BYTE (inner),
926
                                              GET_MODE (inner));
927
                inner = SUBREG_REG (inner);
928
              }
929
            if (GET_CODE (inner) != REG || GET_MODE_SIZE (inner_mode) > 8)
930
              abort ();
931
            /* Floating point register pairs are always big endian;
932
               general purpose registers are 64 bit wide.  */
933
            regno = REGNO (inner);
934
            regno = (HARD_REGNO_NREGS (regno, inner_mode)
935
                     - HARD_REGNO_NREGS (regno, mode))
936
                     + offset;
937
            x = inner;
938
            goto reg;
939
          }
940
        case SIGN_EXTEND:
941
          x = XEXP (x, 0);
942
          goto reg;
943
          /* FIXME: We need this on SHmedia32 because reload generates
944
             some sign-extended HI or QI loads into DImode registers
945
             but, because Pmode is SImode, the address ends up with a
946
             subreg:SI of the DImode register.  Maybe reload should be
947
             fixed so as to apply alter_subreg to such loads?  */
948
        case IF_THEN_ELSE:
949
          gcc_assert (trapping_target_operand (x, VOIDmode));
950
          x = XEXP (XEXP (x, 2), 0);
951
          goto default_output;
952
        case SUBREG:
953
          gcc_assert (SUBREG_BYTE (x) == 0
954
                      && GET_CODE (SUBREG_REG (x)) == REG);
955
 
956
          x = SUBREG_REG (x);
957
          /* Fall through.  */
958
 
959
        reg:
960
        case REG:
961
          regno += REGNO (x);
962
          if (FP_REGISTER_P (regno)
963
              && mode == V16SFmode)
964
            fprintf ((stream), "mtrx%s", reg_names[regno] + 2);
965
          else if (FP_REGISTER_P (REGNO (x))
966
                   && mode == V4SFmode)
967
            fprintf ((stream), "fv%s", reg_names[regno] + 2);
968
          else if (GET_CODE (x) == REG
969
                   && mode == V2SFmode)
970
            fprintf ((stream), "fp%s", reg_names[regno] + 2);
971
          else if (FP_REGISTER_P (REGNO (x))
972
                   && GET_MODE_SIZE (mode) > 4)
973
            fprintf ((stream), "d%s", reg_names[regno] + 1);
974
          else
975
            fputs (reg_names[regno], (stream));
976
          break;
977
 
978
        case MEM:
979
          output_address (XEXP (x, 0));
980
          break;
981
 
982
        case CONST:
983
          if (TARGET_SHMEDIA
984
              && (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
985
                  || GET_CODE (XEXP (x, 0)) == ZERO_EXTEND)
986
              && (GET_MODE (XEXP (x, 0)) == DImode
987
                  || GET_MODE (XEXP (x, 0)) == SImode)
988
              && GET_CODE (XEXP (XEXP (x, 0), 0)) == TRUNCATE
989
              && GET_MODE (XEXP (XEXP (x, 0), 0)) == HImode)
990
            {
991
              rtx val = XEXP (XEXP (XEXP (x, 0), 0), 0);
992
              rtx val2 = val;
993
              bool nested_expr = false;
994
 
995
              fputc ('(', stream);
996
              if (GET_CODE (val) == ASHIFTRT)
997
                {
998
                  fputc ('(', stream);
999
                  val2 = XEXP (val, 0);
1000
                }
1001
              if (GET_CODE (val2) == CONST
1002
                  || GET_RTX_CLASS (GET_CODE (val2)) != RTX_OBJ)
1003
                {
1004
                  fputc ('(', stream);
1005
                  nested_expr = true;
1006
                }
1007
              output_addr_const (stream, val2);
1008
              if (nested_expr)
1009
                fputc (')', stream);
1010
              if (GET_CODE (val) == ASHIFTRT)
1011
                {
1012
                  fputs (" >> ", stream);
1013
                  output_addr_const (stream, XEXP (val, 1));
1014
                  fputc (')', stream);
1015
                }
1016
              fputs (" & 65535)", stream);
1017
              break;
1018
            }
1019
 
1020
          /* Fall through.  */
1021
        default:
1022
          if (TARGET_SH1)
1023
            fputc ('#', stream);
1024
          output_addr_const (stream, x);
1025
          break;
1026
        }
1027
      break;
1028
    }
1029
}
1030
 
1031
/* Like force_operand, but guarantees that VALUE ends up in TARGET.  */
1032
static void
1033
force_into (rtx value, rtx target)
1034
{
1035
  value = force_operand (value, target);
1036
  if (! rtx_equal_p (value, target))
1037
    emit_insn (gen_move_insn (target, value));
1038
}
1039
 
1040
/* Emit code to perform a block move.  Choose the best method.
1041
 
1042
   OPERANDS[0] is the destination.
1043
   OPERANDS[1] is the source.
1044
   OPERANDS[2] is the size.
1045
   OPERANDS[3] is the alignment safe to use.  */
1046
 
1047
int
1048
expand_block_move (rtx *operands)
1049
{
1050
  int align = INTVAL (operands[3]);
1051
  int constp = (GET_CODE (operands[2]) == CONST_INT);
1052
  int bytes = (constp ? INTVAL (operands[2]) : 0);
1053
 
1054
  if (! constp)
1055
    return 0;
1056
 
1057
  /* If we could use mov.l to move words and dest is word-aligned, we
1058
     can use movua.l for loads and still generate a relatively short
1059
     and efficient sequence.  */
1060
  if (TARGET_SH4A_ARCH && align < 4
1061
      && MEM_ALIGN (operands[0]) >= 32
1062
      && can_move_by_pieces (bytes, 32))
1063
    {
1064
      rtx dest = copy_rtx (operands[0]);
1065
      rtx src = copy_rtx (operands[1]);
1066
      /* We could use different pseudos for each copied word, but
1067
         since movua can only load into r0, it's kind of
1068
         pointless.  */
1069
      rtx temp = gen_reg_rtx (SImode);
1070
      rtx src_addr = copy_addr_to_reg (XEXP (src, 0));
1071
      int copied = 0;
1072
 
1073
      while (copied + 4 <= bytes)
1074
        {
1075
          rtx to = adjust_address (dest, SImode, copied);
1076
          rtx from = adjust_automodify_address (src, SImode, src_addr, copied);
1077
 
1078
          emit_insn (gen_movua (temp, from));
1079
          emit_move_insn (src_addr, plus_constant (src_addr, 4));
1080
          emit_move_insn (to, temp);
1081
          copied += 4;
1082
        }
1083
 
1084
      if (copied < bytes)
1085
        move_by_pieces (adjust_address (dest, BLKmode, copied),
1086
                        adjust_automodify_address (src, BLKmode,
1087
                                                   src_addr, copied),
1088
                        bytes - copied, align, 0);
1089
 
1090
      return 1;
1091
    }
1092
 
1093
  /* If it isn't a constant number of bytes, or if it doesn't have 4 byte
1094
     alignment, or if it isn't a multiple of 4 bytes, then fail.  */
1095
  if (align < 4 || (bytes % 4 != 0))
1096
    return 0;
1097
 
1098
  if (TARGET_HARD_SH4)
1099
    {
1100
      if (bytes < 12)
1101
        return 0;
1102
      else if (bytes == 12)
1103
        {
1104
          rtx func_addr_rtx = gen_reg_rtx (Pmode);
1105
          rtx r4 = gen_rtx_REG (SImode, 4);
1106
          rtx r5 = gen_rtx_REG (SImode, 5);
1107
 
1108
          function_symbol (func_addr_rtx, "__movmemSI12_i4", SFUNC_STATIC);
1109
          force_into (XEXP (operands[0], 0), r4);
1110
          force_into (XEXP (operands[1], 0), r5);
1111
          emit_insn (gen_block_move_real_i4 (func_addr_rtx));
1112
          return 1;
1113
        }
1114
      else if (! TARGET_SMALLCODE)
1115
        {
1116
          const char *entry_name;
1117
          rtx func_addr_rtx = gen_reg_rtx (Pmode);
1118
          int dwords;
1119
          rtx r4 = gen_rtx_REG (SImode, 4);
1120
          rtx r5 = gen_rtx_REG (SImode, 5);
1121
          rtx r6 = gen_rtx_REG (SImode, 6);
1122
 
1123
          entry_name = (bytes & 4 ? "__movmem_i4_odd" : "__movmem_i4_even");
1124
          function_symbol (func_addr_rtx, entry_name, SFUNC_STATIC);
1125
          force_into (XEXP (operands[0], 0), r4);
1126
          force_into (XEXP (operands[1], 0), r5);
1127
 
1128
          dwords = bytes >> 3;
1129
          emit_insn (gen_move_insn (r6, GEN_INT (dwords - 1)));
1130
          emit_insn (gen_block_lump_real_i4 (func_addr_rtx));
1131
          return 1;
1132
        }
1133
      else
1134
        return 0;
1135
    }
1136
  if (bytes < 64)
1137
    {
1138
      char entry[30];
1139
      rtx func_addr_rtx = gen_reg_rtx (Pmode);
1140
      rtx r4 = gen_rtx_REG (SImode, 4);
1141
      rtx r5 = gen_rtx_REG (SImode, 5);
1142
 
1143
      sprintf (entry, "__movmemSI%d", bytes);
1144
      function_symbol (func_addr_rtx, entry, SFUNC_STATIC);
1145
      force_into (XEXP (operands[0], 0), r4);
1146
      force_into (XEXP (operands[1], 0), r5);
1147
      emit_insn (gen_block_move_real (func_addr_rtx));
1148
      return 1;
1149
    }
1150
 
1151
  /* This is the same number of bytes as a memcpy call, but to a different
1152
     less common function name, so this will occasionally use more space.  */
1153
  if (! TARGET_SMALLCODE)
1154
    {
1155
      rtx func_addr_rtx = gen_reg_rtx (Pmode);
1156
      int final_switch, while_loop;
1157
      rtx r4 = gen_rtx_REG (SImode, 4);
1158
      rtx r5 = gen_rtx_REG (SImode, 5);
1159
      rtx r6 = gen_rtx_REG (SImode, 6);
1160
 
1161
      function_symbol (func_addr_rtx, "__movmem", SFUNC_STATIC);
1162
      force_into (XEXP (operands[0], 0), r4);
1163
      force_into (XEXP (operands[1], 0), r5);
1164
 
1165
      /* r6 controls the size of the move.  16 is decremented from it
1166
         for each 64 bytes moved.  Then the negative bit left over is used
1167
         as an index into a list of move instructions.  e.g., a 72 byte move
1168
         would be set up with size(r6) = 14, for one iteration through the
1169
         big while loop, and a switch of -2 for the last part.  */
1170
 
1171
      final_switch = 16 - ((bytes / 4) % 16);
1172
      while_loop = ((bytes / 4) / 16 - 1) * 16;
1173
      emit_insn (gen_move_insn (r6, GEN_INT (while_loop + final_switch)));
1174
      emit_insn (gen_block_lump_real (func_addr_rtx));
1175
      return 1;
1176
    }
1177
 
1178
  return 0;
1179
}
1180
 
1181
/* Prepare operands for a move define_expand; specifically, one of the
1182
   operands must be in a register.  */
1183
 
1184
int
1185
prepare_move_operands (rtx operands[], enum machine_mode mode)
1186
{
1187
  if ((mode == SImode || mode == DImode)
1188
      && flag_pic
1189
      && ! ((mode == Pmode || mode == ptr_mode)
1190
            && tls_symbolic_operand (operands[1], Pmode) != 0))
1191
    {
1192
      rtx temp;
1193
      if (SYMBOLIC_CONST_P (operands[1]))
1194
        {
1195
          if (GET_CODE (operands[0]) == MEM)
1196
            operands[1] = force_reg (Pmode, operands[1]);
1197
          else if (TARGET_SHMEDIA
1198
                   && GET_CODE (operands[1]) == LABEL_REF
1199
                   && target_reg_operand (operands[0], mode))
1200
            /* It's ok.  */;
1201
          else
1202
            {
1203
              temp = no_new_pseudos ? operands[0] : gen_reg_rtx (Pmode);
1204
              operands[1] = legitimize_pic_address (operands[1], mode, temp);
1205
            }
1206
        }
1207
      else if (GET_CODE (operands[1]) == CONST
1208
               && GET_CODE (XEXP (operands[1], 0)) == PLUS
1209
               && SYMBOLIC_CONST_P (XEXP (XEXP (operands[1], 0), 0)))
1210
        {
1211
          temp = no_new_pseudos ? operands[0] : gen_reg_rtx (Pmode);
1212
          temp = legitimize_pic_address (XEXP (XEXP (operands[1], 0), 0),
1213
                                         mode, temp);
1214
          operands[1] = expand_binop (mode, add_optab, temp,
1215
                                      XEXP (XEXP (operands[1], 0), 1),
1216
                                      no_new_pseudos ? temp
1217
                                      : gen_reg_rtx (Pmode),
1218
                                      0, OPTAB_LIB_WIDEN);
1219
        }
1220
    }
1221
 
1222
  if (! reload_in_progress && ! reload_completed)
1223
    {
1224
      /* Copy the source to a register if both operands aren't registers.  */
1225
      if (! register_operand (operands[0], mode)
1226
          && ! sh_register_operand (operands[1], mode))
1227
        operands[1] = copy_to_mode_reg (mode, operands[1]);
1228
 
1229
      if (GET_CODE (operands[0]) == MEM && ! memory_operand (operands[0], mode))
1230
        {
1231
          /* This is like change_address_1 (operands[0], mode, 0, 1) ,
1232
             except that we can't use that function because it is static.  */
1233
          rtx new = change_address (operands[0], mode, 0);
1234
          MEM_COPY_ATTRIBUTES (new, operands[0]);
1235
          operands[0] = new;
1236
        }
1237
 
1238
      /* This case can happen while generating code to move the result
1239
         of a library call to the target.  Reject `st r0,@(rX,rY)' because
1240
         reload will fail to find a spill register for rX, since r0 is already
1241
         being used for the source.  */
1242
      else if (TARGET_SH1
1243
               && refers_to_regno_p (R0_REG, R0_REG + 1, operands[1], (rtx *)0)
1244
               && GET_CODE (operands[0]) == MEM
1245
               && GET_CODE (XEXP (operands[0], 0)) == PLUS
1246
               && GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == REG)
1247
        operands[1] = copy_to_mode_reg (mode, operands[1]);
1248
    }
1249
 
1250
  if (mode == Pmode || mode == ptr_mode)
1251
    {
1252
      rtx op0, op1, opc;
1253
      enum tls_model tls_kind;
1254
 
1255
      op0 = operands[0];
1256
      op1 = operands[1];
1257
      if (GET_CODE (op1) == CONST
1258
          && GET_CODE (XEXP (op1, 0)) == PLUS
1259
          && tls_symbolic_operand (XEXP (XEXP (op1, 0), 0), Pmode))
1260
        {
1261
          opc = XEXP (XEXP (op1, 0), 1);
1262
          op1 = XEXP (XEXP (op1, 0), 0);
1263
        }
1264
      else
1265
        opc = NULL_RTX;
1266
 
1267
      if ((tls_kind = tls_symbolic_operand (op1, Pmode)))
1268
        {
1269
          rtx tga_op1, tga_ret, tmp, tmp2;
1270
 
1271
          switch (tls_kind)
1272
            {
1273
            case TLS_MODEL_GLOBAL_DYNAMIC:
1274
              tga_ret = gen_rtx_REG (Pmode, R0_REG);
1275
              emit_call_insn (gen_tls_global_dynamic (tga_ret, op1));
1276
              op1 = tga_ret;
1277
              break;
1278
 
1279
            case TLS_MODEL_LOCAL_DYNAMIC:
1280
              tga_ret = gen_rtx_REG (Pmode, R0_REG);
1281
              emit_call_insn (gen_tls_local_dynamic (tga_ret, op1));
1282
 
1283
              tmp = gen_reg_rtx (Pmode);
1284
              emit_move_insn (tmp, tga_ret);
1285
 
1286
              if (register_operand (op0, Pmode))
1287
                tmp2 = op0;
1288
              else
1289
                tmp2 = gen_reg_rtx (Pmode);
1290
 
1291
              emit_insn (gen_symDTPOFF2reg (tmp2, op1, tmp));
1292
              op1 = tmp2;
1293
              break;
1294
 
1295
            case TLS_MODEL_INITIAL_EXEC:
1296
              if (! flag_pic)
1297
                {
1298
                  /* Don't schedule insns for getting GOT address when
1299
                     the first scheduling is enabled, to avoid spill
1300
                     failures for R0.  */
1301
                  if (flag_schedule_insns)
1302
                    emit_insn (gen_blockage ());
1303
                  emit_insn (gen_GOTaddr2picreg ());
1304
                  emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode,
1305
                                                                 PIC_REG)));
1306
                  if (flag_schedule_insns)
1307
                    emit_insn (gen_blockage ());
1308
                }
1309
              tga_op1 = no_new_pseudos ? op0 : gen_reg_rtx (Pmode);
1310
              tmp = gen_sym2GOTTPOFF (op1);
1311
              emit_insn (gen_tls_initial_exec (tga_op1, tmp));
1312
              op1 = tga_op1;
1313
              break;
1314
 
1315
            case TLS_MODEL_LOCAL_EXEC:
1316
              tmp2 = gen_reg_rtx (Pmode);
1317
              emit_insn (gen_load_gbr (tmp2));
1318
              tmp = gen_reg_rtx (Pmode);
1319
              emit_insn (gen_symTPOFF2reg (tmp, op1));
1320
 
1321
              if (register_operand (op0, Pmode))
1322
                op1 = op0;
1323
              else
1324
                op1 = gen_reg_rtx (Pmode);
1325
 
1326
              emit_insn (gen_addsi3 (op1, tmp, tmp2));
1327
              break;
1328
 
1329
            default:
1330
              gcc_unreachable ();
1331
            }
1332
          if (opc)
1333
            emit_insn (gen_addsi3 (op1, op1, force_reg (SImode, opc)));
1334
          operands[1] = op1;
1335
        }
1336
    }
1337
 
1338
  return 0;
1339
}
1340
 
1341
/* Prepare the operands for an scc instruction; make sure that the
1342
   compare has been done.  */
1343
rtx
1344
prepare_scc_operands (enum rtx_code code)
1345
{
1346
  rtx t_reg = gen_rtx_REG (SImode, T_REG);
1347
  enum rtx_code oldcode = code;
1348
  enum machine_mode mode;
1349
 
1350
  /* First need a compare insn.  */
1351
  switch (code)
1352
    {
1353
    case NE:
1354
      /* It isn't possible to handle this case.  */
1355
      gcc_unreachable ();
1356
    case LT:
1357
      code = GT;
1358
      break;
1359
    case LE:
1360
      code = GE;
1361
      break;
1362
    case LTU:
1363
      code = GTU;
1364
      break;
1365
    case LEU:
1366
      code = GEU;
1367
      break;
1368
    default:
1369
      break;
1370
    }
1371
  if (code != oldcode)
1372
    {
1373
      rtx tmp = sh_compare_op0;
1374
      sh_compare_op0 = sh_compare_op1;
1375
      sh_compare_op1 = tmp;
1376
    }
1377
 
1378
  mode = GET_MODE (sh_compare_op0);
1379
  if (mode == VOIDmode)
1380
    mode = GET_MODE (sh_compare_op1);
1381
 
1382
  sh_compare_op0 = force_reg (mode, sh_compare_op0);
1383
  if ((code != EQ && code != NE
1384
       && (sh_compare_op1 != const0_rtx
1385
           || code == GTU  || code == GEU || code == LTU || code == LEU))
1386
      || (mode == DImode && sh_compare_op1 != const0_rtx)
1387
      || (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT))
1388
    sh_compare_op1 = force_reg (mode, sh_compare_op1);
1389
 
1390
  if ((TARGET_SH4 || TARGET_SH2A) && GET_MODE_CLASS (mode) == MODE_FLOAT)
1391
    (mode == SFmode ? emit_sf_insn : emit_df_insn)
1392
     (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2,
1393
                gen_rtx_SET (VOIDmode, t_reg,
1394
                             gen_rtx_fmt_ee (code, SImode,
1395
                                             sh_compare_op0, sh_compare_op1)),
1396
                gen_rtx_USE (VOIDmode, get_fpscr_rtx ()))));
1397
  else
1398
    emit_insn (gen_rtx_SET (VOIDmode, t_reg,
1399
                            gen_rtx_fmt_ee (code, SImode,
1400
                                            sh_compare_op0, sh_compare_op1)));
1401
 
1402
  return t_reg;
1403
}
1404
 
1405
/* Called from the md file, set up the operands of a compare instruction.  */
1406
 
1407
void
1408
from_compare (rtx *operands, int code)
1409
{
1410
  enum machine_mode mode = GET_MODE (sh_compare_op0);
1411
  rtx insn;
1412
  if (mode == VOIDmode)
1413
    mode = GET_MODE (sh_compare_op1);
1414
  if (code != EQ
1415
      || mode == DImode
1416
      || (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT))
1417
    {
1418
      /* Force args into regs, since we can't use constants here.  */
1419
      sh_compare_op0 = force_reg (mode, sh_compare_op0);
1420
      if (sh_compare_op1 != const0_rtx
1421
          || code == GTU  || code == GEU
1422
          || (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT))
1423
        sh_compare_op1 = force_reg (mode, sh_compare_op1);
1424
    }
1425
  if (TARGET_SH2E && GET_MODE_CLASS (mode) == MODE_FLOAT && code == GE)
1426
    {
1427
      from_compare (operands, GT);
1428
      insn = gen_ieee_ccmpeqsf_t (sh_compare_op0, sh_compare_op1);
1429
    }
1430
  else
1431
    insn = gen_rtx_SET (VOIDmode,
1432
                        gen_rtx_REG (SImode, T_REG),
1433
                        gen_rtx_fmt_ee (code, SImode,
1434
                                        sh_compare_op0, sh_compare_op1));
1435
  if ((TARGET_SH4 || TARGET_SH2A) && GET_MODE_CLASS (mode) == MODE_FLOAT)
1436
    {
1437
      insn = gen_rtx_PARALLEL (VOIDmode,
1438
                      gen_rtvec (2, insn,
1439
                                 gen_rtx_USE (VOIDmode, get_fpscr_rtx ())));
1440
      (mode == SFmode ? emit_sf_insn : emit_df_insn) (insn);
1441
    }
1442
  else
1443
    emit_insn (insn);
1444
}
1445
 
1446
/* Functions to output assembly code.  */
1447
 
1448
/* Return a sequence of instructions to perform DI or DF move.
1449
 
1450
   Since the SH cannot move a DI or DF in one instruction, we have
1451
   to take care when we see overlapping source and dest registers.  */
1452
 
1453
const char *
1454
output_movedouble (rtx insn ATTRIBUTE_UNUSED, rtx operands[],
1455
                   enum machine_mode mode)
1456
{
1457
  rtx dst = operands[0];
1458
  rtx src = operands[1];
1459
 
1460
  if (GET_CODE (dst) == MEM
1461
      && GET_CODE (XEXP (dst, 0)) == PRE_DEC)
1462
    return "mov.l       %T1,%0\n\tmov.l %1,%0";
1463
 
1464
  if (register_operand (dst, mode)
1465
      && register_operand (src, mode))
1466
    {
1467
      if (REGNO (src) == MACH_REG)
1468
        return "sts     mach,%S0\n\tsts macl,%R0";
1469
 
1470
      /* When mov.d r1,r2 do r2->r3 then r1->r2;
1471
         when mov.d r1,r0 do r1->r0 then r2->r1.  */
1472
 
1473
      if (REGNO (src) + 1 == REGNO (dst))
1474
        return "mov     %T1,%T0\n\tmov  %1,%0";
1475
      else
1476
        return "mov     %1,%0\n\tmov    %T1,%T0";
1477
    }
1478
  else if (GET_CODE (src) == CONST_INT)
1479
    {
1480
      if (INTVAL (src) < 0)
1481
        output_asm_insn ("mov   #-1,%S0", operands);
1482
      else
1483
        output_asm_insn ("mov   #0,%S0", operands);
1484
 
1485
      return "mov       %1,%R0";
1486
    }
1487
  else if (GET_CODE (src) == MEM)
1488
    {
1489
      int ptrreg = -1;
1490
      int dreg = REGNO (dst);
1491
      rtx inside = XEXP (src, 0);
1492
 
1493
      switch (GET_CODE (inside))
1494
        {
1495
        case REG:
1496
          ptrreg = REGNO (inside);
1497
          break;
1498
 
1499
        case SUBREG:
1500
          ptrreg = subreg_regno (inside);
1501
          break;
1502
 
1503
        case PLUS:
1504
          ptrreg = REGNO (XEXP (inside, 0));
1505
          /* ??? A r0+REG address shouldn't be possible here, because it isn't
1506
             an offsettable address.  Unfortunately, offsettable addresses use
1507
             QImode to check the offset, and a QImode offsettable address
1508
             requires r0 for the other operand, which is not currently
1509
             supported, so we can't use the 'o' constraint.
1510
             Thus we must check for and handle r0+REG addresses here.
1511
             We punt for now, since this is likely very rare.  */
1512
          gcc_assert (GET_CODE (XEXP (inside, 1)) != REG);
1513
          break;
1514
 
1515
        case LABEL_REF:
1516
          return "mov.l %1,%0\n\tmov.l  %1+4,%T0";
1517
        case POST_INC:
1518
          return "mov.l %1,%0\n\tmov.l  %1,%T0";
1519
        default:
1520
          gcc_unreachable ();
1521
        }
1522
 
1523
      /* Work out the safe way to copy.  Copy into the second half first.  */
1524
      if (dreg == ptrreg)
1525
        return "mov.l   %T1,%T0\n\tmov.l        %1,%0";
1526
    }
1527
 
1528
  return "mov.l %1,%0\n\tmov.l  %T1,%T0";
1529
}
1530
 
1531
/* Print an instruction which would have gone into a delay slot after
1532
   another instruction, but couldn't because the other instruction expanded
1533
   into a sequence where putting the slot insn at the end wouldn't work.  */
1534
 
1535
static void
1536
print_slot (rtx insn)
1537
{
1538
  final_scan_insn (XVECEXP (insn, 0, 1), asm_out_file, optimize, 1, NULL);
1539
 
1540
  INSN_DELETED_P (XVECEXP (insn, 0, 1)) = 1;
1541
}
1542
 
1543
const char *
1544
output_far_jump (rtx insn, rtx op)
1545
{
1546
  struct { rtx lab, reg, op; } this;
1547
  rtx braf_base_lab = NULL_RTX;
1548
  const char *jump;
1549
  int far;
1550
  int offset = branch_dest (insn) - INSN_ADDRESSES (INSN_UID (insn));
1551
  rtx prev;
1552
 
1553
  this.lab = gen_label_rtx ();
1554
 
1555
  if (TARGET_SH2
1556
      && offset >= -32764
1557
      && offset - get_attr_length (insn) <= 32766)
1558
    {
1559
      far = 0;
1560
      jump = "mov.w     %O0,%1; braf    %1";
1561
    }
1562
  else
1563
    {
1564
      far = 1;
1565
      if (flag_pic)
1566
        {
1567
          if (TARGET_SH2)
1568
            jump = "mov.l       %O0,%1; braf    %1";
1569
          else
1570
            jump = "mov.l       r0,@-r15; mova  %O0,r0; mov.l   @r0,%1; add     r0,%1; mov.l    @r15+,r0; jmp   @%1";
1571
        }
1572
      else
1573
        jump = "mov.l   %O0,%1; jmp     @%1";
1574
    }
1575
  /* If we have a scratch register available, use it.  */
1576
  if (GET_CODE ((prev = prev_nonnote_insn (insn))) == INSN
1577
      && INSN_CODE (prev) == CODE_FOR_indirect_jump_scratch)
1578
    {
1579
      this.reg = SET_DEST (XVECEXP (PATTERN (prev), 0, 0));
1580
      if (REGNO (this.reg) == R0_REG && flag_pic && ! TARGET_SH2)
1581
        jump = "mov.l   r1,@-r15; mova  %O0,r0; mov.l   @r0,r1; add     r1,r0; mov.l    @r15+,r1; jmp   @%1";
1582
      output_asm_insn (jump, &this.lab);
1583
      if (dbr_sequence_length ())
1584
        print_slot (final_sequence);
1585
      else
1586
        output_asm_insn ("nop", 0);
1587
    }
1588
  else
1589
    {
1590
      /* Output the delay slot insn first if any.  */
1591
      if (dbr_sequence_length ())
1592
        print_slot (final_sequence);
1593
 
1594
      this.reg = gen_rtx_REG (SImode, 13);
1595
      /* We must keep the stack aligned to 8-byte boundaries on SH5.
1596
         Fortunately, MACL is fixed and call-clobbered, and we never
1597
         need its value across jumps, so save r13 in it instead of in
1598
         the stack.  */
1599
      if (TARGET_SH5)
1600
        output_asm_insn ("lds   r13, macl", 0);
1601
      else
1602
        output_asm_insn ("mov.l r13,@-r15", 0);
1603
      output_asm_insn (jump, &this.lab);
1604
      if (TARGET_SH5)
1605
        output_asm_insn ("sts   macl, r13", 0);
1606
      else
1607
        output_asm_insn ("mov.l @r15+,r13", 0);
1608
    }
1609
  if (far && flag_pic && TARGET_SH2)
1610
    {
1611
      braf_base_lab = gen_label_rtx ();
1612
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
1613
                                 CODE_LABEL_NUMBER (braf_base_lab));
1614
    }
1615
  if (far)
1616
    output_asm_insn (".align    2", 0);
1617
  (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (this.lab));
1618
  this.op = op;
1619
  if (far && flag_pic)
1620
    {
1621
      if (TARGET_SH2)
1622
        this.lab = braf_base_lab;
1623
      output_asm_insn (".long   %O2-%O0", &this.lab);
1624
    }
1625
  else
1626
    output_asm_insn (far ? ".long       %O2" : ".word %O2-%O0", &this.lab);
1627
  return "";
1628
}
1629
 
1630
/* Local label counter, used for constants in the pool and inside
1631
   pattern branches.  */
1632
 
1633
static int lf = 100;
1634
 
1635
/* Output code for ordinary branches.  */
1636
 
1637
const char *
1638
output_branch (int logic, rtx insn, rtx *operands)
1639
{
1640
  switch (get_attr_length (insn))
1641
    {
1642
    case 6:
1643
      /* This can happen if filling the delay slot has caused a forward
1644
         branch to exceed its range (we could reverse it, but only
1645
         when we know we won't overextend other branches; this should
1646
         best be handled by relaxation).
1647
         It can also happen when other condbranches hoist delay slot insn
1648
         from their destination, thus leading to code size increase.
1649
         But the branch will still be in the range -4092..+4098 bytes.  */
1650
 
1651
      if (! TARGET_RELAX)
1652
        {
1653
          int label = lf++;
1654
          /* The call to print_slot will clobber the operands.  */
1655
          rtx op0 = operands[0];
1656
 
1657
          /* If the instruction in the delay slot is annulled (true), then
1658
             there is no delay slot where we can put it now.  The only safe
1659
             place for it is after the label.  final will do that by default.  */
1660
 
1661
          if (final_sequence
1662
              && ! INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))
1663
              && get_attr_length (XVECEXP (final_sequence, 0, 1)))
1664
            {
1665
              asm_fprintf (asm_out_file, "\tb%s%ss\t%LLF%d\n", logic ? "f" : "t",
1666
                           ASSEMBLER_DIALECT ? "/" : ".", label);
1667
              print_slot (final_sequence);
1668
            }
1669
          else
1670
            asm_fprintf (asm_out_file, "\tb%s\t%LLF%d\n", logic ? "f" : "t", label);
1671
 
1672
          output_asm_insn ("bra\t%l0", &op0);
1673
          fprintf (asm_out_file, "\tnop\n");
1674
          (*targetm.asm_out.internal_label) (asm_out_file, "LF", label);
1675
 
1676
          return "";
1677
        }
1678
      /* When relaxing, handle this like a short branch.  The linker
1679
         will fix it up if it still doesn't fit after relaxation.  */
1680
    case 2:
1681
      return logic ? "bt%.\t%l0" : "bf%.\t%l0";
1682
 
1683
      /* These are for SH2e, in which we have to account for the
1684
         extra nop because of the hardware bug in annulled branches.  */
1685
    case 8:
1686
      if (! TARGET_RELAX)
1687
        {
1688
          int label = lf++;
1689
 
1690
          gcc_assert (!final_sequence
1691
                      || !(INSN_ANNULLED_BRANCH_P
1692
                           (XVECEXP (final_sequence, 0, 0))));
1693
          asm_fprintf (asm_out_file, "b%s%ss\t%LLF%d\n",
1694
                       logic ? "f" : "t",
1695
                       ASSEMBLER_DIALECT ? "/" : ".", label);
1696
          fprintf (asm_out_file, "\tnop\n");
1697
          output_asm_insn ("bra\t%l0", operands);
1698
          fprintf (asm_out_file, "\tnop\n");
1699
          (*targetm.asm_out.internal_label) (asm_out_file, "LF", label);
1700
 
1701
          return "";
1702
        }
1703
      /* When relaxing, fall through.  */
1704
    case 4:
1705
      {
1706
        char buffer[10];
1707
 
1708
        sprintf (buffer, "b%s%ss\t%%l0",
1709
                 logic ? "t" : "f",
1710
                 ASSEMBLER_DIALECT ? "/" : ".");
1711
        output_asm_insn (buffer, &operands[0]);
1712
        return "nop";
1713
      }
1714
 
1715
    default:
1716
      /* There should be no longer branches now - that would
1717
         indicate that something has destroyed the branches set
1718
         up in machine_dependent_reorg.  */
1719
      gcc_unreachable ();
1720
    }
1721
}
1722
 
1723
const char *
1724
output_branchy_insn (enum rtx_code code, const char *template,
1725
                     rtx insn, rtx *operands)
1726
{
1727
  rtx next_insn = NEXT_INSN (insn);
1728
 
1729
  if (next_insn && GET_CODE (next_insn) == JUMP_INSN && condjump_p (next_insn))
1730
    {
1731
      rtx src = SET_SRC (PATTERN (next_insn));
1732
      if (GET_CODE (src) == IF_THEN_ELSE && GET_CODE (XEXP (src, 0)) != code)
1733
        {
1734
          /* Following branch not taken */
1735
          operands[9] = gen_label_rtx ();
1736
          emit_label_after (operands[9], next_insn);
1737
          INSN_ADDRESSES_NEW (operands[9],
1738
                              INSN_ADDRESSES (INSN_UID (next_insn))
1739
                              + get_attr_length (next_insn));
1740
          return template;
1741
        }
1742
      else
1743
        {
1744
          int offset = (branch_dest (next_insn)
1745
                        - INSN_ADDRESSES (INSN_UID (next_insn)) + 4);
1746
          if (offset >= -252 && offset <= 258)
1747
            {
1748
              if (GET_CODE (src) == IF_THEN_ELSE)
1749
                /* branch_true */
1750
                src = XEXP (src, 1);
1751
              operands[9] = src;
1752
              return template;
1753
            }
1754
        }
1755
    }
1756
  operands[9] = gen_label_rtx ();
1757
  emit_label_after (operands[9], insn);
1758
  INSN_ADDRESSES_NEW (operands[9],
1759
                      INSN_ADDRESSES (INSN_UID (insn))
1760
                      + get_attr_length (insn));
1761
  return template;
1762
}
1763
 
1764
const char *
1765
output_ieee_ccmpeq (rtx insn, rtx *operands)
1766
{
1767
  return output_branchy_insn (NE, "bt\t%l9\n\tfcmp/eq\t%1,%0",
1768
                              insn, operands);
1769
}
1770
 
1771
/* Output the start of the assembler file.  */
1772
 
1773
static void
1774
sh_file_start (void)
1775
{
1776
  default_file_start ();
1777
 
1778
#ifdef SYMBIAN
1779
  /* Declare the .directive section before it is used.  */
1780
  fputs ("\t.section .directive, \"SM\", @progbits, 1\n", asm_out_file);
1781
  fputs ("\t.asciz \"#<SYMEDIT>#\\n\"\n", asm_out_file);
1782
#endif
1783
 
1784
  if (TARGET_ELF)
1785
    /* We need to show the text section with the proper
1786
       attributes as in TEXT_SECTION_ASM_OP, before dwarf2out
1787
       emits it without attributes in TEXT_SECTION_ASM_OP, else GAS
1788
       will complain.  We can teach GAS specifically about the
1789
       default attributes for our choice of text section, but
1790
       then we would have to change GAS again if/when we change
1791
       the text section name.  */
1792
    fprintf (asm_out_file, "%s\n", TEXT_SECTION_ASM_OP);
1793
  else
1794
    /* Switch to the data section so that the coffsem symbol
1795
       isn't in the text section.  */
1796
    switch_to_section (data_section);
1797
 
1798
  if (TARGET_LITTLE_ENDIAN)
1799
    fputs ("\t.little\n", asm_out_file);
1800
 
1801
  if (!TARGET_ELF)
1802
    {
1803
      if (TARGET_SHCOMPACT)
1804
        fputs ("\t.mode\tSHcompact\n", asm_out_file);
1805
      else if (TARGET_SHMEDIA)
1806
        fprintf (asm_out_file, "\t.mode\tSHmedia\n\t.abi\t%i\n",
1807
                 TARGET_SHMEDIA64 ? 64 : 32);
1808
    }
1809
}
1810
 
1811
/* Check if PAT includes UNSPEC_CALLER unspec pattern.  */
1812
 
1813
static bool
1814
unspec_caller_rtx_p (rtx pat)
1815
{
1816
  switch (GET_CODE (pat))
1817
    {
1818
    case CONST:
1819
      return unspec_caller_rtx_p (XEXP (pat, 0));
1820
    case PLUS:
1821
    case MINUS:
1822
      if (unspec_caller_rtx_p (XEXP (pat, 0)))
1823
        return true;
1824
      return unspec_caller_rtx_p (XEXP (pat, 1));
1825
    case UNSPEC:
1826
      if (XINT (pat, 1) == UNSPEC_CALLER)
1827
        return true;
1828
    default:
1829
      break;
1830
    }
1831
 
1832
  return false;
1833
}
1834
 
1835
/* Indicate that INSN cannot be duplicated.  This is true for an insn
1836
   that generates a unique label.  */
1837
 
1838
static bool
1839
sh_cannot_copy_insn_p (rtx insn)
1840
{
1841
  rtx pat;
1842
 
1843
  if (!reload_completed || !flag_pic)
1844
    return false;
1845
 
1846
  if (GET_CODE (insn) != INSN)
1847
    return false;
1848
  if (asm_noperands (insn) >= 0)
1849
    return false;
1850
 
1851
  pat = PATTERN (insn);
1852
  if (GET_CODE (pat) != SET)
1853
    return false;
1854
  pat = SET_SRC (pat);
1855
 
1856
  if (unspec_caller_rtx_p (pat))
1857
    return true;
1858
 
1859
  return false;
1860
}
1861
 
1862
/* Actual number of instructions used to make a shift by N.  */
1863
static const char ashiftrt_insns[] =
1864
  { 0,1,2,3,4,5,8,8,8,8,8,8,8,8,8,8,2,3,4,5,8,8,8,8,8,8,8,8,8,8,8,2};
1865
 
1866
/* Left shift and logical right shift are the same.  */
1867
static const char shift_insns[]    =
1868
  { 0,1,1,2,2,3,3,4,1,2,2,3,3,4,3,3,1,2,2,3,3,4,3,3,2,3,3,4,4,4,3,3};
1869
 
1870
/* Individual shift amounts needed to get the above length sequences.
1871
   One bit right shifts clobber the T bit, so when possible, put one bit
1872
   shifts in the middle of the sequence, so the ends are eligible for
1873
   branch delay slots.  */
1874
static const short shift_amounts[32][5] = {
1875
  {0}, {1}, {2}, {2, 1},
1876
  {2, 2}, {2, 1, 2}, {2, 2, 2}, {2, 2, 1, 2},
1877
  {8}, {8, 1}, {8, 2}, {8, 1, 2},
1878
  {8, 2, 2}, {8, 2, 1, 2}, {8, -2, 8}, {8, -1, 8},
1879
  {16}, {16, 1}, {16, 2}, {16, 1, 2},
1880
  {16, 2, 2}, {16, 2, 1, 2}, {16, -2, 8}, {16, -1, 8},
1881
  {16, 8}, {16, 1, 8}, {16, 8, 2}, {16, 8, 1, 2},
1882
  {16, 8, 2, 2}, {16, -1, -2, 16}, {16, -2, 16}, {16, -1, 16}};
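 
/* A minimal illustrative sketch, not part of this file: it shows how the
   tables above describe a constant shift as a sequence of the shift widths
   the SH has single instructions for (1, 2, 8 and 16 bits).  shift_insns[N]
   gives the number of component shifts and shift_amounts[N] lists them; a
   negative entry means a shift in the opposite direction, exactly as
   gen_ashift below interprets it.  The tables are repeated locally (with
   sketch_ names) so the sketch stands alone, and it is kept under #if 0 so
   it is never compiled.  */
#if 0
#include <stdio.h>

static const char sketch_shift_insns[32] =
  { 0,1,1,2,2,3,3,4,1,2,2,3,3,4,3,3,1,2,2,3,3,4,3,3,2,3,3,4,4,4,3,3};
static const short sketch_shift_amounts[32][5] = {
  {0}, {1}, {2}, {2, 1},
  {2, 2}, {2, 1, 2}, {2, 2, 2}, {2, 2, 1, 2},
  {8}, {8, 1}, {8, 2}, {8, 1, 2},
  {8, 2, 2}, {8, 2, 1, 2}, {8, -2, 8}, {8, -1, 8},
  {16}, {16, 1}, {16, 2}, {16, 1, 2},
  {16, 2, 2}, {16, 2, 1, 2}, {16, -2, 8}, {16, -1, 8},
  {16, 8}, {16, 1, 8}, {16, 8, 2}, {16, 8, 1, 2},
  {16, 8, 2, 2}, {16, -1, -2, 16}, {16, -2, 16}, {16, -1, 16}};

/* Print the component shifts used for a constant left shift by COUNT.  */
static void
sketch_print_shift (int count)
{
  int i;

  count &= 31;
  printf ("ashift by %2d: %d insns:", count, sketch_shift_insns[count]);
  for (i = 0; i < sketch_shift_insns[count]; i++)
    {
      int amount = sketch_shift_amounts[count][i];
      /* A negative amount flips the direction, i.e. a logical right shift.  */
      printf (" %s%d", amount < 0 ? ">>" : "<<",
              amount < 0 ? -amount : amount);
    }
  printf ("\n");
}

int
main (void)
{
  sketch_print_shift (7);       /* <<2 <<2 <<1 <<2 */
  sketch_print_shift (30);      /* <<16 >>2 <<16   */
  return 0;
}
#endif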
1883
 
1884
/* Likewise, but for shift amounts < 16, up to three highmost bits
1885
   might be clobbered.  This is typically used when combined with some
1886
   kind of sign or zero extension.  */
1887
 
1888
static const char ext_shift_insns[]    =
1889
  { 0,1,1,2,2,3,2,2,1,2,2,3,3,3,2,2,1,2,2,3,3,4,3,3,2,3,3,4,4,4,3,3};
1890
 
1891
static const short ext_shift_amounts[32][4] = {
1892
  {0}, {1}, {2}, {2, 1},
1893
  {2, 2}, {2, 1, 2}, {8, -2}, {8, -1},
1894
  {8}, {8, 1}, {8, 2}, {8, 1, 2},
1895
  {8, 2, 2}, {16, -2, -1}, {16, -2}, {16, -1},
1896
  {16}, {16, 1}, {16, 2}, {16, 1, 2},
1897
  {16, 2, 2}, {16, 2, 1, 2}, {16, -2, 8}, {16, -1, 8},
1898
  {16, 8}, {16, 1, 8}, {16, 8, 2}, {16, 8, 1, 2},
1899
  {16, 8, 2, 2}, {16, -1, -2, 16}, {16, -2, 16}, {16, -1, 16}};
1900
 
1901
/* Assuming we have a value that has been sign-extended by at least one bit,
1902
   can we use the ext_shift_amounts with the last shift turned to an arithmetic shift
1903
   to shift it by N without data loss, and quicker than by other means?
   This holds exactly for N equal to 7 or 15.  */
1904
#define EXT_SHIFT_SIGNED(n) (((n) | 8) == 15)
1905
 
1906
/* This is used in length attributes in sh.md to help compute the length
1907
   of arbitrary constant shift instructions.  */
1908
 
1909
int
1910
shift_insns_rtx (rtx insn)
1911
{
1912
  rtx set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
1913
  int shift_count = INTVAL (XEXP (set_src, 1));
1914
  enum rtx_code shift_code = GET_CODE (set_src);
1915
 
1916
  switch (shift_code)
1917
    {
1918
    case ASHIFTRT:
1919
      return ashiftrt_insns[shift_count];
1920
    case LSHIFTRT:
1921
    case ASHIFT:
1922
      return shift_insns[shift_count];
1923
    default:
1924
      gcc_unreachable ();
1925
    }
1926
}
1927
 
1928
/* Return the cost of a shift.  */
1929
 
1930
static inline int
1931
shiftcosts (rtx x)
1932
{
1933
  int value;
1934
 
1935
  if (TARGET_SHMEDIA)
1936
    return 1;
1937
 
1938
  if (GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
1939
    {
1940
      if (GET_MODE (x) == DImode
1941
          && GET_CODE (XEXP (x, 1)) == CONST_INT
1942
          && INTVAL (XEXP (x, 1)) == 1)
1943
        return 2;
1944
 
1945
      /* Everything else is invalid, because there is no pattern for it.  */
1946
      return MAX_COST;
1947
    }
1948
  /* If the shift is by a non-constant amount, it will be expensive.  */
1949
  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
1950
    return SH_DYNAMIC_SHIFT_COST;
1951
 
1952
  value = INTVAL (XEXP (x, 1));
1953
 
1954
  /* Otherwise, return the true cost in instructions.  */
1955
  if (GET_CODE (x) == ASHIFTRT)
1956
    {
1957
      int cost = ashiftrt_insns[value];
1958
      /* If SH3, then we put the constant in a reg and use shad.  */
1959
      if (cost > 1 + SH_DYNAMIC_SHIFT_COST)
1960
        cost = 1 + SH_DYNAMIC_SHIFT_COST;
1961
      return cost;
1962
    }
1963
  else
1964
    return shift_insns[value];
1965
}
1966
 
1967
/* Return the cost of an AND operation.  */
1968
 
1969
static inline int
1970
andcosts (rtx x)
1971
{
1972
  int i;
1973
 
1974
  /* Anding with a register is a single cycle and instruction.  */
1975
  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
1976
    return 1;
1977
 
1978
  i = INTVAL (XEXP (x, 1));
1979
 
1980
  if (TARGET_SHMEDIA)
1981
    {
1982
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
1983
          && (CONST_OK_FOR_I10 (INTVAL (XEXP (x, 1)))
1984
              || CONST_OK_FOR_J16 (INTVAL (XEXP (x, 1)))))
1985
        return 1;
1986
      else
1987
        return 1 + rtx_cost (XEXP (x, 1), AND);
1988
    }
1989
 
1990
  /* These constants are single cycle extu.[bw] instructions.  */
1991
  if (i == 0xff || i == 0xffff)
1992
    return 1;
1993
  /* Constants that can be used in an and immediate instruction in a single
1994
     cycle, but this requires r0, so make it a little more expensive.  */
1995
  if (CONST_OK_FOR_K08 (i))
1996
    return 2;
1997
  /* Constants that can be loaded with a mov immediate and an and.
1998
     This case is probably unnecessary.  */
1999
  if (CONST_OK_FOR_I08 (i))
2000
    return 2;
2001
  /* Any other constant requires a 2 cycle pc-relative load plus an and.
2002
     This case is probably unnecessary.  */
2003
  return 3;
2004
}
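 
/* A minimal illustrative sketch, not part of this file: it mirrors the
   cost tiers that andcosts above uses on non-SHmedia targets, evaluated
   for a few concrete masks.  The two helper predicates are stand-ins for
   CONST_OK_FOR_K08 and CONST_OK_FOR_I08 and merely assume K08 means an
   unsigned 8-bit constant and I08 a signed 8-bit constant; adjust them if
   the real constraints differ.  Kept under #if 0, never compiled.  */
#if 0
#include <stdio.h>

static int sketch_ok_for_k08 (long i) { return i >= 0 && i <= 255; }     /* assumed */
static int sketch_ok_for_i08 (long i) { return i >= -128 && i <= 127; }  /* assumed */

/* Cost, in insns, of ANDing a register with the constant I.  */
static int
sketch_and_const_cost (long i)
{
  if (i == 0xff || i == 0xffff)         /* extu.b / extu.w, one insn */
    return 1;
  if (sketch_ok_for_k08 (i))            /* and #imm,r0 - but ties up r0 */
    return 2;
  if (sketch_ok_for_i08 (i))            /* mov #imm,rn then and rn,rm */
    return 2;
  return 3;                             /* pc-relative load plus and */
}

int
main (void)
{
  printf ("0xffff -> %d\n", sketch_and_const_cost (0xffff));   /* 1 */
  printf ("0x7f   -> %d\n", sketch_and_const_cost (0x7f));     /* 2 */
  printf ("0x1fff -> %d\n", sketch_and_const_cost (0x1fff));   /* 3 */
  return 0;
}
#endif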
2005
 
2006
/* Return the cost of an addition or a subtraction.  */
2007
 
2008
static inline int
2009
addsubcosts (rtx x)
2010
{
2011
  /* Adding a register is a single cycle insn.  */
2012
  if (GET_CODE (XEXP (x, 1)) == REG
2013
      || GET_CODE (XEXP (x, 1)) == SUBREG)
2014
    return 1;
2015
 
2016
  /* Likewise for small constants.  */
2017
  if (GET_CODE (XEXP (x, 1)) == CONST_INT
2018
      && CONST_OK_FOR_ADD (INTVAL (XEXP (x, 1))))
2019
    return 1;
2020
 
2021
  if (TARGET_SHMEDIA)
2022
    switch (GET_CODE (XEXP (x, 1)))
2023
      {
2024
      case CONST:
2025
      case LABEL_REF:
2026
      case SYMBOL_REF:
2027
        return TARGET_SHMEDIA64 ? 5 : 3;
2028
 
2029
      case CONST_INT:
2030
        if (CONST_OK_FOR_I16 (INTVAL (XEXP (x, 1))))
2031
          return 2;
2032
        else if (CONST_OK_FOR_I16 (INTVAL (XEXP (x, 1)) >> 16))
2033
          return 3;
2034
        else if (CONST_OK_FOR_I16 ((INTVAL (XEXP (x, 1)) >> 16) >> 16))
2035
          return 4;
2036
 
2037
        /* Fall through.  */
2038
      default:
2039
        return 5;
2040
      }
2041
 
2042
  /* Any other constant requires a 2 cycle pc-relative load plus an
2043
     addition.  */
2044
  return 3;
2045
}
2046
 
2047
/* Return the cost of a multiply.  */
2048
static inline int
2049
multcosts (rtx x ATTRIBUTE_UNUSED)
2050
{
2051
  if (sh_multcost >= 0)
2052
    return sh_multcost;
2053
  if (TARGET_SHMEDIA)
2054
    /* ??? We have a mul insn, but it has a latency of three, and doesn't
2055
       accept constants.  Ideally, we would use a cost of one or two and
2056
       add the cost of the operand, but disregard the latter when inside loops
2057
       and loop invariant code motion is still to follow.
2058
       Using a multiply first and splitting it later if it's a loss
2059
       doesn't work because of different sign / zero extension semantics
2060
       of multiplies vs. shifts.  */
2061
    return TARGET_SMALLCODE ? 2 : 3;
2062
 
2063
  if (TARGET_SH2)
2064
    {
2065
      /* We have a mul insn, so we can never take more than the mul and the
2066
         read of the mac reg, but count more because of the latency and extra
2067
         reg usage.  */
2068
      if (TARGET_SMALLCODE)
2069
        return 2;
2070
      return 3;
2071
    }
2072
 
2073
  /* If we're aiming at small code, then just count the number of
2074
     insns in a multiply call sequence.  */
2075
  if (TARGET_SMALLCODE)
2076
    return 5;
2077
 
2078
  /* Otherwise count all the insns in the routine we'd be calling too.  */
2079
  return 20;
2080
}
2081
 
2082
/* Compute a (partial) cost for rtx X.  Return true if the complete
2083
   cost has been computed, and false if subexpressions should be
2084
   scanned.  In either case, *TOTAL contains the cost result.  */
2085
 
2086
static bool
2087
sh_rtx_costs (rtx x, int code, int outer_code, int *total)
2088
{
2089
  switch (code)
2090
    {
2091
    case CONST_INT:
2092
      if (TARGET_SHMEDIA)
2093
        {
2094
          if (INTVAL (x) == 0)
2095
            *total = 0;
2096
          else if (outer_code == AND && and_operand ((x), DImode))
2097
            *total = 0;
2098
          else if ((outer_code == IOR || outer_code == XOR
2099
                    || outer_code == PLUS)
2100
                   && CONST_OK_FOR_I10 (INTVAL (x)))
2101
            *total = 0;
2102
          else if (CONST_OK_FOR_I16 (INTVAL (x)))
2103
            *total = COSTS_N_INSNS (outer_code != SET);
2104
          else if (CONST_OK_FOR_I16 (INTVAL (x) >> 16))
2105
            *total = COSTS_N_INSNS ((outer_code != SET) + 1);
2106
          else if (CONST_OK_FOR_I16 ((INTVAL (x) >> 16) >> 16))
2107
            *total = COSTS_N_INSNS ((outer_code != SET) + 2);
2108
          else
2109
            *total = COSTS_N_INSNS ((outer_code != SET) + 3);
2110
          return true;
2111
        }
2112
      if (CONST_OK_FOR_I08 (INTVAL (x)))
2113
        *total = 0;
2114
      else if ((outer_code == AND || outer_code == IOR || outer_code == XOR)
2115
               && CONST_OK_FOR_K08 (INTVAL (x)))
2116
        *total = 1;
2117
      else
2118
        *total = 8;
2119
      return true;
2120
 
2121
    case CONST:
2122
    case LABEL_REF:
2123
    case SYMBOL_REF:
2124
      if (TARGET_SHMEDIA64)
2125
        *total = COSTS_N_INSNS (4);
2126
      else if (TARGET_SHMEDIA32)
2127
        *total = COSTS_N_INSNS (2);
2128
      else
2129
        *total = 5;
2130
      return true;
2131
 
2132
    case CONST_DOUBLE:
2133
      if (TARGET_SHMEDIA)
2134
        *total = COSTS_N_INSNS (4);
2135
      else
2136
        *total = 10;
2137
      return true;
2138
    case CONST_VECTOR:
2139
      if (x == CONST0_RTX (GET_MODE (x)))
2140
        *total = 0;
2141
      else if (sh_1el_vec (x, VOIDmode))
2142
        *total = outer_code != SET;
2143
      if (sh_rep_vec (x, VOIDmode))
2144
        *total = ((GET_MODE_UNIT_SIZE (GET_MODE (x)) + 3) / 4
2145
                  + (outer_code != SET));
2146
      *total = COSTS_N_INSNS (3) + (outer_code != SET);
2147
      return true;
2148
 
2149
    case PLUS:
2150
    case MINUS:
2151
      *total = COSTS_N_INSNS (addsubcosts (x));
2152
      return true;
2153
 
2154
    case AND:
2155
      *total = COSTS_N_INSNS (andcosts (x));
2156
      return true;
2157
 
2158
    case MULT:
2159
      *total = COSTS_N_INSNS (multcosts (x));
2160
      return true;
2161
 
2162
    case ASHIFT:
2163
    case ASHIFTRT:
2164
    case LSHIFTRT:
2165
      *total = COSTS_N_INSNS (shiftcosts (x));
2166
      return true;
2167
 
2168
    case DIV:
2169
    case UDIV:
2170
    case MOD:
2171
    case UMOD:
2172
      *total = COSTS_N_INSNS (20);
2173
      return true;
2174
 
2175
    case PARALLEL:
2176
      if (sh_1el_vec (x, VOIDmode))
2177
        *total = outer_code != SET;
2178
      if (sh_rep_vec (x, VOIDmode))
2179
        *total = ((GET_MODE_UNIT_SIZE (GET_MODE (x)) + 3) / 4
2180
                  + (outer_code != SET));
2181
      *total = COSTS_N_INSNS (3) + (outer_code != SET);
2182
      return true;
2183
 
2184
    case FLOAT:
2185
    case FIX:
2186
      *total = 100;
2187
      return true;
2188
 
2189
    default:
2190
      return false;
2191
    }
2192
}
2193
 
2194
/* Compute the cost of an address.  For the SH, all valid addresses are
2195
   the same cost.  Use a slightly higher cost for reg + reg addressing,
2196
   since it increases pressure on r0.  */
2197
 
2198
static int
2199
sh_address_cost (rtx X)
2200
{
2201
  return (GET_CODE (X) == PLUS
2202
          && ! CONSTANT_P (XEXP (X, 1))
2203
          && ! TARGET_SHMEDIA ? 1 : 0);
2204
}
2205
 
2206
/* Code to expand a shift.  */
2207
 
2208
void
2209
gen_ashift (int type, int n, rtx reg)
2210
{
2211
  /* Negative values here come from the shift_amounts array.  */
2212
  if (n < 0)
2213
    {
2214
      if (type == ASHIFT)
2215
        type = LSHIFTRT;
2216
      else
2217
        type = ASHIFT;
2218
      n = -n;
2219
    }
2220
 
2221
  switch (type)
2222
    {
2223
    case ASHIFTRT:
2224
      emit_insn (gen_ashrsi3_k (reg, reg, GEN_INT (n)));
2225
      break;
2226
    case LSHIFTRT:
2227
      if (n == 1)
2228
        emit_insn (gen_lshrsi3_m (reg, reg, GEN_INT (n)));
2229
      else
2230
        emit_insn (gen_lshrsi3_k (reg, reg, GEN_INT (n)));
2231
      break;
2232
    case ASHIFT:
2233
      emit_insn (gen_ashlsi3_std (reg, reg, GEN_INT (n)));
2234
      break;
2235
    }
2236
}
2237
 
2238
/* Same for HImode */
2239
 
2240
void
2241
gen_ashift_hi (int type, int n, rtx reg)
2242
{
2243
  /* Negative values here come from the shift_amounts array.  */
2244
  if (n < 0)
2245
    {
2246
      if (type == ASHIFT)
2247
        type = LSHIFTRT;
2248
      else
2249
        type = ASHIFT;
2250
      n = -n;
2251
    }
2252
 
2253
  switch (type)
2254
    {
2255
    case ASHIFTRT:
2256
    case LSHIFTRT:
2257
      /* We don't have HImode right shift operations because using the
2258
         ordinary 32 bit shift instructions for that doesn't generate proper
2259
         zero/sign extension.
2260
         gen_ashift_hi is only called in contexts where we know that the
2261
         sign extension works out correctly.  */
2262
      {
2263
        int offset = 0;
2264
        if (GET_CODE (reg) == SUBREG)
2265
          {
2266
            offset = SUBREG_BYTE (reg);
2267
            reg = SUBREG_REG (reg);
2268
          }
2269
        gen_ashift (type, n, gen_rtx_SUBREG (SImode, reg, offset));
2270
        break;
2271
      }
2272
    case ASHIFT:
2273
      emit_insn (gen_ashlhi3_k (reg, reg, GEN_INT (n)));
2274
      break;
2275
    }
2276
}
2277
 
2278
/* Output RTL to split a constant shift into its component SH constant
2279
   shift instructions.  */
2280
 
2281
void
2282
gen_shifty_op (int code, rtx *operands)
2283
{
2284
  int value = INTVAL (operands[2]);
2285
  int max, i;
2286
 
2287
  /* Truncate the shift count in case it is out of bounds.  */
2288
  value = value & 0x1f;
2289
 
2290
  if (value == 31)
2291
    {
2292
      if (code == LSHIFTRT)
2293
        {
2294
          emit_insn (gen_rotlsi3_1 (operands[0], operands[0]));
2295
          emit_insn (gen_movt (operands[0]));
2296
          return;
2297
        }
2298
      else if (code == ASHIFT)
2299
        {
2300
          /* There is a two instruction sequence for 31 bit left shifts,
2301
             but it requires r0.  */
2302
          if (GET_CODE (operands[0]) == REG && REGNO (operands[0]) == 0)
2303
            {
2304
              emit_insn (gen_andsi3 (operands[0], operands[0], const1_rtx));
2305
              emit_insn (gen_rotlsi3_31 (operands[0], operands[0]));
2306
              return;
2307
            }
2308
        }
2309
    }
2310
  else if (value == 0)
2311
    {
2312
      /* This can happen even when optimizing, if there were subregs before
2313
         reload.  Don't output a nop here, as this is never optimized away;
2314
         use a no-op move instead.  */
2315
      emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[0]));
2316
      return;
2317
    }
2318
 
2319
  max = shift_insns[value];
2320
  for (i = 0; i < max; i++)
2321
    gen_ashift (code, shift_amounts[value][i], operands[0]);
2322
}
2323
 
2324
/* Same as above, but optimized for values where the topmost bits don't
2325
   matter.  */
2326
 
2327
void
2328
gen_shifty_hi_op (int code, rtx *operands)
2329
{
2330
  int value = INTVAL (operands[2]);
2331
  int max, i;
2332
  void (*gen_fun) (int, int, rtx);
2333
 
2334
  /* This operation is used by and_shl for SImode values with a few
2335
     high bits known to be cleared.  */
2336
  value &= 31;
2337
  if (value == 0)
2338
    {
2339
      emit_insn (gen_nop ());
2340
      return;
2341
    }
2342
 
2343
  gen_fun = GET_MODE (operands[0]) == HImode ? gen_ashift_hi : gen_ashift;
2344
  if (code == ASHIFT)
2345
    {
2346
      max = ext_shift_insns[value];
2347
      for (i = 0; i < max; i++)
2348
        gen_fun (code, ext_shift_amounts[value][i], operands[0]);
2349
    }
2350
  else
2351
    /* When shifting right, emit the shifts in reverse order, so that
2352
       solitary negative values come first.  */
2353
    for (i = ext_shift_insns[value] - 1; i >= 0; i--)
2354
      gen_fun (code, ext_shift_amounts[value][i], operands[0]);
2355
}
2356
 
2357
/* Output RTL for an arithmetic right shift.  */
2358
 
2359
/* ??? Rewrite to use super-optimizer sequences.  */
2360
 
2361
int
2362
expand_ashiftrt (rtx *operands)
2363
{
2364
  rtx wrk;
2365
  char func[18];
2366
  int value;
2367
 
2368
  if (TARGET_SH3)
2369
    {
2370
      if (GET_CODE (operands[2]) != CONST_INT)
2371
        {
2372
          rtx count = copy_to_mode_reg (SImode, operands[2]);
2373
          emit_insn (gen_negsi2 (count, count));
2374
          emit_insn (gen_ashrsi3_d (operands[0], operands[1], count));
2375
          return 1;
2376
        }
2377
      else if (ashiftrt_insns[INTVAL (operands[2]) & 31]
2378
               > 1 + SH_DYNAMIC_SHIFT_COST)
2379
        {
2380
          rtx count
2381
            = force_reg (SImode, GEN_INT (- (INTVAL (operands[2]) & 31)));
2382
          emit_insn (gen_ashrsi3_d (operands[0], operands[1], count));
2383
          return 1;
2384
        }
2385
    }
2386
  if (GET_CODE (operands[2]) != CONST_INT)
2387
    return 0;
2388
 
2389
  value = INTVAL (operands[2]) & 31;
2390
 
2391
  if (value == 31)
2392
    {
2393
      /* If we are called from abs expansion, arrange things so that
2394
         we can use a single MT instruction that doesn't clobber the source,
2395
         if LICM can hoist out the load of the constant zero.  */
2396
      if (currently_expanding_to_rtl)
2397
        {
2398
          emit_insn (gen_cmpgtsi_t (force_reg (SImode, CONST0_RTX (SImode)),
2399
                                    operands[1]));
2400
          emit_insn (gen_mov_neg_si_t (operands[0]));
2401
          return 1;
2402
        }
2403
      emit_insn (gen_ashrsi2_31 (operands[0], operands[1]));
2404
      return 1;
2405
    }
2406
  else if (value >= 16 && value <= 19)
2407
    {
2408
      wrk = gen_reg_rtx (SImode);
2409
      emit_insn (gen_ashrsi2_16 (wrk, operands[1]));
2410
      value -= 16;
2411
      while (value--)
2412
        gen_ashift (ASHIFTRT, 1, wrk);
2413
      emit_move_insn (operands[0], wrk);
2414
      return 1;
2415
    }
2416
  /* Expand a short sequence inline; for longer ones, call a magic routine.  */
2417
  else if (value <= 5)
2418
    {
2419
      wrk = gen_reg_rtx (SImode);
2420
      emit_move_insn (wrk, operands[1]);
2421
      while (value--)
2422
        gen_ashift (ASHIFTRT, 1, wrk);
2423
      emit_move_insn (operands[0], wrk);
2424
      return 1;
2425
    }
2426
 
2427
  wrk = gen_reg_rtx (Pmode);
2428
 
2429
  /* Load the value into an arg reg and call a helper.  */
2430
  emit_move_insn (gen_rtx_REG (SImode, 4), operands[1]);
2431
  sprintf (func, "__ashiftrt_r4_%d", value);
2432
  function_symbol (wrk, func, SFUNC_STATIC);
2433
  emit_insn (gen_ashrsi3_n (GEN_INT (value), wrk));
2434
  emit_move_insn (operands[0], gen_rtx_REG (SImode, 4));
2435
  return 1;
2436
}
2437
 
2438
int
2439
sh_dynamicalize_shift_p (rtx count)
2440
{
2441
  return shift_insns[INTVAL (count)] > 1 + SH_DYNAMIC_SHIFT_COST;
2442
}
2443
 
2444
/* Try to find a good way to implement the combiner pattern
2445
  [(set (match_operand:SI 0 "register_operand" "r")
2446
        (and:SI (ashift:SI (match_operand:SI 1 "register_operand" "r")
2447
                           (match_operand:SI 2 "const_int_operand" "n"))
2448
                (match_operand:SI 3 "const_int_operand" "n"))) .
2449
  LEFT_RTX is operand 2 in the above pattern, and MASK_RTX is operand 3.
2450
  return 0 for simple right / left or left/right shift combination.
2451
  return 1 for a combination of shifts with zero_extend.
2452
  return 2 for a combination of shifts with an AND that needs r0.
2453
  return 3 for a combination of shifts with an AND that needs an extra
2454
    scratch register, when the three highmost bits of the AND mask are clear.
2455
  return 4 for a combination of shifts with an AND that needs an extra
2456
    scratch register, when any of the three highmost bits of the AND mask
2457
    is set.
2458
  If ATTRP is set, store an initial right shift width in ATTRP[0],
2459
  and the instruction length in ATTRP[1] .  These values are not valid
2460
  when returning 0.
2461
  When ATTRP is set and returning 1, ATTRP[2] gets set to the index into
2462
  shift_amounts for the last shift value that is to be used before the
2463
  sign extend.  */
2464
int
2465
shl_and_kind (rtx left_rtx, rtx mask_rtx, int *attrp)
2466
{
2467
  unsigned HOST_WIDE_INT mask, lsb, mask2, lsb2;
2468
  int left = INTVAL (left_rtx), right;
2469
  int best = 0;
2470
  int cost, best_cost = 10000;
2471
  int best_right = 0, best_len = 0;
2472
  int i;
2473
  int can_ext;
2474
 
2475
  if (left < 0 || left > 31)
2476
    return 0;
2477
  if (GET_CODE (mask_rtx) == CONST_INT)
2478
    mask = (unsigned HOST_WIDE_INT) INTVAL (mask_rtx) >> left;
2479
  else
2480
    mask = (unsigned HOST_WIDE_INT) GET_MODE_MASK (SImode) >> left;
2481
  /* Can this be expressed as a right shift / left shift pair?  */
2482
  lsb = ((mask ^ (mask - 1)) >> 1) + 1;
2483
  right = exact_log2 (lsb);
2484
  mask2 = ~(mask + lsb - 1);
2485
  lsb2 = ((mask2 ^ (mask2 - 1)) >> 1) + 1;
2486
  /* mask has no zeroes but trailing zeroes <==> ! mask2 */
2487
  if (! mask2)
2488
    best_cost = shift_insns[right] + shift_insns[right + left];
2489
  /* mask has no trailing zeroes <==> ! right */
2490
  else if (! right && mask2 == ~(lsb2 - 1))
2491
    {
2492
      int late_right = exact_log2 (lsb2);
2493
      best_cost = shift_insns[left + late_right] + shift_insns[late_right];
2494
    }
2495
  /* Try to use zero extend.  */
2496
  if (mask2 == ~(lsb2 - 1))
2497
    {
2498
      int width, first;
2499
 
2500
      for (width = 8; width <= 16; width += 8)
2501
        {
2502
          /* Can we zero-extend right away?  */
2503
          if (lsb2 == (unsigned HOST_WIDE_INT) 1 << width)
2504
            {
2505
              cost
2506
                = 1 + ext_shift_insns[right] + ext_shift_insns[left + right];
2507
              if (cost < best_cost)
2508
                {
2509
                  best = 1;
2510
                  best_cost = cost;
2511
                  best_right = right;
2512
                  best_len = cost;
2513
                  if (attrp)
2514
                    attrp[2] = -1;
2515
                }
2516
              continue;
2517
            }
2518
          /* ??? Could try to put zero extend into initial right shift,
2519
             or even shift a bit left before the right shift.  */
2520
          /* Determine value of first part of left shift, to get to the
2521
             zero extend cut-off point.  */
2522
          first = width - exact_log2 (lsb2) + right;
2523
          if (first >= 0 && right + left - first >= 0)
2524
            {
2525
              cost = ext_shift_insns[right] + ext_shift_insns[first] + 1
2526
                + ext_shift_insns[right + left - first];
2527
              if (cost < best_cost)
2528
                {
2529
                  best = 1;
2530
                  best_cost = cost;
2531
                  best_right = right;
2532
                  best_len = cost;
2533
                  if (attrp)
2534
                    attrp[2] = first;
2535
                }
2536
            }
2537
        }
2538
    }
2539
  /* Try to use r0 AND pattern */
2540
  for (i = 0; i <= 2; i++)
2541
    {
2542
      if (i > right)
2543
        break;
2544
      if (! CONST_OK_FOR_K08 (mask >> i))
2545
        continue;
2546
      cost = (i != 0) + 2 + ext_shift_insns[left + i];
2547
      if (cost < best_cost)
2548
        {
2549
          best = 2;
2550
          best_cost = cost;
2551
          best_right = i;
2552
          best_len = cost - 1;
2553
        }
2554
    }
2555
  /* Try to use a scratch register to hold the AND operand.  */
2556
  can_ext = ((mask << left) & ((unsigned HOST_WIDE_INT) 3 << 30)) == 0;
2557
  for (i = 0; i <= 2; i++)
2558
    {
2559
      if (i > right)
2560
        break;
2561
      cost = (i != 0) + (CONST_OK_FOR_I08 (mask >> i) ? 2 : 3)
2562
        + (can_ext ? ext_shift_insns : shift_insns)[left + i];
2563
      if (cost < best_cost)
2564
        {
2565
          best = 4 - can_ext;
2566
          best_cost = cost;
2567
          best_right = i;
2568
          best_len = cost - 1 - ! CONST_OK_FOR_I08 (mask >> i);
2569
        }
2570
    }
2571
 
2572
  if (attrp)
2573
    {
2574
      attrp[0] = best_right;
2575
      attrp[1] = best_len;
2576
    }
2577
  return best;
2578
}
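 
/* A minimal illustrative sketch, not part of this file: the bit tricks
   used at the top of shl_and_kind above, evaluated for one concrete
   32-bit mask so the intermediate values can be seen.  lsb isolates the
   lowest set bit of the mask, mask2 is zero exactly when the mask's only
   zero bits are its trailing zeroes, and lsb2 marks the first zero bit
   above the mask's low run of ones.  Kept under #if 0, never compiled.  */
#if 0
#include <stdio.h>

int
main (void)
{
  unsigned int mask = 0x0ff0;   /* bits 4..11 set */
  unsigned int lsb, mask2, lsb2;

  /* Lowest set bit of MASK.  */
  lsb = ((mask ^ (mask - 1)) >> 1) + 1;

  /* Zero iff MASK is a single run of ones reaching up to bit 31,
     i.e. "mask has no zeroes but trailing zeroes" in the comment above.  */
  mask2 = ~(mask + lsb - 1);

  /* Lowest set bit of MASK2: the first zero above MASK's run of ones.  */
  lsb2 = ((mask2 ^ (mask2 - 1)) >> 1) + 1;

  printf ("mask  = %#x\n", mask);    /* 0xff0                          */
  printf ("lsb   = %#x\n", lsb);     /* 0x10 -> initial right shift 4  */
  printf ("mask2 = %#x\n", mask2);   /* 0xfffff000, nonzero            */
  printf ("lsb2  = %#x\n", lsb2);    /* 0x1000                         */
  return 0;
}
#endif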
2579
 
2580
/* This is used in length attributes of the unnamed instructions
2581
   corresponding to shl_and_kind return values of 1 and 2.  */
2582
int
2583
shl_and_length (rtx insn)
2584
{
2585
  rtx set_src, left_rtx, mask_rtx;
2586
  int attributes[3];
2587
 
2588
  set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
2589
  left_rtx = XEXP (XEXP (set_src, 0), 1);
2590
  mask_rtx = XEXP (set_src, 1);
2591
  shl_and_kind (left_rtx, mask_rtx, attributes);
2592
  return attributes[1];
2593
}
2594
 
2595
/* This is used in the length attribute of the and_shl_scratch instruction.  */
2596
 
2597
int
2598
shl_and_scr_length (rtx insn)
2599
{
2600
  rtx set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
2601
  int len = shift_insns[INTVAL (XEXP (set_src, 1))];
2602
  rtx op = XEXP (set_src, 0);
2603
  len += shift_insns[INTVAL (XEXP (op, 1))] + 1;
2604
  op = XEXP (XEXP (op, 0), 0);
2605
  return len + shift_insns[INTVAL (XEXP (op, 1))];
2606
}
2607
 
2608
/* Generate rtl for instructions for which shl_and_kind advised a particular
2609
   method of generating them, i.e. returned zero.  */
2610
 
2611
int
2612
gen_shl_and (rtx dest, rtx left_rtx, rtx mask_rtx, rtx source)
2613
{
2614
  int attributes[3];
2615
  unsigned HOST_WIDE_INT mask;
2616
  int kind = shl_and_kind (left_rtx, mask_rtx, attributes);
2617
  int right, total_shift;
2618
  void (*shift_gen_fun) (int, rtx *) = gen_shifty_hi_op;
2619
 
2620
  right = attributes[0];
2621
  total_shift = INTVAL (left_rtx) + right;
2622
  mask = (unsigned HOST_WIDE_INT) INTVAL (mask_rtx) >> total_shift;
2623
  switch (kind)
2624
    {
2625
    default:
2626
      return -1;
2627
    case 1:
2628
      {
2629
        int first = attributes[2];
2630
        rtx operands[3];
2631
 
2632
        if (first < 0)
2633
          {
2634
            emit_insn ((mask << right) <= 0xff
2635
                       ? gen_zero_extendqisi2 (dest,
2636
                                               gen_lowpart (QImode, source))
2637
                       : gen_zero_extendhisi2 (dest,
2638
                                               gen_lowpart (HImode, source)));
2639
            source = dest;
2640
          }
2641
        if (source != dest)
2642
          emit_insn (gen_movsi (dest, source));
2643
        operands[0] = dest;
2644
        if (right)
2645
          {
2646
            operands[2] = GEN_INT (right);
2647
            gen_shifty_hi_op (LSHIFTRT, operands);
2648
          }
2649
        if (first > 0)
2650
          {
2651
            operands[2] = GEN_INT (first);
2652
            gen_shifty_hi_op (ASHIFT, operands);
2653
            total_shift -= first;
2654
            mask <<= first;
2655
          }
2656
        if (first >= 0)
2657
          emit_insn (mask <= 0xff
2658
                     ? gen_zero_extendqisi2 (dest, gen_lowpart (QImode, dest))
2659
                     : gen_zero_extendhisi2 (dest, gen_lowpart (HImode, dest)));
2660
        if (total_shift > 0)
2661
          {
2662
            operands[2] = GEN_INT (total_shift);
2663
            gen_shifty_hi_op (ASHIFT, operands);
2664
          }
2665
        break;
2666
      }
2667
    case 4:
2668
      shift_gen_fun = gen_shifty_op;
2669
    case 3:
2670
      /* If the topmost bit that matters is set, set the topmost bits
2671
         that don't matter.  This way, we might be able to get a shorter
2672
         signed constant.  */
2673
      if (mask & ((HOST_WIDE_INT) 1 << (31 - total_shift)))
2674
        mask |= (HOST_WIDE_INT) ~0 << (31 - total_shift);
2675
    case 2:
2676
      /* Don't expand fine-grained when combining, because that will
2677
         make the pattern fail.  */
2678
      if (currently_expanding_to_rtl
2679
          || reload_in_progress || reload_completed)
2680
        {
2681
          rtx operands[3];
2682
 
2683
          /* Cases 3 and 4 should be handled by this split
2684
             only while combining  */
2685
          gcc_assert (kind <= 2);
2686
          if (right)
2687
            {
2688
              emit_insn (gen_lshrsi3 (dest, source, GEN_INT (right)));
2689
              source = dest;
2690
            }
2691
          emit_insn (gen_andsi3 (dest, source, GEN_INT (mask)));
2692
          if (total_shift)
2693
            {
2694
              operands[0] = dest;
2695
              operands[1] = dest;
2696
              operands[2] = GEN_INT (total_shift);
2697
              shift_gen_fun (ASHIFT, operands);
2698
            }
2699
          break;
2700
        }
2701
      else
2702
        {
2703
          int neg = 0;
2704
          if (kind != 4 && total_shift < 16)
2705
            {
2706
              neg = -ext_shift_amounts[total_shift][1];
2707
              if (neg > 0)
2708
                neg -= ext_shift_amounts[total_shift][2];
2709
              else
2710
                neg = 0;
2711
            }
2712
          emit_insn (gen_and_shl_scratch (dest, source,
2713
                                          GEN_INT (right),
2714
                                          GEN_INT (mask),
2715
                                          GEN_INT (total_shift + neg),
2716
                                          GEN_INT (neg)));
2717
          emit_insn (gen_movsi (dest, dest));
2718
          break;
2719
        }
2720
    }
2721
  return 0;
2722
}
2723
 
2724
/* Try to find a good way to implement the combiner pattern
2725
  [(set (match_operand:SI 0 "register_operand" "=r")
2726
        (sign_extract:SI (ashift:SI (match_operand:SI 1 "register_operand" "r")
2727
                                    (match_operand:SI 2 "const_int_operand" "n")
2728
                         (match_operand:SI 3 "const_int_operand" "n")
2729
                         (const_int 0)))
2730
   (clobber (reg:SI T_REG))]
2731
  LEFT_RTX is operand 2 in the above pattern, and SIZE_RTX is operand 3.
2732
  return 0 for simple left / right shift combination.
2733
  return 1 for left shift / 8 bit sign extend / left shift.
2734
  return 2 for left shift / 16 bit sign extend / left shift.
2735
  return 3 for left shift / 8 bit sign extend / shift / sign extend.
2736
  return 4 for left shift / 16 bit sign extend / shift / sign extend.
2737
  return 5 for left shift / 16 bit sign extend / right shift
2738
  return 6 for < 8 bit sign extend / left shift.
2739
  return 7 for < 8 bit sign extend / left shift / single right shift.
2740
  If COSTP is nonzero, assign the calculated cost to *COSTP.  */
2741
 
2742
int
2743
shl_sext_kind (rtx left_rtx, rtx size_rtx, int *costp)
2744
{
2745
  int left, size, insize, ext;
2746
  int cost = 0, best_cost;
2747
  int kind;
2748
 
2749
  left = INTVAL (left_rtx);
2750
  size = INTVAL (size_rtx);
2751
  insize = size - left;
2752
  gcc_assert (insize > 0);
2753
  /* Default to left / right shift.  */
2754
  kind = 0;
2755
  best_cost = shift_insns[32 - insize] + ashiftrt_insns[32 - size];
2756
  if (size <= 16)
2757
    {
2758
      /* 16 bit shift / sign extend / 16 bit shift */
2759
      cost = shift_insns[16 - insize] + 1 + ashiftrt_insns[16 - size];
2760
      /* If ashiftrt_insns[16 - size] is 8, this choice will be overridden
2761
         below, by alternative 3 or something even better.  */
2762
      if (cost < best_cost)
2763
        {
2764
          kind = 5;
2765
          best_cost = cost;
2766
        }
2767
    }
2768
  /* Try a plain sign extend between two shifts.  */
2769
  for (ext = 16; ext >= insize; ext -= 8)
2770
    {
2771
      if (ext <= size)
2772
        {
2773
          cost = ext_shift_insns[ext - insize] + 1 + shift_insns[size - ext];
2774
          if (cost < best_cost)
2775
            {
2776
              kind = ext / (unsigned) 8;
2777
              best_cost = cost;
2778
            }
2779
        }
2780
      /* Check if we can do a sloppy shift with a final signed shift
2781
         restoring the sign.  */
2782
      if (EXT_SHIFT_SIGNED (size - ext))
2783
        cost = ext_shift_insns[ext - insize] + ext_shift_insns[size - ext] + 1;
2784
      /* If not, maybe it's still cheaper to do the second shift sloppy,
2785
         and do a final sign extend?  */
2786
      else if (size <= 16)
2787
        cost = ext_shift_insns[ext - insize] + 1
2788
          + ext_shift_insns[size > ext ? size - ext : ext - size] + 1;
2789
      else
2790
        continue;
2791
      if (cost < best_cost)
2792
        {
2793
          kind = ext / (unsigned) 8 + 2;
2794
          best_cost = cost;
2795
        }
2796
    }
2797
  /* Check if we can sign extend in r0 */
2798
  if (insize < 8)
2799
    {
2800
      cost = 3 + shift_insns[left];
2801
      if (cost < best_cost)
2802
        {
2803
          kind = 6;
2804
          best_cost = cost;
2805
        }
2806
      /* Try the same with a final signed shift.  */
2807
      if (left < 31)
2808
        {
2809
          cost = 3 + ext_shift_insns[left + 1] + 1;
2810
          if (cost < best_cost)
2811
            {
2812
              kind = 7;
2813
              best_cost = cost;
2814
            }
2815
        }
2816
    }
2817
  if (TARGET_SH3)
2818
    {
2819
      /* Try to use a dynamic shift.  */
2820
      cost = shift_insns[32 - insize] + 1 + SH_DYNAMIC_SHIFT_COST;
2821
      if (cost < best_cost)
2822
        {
2823
          kind = 0;
2824
          best_cost = cost;
2825
        }
2826
    }
2827
  if (costp)
2828
    *costp = cost;
2829
  return kind;
2830
}
2831
 
2832
/* Function to be used in the length attribute of the instructions
2833
   implementing this pattern.  */
2834
 
2835
int
2836
shl_sext_length (rtx insn)
2837
{
2838
  rtx set_src, left_rtx, size_rtx;
2839
  int cost;
2840
 
2841
  set_src = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
2842
  left_rtx = XEXP (XEXP (set_src, 0), 1);
2843
  size_rtx = XEXP (set_src, 1);
2844
  shl_sext_kind (left_rtx, size_rtx, &cost);
2845
  return cost;
2846
}
2847
 
2848
/* Generate rtl for this pattern */
2849
 
2850
int
2851
gen_shl_sext (rtx dest, rtx left_rtx, rtx size_rtx, rtx source)
2852
{
2853
  int kind;
2854
  int left, size, insize, cost;
2855
  rtx operands[3];
2856
 
2857
  kind = shl_sext_kind (left_rtx, size_rtx, &cost);
2858
  left = INTVAL (left_rtx);
2859
  size = INTVAL (size_rtx);
2860
  insize = size - left;
2861
  switch (kind)
2862
    {
2863
    case 1:
2864
    case 2:
2865
    case 3:
2866
    case 4:
2867
      {
2868
        int ext = kind & 1 ? 8 : 16;
2869
        int shift2 = size - ext;
2870
 
2871
        /* Don't expand fine-grained when combining, because that will
2872
           make the pattern fail.  */
2873
        if (! currently_expanding_to_rtl
2874
            && ! reload_in_progress && ! reload_completed)
2875
          {
2876
            emit_insn (gen_shl_sext_ext (dest, source, left_rtx, size_rtx));
2877
            emit_insn (gen_movsi (dest, source));
2878
            break;
2879
          }
2880
        if (dest != source)
2881
          emit_insn (gen_movsi (dest, source));
2882
        operands[0] = dest;
2883
        if (ext - insize)
2884
          {
2885
            operands[2] = GEN_INT (ext - insize);
2886
            gen_shifty_hi_op (ASHIFT, operands);
2887
          }
2888
        emit_insn (kind & 1
2889
                   ? gen_extendqisi2 (dest, gen_lowpart (QImode, dest))
2890
                   : gen_extendhisi2 (dest, gen_lowpart (HImode, dest)));
2891
        if (kind <= 2)
2892
          {
2893
            if (shift2)
2894
              {
2895
                operands[2] = GEN_INT (shift2);
2896
                gen_shifty_op (ASHIFT, operands);
2897
              }
2898
          }
2899
        else
2900
          {
2901
            if (shift2 > 0)
2902
              {
2903
                if (EXT_SHIFT_SIGNED (shift2))
2904
                  {
2905
                    operands[2] = GEN_INT (shift2 + 1);
2906
                    gen_shifty_op (ASHIFT, operands);
2907
                    operands[2] = const1_rtx;
2908
                    gen_shifty_op (ASHIFTRT, operands);
2909
                    break;
2910
                  }
2911
                operands[2] = GEN_INT (shift2);
2912
                gen_shifty_hi_op (ASHIFT, operands);
2913
              }
2914
            else if (shift2)
2915
              {
2916
                operands[2] = GEN_INT (-shift2);
2917
                gen_shifty_hi_op (LSHIFTRT, operands);
2918
              }
2919
            emit_insn (size <= 8
2920
                       ? gen_extendqisi2 (dest, gen_lowpart (QImode, dest))
2921
                       : gen_extendhisi2 (dest, gen_lowpart (HImode, dest)));
2922
          }
2923
        break;
2924
      }
2925
    case 5:
2926
      {
2927
        int i = 16 - size;
2928
        if (! currently_expanding_to_rtl
2929
            && ! reload_in_progress && ! reload_completed)
2930
          emit_insn (gen_shl_sext_ext (dest, source, left_rtx, size_rtx));
2931
        else
2932
          {
2933
            operands[0] = dest;
2934
            operands[2] = GEN_INT (16 - insize);
2935
            gen_shifty_hi_op (ASHIFT, operands);
2936
            emit_insn (gen_extendhisi2 (dest, gen_lowpart (HImode, dest)));
2937
          }
2938
        /* Don't use gen_ashrsi3 because it generates new pseudos.  */
2939
        while (--i >= 0)
2940
          gen_ashift (ASHIFTRT, 1, dest);
2941
        break;
2942
      }
2943
    case 6:
2944
    case 7:
2945
      /* Don't expand fine-grained when combining, because that will
2946
         make the pattern fail.  */
2947
      if (! currently_expanding_to_rtl
2948
          && ! reload_in_progress && ! reload_completed)
2949
        {
2950
          emit_insn (gen_shl_sext_ext (dest, source, left_rtx, size_rtx));
2951
          emit_insn (gen_movsi (dest, source));
2952
          break;
2953
        }
2954
      emit_insn (gen_andsi3 (dest, source, GEN_INT ((1 << insize) - 1)));
2955
      emit_insn (gen_xorsi3 (dest, dest, GEN_INT (1 << (insize - 1))));
2956
      emit_insn (gen_addsi3 (dest, dest, GEN_INT (-1 << (insize - 1))));
2957
      operands[0] = dest;
2958
      operands[2] = kind == 7 ? GEN_INT (left + 1) : left_rtx;
2959
      gen_shifty_op (ASHIFT, operands);
2960
      if (kind == 7)
2961
        emit_insn (gen_ashrsi3_k (dest, dest, const1_rtx));
2962
      break;
2963
    default:
2964
      return -1;
2965
    }
2966
  return 0;
2967
}
2968
 
2969
/* Prefix a symbol_ref name with "datalabel".  */
2970
 
2971
rtx
2972
gen_datalabel_ref (rtx sym)
2973
{
2974
  const char *str;
2975
 
2976
  if (GET_CODE (sym) == LABEL_REF)
2977
    return gen_rtx_CONST (GET_MODE (sym),
2978
                          gen_rtx_UNSPEC (GET_MODE (sym),
2979
                                          gen_rtvec (1, sym),
2980
                                          UNSPEC_DATALABEL));
2981
 
2982
  gcc_assert (GET_CODE (sym) == SYMBOL_REF);
2983
 
2984
  str = XSTR (sym, 0);
2985
  /* Share all SYMBOL_REF strings with the same value - that is important
2986
     for cse.  */
2987
  str = IDENTIFIER_POINTER (get_identifier (str));
2988
  XSTR (sym, 0) = str;
2989
 
2990
  return sym;
2991
}
2992
 
2993
 
2994
static alloc_pool label_ref_list_pool;
2995
 
2996
typedef struct label_ref_list_d
2997
{
2998
  rtx label;
2999
  struct label_ref_list_d *next;
3000
} *label_ref_list_t;
3001
 
3002
/* The SH cannot load a large constant into a register; constants have to
3003
   come from a pc relative load.  The reference of a pc relative load
3004
   instruction must be less than 1k in front of the instruction.  This
3005
   means that we often have to dump a constant inside a function, and
3006
   generate code to branch around it.
3007
 
3008
   It is important to minimize this, since the branches will slow things
3009
   down and make things bigger.
3010
 
3011
   Worst case code looks like:
3012
 
3013
   mov.l L1,rn
3014
   bra   L2
3015
   nop
3016
   align
3017
   L1:   .long value
3018
   L2:
3019
   ..
3020
 
3021
   mov.l L3,rn
3022
   bra   L4
3023
   nop
3024
   align
3025
   L3:   .long value
3026
   L4:
3027
   ..
3028
 
3029
   We fix this by performing a scan before scheduling, which notices which
3030
   instructions need to have their operands fetched from the constant table
3031
   and builds the table.
3032
 
3033
   The algorithm is:
3034
 
3035
   scan, find an instruction which needs a pcrel move.  Look forward, find the
3036
   last barrier which is within MAX_COUNT bytes of the requirement.
3037
   If there isn't one, make one.  Process all the instructions between
3038
   the find and the barrier.
3039
 
3040
   In the above example, we can tell that L3 is within 1k of L1, so
3041
   the first move can be shrunk from the 3 insn+constant sequence into
3042
   just 1 insn, and the constant moved to L3 to make:
3043
 
3044
   mov.l        L1,rn
3045
   ..
3046
   mov.l        L3,rn
3047
   bra          L4
3048
   nop
3049
   align
3050
   L3:.long value
3051
   L4:.long value
3052
 
3053
   Then the second move becomes the target for the shortening process.  */
3054
 
3055
typedef struct
3056
{
3057
  rtx value;                    /* Value in table.  */
3058
  rtx label;                    /* Label of value.  */
3059
  label_ref_list_t wend;        /* End of window.  */
3060
  enum machine_mode mode;       /* Mode of value.  */
3061
 
3062
  /* True if this constant is accessed as part of a post-increment
3063
     sequence.  Note that HImode constants are never accessed in this way.  */
3064
  bool part_of_sequence_p;
3065
} pool_node;
3066
 
3067
/* The maximum number of constants that can fit into one pool, since
3068
   constants in the range 0..510 are at least 2 bytes long, and in the
3069
   range from there to 1018 at least 4 bytes.  */
3070
 
3071
#define MAX_POOL_SIZE 372
3072
static pool_node pool_vector[MAX_POOL_SIZE];
3073
static int pool_size;
3074
static rtx pool_window_label;
3075
static int pool_window_last;
3076
 
3077
static int max_labelno_before_reorg;
3078
 
3079
/* ??? If we need a constant in HImode which is the truncated value of a
3080
   constant we need in SImode, we could combine the two entries thus saving
3081
   two bytes.  Is this common enough to be worth the effort of implementing
3082
   it?  */
3083
 
3084
/* ??? This stuff should be done at the same time that we shorten branches.
3085
   As it is now, we must assume that all branches are the maximum size, and
3086
   this causes us to almost always output constant pools sooner than
3087
   necessary.  */
3088
 
3089
/* Add a constant to the pool and return its label.  */
3090
 
3091
static rtx
3092
add_constant (rtx x, enum machine_mode mode, rtx last_value)
3093
{
3094
  int i;
3095
  rtx lab, new;
3096
  label_ref_list_t ref, newref;
3097
 
3098
  /* First see if we've already got it.  */
3099
  for (i = 0; i < pool_size; i++)
3100
    {
3101
      if (x->code == pool_vector[i].value->code
3102
          && mode == pool_vector[i].mode)
3103
        {
3104
          if (x->code == CODE_LABEL)
3105
            {
3106
              if (XINT (x, 3) != XINT (pool_vector[i].value, 3))
3107
                continue;
3108
            }
3109
          if (rtx_equal_p (x, pool_vector[i].value))
3110
            {
3111
              lab = new = 0;
3112
              if (! last_value
3113
                  || ! i
3114
                  || ! rtx_equal_p (last_value, pool_vector[i-1].value))
3115
                {
3116
                  new = gen_label_rtx ();
3117
                  LABEL_REFS (new) = pool_vector[i].label;
3118
                  pool_vector[i].label = lab = new;
3119
                }
3120
              if (lab && pool_window_label)
3121
                {
3122
                  newref = (label_ref_list_t) pool_alloc (label_ref_list_pool);
3123
                  newref->label = pool_window_label;
3124
                  ref = pool_vector[pool_window_last].wend;
3125
                  newref->next = ref;
3126
                  pool_vector[pool_window_last].wend = newref;
3127
                }
3128
              if (new)
3129
                pool_window_label = new;
3130
              pool_window_last = i;
3131
              return lab;
3132
            }
3133
        }
3134
    }
3135
 
3136
  /* Need a new one.  */
3137
  pool_vector[pool_size].value = x;
3138
  if (last_value && rtx_equal_p (last_value, pool_vector[pool_size - 1].value))
3139
    {
3140
      lab = 0;
3141
      pool_vector[pool_size - 1].part_of_sequence_p = true;
3142
    }
3143
  else
3144
    lab = gen_label_rtx ();
3145
  pool_vector[pool_size].mode = mode;
3146
  pool_vector[pool_size].label = lab;
3147
  pool_vector[pool_size].wend = NULL;
3148
  pool_vector[pool_size].part_of_sequence_p = (lab == 0);
3149
  if (lab && pool_window_label)
3150
    {
3151
      newref = (label_ref_list_t) pool_alloc (label_ref_list_pool);
3152
      newref->label = pool_window_label;
3153
      ref = pool_vector[pool_window_last].wend;
3154
      newref->next = ref;
3155
      pool_vector[pool_window_last].wend = newref;
3156
    }
3157
  if (lab)
3158
    pool_window_label = lab;
3159
  pool_window_last = pool_size;
3160
  pool_size++;
3161
  return lab;
3162
}
3163
 
3164
/* Output the literal table.  START, if nonzero, is the first instruction
3165
   this table is needed for, and also indicates that there is at least one
3166
   casesi_worker_2 instruction; we have to emit the operand3 labels from
3167
   these insns at a 4-byte aligned position.  BARRIER is the barrier
3168
   after which we are to place the table.  */
3169
 
3170
static void
3171
dump_table (rtx start, rtx barrier)
3172
{
3173
  rtx scan = barrier;
3174
  int i;
3175
  int need_align = 1;
3176
  rtx lab;
3177
  label_ref_list_t ref;
3178
  int have_df = 0;
3179
 
3180
  /* Do two passes; the first time, dump out the HI sized constants.  */
3181
 
3182
  for (i = 0; i < pool_size; i++)
3183
    {
3184
      pool_node *p = &pool_vector[i];
3185
 
3186
      if (p->mode == HImode)
3187
        {
3188
          if (need_align)
3189
            {
3190
              scan = emit_insn_after (gen_align_2 (), scan);
3191
              need_align = 0;
3192
            }
3193
          for (lab = p->label; lab; lab = LABEL_REFS (lab))
3194
            scan = emit_label_after (lab, scan);
3195
          scan = emit_insn_after (gen_consttable_2 (p->value, const0_rtx),
3196
                                  scan);
3197
          for (ref = p->wend; ref; ref = ref->next)
3198
            {
3199
              lab = ref->label;
3200
              scan = emit_insn_after (gen_consttable_window_end (lab), scan);
3201
            }
3202
        }
3203
      else if (p->mode == DFmode)
3204
        have_df = 1;
3205
    }
3206
 
3207
  need_align = 1;
3208
 
3209
  if (start)
3210
    {
3211
      scan = emit_insn_after (gen_align_4 (), scan);
3212
      need_align = 0;
3213
      for (; start != barrier; start = NEXT_INSN (start))
3214
        if (GET_CODE (start) == INSN
3215
            && recog_memoized (start) == CODE_FOR_casesi_worker_2)
3216
          {
3217
            rtx src = SET_SRC (XVECEXP (PATTERN (start), 0, 0));
3218
            rtx lab = XEXP (XVECEXP (src, 0, 3), 0);
3219
 
3220
            scan = emit_label_after (lab, scan);
3221
          }
3222
    }
3223
  if (TARGET_FMOVD && TARGET_ALIGN_DOUBLE && have_df)
3224
    {
3225
      rtx align_insn = NULL_RTX;
3226
 
3227
      scan = emit_label_after (gen_label_rtx (), scan);
3228
      scan = emit_insn_after (gen_align_log (GEN_INT (3)), scan);
3229
      need_align = 0;
3230
 
3231
      for (i = 0; i < pool_size; i++)
3232
        {
3233
          pool_node *p = &pool_vector[i];
3234
 
3235
          switch (p->mode)
3236
            {
3237
            case HImode:
3238
              break;
3239
            case SImode:
3240
            case SFmode:
3241
              if (align_insn && !p->part_of_sequence_p)
3242
                {
3243
                  for (lab = p->label; lab; lab = LABEL_REFS (lab))
3244
                    emit_label_before (lab, align_insn);
3245
                  emit_insn_before (gen_consttable_4 (p->value, const0_rtx),
3246
                                    align_insn);
3247
                  for (ref = p->wend; ref; ref = ref->next)
3248
                    {
3249
                      lab = ref->label;
3250
                      emit_insn_before (gen_consttable_window_end (lab),
3251
                                        align_insn);
3252
                    }
3253
                  delete_insn (align_insn);
3254
                  align_insn = NULL_RTX;
3255
                  continue;
3256
                }
3257
              else
3258
                {
3259
                  for (lab = p->label; lab; lab = LABEL_REFS (lab))
3260
                    scan = emit_label_after (lab, scan);
3261
                  scan = emit_insn_after (gen_consttable_4 (p->value,
3262
                                                            const0_rtx), scan);
3263
                  need_align = ! need_align;
3264
                }
3265
              break;
3266
            case DFmode:
3267
              if (need_align)
3268
                {
3269
                  scan = emit_insn_after (gen_align_log (GEN_INT (3)), scan);
3270
                  align_insn = scan;
3271
                  need_align = 0;
3272
                }
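              /* Fall through.  */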
3273
            case DImode:
3274
              for (lab = p->label; lab; lab = LABEL_REFS (lab))
3275
                scan = emit_label_after (lab, scan);
3276
              scan = emit_insn_after (gen_consttable_8 (p->value, const0_rtx),
3277
                                      scan);
3278
              break;
3279
            default:
3280
              gcc_unreachable ();
3281
            }
3282
 
3283
          if (p->mode != HImode)
3284
            {
3285
              for (ref = p->wend; ref; ref = ref->next)
3286
                {
3287
                  lab = ref->label;
3288
                  scan = emit_insn_after (gen_consttable_window_end (lab),
3289
                                          scan);
3290
                }
3291
            }
3292
        }
3293
 
3294
      pool_size = 0;
3295
    }
3296
 
3297
  for (i = 0; i < pool_size; i++)
3298
    {
3299
      pool_node *p = &pool_vector[i];
3300
 
3301
      switch (p->mode)
3302
        {
3303
        case HImode:
3304
          break;
3305
        case SImode:
3306
        case SFmode:
3307
          if (need_align)
3308
            {
3309
              need_align = 0;
3310
              scan = emit_label_after (gen_label_rtx (), scan);
3311
              scan = emit_insn_after (gen_align_4 (), scan);
3312
            }
3313
          for (lab = p->label; lab; lab = LABEL_REFS (lab))
3314
            scan = emit_label_after (lab, scan);
3315
          scan = emit_insn_after (gen_consttable_4 (p->value, const0_rtx),
3316
                                  scan);
3317
          break;
3318
        case DFmode:
3319
        case DImode:
3320
          if (need_align)
3321
            {
3322
              need_align = 0;
3323
              scan = emit_label_after (gen_label_rtx (), scan);
3324
              scan = emit_insn_after (gen_align_4 (), scan);
3325
            }
3326
          for (lab = p->label; lab; lab = LABEL_REFS (lab))
3327
            scan = emit_label_after (lab, scan);
3328
          scan = emit_insn_after (gen_consttable_8 (p->value, const0_rtx),
3329
                                  scan);
3330
          break;
3331
        default:
3332
          gcc_unreachable ();
3333
        }
3334
 
3335
      if (p->mode != HImode)
3336
        {
3337
          for (ref = p->wend; ref; ref = ref->next)
3338
            {
3339
              lab = ref->label;
3340
              scan = emit_insn_after (gen_consttable_window_end (lab), scan);
3341
            }
3342
        }
3343
    }
3344
 
3345
  scan = emit_insn_after (gen_consttable_end (), scan);
3346
  scan = emit_barrier_after (scan);
3347
  pool_size = 0;
3348
  pool_window_label = NULL_RTX;
3349
  pool_window_last = 0;
3350
}
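
/* For a typical mixed pool, the code above therefore emits, in order
   (an illustrative summary; casesi labels and the TARGET_ALIGN_DOUBLE
   path are left out):

       <2-byte alignment>                    (gen_align_2)
       <all HImode entries>                  (consttable_2)
       <4-byte alignment>                    (gen_align_4)
       <remaining entries in pool order>     (consttable_4 / consttable_8)
       <consttable_end marker>
       <barrier>
 */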
3351
 
3352
/* Return nonzero if constant would be an ok source for a
3353
   mov.w instead of a mov.l.  */
3354
 
3355
static int
3356
hi_const (rtx src)
3357
{
3358
  return (GET_CODE (src) == CONST_INT
3359
          && INTVAL (src) >= -32768
3360
          && INTVAL (src) <= 32767);
3361
}
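
/* Illustration: GEN_INT (32767) satisfies hi_const and can therefore sit
   in a 2-byte pool entry loaded with mov.w, while GEN_INT (32768) does
   not (it would not survive the sign extension) and needs a 4-byte entry
   loaded with mov.l.  */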
3362
 
3363
#define MOVA_LABELREF(mova) XVECEXP (SET_SRC (PATTERN (mova)), 0, 0)
3364
 
3365
/* Nonzero if the insn is a move instruction which needs to be fixed.  */
3366
 
3367
/* ??? For DImode/DFmode moves, we don't need to fix it if each half of the
3368
   CONST_DOUBLE input value is CONST_OK_FOR_I08.  For an SFmode move, we don't
3369
   need to fix it if the input value is CONST_OK_FOR_I08.  */
3370
 
3371
static int
3372
broken_move (rtx insn)
3373
{
3374
  if (GET_CODE (insn) == INSN)
3375
    {
3376
      rtx pat = PATTERN (insn);
3377
      if (GET_CODE (pat) == PARALLEL)
3378
        pat = XVECEXP (pat, 0, 0);
3379
      if (GET_CODE (pat) == SET
3380
          /* We can load any 8 bit value if we don't care what the high
3381
             order bits end up as.  */
3382
          && GET_MODE (SET_DEST (pat)) != QImode
3383
          && (CONSTANT_P (SET_SRC (pat))
3384
              /* Match mova_const.  */
3385
              || (GET_CODE (SET_SRC (pat)) == UNSPEC
3386
                  && XINT (SET_SRC (pat), 1) == UNSPEC_MOVA
3387
                  && GET_CODE (XVECEXP (SET_SRC (pat), 0, 0)) == CONST))
3388
          && ! (TARGET_SH2E
3389
                && GET_CODE (SET_SRC (pat)) == CONST_DOUBLE
3390
                && (fp_zero_operand (SET_SRC (pat))
3391
                    || fp_one_operand (SET_SRC (pat)))
3392
                /* ??? If this is a -m4 or -m4-single compilation, in general
3393
                   we don't know the current setting of fpscr, so disable fldi.
3394
                   There is an exception if this was a register-register move
3395
                   before reload - and hence it was ascertained that we have
3396
                   single precision setting - and in a post-reload optimization
3397
                   we changed this to do a constant load.  In that case
3398
                   we don't have an r0 clobber, hence we must use fldi.  */
3399
                && (! TARGET_SH4 || TARGET_FMOVD
3400
                    || (GET_CODE (XEXP (XVECEXP (PATTERN (insn), 0, 2), 0))
3401
                        == SCRATCH))
3402
                && GET_CODE (SET_DEST (pat)) == REG
3403
                && FP_REGISTER_P (REGNO (SET_DEST (pat))))
3404
          && ! (TARGET_SH2A
3405
                && GET_MODE (SET_DEST (pat)) == SImode
3406
                && GET_CODE (SET_SRC (pat)) == CONST_INT
3407
                && CONST_OK_FOR_I20 (INTVAL (SET_SRC (pat))))
3408
          && (GET_CODE (SET_SRC (pat)) != CONST_INT
3409
              || ! CONST_OK_FOR_I08 (INTVAL (SET_SRC (pat)))))
3410
        return 1;
3411
    }
3412
 
3413
  return 0;
3414
}
3415
 
3416
static int
3417
mova_p (rtx insn)
3418
{
3419
  return (GET_CODE (insn) == INSN
3420
          && GET_CODE (PATTERN (insn)) == SET
3421
          && GET_CODE (SET_SRC (PATTERN (insn))) == UNSPEC
3422
          && XINT (SET_SRC (PATTERN (insn)), 1) == UNSPEC_MOVA
3423
          /* Don't match mova_const.  */
3424
          && GET_CODE (MOVA_LABELREF (insn)) == LABEL_REF);
3425
}
3426
 
3427
/* Fix up a mova from a switch that went out of range.  */
3428
static void
3429
fixup_mova (rtx mova)
3430
{
3431
  PUT_MODE (XEXP (MOVA_LABELREF (mova), 0), QImode);
3432
  if (! flag_pic)
3433
    {
3434
      SET_SRC (PATTERN (mova)) = MOVA_LABELREF (mova);
3435
      INSN_CODE (mova) = -1;
3436
    }
3437
  else
3438
    {
3439
      rtx worker = mova;
3440
      rtx lab = gen_label_rtx ();
3441
      rtx wpat, wpat0, wpat1, wsrc, diff;
3442
 
3443
      do
3444
        {
3445
          worker = NEXT_INSN (worker);
3446
          gcc_assert (worker
3447
                      && GET_CODE (worker) != CODE_LABEL
3448
                      && GET_CODE (worker) != JUMP_INSN);
3449
        } while (GET_CODE (worker) == NOTE
3450
                 || recog_memoized (worker) != CODE_FOR_casesi_worker_1);
3451
      wpat = PATTERN (worker);
3452
      wpat0 = XVECEXP (wpat, 0, 0);
3453
      wpat1 = XVECEXP (wpat, 0, 1);
3454
      wsrc = SET_SRC (wpat0);
3455
      PATTERN (worker) = (gen_casesi_worker_2
3456
                          (SET_DEST (wpat0), XVECEXP (wsrc, 0, 1),
3457
                           XEXP (XVECEXP (wsrc, 0, 2), 0), lab,
3458
                           XEXP (wpat1, 0)));
3459
      INSN_CODE (worker) = -1;
3460
      diff = gen_rtx_MINUS (Pmode, XVECEXP (SET_SRC (PATTERN (mova)), 0, 0),
3461
                            gen_rtx_LABEL_REF (Pmode, lab));
3462
      diff = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, diff), UNSPEC_PIC);
3463
      SET_SRC (PATTERN (mova)) = gen_rtx_CONST (Pmode, diff);
3464
      INSN_CODE (mova) = -1;
3465
    }
3466
}
3467
 
3468
/* NEW_MOVA is a mova we've just encountered while scanning forward.  Update
3469
   *num_mova, and check if the new mova is not nested within the first one.
3470
   Return 0 if *first_mova was replaced, 1 if new_mova was replaced,
3471
   2 if new_mova has been assigned to *first_mova, -1 otherwise.  */
3472
static int
3473
untangle_mova (int *num_mova, rtx *first_mova, rtx new_mova)
3474
{
3475
  int n_addr = 0; /* Initialization to shut up spurious warning.  */
3476
  int f_target, n_target = 0; /* Likewise.  */
3477
 
3478
  if (optimize)
3479
    {
3480
      n_addr = INSN_ADDRESSES (INSN_UID (new_mova));
3481
      n_target = INSN_ADDRESSES (INSN_UID (XEXP (MOVA_LABELREF (new_mova), 0)));
3482
      if (n_addr > n_target || n_addr + 1022 < n_target)
3483
        {
3484
          /* Change the mova into a load.
3485
             broken_move will then return true for it.  */
3486
          fixup_mova (new_mova);
3487
          return 1;
3488
        }
3489
    }
3490
  if (!(*num_mova)++)
3491
    {
3492
      *first_mova = new_mova;
3493
      return 2;
3494
    }
3495
  if (!optimize
3496
      || ((f_target
3497
           = INSN_ADDRESSES (INSN_UID (XEXP (MOVA_LABELREF (*first_mova), 0))))
3498
          >= n_target))
3499
    return -1;
3500
 
3501
  (*num_mova)--;
3502
  if (f_target - INSN_ADDRESSES (INSN_UID (*first_mova))
3503
      > n_target - n_addr)
3504
    {
3505
      fixup_mova (*first_mova);
3506
      return 0;
3507
    }
3508
  else
3509
    {
3510
      fixup_mova (new_mova);
3511
      return 1;
3512
    }
3513
}
3514
 
3515
/* Find the last barrier from insn FROM which is close enough to hold the
3516
   constant pool.  If we can't find one, then create one near the end of
3517
   the range.  */
3518
 
3519
static rtx
3520
find_barrier (int num_mova, rtx mova, rtx from)
3521
{
3522
  int count_si = 0;
3523
  int count_hi = 0;
3524
  int found_hi = 0;
3525
  int found_si = 0;
3526
  int found_di = 0;
3527
  int hi_align = 2;
3528
  int si_align = 2;
3529
  int leading_mova = num_mova;
3530
  rtx barrier_before_mova = 0, found_barrier = 0, good_barrier = 0;
3531
  int si_limit;
3532
  int hi_limit;
3533
 
3534
  /* For HImode: range is 510, add 4 because pc counts from address of
3535
     second instruction after this one, subtract 2 for the jump instruction
3536
     that we may need to emit before the table, subtract 2 for the instruction
3537
     that fills the jump delay slot (in very rare cases, reorg will take an
3538
     instruction from after the constant pool or will leave the delay slot
3539
     empty).  This gives 510.
3540
     For SImode: range is 1020, add 4 because pc counts from address of
3541
     second instruction after this one, subtract 2 in case pc is 2 byte
3542
     aligned, subtract 2 for the jump instruction that we may need to emit
3543
     before the table, subtract 2 for the instruction that fills the jump
3544
     delay slot.  This gives 1018.  */
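
  /* The same numbers, spelled out (a restatement of the comment above):
       hi_limit:  510 + 4 - 2 - 2     = 510
       si_limit: 1020 + 4 - 2 - 2 - 2 = 1018  */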
3545
 
3546
  /* The branch will always be shortened now that the reference address for
3547
     forward branches is the successor address, thus we need no longer make
3548
     adjustments to the [sh]i_limit for -O0.  */
3549
 
3550
  si_limit = 1018;
3551
  hi_limit = 510;
3552
 
3553
  while (from && count_si < si_limit && count_hi < hi_limit)
3554
    {
3555
      int inc = get_attr_length (from);
3556
      int new_align = 1;
3557
 
3558
      /* If this is a label that existed at the time of the compute_alignments
3559
         call, determine the alignment.  N.B.  When find_barrier recurses for
3560
         an out-of-reach mova, we might see labels at the start of previously
3561
         inserted constant tables.  */
3562
      if (GET_CODE (from) == CODE_LABEL
3563
          && CODE_LABEL_NUMBER (from) <= max_labelno_before_reorg)
3564
        {
3565
          if (optimize)
3566
            new_align = 1 << label_to_alignment (from);
3567
          else if (GET_CODE (prev_nonnote_insn (from)) == BARRIER)
3568
            new_align = 1 << barrier_align (from);
3569
          else
3570
            new_align = 1;
3571
          inc = 0;
3572
        }
3573
      /* In case we are scanning a constant table because of recursion, check
3574
         for explicit alignments.  If the table is long, we might be forced
3575
         to emit the new table in front of it; the length of the alignment
3576
         might be the last straw.  */
3577
      else if (GET_CODE (from) == INSN
3578
               && GET_CODE (PATTERN (from)) == UNSPEC_VOLATILE
3579
               && XINT (PATTERN (from), 1) == UNSPECV_ALIGN)
3580
        new_align = INTVAL (XVECEXP (PATTERN (from), 0, 0));
3581
      /* When we find the end of a constant table, paste the new constant
3582
         at the end.  That is better than putting it in front because
3583
         this way, we don't need extra alignment for adding a 4-byte-aligned
3584
         mov(a) label to a 2/4 or 8/4 byte aligned table.  */
3585
      else if (GET_CODE (from) == INSN
3586
               && GET_CODE (PATTERN (from)) == UNSPEC_VOLATILE
3587
               && XINT (PATTERN (from), 1) == UNSPECV_CONST_END)
3588
        return from;
3589
 
3590
      if (GET_CODE (from) == BARRIER)
3591
        {
3592
 
3593
          found_barrier = from;
3594
 
3595
          /* If we are at the end of the function, or in front of an alignment
3596
             instruction, we need not insert an extra alignment.  We prefer
3597
             this kind of barrier.  */
3598
          if (barrier_align (from) > 2)
3599
            good_barrier = from;
3600
        }
3601
 
3602
      if (broken_move (from))
3603
        {
3604
          rtx pat, src, dst;
3605
          enum machine_mode mode;
3606
 
3607
          pat = PATTERN (from);
3608
          if (GET_CODE (pat) == PARALLEL)
3609
            pat = XVECEXP (pat, 0, 0);
3610
          src = SET_SRC (pat);
3611
          dst = SET_DEST (pat);
3612
          mode = GET_MODE (dst);
3613
 
3614
          /* We must explicitly check the mode, because sometimes the
3615
             front end will generate code to load unsigned constants into
3616
             HImode targets without properly sign extending them.  */
3617
          if (mode == HImode
3618
              || (mode == SImode && hi_const (src) && REGNO (dst) != FPUL_REG))
3619
            {
3620
              found_hi += 2;
3621
              /* We put the short constants before the long constants, so
3622
                 we must count the length of short constants in the range
3623
                 for the long constants.  */
3624
              /* ??? This isn't optimal, but is easy to do.  */
3625
              si_limit -= 2;
3626
            }
3627
          else
3628
            {
3629
              /* We dump DF/DI constants before SF/SI ones, because
3630
                 the limit is the same, but the alignment requirements
3631
                 are higher.  We may waste up to 4 additional bytes
3632
                 for alignment, and the DF/DI constant may have
3633
                 another SF/SI constant placed before it.  */
3634
              if (TARGET_SHCOMPACT
3635
                  && ! found_di
3636
                  && (mode == DFmode || mode == DImode))
3637
                {
3638
                  found_di = 1;
3639
                  si_limit -= 8;
3640
                }
3641
              while (si_align > 2 && found_si + si_align - 2 > count_si)
3642
                si_align >>= 1;
3643
              if (found_si > count_si)
3644
                count_si = found_si;
3645
              found_si += GET_MODE_SIZE (mode);
3646
              if (num_mova)
3647
                si_limit -= GET_MODE_SIZE (mode);
3648
            }
3649
        }
3650
 
3651
      if (mova_p (from))
3652
        {
3653
          switch (untangle_mova (&num_mova, &mova, from))
3654
            {
3655
              case 0:    return find_barrier (0, 0, mova);
3656
              case 2:
3657
                {
3658
                  leading_mova = 0;
3659
                  barrier_before_mova
3660
                    = good_barrier ? good_barrier : found_barrier;
3661
                }
3662
              default:  break;
3663
            }
3664
          if (found_si > count_si)
3665
            count_si = found_si;
3666
        }
3667
      else if (GET_CODE (from) == JUMP_INSN
3668
               && (GET_CODE (PATTERN (from)) == ADDR_VEC
3669
                   || GET_CODE (PATTERN (from)) == ADDR_DIFF_VEC))
3670
        {
3671
          if ((num_mova > 1 && GET_MODE (prev_nonnote_insn (from)) == VOIDmode)
3672
              || (num_mova
3673
                  && (prev_nonnote_insn (from)
3674
                      == XEXP (MOVA_LABELREF (mova), 0))))
3675
            num_mova--;
3676
          if (barrier_align (next_real_insn (from)) == align_jumps_log)
3677
            {
3678
              /* We have just passed the barrier in front of the
3679
                 ADDR_DIFF_VEC, which is stored in found_barrier.  Since
3680
                 the ADDR_DIFF_VEC is accessed as data, just like our pool
3681
                 constants, this is a good opportunity to accommodate what
3682
                 we have gathered so far.
3683
                 If we waited any longer, we could end up at a barrier in
3684
                 front of code, which gives worse cache usage for separated
3685
                 instruction / data caches.  */
3686
              good_barrier = found_barrier;
3687
              break;
3688
            }
3689
          else
3690
            {
3691
              rtx body = PATTERN (from);
3692
              inc = XVECLEN (body, 1) * GET_MODE_SIZE (GET_MODE (body));
3693
            }
3694
        }
3695
      /* For the SH1, we generate alignments even after jumps-around-jumps.  */
3696
      else if (GET_CODE (from) == JUMP_INSN
3697
               && ! TARGET_SH2
3698
               && ! TARGET_SMALLCODE)
3699
        new_align = 4;
3700
 
3701
      if (found_si)
3702
        {
3703
          count_si += inc;
3704
          if (new_align > si_align)
3705
            {
3706
              si_limit -= (count_si - 1) & (new_align - si_align);
3707
              si_align = new_align;
3708
            }
3709
          count_si = (count_si + new_align - 1) & -new_align;
3710
        }
3711
      if (found_hi)
3712
        {
3713
          count_hi += inc;
3714
          if (new_align > hi_align)
3715
            {
3716
              hi_limit -= (count_hi - 1) & (new_align - hi_align);
3717
              hi_align = new_align;
3718
            }
3719
          count_hi = (count_hi + new_align - 1) & -new_align;
3720
        }
3721
      from = NEXT_INSN (from);
3722
    }
3723
 
3724
  if (num_mova)
3725
    {
3726
      if (leading_mova)
3727
        {
3728
          /* Try as we might, the leading mova is out of range.  Change
3729
             it into a load (which will become a pcload) and retry.  */
3730
          fixup_mova (mova);
3731
          return find_barrier (0, 0, mova);
3732
        }
3733
      else
3734
        {
3735
          /* Insert the constant pool table before the mova instruction,
3736
             to prevent the mova label reference from going out of range.  */
3737
          from = mova;
3738
          good_barrier = found_barrier = barrier_before_mova;
3739
        }
3740
    }
3741
 
3742
  if (found_barrier)
3743
    {
3744
      if (good_barrier && next_real_insn (found_barrier))
3745
        found_barrier = good_barrier;
3746
    }
3747
  else
3748
    {
3749
      /* We didn't find a barrier in time to dump our stuff,
3750
         so we'll make one.  */
3751
      rtx label = gen_label_rtx ();
3752
 
3753
      /* If we exceeded the range, then we must back up over the last
3754
         instruction we looked at.  Otherwise, we just need to undo the
3755
         NEXT_INSN at the end of the loop.  */
3756
      if (count_hi > hi_limit || count_si > si_limit)
3757
        from = PREV_INSN (PREV_INSN (from));
3758
      else
3759
        from = PREV_INSN (from);
3760
 
3761
      /* Walk back to be just before any jump or label.
3762
         Putting it before a label reduces the number of times the branch
3763
         around the constant pool table will be hit.  Putting it before
3764
         a jump makes it more likely that the bra delay slot will be
3765
         filled.  */
3766
      while (GET_CODE (from) == JUMP_INSN || GET_CODE (from) == NOTE
3767
             || GET_CODE (from) == CODE_LABEL)
3768
        from = PREV_INSN (from);
3769
 
3770
      from = emit_jump_insn_after (gen_jump (label), from);
3771
      JUMP_LABEL (from) = label;
3772
      LABEL_NUSES (label) = 1;
3773
      found_barrier = emit_barrier_after (from);
3774
      emit_label_after (label, found_barrier);
3775
    }
3776
 
3777
  return found_barrier;
3778
}
3779
 
3780
/* If the instruction INSN is implemented by a special function, and we can
3781
   positively find the register that is used to call the sfunc, and this
3782
   register is not used anywhere else in this instruction - except as the
3783
   destination of a set, return this register; else, return 0.  */
3784
rtx
3785
sfunc_uses_reg (rtx insn)
3786
{
3787
  int i;
3788
  rtx pattern, part, reg_part, reg;
3789
 
3790
  if (GET_CODE (insn) != INSN)
3791
    return 0;
3792
  pattern = PATTERN (insn);
3793
  if (GET_CODE (pattern) != PARALLEL || get_attr_type (insn) != TYPE_SFUNC)
3794
    return 0;
3795
 
3796
  for (reg_part = 0, i = XVECLEN (pattern, 0) - 1; i >= 1; i--)
3797
    {
3798
      part = XVECEXP (pattern, 0, i);
3799
      if (GET_CODE (part) == USE && GET_MODE (XEXP (part, 0)) == SImode)
3800
        reg_part = part;
3801
    }
3802
  if (! reg_part)
3803
    return 0;
3804
  reg = XEXP (reg_part, 0);
3805
  for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
3806
    {
3807
      part = XVECEXP (pattern, 0, i);
3808
      if (part == reg_part || GET_CODE (part) == CLOBBER)
3809
        continue;
3810
      if (reg_mentioned_p (reg, ((GET_CODE (part) == SET
3811
                                  && GET_CODE (SET_DEST (part)) == REG)
3812
                                 ? SET_SRC (part) : part)))
3813
        return 0;
3814
    }
3815
  return reg;
3816
}
3817
 
3818
/* See if the only way in which INSN uses REG is by calling it, or by
3819
   setting it while calling it.  Set *SET to a SET rtx if the register
3820
   is set by INSN.  */
3821
 
3822
static int
3823
noncall_uses_reg (rtx reg, rtx insn, rtx *set)
3824
{
3825
  rtx pattern, reg2;
3826
 
3827
  *set = NULL_RTX;
3828
 
3829
  reg2 = sfunc_uses_reg (insn);
3830
  if (reg2 && REGNO (reg2) == REGNO (reg))
3831
    {
3832
      pattern = single_set (insn);
3833
      if (pattern
3834
          && GET_CODE (SET_DEST (pattern)) == REG
3835
          && REGNO (reg) == REGNO (SET_DEST (pattern)))
3836
        *set = pattern;
3837
      return 0;
3838
    }
3839
  if (GET_CODE (insn) != CALL_INSN)
3840
    {
3841
      /* We don't use rtx_equal_p because we don't care if the mode is
3842
         different.  */
3843
      pattern = single_set (insn);
3844
      if (pattern
3845
          && GET_CODE (SET_DEST (pattern)) == REG
3846
          && REGNO (reg) == REGNO (SET_DEST (pattern)))
3847
        {
3848
          rtx par, part;
3849
          int i;
3850
 
3851
          *set = pattern;
3852
          par = PATTERN (insn);
3853
          if (GET_CODE (par) == PARALLEL)
3854
            for (i = XVECLEN (par, 0) - 1; i >= 0; i--)
3855
              {
3856
                part = XVECEXP (par, 0, i);
3857
                if (GET_CODE (part) != SET && reg_mentioned_p (reg, part))
3858
                  return 1;
3859
              }
3860
          return reg_mentioned_p (reg, SET_SRC (pattern));
3861
        }
3862
 
3863
      return 1;
3864
    }
3865
 
3866
  pattern = PATTERN (insn);
3867
 
3868
  if (GET_CODE (pattern) == PARALLEL)
3869
    {
3870
      int i;
3871
 
3872
      for (i = XVECLEN (pattern, 0) - 1; i >= 1; i--)
3873
        if (reg_mentioned_p (reg, XVECEXP (pattern, 0, i)))
3874
          return 1;
3875
      pattern = XVECEXP (pattern, 0, 0);
3876
    }
3877
 
3878
  if (GET_CODE (pattern) == SET)
3879
    {
3880
      if (reg_mentioned_p (reg, SET_DEST (pattern)))
3881
        {
3882
          /* We don't use rtx_equal_p, because we don't care if the
3883
             mode is different.  */
3884
          if (GET_CODE (SET_DEST (pattern)) != REG
3885
              || REGNO (reg) != REGNO (SET_DEST (pattern)))
3886
            return 1;
3887
 
3888
          *set = pattern;
3889
        }
3890
 
3891
      pattern = SET_SRC (pattern);
3892
    }
3893
 
3894
  if (GET_CODE (pattern) != CALL
3895
      || GET_CODE (XEXP (pattern, 0)) != MEM
3896
      || ! rtx_equal_p (reg, XEXP (XEXP (pattern, 0), 0)))
3897
    return 1;
3898
 
3899
  return 0;
3900
}
3901
 
3902
/* Given X, a pattern of an insn or a part of it, return a mask of used
3903
   general registers.  Bits 0..15 mean that the respective registers
3904
   are used as inputs in the instruction.  Bits 16..31 mean that the
3905
   registers 0..15, respectively, are used as outputs, or are clobbered.
3906
   IS_DEST should be set to 16 if X is the destination of a SET, else to 0.  */
3907
int
3908
regs_used (rtx x, int is_dest)
3909
{
3910
  enum rtx_code code;
3911
  const char *fmt;
3912
  int i, used = 0;
3913
 
3914
  if (! x)
3915
    return used;
3916
  code = GET_CODE (x);
3917
  switch (code)
3918
    {
3919
    case REG:
3920
      if (REGNO (x) < 16)
3921
        return (((1 << HARD_REGNO_NREGS (0, GET_MODE (x))) - 1)
3922
                << (REGNO (x) + is_dest));
3923
      return 0;
3924
    case SUBREG:
3925
      {
3926
        rtx y = SUBREG_REG (x);
3927
 
3928
        if (GET_CODE (y) != REG)
3929
          break;
3930
        if (REGNO (y) < 16)
3931
          return (((1 << HARD_REGNO_NREGS (0, GET_MODE (x))) - 1)
3932
                  << (REGNO (y) +
3933
                      subreg_regno_offset (REGNO (y),
3934
                                           GET_MODE (y),
3935
                                           SUBREG_BYTE (x),
3936
                                           GET_MODE (x)) + is_dest));
3937
        return 0;
3938
      }
3939
    case SET:
3940
      return regs_used (SET_SRC (x), 0) | regs_used (SET_DEST (x), 16);
3941
    case RETURN:
3942
      /* If there was a return value, it must have been indicated with USE.  */
3943
      return 0x00ffff00;
3944
    case CLOBBER:
3945
      is_dest = 1;
3946
      break;
3947
    case MEM:
3948
      is_dest = 0;
3949
      break;
3950
    case CALL:
3951
      used |= 0x00ff00f0;
3952
      break;
3953
    default:
3954
      break;
3955
    }
3956
 
3957
  fmt = GET_RTX_FORMAT (code);
3958
 
3959
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3960
    {
3961
      if (fmt[i] == 'E')
3962
        {
3963
          register int j;
3964
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3965
            used |= regs_used (XVECEXP (x, i, j), is_dest);
3966
        }
3967
      else if (fmt[i] == 'e')
3968
        used |= regs_used (XEXP (x, i), is_dest);
3969
    }
3970
  return used;
3971
}
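
/* Worked example (illustrative; it assumes 32-bit general registers, so
   an SImode value occupies one register and a DImode value two): for the
   pattern (set (reg:SI 1) (reg:SI 2)), regs_used returns
     regs_used (src, 0) | regs_used (dest, 16)
       = (1 << 2) | (1 << (1 + 16)) = 0x20004,
   i.e. r2 is read (bit 2) and r1 is written (bit 17).  A DImode
   destination in r0 would instead set bits 16 and 17 (0x30000).  */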
3972
 
3973
/* Create an instruction that prevents redirection of a conditional branch
3974
   to the destination of the JUMP with address ADDR.
3975
   If the branch needs to be implemented as an indirect jump, try to find
3976
   a scratch register for it.
3977
   If NEED_BLOCK is 0, don't do anything unless we need a scratch register.
3978
   If any preceding insn that doesn't fit into a delay slot is good enough,
3979
   pass 1.  Pass 2 if a definite blocking insn is needed.
3980
   -1 is used internally to avoid deep recursion.
3981
   If a blocking instruction is made or recognized, return it.  */
3982
 
3983
static rtx
3984
gen_block_redirect (rtx jump, int addr, int need_block)
3985
{
3986
  int dead = 0;
3987
  rtx prev = prev_nonnote_insn (jump);
3988
  rtx dest;
3989
 
3990
  /* First, check if we already have an instruction that satisfies our need.  */
3991
  if (prev && GET_CODE (prev) == INSN && ! INSN_DELETED_P (prev))
3992
    {
3993
      if (INSN_CODE (prev) == CODE_FOR_indirect_jump_scratch)
3994
        return prev;
3995
      if (GET_CODE (PATTERN (prev)) == USE
3996
          || GET_CODE (PATTERN (prev)) == CLOBBER
3997
          || get_attr_in_delay_slot (prev) == IN_DELAY_SLOT_YES)
3998
        prev = jump;
3999
      else if ((need_block &= ~1) < 0)
4000
        return prev;
4001
      else if (recog_memoized (prev) == CODE_FOR_block_branch_redirect)
4002
        need_block = 0;
4003
    }
4004
  if (GET_CODE (PATTERN (jump)) == RETURN)
4005
    {
4006
      if (! need_block)
4007
        return prev;
4008
      /* Reorg even does nasty things with return insns that cause branches
4009
         to go out of range - see find_end_label and callers.  */
4010
      return emit_insn_before (gen_block_branch_redirect (const0_rtx) , jump);
4011
    }
4012
  /* We can't use JUMP_LABEL here because it might be undefined
4013
     when not optimizing.  */
4014
  dest = XEXP (SET_SRC (PATTERN (jump)), 0);
4015
  /* If the branch is out of range, try to find a scratch register for it.  */
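  /* Note on the test below (an explanatory addition, not in the original):
     with DIFF = INSN_ADDRESSES (INSN_UID (dest)) - addr, the unsigned
     comparison DIFF + 4092 > 4092 + 4098 holds exactly when DIFF < -4092
     or DIFF > 4098, i.e. when the target lies outside the roughly +-4K
     displacement that a direct SH branch can reach.  */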
4016
  if (optimize
4017
      && (INSN_ADDRESSES (INSN_UID (dest)) - addr + (unsigned) 4092
4018
          > 4092 + 4098))
4019
    {
4020
      rtx scan;
4021
      /* Don't look for the stack pointer as a scratch register;
4022
         it would cause trouble if an interrupt occurred.  */
4023
      unsigned try = 0x7fff, used;
4024
      int jump_left = flag_expensive_optimizations + 1;
4025
 
4026
      /* It is likely that the most recent eligible instruction is wanted for
4027
         the delay slot.  Therefore, find out which registers it uses, and
4028
         try to avoid using them.  */
4029
 
4030
      for (scan = jump; (scan = PREV_INSN (scan)); )
4031
        {
4032
          enum rtx_code code;
4033
 
4034
          if (INSN_DELETED_P (scan))
4035
            continue;
4036
          code = GET_CODE (scan);
4037
          if (code == CODE_LABEL || code == JUMP_INSN)
4038
            break;
4039
          if (code == INSN
4040
              && GET_CODE (PATTERN (scan)) != USE
4041
              && GET_CODE (PATTERN (scan)) != CLOBBER
4042
              && get_attr_in_delay_slot (scan) == IN_DELAY_SLOT_YES)
4043
            {
4044
              try &= ~regs_used (PATTERN (scan), 0);
4045
              break;
4046
            }
4047
        }
4048
      for (used = dead = 0, scan = JUMP_LABEL (jump);
4049
           (scan = NEXT_INSN (scan)); )
4050
        {
4051
          enum rtx_code code;
4052
 
4053
          if (INSN_DELETED_P (scan))
4054
            continue;
4055
          code = GET_CODE (scan);
4056
          if (INSN_P (scan))
4057
            {
4058
              used |= regs_used (PATTERN (scan), 0);
4059
              if (code == CALL_INSN)
4060
                used |= regs_used (CALL_INSN_FUNCTION_USAGE (scan), 0);
4061
              dead |= (used >> 16) & ~used;
4062
              if (dead & try)
4063
                {
4064
                  dead &= try;
4065
                  break;
4066
                }
4067
              if (code == JUMP_INSN)
4068
                {
4069
                  if (jump_left-- && simplejump_p (scan))
4070
                    scan = JUMP_LABEL (scan);
4071
                  else
4072
                    break;
4073
                }
4074
            }
4075
        }
4076
      /* Mask out the stack pointer again, in case it was
4077
         the only 'free' register we have found.  */
4078
      dead &= 0x7fff;
4079
    }
4080
  /* If the immediate destination is still in range, check for possible
4081
     threading with a jump beyond the delay slot insn.
4082
     Don't check if we are called recursively; the jump has been or will be
4083
     checked in a different invocation.  */
4084
 
4085
  else if (optimize && need_block >= 0)
4086
    {
4087
      rtx next = next_active_insn (next_active_insn (dest));
4088
      if (next && GET_CODE (next) == JUMP_INSN
4089
          && GET_CODE (PATTERN (next)) == SET
4090
          && recog_memoized (next) == CODE_FOR_jump_compact)
4091
        {
4092
          dest = JUMP_LABEL (next);
4093
          if (dest
4094
              && (INSN_ADDRESSES (INSN_UID (dest)) - addr + (unsigned) 4092
4095
                  > 4092 + 4098))
4096
            gen_block_redirect (next, INSN_ADDRESSES (INSN_UID (next)), -1);
4097
        }
4098
    }
4099
 
4100
  if (dead)
4101
    {
4102
      rtx reg = gen_rtx_REG (SImode, exact_log2 (dead & -dead));
4103
 
4104
      /* It would be nice if we could convert the jump into an indirect
4105
         jump / far branch right now, thus exposing all constituent
4106
         instructions to further optimization.  However, reorg uses
4107
         simplejump_p to determine if there is an unconditional jump where
4108
         it should try to schedule instructions from the target of the
4109
         branch; simplejump_p fails for indirect jumps even if they have
4110
         a JUMP_LABEL.  */
4111
      rtx insn = emit_insn_before (gen_indirect_jump_scratch
4112
                                   (reg, GEN_INT (INSN_UID (JUMP_LABEL (jump))))
4113
                                   , jump);
4114
      /* ??? We would like this to have the scope of the jump, but that
4115
         scope will change when a delay slot insn of an inner scope is added.
4116
         Hence, after delay slot scheduling, we'll have to expect
4117
         NOTE_INSN_BLOCK_END notes between the indirect_jump_scratch and
4118
         the jump.  */
4119
 
4120
      INSN_LOCATOR (insn) = INSN_LOCATOR (jump);
4121
      INSN_CODE (insn) = CODE_FOR_indirect_jump_scratch;
4122
      return insn;
4123
    }
4124
  else if (need_block)
4125
    /* We can't use JUMP_LABEL here because it might be undefined
4126
       when not optimizing.  */
4127
    return emit_insn_before (gen_block_branch_redirect
4128
                      (GEN_INT (INSN_UID (XEXP (SET_SRC (PATTERN (jump)), 0))))
4129
                      , jump);
4130
  return prev;
4131
}
4132
 
4133
#define CONDJUMP_MIN -252
4134
#define CONDJUMP_MAX 262
4135
struct far_branch
4136
{
4137
  /* A label (to be placed) in front of the jump
4138
     that jumps to our ultimate destination.  */
4139
  rtx near_label;
4140
  /* Where we are going to insert it if we cannot move the jump any farther,
4141
     or the jump itself if we have picked up an existing jump.  */
4142
  rtx insert_place;
4143
  /* The ultimate destination.  */
4144
  rtx far_label;
4145
  struct far_branch *prev;
4146
  /* If the branch has already been created, its address;
4147
     else the address of its first prospective user.  */
4148
  int address;
4149
};
4150
 
4151
static void gen_far_branch (struct far_branch *);
4152
enum mdep_reorg_phase_e mdep_reorg_phase;
4153
static void
4154
gen_far_branch (struct far_branch *bp)
4155
{
4156
  rtx insn = bp->insert_place;
4157
  rtx jump;
4158
  rtx label = gen_label_rtx ();
4159
  int ok;
4160
 
4161
  emit_label_after (label, insn);
4162
  if (bp->far_label)
4163
    {
4164
      jump = emit_jump_insn_after (gen_jump (bp->far_label), insn);
4165
      LABEL_NUSES (bp->far_label)++;
4166
    }
4167
  else
4168
    jump = emit_jump_insn_after (gen_return (), insn);
4169
  /* Emit a barrier so that reorg knows that any following instructions
4170
     are not reachable via a fall-through path.
4171
     But don't do this when not optimizing, since we wouldn't suppress the
4172
     alignment for the barrier then, and could end up with out-of-range
4173
     pc-relative loads.  */
4174
  if (optimize)
4175
    emit_barrier_after (jump);
4176
  emit_label_after (bp->near_label, insn);
4177
  JUMP_LABEL (jump) = bp->far_label;
4178
  ok = invert_jump (insn, label, 1);
4179
  gcc_assert (ok);
4180
 
4181
  /* If we are branching around a jump (rather than a return), prevent
4182
     reorg from using an insn from the jump target as the delay slot insn -
4183
     when reorg did this, it pessimized code (we'd rather hide the delay slot)
4184
     and it could cause branches to go out of range.  */
4185
  if (bp->far_label)
4186
    (emit_insn_after
4187
     (gen_stuff_delay_slot
4188
      (GEN_INT (INSN_UID (XEXP (SET_SRC (PATTERN (jump)), 0))),
4189
       GEN_INT (recog_memoized (insn) == CODE_FOR_branch_false)),
4190
      insn));
4191
  /* Prevent reorg from undoing our splits.  */
4192
  gen_block_redirect (jump, bp->address += 2, 2);
4193
}
4194
 
4195
/* Fix up ADDR_DIFF_VECs.  */
4196
void
4197
fixup_addr_diff_vecs (rtx first)
4198
{
4199
  rtx insn;
4200
 
4201
  for (insn = first; insn; insn = NEXT_INSN (insn))
4202
    {
4203
      rtx vec_lab, pat, prev, prevpat, x, braf_label;
4204
 
4205
      if (GET_CODE (insn) != JUMP_INSN
4206
          || GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC)
4207
        continue;
4208
      pat = PATTERN (insn);
4209
      vec_lab = XEXP (XEXP (pat, 0), 0);
4210
 
4211
      /* Search the matching casesi_jump_2.  */
4212
      for (prev = vec_lab; ; prev = PREV_INSN (prev))
4213
        {
4214
          if (GET_CODE (prev) != JUMP_INSN)
4215
            continue;
4216
          prevpat = PATTERN (prev);
4217
          if (GET_CODE (prevpat) != PARALLEL || XVECLEN (prevpat, 0) != 2)
4218
            continue;
4219
          x = XVECEXP (prevpat, 0, 1);
4220
          if (GET_CODE (x) != USE)
4221
            continue;
4222
          x = XEXP (x, 0);
4223
          if (GET_CODE (x) == LABEL_REF && XEXP (x, 0) == vec_lab)
4224
            break;
4225
        }
4226
      /* FIXME: This is a bug in the optimizer, but it seems harmless
4227
         to just avoid panicking.  */
4228
      if (!prev)
4229
        continue;
4230
 
4231
      /* Emit the reference label of the braf where it belongs, right after
4232
         the casesi_jump_2 (i.e. braf).  */
4233
      braf_label = XEXP (XEXP (SET_SRC (XVECEXP (prevpat, 0, 0)), 1), 0);
4234
      emit_label_after (braf_label, prev);
4235
 
4236
      /* Fix up the ADDR_DIFF_VEC to be relative
4237
         to the reference address of the braf.  */
4238
      XEXP (XEXP (pat, 0), 0) = braf_label;
4239
    }
4240
}
4241
 
4242
/* BARRIER_OR_LABEL is either a BARRIER or a CODE_LABEL immediately following
4243
   a barrier.  Return the base 2 logarithm of the desired alignment.  */
4244
int
4245
barrier_align (rtx barrier_or_label)
4246
{
4247
  rtx next = next_real_insn (barrier_or_label), pat, prev;
4248
  int slot, credit, jump_to_next = 0;
4249
 
4250
  if (! next)
4251
    return 0;
4252
 
4253
  pat = PATTERN (next);
4254
 
4255
  if (GET_CODE (pat) == ADDR_DIFF_VEC)
4256
    return 2;
4257
 
4258
  if (GET_CODE (pat) == UNSPEC_VOLATILE && XINT (pat, 1) == UNSPECV_ALIGN)
4259
    /* This is a barrier in front of a constant table.  */
4260
    return 0;
4261
 
4262
  prev = prev_real_insn (barrier_or_label);
4263
  if (GET_CODE (PATTERN (prev)) == ADDR_DIFF_VEC)
4264
    {
4265
      pat = PATTERN (prev);
4266
      /* If this is a very small table, we want to keep the alignment after
4267
         the table to the minimum for proper code alignment.  */
4268
      return ((TARGET_SMALLCODE
4269
               || ((unsigned) XVECLEN (pat, 1) * GET_MODE_SIZE (GET_MODE (pat))
4270
                   <= (unsigned) 1 << (CACHE_LOG - 2)))
4271
              ? 1 << TARGET_SHMEDIA : align_jumps_log);
4272
    }
4273
 
4274
  if (TARGET_SMALLCODE)
4275
    return 0;
4276
 
4277
  if (! TARGET_SH2 || ! optimize)
4278
    return align_jumps_log;
4279
 
4280
  /* When fixing up pcloads, a constant table might be inserted just before
4281
     the basic block that ends with the barrier.  Thus, we can't trust the
4282
     instruction lengths before that.  */
4283
  if (mdep_reorg_phase > SH_FIXUP_PCLOAD)
4284
    {
4285
      /* Check if there is an immediately preceding branch to the insn beyond
4286
         the barrier.  We must weigh the cost of discarding useful information
4288
         from the current cache line when executing this branch and there is
4289
         an alignment, against that of fetching unneeded insns in front of the
4289
         branch target when there is no alignment.  */
4290
 
4291
      /* There are two delay_slot cases to consider.  One is the simple case
4292
         where the preceding branch is to the insn beyond the barrier (simple
4293
         delay slot filling), and the other is where the preceding branch has
4294
         a delay slot that is a duplicate of the insn after the barrier
4295
         (fill_eager_delay_slots) and the branch is to the insn after the insn
4296
         after the barrier.  */
4297
 
4298
      /* PREV is presumed to be the JUMP_INSN for the barrier under
4299
         investigation.  Skip to the insn before it.  */
4300
      prev = prev_real_insn (prev);
4301
 
4302
      for (slot = 2, credit = (1 << (CACHE_LOG - 2)) + 2;
4303
           credit >= 0 && prev && GET_CODE (prev) == INSN;
4304
           prev = prev_real_insn (prev))
4305
        {
4306
          jump_to_next = 0;
4307
          if (GET_CODE (PATTERN (prev)) == USE
4308
              || GET_CODE (PATTERN (prev)) == CLOBBER)
4309
            continue;
4310
          if (GET_CODE (PATTERN (prev)) == SEQUENCE)
4311
            {
4312
              prev = XVECEXP (PATTERN (prev), 0, 1);
4313
              if (INSN_UID (prev) == INSN_UID (next))
4314
                {
4315
                  /* Delay slot was filled with insn at jump target.  */
4316
                  jump_to_next = 1;
4317
                  continue;
4318
                }
4319
            }
4320
 
4321
          if (slot &&
4322
              get_attr_in_delay_slot (prev) == IN_DELAY_SLOT_YES)
4323
            slot = 0;
4324
          credit -= get_attr_length (prev);
4325
        }
4326
      if (prev
4327
          && GET_CODE (prev) == JUMP_INSN
4328
          && JUMP_LABEL (prev))
4329
        {
4330
          rtx x;
4331
          if (jump_to_next
4332
              || next_real_insn (JUMP_LABEL (prev)) == next
4333
              /* If relax_delay_slots() decides NEXT was redundant
4334
                 with some previous instruction, it will have
4335
                 redirected PREV's jump to the following insn.  */
4336
              || JUMP_LABEL (prev) == next_nonnote_insn (next)
4337
              /* There is no upper bound on redundant instructions
4338
                 that might have been skipped, but we must not put an
4339
                 alignment where none had been before.  */
4340
              || (x = (NEXT_INSN (NEXT_INSN (PREV_INSN (prev)))),
4341
                  (INSN_P (x)
4342
                   && (INSN_CODE (x) == CODE_FOR_block_branch_redirect
4343
                       || INSN_CODE (x) == CODE_FOR_indirect_jump_scratch
4344
                       || INSN_CODE (x) == CODE_FOR_stuff_delay_slot))))
4345
            {
4346
              rtx pat = PATTERN (prev);
4347
              if (GET_CODE (pat) == PARALLEL)
4348
                pat = XVECEXP (pat, 0, 0);
4349
              if (credit - slot >= (GET_CODE (SET_SRC (pat)) == PC ? 2 : 0))
4350
                return 0;
4351
            }
4352
        }
4353
    }
4354
 
4355
  return align_jumps_log;
4356
}
4357
 
4358
/* If we are inside a phony loop, almost any kind of label can turn up as the
4359
   first one in the loop.  Aligning a braf label causes incorrect switch
4360
   destination addresses; we can detect braf labels because they are
4361
   followed by a BARRIER.
4362
   Applying loop alignment to small constant or switch tables is a waste
4363
   of space, so we suppress this too.  */
4364
int
4365
sh_loop_align (rtx label)
4366
{
4367
  rtx next = label;
4368
 
4369
  do
4370
    next = next_nonnote_insn (next);
4371
  while (next && GET_CODE (next) == CODE_LABEL);
4372
 
4373
  if (! next
4374
      || ! INSN_P (next)
4375
      || GET_CODE (PATTERN (next)) == ADDR_DIFF_VEC
4376
      || recog_memoized (next) == CODE_FOR_consttable_2)
4377
    return 0;
4378
 
4379
  return align_loops_log;
4380
}
4381
 
4382
/* Do a final pass over the function, just before delayed branch
4383
   scheduling.  */
4384
 
4385
static void
4386
sh_reorg (void)
4387
{
4388
  rtx first, insn, mova = NULL_RTX;
4389
  int num_mova;
4390
  rtx r0_rtx = gen_rtx_REG (Pmode, 0);
4391
  rtx r0_inc_rtx = gen_rtx_POST_INC (Pmode, r0_rtx);
4392
 
4393
  first = get_insns ();
4394
  max_labelno_before_reorg = max_label_num ();
4395
 
4396
  /* We must split call insns before introducing `mova's.  If we're
4397
     optimizing, they'll have already been split.  Otherwise, make
4398
     sure we don't split them too late.  */
4399
  if (! optimize)
4400
    split_all_insns_noflow ();
4401
 
4402
  if (TARGET_SHMEDIA)
4403
    return;
4404
 
4405
  /* If relaxing, generate pseudo-ops to associate function calls with
4406
     the symbols they call.  It does no harm to not generate these
4407
     pseudo-ops.  However, when we can generate them, it enables the
4408
     linker to potentially relax the jsr to a bsr, and eliminate the
4409
     register load and, possibly, the constant pool entry.  */
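
  /* For illustration (a sketch; the actual pseudo-op is emitted when the
     insns are output): with -mrelax a call sequence like

         mov.l   .Lcp,r1    ! r1 <- address of foo from the constant pool
         ...
         jsr     @r1

     gets annotated with the symbol it calls, so the linker may turn it
     into a direct  bsr foo  and drop the register load and, if nothing
     else uses it, the .Lcp pool entry (.Lcp is a made-up label).  */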
4410
 
4411
  mdep_reorg_phase = SH_INSERT_USES_LABELS;
4412
  if (TARGET_RELAX)
4413
    {
4414
      /* Remove all REG_LABEL notes.  We want to use them for our own
4415
         purposes.  This works because none of the remaining passes
4416
         need to look at them.
4417
 
4418
         ??? But it may break in the future.  We should use a machine
4419
         dependent REG_NOTE, or some other approach entirely.  */
4420
      for (insn = first; insn; insn = NEXT_INSN (insn))
4421
        {
4422
          if (INSN_P (insn))
4423
            {
4424
              rtx note;
4425
 
4426
              while ((note = find_reg_note (insn, REG_LABEL, NULL_RTX)) != 0)
4427
                remove_note (insn, note);
4428
            }
4429
        }
4430
 
4431
      for (insn = first; insn; insn = NEXT_INSN (insn))
4432
        {
4433
          rtx pattern, reg, link, set, scan, dies, label;
4434
          int rescan = 0, foundinsn = 0;
4435
 
4436
          if (GET_CODE (insn) == CALL_INSN)
4437
            {
4438
              pattern = PATTERN (insn);
4439
 
4440
              if (GET_CODE (pattern) == PARALLEL)
4441
                pattern = XVECEXP (pattern, 0, 0);
4442
              if (GET_CODE (pattern) == SET)
4443
                pattern = SET_SRC (pattern);
4444
 
4445
              if (GET_CODE (pattern) != CALL
4446
                  || GET_CODE (XEXP (pattern, 0)) != MEM)
4447
                continue;
4448
 
4449
              reg = XEXP (XEXP (pattern, 0), 0);
4450
            }
4451
          else
4452
            {
4453
              reg = sfunc_uses_reg (insn);
4454
              if (! reg)
4455
                continue;
4456
            }
4457
 
4458
          if (GET_CODE (reg) != REG)
4459
            continue;
4460
 
4461
          /* This is a function call via REG.  If the only uses of REG
4462
             between the time that it is set and the time that it dies
4463
             are in function calls, then we can associate all the
4464
             function calls with the setting of REG.  */
4465
 
4466
          for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
4467
            {
4468
              rtx linked_insn;
4469
 
4470
              if (REG_NOTE_KIND (link) != 0)
4471
                continue;
4472
              linked_insn = XEXP (link, 0);
4473
              set = single_set (linked_insn);
4474
              if (set
4475
                  && rtx_equal_p (reg, SET_DEST (set))
4476
                  && ! INSN_DELETED_P (linked_insn))
4477
                {
4478
                  link = linked_insn;
4479
                  break;
4480
                }
4481
            }
4482
 
4483
          if (! link)
4484
            {
4485
              /* ??? Sometimes global register allocation will have
4486
                 deleted the insn pointed to by LOG_LINKS.  Try
4487
                 scanning backward to find where the register is set.  */
4488
              for (scan = PREV_INSN (insn);
4489
                   scan && GET_CODE (scan) != CODE_LABEL;
4490
                   scan = PREV_INSN (scan))
4491
                {
4492
                  if (! INSN_P (scan))
4493
                    continue;
4494
 
4495
                  if (! reg_mentioned_p (reg, scan))
4496
                    continue;
4497
 
4498
                  if (noncall_uses_reg (reg, scan, &set))
4499
                    break;
4500
 
4501
                  if (set)
4502
                    {
4503
                      link = scan;
4504
                      break;
4505
                    }
4506
                }
4507
            }
4508
 
4509
          if (! link)
4510
            continue;
4511
 
4512
          /* The register is set at LINK.  */
4513
 
4514
          /* We can only optimize the function call if the register is
4515
             being set to a symbol.  In theory, we could sometimes
4516
             optimize calls to a constant location, but the assembler
4517
             and linker do not support that at present.  */
4518
          if (GET_CODE (SET_SRC (set)) != SYMBOL_REF
4519
              && GET_CODE (SET_SRC (set)) != LABEL_REF)
4520
            continue;
4521
 
4522
          /* Scan forward from LINK to the place where REG dies, and
4523
             make sure that the only insns which use REG are
4524
             themselves function calls.  */
4525
 
4526
          /* ??? This doesn't work for call targets that were allocated
4527
             by reload, since there may not be a REG_DEAD note for the
4528
             register.  */
4529
 
4530
          dies = NULL_RTX;
4531
          for (scan = NEXT_INSN (link); scan; scan = NEXT_INSN (scan))
4532
            {
4533
              rtx scanset;
4534
 
4535
              /* Don't try to trace forward past a CODE_LABEL if we haven't
4536
                 seen INSN yet.  Ordinarily, we will only find the setting insn
4537
                 in LOG_LINKS if it is in the same basic block.  However,
4538
                 cross-jumping can insert code labels in between the load and
4539
                 the call, and can result in situations where a single call
4540
                 insn may have two targets depending on where we came from.  */
4541
 
4542
              if (GET_CODE (scan) == CODE_LABEL && ! foundinsn)
4543
                break;
4544
 
4545
              if (! INSN_P (scan))
4546
                continue;
4547
 
4548
              /* Don't try to trace forward past a JUMP.  To optimize
4549
                 safely, we would have to check that all the
4550
                 instructions at the jump destination did not use REG.  */
4551
 
4552
              if (GET_CODE (scan) == JUMP_INSN)
4553
                break;
4554
 
4555
              if (! reg_mentioned_p (reg, scan))
4556
                continue;
4557
 
4558
              if (noncall_uses_reg (reg, scan, &scanset))
4559
                break;
4560
 
4561
              if (scan == insn)
4562
                foundinsn = 1;
4563
 
4564
              if (scan != insn
4565
                  && (GET_CODE (scan) == CALL_INSN || sfunc_uses_reg (scan)))
4566
                {
4567
                  /* There is a function call through this register other
                     than the one we are checking.  If we optimize
                     this call, we need to rescan again below.  */
4570
                  rescan = 1;
4571
                }
4572
 
4573
              /* ??? We shouldn't have to worry about SCANSET here.
4574
                 We should just be able to check for a REG_DEAD note
4575
                 on a function call.  However, the REG_DEAD notes are
4576
                 apparently not dependable around libcalls; c-torture
4577
                 execute/920501-2 is a test case.  If SCANSET is set,
4578
                 then this insn sets the register, so it must have
4579
                 died earlier.  Unfortunately, this will only handle
4580
                 the cases in which the register is, in fact, set in a
4581
                 later insn.  */
4582
 
4583
              /* ??? We shouldn't have to use FOUNDINSN here.
4584
                 However, the LOG_LINKS fields are apparently not
4585
                 entirely reliable around libcalls;
4586
                 newlib/libm/math/e_pow.c is a test case.  Sometimes
4587
                 an insn will appear in LOG_LINKS even though it is
4588
                 not the most recent insn which sets the register.  */
4589
 
4590
              if (foundinsn
4591
                  && (scanset
4592
                      || find_reg_note (scan, REG_DEAD, reg)))
4593
                {
4594
                  dies = scan;
4595
                  break;
4596
                }
4597
            }
4598
 
4599
          if (! dies)
4600
            {
4601
              /* Either there was a branch, or some insn used REG
4602
                 other than as a function call address.  */
4603
              continue;
4604
            }
4605
 
4606
          /* Create a code label, and put it in a REG_LABEL note on
4607
             the insn which sets the register, and on each call insn
4608
             which uses the register.  In final_prescan_insn we look
4609
             for the REG_LABEL notes, and output the appropriate label
4610
             or pseudo-op.  */
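          /* Roughly, the resulting assembly then pairs each call with the
             insn that loaded its target, something like

                 .L5:
                         mov.l   .L7,r1      ! insn that sets the register
                         ...
                         .uses   .L5
                         jsr     @r1         ! the call itself

             (illustrative sketch only -- the actual label names and the
             .uses pseudo-op are emitted later, in final_prescan_insn), so
             that a relaxing assembler/linker can shorten the sequence,
             e.g. to a bsr, when the target turns out to be close enough.  */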
4611
 
4612
          label = gen_label_rtx ();
4613
          REG_NOTES (link) = gen_rtx_INSN_LIST (REG_LABEL, label,
4614
                                                REG_NOTES (link));
4615
          REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, label,
4616
                                                REG_NOTES (insn));
4617
          if (rescan)
4618
            {
4619
              scan = link;
4620
              do
4621
                {
4622
                  rtx reg2;
4623
 
4624
                  scan = NEXT_INSN (scan);
4625
                  if (scan != insn
4626
                      && ((GET_CODE (scan) == CALL_INSN
4627
                           && reg_mentioned_p (reg, scan))
4628
                          || ((reg2 = sfunc_uses_reg (scan))
4629
                              && REGNO (reg2) == REGNO (reg))))
4630
                    REG_NOTES (scan)
4631
                      = gen_rtx_INSN_LIST (REG_LABEL, label, REG_NOTES (scan));
4632
                }
4633
              while (scan != dies);
4634
            }
4635
        }
4636
    }
4637
 
4638
  if (TARGET_SH2)
4639
    fixup_addr_diff_vecs (first);
4640
 
4641
  if (optimize)
4642
    {
4643
      mdep_reorg_phase = SH_SHORTEN_BRANCHES0;
4644
      shorten_branches (first);
4645
    }
4646
 
4647
  /* Scan the function looking for move instructions which have to be
4648
     changed to pc-relative loads and insert the literal tables.  */
4649
  label_ref_list_pool = create_alloc_pool ("label references list",
4650
                                           sizeof (struct label_ref_list_d),
4651
                                           30);
4652
  mdep_reorg_phase = SH_FIXUP_PCLOAD;
4653
  for (insn = first, num_mova = 0; insn; insn = NEXT_INSN (insn))
4654
    {
4655
      if (mova_p (insn))
4656
        {
4657
          /* ??? basic block reordering can move a switch table dispatch
4658
             below the switch table.  Check if that has happened.
4659
             We only have the addresses available when optimizing; but then,
4660
             this check shouldn't be needed when not optimizing.  */
4661
          if (!untangle_mova (&num_mova, &mova, insn))
4662
            {
4663
              insn = mova;
4664
              num_mova = 0;
4665
            }
4666
        }
4667
      else if (GET_CODE (insn) == JUMP_INSN
4668
               && GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
4669
               && num_mova
4670
               /* ??? loop invariant motion can also move a mova out of a
4671
                  loop.  Since loop does this code motion anyway, maybe we
4672
                  should wrap UNSPEC_MOVA into a CONST, so that reload can
4673
                  move it back.  */
4674
               && ((num_mova > 1
4675
                    && GET_MODE (prev_nonnote_insn (insn)) == VOIDmode)
4676
                   || (prev_nonnote_insn (insn)
4677
                       == XEXP (MOVA_LABELREF (mova), 0))))
4678
        {
4679
          rtx scan;
4680
          int total;
4681
 
4682
          num_mova--;
4683
 
4684
          /* Some code might have been inserted between the mova and
4685
             its ADDR_DIFF_VEC.  Check if the mova is still in range.  */
4686
          for (scan = mova, total = 0; scan != insn; scan = NEXT_INSN (scan))
4687
            total += get_attr_length (scan);
4688
 
4689
          /* The range of mova is 1020 bytes; add 4 because the pc counts
             from the address of the second instruction after this one, and
             subtract 2 in case the pc is 2-byte aligned.  Any alignment
             possibly needed for the ADDR_DIFF_VEC cancels out with the
             alignment effects of the mova itself.  */
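          /* In other words, TOTAL is the number of bytes between the mova
             and its ADDR_DIFF_VEC; once it exceeds 1020 + 4 - 2 = 1022,
             the mova can no longer reach its target and has to be
             rewritten, which is what the check below does (illustrative
             restatement).  */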
4693
          if (total > 1022)
4694
            {
4695
              /* Change the mova into a load, and restart scanning
4696
                 there.  broken_move will then return true for mova.  */
4697
              fixup_mova (mova);
4698
              insn = mova;
4699
            }
4700
        }
4701
      if (broken_move (insn)
4702
          || (GET_CODE (insn) == INSN
4703
              && recog_memoized (insn) == CODE_FOR_casesi_worker_2))
4704
        {
4705
          rtx scan;
4706
          /* Scan ahead looking for a barrier to stick the constant table
4707
             behind.  */
4708
          rtx barrier = find_barrier (num_mova, mova, insn);
4709
          rtx last_float_move = NULL_RTX, last_float = 0, *last_float_addr = NULL;
4710
          int need_aligned_label = 0;
4711
 
4712
          if (num_mova && ! mova_p (mova))
4713
            {
4714
              /* find_barrier had to change the first mova into a
4715
                 pcload; thus, we have to start with this new pcload.  */
4716
              insn = mova;
4717
              num_mova = 0;
4718
            }
4719
          /* Now find all the moves between the points and modify them.  */
4720
          for (scan = insn; scan != barrier; scan = NEXT_INSN (scan))
4721
            {
4722
              if (GET_CODE (scan) == CODE_LABEL)
4723
                last_float = 0;
4724
              if (GET_CODE (scan) == INSN
4725
                  && recog_memoized (scan) == CODE_FOR_casesi_worker_2)
4726
                need_aligned_label = 1;
4727
              if (broken_move (scan))
4728
                {
4729
                  rtx *patp = &PATTERN (scan), pat = *patp;
4730
                  rtx src, dst;
4731
                  rtx lab;
4732
                  rtx newsrc;
4733
                  enum machine_mode mode;
4734
 
4735
                  if (GET_CODE (pat) == PARALLEL)
4736
                    patp = &XVECEXP (pat, 0, 0), pat = *patp;
4737
                  src = SET_SRC (pat);
4738
                  dst = SET_DEST (pat);
4739
                  mode = GET_MODE (dst);
4740
 
4741
                  if (mode == SImode && hi_const (src)
4742
                      && REGNO (dst) != FPUL_REG)
4743
                    {
4744
                      int offset = 0;
4745
 
4746
                      mode = HImode;
4747
                      while (GET_CODE (dst) == SUBREG)
4748
                        {
4749
                          offset += subreg_regno_offset (REGNO (SUBREG_REG (dst)),
4750
                                                         GET_MODE (SUBREG_REG (dst)),
4751
                                                         SUBREG_BYTE (dst),
4752
                                                         GET_MODE (dst));
4753
                          dst = SUBREG_REG (dst);
4754
                        }
4755
                      dst = gen_rtx_REG (HImode, REGNO (dst) + offset);
4756
                    }
4757
                  if (GET_CODE (dst) == REG && FP_ANY_REGISTER_P (REGNO (dst)))
4758
                    {
4759
                      /* This must be an insn that clobbers r0.  */
4760
                      rtx *clobberp = &XVECEXP (PATTERN (scan), 0,
4761
                                                XVECLEN (PATTERN (scan), 0)
4762
                                                - 1);
4763
                      rtx clobber = *clobberp;
4764
 
4765
                      gcc_assert (GET_CODE (clobber) == CLOBBER
4766
                                  && rtx_equal_p (XEXP (clobber, 0), r0_rtx));
4767
 
4768
                      if (last_float
4769
                          && reg_set_between_p (r0_rtx, last_float_move, scan))
4770
                        last_float = 0;
4771
                      if (last_float
4772
                          && TARGET_SHCOMPACT
4773
                          && GET_MODE_SIZE (mode) != 4
4774
                          && GET_MODE_SIZE (GET_MODE (last_float)) == 4)
4775
                        last_float = 0;
4776
                      lab = add_constant (src, mode, last_float);
4777
                      if (lab)
4778
                        emit_insn_before (gen_mova (lab), scan);
4779
                      else
4780
                        {
4781
                          /* There will be a REG_UNUSED note for r0 on
                             LAST_FLOAT_MOVE; we have to change it to REG_INC,
                             since otherwise reorg:mark_target_live_regs would
                             not consider r0 to be used, and we could end up
                             with a delay slot insn in front of SCAN that
                             clobbers r0.  */
4786
                          rtx note
4787
                            = find_regno_note (last_float_move, REG_UNUSED, 0);
4788
 
4789
                          /* If we are not optimizing, then there may not be
4790
                             a note.  */
4791
                          if (note)
4792
                            PUT_MODE (note, REG_INC);
4793
 
4794
                          *last_float_addr = r0_inc_rtx;
4795
                        }
4796
                      last_float_move = scan;
4797
                      last_float = src;
4798
                      newsrc = gen_const_mem (mode,
4799
                                        (((TARGET_SH4 && ! TARGET_FMOVD)
4800
                                          || REGNO (dst) == FPUL_REG)
4801
                                         ? r0_inc_rtx
4802
                                         : r0_rtx));
4803
                      last_float_addr = &XEXP (newsrc, 0);
4804
 
4805
                      /* Remove the clobber of r0.  */
4806
                      *clobberp = gen_rtx_CLOBBER (GET_MODE (clobber),
4807
                                                   gen_rtx_SCRATCH (Pmode));
4808
                    }
4809
                  /* This is a mova needing a label.  Create it.  */
4810
                  else if (GET_CODE (src) == UNSPEC
4811
                           && XINT (src, 1) == UNSPEC_MOVA
4812
                           && GET_CODE (XVECEXP (src, 0, 0)) == CONST)
4813
                    {
4814
                      lab = add_constant (XVECEXP (src, 0, 0), mode, 0);
4815
                      newsrc = gen_rtx_LABEL_REF (VOIDmode, lab);
4816
                      newsrc = gen_rtx_UNSPEC (SImode,
4817
                                               gen_rtvec (1, newsrc),
4818
                                               UNSPEC_MOVA);
4819
                    }
4820
                  else
4821
                    {
4822
                      lab = add_constant (src, mode, 0);
4823
                      newsrc = gen_rtx_LABEL_REF (VOIDmode, lab);
4824
                      newsrc = gen_const_mem (mode, newsrc);
4825
                    }
4826
                  *patp = gen_rtx_SET (VOIDmode, dst, newsrc);
4827
                  INSN_CODE (scan) = -1;
4828
                }
4829
            }
4830
          dump_table (need_aligned_label ? insn : 0, barrier);
4831
          insn = barrier;
4832
        }
4833
    }
4834
  free_alloc_pool (label_ref_list_pool);
4835
  for (insn = first; insn; insn = NEXT_INSN (insn))
4836
    PUT_MODE (insn, VOIDmode);
4837
 
4838
  mdep_reorg_phase = SH_SHORTEN_BRANCHES1;
4839
  INSN_ADDRESSES_FREE ();
4840
  split_branches (first);
4841
 
4842
  /* The INSN_REFERENCES_ARE_DELAYED in sh.h is problematic because it
4843
     also has an effect on the register that holds the address of the sfunc.
4844
     Insert an extra dummy insn in front of each sfunc that pretends to
4845
     use this register.  */
4846
  if (flag_delayed_branch)
4847
    {
4848
      for (insn = first; insn; insn = NEXT_INSN (insn))
4849
        {
4850
          rtx reg = sfunc_uses_reg (insn);
4851
 
4852
          if (! reg)
4853
            continue;
4854
          emit_insn_before (gen_use_sfunc_addr (reg), insn);
4855
        }
4856
    }
4857
#if 0
4858
  /* fpscr is not actually a user variable, but we pretend it is for the
4859
     sake of the previous optimization passes, since we want it handled like
4860
     one.  However, we don't have any debugging information for it, so turn
4861
     it into a non-user variable now.  */
4862
  if (TARGET_SH4)
4863
    REG_USERVAR_P (get_fpscr_rtx ()) = 0;
4864
#endif
4865
  mdep_reorg_phase = SH_AFTER_MDEP_REORG;
4866
}
4867
 
4868
int
4869
get_dest_uid (rtx label, int max_uid)
4870
{
4871
  rtx dest = next_real_insn (label);
4872
  int dest_uid;
4873
  if (! dest)
4874
    /* This can happen for an undefined label.  */
4875
    return 0;
4876
  dest_uid = INSN_UID (dest);
4877
  /* If this is a newly created branch redirection blocking instruction,
4878
     we cannot index the branch_uid or insn_addresses arrays with its
4879
     uid.  But then, we won't need to, because the actual destination is
4880
     the following branch.  */
4881
  while (dest_uid >= max_uid)
4882
    {
4883
      dest = NEXT_INSN (dest);
4884
      dest_uid = INSN_UID (dest);
4885
    }
4886
  if (GET_CODE (dest) == JUMP_INSN && GET_CODE (PATTERN (dest)) == RETURN)
4887
    return 0;
4888
  return dest_uid;
4889
}
4890
 
4891
/* Split condbranches that are out of range.  Also add clobbers for
4892
   scratch registers that are needed in far jumps.
4893
   We do this before delay slot scheduling, so that it can take our
4894
   newly created instructions into account.  It also allows us to
4895
   find branches with common targets more easily.  */
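/* Rough background, for illustration: an SH conditional branch (bt/bf) has
   only an 8-bit pc-relative displacement, i.e. a reach on the order of
   +/-256 bytes, whereas an unconditional bra reaches roughly +/-4K.
   CONDJUMP_MIN / CONDJUMP_MAX and the magic constants used below encode
   these limits; a conditional branch whose target lies further away gets
   rewritten as a short branch of the opposite sense around a longer jump.  */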
4896
 
4897
static void
4898
split_branches (rtx first)
4899
{
4900
  rtx insn;
4901
  struct far_branch **uid_branch, *far_branch_list = 0;
4902
  int max_uid = get_max_uid ();
4903
  int ok;
4904
 
4905
  /* Find out which branches are out of range.  */
4906
  shorten_branches (first);
4907
 
4908
  uid_branch = (struct far_branch **) alloca (max_uid * sizeof *uid_branch);
4909
  memset ((char *) uid_branch, 0, max_uid * sizeof *uid_branch);
4910
 
4911
  for (insn = first; insn; insn = NEXT_INSN (insn))
4912
    if (! INSN_P (insn))
4913
      continue;
4914
    else if (INSN_DELETED_P (insn))
4915
      {
4916
        /* Shorten_branches would split this instruction again,
4917
           so transform it into a note.  */
4918
        PUT_CODE (insn, NOTE);
4919
        NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
4920
        NOTE_SOURCE_FILE (insn) = 0;
4921
      }
4922
    else if (GET_CODE (insn) == JUMP_INSN
4923
             /* Don't mess with ADDR_DIFF_VEC */
4924
             && (GET_CODE (PATTERN (insn)) == SET
4925
                 || GET_CODE (PATTERN (insn)) == RETURN))
4926
      {
4927
        enum attr_type type = get_attr_type (insn);
4928
        if (type == TYPE_CBRANCH)
4929
          {
4930
            rtx next, beyond;
4931
 
4932
            if (get_attr_length (insn) > 4)
4933
              {
4934
                rtx src = SET_SRC (PATTERN (insn));
4935
                rtx olabel = XEXP (XEXP (src, 1), 0);
4936
                int addr = INSN_ADDRESSES (INSN_UID (insn));
4937
                rtx label = 0;
4938
                int dest_uid = get_dest_uid (olabel, max_uid);
4939
                struct far_branch *bp = uid_branch[dest_uid];
4940
 
4941
                /* redirect_jump needs a valid JUMP_LABEL, and it might delete
4942
                   the label if the LABEL_NUSES count drops to zero.  There is
4943
                   always a jump_optimize pass that sets these values, but it
4944
                   proceeds to delete unreferenced code, and then if not
4945
                   optimizing, to un-delete the deleted instructions, thus
4946
                   leaving labels with use counts that are too low.  */
4947
                if (! optimize)
4948
                  {
4949
                    JUMP_LABEL (insn) = olabel;
4950
                    LABEL_NUSES (olabel)++;
4951
                  }
4952
                if (! bp)
4953
                  {
4954
                    bp = (struct far_branch *) alloca (sizeof *bp);
4955
                    uid_branch[dest_uid] = bp;
4956
                    bp->prev = far_branch_list;
4957
                    far_branch_list = bp;
4958
                    bp->far_label
4959
                      = XEXP (XEXP (SET_SRC (PATTERN (insn)), 1), 0);
4960
                    LABEL_NUSES (bp->far_label)++;
4961
                  }
4962
                else
4963
                  {
4964
                    label = bp->near_label;
4965
                    if (! label && bp->address - addr >= CONDJUMP_MIN)
4966
                      {
4967
                        rtx block = bp->insert_place;
4968
 
4969
                        if (GET_CODE (PATTERN (block)) == RETURN)
4970
                          block = PREV_INSN (block);
4971
                        else
4972
                          block = gen_block_redirect (block,
4973
                                                      bp->address, 2);
4974
                        label = emit_label_after (gen_label_rtx (),
4975
                                                  PREV_INSN (block));
4976
                        bp->near_label = label;
4977
                      }
4978
                    else if (label && ! NEXT_INSN (label))
4979
                      {
4980
                        if (addr + 2 - bp->address <= CONDJUMP_MAX)
4981
                          bp->insert_place = insn;
4982
                        else
4983
                          gen_far_branch (bp);
4984
                      }
4985
                  }
4986
                if (! label
4987
                    || (NEXT_INSN (label) && bp->address - addr < CONDJUMP_MIN))
4988
                  {
4989
                    bp->near_label = label = gen_label_rtx ();
4990
                    bp->insert_place = insn;
4991
                    bp->address = addr;
4992
                  }
4993
                ok = redirect_jump (insn, label, 1);
4994
                gcc_assert (ok);
4995
              }
4996
            else
4997
              {
4998
                /* get_attr_length (insn) == 2 */
4999
                /* Check if we have a pattern where reorg wants to redirect
5000
                   the branch to a label from an unconditional branch that
5001
                   is too far away.  */
5002
                /* We can't use JUMP_LABEL here because it might be undefined
5003
                   when not optimizing.  */
5004
                /* A syntax error might cause beyond to be NULL_RTX.  */
5005
                beyond
5006
                  = next_active_insn (XEXP (XEXP (SET_SRC (PATTERN (insn)), 1),
5007
                                            0));
5008
 
5009
                if (beyond
5010
                    && (GET_CODE (beyond) == JUMP_INSN
5011
                        || ((beyond = next_active_insn (beyond))
5012
                            && GET_CODE (beyond) == JUMP_INSN))
5013
                    && GET_CODE (PATTERN (beyond)) == SET
5014
                    && recog_memoized (beyond) == CODE_FOR_jump_compact
5015
                    && ((INSN_ADDRESSES
5016
                         (INSN_UID (XEXP (SET_SRC (PATTERN (beyond)), 0)))
5017
                         - INSN_ADDRESSES (INSN_UID (insn)) + (unsigned) 252)
5018
                        > 252 + 258 + 2))
5019
                  gen_block_redirect (beyond,
5020
                                      INSN_ADDRESSES (INSN_UID (beyond)), 1);
5021
              }
5022
 
5023
            next = next_active_insn (insn);
5024
 
5025
            if ((GET_CODE (next) == JUMP_INSN
5026
                 || ((next = next_active_insn (next))
5027
                     && GET_CODE (next) == JUMP_INSN))
5028
                && GET_CODE (PATTERN (next)) == SET
5029
                && recog_memoized (next) == CODE_FOR_jump_compact
5030
                && ((INSN_ADDRESSES
5031
                     (INSN_UID (XEXP (SET_SRC (PATTERN (next)), 0)))
5032
                     - INSN_ADDRESSES (INSN_UID (insn)) + (unsigned) 252)
5033
                    > 252 + 258 + 2))
5034
              gen_block_redirect (next, INSN_ADDRESSES (INSN_UID (next)), 1);
5035
          }
5036
        else if (type == TYPE_JUMP || type == TYPE_RETURN)
5037
          {
5038
            int addr = INSN_ADDRESSES (INSN_UID (insn));
5039
            rtx far_label = 0;
5040
            int dest_uid = 0;
5041
            struct far_branch *bp;
5042
 
5043
            if (type == TYPE_JUMP)
5044
              {
5045
                far_label = XEXP (SET_SRC (PATTERN (insn)), 0);
5046
                dest_uid = get_dest_uid (far_label, max_uid);
5047
                if (! dest_uid)
5048
                  {
5049
                    /* Parse errors can lead to labels outside
                       the insn stream.  */
5051
                    if (! NEXT_INSN (far_label))
5052
                      continue;
5053
 
5054
                    if (! optimize)
5055
                      {
5056
                        JUMP_LABEL (insn) = far_label;
5057
                        LABEL_NUSES (far_label)++;
5058
                      }
5059
                    redirect_jump (insn, NULL_RTX, 1);
5060
                    far_label = 0;
5061
                  }
5062
              }
5063
            bp = uid_branch[dest_uid];
5064
            if (! bp)
5065
              {
5066
                bp = (struct far_branch *) alloca (sizeof *bp);
5067
                uid_branch[dest_uid] = bp;
5068
                bp->prev = far_branch_list;
5069
                far_branch_list = bp;
5070
                bp->near_label = 0;
5071
                bp->far_label = far_label;
5072
                if (far_label)
5073
                  LABEL_NUSES (far_label)++;
5074
              }
5075
            else if (bp->near_label && ! NEXT_INSN (bp->near_label))
5076
              if (addr - bp->address <= CONDJUMP_MAX)
5077
                emit_label_after (bp->near_label, PREV_INSN (insn));
5078
              else
5079
                {
5080
                  gen_far_branch (bp);
5081
                  bp->near_label = 0;
5082
                }
5083
            else
5084
              bp->near_label = 0;
5085
            bp->address = addr;
5086
            bp->insert_place = insn;
5087
            if (! far_label)
5088
              emit_insn_before (gen_block_branch_redirect (const0_rtx), insn);
5089
            else
5090
              gen_block_redirect (insn, addr, bp->near_label ? 2 : 0);
5091
          }
5092
      }
5093
  /* Generate all pending far branches,
5094
     and free our references to the far labels.  */
5095
  while (far_branch_list)
5096
    {
5097
      if (far_branch_list->near_label
5098
          && ! NEXT_INSN (far_branch_list->near_label))
5099
        gen_far_branch (far_branch_list);
5100
      if (optimize
5101
          && far_branch_list->far_label
5102
          && ! --LABEL_NUSES (far_branch_list->far_label))
5103
        delete_insn (far_branch_list->far_label);
5104
      far_branch_list = far_branch_list->prev;
5105
    }
5106
 
5107
  /* Instruction length information is no longer valid due to the new
5108
     instructions that have been generated.  */
5109
  init_insn_lengths ();
5110
}
5111
 
5112
/* Dump out instruction addresses, which is useful for debugging the
5113
   constant pool table stuff.
5114
 
5115
   If relaxing, output the label and pseudo-ops used to link together
5116
   calls and the instruction which set the registers.  */
5117
 
5118
/* ??? The addresses printed by this routine for insns are nonsense for
5119
   insns which are inside of a sequence where none of the inner insns have
5120
   variable length.  This is because the second pass of shorten_branches
5121
   does not bother to update them.  */
5122
 
5123
void
5124
final_prescan_insn (rtx insn, rtx *opvec ATTRIBUTE_UNUSED,
5125
                    int noperands ATTRIBUTE_UNUSED)
5126
{
5127
  if (TARGET_DUMPISIZE)
5128
    fprintf (asm_out_file, "\n! at %04x\n", INSN_ADDRESSES (INSN_UID (insn)));
5129
 
5130
  if (TARGET_RELAX)
5131
    {
5132
      rtx note;
5133
 
5134
      note = find_reg_note (insn, REG_LABEL, NULL_RTX);
5135
      if (note)
5136
        {
5137
          rtx pattern;
5138
 
5139
          pattern = PATTERN (insn);
5140
          if (GET_CODE (pattern) == PARALLEL)
5141
            pattern = XVECEXP (pattern, 0, 0);
5142
          switch (GET_CODE (pattern))
5143
            {
5144
            case SET:
5145
              if (GET_CODE (SET_SRC (pattern)) != CALL
5146
                  && get_attr_type (insn) != TYPE_SFUNC)
5147
                {
5148
                  targetm.asm_out.internal_label
5149
                    (asm_out_file, "L", CODE_LABEL_NUMBER (XEXP (note, 0)));
5150
                  break;
5151
                }
5152
              /* else FALLTHROUGH */
5153
            case CALL:
5154
              asm_fprintf (asm_out_file, "\t.uses %LL%d\n",
5155
                           CODE_LABEL_NUMBER (XEXP (note, 0)));
5156
              break;
5157
 
5158
            default:
5159
              gcc_unreachable ();
5160
            }
5161
        }
5162
    }
5163
}
5164
 
5165
/* Dump out any constants accumulated in the final pass.  These will
5166
   only be labels.  */
5167
 
5168
const char *
5169
output_jump_label_table (void)
5170
{
5171
  int i;
5172
 
5173
  if (pool_size)
5174
    {
5175
      fprintf (asm_out_file, "\t.align 2\n");
5176
      for (i = 0; i < pool_size; i++)
5177
        {
5178
          pool_node *p = &pool_vector[i];
5179
 
5180
          (*targetm.asm_out.internal_label) (asm_out_file, "L",
5181
                                     CODE_LABEL_NUMBER (p->label));
5182
          output_asm_insn (".long       %O0", &p->value);
5183
        }
5184
      pool_size = 0;
5185
    }
5186
 
5187
  return "";
5188
}
5189
 
5190
/* A full frame looks like:
5191
 
5192
   arg-5
5193
   arg-4
5194
   [ if current_function_anonymous_args
5195
   arg-3
5196
   arg-2
5197
   arg-1
5198
   arg-0 ]
5199
   saved-fp
5200
   saved-r10
5201
   saved-r11
5202
   saved-r12
5203
   saved-pr
5204
   local-n
5205
   ..
5206
   local-1
5207
   local-0        <- fp points here.  */
5208
 
5209
/* Number of bytes pushed for anonymous args, used to pass information
5210
   between expand_prologue and expand_epilogue.  */
5211
 
5212
/* Adjust the stack by SIZE bytes.  REG holds the rtl of the register to be
5213
   adjusted.  If epilogue_p is zero, this is for a prologue; otherwise, it's
5214
   for an epilogue and a negative value means that it's for a sibcall
5215
   epilogue.  If LIVE_REGS_MASK is nonzero, it points to a HARD_REG_SET of
5216
   all the registers that are about to be restored, and hence dead.  */
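/* For example (illustrative), sh_expand_prologue below starts with
       output_stack_adjust (-pretend_args
                            - current_function_args_info.stack_regs * 8,
                            stack_pointer_rtx, 0, NULL);
   to reserve the pretend-argument area, and the epilogue code later calls
   this function with a positive SIZE and a nonzero EPILOGUE_P to release
   the space again.  */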
5217
 
5218
static void
5219
output_stack_adjust (int size, rtx reg, int epilogue_p,
5220
                     HARD_REG_SET *live_regs_mask)
5221
{
5222
  rtx (*emit_fn) (rtx) = epilogue_p ? &emit_insn : &frame_insn;
5223
  if (size)
5224
    {
5225
      HOST_WIDE_INT align = STACK_BOUNDARY / BITS_PER_UNIT;
5226
 
5227
/* This test is bogus, as output_stack_adjust is used to re-align the
5228
   stack.  */
5229
#if 0
5230
      gcc_assert (!(size % align));
5231
#endif
5232
 
5233
      if (CONST_OK_FOR_ADD (size))
5234
        emit_fn (GEN_ADD3 (reg, reg, GEN_INT (size)));
5235
      /* Try to do it with two partial adjustments; however, we must make
5236
         sure that the stack is properly aligned at all times, in case
5237
         an interrupt occurs between the two partial adjustments.  */
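      /* Illustrative example, assuming the add-immediate range is roughly
         -128..127 as on plain SH: size == 184 with align == 8 is split into
         88 (184 / 2 rounded down to a multiple of 8) followed by 96, so the
         stack pointer stays 8-byte aligned after the first add and both
         constants still fit into a single add immediate.  */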
5238
      else if (CONST_OK_FOR_ADD (size / 2 & -align)
5239
               && CONST_OK_FOR_ADD (size - (size / 2 & -align)))
5240
        {
5241
          emit_fn (GEN_ADD3 (reg, reg, GEN_INT (size / 2 & -align)));
5242
          emit_fn (GEN_ADD3 (reg, reg, GEN_INT (size - (size / 2 & -align))));
5243
        }
5244
      else
5245
        {
5246
          rtx const_reg;
5247
          rtx insn;
5248
          int temp = epilogue_p ? 7 : (TARGET_SH5 ? 0 : 1);
5249
          int i;
5250
 
5251
          /* If TEMP is invalid, we could temporarily save a general
5252
             register to MACL.  However, there is currently no need
5253
             to handle this case, so just die when we see it.  */
5254
          if (epilogue_p < 0
5255
              || current_function_interrupt
5256
              || ! call_really_used_regs[temp] || fixed_regs[temp])
5257
            temp = -1;
5258
          if (temp < 0 && ! current_function_interrupt
5259
              && (TARGET_SHMEDIA || epilogue_p >= 0))
5260
            {
5261
              HARD_REG_SET temps;
5262
              COPY_HARD_REG_SET (temps, call_used_reg_set);
5263
              AND_COMPL_HARD_REG_SET (temps, call_fixed_reg_set);
5264
              if (epilogue_p > 0)
5265
                {
5266
                  int nreg = 0;
5267
                  if (current_function_return_rtx)
5268
                    {
5269
                      enum machine_mode mode;
5270
                      mode = GET_MODE (current_function_return_rtx);
5271
                      if (BASE_RETURN_VALUE_REG (mode) == FIRST_RET_REG)
5272
                        nreg = HARD_REGNO_NREGS (FIRST_RET_REG, mode);
5273
                    }
5274
                  for (i = 0; i < nreg; i++)
5275
                    CLEAR_HARD_REG_BIT (temps, FIRST_RET_REG + i);
5276
                  if (current_function_calls_eh_return)
5277
                    {
5278
                      CLEAR_HARD_REG_BIT (temps, EH_RETURN_STACKADJ_REGNO);
5279
                      for (i = 0; i <= 3; i++)
5280
                        CLEAR_HARD_REG_BIT (temps, EH_RETURN_DATA_REGNO (i));
5281
                    }
5282
                }
5283
              if (TARGET_SHMEDIA && epilogue_p < 0)
5284
                for (i = FIRST_TARGET_REG; i <= LAST_TARGET_REG; i++)
5285
                  CLEAR_HARD_REG_BIT (temps, i);
5286
              if (epilogue_p <= 0)
5287
                {
5288
                  for (i = FIRST_PARM_REG;
5289
                       i < FIRST_PARM_REG + NPARM_REGS (SImode); i++)
5290
                    CLEAR_HARD_REG_BIT (temps, i);
5291
                  if (cfun->static_chain_decl != NULL)
5292
                    CLEAR_HARD_REG_BIT (temps, STATIC_CHAIN_REGNUM);
5293
                }
5294
              temp = scavenge_reg (&temps);
5295
            }
5296
          if (temp < 0 && live_regs_mask)
5297
            {
5298
              HARD_REG_SET temps;
5299
 
5300
              COPY_HARD_REG_SET (temps, *live_regs_mask);
5301
              CLEAR_HARD_REG_BIT (temps, REGNO (reg));
5302
              temp = scavenge_reg (&temps);
5303
            }
5304
          if (temp < 0)
5305
            {
5306
              rtx adj_reg, tmp_reg, mem;
5307
 
5308
              /* If we reached here, the most likely case is the (sibcall)
                 epilogue for non-SHmedia.  Put a special push/pop sequence
                 for such a case as a last resort.  This looks lengthy, but
                 it should not be a problem because such cases seem to be
                 very rare.  */
5313
 
5314
              gcc_assert (!TARGET_SHMEDIA && epilogue_p);
5315
 
5316
 
5317
               /* ??? There is still the slight possibility that r4 or
5318
                  r5 have been reserved as fixed registers or assigned
5319
                  as global registers, and they change during an
5320
                  interrupt.  There are possible ways to handle this:
5321
 
5322
                  - If we are adjusting the frame pointer (r14), we can do
5323
                    with a single temp register and an ordinary push / pop
5324
                    on the stack.
5325
                  - Grab any call-used or call-saved registers (i.e. not
5326
                    fixed or globals) for the temps we need.  We might
5327
                    also grab r14 if we are adjusting the stack pointer.
5328
                    If we can't find enough available registers, issue
5329
                    a diagnostic and die - the user must have reserved
5330
                    way too many registers.
5331
                 But since all this is rather unlikely to happen and
5332
                 would require extra testing, we just die if r4 / r5
5333
                 are not available.  */
5334
              gcc_assert (!fixed_regs[4] && !fixed_regs[5]
5335
                          && !global_regs[4] && !global_regs[5]);
5336
 
5337
              adj_reg = gen_rtx_REG (GET_MODE (reg), 4);
5338
              tmp_reg = gen_rtx_REG (GET_MODE (reg), 5);
5339
              emit_move_insn (gen_tmp_stack_mem (Pmode, reg), adj_reg);
5340
              emit_insn (GEN_MOV (adj_reg, GEN_INT (size)));
5341
              emit_insn (GEN_ADD3 (adj_reg, adj_reg, reg));
5342
              mem = gen_tmp_stack_mem (Pmode, gen_rtx_PRE_DEC (Pmode, adj_reg));
5343
              emit_move_insn (mem, tmp_reg);
5344
              emit_move_insn (tmp_reg, gen_tmp_stack_mem (Pmode, reg));
5345
              mem = gen_tmp_stack_mem (Pmode, gen_rtx_PRE_DEC (Pmode, adj_reg));
5346
              emit_move_insn (mem, tmp_reg);
5347
              emit_move_insn (reg, adj_reg);
5348
              mem = gen_tmp_stack_mem (Pmode, gen_rtx_POST_INC (Pmode, reg));
5349
              emit_move_insn (adj_reg, mem);
5350
              mem = gen_tmp_stack_mem (Pmode, gen_rtx_POST_INC (Pmode, reg));
5351
              emit_move_insn (tmp_reg, mem);
5352
              /* Tell flow the insns that pop r4/r5 aren't dead.  */
5353
              emit_insn (gen_rtx_USE (VOIDmode, tmp_reg));
5354
              emit_insn (gen_rtx_USE (VOIDmode, adj_reg));
5355
              return;
5356
            }
5357
          const_reg = gen_rtx_REG (GET_MODE (reg), temp);
5358
 
5359
          /* If SIZE is negative, subtract the positive value.
5360
             This sometimes allows a constant pool entry to be shared
5361
             between prologue and epilogue code.  */
5362
          if (size < 0)
5363
            {
5364
              emit_insn (GEN_MOV (const_reg, GEN_INT (-size)));
5365
              insn = emit_fn (GEN_SUB3 (reg, reg, const_reg));
5366
            }
5367
          else
5368
            {
5369
              emit_insn (GEN_MOV (const_reg, GEN_INT (size)));
5370
              insn = emit_fn (GEN_ADD3 (reg, reg, const_reg));
5371
            }
5372
          if (! epilogue_p)
5373
            REG_NOTES (insn)
5374
              = (gen_rtx_EXPR_LIST
5375
                 (REG_FRAME_RELATED_EXPR,
5376
                  gen_rtx_SET (VOIDmode, reg,
5377
                               gen_rtx_PLUS (SImode, reg, GEN_INT (size))),
5378
                  REG_NOTES (insn)));
5379
        }
5380
    }
5381
}
5382
 
5383
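/* Emit X as an insn and mark it as frame related, so that the unwind / CFI
   machinery knows to describe its effect in the unwind information.  */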
static rtx
frame_insn (rtx x)
{
  x = emit_insn (x);
  RTX_FRAME_RELATED_P (x) = 1;
  return x;
}
5390
 
5391
/* Output RTL to push register RN onto the stack.  */
5392
 
5393
static rtx
5394
push (int rn)
5395
{
5396
  rtx x;
5397
  if (rn == FPUL_REG)
5398
    x = gen_push_fpul ();
5399
  else if (rn == FPSCR_REG)
5400
    x = gen_push_fpscr ();
5401
  else if ((TARGET_SH4 || TARGET_SH2A_DOUBLE) && TARGET_FMOVD && ! TARGET_FPU_SINGLE
5402
           && FP_OR_XD_REGISTER_P (rn))
5403
    {
5404
      if (FP_REGISTER_P (rn) && (rn - FIRST_FP_REG) & 1)
5405
        return NULL_RTX;
5406
      x = gen_push_4 (gen_rtx_REG (DFmode, rn));
5407
    }
5408
  else if (TARGET_SH2E && FP_REGISTER_P (rn))
5409
    x = gen_push_e (gen_rtx_REG (SFmode, rn));
5410
  else
5411
    x = gen_push (gen_rtx_REG (SImode, rn));
5412
 
5413
  x = frame_insn (x);
5414
  REG_NOTES (x)
5415
    = gen_rtx_EXPR_LIST (REG_INC,
5416
                         gen_rtx_REG (SImode, STACK_POINTER_REGNUM), 0);
5417
  return x;
5418
}
5419
 
5420
/* Output RTL to pop register RN from the stack.  */
5421
 
5422
static void
5423
pop (int rn)
5424
{
5425
  rtx x;
5426
  if (rn == FPUL_REG)
5427
    x = gen_pop_fpul ();
5428
  else if (rn == FPSCR_REG)
5429
    x = gen_pop_fpscr ();
5430
  else if ((TARGET_SH4 || TARGET_SH2A_DOUBLE) && TARGET_FMOVD && ! TARGET_FPU_SINGLE
5431
           && FP_OR_XD_REGISTER_P (rn))
5432
    {
5433
      if (FP_REGISTER_P (rn) && (rn - FIRST_FP_REG) & 1)
5434
        return;
5435
      x = gen_pop_4 (gen_rtx_REG (DFmode, rn));
5436
    }
5437
  else if (TARGET_SH2E && FP_REGISTER_P (rn))
5438
    x = gen_pop_e (gen_rtx_REG (SFmode, rn));
5439
  else
5440
    x = gen_pop (gen_rtx_REG (SImode, rn));
5441
 
5442
  x = emit_insn (x);
5443
  REG_NOTES (x)
5444
    = gen_rtx_EXPR_LIST (REG_INC,
5445
                         gen_rtx_REG (SImode, STACK_POINTER_REGNUM), 0);
5446
}
5447
 
5448
/* Generate code to push the regs specified in the mask.  */
5449
 
5450
static void
5451
push_regs (HARD_REG_SET *mask, int interrupt_handler)
5452
{
5453
  int i;
5454
  int skip_fpscr = 0;
5455
 
5456
  /* Push PR last; this gives better latencies after the prologue, and
     provides candidates for the return delay slot when there are no
     general registers pushed.  */
5459
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
5460
    {
5461
      /* If this is an interrupt handler, and the SZ bit varies,
5462
         and we have to push any floating point register, we need
5463
         to switch to the correct precision first.  */
5464
      if (i == FIRST_FP_REG && interrupt_handler && TARGET_FMOVD
5465
          && hard_regs_intersect_p (mask, &reg_class_contents[DF_REGS]))
5466
        {
5467
          HARD_REG_SET unsaved;
5468
 
5469
          push (FPSCR_REG);
5470
          COMPL_HARD_REG_SET (unsaved, *mask);
5471
          fpscr_set_from_mem (NORMAL_MODE (FP_MODE), unsaved);
5472
          skip_fpscr = 1;
5473
        }
5474
      if (i != PR_REG
5475
          && (i != FPSCR_REG || ! skip_fpscr)
5476
          && TEST_HARD_REG_BIT (*mask, i))
5477
        push (i);
5478
    }
5479
  if (TEST_HARD_REG_BIT (*mask, PR_REG))
5480
    push (PR_REG);
5481
}
5482
 
5483
/* Calculate how much extra space is needed to save all callee-saved
5484
   target registers.
5485
   LIVE_REGS_MASK is the register mask calculated by calc_live_regs.  */
5486
 
5487
static int
5488
shmedia_target_regs_stack_space (HARD_REG_SET *live_regs_mask)
5489
{
5490
  int reg;
5491
  int stack_space = 0;
5492
  int interrupt_handler = sh_cfun_interrupt_handler_p ();
5493
 
5494
  for (reg = LAST_TARGET_REG; reg >= FIRST_TARGET_REG; reg--)
5495
    if ((! call_really_used_regs[reg] || interrupt_handler)
5496
        && ! TEST_HARD_REG_BIT (*live_regs_mask, reg))
5497
      /* Leave space to save this target register on the stack,
5498
         in case target register allocation wants to use it.  */
5499
      stack_space += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg));
5500
  return stack_space;
5501
}
5502
 
5503
/* Decide whether we should reserve space for callee-save target registers,
5504
   in case target register allocation wants to use them.  REGS_SAVED is
5505
   the space, in bytes, that is already required for register saves.
5506
   LIVE_REGS_MASK is the register mask calculated by calc_live_regs.  */
5507
 
5508
static int
5509
shmedia_reserve_space_for_target_registers_p (int regs_saved,
5510
                                              HARD_REG_SET *live_regs_mask)
5511
{
5512
  if (optimize_size)
5513
    return 0;
5514
  return shmedia_target_regs_stack_space (live_regs_mask) <= regs_saved;
5515
}
5516
 
5517
/* Decide how much space to reserve for callee-save target registers
5518
   in case target register allocation wants to use them.
5519
   LIVE_REGS_MASK is the register mask calculated by calc_live_regs.  */
5520
 
5521
static int
5522
shmedia_target_regs_stack_adjust (HARD_REG_SET *live_regs_mask)
5523
{
5524
  if (shmedia_space_reserved_for_target_registers)
5525
    return shmedia_target_regs_stack_space (live_regs_mask);
5526
  else
5527
    return 0;
5528
}
5529
 
5530
/* Work out the registers which need to be saved, both as a mask and a
   count of the bytes needed to save them.  Return the count.
5532
 
5533
   If doing a pragma interrupt function, then push all regs used by the
5534
   function, and if we call another function (we can tell by looking at PR),
5535
   make sure that all the regs it clobbers are safe too.  */
5536
 
5537
static int
5538
calc_live_regs (HARD_REG_SET *live_regs_mask)
5539
{
5540
  unsigned int reg;
5541
  int count;
5542
  tree attrs;
5543
  bool interrupt_or_trapa_handler, trapa_handler, interrupt_handler;
5544
  bool nosave_low_regs;
5545
  int pr_live, has_call;
5546
 
5547
  attrs = DECL_ATTRIBUTES (current_function_decl);
5548
  interrupt_or_trapa_handler = sh_cfun_interrupt_handler_p ();
5549
  trapa_handler = lookup_attribute ("trapa_handler", attrs) != NULL_TREE;
5550
  interrupt_handler = interrupt_or_trapa_handler && ! trapa_handler;
5551
  nosave_low_regs = lookup_attribute ("nosave_low_regs", attrs) != NULL_TREE;
5552
 
5553
  CLEAR_HARD_REG_SET (*live_regs_mask);
5554
  if ((TARGET_SH4 || TARGET_SH2A_DOUBLE) && TARGET_FMOVD && interrupt_handler
5555
      && regs_ever_live[FPSCR_REG])
5556
    target_flags &= ~MASK_FPU_SINGLE;
5557
  /* If we can eliminate a lot of save instructions by switching to double
     mode, do that.  */
5558
  else if ((TARGET_SH4 || TARGET_SH2A_DOUBLE) && TARGET_FMOVD && TARGET_FPU_SINGLE)
5559
    for (count = 0, reg = FIRST_FP_REG; reg <= LAST_FP_REG; reg += 2)
5560
      if (regs_ever_live[reg] && regs_ever_live[reg+1]
5561
          && (! call_really_used_regs[reg]
5562
              || interrupt_handler)
5563
          && ++count > 2)
5564
        {
5565
          target_flags &= ~MASK_FPU_SINGLE;
5566
          break;
5567
        }
5568
  /* PR_MEDIA_REG is a general purpose register, thus global_alloc already
5569
     knows how to use it.  That means the pseudo originally allocated for
5570
     the initial value can become the PR_MEDIA_REG hard register, as seen for
5571
     execute/20010122-1.c:test9.  */
5572
  if (TARGET_SHMEDIA)
5573
    /* ??? this function is called from initial_elimination_offset, hence we
5574
       can't use the result of sh_media_register_for_return here.  */
5575
    pr_live = sh_pr_n_sets ();
5576
  else
5577
    {
5578
      rtx pr_initial = has_hard_reg_initial_val (Pmode, PR_REG);
5579
      pr_live = (pr_initial
5580
                 ? (GET_CODE (pr_initial) != REG
5581
                    || REGNO (pr_initial) != (PR_REG))
5582
                 : regs_ever_live[PR_REG]);
5583
      /* For SHcompact, if not optimizing, we end up with a memory reference
5584
         using the return address pointer for __builtin_return_address even
5585
         though there is no actual need to put the PR register on the stack.  */
5586
      pr_live |= regs_ever_live[RETURN_ADDRESS_POINTER_REGNUM];
5587
    }
5588
  /* Force PR to be live if the prologue has to call the SHmedia
5589
     argument decoder or register saver.  */
5590
  if (TARGET_SHCOMPACT
5591
      && ((current_function_args_info.call_cookie
5592
           & ~ CALL_COOKIE_RET_TRAMP (1))
5593
          || current_function_has_nonlocal_label))
5594
    pr_live = 1;
5595
  has_call = TARGET_SHMEDIA ? ! leaf_function_p () : pr_live;
5596
  for (count = 0, reg = FIRST_PSEUDO_REGISTER; reg-- != 0; )
5597
    {
5598
      if (reg == (TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG)
5599
          ? pr_live
5600
          : interrupt_handler
5601
          ? (/* Need to save all the regs ever live.  */
5602
             (regs_ever_live[reg]
5603
              || (call_really_used_regs[reg]
5604
                  && (! fixed_regs[reg] || reg == MACH_REG || reg == MACL_REG
5605
                      || reg == PIC_OFFSET_TABLE_REGNUM)
5606
                  && has_call)
5607
              || (TARGET_SHMEDIA && has_call
5608
                  && REGISTER_NATURAL_MODE (reg) == SImode
5609
                  && (GENERAL_REGISTER_P (reg) || TARGET_REGISTER_P (reg))))
5610
             && reg != STACK_POINTER_REGNUM && reg != ARG_POINTER_REGNUM
5611
             && reg != RETURN_ADDRESS_POINTER_REGNUM
5612
             && reg != T_REG && reg != GBR_REG
5613
             /* Push fpscr only on targets which have an FPU.  */
5614
             && (reg != FPSCR_REG || TARGET_FPU_ANY))
5615
          : (/* Only push those regs which are used and need to be saved.  */
5616
             (TARGET_SHCOMPACT
5617
              && flag_pic
5618
              && current_function_args_info.call_cookie
5619
              && reg == PIC_OFFSET_TABLE_REGNUM)
5620
             || (regs_ever_live[reg]
5621
                 && (!call_really_used_regs[reg]
5622
                     || (trapa_handler && reg == FPSCR_REG && TARGET_FPU_ANY)))
5623
             || (current_function_calls_eh_return
5624
                 && (reg == EH_RETURN_DATA_REGNO (0)
5625
                     || reg == EH_RETURN_DATA_REGNO (1)
5626
                     || reg == EH_RETURN_DATA_REGNO (2)
5627
                     || reg == EH_RETURN_DATA_REGNO (3)))
5628
             || ((reg == MACL_REG || reg == MACH_REG)
5629
                 && regs_ever_live[reg]
5630
                 && sh_cfun_attr_renesas_p ())
5631
             ))
5632
        {
5633
          SET_HARD_REG_BIT (*live_regs_mask, reg);
5634
          count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg));
5635
 
5636
          if ((TARGET_SH4 || TARGET_SH2A_DOUBLE || TARGET_SH5) && TARGET_FMOVD
5637
              && GET_MODE_CLASS (REGISTER_NATURAL_MODE (reg)) == MODE_FLOAT)
5638
            {
5639
              if (FP_REGISTER_P (reg))
5640
                {
5641
                  if (! TARGET_FPU_SINGLE && ! regs_ever_live[reg ^ 1])
5642
                    {
5643
                      SET_HARD_REG_BIT (*live_regs_mask, (reg ^ 1));
5644
                      count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg ^ 1));
5645
                    }
5646
                }
5647
              else if (XD_REGISTER_P (reg))
5648
                {
5649
                  /* Must switch to double mode to access these registers.  */
5650
                  target_flags &= ~MASK_FPU_SINGLE;
5651
                }
5652
            }
5653
        }
5654
      if (nosave_low_regs && reg == R8_REG)
5655
        break;
5656
    }
5657
  /* If we have a target register optimization pass after prologue / epilogue
5658
     threading, we need to assume all target registers will be live even if
5659
     they aren't now.  */
5660
  if (flag_branch_target_load_optimize2
5661
      && TARGET_SAVE_ALL_TARGET_REGS
5662
      && shmedia_space_reserved_for_target_registers)
5663
    for (reg = LAST_TARGET_REG; reg >= FIRST_TARGET_REG; reg--)
5664
      if ((! call_really_used_regs[reg] || interrupt_handler)
5665
          && ! TEST_HARD_REG_BIT (*live_regs_mask, reg))
5666
        {
5667
          SET_HARD_REG_BIT (*live_regs_mask, reg);
5668
          count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (reg));
5669
        }
5670
  /* If this is an interrupt handler, we don't have any call-clobbered
5671
     registers we can conveniently use for target register save/restore.
5672
     Make sure we save at least one general purpose register when we need
5673
     to save target registers.  */
5674
  if (interrupt_handler
5675
      && hard_regs_intersect_p (live_regs_mask,
5676
                                &reg_class_contents[TARGET_REGS])
5677
      && ! hard_regs_intersect_p (live_regs_mask,
5678
                                  &reg_class_contents[GENERAL_REGS]))
5679
    {
5680
      SET_HARD_REG_BIT (*live_regs_mask, R0_REG);
5681
      count += GET_MODE_SIZE (REGISTER_NATURAL_MODE (R0_REG));
5682
    }
5683
 
5684
  return count;
5685
}
5686
 
5687
/* Code to generate prologue and epilogue sequences */
5688
 
5689
/* PUSHED is the number of bytes that are being pushed on the
5690
   stack for register saves.  Return the frame size, padded
5691
   appropriately so that the stack stays properly aligned.  */
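/* For example (illustrative): with 16 bytes of registers already pushed, a
   frame size of 20 and an 8-byte stack alignment, the formula below gives
   ((20 + 16 + 7) & -8) - 16 = 40 - 16 = 24, i.e. the frame is padded so that
   the pushed registers plus the frame together stay 8-byte aligned.  */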
static HOST_WIDE_INT
rounded_frame_size (int pushed)
{
  HOST_WIDE_INT size = get_frame_size ();
  HOST_WIDE_INT align = STACK_BOUNDARY / BITS_PER_UNIT;

  return ((size + pushed + align - 1) & -align) - pushed;
}
5700
 
5701
/* Choose a call-clobbered target-branch register that remains
5702
   unchanged along the whole function.  We set it up as the return
5703
   value in the prologue.  */
5704
int
5705
sh_media_register_for_return (void)
5706
{
5707
  int regno;
5708
  int tr0_used;
5709
 
5710
  if (! current_function_is_leaf)
5711
    return -1;
5712
  if (lookup_attribute ("interrupt_handler",
5713
                        DECL_ATTRIBUTES (current_function_decl)))
5714
    return -1;
5715
  if (sh_cfun_interrupt_handler_p ())
5716
    return -1;
5717
 
5718
  tr0_used = flag_pic && regs_ever_live[PIC_OFFSET_TABLE_REGNUM];
5719
 
5720
  for (regno = FIRST_TARGET_REG + tr0_used; regno <= LAST_TARGET_REG; regno++)
5721
    if (call_really_used_regs[regno] && ! regs_ever_live[regno])
5722
      return regno;
5723
 
5724
  return -1;
5725
}
5726
 
5727
/* The maximum registers we need to save are:
5728
   - 62 general purpose registers (r15 is stack pointer, r63 is zero)
5729
   - 32 floating point registers (for each pair, we save none,
5730
         one single precision value, or a double precision value).
5731
   -  8 target registers
5732
   -  add 1 entry for a delimiter.  */
5733
#define MAX_SAVED_REGS (62+32+8)
5734
 
5735
typedef struct save_entry_s
5736
{
5737
  unsigned char reg;
5738
  unsigned char mode;
5739
  short offset;
5740
} save_entry;
5741
 
5742
#define MAX_TEMPS 4
5743
 
5744
/* There will be a delimiter entry with VOIDmode both at the start and the
5745
   end of a filled in schedule.  The end delimiter has the offset of the
5746
   save with the smallest (i.e. most negative) offset.  */
5747
typedef struct save_schedule_s
5748
{
5749
  save_entry entries[MAX_SAVED_REGS + 2];
5750
  int temps[MAX_TEMPS+1];
5751
} save_schedule;
5752
 
5753
/* Fill in SCHEDULE according to LIVE_REGS_MASK.  If RESTORE is nonzero,
5754
   use reverse order.  Returns the last entry written to (not counting
5755
   the delimiter).  OFFSET_BASE is a number to be added to all offset
5756
   entries.  */
5757
 
5758
static save_entry *
5759
sh5_schedule_saves (HARD_REG_SET *live_regs_mask, save_schedule *schedule,
5760
                    int offset_base)
5761
{
5762
  int align, i;
5763
  save_entry *entry = schedule->entries;
5764
  int tmpx = 0;
5765
  int offset;
5766
 
5767
  if (! current_function_interrupt)
5768
    for (i = FIRST_GENERAL_REG; tmpx < MAX_TEMPS && i <= LAST_GENERAL_REG; i++)
5769
      if (call_really_used_regs[i] && ! fixed_regs[i] && i != PR_MEDIA_REG
5770
          && ! FUNCTION_ARG_REGNO_P (i)
5771
          && i != FIRST_RET_REG
5772
          && ! (cfun->static_chain_decl != NULL && i == STATIC_CHAIN_REGNUM)
5773
          && ! (current_function_calls_eh_return
5774
                && (i == EH_RETURN_STACKADJ_REGNO
5775
                    || ((unsigned) i >= EH_RETURN_DATA_REGNO (0)
5776
                        && (unsigned) i <= EH_RETURN_DATA_REGNO (3)))))
5777
        schedule->temps[tmpx++] = i;
5778
  entry->reg = -1;
5779
  entry->mode = VOIDmode;
5780
  entry->offset = offset_base;
5781
  entry++;
5782
  /* We loop twice: first, we save 8-byte aligned registers in the
5783
     higher addresses, that are known to be aligned.  Then, we
5784
     proceed to saving 32-bit registers that don't need 8-byte
5785
     alignment.
5786
     If this is an interrupt function, all registers that need saving
5787
     need to be saved in full.  Moreover, we need to postpone saving
     target registers until we have saved some general purpose registers
     that we can then use as scratch registers.  */
5790
  offset = offset_base;
5791
  for (align = 1; align >= 0; align--)
5792
    {
5793
      for (i = FIRST_PSEUDO_REGISTER - 1; i >= 0; i--)
5794
        if (TEST_HARD_REG_BIT (*live_regs_mask, i))
5795
          {
5796
            enum machine_mode mode = REGISTER_NATURAL_MODE (i);
5797
            int reg = i;
5798
 
5799
            if (current_function_interrupt)
5800
              {
5801
                if (TARGET_REGISTER_P (i))
5802
                  continue;
5803
                if (GENERAL_REGISTER_P (i))
5804
                  mode = DImode;
5805
              }
5806
            if (mode == SFmode && (i % 2) == 1
5807
                && ! TARGET_FPU_SINGLE && FP_REGISTER_P (i)
5808
                && (TEST_HARD_REG_BIT (*live_regs_mask, (i ^ 1))))
5809
              {
5810
                mode = DFmode;
5811
                i--;
5812
                reg--;
5813
              }
5814
 
5815
            /* If we're doing the aligned pass and this is not aligned,
5816
               or we're doing the unaligned pass and this is aligned,
5817
               skip it.  */
5818
            if ((GET_MODE_SIZE (mode) % (STACK_BOUNDARY / BITS_PER_UNIT) == 0)
5819
                != align)
5820
              continue;
5821
 
5822
            if (current_function_interrupt
5823
                && GENERAL_REGISTER_P (i)
5824
                && tmpx < MAX_TEMPS)
5825
              schedule->temps[tmpx++] = i;
5826
 
5827
            offset -= GET_MODE_SIZE (mode);
5828
            entry->reg = i;
5829
            entry->mode = mode;
5830
            entry->offset = offset;
5831
            entry++;
5832
          }
5833
      if (align && current_function_interrupt)
5834
        for (i = LAST_TARGET_REG; i >= FIRST_TARGET_REG; i--)
5835
          if (TEST_HARD_REG_BIT (*live_regs_mask, i))
5836
            {
5837
              offset -= GET_MODE_SIZE (DImode);
5838
              entry->reg = i;
5839
              entry->mode = DImode;
5840
              entry->offset = offset;
5841
              entry++;
5842
            }
5843
    }
5844
  entry->reg = -1;
5845
  entry->mode = VOIDmode;
5846
  entry->offset = offset;
5847
  schedule->temps[tmpx] = -1;
5848
  return entry - 1;
5849
}
5850
 
5851
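/* Expand RTL for the prologue of the current function.  */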
void
5852
sh_expand_prologue (void)
5853
{
5854
  HARD_REG_SET live_regs_mask;
5855
  int d, i;
5856
  int d_rounding = 0;
5857
  int save_flags = target_flags;
5858
  int pretend_args;
5859
  tree sp_switch_attr
5860
    = lookup_attribute ("sp_switch", DECL_ATTRIBUTES (current_function_decl));
5861
 
5862
  current_function_interrupt = sh_cfun_interrupt_handler_p ();
5863
 
5864
  /* We have pretend args if we had an object sent partially in registers
5865
     and partially on the stack, e.g. a large structure.  */
5866
  pretend_args = current_function_pretend_args_size;
5867
  if (TARGET_VARARGS_PRETEND_ARGS (current_function_decl)
5868
      && (NPARM_REGS(SImode)
5869
          > current_function_args_info.arg_count[(int) SH_ARG_INT]))
5870
    pretend_args = 0;
5871
  output_stack_adjust (-pretend_args
5872
                       - current_function_args_info.stack_regs * 8,
5873
                       stack_pointer_rtx, 0, NULL);
5874
 
5875
  if (TARGET_SHCOMPACT && flag_pic && current_function_args_info.call_cookie)
5876
    /* We're going to use the PIC register to load the address of the
5877
       incoming-argument decoder and/or of the return trampoline from
5878
       the GOT, so make sure the PIC register is preserved and
5879
       initialized.  */
5880
    regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
5881
 
5882
  if (TARGET_SHCOMPACT
5883
      && (current_function_args_info.call_cookie & ~ CALL_COOKIE_RET_TRAMP(1)))
5884
    {
5885
      int reg;
5886
 
5887
      /* First, make all registers with incoming arguments that will
5888
         be pushed onto the stack live, so that register renaming
5889
         doesn't overwrite them.  */
5890
      for (reg = 0; reg < NPARM_REGS (SImode); reg++)
5891
        if (CALL_COOKIE_STACKSEQ_GET (current_function_args_info.call_cookie)
5892
            >= NPARM_REGS (SImode) - reg)
5893
          for (; reg < NPARM_REGS (SImode); reg++)
5894
            emit_insn (gen_shcompact_preserve_incoming_args
5895
                       (gen_rtx_REG (SImode, FIRST_PARM_REG + reg)));
5896
        else if (CALL_COOKIE_INT_REG_GET
5897
                 (current_function_args_info.call_cookie, reg) == 1)
5898
          emit_insn (gen_shcompact_preserve_incoming_args
5899
                     (gen_rtx_REG (SImode, FIRST_PARM_REG + reg)));
5900
 
5901
      emit_move_insn (gen_rtx_REG (Pmode, MACL_REG),
5902
                      stack_pointer_rtx);
5903
      emit_move_insn (gen_rtx_REG (SImode, R0_REG),
5904
                      GEN_INT (current_function_args_info.call_cookie));
5905
      emit_move_insn (gen_rtx_REG (SImode, MACH_REG),
5906
                      gen_rtx_REG (SImode, R0_REG));
5907
    }
5908
  else if (TARGET_SHMEDIA)
5909
    {
5910
      int tr = sh_media_register_for_return ();
5911
 
5912
      if (tr >= 0)
5913
        {
5914
          rtx insn = emit_move_insn (gen_rtx_REG (DImode, tr),
5915
                                     gen_rtx_REG (DImode, PR_MEDIA_REG));
5916
 
5917
          /* ??? We should suppress saving pr when we don't need it, but this
5918
             is tricky because of builtin_return_address.  */
5919
 
5920
          /* If this function only exits with sibcalls, this copy
5921
             will be flagged as dead.  */
5922
          REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD,
5923
                                                const0_rtx,
5924
                                                REG_NOTES (insn));
5925
        }
5926
    }
5927
 
5928
  /* Emit the code for SETUP_VARARGS.  */
5929
  if (current_function_stdarg)
5930
    {
5931
      if (TARGET_VARARGS_PRETEND_ARGS (current_function_decl))
5932
        {
5933
          /* Push arg regs as if the caller had provided them on the stack.  */
5934
          for (i = 0; i < NPARM_REGS(SImode); i++)
5935
            {
5936
              int rn = NPARM_REGS(SImode) + FIRST_PARM_REG - i - 1;
5937
              rtx insn;
5938
 
5939
              if (i >= (NPARM_REGS(SImode)
5940
                        - current_function_args_info.arg_count[(int) SH_ARG_INT]
5941
                        ))
5942
                break;
5943
              insn = push (rn);
5944
              RTX_FRAME_RELATED_P (insn) = 0;
5945
            }
5946
        }
5947
    }
5948
 
5949
  /* If we're supposed to switch stacks at function entry, do so now.  */
5950
  if (sp_switch_attr)
5951
    {
5952
      /* The argument specifies a variable holding the address of the
5953
         stack the interrupt function should switch to/from at entry/exit.  */
5954
      const char *s
5955
        = ggc_strdup (TREE_STRING_POINTER (TREE_VALUE (sp_switch_attr)));
5956
      rtx sp_switch = gen_rtx_SYMBOL_REF (Pmode, s);
5957
 
5958
      emit_insn (gen_sp_switch_1 (sp_switch));
5959
    }
5960
 
5961
  d = calc_live_regs (&live_regs_mask);
5962
  /* ??? Maybe we could save some switching if we can move a mode switch
5963
     that already happens to be at the function start into the prologue.  */
5964
  if (target_flags != save_flags && ! current_function_interrupt)
5965
    emit_insn (gen_toggle_sz ());
5966
 
5967
  if (TARGET_SH5)
5968
    {
5969
      int offset_base, offset;
5970
      rtx r0 = NULL_RTX;
5971
      int offset_in_r0 = -1;
5972
      int sp_in_r0 = 0;
5973
      int tregs_space = shmedia_target_regs_stack_adjust (&live_regs_mask);
5974
      int total_size, save_size;
5975
      save_schedule schedule;
5976
      save_entry *entry;
5977
      int *tmp_pnt;
5978
 
5979
      if (call_really_used_regs[R0_REG] && ! fixed_regs[R0_REG]
5980
          && ! current_function_interrupt)
5981
        r0 = gen_rtx_REG (Pmode, R0_REG);
5982
 
5983
      /* D is the actual number of bytes that we need for saving registers;
5984
         however, in initial_elimination_offset we have committed to using
5985
         an additional TREGS_SPACE amount of bytes - in order to keep both
5986
         addresses to arguments supplied by the caller and local variables
5987
         valid, we must keep this gap.  Place it between the incoming
5988
         arguments and the actually saved registers in a bid to optimize
5989
         locality of reference.  */
5990
      total_size = d + tregs_space;
5991
      total_size += rounded_frame_size (total_size);
5992
      save_size = total_size - rounded_frame_size (d);
5993
      if (save_size % (STACK_BOUNDARY / BITS_PER_UNIT))
5994
        d_rounding = ((STACK_BOUNDARY / BITS_PER_UNIT)
5995
                        - save_size % (STACK_BOUNDARY / BITS_PER_UNIT));
5996
 
5997
      /* If adjusting the stack in a single step costs nothing extra, do so.
5998
         I.e. either if a single addi is enough, or we need a movi anyway,
5999
         and we don't exceed the maximum offset range (the test for the
6000
         latter is conservative for simplicity).  */
6001
      if (TARGET_SHMEDIA
6002
          && (CONST_OK_FOR_I10 (-total_size)
6003
              || (! CONST_OK_FOR_I10 (-(save_size + d_rounding))
6004
                  && total_size <= 2044)))
6005
        d_rounding = total_size - save_size;
6006
 
6007
      offset_base = d + d_rounding;
6008
 
6009
      output_stack_adjust (-(save_size + d_rounding), stack_pointer_rtx,
6010
                           0, NULL);
6011
 
6012
      sh5_schedule_saves (&live_regs_mask, &schedule, offset_base);
6013
      tmp_pnt = schedule.temps;
6014
      for (entry = &schedule.entries[1]; entry->mode != VOIDmode; entry++)
6015
        {
6016
          enum machine_mode mode = entry->mode;
6017
          unsigned int reg = entry->reg;
6018
          rtx reg_rtx, mem_rtx, pre_dec = NULL_RTX;
6019
          rtx orig_reg_rtx;
6020
 
6021
          offset = entry->offset;
6022
 
6023
          reg_rtx = gen_rtx_REG (mode, reg);
6024
 
6025
          mem_rtx = gen_frame_mem (mode,
6026
                                   gen_rtx_PLUS (Pmode,
6027
                                                 stack_pointer_rtx,
6028
                                                 GEN_INT (offset)));
6029
 
6030
          GO_IF_LEGITIMATE_ADDRESS (mode, XEXP (mem_rtx, 0), try_pre_dec);
6031
 
6032
          gcc_assert (r0);
6033
          mem_rtx = NULL_RTX;
6034
 
6035
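          /* Try a pre-decrement store through r0.  If the pre-decrement
             address is not legitimate for MODE, pre_dec is left as
             NULL_RTX and the plain address computed above (or an r0-based
             one) is used instead.  */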
        try_pre_dec:
6036
          do
6037
            if (HAVE_PRE_DECREMENT
6038
                && (offset_in_r0 - offset == GET_MODE_SIZE (mode)
6039
                    || mem_rtx == NULL_RTX
6040
                    || reg == PR_REG || SPECIAL_REGISTER_P (reg)))
6041
              {
6042
                pre_dec = gen_frame_mem (mode, gen_rtx_PRE_DEC (Pmode, r0));
6043
 
6044
                GO_IF_LEGITIMATE_ADDRESS (mode, XEXP (pre_dec, 0),
6045
                                          pre_dec_ok);
6046
 
6047
                pre_dec = NULL_RTX;
6048
 
6049
                break;
6050
 
6051
              pre_dec_ok:
6052
                mem_rtx = NULL_RTX;
6053
                offset += GET_MODE_SIZE (mode);
6054
              }
6055
          while (0);
6056
 
6057
          if (mem_rtx != NULL_RTX)
6058
            goto addr_ok;
6059
 
6060
          if (offset_in_r0 == -1)
6061
            {
6062
              emit_move_insn (r0, GEN_INT (offset));
6063
              offset_in_r0 = offset;
6064
            }
6065
          else if (offset != offset_in_r0)
6066
            {
6067
              emit_move_insn (r0,
6068
                              gen_rtx_PLUS
6069
                              (Pmode, r0,
6070
                               GEN_INT (offset - offset_in_r0)));
6071
              offset_in_r0 += offset - offset_in_r0;
6072
            }
6073
 
6074
          if (pre_dec != NULL_RTX)
6075
            {
6076
              if (! sp_in_r0)
6077
                {
6078
                  emit_move_insn (r0,
6079
                                  gen_rtx_PLUS
6080
                                  (Pmode, r0, stack_pointer_rtx));
6081
                  sp_in_r0 = 1;
6082
                }
6083
 
6084
              offset -= GET_MODE_SIZE (mode);
6085
              offset_in_r0 -= GET_MODE_SIZE (mode);
6086
 
6087
              mem_rtx = pre_dec;
6088
            }
6089
          else if (sp_in_r0)
6090
            mem_rtx = gen_frame_mem (mode, r0);
6091
          else
6092
            mem_rtx = gen_frame_mem (mode,
6093
                                     gen_rtx_PLUS (Pmode,
6094
                                                   stack_pointer_rtx,
6095
                                                   r0));
6096
 
6097
          /* We must not use an r0-based address for target-branch
6098
             registers or for special registers without pre-dec
6099
             memory addresses, since we store their values in r0
6100
             first.  */
6101
          gcc_assert (!TARGET_REGISTER_P (reg)
6102
                      && ((reg != PR_REG && !SPECIAL_REGISTER_P (reg))
6103
                          || mem_rtx == pre_dec));
6104
 
6105
        addr_ok:
6106
          orig_reg_rtx = reg_rtx;
6107
          if (TARGET_REGISTER_P (reg)
6108
              || ((reg == PR_REG || SPECIAL_REGISTER_P (reg))
6109
                  && mem_rtx != pre_dec))
6110
            {
6111
              rtx tmp_reg = gen_rtx_REG (GET_MODE (reg_rtx), *tmp_pnt);
6112
 
6113
              emit_move_insn (tmp_reg, reg_rtx);
6114
 
6115
              if (REGNO (tmp_reg) == R0_REG)
6116
                {
6117
                  offset_in_r0 = -1;
6118
                  sp_in_r0 = 0;
6119
                  gcc_assert (!refers_to_regno_p
6120
                              (R0_REG, R0_REG+1, mem_rtx, (rtx *) 0));
6121
                }
6122
 
6123
              if (*++tmp_pnt <= 0)
6124
                tmp_pnt = schedule.temps;
6125
 
6126
              reg_rtx = tmp_reg;
6127
            }
6128
          {
6129
            rtx insn;
6130
 
6131
            /* Mark as interesting for the DWARF CFI generator.  */
6132
            insn = emit_move_insn (mem_rtx, reg_rtx);
6133
            RTX_FRAME_RELATED_P (insn) = 1;
6134
            /* If we use an intermediate register for the save, we can't
6135
               describe this exactly in cfi as a copy of the to-be-saved
6136
               register into the temporary register and then the temporary
6137
               register on the stack, because the temporary register can
6138
               have a different natural size than the to-be-saved register.
6139
               Thus, we gloss over the intermediate copy and pretend we do
6140
               a direct save from the to-be-saved register.  */
6141
            if (REGNO (reg_rtx) != reg)
6142
              {
6143
                rtx set, note_rtx;
6144
 
6145
                set = gen_rtx_SET (VOIDmode, mem_rtx, orig_reg_rtx);
6146
                note_rtx = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, set,
6147
                                              REG_NOTES (insn));
6148
                REG_NOTES (insn) = note_rtx;
6149
              }
6150
 
6151
            if (TARGET_SHCOMPACT && (offset_in_r0 != -1))
6152
              {
6153
                rtx reg_rtx = gen_rtx_REG (mode, reg);
6154
                rtx set, note_rtx;
6155
                rtx mem_rtx = gen_frame_mem (mode,
6156
                                             gen_rtx_PLUS (Pmode,
6157
                                                           stack_pointer_rtx,
6158
                                                           GEN_INT (offset)));
6159
 
6160
                set = gen_rtx_SET (VOIDmode, mem_rtx, reg_rtx);
6161
                note_rtx = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, set,
6162
                                              REG_NOTES (insn));
6163
                REG_NOTES (insn) = note_rtx;
6164
              }
6165
          }
6166
        }
6167
 
6168
      gcc_assert (entry->offset == d_rounding);
6169
    }
6170
  else
6171
    push_regs (&live_regs_mask, current_function_interrupt);
6172
 
6173
  if (flag_pic && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
6174
    {
6175
      rtx insn = get_last_insn ();
6176
      rtx last = emit_insn (gen_GOTaddr2picreg ());
6177
 
6178
      /* Mark these insns as possibly dead.  Sometimes, flow2 may
6179
         delete all uses of the PIC register.  In this case, let it
6180
         delete the initialization too.  */
6181
      do
6182
        {
6183
          insn = NEXT_INSN (insn);
6184
 
6185
          REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD,
6186
                                                const0_rtx,
6187
                                                REG_NOTES (insn));
6188
        }
6189
      while (insn != last);
6190
    }
6191
 
6192
  if (SHMEDIA_REGS_STACK_ADJUST ())
6193
    {
6194
      /* This must NOT go through the PLT, otherwise mach and macl
6195
         may be clobbered.  */
6196
      function_symbol (gen_rtx_REG (Pmode, R0_REG),
6197
                       (TARGET_FPU_ANY
6198
                        ? "__GCC_push_shmedia_regs"
6199
                        : "__GCC_push_shmedia_regs_nofpu"), SFUNC_GOT);
6200
      emit_insn (gen_shmedia_save_restore_regs_compact
6201
                 (GEN_INT (-SHMEDIA_REGS_STACK_ADJUST ())));
6202
    }
6203
 
6204
  if (target_flags != save_flags && ! current_function_interrupt)
6205
    {
6206
      rtx insn = emit_insn (gen_toggle_sz ());
6207
 
6208
      /* If we're lucky, a mode switch in the function body will
6209
         overwrite fpscr, turning this insn dead.  Tell flow this
6210
         insn is ok to delete.  */
6211
      REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD,
6212
                                            const0_rtx,
6213
                                            REG_NOTES (insn));
6214
    }
6215
 
6216
  target_flags = save_flags;
6217
 
6218
  output_stack_adjust (-rounded_frame_size (d) + d_rounding,
6219
                       stack_pointer_rtx, 0, NULL);
6220
 
6221
  if (frame_pointer_needed)
6222
    frame_insn (GEN_MOV (hard_frame_pointer_rtx, stack_pointer_rtx));
6223
 
6224
  if (TARGET_SHCOMPACT
6225
      && (current_function_args_info.call_cookie & ~ CALL_COOKIE_RET_TRAMP(1)))
6226
    {
6227
      /* This must NOT go through the PLT, otherwise mach and macl
6228
         may be clobbered.  */
6229
      function_symbol (gen_rtx_REG (Pmode, R0_REG),
6230
                      "__GCC_shcompact_incoming_args", SFUNC_GOT);
6231
      emit_insn (gen_shcompact_incoming_args ());
6232
    }
6233
}
6234
 
6235
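/* Expand RTL for the epilogue of the current function.  SIBCALL_P is
   true when the epilogue is emitted for a sibling call.  */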
void
6236
sh_expand_epilogue (bool sibcall_p)
6237
{
6238
  HARD_REG_SET live_regs_mask;
6239
  int d, i;
6240
  int d_rounding = 0;
6241
 
6242
  int save_flags = target_flags;
6243
  int frame_size, save_size;
6244
  int fpscr_deferred = 0;
6245
  int e = sibcall_p ? -1 : 1;
6246
 
6247
  d = calc_live_regs (&live_regs_mask);
6248
 
6249
  save_size = d;
6250
  frame_size = rounded_frame_size (d);
6251
 
6252
  if (TARGET_SH5)
6253
    {
6254
      int tregs_space = shmedia_target_regs_stack_adjust (&live_regs_mask);
6255
      int total_size;
6256
      if (d % (STACK_BOUNDARY / BITS_PER_UNIT))
6257
        d_rounding = ((STACK_BOUNDARY / BITS_PER_UNIT)
6258
                      - d % (STACK_BOUNDARY / BITS_PER_UNIT));
6259
 
6260
      total_size = d + tregs_space;
6261
      total_size += rounded_frame_size (total_size);
6262
      save_size = total_size - frame_size;
6263
 
6264
      /* If adjusting the stack in a single step costs nothing extra, do so.
6265
         I.e. either if a single addi is enough, or we need a movi anyway,
6266
         and we don't exceed the maximum offset range (the test for the
6267
         latter is conservative for simplicity).  */
6268
      if (TARGET_SHMEDIA
6269
          && ! frame_pointer_needed
6270
          && (CONST_OK_FOR_I10 (total_size)
6271
              || (! CONST_OK_FOR_I10 (save_size + d_rounding)
6272
                  && total_size <= 2044)))
6273
        d_rounding = frame_size;
6274
 
6275
      frame_size -= d_rounding;
6276
    }
6277
 
6278
  if (frame_pointer_needed)
6279
    {
6280
      /* We must avoid scheduling the epilogue with previous basic blocks
6281
         when exception handling is enabled.  See PR/18032.  */
6282
      if (flag_exceptions)
6283
        emit_insn (gen_blockage ());
6284
      output_stack_adjust (frame_size, hard_frame_pointer_rtx, e,
6285
                           &live_regs_mask);
6286
 
6287
      /* We must avoid moving the stack pointer adjustment past code
6288
         which reads from the local frame, else an interrupt could
6289
         occur after the SP adjustment and clobber data in the local
6290
         frame.  */
6291
      emit_insn (gen_blockage ());
6292
      emit_insn (GEN_MOV (stack_pointer_rtx, hard_frame_pointer_rtx));
6293
    }
6294
  else if (frame_size)
6295
    {
6296
      /* We must avoid moving the stack pointer adjustment past code
6297
         which reads from the local frame, else an interrupt could
6298
         occur after the SP adjustment and clobber data in the local
6299
         frame.  */
6300
      emit_insn (gen_blockage ());
6301
      output_stack_adjust (frame_size, stack_pointer_rtx, e, &live_regs_mask);
6302
    }
6303
 
6304
  if (SHMEDIA_REGS_STACK_ADJUST ())
6305
    {
6306
      function_symbol (gen_rtx_REG (Pmode, R0_REG),
6307
                       (TARGET_FPU_ANY
6308
                        ? "__GCC_pop_shmedia_regs"
6309
                        : "__GCC_pop_shmedia_regs_nofpu"), SFUNC_GOT);
6310
      /* This must NOT go through the PLT, otherwise mach and macl
6311
         may be clobbered.  */
6312
      emit_insn (gen_shmedia_save_restore_regs_compact
6313
                 (GEN_INT (SHMEDIA_REGS_STACK_ADJUST ())));
6314
    }
6315
 
6316
  /* Pop all the registers.  */
6317
 
6318
  if (target_flags != save_flags && ! current_function_interrupt)
6319
    emit_insn (gen_toggle_sz ());
6320
  if (TARGET_SH5)
6321
    {
6322
      int offset_base, offset;
6323
      int offset_in_r0 = -1;
6324
      int sp_in_r0 = 0;
6325
      rtx r0 = gen_rtx_REG (Pmode, R0_REG);
6326
      save_schedule schedule;
6327
      save_entry *entry;
6328
      int *tmp_pnt;
6329
 
6330
      entry = sh5_schedule_saves (&live_regs_mask, &schedule, d_rounding);
6331
      offset_base = -entry[1].offset + d_rounding;
6332
      tmp_pnt = schedule.temps;
6333
      for (; entry->mode != VOIDmode; entry--)
6334
        {
6335
          enum machine_mode mode = entry->mode;
6336
          int reg = entry->reg;
6337
          rtx reg_rtx, mem_rtx, post_inc = NULL_RTX, insn;
6338
 
6339
          offset = offset_base + entry->offset;
6340
          reg_rtx = gen_rtx_REG (mode, reg);
6341
 
6342
          mem_rtx = gen_frame_mem (mode,
6343
                                   gen_rtx_PLUS (Pmode,
6344
                                                 stack_pointer_rtx,
6345
                                                 GEN_INT (offset)));
6346
 
6347
          GO_IF_LEGITIMATE_ADDRESS (mode, XEXP (mem_rtx, 0), try_post_inc);
6348
 
6349
          mem_rtx = NULL_RTX;
6350
 
6351
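          /* Try a post-increment load through r0.  If the post-increment
             address is not legitimate for MODE, post_inc is left as
             NULL_RTX and the plain address computed above (or an r0-based
             one) is used instead.  */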
        try_post_inc:
6352
          do
6353
            if (HAVE_POST_INCREMENT
6354
                && (offset == offset_in_r0
6355
                    || (offset + GET_MODE_SIZE (mode) != d + d_rounding
6356
                        && mem_rtx == NULL_RTX)
6357
                    || reg == PR_REG || SPECIAL_REGISTER_P (reg)))
6358
              {
6359
                post_inc = gen_frame_mem (mode, gen_rtx_POST_INC (Pmode, r0));
6360
 
6361
                GO_IF_LEGITIMATE_ADDRESS (mode, XEXP (post_inc, 0),
6362
                                          post_inc_ok);
6363
 
6364
                post_inc = NULL_RTX;
6365
 
6366
                break;
6367
 
6368
              post_inc_ok:
6369
                mem_rtx = NULL_RTX;
6370
              }
6371
          while (0);
6372
 
6373
          if (mem_rtx != NULL_RTX)
6374
            goto addr_ok;
6375
 
6376
          if (offset_in_r0 == -1)
6377
            {
6378
              emit_move_insn (r0, GEN_INT (offset));
6379
              offset_in_r0 = offset;
6380
            }
6381
          else if (offset != offset_in_r0)
6382
            {
6383
              emit_move_insn (r0,
6384
                              gen_rtx_PLUS
6385
                              (Pmode, r0,
6386
                               GEN_INT (offset - offset_in_r0)));
6387
              offset_in_r0 += offset - offset_in_r0;
6388
            }
6389
 
6390
          if (post_inc != NULL_RTX)
6391
            {
6392
              if (! sp_in_r0)
6393
                {
6394
                  emit_move_insn (r0,
6395
                                  gen_rtx_PLUS
6396
                                  (Pmode, r0, stack_pointer_rtx));
6397
                  sp_in_r0 = 1;
6398
                }
6399
 
6400
              mem_rtx = post_inc;
6401
 
6402
              offset_in_r0 += GET_MODE_SIZE (mode);
6403
            }
6404
          else if (sp_in_r0)
6405
            mem_rtx = gen_frame_mem (mode, r0);
6406
          else
6407
            mem_rtx = gen_frame_mem (mode,
6408
                                     gen_rtx_PLUS (Pmode,
6409
                                                   stack_pointer_rtx,
6410
                                                   r0));
6411
 
6412
          gcc_assert ((reg != PR_REG && !SPECIAL_REGISTER_P (reg))
6413
                      || mem_rtx == post_inc);
6414
 
6415
        addr_ok:
6416
          if ((reg == PR_REG || SPECIAL_REGISTER_P (reg))
6417
              && mem_rtx != post_inc)
6418
            {
6419
              insn = emit_move_insn (r0, mem_rtx);
6420
              mem_rtx = r0;
6421
            }
6422
          else if (TARGET_REGISTER_P (reg))
6423
            {
6424
              rtx tmp_reg = gen_rtx_REG (mode, *tmp_pnt);
6425
 
6426
              /* Give the scheduler a bit of freedom by using up to
6427
                 MAX_TEMPS registers in a round-robin fashion.  */
6428
              insn = emit_move_insn (tmp_reg, mem_rtx);
6429
              mem_rtx = tmp_reg;
6430
              if (*++tmp_pnt < 0)
6431
                tmp_pnt = schedule.temps;
6432
            }
6433
 
6434
          insn = emit_move_insn (reg_rtx, mem_rtx);
6435
          if (reg == PR_MEDIA_REG && sh_media_register_for_return () >= 0)
6436
            /* This is dead, unless we return with a sibcall.  */
6437
            REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD,
6438
                                                  const0_rtx,
6439
                                                  REG_NOTES (insn));
6440
        }
6441
 
6442
      gcc_assert (entry->offset + offset_base == d + d_rounding);
6443
    }
6444
  else /* ! TARGET_SH5 */
6445
    {
6446
      save_size = 0;
6447
      if (TEST_HARD_REG_BIT (live_regs_mask, PR_REG))
6448
        pop (PR_REG);
6449
      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
6450
        {
6451
          int j = (FIRST_PSEUDO_REGISTER - 1) - i;
6452
 
6453
          if (j == FPSCR_REG && current_function_interrupt && TARGET_FMOVD
6454
              && hard_regs_intersect_p (&live_regs_mask,
6455
                                        &reg_class_contents[DF_REGS]))
6456
            fpscr_deferred = 1;
6457
          else if (j != PR_REG && TEST_HARD_REG_BIT (live_regs_mask, j))
6458
            pop (j);
6459
          if (j == FIRST_FP_REG && fpscr_deferred)
6460
            pop (FPSCR_REG);
6461
 
6462
        }
6463
    }
6464
  if (target_flags != save_flags && ! current_function_interrupt)
6465
    emit_insn (gen_toggle_sz ());
6466
  target_flags = save_flags;
6467
 
6468
  output_stack_adjust (current_function_pretend_args_size
6469
                       + save_size + d_rounding
6470
                       + current_function_args_info.stack_regs * 8,
6471
                       stack_pointer_rtx, e, NULL);
6472
 
6473
  if (current_function_calls_eh_return)
6474
    emit_insn (GEN_ADD3 (stack_pointer_rtx, stack_pointer_rtx,
6475
                         EH_RETURN_STACKADJ_RTX));
6476
 
6477
  /* Switch back to the normal stack if necessary.  */
6478
  if (lookup_attribute ("sp_switch", DECL_ATTRIBUTES (current_function_decl)))
6479
    emit_insn (gen_sp_switch_2 ());
6480
 
6481
  /* Tell flow the insn that pops PR isn't dead.  */
6482
  /* PR_REG will never be live in SHmedia mode, and we don't need to
6483
     USE PR_MEDIA_REG, since it will be explicitly copied to TR0_REG
6484
     by the return pattern.  */
6485
  if (TEST_HARD_REG_BIT (live_regs_mask, PR_REG))
6486
    emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, PR_REG)));
6487
}
6488
 
6489
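/* Cached result of sh_need_epilogue: 0 if not yet computed, 1 if an
   epilogue is needed, -1 if it is not.  */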
static int sh_need_epilogue_known = 0;
6490
 
6491
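/* Return nonzero if the current function needs an epilogue.  The result
   is computed once and cached in sh_need_epilogue_known.  */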
int
6492
sh_need_epilogue (void)
6493
{
6494
  if (! sh_need_epilogue_known)
6495
    {
6496
      rtx epilogue;
6497
 
6498
      start_sequence ();
6499
      sh_expand_epilogue (0);
6500
      epilogue = get_insns ();
6501
      end_sequence ();
6502
      sh_need_epilogue_known = (epilogue == NULL ? -1 : 1);
6503
    }
6504
  return sh_need_epilogue_known > 0;
6505
}
6506
 
6507
/* Emit code to change the current function's return address to RA.
6508
   TEMP is available as a scratch register, if needed.  */
6509
 
6510
void
6511
sh_set_return_address (rtx ra, rtx tmp)
6512
{
6513
  HARD_REG_SET live_regs_mask;
6514
  int d;
6515
  int pr_reg = TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG;
6516
  int pr_offset;
6517
 
6518
  d = calc_live_regs (&live_regs_mask);
6519
 
6520
  /* If pr_reg isn't live, we can set it (or the register given in
6521
     sh_media_register_for_return) directly.  */
6522
  if (! TEST_HARD_REG_BIT (live_regs_mask, pr_reg))
6523
    {
6524
      rtx rr;
6525
 
6526
      if (TARGET_SHMEDIA)
6527
        {
6528
          int rr_regno = sh_media_register_for_return ();
6529
 
6530
          if (rr_regno < 0)
6531
            rr_regno = pr_reg;
6532
 
6533
          rr = gen_rtx_REG (DImode, rr_regno);
6534
        }
6535
      else
6536
        rr = gen_rtx_REG (SImode, pr_reg);
6537
 
6538
      emit_insn (GEN_MOV (rr, ra));
6539
      /* Tell flow the register for return isn't dead.  */
6540
      emit_insn (gen_rtx_USE (VOIDmode, rr));
6541
      return;
6542
    }
6543
 
6544
  if (TARGET_SH5)
6545
    {
6546
      int offset;
6547
      save_schedule schedule;
6548
      save_entry *entry;
6549
 
6550
      entry = sh5_schedule_saves (&live_regs_mask, &schedule, 0);
6551
      offset = entry[1].offset;
6552
      for (; entry->mode != VOIDmode; entry--)
6553
        if (entry->reg == pr_reg)
6554
          goto found;
6555
 
6556
      /* We can't find the pr register.  */
6557
      gcc_unreachable ();
6558
 
6559
    found:
6560
      offset = entry->offset - offset;
6561
      pr_offset = (rounded_frame_size (d) + offset
6562
                   + SHMEDIA_REGS_STACK_ADJUST ());
6563
    }
6564
  else
6565
    pr_offset = rounded_frame_size (d);
6566
 
6567
  emit_insn (GEN_MOV (tmp, GEN_INT (pr_offset)));
6568
  emit_insn (GEN_ADD3 (tmp, tmp, hard_frame_pointer_rtx));
6569
 
6570
  tmp = gen_frame_mem (Pmode, tmp);
6571
  emit_insn (GEN_MOV (tmp, ra));
6572
}
6573
 
6574
/* Clear variables at function end.  */
6575
 
6576
static void
6577
sh_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
6578
                             HOST_WIDE_INT size ATTRIBUTE_UNUSED)
6579
{
6580
  sh_need_epilogue_known = 0;
6581
}
6582
 
6583
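/* Expand __builtin_saveregs: save the remaining unnamed argument
   registers into a block of memory and return the address of that
   block.  */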
static rtx
6584
sh_builtin_saveregs (void)
6585
{
6586
  /* First unnamed integer register.  */
6587
  int first_intreg = current_function_args_info.arg_count[(int) SH_ARG_INT];
6588
  /* Number of integer registers we need to save.  */
6589
  int n_intregs = MAX (0, NPARM_REGS (SImode) - first_intreg);
6590
  /* First unnamed SFmode float reg.  */
6591
  int first_floatreg = current_function_args_info.arg_count[(int) SH_ARG_FLOAT];
6592
  /* Number of SFmode float regs to save.  */
6593
  int n_floatregs = MAX (0, NPARM_REGS (SFmode) - first_floatreg);
6594
  rtx regbuf, fpregs;
6595
  int bufsize, regno;
6596
  HOST_WIDE_INT alias_set;
6597
 
6598
  if (TARGET_SH5)
6599
    {
6600
      if (n_intregs)
6601
        {
6602
          int pushregs = n_intregs;
6603
 
6604
          while (pushregs < NPARM_REGS (SImode) - 1
6605
                 && (CALL_COOKIE_INT_REG_GET
6606
                        (current_function_args_info.call_cookie,
6607
                         NPARM_REGS (SImode) - pushregs)
6608
                     == 1))
6609
            {
6610
              current_function_args_info.call_cookie
6611
                &= ~ CALL_COOKIE_INT_REG (NPARM_REGS (SImode)
6612
                                          - pushregs, 1);
6613
              pushregs++;
6614
            }
6615
 
6616
          if (pushregs == NPARM_REGS (SImode))
6617
            current_function_args_info.call_cookie
6618
              |= (CALL_COOKIE_INT_REG (0, 1)
6619
                  | CALL_COOKIE_STACKSEQ (pushregs - 1));
6620
          else
6621
            current_function_args_info.call_cookie
6622
              |= CALL_COOKIE_STACKSEQ (pushregs);
6623
 
6624
          current_function_pretend_args_size += 8 * n_intregs;
6625
        }
6626
      if (TARGET_SHCOMPACT)
6627
        return const0_rtx;
6628
    }
6629
 
6630
  if (! TARGET_SH2E && ! TARGET_SH4 && ! TARGET_SH5)
6631
    {
6632
      error ("__builtin_saveregs not supported by this subtarget");
6633
      return const0_rtx;
6634
    }
6635
 
6636
  if (TARGET_SHMEDIA)
6637
    n_floatregs = 0;
6638
 
6639
  /* Allocate block of memory for the regs.  */
6640
  /* ??? If n_intregs + n_floatregs == 0, should we allocate at least 1 byte?
6641
     Or can assign_stack_local accept a 0 SIZE argument?  */
6642
  bufsize = (n_intregs * UNITS_PER_WORD) + (n_floatregs * UNITS_PER_WORD);
6643
 
6644
  if (TARGET_SHMEDIA)
6645
    regbuf = gen_frame_mem (BLKmode, gen_rtx_REG (Pmode, ARG_POINTER_REGNUM));
6646
  else if (n_floatregs & 1)
6647
    {
6648
      rtx addr;
6649
 
6650
      regbuf = assign_stack_local (BLKmode, bufsize + UNITS_PER_WORD, 0);
6651
      addr = copy_to_mode_reg (Pmode, XEXP (regbuf, 0));
6652
      emit_insn (gen_iorsi3 (addr, addr, GEN_INT (UNITS_PER_WORD)));
6653
      regbuf = change_address (regbuf, BLKmode, addr);
6654
    }
6655
  else if (STACK_BOUNDARY < 64 && TARGET_FPU_DOUBLE && n_floatregs)
6656
    {
6657
      rtx addr, mask;
6658
 
6659
      regbuf = assign_stack_local (BLKmode, bufsize + UNITS_PER_WORD, 0);
6660
      addr = copy_to_mode_reg (Pmode, plus_constant (XEXP (regbuf, 0), 4));
6661
      mask = copy_to_mode_reg (Pmode, GEN_INT (-8));
6662
      emit_insn (gen_andsi3 (addr, addr, mask));
6663
      regbuf = change_address (regbuf, BLKmode, addr);
6664
    }
6665
  else
6666
    regbuf = assign_stack_local (BLKmode, bufsize, TARGET_FPU_DOUBLE ? 64 : 0);
6667
  alias_set = get_varargs_alias_set ();
6668
  set_mem_alias_set (regbuf, alias_set);
6669
 
6670
  /* Save int args.
6671
     This is optimized to only save the regs that are necessary.  Explicitly
6672
     named args need not be saved.  */
6673
  if (n_intregs > 0)
6674
    move_block_from_reg (BASE_ARG_REG (SImode) + first_intreg,
6675
                         adjust_address (regbuf, BLKmode,
6676
                                         n_floatregs * UNITS_PER_WORD),
6677
                         n_intregs);
6678
 
6679
  if (TARGET_SHMEDIA)
6680
    /* Return the address of the regbuf.  */
6681
    return XEXP (regbuf, 0);
6682
 
6683
  /* Save float args.
6684
     This is optimized to only save the regs that are necessary.  Explicitly
6685
     named args need not be saved.
6686
     We explicitly build a pointer to the buffer because it halves the insn
6687
     count when not optimizing (otherwise the pointer is built for each reg
6688
     saved).
6689
     We emit the moves in reverse order so that we can use predecrement.  */
6690
 
6691
  fpregs = copy_to_mode_reg (Pmode,
6692
                             plus_constant (XEXP (regbuf, 0),
6693
                                            n_floatregs * UNITS_PER_WORD));
6694
  if (TARGET_SH4 || TARGET_SH2A_DOUBLE)
6695
    {
6696
      rtx mem;
6697
      for (regno = NPARM_REGS (DFmode) - 2; regno >= first_floatreg; regno -= 2)
6698
        {
6699
          emit_insn (gen_addsi3 (fpregs, fpregs,
6700
                                 GEN_INT (-2 * UNITS_PER_WORD)));
6701
          mem = change_address (regbuf, DFmode, fpregs);
6702
          emit_move_insn (mem,
6703
                          gen_rtx_REG (DFmode, BASE_ARG_REG (DFmode) + regno));
6704
        }
6705
      regno = first_floatreg;
6706
      if (regno & 1)
6707
        {
6708
          emit_insn (gen_addsi3 (fpregs, fpregs, GEN_INT (-UNITS_PER_WORD)));
6709
          mem = change_address (regbuf, SFmode, fpregs);
6710
          emit_move_insn (mem,
6711
                          gen_rtx_REG (SFmode, BASE_ARG_REG (SFmode) + regno
6712
                                                - (TARGET_LITTLE_ENDIAN != 0)));
6713
        }
6714
    }
6715
  else
6716
    for (regno = NPARM_REGS (SFmode) - 1; regno >= first_floatreg; regno--)
6717
      {
6718
        rtx mem;
6719
 
6720
        emit_insn (gen_addsi3 (fpregs, fpregs, GEN_INT (-UNITS_PER_WORD)));
6721
        mem = change_address (regbuf, SFmode, fpregs);
6722
        emit_move_insn (mem,
6723
                        gen_rtx_REG (SFmode, BASE_ARG_REG (SFmode) + regno));
6724
      }
6725
 
6726
  /* Return the address of the regbuf.  */
6727
  return XEXP (regbuf, 0);
6728
}
6729
 
6730
/* Define the `__builtin_va_list' type for the ABI.  */
6731
 
6732
static tree
6733
sh_build_builtin_va_list (void)
6734
{
6735
  tree f_next_o, f_next_o_limit, f_next_fp, f_next_fp_limit, f_next_stack;
6736
  tree record;
6737
 
6738
  if (TARGET_SH5 || (! TARGET_SH2E && ! TARGET_SH4)
6739
      || TARGET_HITACHI || sh_cfun_attr_renesas_p ())
6740
    return ptr_type_node;
6741
 
6742
  record = (*lang_hooks.types.make_type) (RECORD_TYPE);
6743
 
6744
  f_next_o = build_decl (FIELD_DECL, get_identifier ("__va_next_o"),
6745
                         ptr_type_node);
6746
  f_next_o_limit = build_decl (FIELD_DECL,
6747
                               get_identifier ("__va_next_o_limit"),
6748
                               ptr_type_node);
6749
  f_next_fp = build_decl (FIELD_DECL, get_identifier ("__va_next_fp"),
6750
                          ptr_type_node);
6751
  f_next_fp_limit = build_decl (FIELD_DECL,
6752
                                get_identifier ("__va_next_fp_limit"),
6753
                                ptr_type_node);
6754
  f_next_stack = build_decl (FIELD_DECL, get_identifier ("__va_next_stack"),
6755
                             ptr_type_node);
6756
 
6757
  DECL_FIELD_CONTEXT (f_next_o) = record;
6758
  DECL_FIELD_CONTEXT (f_next_o_limit) = record;
6759
  DECL_FIELD_CONTEXT (f_next_fp) = record;
6760
  DECL_FIELD_CONTEXT (f_next_fp_limit) = record;
6761
  DECL_FIELD_CONTEXT (f_next_stack) = record;
6762
 
6763
  TYPE_FIELDS (record) = f_next_o;
6764
  TREE_CHAIN (f_next_o) = f_next_o_limit;
6765
  TREE_CHAIN (f_next_o_limit) = f_next_fp;
6766
  TREE_CHAIN (f_next_fp) = f_next_fp_limit;
6767
  TREE_CHAIN (f_next_fp_limit) = f_next_stack;
6768
 
6769
  layout_type (record);
6770
 
6771
  return record;
6772
}
6773
 
6774
/* Implement `va_start' for varargs and stdarg.  */
6775
 
6776
void
6777
sh_va_start (tree valist, rtx nextarg)
6778
{
6779
  tree f_next_o, f_next_o_limit, f_next_fp, f_next_fp_limit, f_next_stack;
6780
  tree next_o, next_o_limit, next_fp, next_fp_limit, next_stack;
6781
  tree t, u;
6782
  int nfp, nint;
6783
 
6784
  if (TARGET_SH5)
6785
    {
6786
      expand_builtin_saveregs ();
6787
      std_expand_builtin_va_start (valist, nextarg);
6788
      return;
6789
    }
6790
 
6791
  if ((! TARGET_SH2E && ! TARGET_SH4)
6792
      || TARGET_HITACHI || sh_cfun_attr_renesas_p ())
6793
    {
6794
      std_expand_builtin_va_start (valist, nextarg);
6795
      return;
6796
    }
6797
 
6798
  f_next_o = TYPE_FIELDS (va_list_type_node);
6799
  f_next_o_limit = TREE_CHAIN (f_next_o);
6800
  f_next_fp = TREE_CHAIN (f_next_o_limit);
6801
  f_next_fp_limit = TREE_CHAIN (f_next_fp);
6802
  f_next_stack = TREE_CHAIN (f_next_fp_limit);
6803
 
6804
  next_o = build3 (COMPONENT_REF, TREE_TYPE (f_next_o), valist, f_next_o,
6805
                   NULL_TREE);
6806
  next_o_limit = build3 (COMPONENT_REF, TREE_TYPE (f_next_o_limit),
6807
                         valist, f_next_o_limit, NULL_TREE);
6808
  next_fp = build3 (COMPONENT_REF, TREE_TYPE (f_next_fp), valist, f_next_fp,
6809
                    NULL_TREE);
6810
  next_fp_limit = build3 (COMPONENT_REF, TREE_TYPE (f_next_fp_limit),
6811
                          valist, f_next_fp_limit, NULL_TREE);
6812
  next_stack = build3 (COMPONENT_REF, TREE_TYPE (f_next_stack),
6813
                       valist, f_next_stack, NULL_TREE);
6814
 
6815
  /* Call __builtin_saveregs.  */
6816
  u = make_tree (ptr_type_node, expand_builtin_saveregs ());
6817
  t = build2 (MODIFY_EXPR, ptr_type_node, next_fp, u);
6818
  TREE_SIDE_EFFECTS (t) = 1;
6819
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6820
 
6821
  nfp = current_function_args_info.arg_count[SH_ARG_FLOAT];
6822
  if (nfp < 8)
6823
    nfp = 8 - nfp;
6824
  else
6825
    nfp = 0;
6826
  u = fold_build2 (PLUS_EXPR, ptr_type_node, u,
6827
                   build_int_cst (NULL_TREE, UNITS_PER_WORD * nfp));
6828
  t = build2 (MODIFY_EXPR, ptr_type_node, next_fp_limit, u);
6829
  TREE_SIDE_EFFECTS (t) = 1;
6830
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6831
 
6832
  t = build2 (MODIFY_EXPR, ptr_type_node, next_o, u);
6833
  TREE_SIDE_EFFECTS (t) = 1;
6834
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6835
 
6836
  nint = current_function_args_info.arg_count[SH_ARG_INT];
6837
  if (nint < 4)
6838
    nint = 4 - nint;
6839
  else
6840
    nint = 0;
6841
  u = fold_build2 (PLUS_EXPR, ptr_type_node, u,
6842
                   build_int_cst (NULL_TREE, UNITS_PER_WORD * nint));
6843
  t = build2 (MODIFY_EXPR, ptr_type_node, next_o_limit, u);
6844
  TREE_SIDE_EFFECTS (t) = 1;
6845
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6846
 
6847
  u = make_tree (ptr_type_node, nextarg);
6848
  t = build2 (MODIFY_EXPR, ptr_type_node, next_stack, u);
6849
  TREE_SIDE_EFFECTS (t) = 1;
6850
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
6851
}
6852
 
6853
/* TYPE is a RECORD_TYPE.  If there is only a single nonzero-sized
6854
   member, return it.  */
6855
static tree
6856
find_sole_member (tree type)
6857
{
6858
  tree field, member = NULL_TREE;
6859
 
6860
  for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
6861
    {
6862
      if (TREE_CODE (field) != FIELD_DECL)
6863
        continue;
6864
      if (!DECL_SIZE (field))
6865
        return NULL_TREE;
6866
      if (integer_zerop (DECL_SIZE (field)))
6867
        continue;
6868
      if (member)
6869
        return NULL_TREE;
6870
      member = field;
6871
    }
6872
  return member;
6873
}
6874
/* Implement `va_arg'.  */
6875
 
6876
static tree
6877
sh_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p,
6878
                         tree *post_p ATTRIBUTE_UNUSED)
6879
{
6880
  HOST_WIDE_INT size, rsize;
6881
  tree tmp, pptr_type_node;
6882
  tree addr, lab_over = NULL, result = NULL;
6883
  int pass_by_ref = targetm.calls.must_pass_in_stack (TYPE_MODE (type), type);
6884
  tree eff_type;
6885
 
6886
  if (pass_by_ref)
6887
    type = build_pointer_type (type);
6888
 
6889
  size = int_size_in_bytes (type);
6890
  rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
6891
  pptr_type_node = build_pointer_type (ptr_type_node);
6892
 
6893
  if (! TARGET_SH5 && (TARGET_SH2E || TARGET_SH4)
6894
      && ! (TARGET_HITACHI || sh_cfun_attr_renesas_p ()))
6895
    {
6896
      tree f_next_o, f_next_o_limit, f_next_fp, f_next_fp_limit, f_next_stack;
6897
      tree next_o, next_o_limit, next_fp, next_fp_limit, next_stack;
6898
      int pass_as_float;
6899
      tree lab_false;
6900
      tree member;
6901
 
6902
      f_next_o = TYPE_FIELDS (va_list_type_node);
6903
      f_next_o_limit = TREE_CHAIN (f_next_o);
6904
      f_next_fp = TREE_CHAIN (f_next_o_limit);
6905
      f_next_fp_limit = TREE_CHAIN (f_next_fp);
6906
      f_next_stack = TREE_CHAIN (f_next_fp_limit);
6907
 
6908
      next_o = build3 (COMPONENT_REF, TREE_TYPE (f_next_o), valist, f_next_o,
6909
                       NULL_TREE);
6910
      next_o_limit = build3 (COMPONENT_REF, TREE_TYPE (f_next_o_limit),
6911
                             valist, f_next_o_limit, NULL_TREE);
6912
      next_fp = build3 (COMPONENT_REF, TREE_TYPE (f_next_fp),
6913
                        valist, f_next_fp, NULL_TREE);
6914
      next_fp_limit = build3 (COMPONENT_REF, TREE_TYPE (f_next_fp_limit),
6915
                              valist, f_next_fp_limit, NULL_TREE);
6916
      next_stack = build3 (COMPONENT_REF, TREE_TYPE (f_next_stack),
6917
                           valist, f_next_stack, NULL_TREE);
6918
 
6919
      /* Structures with a single member with a distinct mode are passed
6920
         like their member.  This is relevant if the latter has a REAL_TYPE
6921
         or COMPLEX_TYPE type.  */
6922
      eff_type = type;
6923
      while (TREE_CODE (eff_type) == RECORD_TYPE
6924
             && (member = find_sole_member (eff_type))
6925
             && (TREE_CODE (TREE_TYPE (member)) == REAL_TYPE
6926
                 || TREE_CODE (TREE_TYPE (member)) == COMPLEX_TYPE
6927
                 || TREE_CODE (TREE_TYPE (member)) == RECORD_TYPE))
6928
        {
6929
          tree field_type = TREE_TYPE (member);
6930
 
6931
          if (TYPE_MODE (eff_type) == TYPE_MODE (field_type))
6932
            eff_type = field_type;
6933
          else
6934
            {
6935
              gcc_assert ((TYPE_ALIGN (eff_type)
6936
                           < GET_MODE_ALIGNMENT (TYPE_MODE (field_type)))
6937
                          || (TYPE_ALIGN (eff_type)
6938
                              > GET_MODE_BITSIZE (TYPE_MODE (field_type))));
6939
              break;
6940
            }
6941
        }
6942
 
6943
      if (TARGET_SH4)
6944
        {
6945
          pass_as_float = ((TREE_CODE (eff_type) == REAL_TYPE && size <= 8)
6946
                           || (TREE_CODE (eff_type) == COMPLEX_TYPE
6947
                               && TREE_CODE (TREE_TYPE (eff_type)) == REAL_TYPE
6948
                               && size <= 16));
6949
        }
6950
      else
6951
        {
6952
          pass_as_float = (TREE_CODE (eff_type) == REAL_TYPE && size == 4);
6953
        }
6954
 
6955
      addr = create_tmp_var (pptr_type_node, NULL);
6956
      lab_false = create_artificial_label ();
6957
      lab_over = create_artificial_label ();
6958
 
6959
      valist = build1 (INDIRECT_REF, ptr_type_node, addr);
6960
 
6961
      if (pass_as_float)
6962
        {
6963
          tree next_fp_tmp = create_tmp_var (TREE_TYPE (f_next_fp), NULL);
6964
          tree cmp;
6965
          bool is_double = size == 8 && TREE_CODE (eff_type) == REAL_TYPE;
6966
 
6967
          tmp = build1 (ADDR_EXPR, pptr_type_node, next_fp);
6968
          tmp = build2 (MODIFY_EXPR, void_type_node, addr, tmp);
6969
          gimplify_and_add (tmp, pre_p);
6970
 
6971
          tmp = build2 (MODIFY_EXPR, ptr_type_node, next_fp_tmp, valist);
6972
          gimplify_and_add (tmp, pre_p);
6973
          tmp = next_fp_limit;
6974
          if (size > 4 && !is_double)
6975
            tmp = build2 (PLUS_EXPR, TREE_TYPE (tmp), tmp,
6976
                          fold_convert (TREE_TYPE (tmp), size_int (4 - size)));
6977
          tmp = build2 (GE_EXPR, boolean_type_node, next_fp_tmp, tmp);
6978
          cmp = build3 (COND_EXPR, void_type_node, tmp,
6979
                        build1 (GOTO_EXPR, void_type_node, lab_false),
6980
                        NULL_TREE);
6981
          if (!is_double)
6982
            gimplify_and_add (cmp, pre_p);
6983
 
6984
          if (TYPE_ALIGN (eff_type) > BITS_PER_WORD
6985
              || (is_double || size == 16))
6986
            {
6987
              tmp = fold_convert (ptr_type_node, size_int (UNITS_PER_WORD));
6988
              tmp = build2 (BIT_AND_EXPR, ptr_type_node, next_fp_tmp, tmp);
6989
              tmp = build2 (PLUS_EXPR, ptr_type_node, next_fp_tmp, tmp);
6990
              tmp = build2 (MODIFY_EXPR, ptr_type_node, next_fp_tmp, tmp);
6991
              gimplify_and_add (tmp, pre_p);
6992
            }
6993
          if (is_double)
6994
            gimplify_and_add (cmp, pre_p);
6995
 
6996
#ifdef FUNCTION_ARG_SCmode_WART
6997
          if (TYPE_MODE (eff_type) == SCmode
6998
              && TARGET_SH4 && TARGET_LITTLE_ENDIAN)
6999
            {
7000
              tree subtype = TREE_TYPE (eff_type);
7001
              tree real, imag;
7002
 
7003
              imag
7004
                = std_gimplify_va_arg_expr (next_fp_tmp, subtype, pre_p, NULL);
7005
              imag = get_initialized_tmp_var (imag, pre_p, NULL);
7006
 
7007
              real
7008
                = std_gimplify_va_arg_expr (next_fp_tmp, subtype, pre_p, NULL);
7009
              real = get_initialized_tmp_var (real, pre_p, NULL);
7010
 
7011
              result = build2 (COMPLEX_EXPR, type, real, imag);
7012
              result = get_initialized_tmp_var (result, pre_p, NULL);
7013
            }
7014
#endif /* FUNCTION_ARG_SCmode_WART */
7015
 
7016
          tmp = build1 (GOTO_EXPR, void_type_node, lab_over);
7017
          gimplify_and_add (tmp, pre_p);
7018
 
7019
          tmp = build1 (LABEL_EXPR, void_type_node, lab_false);
7020
          gimplify_and_add (tmp, pre_p);
7021
 
7022
          tmp = build1 (ADDR_EXPR, pptr_type_node, next_stack);
7023
          tmp = build2 (MODIFY_EXPR, void_type_node, addr, tmp);
7024
          gimplify_and_add (tmp, pre_p);
7025
          tmp = build2 (MODIFY_EXPR, ptr_type_node, next_fp_tmp, valist);
7026
          gimplify_and_add (tmp, pre_p);
7027
 
7028
          tmp = build2 (MODIFY_EXPR, ptr_type_node, valist, next_fp_tmp);
7029
          gimplify_and_add (tmp, post_p);
7030
          valist = next_fp_tmp;
7031
        }
7032
      else
7033
        {
7034
          tmp = fold_convert (ptr_type_node, size_int (rsize));
7035
          tmp = build2 (PLUS_EXPR, ptr_type_node, next_o, tmp);
7036
          tmp = build2 (GT_EXPR, boolean_type_node, tmp, next_o_limit);
7037
          tmp = build3 (COND_EXPR, void_type_node, tmp,
7038
                        build1 (GOTO_EXPR, void_type_node, lab_false),
7039
                        NULL_TREE);
7040
          gimplify_and_add (tmp, pre_p);
7041
 
7042
          tmp = build1 (ADDR_EXPR, pptr_type_node, next_o);
7043
          tmp = build2 (MODIFY_EXPR, void_type_node, addr, tmp);
7044
          gimplify_and_add (tmp, pre_p);
7045
 
7046
          tmp = build1 (GOTO_EXPR, void_type_node, lab_over);
7047
          gimplify_and_add (tmp, pre_p);
7048
 
7049
          tmp = build1 (LABEL_EXPR, void_type_node, lab_false);
7050
          gimplify_and_add (tmp, pre_p);
7051
 
7052
          if (size > 4 && ! TARGET_SH4)
7053
            {
7054
              tmp = build2 (MODIFY_EXPR, ptr_type_node, next_o, next_o_limit);
7055
              gimplify_and_add (tmp, pre_p);
7056
            }
7057
 
7058
          tmp = build1 (ADDR_EXPR, pptr_type_node, next_stack);
7059
          tmp = build2 (MODIFY_EXPR, void_type_node, addr, tmp);
7060
          gimplify_and_add (tmp, pre_p);
7061
        }
7062
 
7063
      if (!result)
7064
        {
7065
          tmp = build1 (LABEL_EXPR, void_type_node, lab_over);
7066
          gimplify_and_add (tmp, pre_p);
7067
        }
7068
    }
7069
 
7070
  /* ??? In va-sh.h, there had been code to make values larger than
7071
     size 8 indirect.  This does not match the FUNCTION_ARG macros.  */
7072
 
7073
  tmp = std_gimplify_va_arg_expr (valist, type, pre_p, NULL);
7074
  if (result)
7075
    {
7076
      tmp = build2 (MODIFY_EXPR, void_type_node, result, tmp);
7077
      gimplify_and_add (tmp, pre_p);
7078
 
7079
      tmp = build1 (LABEL_EXPR, void_type_node, lab_over);
7080
      gimplify_and_add (tmp, pre_p);
7081
    }
7082
  else
7083
    result = tmp;
7084
 
7085
  if (pass_by_ref)
7086
    result = build_va_arg_indirect_ref (result);
7087
 
7088
  return result;
7089
}
7090
 
7091
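/* Return nonzero if arguments narrower than int should be promoted when
   a prototype is present; the Hitachi / Renesas calling conventions do
   not promote them.  */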
bool
7092
sh_promote_prototypes (tree type)
7093
{
7094
  if (TARGET_HITACHI)
7095
    return 0;
7096
  if (! type)
7097
    return 1;
7098
  return ! sh_attr_renesas_p (type);
7099
}
7100
 
7101
/* Whether an argument must be passed by reference.  On SHcompact, we
7102
   pretend arguments wider than 32 bits that would have been passed in
7103
   registers are passed by reference, so that an SHmedia trampoline
7104
   loads them into the full 64-bit registers.  */
7105
 
7106
static int
7107
shcompact_byref (CUMULATIVE_ARGS *cum, enum machine_mode mode,
7108
                 tree type, bool named)
7109
{
7110
  unsigned HOST_WIDE_INT size;
7111
 
7112
  if (type)
7113
    size = int_size_in_bytes (type);
7114
  else
7115
    size = GET_MODE_SIZE (mode);
7116
 
7117
  if (cum->arg_count[SH_ARG_INT] < NPARM_REGS (SImode)
7118
      && (!named
7119
          || GET_SH_ARG_CLASS (mode) == SH_ARG_INT
7120
          || (GET_SH_ARG_CLASS (mode) == SH_ARG_FLOAT
7121
              && cum->arg_count[SH_ARG_FLOAT] >= NPARM_REGS (SFmode)))
7122
      && size > 4
7123
      && !SHCOMPACT_FORCE_ON_STACK (mode, type)
7124
      && !SH5_WOULD_BE_PARTIAL_NREGS (*cum, mode, type, named))
7125
    return size;
7126
  else
7127
    return 0;
7128
}
7129
 
7130
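/* Return true if an argument of mode MODE and type TYPE must be passed
   by reference rather than by value.  */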
static bool
7131
sh_pass_by_reference (CUMULATIVE_ARGS *cum, enum machine_mode mode,
7132
                      tree type, bool named)
7133
{
7134
  if (targetm.calls.must_pass_in_stack (mode, type))
7135
    return true;
7136
 
7137
  /* ??? std_gimplify_va_arg_expr passes NULL for cum.  That function
7138
     wants to know about pass-by-reference semantics for incoming
7139
     arguments.  */
7140
  if (! cum)
7141
    return false;
7142
 
7143
  if (TARGET_SHCOMPACT)
7144
    {
7145
      cum->byref = shcompact_byref (cum, mode, type, named);
7146
      return cum->byref != 0;
7147
    }
7148
 
7149
  return false;
7150
}
7151
 
7152
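/* Return true if the callee is responsible for making a copy of an
   argument that is passed by reference.  */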
static bool
7153
sh_callee_copies (CUMULATIVE_ARGS *cum, enum machine_mode mode,
7154
                  tree type, bool named ATTRIBUTE_UNUSED)
7155
{
7156
  /* ??? How can it possibly be correct to return true only on the
7157
     caller side of the equation?  Is there someplace else in the
7158
     sh backend that's magically producing the copies?  */
7159
  return (cum->outgoing
7160
          && ((mode == BLKmode ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode))
7161
              % SH_MIN_ALIGN_FOR_CALLEE_COPY == 0));
7162
}
7163
 
7164
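/* Return the number of bytes of an argument that are passed in
   registers when the remainder of the argument is passed on the
   stack.  */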
static int
7165
sh_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
7166
                      tree type, bool named ATTRIBUTE_UNUSED)
7167
{
7168
  int words = 0;
7169
 
7170
  if (!TARGET_SH5
7171
      && PASS_IN_REG_P (*cum, mode, type)
7172
      && !(TARGET_SH4 || TARGET_SH2A_DOUBLE)
7173
      && (ROUND_REG (*cum, mode)
7174
          + (mode != BLKmode
7175
             ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
7176
             : ROUND_ADVANCE (int_size_in_bytes (type)))
7177
          > NPARM_REGS (mode)))
7178
    words = NPARM_REGS (mode) - ROUND_REG (*cum, mode);
7179
 
7180
  else if (!TARGET_SHCOMPACT
7181
           && SH5_WOULD_BE_PARTIAL_NREGS (*cum, mode, type, named))
7182
    words = NPARM_REGS (SImode) - cum->arg_count[SH_ARG_INT];
7183
 
7184
  return words * UNITS_PER_WORD;
7185
}
7186
 
7187
 
7188
/* Define where to put the arguments to a function.
7189
   Value is zero to push the argument on the stack,
7190
   or a hard register in which to store the argument.
7191
 
7192
   MODE is the argument's machine mode.
7193
   TYPE is the data type of the argument (as a tree).
7194
    This is null for libcalls where that information may
7195
    not be available.
7196
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
7197
    the preceding args and about the function being called.
7198
   NAMED is nonzero if this argument is a named parameter
7199
    (otherwise it is an extra parameter matching an ellipsis).
7200
 
7201
   On SH the first args are normally in registers
7202
   and the rest are pushed.  Any arg that starts within the first
7203
   NPARM_REGS words is at least partially passed in a register unless
7204
   its data type forbids.  */
7205
 
7206
 
7207
rtx
7208
sh_function_arg (CUMULATIVE_ARGS *ca, enum machine_mode mode,
7209
                 tree type, int named)
7210
{
7211
  if (! TARGET_SH5 && mode == VOIDmode)
7212
    return GEN_INT (ca->renesas_abi ? 1 : 0);
7213
 
7214
  if (! TARGET_SH5
7215
      && PASS_IN_REG_P (*ca, mode, type)
7216
      && (named || ! (TARGET_HITACHI || ca->renesas_abi)))
7217
    {
7218
      int regno;
7219
 
7220
      if (mode == SCmode && TARGET_SH4 && TARGET_LITTLE_ENDIAN
7221
          && (! FUNCTION_ARG_SCmode_WART || (ROUND_REG (*ca, mode) & 1)))
7222
        {
7223
          rtx r1 = gen_rtx_EXPR_LIST (VOIDmode,
7224
                                      gen_rtx_REG (SFmode,
7225
                                                   BASE_ARG_REG (mode)
7226
                                                   + (ROUND_REG (*ca, mode) ^ 1)),
7227
                                      const0_rtx);
7228
          rtx r2 = gen_rtx_EXPR_LIST (VOIDmode,
7229
                                      gen_rtx_REG (SFmode,
7230
                                                   BASE_ARG_REG (mode)
7231
                                                   + ((ROUND_REG (*ca, mode) + 1) ^ 1)),
7232
                                      GEN_INT (4));
7233
          return gen_rtx_PARALLEL(SCmode, gen_rtvec(2, r1, r2));
7234
        }
7235
 
7236
     /* If the alignment of a DF value causes an SF register to be
7237
        skipped, we will use that skipped register for the next SF
7238
        value.  */
7239
      if ((TARGET_HITACHI || ca->renesas_abi)
7240
          && ca->free_single_fp_reg
7241
          && mode == SFmode)
7242
        return gen_rtx_REG (mode, ca->free_single_fp_reg);
7243
 
7244
      regno = (BASE_ARG_REG (mode) + ROUND_REG (*ca, mode))
7245
               ^ (mode == SFmode && TARGET_SH4
7246
                  && TARGET_LITTLE_ENDIAN != 0
7247
                  && ! TARGET_HITACHI && ! ca->renesas_abi);
7248
      return gen_rtx_REG (mode, regno);
7249
 
7250
    }
7251
 
7252
  if (TARGET_SH5)
7253
    {
7254
      if (mode == VOIDmode && TARGET_SHCOMPACT)
7255
        return GEN_INT (ca->call_cookie);
7256
 
7257
      /* The following test assumes unnamed arguments are promoted to
7258
         DFmode.  */
7259
      if (mode == SFmode && ca->free_single_fp_reg)
7260
        return SH5_PROTOTYPED_FLOAT_ARG (*ca, mode, ca->free_single_fp_reg);
7261
 
7262
      if ((GET_SH_ARG_CLASS (mode) == SH_ARG_FLOAT)
7263
          && (named || ! ca->prototype_p)
7264
          && ca->arg_count[(int) SH_ARG_FLOAT] < NPARM_REGS (SFmode))
7265
        {
7266
          if (! ca->prototype_p && TARGET_SHMEDIA)
7267
            return SH5_PROTOTYPELESS_FLOAT_ARG (*ca, mode);
7268
 
7269
          return SH5_PROTOTYPED_FLOAT_ARG (*ca, mode,
7270
                                           FIRST_FP_PARM_REG
7271
                                           + ca->arg_count[(int) SH_ARG_FLOAT]);
7272
        }
7273
 
7274
      if (ca->arg_count[(int) SH_ARG_INT] < NPARM_REGS (SImode)
7275
          && (! TARGET_SHCOMPACT
7276
              || (! SHCOMPACT_FORCE_ON_STACK (mode, type)
7277
                  && ! SH5_WOULD_BE_PARTIAL_NREGS (*ca, mode,
7278
                                                   type, named))))
7279
        {
7280
          return gen_rtx_REG (mode, (FIRST_PARM_REG
7281
                                       + ca->arg_count[(int) SH_ARG_INT]));
7282
        }
7283
 
7284
      return 0;
7285
    }
7286
 
7287
  return 0;
7288
}
7289
 
7290
/* Update the data in CUM to advance over an argument
7291
   of mode MODE and data type TYPE.
7292
   (TYPE is null for libcalls where that information may not be
7293
   available.)  */
7294
 
7295
void
7296
sh_function_arg_advance (CUMULATIVE_ARGS *ca, enum machine_mode mode,
7297
                         tree type, int named)
7298
{
7299
  if (ca->force_mem)
7300
    ca->force_mem = 0;
7301
  else if (TARGET_SH5)
7302
    {
7303
      tree type2 = (ca->byref && type
7304
                    ? TREE_TYPE (type)
7305
                    : type);
7306
      enum machine_mode mode2 = (ca->byref && type
7307
                                 ? TYPE_MODE (type2)
7308
                                 : mode);
7309
      int dwords = ((ca->byref
7310
                     ? ca->byref
7311
                     : mode2 == BLKmode
7312
                     ? int_size_in_bytes (type2)
7313
                     : GET_MODE_SIZE (mode2)) + 7) / 8;
7314
      int numregs = MIN (dwords, NPARM_REGS (SImode)
7315
                         - ca->arg_count[(int) SH_ARG_INT]);
7316
 
7317
      if (numregs)
7318
        {
7319
          ca->arg_count[(int) SH_ARG_INT] += numregs;
7320
          if (TARGET_SHCOMPACT
7321
              && SHCOMPACT_FORCE_ON_STACK (mode2, type2))
7322
            {
7323
              ca->call_cookie
7324
                |= CALL_COOKIE_INT_REG (ca->arg_count[(int) SH_ARG_INT]
7325
                                        - numregs, 1);
7326
              /* N.B. We want this also for outgoing.  */
7327
              ca->stack_regs += numregs;
7328
            }
7329
          else if (ca->byref)
7330
            {
7331
              if (! ca->outgoing)
7332
                ca->stack_regs += numregs;
7333
              ca->byref_regs += numregs;
7334
              ca->byref = 0;
7335
              do
7336
                ca->call_cookie
7337
                  |= CALL_COOKIE_INT_REG (ca->arg_count[(int) SH_ARG_INT]
7338
                                          - numregs, 2);
7339
              while (--numregs);
7340
              ca->call_cookie
7341
                |= CALL_COOKIE_INT_REG (ca->arg_count[(int) SH_ARG_INT]
7342
                                        - 1, 1);
7343
            }
7344
          else if (dwords > numregs)
7345
            {
7346
              int pushregs = numregs;
7347
 
7348
              if (TARGET_SHCOMPACT)
7349
                ca->stack_regs += numregs;
7350
              while (pushregs < NPARM_REGS (SImode) - 1
7351
                     && (CALL_COOKIE_INT_REG_GET
7352
                         (ca->call_cookie,
7353
                          NPARM_REGS (SImode) - pushregs)
7354
                         == 1))
7355
                {
7356
                  ca->call_cookie
7357
                    &= ~ CALL_COOKIE_INT_REG (NPARM_REGS (SImode)
7358
                                              - pushregs, 1);
7359
                  pushregs++;
7360
                }
7361
              if (numregs == NPARM_REGS (SImode))
7362
                ca->call_cookie
7363
                  |= CALL_COOKIE_INT_REG (0, 1)
7364
                  | CALL_COOKIE_STACKSEQ (numregs - 1);
7365
              else
7366
                ca->call_cookie
7367
                  |= CALL_COOKIE_STACKSEQ (numregs);
7368
            }
7369
        }
7370
      if (GET_SH_ARG_CLASS (mode2) == SH_ARG_FLOAT
7371
          && (named || ! ca->prototype_p))
7372
        {
7373
          if (mode2 == SFmode && ca->free_single_fp_reg)
7374
            ca->free_single_fp_reg = 0;
7375
          else if (ca->arg_count[(int) SH_ARG_FLOAT]
7376
                   < NPARM_REGS (SFmode))
7377
            {
7378
              int numfpregs
7379
                = MIN ((GET_MODE_SIZE (mode2) + 7) / 8 * 2,
7380
                       NPARM_REGS (SFmode)
7381
                       - ca->arg_count[(int) SH_ARG_FLOAT]);
7382
 
7383
              ca->arg_count[(int) SH_ARG_FLOAT] += numfpregs;
7384
 
7385
              if (TARGET_SHCOMPACT && ! ca->prototype_p)
7386
                {
7387
                  if (ca->outgoing && numregs > 0)
7388
                    do
7389
                      {
7390
                        ca->call_cookie
7391
                          |= (CALL_COOKIE_INT_REG
7392
                              (ca->arg_count[(int) SH_ARG_INT]
7393
                               - numregs + ((numfpregs - 2) / 2),
7394
                               4 + (ca->arg_count[(int) SH_ARG_FLOAT]
7395
                                    - numfpregs) / 2));
7396
                      }
7397
                    while (numfpregs -= 2);
7398
                }
7399
              else if (mode2 == SFmode && (named)
7400
                       && (ca->arg_count[(int) SH_ARG_FLOAT]
7401
                           < NPARM_REGS (SFmode)))
7402
                ca->free_single_fp_reg
7403
                  = FIRST_FP_PARM_REG - numfpregs
7404
                  + ca->arg_count[(int) SH_ARG_FLOAT] + 1;
7405
            }
7406
        }
7407
      return;
7408
    }
7409
 
7410
  if ((TARGET_HITACHI || ca->renesas_abi) && TARGET_FPU_DOUBLE)
7411
    {
7412
      /* Note that we've used the skipped register.  */
7413
      if (mode == SFmode && ca->free_single_fp_reg)
7414
        {
7415
          ca->free_single_fp_reg = 0;
7416
          return;
7417
        }
7418
      /* When we have a DF after an SF, there's an SF register that gets
7419
         skipped in order to align the DF value.  We note this skipped
7420
         register, because the next SF value will use it, and not the
7421
         SF that follows the DF.  */
7422
      if (mode == DFmode
7423
          && ROUND_REG (*ca, DFmode) != ROUND_REG (*ca, SFmode))
7424
        {
7425
          ca->free_single_fp_reg = (ROUND_REG (*ca, SFmode)
7426
                                    + BASE_ARG_REG (mode));
7427
        }
7428
    }
7429
 
7430
  if (! ((TARGET_SH4 || TARGET_SH2A) || ca->renesas_abi)
7431
      || PASS_IN_REG_P (*ca, mode, type))
7432
    (ca->arg_count[(int) GET_SH_ARG_CLASS (mode)]
7433
     = (ROUND_REG (*ca, mode)
7434
        + (mode == BLKmode
7435
           ? ROUND_ADVANCE (int_size_in_bytes (type))
7436
           : ROUND_ADVANCE (GET_MODE_SIZE (mode)))));
7437
}
7438
 
7439
/* The Renesas calling convention doesn't quite fit into this scheme since
7440
   the structure return address is passed like an invisible argument, but one
7441
   that is always passed in memory.  */
7442
static rtx
7443
sh_struct_value_rtx (tree fndecl, int incoming ATTRIBUTE_UNUSED)
7444
{
7445
  if (TARGET_HITACHI || sh_attr_renesas_p (fndecl))
7446
    return 0;
7447
  return gen_rtx_REG (Pmode, 2);
7448
}
7449
 
7450
/* Worker function for TARGET_RETURN_IN_MEMORY.  */
7451
 
7452
static bool
7453
sh_return_in_memory (tree type, tree fndecl)
7454
{
7455
  if (TARGET_SH5)
7456
    {
7457
      if (TYPE_MODE (type) == BLKmode)
7458
        return ((unsigned HOST_WIDE_INT) int_size_in_bytes (type)) > 8;
7459
      else
7460
        return GET_MODE_SIZE (TYPE_MODE (type)) > 8;
7461
    }
7462
  else
7463
    {
7464
      return (TYPE_MODE (type) == BLKmode
7465
              || ((TARGET_HITACHI || sh_attr_renesas_p (fndecl))
7466
                  && TREE_CODE (type) == RECORD_TYPE));
7467
    }
7468
}
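/* Illustrative example (added; not part of the original source): on SH5 a
   12-byte struct (BLKmode, int_size_in_bytes > 8) is returned in memory,
   while an 8-byte DImode value is returned in a register.  On the other
   targets any BLKmode aggregate -- and, for the Renesas/Hitachi ABI, any
   RECORD_TYPE -- is returned in memory.  */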
7469
 
7470
/* We actually emit the code in sh_expand_prologue.  We used to use
7471
   a static variable to flag that we need to emit this code, but that
7472
   doesn't work when inlining, when functions are deferred and then emitted
7473
   later.  Fortunately, we already have two flags that are part of struct
7474
   function that tell if a function uses varargs or stdarg.  */
7475
static void
7476
sh_setup_incoming_varargs (CUMULATIVE_ARGS *ca,
7477
                           enum machine_mode mode,
7478
                           tree type,
7479
                           int *pretend_arg_size,
7480
                           int second_time ATTRIBUTE_UNUSED)
7481
{
7482
  gcc_assert (current_function_stdarg);
7483
  if (TARGET_VARARGS_PRETEND_ARGS (current_function_decl))
7484
    {
7485
      int named_parm_regs, anon_parm_regs;
7486
 
7487
      named_parm_regs = (ROUND_REG (*ca, mode)
7488
                         + (mode == BLKmode
7489
                            ? ROUND_ADVANCE (int_size_in_bytes (type))
7490
                            : ROUND_ADVANCE (GET_MODE_SIZE (mode))));
7491
      anon_parm_regs = NPARM_REGS (SImode) - named_parm_regs;
7492
      if (anon_parm_regs > 0)
7493
        *pretend_arg_size = anon_parm_regs * 4;
7494
    }
7495
}
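/* Illustrative example (added; not part of the original source, and
   assuming TARGET_VARARGS_PRETEND_ARGS holds): for a function such as
   int f (int a, ...), the single named SImode argument occupies one of the
   NPARM_REGS (SImode) = 4 integer argument registers, so anon_parm_regs = 3
   and *pretend_arg_size = 12, i.e. r5..r7 are dumped to the stack in the
   prologue for va_arg to find.  */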
7496
 
7497
static bool
7498
sh_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
7499
{
7500
  return TARGET_SH5;
7501
}
7502
 
7503
static bool
7504
sh_pretend_outgoing_varargs_named (CUMULATIVE_ARGS *ca)
7505
{
7506
  return ! (TARGET_HITACHI || ca->renesas_abi) && ! TARGET_SH5;
7507
}
7508
 
7509
 
7510
/* Define the offset between two registers, one to be eliminated, and
7511
   the other its replacement, at the start of a routine.  */
7512
 
7513
int
7514
initial_elimination_offset (int from, int to)
7515
{
7516
  int regs_saved;
7517
  int regs_saved_rounding = 0;
7518
  int total_saved_regs_space;
7519
  int total_auto_space;
7520
  int save_flags = target_flags;
7521
  int copy_flags;
7522
  HARD_REG_SET live_regs_mask;
7523
 
7524
  shmedia_space_reserved_for_target_registers = false;
7525
  regs_saved = calc_live_regs (&live_regs_mask);
7526
  regs_saved += SHMEDIA_REGS_STACK_ADJUST ();
7527
 
7528
  if (shmedia_reserve_space_for_target_registers_p (regs_saved, &live_regs_mask))
7529
    {
7530
      shmedia_space_reserved_for_target_registers = true;
7531
      regs_saved += shmedia_target_regs_stack_adjust (&live_regs_mask);
7532
    }
7533
 
7534
  if (TARGET_SH5 && regs_saved % (STACK_BOUNDARY / BITS_PER_UNIT))
7535
    regs_saved_rounding = ((STACK_BOUNDARY / BITS_PER_UNIT)
7536
                           - regs_saved % (STACK_BOUNDARY / BITS_PER_UNIT));
7537
 
7538
  total_auto_space = rounded_frame_size (regs_saved) - regs_saved_rounding;
7539
  copy_flags = target_flags;
7540
  target_flags = save_flags;
7541
 
7542
  total_saved_regs_space = regs_saved + regs_saved_rounding;
7543
 
7544
  if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
7545
    return total_saved_regs_space + total_auto_space
7546
      + current_function_args_info.byref_regs * 8;
7547
 
7548
  if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
7549
    return total_saved_regs_space + total_auto_space
7550
      + current_function_args_info.byref_regs * 8;
7551
 
7552
  /* Initial gap between fp and sp is 0.  */
7553
  if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
7554
    return 0;
7555
 
7556
  if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
7557
    return rounded_frame_size (0);
7558
 
7559
  if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
7560
    return rounded_frame_size (0);
7561
 
7562
  gcc_assert (from == RETURN_ADDRESS_POINTER_REGNUM
7563
              && (to == HARD_FRAME_POINTER_REGNUM
7564
                  || to == STACK_POINTER_REGNUM));
7565
  if (TARGET_SH5)
7566
    {
7567
      int n = total_saved_regs_space;
7568
      int pr_reg = TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG;
7569
      save_schedule schedule;
7570
      save_entry *entry;
7571
 
7572
      n += total_auto_space;
7573
 
7574
      /* If it wasn't saved, there's not much we can do.  */
7575
      if (! TEST_HARD_REG_BIT (live_regs_mask, pr_reg))
7576
        return n;
7577
 
7578
      target_flags = copy_flags;
7579
 
7580
      sh5_schedule_saves (&live_regs_mask, &schedule, n);
7581
      for (entry = &schedule.entries[1]; entry->mode != VOIDmode; entry++)
7582
        if (entry->reg == pr_reg)
7583
          {
7584
            target_flags = save_flags;
7585
            return entry->offset;
7586
          }
7587
      gcc_unreachable ();
7588
    }
7589
  else
7590
    return total_auto_space;
7591
}
7592
 
7593
/* Insert any deferred function attributes from earlier pragmas.  */
7594
static void
7595
sh_insert_attributes (tree node, tree *attributes)
7596
{
7597
  tree attrs;
7598
 
7599
  if (TREE_CODE (node) != FUNCTION_DECL)
7600
    return;
7601
 
7602
  /* We are only interested in declarations.  */
7603
  if (!DECL_P (node))
7604
    return;
7605
 
7606
  /* Append the attributes to the deferred attributes.  */
7607
  *sh_deferred_function_attributes_tail = *attributes;
7608
  attrs = sh_deferred_function_attributes;
7609
  if (!attrs)
7610
    return;
7611
 
7612
  /* Some attributes imply or require the interrupt attribute.  */
7613
  if (!lookup_attribute ("interrupt_handler", attrs)
7614
      && !lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (node)))
7615
    {
7616
      /* If we have a trapa_handler, but no interrupt_handler attribute,
7617
         insert an interrupt_handler attribute.  */
7618
      if (lookup_attribute ("trapa_handler", attrs) != NULL_TREE)
7619
        /* We can't use sh_pr_interrupt here because that's not in the
7620
           java frontend.  */
7621
        attrs
7622
          = tree_cons (get_identifier("interrupt_handler"), NULL_TREE, attrs);
7623
      /* However, for sp_switch, trap_exit and nosave_low_regs, if the
7624
         interrupt attribute is missing, we ignore the attribute and warn.  */
7625
      else if (lookup_attribute ("sp_switch", attrs)
7626
               || lookup_attribute ("trap_exit", attrs)
7627
               || lookup_attribute ("nosave_low_regs", attrs))
7628
        {
7629
          tree *tail;
7630
 
7631
          for (tail = attributes; attrs; attrs = TREE_CHAIN (attrs))
7632
            {
7633
              if (is_attribute_p ("sp_switch", TREE_PURPOSE (attrs))
7634
                  || is_attribute_p ("trap_exit", TREE_PURPOSE (attrs))
7635
                  || is_attribute_p ("nosave_low_regs", TREE_PURPOSE (attrs)))
7636
                warning (OPT_Wattributes,
7637
                         "%qs attribute only applies to interrupt functions",
7638
                         IDENTIFIER_POINTER (TREE_PURPOSE (attrs)));
7639
              else
7640
                {
7641
                  *tail = tree_cons (TREE_PURPOSE (attrs), NULL_TREE,
7642
                                     NULL_TREE);
7643
                  tail = &TREE_CHAIN (*tail);
7644
                }
7645
            }
7646
          attrs = *attributes;
7647
        }
7648
    }
7649
 
7650
  /* Install the processed list.  */
7651
  *attributes = attrs;
7652
 
7653
  /* Clear deferred attributes.  */
7654
  sh_deferred_function_attributes = NULL_TREE;
7655
  sh_deferred_function_attributes_tail = &sh_deferred_function_attributes;
7656
 
7657
  return;
7658
}
7659
 
7660
/* Supported attributes:
7661
 
7662
   interrupt_handler -- specifies this function is an interrupt handler.
7663
 
7664
   trapa_handler - like above, but don't save all registers.
7665
 
7666
   sp_switch -- specifies an alternate stack for an interrupt handler
7667
   to run on.
7668
 
7669
   trap_exit -- use a trapa to exit an interrupt function instead of
7670
   an rte instruction.
7671
 
7672
   nosave_low_regs - don't save r0..r7 in an interrupt handler.
7673
     This is useful on the SH3 and upwards,
7674
     which have a separate set of low regs for User and Supervisor modes.
7675
     This should only be used for the lowest level of interrupts.  Higher levels
7676
     of interrupts must save the registers in case they themselves are
7677
     interrupted.
7678
 
7679
   renesas -- use Renesas calling/layout conventions (functions and
7680
   structures).
7681
 
7682
*/
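/* Illustrative usage (added; not part of the original source; the names
   are made up):

     void my_isr (void)
       __attribute__ ((interrupt_handler, sp_switch ("isr_stack"),
                       trap_exit (11)));

     int renesas_fn (int) __attribute__ ((renesas));

   sp_switch expects a string constant naming the alternate stack and
   trap_exit an integer constant trap number, matching the checks in the
   attribute handlers below.  */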
7683
 
7684
const struct attribute_spec sh_attribute_table[] =
7685
{
7686
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
7687
  { "interrupt_handler", 0, 0, true,  false, false, sh_handle_interrupt_handler_attribute },
7688
  { "sp_switch",         1, 1, true,  false, false, sh_handle_sp_switch_attribute },
7689
  { "trap_exit",         1, 1, true,  false, false, sh_handle_trap_exit_attribute },
7690
  { "renesas",           0, 0, false, true, false, sh_handle_renesas_attribute },
7691
  { "trapa_handler",     0, 0, true,  false, false, sh_handle_interrupt_handler_attribute },
7692
  { "nosave_low_regs",   0, 0, true,  false, false, sh_handle_interrupt_handler_attribute },
7693
#ifdef SYMBIAN
7694
  /* Symbian support adds two new attributes:
7695
     dllexport - for exporting a function/variable that will live in a dll
7696
     dllimport - for importing a function/variable from a dll
7697
 
7698
     Microsoft allows multiple declspecs in one __declspec, separating
7699
     them with spaces.  We do NOT support this.  Instead, use __declspec
7700
     multiple times.  */
7701
  { "dllimport",         0, 0, true,  false, false, sh_symbian_handle_dll_attribute },
7702
  { "dllexport",         0, 0, true,  false, false, sh_symbian_handle_dll_attribute },
7703
#endif
7704
  { NULL,                0, 0, false, false, false, NULL }
7705
};
7706
 
7707
/* Handle an "interrupt_handler" attribute; arguments as in
7708
   struct attribute_spec.handler.  */
7709
static tree
7710
sh_handle_interrupt_handler_attribute (tree *node, tree name,
7711
                                       tree args ATTRIBUTE_UNUSED,
7712
                                       int flags ATTRIBUTE_UNUSED,
7713
                                       bool *no_add_attrs)
7714
{
7715
  if (TREE_CODE (*node) != FUNCTION_DECL)
7716
    {
7717
      warning (OPT_Wattributes, "%qs attribute only applies to functions",
7718
               IDENTIFIER_POINTER (name));
7719
      *no_add_attrs = true;
7720
    }
7721
  else if (TARGET_SHCOMPACT)
7722
    {
7723
      error ("attribute interrupt_handler is not compatible with -m5-compact");
7724
      *no_add_attrs = true;
7725
    }
7726
 
7727
  return NULL_TREE;
7728
}
7729
 
7730
/* Handle an "sp_switch" attribute; arguments as in
7731
   struct attribute_spec.handler.  */
7732
static tree
7733
sh_handle_sp_switch_attribute (tree *node, tree name, tree args,
7734
                               int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
7735
{
7736
  if (TREE_CODE (*node) != FUNCTION_DECL)
7737
    {
7738
      warning (OPT_Wattributes, "%qs attribute only applies to functions",
7739
               IDENTIFIER_POINTER (name));
7740
      *no_add_attrs = true;
7741
    }
7742
  else if (TREE_CODE (TREE_VALUE (args)) != STRING_CST)
7743
    {
7744
      /* The argument must be a constant string.  */
7745
      warning (OPT_Wattributes, "%qs attribute argument not a string constant",
7746
               IDENTIFIER_POINTER (name));
7747
      *no_add_attrs = true;
7748
    }
7749
 
7750
  return NULL_TREE;
7751
}
7752
 
7753
/* Handle an "trap_exit" attribute; arguments as in
7754
   struct attribute_spec.handler.  */
7755
static tree
7756
sh_handle_trap_exit_attribute (tree *node, tree name, tree args,
7757
                               int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
7758
{
7759
  if (TREE_CODE (*node) != FUNCTION_DECL)
7760
    {
7761
      warning (OPT_Wattributes, "%qs attribute only applies to functions",
7762
               IDENTIFIER_POINTER (name));
7763
      *no_add_attrs = true;
7764
    }
7765
  /* The argument specifies a trap number to be used in a trapa instruction
7766
     at function exit (instead of an rte instruction).  */
7767
  else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
7768
    {
7769
      /* The argument must be a constant integer.  */
7770
      warning (OPT_Wattributes, "%qs attribute argument not an "
7771
               "integer constant", IDENTIFIER_POINTER (name));
7772
      *no_add_attrs = true;
7773
    }
7774
 
7775
  return NULL_TREE;
7776
}
7777
 
7778
static tree
7779
sh_handle_renesas_attribute (tree *node ATTRIBUTE_UNUSED,
7780
                             tree name ATTRIBUTE_UNUSED,
7781
                             tree args ATTRIBUTE_UNUSED,
7782
                             int flags ATTRIBUTE_UNUSED,
7783
                             bool *no_add_attrs ATTRIBUTE_UNUSED)
7784
{
7785
  return NULL_TREE;
7786
}
7787
 
7788
/* True if __attribute__((renesas)) or -mrenesas.  */
7789
int
7790
sh_attr_renesas_p (tree td)
7791
{
7792
  if (TARGET_HITACHI)
7793
    return 1;
7794
  if (td == 0)
7795
    return 0;
7796
  if (DECL_P (td))
7797
    td = TREE_TYPE (td);
7798
  if (td == error_mark_node)
7799
    return 0;
7800
  return (lookup_attribute ("renesas", TYPE_ATTRIBUTES (td))
7801
          != NULL_TREE);
7802
}
7803
 
7804
/* True if __attribute__((renesas)) or -mrenesas, for the current
7805
   function.  */
7806
int
7807
sh_cfun_attr_renesas_p (void)
7808
{
7809
  return sh_attr_renesas_p (current_function_decl);
7810
}
7811
 
7812
int
7813
sh_cfun_interrupt_handler_p (void)
7814
{
7815
  return (lookup_attribute ("interrupt_handler",
7816
                            DECL_ATTRIBUTES (current_function_decl))
7817
          != NULL_TREE);
7818
}
7819
 
7820
/* Implement TARGET_CHECK_PCH_TARGET_FLAGS.  */
7821
 
7822
static const char *
7823
sh_check_pch_target_flags (int old_flags)
7824
{
7825
  if ((old_flags ^ target_flags) & (MASK_SH1 | MASK_SH2 | MASK_SH3
7826
                                    | MASK_SH_E | MASK_HARD_SH4
7827
                                    | MASK_FPU_SINGLE | MASK_SH4))
7828
    return _("created and used with different architectures / ABIs");
7829
  if ((old_flags ^ target_flags) & MASK_HITACHI)
7830
    return _("created and used with different ABIs");
7831
  if ((old_flags ^ target_flags) & MASK_LITTLE_ENDIAN)
7832
    return _("created and used with different endianness");
7833
  return NULL;
7834
}
7835
 
7836
/* Predicates used by the templates.  */
7837
 
7838
/* Returns 1 if OP is MACL, MACH or PR.  The input must be a REG rtx.
7839
   Used only in general_movsrc_operand.  */
7840
 
7841
int
7842
system_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
7843
{
7844
  switch (REGNO (op))
7845
    {
7846
    case PR_REG:
7847
    case MACL_REG:
7848
    case MACH_REG:
7849
      return 1;
7850
    }
7851
  return 0;
7852
}
7853
 
7854
/* Nonzero if OP is a floating point value with value 0.0.  */
7855
 
7856
int
7857
fp_zero_operand (rtx op)
7858
{
7859
  REAL_VALUE_TYPE r;
7860
 
7861
  if (GET_MODE (op) != SFmode)
7862
    return 0;
7863
 
7864
  REAL_VALUE_FROM_CONST_DOUBLE (r, op);
7865
  return REAL_VALUES_EQUAL (r, dconst0) && ! REAL_VALUE_MINUS_ZERO (r);
7866
}
7867
 
7868
/* Nonzero if OP is a floating point value with value 1.0.  */
7869
 
7870
int
7871
fp_one_operand (rtx op)
7872
{
7873
  REAL_VALUE_TYPE r;
7874
 
7875
  if (GET_MODE (op) != SFmode)
7876
    return 0;
7877
 
7878
  REAL_VALUE_FROM_CONST_DOUBLE (r, op);
7879
  return REAL_VALUES_EQUAL (r, dconst1);
7880
}
7881
 
7882
/* For -m4 and -m4-single-only, mode switching is used.  If we are
7883
   compiling without -mfmovd, movsf_ie isn't taken into account for
7884
   mode switching.  We could check in machine_dependent_reorg for
7885
   cases where we know we are in single precision mode, but there is
7886
   no interface to find that out during reload, so we must avoid
7887
   choosing an fldi alternative during reload and thus failing to
7888
   allocate a scratch register for the constant loading.  */
7889
int
7890
fldi_ok (void)
7891
{
7892
  return ! TARGET_SH4 || TARGET_FMOVD || reload_completed;
7893
}
7894
 
7895
int
7896
tertiary_reload_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
7897
{
7898
  enum rtx_code code = GET_CODE (op);
7899
  return code == MEM || (TARGET_SH4 && code == CONST_DOUBLE);
7900
}
7901
 
7902
/* Return the TLS type for TLS symbols, 0 otherwise.  */
7903
int
7904
tls_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
7905
{
7906
  if (GET_CODE (op) != SYMBOL_REF)
7907
    return 0;
7908
  return SYMBOL_REF_TLS_MODEL (op);
7909
}
7910
 
7911
/* Return the destination address of a branch.  */
7912
 
7913
static int
7914
branch_dest (rtx branch)
7915
{
7916
  rtx dest = SET_SRC (PATTERN (branch));
7917
  int dest_uid;
7918
 
7919
  if (GET_CODE (dest) == IF_THEN_ELSE)
7920
    dest = XEXP (dest, 1);
7921
  dest = XEXP (dest, 0);
7922
  dest_uid = INSN_UID (dest);
7923
  return INSN_ADDRESSES (dest_uid);
7924
}
7925
 
7926
/* Return nonzero if REG is not used after INSN.
7927
   We assume REG is a reload reg, and therefore does
7928
   not live past labels.  It may live past calls or jumps though.  */
7929
int
7930
reg_unused_after (rtx reg, rtx insn)
7931
{
7932
  enum rtx_code code;
7933
  rtx set;
7934
 
7935
  /* If the reg is set by this instruction, then it is safe for our
7936
     case.  Disregard the case where this is a store to memory, since
7937
     we are checking a register used in the store address.  */
7938
  set = single_set (insn);
7939
  if (set && GET_CODE (SET_DEST (set)) != MEM
7940
      && reg_overlap_mentioned_p (reg, SET_DEST (set)))
7941
    return 1;
7942
 
7943
  while ((insn = NEXT_INSN (insn)))
7944
    {
7945
      rtx set;
7946
      if (!INSN_P (insn))
7947
        continue;
7948
 
7949
      code = GET_CODE (insn);
7950
 
7951
#if 0
7952
      /* If this is a label that existed before reload, then the register
7953
         is dead here.  However, if this is a label added by reorg, then
7954
         the register may still be live here.  We can't tell the difference,
7955
         so we just ignore labels completely.  */
7956
      if (code == CODE_LABEL)
7957
        return 1;
7958
      /* else */
7959
#endif
7960
 
7961
      if (code == JUMP_INSN)
7962
        return 0;
7963
 
7964
      /* If this is a sequence, we must handle them all at once.
7965
         We could have for instance a call that sets the target register,
7966
         and an insn in a delay slot that uses the register.  In this case,
7967
         we must return 0.  */
7968
      else if (code == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
7969
        {
7970
          int i;
7971
          int retval = 0;
7972
 
7973
          for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
7974
            {
7975
              rtx this_insn = XVECEXP (PATTERN (insn), 0, i);
7976
              rtx set = single_set (this_insn);
7977
 
7978
              if (GET_CODE (this_insn) == CALL_INSN)
7979
                code = CALL_INSN;
7980
              else if (GET_CODE (this_insn) == JUMP_INSN)
7981
                {
7982
                  if (INSN_ANNULLED_BRANCH_P (this_insn))
7983
                    return 0;
7984
                  code = JUMP_INSN;
7985
                }
7986
 
7987
              if (set && reg_overlap_mentioned_p (reg, SET_SRC (set)))
7988
                return 0;
7989
              if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
7990
                {
7991
                  if (GET_CODE (SET_DEST (set)) != MEM)
7992
                    retval = 1;
7993
                  else
7994
                    return 0;
7995
                }
7996
              if (set == 0
7997
                  && reg_overlap_mentioned_p (reg, PATTERN (this_insn)))
7998
                return 0;
7999
            }
8000
          if (retval == 1)
8001
            return 1;
8002
          else if (code == JUMP_INSN)
8003
            return 0;
8004
        }
8005
 
8006
      set = single_set (insn);
8007
      if (set && reg_overlap_mentioned_p (reg, SET_SRC (set)))
8008
        return 0;
8009
      if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
8010
        return GET_CODE (SET_DEST (set)) != MEM;
8011
      if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
8012
        return 0;
8013
 
8014
      if (code == CALL_INSN && call_really_used_regs[REGNO (reg)])
8015
        return 1;
8016
    }
8017
  return 1;
8018
}
8019
 
8020
#include "ggc.h"
8021
 
8022
static GTY(()) rtx fpscr_rtx;
8023
rtx
8024
get_fpscr_rtx (void)
8025
{
8026
  if (! fpscr_rtx)
8027
    {
8028
      fpscr_rtx = gen_rtx_REG (PSImode, FPSCR_REG);
8029
      REG_USERVAR_P (fpscr_rtx) = 1;
8030
      mark_user_reg (fpscr_rtx);
8031
    }
8032
  if (! reload_completed || mdep_reorg_phase != SH_AFTER_MDEP_REORG)
8033
    mark_user_reg (fpscr_rtx);
8034
  return fpscr_rtx;
8035
}
8036
 
8037
static GTY(()) tree fpscr_values;
8038
 
8039
static void
8040
emit_fpu_switch (rtx scratch, int index)
8041
{
8042
  rtx dst, src;
8043
 
8044
  if (fpscr_values == NULL)
8045
    {
8046
      tree t;
8047
 
8048
      t = build_index_type (integer_one_node);
8049
      t = build_array_type (integer_type_node, t);
8050
      t = build_decl (VAR_DECL, get_identifier ("__fpscr_values"), t);
8051
      DECL_ARTIFICIAL (t) = 1;
8052
      DECL_IGNORED_P (t) = 1;
8053
      DECL_EXTERNAL (t) = 1;
8054
      TREE_STATIC (t) = 1;
8055
      TREE_PUBLIC (t) = 1;
8056
      TREE_USED (t) = 1;
8057
 
8058
      fpscr_values = t;
8059
    }
8060
 
8061
  src = DECL_RTL (fpscr_values);
8062
  if (no_new_pseudos)
8063
    {
8064
      emit_move_insn (scratch, XEXP (src, 0));
8065
      if (index != 0)
8066
        emit_insn (gen_addsi3 (scratch, scratch, GEN_INT (index * 4)));
8067
      src = adjust_automodify_address (src, PSImode, scratch, index * 4);
8068
    }
8069
  else
8070
    src = adjust_address (src, PSImode, index * 4);
8071
 
8072
  dst = get_fpscr_rtx ();
8073
  emit_move_insn (dst, src);
8074
}
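/* Note (added; not part of the original source): the code above loads FPSCR
   from __fpscr_values[index], a two-element array of FPSCR settings that is
   only declared here (DECL_EXTERNAL) and is expected to be provided by the
   runtime support code.  Indices 0 and 1 select the two precision modes
   used for mode switching.  */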
8075
 
8076
void
8077
emit_sf_insn (rtx pat)
8078
{
8079
  emit_insn (pat);
8080
}
8081
 
8082
void
8083
emit_df_insn (rtx pat)
8084
{
8085
  emit_insn (pat);
8086
}
8087
 
8088
void
8089
expand_sf_unop (rtx (*fun) (rtx, rtx, rtx), rtx *operands)
8090
{
8091
  emit_sf_insn ((*fun) (operands[0], operands[1], get_fpscr_rtx ()));
8092
}
8093
 
8094
void
8095
expand_sf_binop (rtx (*fun) (rtx, rtx, rtx, rtx), rtx *operands)
8096
{
8097
  emit_sf_insn ((*fun) (operands[0], operands[1], operands[2],
8098
                         get_fpscr_rtx ()));
8099
}
8100
 
8101
void
8102
expand_df_unop (rtx (*fun) (rtx, rtx, rtx), rtx *operands)
8103
{
8104
  emit_df_insn ((*fun) (operands[0], operands[1], get_fpscr_rtx ()));
8105
}
8106
 
8107
void
8108
expand_df_binop (rtx (*fun) (rtx, rtx, rtx, rtx), rtx *operands)
8109
{
8110
  emit_df_insn ((*fun) (operands[0], operands[1], operands[2],
8111
                        get_fpscr_rtx ()));
8112
}
8113
 
8114
/* ??? gcc does flow analysis strictly after common subexpression
8115
   elimination.  As a result, common subexpression elimination fails
8116
   when there are some intervening statements setting the same register.
8117
   If we did nothing about this, this would hurt the precision switching
8118
   for SH4 badly.  There is some cse after reload, but it is unable to
8119
   undo the extra register pressure from the unused instructions, and
8120
   it cannot remove auto-increment loads.
8121
 
8122
   A C code example that shows this flow/cse weakness for (at least) SH
8123
   and sparc (as of gcc ss-970706) is this:
8124
 
8125
double
8126
f(double a)
8127
{
8128
  double d;
8129
  d = 0.1;
8130
  a += d;
8131
  d = 1.1;
8132
  d = 0.1;
8133
  a *= d;
8134
  return a;
8135
}
8136
 
8137
   So we add another pass before common subexpression elimination, to
8138
   remove assignments that are dead due to a following assignment in the
8139
   same basic block.  */
8140
 
8141
static void
8142
mark_use (rtx x, rtx *reg_set_block)
8143
{
8144
  enum rtx_code code;
8145
 
8146
  if (! x)
8147
    return;
8148
  code = GET_CODE (x);
8149
  switch (code)
8150
    {
8151
    case REG:
8152
      {
8153
        int regno = REGNO (x);
8154
        int nregs = (regno < FIRST_PSEUDO_REGISTER
8155
                     ? HARD_REGNO_NREGS (regno, GET_MODE (x))
8156
                     : 1);
8157
        do
8158
          {
8159
            reg_set_block[regno + nregs - 1] = 0;
8160
          }
8161
        while (--nregs);
8162
        break;
8163
      }
8164
    case SET:
8165
      {
8166
        rtx dest = SET_DEST (x);
8167
 
8168
        if (GET_CODE (dest) == SUBREG)
8169
          dest = SUBREG_REG (dest);
8170
        if (GET_CODE (dest) != REG)
8171
          mark_use (dest, reg_set_block);
8172
        mark_use (SET_SRC (x), reg_set_block);
8173
        break;
8174
      }
8175
    case CLOBBER:
8176
      break;
8177
    default:
8178
      {
8179
        const char *fmt = GET_RTX_FORMAT (code);
8180
        int i, j;
8181
        for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8182
          {
8183
            if (fmt[i] == 'e')
8184
              mark_use (XEXP (x, i), reg_set_block);
8185
            else if (fmt[i] == 'E')
8186
              for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8187
                mark_use (XVECEXP (x, i, j), reg_set_block);
8188
          }
8189
        break;
8190
      }
8191
    }
8192
}
8193
 
8194
static rtx get_free_reg (HARD_REG_SET);
8195
 
8196
/* This function returns a register to use to load the address from which
8197
   the fpscr is loaded.  Currently it always returns r1 or r7, but when we are
8198
   able to use pseudo registers after combine, or have a better mechanism
8199
   for choosing a register, it should be done here.  */
8200
/* REGS_LIVE is the liveness information for the point for which we
8201
   need this allocation.  In some bare-bones exit blocks, r1 is live at the
8202
   start.  We can even have all of r0..r3 being live:
8203
__complex__ long long f (double d) { if (d == 0) return 2; else return 3; }
8204
   The INSN before which new insns are placed will clobber the register
8205
   we return.  If a basic block consists only of setting the return value
8206
   register to a pseudo and using that register, the return value is not
8207
   live before or after this block, yet we'll insert our insns right in
8208
   the middle.  */
8209
 
8210
static rtx
8211
get_free_reg (HARD_REG_SET regs_live)
8212
{
8213
  if (! TEST_HARD_REG_BIT (regs_live, 1))
8214
    return gen_rtx_REG (Pmode, 1);
8215
 
8216
  /* Hard reg 1 is live; since this is a SMALL_REGISTER_CLASSES target,
8217
     there shouldn't be anything but a jump before the function end.  */
8218
  gcc_assert (!TEST_HARD_REG_BIT (regs_live, 7));
8219
  return gen_rtx_REG (Pmode, 7);
8220
}
8221
 
8222
/* This function will set the fpscr from memory.
8223
   MODE is the mode we are setting it to.  */
8224
void
8225
fpscr_set_from_mem (int mode, HARD_REG_SET regs_live)
8226
{
8227
  enum attr_fp_mode fp_mode = mode;
8228
  enum attr_fp_mode norm_mode = ACTUAL_NORMAL_MODE (FP_MODE);
8229
  rtx addr_reg = get_free_reg (regs_live);
8230
 
8231
  emit_fpu_switch (addr_reg, fp_mode == norm_mode);
8232
}
8233
 
8234
/* Is the given character a logical line separator for the assembler?  */
8235
#ifndef IS_ASM_LOGICAL_LINE_SEPARATOR
8236
#define IS_ASM_LOGICAL_LINE_SEPARATOR(C) ((C) == ';')
8237
#endif
8238
 
8239
int
8240
sh_insn_length_adjustment (rtx insn)
8241
{
8242
  /* Instructions with unfilled delay slots take up an extra two bytes for
8243
     the nop in the delay slot.  */
8244
  if (((GET_CODE (insn) == INSN
8245
        && GET_CODE (PATTERN (insn)) != USE
8246
        && GET_CODE (PATTERN (insn)) != CLOBBER)
8247
       || GET_CODE (insn) == CALL_INSN
8248
       || (GET_CODE (insn) == JUMP_INSN
8249
           && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
8250
           && GET_CODE (PATTERN (insn)) != ADDR_VEC))
8251
      && GET_CODE (PATTERN (NEXT_INSN (PREV_INSN (insn)))) != SEQUENCE
8252
      && get_attr_needs_delay_slot (insn) == NEEDS_DELAY_SLOT_YES)
8253
    return 2;
8254
 
8255
  /* SH2e has a bug that prevents the use of annulled branches, so if
8256
     the delay slot is not filled, we'll have to put a NOP in it.  */
8257
  if (sh_cpu == CPU_SH2E
8258
      && GET_CODE (insn) == JUMP_INSN
8259
      && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
8260
      && GET_CODE (PATTERN (insn)) != ADDR_VEC
8261
      && get_attr_type (insn) == TYPE_CBRANCH
8262
      && GET_CODE (PATTERN (NEXT_INSN (PREV_INSN (insn)))) != SEQUENCE)
8263
    return 2;
8264
 
8265
  /* sh-dsp parallel processing insns take four bytes instead of two.  */
8266
 
8267
  if (GET_CODE (insn) == INSN)
8268
    {
8269
      int sum = 0;
8270
      rtx body = PATTERN (insn);
8271
      const char *template;
8272
      char c;
8273
      int maybe_label = 1;
8274
 
8275
      if (GET_CODE (body) == ASM_INPUT)
8276
        template = XSTR (body, 0);
8277
      else if (asm_noperands (body) >= 0)
8278
        template
8279
          = decode_asm_operands (body, NULL, NULL, NULL, NULL);
8280
      else
8281
        return 0;
8282
      do
8283
        {
8284
          int ppi_adjust = 0;
8285
 
8286
          do
8287
            c = *template++;
8288
          while (c == ' ' || c == '\t');
8289
          /* all sh-dsp parallel-processing insns start with p.
8290
             The only non-ppi sh insn starting with p is pref.
8291
             The only ppi starting with pr is prnd.  */
8292
          if ((c == 'p' || c == 'P') && strncasecmp ("re", template, 2))
8293
            ppi_adjust = 2;
8294
           /* The repeat pseudo-insn expands to three insns, a total of
8295
             six bytes in size.  */
8296
          else if ((c == 'r' || c == 'R')
8297
                   && ! strncasecmp ("epeat", template, 5))
8298
            ppi_adjust = 4;
8299
          while (c && c != '\n' && ! IS_ASM_LOGICAL_LINE_SEPARATOR (c))
8300
            {
8301
              /* If this is a label, it is obviously not a ppi insn.  */
8302
              if (c == ':' && maybe_label)
8303
                {
8304
                  ppi_adjust = 0;
8305
                  break;
8306
                }
8307
              else if (c == '\'' || c == '"')
8308
                maybe_label = 0;
8309
              c = *template++;
8310
            }
8311
          sum += ppi_adjust;
8312
          maybe_label = c != ':';
8313
        }
8314
      while (c);
8315
      return sum;
8316
    }
8317
  return 0;
8318
}
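/* Illustrative example (added; not part of the original source; the
   mnemonics are only meant as placeholders): for an inline asm such as

     asm ("padd  x0, y0, a0 \n\t repeat start, end, #4");

   the scan above adds 2 for the line starting with "p" (a parallel
   processing insn) and 4 for the "repeat" pseudo-insn, so the insn length
   is increased by 6 bytes in total.  */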
8319
 
8320
/* Return TRUE if X references a SYMBOL_REF or LABEL_REF whose symbol
8321
   isn't protected by a PIC unspec.  */
8322
int
8323
nonpic_symbol_mentioned_p (rtx x)
8324
{
8325
  register const char *fmt;
8326
  register int i;
8327
 
8328
  if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF
8329
      || GET_CODE (x) == PC)
8330
    return 1;
8331
 
8332
  /* We don't want to look into the possible MEM location of a
8333
     CONST_DOUBLE, since we're not going to use it, in general.  */
8334
  if (GET_CODE (x) == CONST_DOUBLE)
8335
    return 0;
8336
 
8337
  if (GET_CODE (x) == UNSPEC
8338
      && (XINT (x, 1) == UNSPEC_PIC
8339
          || XINT (x, 1) == UNSPEC_GOT
8340
          || XINT (x, 1) == UNSPEC_GOTOFF
8341
          || XINT (x, 1) == UNSPEC_GOTPLT
8342
          || XINT (x, 1) == UNSPEC_GOTTPOFF
8343
          || XINT (x, 1) == UNSPEC_DTPOFF
8344
          || XINT (x, 1) == UNSPEC_PLT))
8345
    return 0;
8346
 
8347
  fmt = GET_RTX_FORMAT (GET_CODE (x));
8348
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
8349
    {
8350
      if (fmt[i] == 'E')
8351
        {
8352
          register int j;
8353
 
8354
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
8355
            if (nonpic_symbol_mentioned_p (XVECEXP (x, i, j)))
8356
              return 1;
8357
        }
8358
      else if (fmt[i] == 'e' && nonpic_symbol_mentioned_p (XEXP (x, i)))
8359
        return 1;
8360
    }
8361
 
8362
  return 0;
8363
}
8364
 
8365
/* Convert a non-PIC address in `orig' to a PIC address using @GOT or
8366
   @GOTOFF in `reg'.  */
8367
rtx
8368
legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
8369
                        rtx reg)
8370
{
8371
  if (tls_symbolic_operand (orig, Pmode))
8372
    return orig;
8373
 
8374
  if (GET_CODE (orig) == LABEL_REF
8375
      || (GET_CODE (orig) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (orig)))
8376
    {
8377
      if (reg == 0)
8378
        reg = gen_reg_rtx (Pmode);
8379
 
8380
      emit_insn (gen_symGOTOFF2reg (reg, orig));
8381
      return reg;
8382
    }
8383
  else if (GET_CODE (orig) == SYMBOL_REF)
8384
    {
8385
      if (reg == 0)
8386
        reg = gen_reg_rtx (Pmode);
8387
 
8388
      emit_insn (gen_symGOT2reg (reg, orig));
8389
      return reg;
8390
    }
8391
  return orig;
8392
}
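/* Illustrative example (added; not part of the original source): when
   compiling with -fPIC, the address of a file-local symbol such as
   "static int x;" satisfies SYMBOL_REF_LOCAL_P and is computed GOT-relative
   via gen_symGOTOFF2reg, whereas the address of an external symbol such as
   "extern int y;" is loaded from the GOT via gen_symGOT2reg.  TLS symbols
   are returned unchanged here and are legitimized elsewhere.  */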
8393
 
8394
/* Mark the use of a constant in the literal table. If the constant
8395
   has multiple labels, make it unique.  */
8396
static rtx
8397
mark_constant_pool_use (rtx x)
8398
{
8399
  rtx insn, lab, pattern;
8400
 
8401
  if (x == NULL)
8402
    return x;
8403
 
8404
  switch (GET_CODE (x))
8405
    {
8406
    case LABEL_REF:
8407
      x = XEXP (x, 0);
8408
    case CODE_LABEL:
8409
      break;
8410
    default:
8411
      return x;
8412
    }
8413
 
8414
  /* Get the first label in the list of labels for the same constant
8415
     and delete the other labels in the list.  */
8416
  lab = x;
8417
  for (insn = PREV_INSN (x); insn; insn = PREV_INSN (insn))
8418
    {
8419
      if (GET_CODE (insn) != CODE_LABEL
8420
          || LABEL_REFS (insn) != NEXT_INSN (insn))
8421
        break;
8422
      lab = insn;
8423
    }
8424
 
8425
  for (insn = LABEL_REFS (lab); insn; insn = LABEL_REFS (insn))
8426
    INSN_DELETED_P (insn) = 1;
8427
 
8428
  /* Mark constants in a window.  */
8429
  for (insn = NEXT_INSN (x); insn; insn = NEXT_INSN (insn))
8430
    {
8431
      if (GET_CODE (insn) != INSN)
8432
        continue;
8433
 
8434
      pattern = PATTERN (insn);
8435
      if (GET_CODE (pattern) != UNSPEC_VOLATILE)
8436
        continue;
8437
 
8438
      switch (XINT (pattern, 1))
8439
        {
8440
        case UNSPECV_CONST2:
8441
        case UNSPECV_CONST4:
8442
        case UNSPECV_CONST8:
8443
          XVECEXP (pattern, 0, 1) = const1_rtx;
8444
          break;
8445
        case UNSPECV_WINDOW_END:
8446
          if (XVECEXP (pattern, 0, 0) == x)
8447
            return lab;
8448
          break;
8449
        case UNSPECV_CONST_END:
8450
          return lab;
8451
        default:
8452
          break;
8453
        }
8454
    }
8455
 
8456
  return lab;
8457
}
8458
 
8459
/* Return true if it's possible to redirect BRANCH1 to the destination
8460
   of an unconditional jump BRANCH2.  We only want to do this if the
8461
   resulting branch will have a short displacement.  */
8462
int
8463
sh_can_redirect_branch (rtx branch1, rtx branch2)
8464
{
8465
  if (flag_expensive_optimizations && simplejump_p (branch2))
8466
    {
8467
      rtx dest = XEXP (SET_SRC (single_set (branch2)), 0);
8468
      rtx insn;
8469
      int distance;
8470
 
8471
      for (distance = 0, insn = NEXT_INSN (branch1);
8472
           insn && distance < 256;
8473
           insn = PREV_INSN (insn))
8474
        {
8475
          if (insn == dest)
8476
            return 1;
8477
          else
8478
            distance += get_attr_length (insn);
8479
        }
8480
      for (distance = 0, insn = NEXT_INSN (branch1);
8481
           insn && distance < 256;
8482
           insn = NEXT_INSN (insn))
8483
        {
8484
          if (insn == dest)
8485
            return 1;
8486
          else
8487
            distance += get_attr_length (insn);
8488
        }
8489
    }
8490
  return 0;
8491
}
8492
 
8493
/* Return nonzero if register old_reg can be renamed to register new_reg.  */
8494
int
8495
sh_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
8496
                         unsigned int new_reg)
8497
{
8498
  /* Interrupt functions can only use registers that have already been
8499
     saved by the prologue, even if they would normally be
8500
     call-clobbered.  */
8501
 
8502
  if (sh_cfun_interrupt_handler_p () && !regs_ever_live[new_reg])
8503
    return 0;
8504
 
8505
  return 1;
8506
}
8507
 
8508
/* Function to update the integer COST
8509
   based on the relationship between INSN that is dependent on
8510
   DEP_INSN through the dependence LINK.  The default is to make no
8511
   adjustment to COST.  This can be used for example to specify to
8512
   the scheduler that an output- or anti-dependence does not incur
8513
   the same cost as a data-dependence.  The return value should be
8514
   the new value for COST.  */
8515
static int
8516
sh_adjust_cost (rtx insn, rtx link ATTRIBUTE_UNUSED, rtx dep_insn, int cost)
8517
{
8518
  rtx reg, use_pat;
8519
 
8520
  if (TARGET_SHMEDIA)
8521
    {
8522
      /* On SHmedia, if the dependence is an anti-dependence or
8523
         output-dependence, there is no cost.  */
8524
      if (REG_NOTE_KIND (link) != 0)
8525
        {
8526
          /* However, dependencies between target register loads and
8527
             uses of the register in a subsequent block that are separated
8528
             by a conditional branch are not modelled - we have to make do with
8529
             the anti-dependency between the target register load and the
8530
             conditional branch that ends the current block.  */
8531
          if (REG_NOTE_KIND (link) == REG_DEP_ANTI
8532
              && GET_CODE (PATTERN (dep_insn)) == SET
8533
              && (get_attr_type (dep_insn) == TYPE_PT_MEDIA
8534
                  || get_attr_type (dep_insn) == TYPE_PTABS_MEDIA)
8535
              && get_attr_type (insn) == TYPE_CBRANCH_MEDIA)
8536
            {
8537
              int orig_cost = cost;
8538
              rtx note = find_reg_note (insn, REG_BR_PROB, 0);
8539
              rtx target = ((! note
8540
                             || INTVAL (XEXP (note, 0)) * 2 < REG_BR_PROB_BASE)
8541
                            ? insn : JUMP_LABEL (insn));
8542
              /* On the likely path, the branch costs 1, on the unlikely path,
8543
                 it costs 3.  */
8544
              cost--;
8545
              do
8546
                target = next_active_insn (target);
8547
              while (target && ! flow_dependent_p (target, dep_insn)
8548
                     && --cost > 0);
8549
              /* If two branches are executed in immediate succession, with the
8550
                 first branch properly predicted, this causes a stall at the
8551
                 second branch, hence we won't need the target for the
8552
                 second branch for two cycles after the launch of the first
8553
                 branch.  */
8554
              if (cost > orig_cost - 2)
8555
                cost = orig_cost - 2;
8556
            }
8557
          else
8558
            cost = 0;
8559
        }
8560
 
8561
      else if (get_attr_is_mac_media (insn)
8562
               && get_attr_is_mac_media (dep_insn))
8563
        cost = 1;
8564
 
8565
      else if (! reload_completed
8566
               && GET_CODE (PATTERN (insn)) == SET
8567
               && GET_CODE (SET_SRC (PATTERN (insn))) == FLOAT
8568
               && GET_CODE (PATTERN (dep_insn)) == SET
8569
               && fp_arith_reg_operand (SET_SRC (PATTERN (dep_insn)), VOIDmode)
8570
               && cost < 4)
8571
        cost = 4;
8572
      /* Schedule the ptabs for a casesi_jump_media in preference to stuff
8573
         that is needed at the target.  */
8574
      else if (get_attr_type (insn) == TYPE_JUMP_MEDIA
8575
               && ! flow_dependent_p (insn, dep_insn))
8576
        cost--;
8577
    }
8578
  else if (REG_NOTE_KIND (link) == 0)
8579
    {
8580
      enum attr_type dep_type, type;
8581
 
8582
      if (recog_memoized (insn) < 0
8583
          || recog_memoized (dep_insn) < 0)
8584
        return cost;
8585
 
8586
      dep_type = get_attr_type (dep_insn);
8587
      if (dep_type == TYPE_FLOAD || dep_type == TYPE_PCFLOAD)
8588
        cost--;
8589
      if ((dep_type == TYPE_LOAD_SI || dep_type == TYPE_PCLOAD_SI)
8590
          && (type = get_attr_type (insn)) != TYPE_CALL
8591
          && type != TYPE_SFUNC)
8592
        cost--;
8593
 
8594
      /* The only input for a call that is timing-critical is the
8595
         function's address.  */
8596
      if (GET_CODE(insn) == CALL_INSN)
8597
        {
8598
          rtx call = PATTERN (insn);
8599
 
8600
          if (GET_CODE (call) == PARALLEL)
8601
            call = XVECEXP (call, 0 ,0);
8602
          if (GET_CODE (call) == SET)
8603
            call = SET_SRC (call);
8604
          if (GET_CODE (call) == CALL && GET_CODE (XEXP (call, 0)) == MEM
8605
                  /* sibcalli_thunk uses a symbol_ref in an unspec.  */
8606
              && (GET_CODE (XEXP (XEXP (call, 0), 0)) == UNSPEC
8607
                  || ! reg_set_p (XEXP (XEXP (call, 0), 0), dep_insn)))
8608
            cost = 0;
8609
        }
8610
      /* Likewise, the most timing critical input for an sfuncs call
8611
         is the function address.  However, sfuncs typically start
8612
         using their arguments pretty quickly.
8613
         Assume a four cycle delay before they are needed.  */
8614
      /* All sfunc calls are parallels with at least four components.
8615
         Exploit this to avoid unnecessary calls to sfunc_uses_reg.  */
8616
      else if (GET_CODE (PATTERN (insn)) == PARALLEL
8617
               && XVECLEN (PATTERN (insn), 0) >= 4
8618
               && (reg = sfunc_uses_reg (insn)))
8619
        {
8620
          if (! reg_set_p (reg, dep_insn))
8621
            cost -= 4;
8622
        }
8623
      /* When the preceding instruction loads the shift amount of
8624
         the following SHAD/SHLD, the latency of the load is increased
8625
         by 1 cycle.  */
8626
      else if (TARGET_SH4
8627
               && get_attr_type (insn) == TYPE_DYN_SHIFT
8628
               && get_attr_any_int_load (dep_insn) == ANY_INT_LOAD_YES
8629
               && reg_overlap_mentioned_p (SET_DEST (single_set (dep_insn)),
8630
                                           XEXP (SET_SRC (single_set (insn)),
8631
                                                 1)))
8632
        cost++;
8633
      /* When an LS group instruction with a latency of less than
8634
         3 cycles is followed by a double-precision floating-point
8635
         instruction, FIPR, or FTRV, the latency of the first
8636
         instruction is increased to 3 cycles.  */
8637
      else if (cost < 3
8638
               && get_attr_insn_class (dep_insn) == INSN_CLASS_LS_GROUP
8639
               && get_attr_dfp_comp (insn) == DFP_COMP_YES)
8640
        cost = 3;
8641
      /* The lsw register of a double-precision computation is ready one
8642
         cycle earlier.  */
8643
      else if (reload_completed
8644
               && get_attr_dfp_comp (dep_insn) == DFP_COMP_YES
8645
               && (use_pat = single_set (insn))
8646
               && ! regno_use_in (REGNO (SET_DEST (single_set (dep_insn))),
8647
                                  SET_SRC (use_pat)))
8648
        cost -= 1;
8649
 
8650
      if (get_attr_any_fp_comp (dep_insn) == ANY_FP_COMP_YES
8651
          && get_attr_late_fp_use (insn) == LATE_FP_USE_YES)
8652
        cost -= 1;
8653
    }
8654
  /* An anti-dependence penalty of two applies if the first insn is a double
8655
     precision fadd / fsub / fmul.  */
8656
  else if (REG_NOTE_KIND (link) == REG_DEP_ANTI
8657
           && recog_memoized (dep_insn) >= 0
8658
           && get_attr_type (dep_insn) == TYPE_DFP_ARITH
8659
           /* A lot of alleged anti-flow dependences are fake,
8660
              so check this one is real.  */
8661
           && flow_dependent_p (dep_insn, insn))
8662
    cost = 2;
8663
 
8664
 
8665
  return cost;
8666
}
8667
 
8668
/* Check if INSN is flow-dependent on DEP_INSN.  Can also be used to check
8669
   if DEP_INSN is anti-flow dependent on INSN.  */
8670
static int
8671
flow_dependent_p (rtx insn, rtx dep_insn)
8672
{
8673
  rtx tmp = PATTERN (insn);
8674
 
8675
  note_stores (PATTERN (dep_insn), flow_dependent_p_1, &tmp);
8676
  return tmp == NULL_RTX;
8677
}
8678
 
8679
/* A helper function for flow_dependent_p called through note_stores.  */
8680
static void
8681
flow_dependent_p_1 (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
8682
{
8683
  rtx * pinsn = (rtx *) data;
8684
 
8685
  if (*pinsn && reg_referenced_p (x, *pinsn))
8686
    *pinsn = NULL_RTX;
8687
}
8688
 
8689
/* For use by sh_allocate_initial_value.  Note that sh.md contains some
8690
   'special function' patterns (type sfunc) that clobber pr, but that
8691
   do not look like function calls to leaf_function_p.  Hence we must
8692
   do this extra check.  */
8693
static int
8694
sh_pr_n_sets (void)
8695
{
8696
  return REG_N_SETS (TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG);
8697
}
8698
 
8699
/* Return where to allocate pseudo for a given hard register initial
8700
   value.  */
8701
static rtx
8702
sh_allocate_initial_value (rtx hard_reg)
8703
{
8704
  rtx x;
8705
 
8706
  if (REGNO (hard_reg) == (TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG))
8707
    {
8708
      if (current_function_is_leaf
8709
          && ! sh_pr_n_sets ()
8710
          && ! (TARGET_SHCOMPACT
8711
                && ((current_function_args_info.call_cookie
8712
                     & ~ CALL_COOKIE_RET_TRAMP (1))
8713
                    || current_function_has_nonlocal_label)))
8714
        x = hard_reg;
8715
      else
8716
        x = gen_frame_mem (Pmode, return_address_pointer_rtx);
8717
    }
8718
  else
8719
    x = NULL_RTX;
8720
 
8721
  return x;
8722
}
8723
 
8724
/* This function returns "2" to indicate dual issue for the SH4
8725
   processor.  To be used by the DFA pipeline description.  */
8726
static int
8727
sh_issue_rate (void)
8728
{
8729
  if (TARGET_SUPERSCALAR)
8730
    return 2;
8731
  else
8732
    return 1;
8733
}
8734
 
8735
/* Functions for ready queue reordering for sched1.  */
8736
 
8737
/* Get weight for mode for a set x.  */
8738
static short
8739
find_set_regmode_weight (rtx x, enum machine_mode mode)
8740
{
8741
  if (GET_CODE (x) == CLOBBER && register_operand (SET_DEST (x), mode))
8742
    return 1;
8743
  if (GET_CODE (x) == SET && register_operand (SET_DEST (x), mode))
8744
    {
8745
      if (GET_CODE (SET_DEST (x)) == REG)
8746
        {
8747
          if (!reg_mentioned_p (SET_DEST (x), SET_SRC (x)))
8748
            return 1;
8749
          else
8750
            return 0;
8751
        }
8752
      return 1;
8753
    }
8754
  return 0;
8755
}
8756
 
8757
/* Get regmode weight for insn.  */
8758
static short
8759
find_insn_regmode_weight (rtx insn, enum machine_mode mode)
8760
{
8761
  short reg_weight = 0;
8762
  rtx x;
8763
 
8764
  /* Increment weight for each register born here.  */
8765
  x = PATTERN (insn);
8766
  reg_weight += find_set_regmode_weight (x, mode);
8767
  if (GET_CODE (x) == PARALLEL)
8768
    {
8769
      int j;
8770
      for (j = XVECLEN (x, 0) - 1; j >= 0; j--)
8771
        {
8772
          x = XVECEXP (PATTERN (insn), 0, j);
8773
          reg_weight += find_set_regmode_weight (x, mode);
8774
        }
8775
    }
8776
  /* Decrement weight for each register that dies here.  */
8777
  for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
8778
    {
8779
      if (REG_NOTE_KIND (x) == REG_DEAD || REG_NOTE_KIND (x) == REG_UNUSED)
8780
        {
8781
          rtx note = XEXP (x, 0);
8782
          if (GET_CODE (note) == REG && GET_MODE (note) == mode)
8783
            reg_weight--;
8784
        }
8785
    }
8786
  return reg_weight;
8787
}
8788
 
8789
/* Calculate regmode weights for all insns of a basic block.  */
8790
static void
8791
find_regmode_weight (basic_block b, enum machine_mode mode)
8792
{
8793
  rtx insn, next_tail, head, tail;
8794
 
8795
  get_ebb_head_tail (b, b, &head, &tail);
8796
  next_tail = NEXT_INSN (tail);
8797
 
8798
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
8799
    {
8800
      /* Handle register life information.  */
8801
      if (!INSN_P (insn))
8802
        continue;
8803
 
8804
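      /* A DFmode (resp. DImode) value occupies two SFmode (resp. SImode)
         registers, hence its weight is counted twice towards the narrower
         mode's pressure.  */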
      if (mode == SFmode)
8805
        INSN_REGMODE_WEIGHT (insn, mode) =
8806
          find_insn_regmode_weight (insn, mode) + 2 * find_insn_regmode_weight (insn, DFmode);
8807
      else if (mode == SImode)
8808
        INSN_REGMODE_WEIGHT (insn, mode) =
8809
          find_insn_regmode_weight (insn, mode) + 2 * find_insn_regmode_weight (insn, DImode);
8810
    }
8811
}
8812
 
8813
/* Comparison function for ready queue sorting.  */
8814
static int
8815
rank_for_reorder (const void *x, const void *y)
8816
{
8817
  rtx tmp = *(const rtx *) y;
8818
  rtx tmp2 = *(const rtx *) x;
8819
 
8820
  /* The insn in a schedule group should be issued first.  */
8821
  if (SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
8822
    return SCHED_GROUP_P (tmp2) ? 1 : -1;
8823
 
8824
  /* If insns are equally good, sort by INSN_LUID (original insn order).  This
     minimizes instruction movement, thus minimizing sched's effect on
     register pressure.  */
8827
  return INSN_LUID (tmp) - INSN_LUID (tmp2);
8828
}
8829
 
8830
/* Resort the array A in which only the last element (at index N - 1) may be
   out of order.  */
8831
static void
8832
swap_reorder (rtx *a, int n)
8833
{
8834
  rtx insn = a[n - 1];
8835
  int i = n - 2;
8836
 
8837
  while (i >= 0 && rank_for_reorder (a + i, &insn) >= 0)
8838
    {
8839
      a[i + 1] = a[i];
8840
      i -= 1;
8841
    }
8842
  a[i + 1] = insn;
8843
}
8844
 
8845
#define SCHED_REORDER(READY, N_READY)                                   \
8846
  do                                                                    \
8847
    {                                                                   \
8848
      if ((N_READY) == 2)                                               \
8849
        swap_reorder (READY, N_READY);                                  \
8850
      else if ((N_READY) > 2)                                           \
8851
        qsort (READY, N_READY, sizeof (rtx), rank_for_reorder);         \
8852
    }                                                                   \
8853
  while (0)
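/* For a ready list of exactly two insns a single insertion (swap_reorder) is
   cheaper than calling qsort; longer lists fall back to qsort with the same
   comparison function.  */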
8854
 
8855
/* Sort the ready list READY by ascending priority, using the SCHED_REORDER
8856
   macro.  */
8857
static void
8858
ready_reorder (rtx *ready, int nready)
8859
{
8860
  SCHED_REORDER (ready, nready);
8861
}
8862
 
8863
/* Calculate regmode weights for all insns of all basic blocks.  */
8864
static void
8865
sh_md_init_global (FILE *dump ATTRIBUTE_UNUSED,
8866
                   int verbose ATTRIBUTE_UNUSED,
8867
                   int old_max_uid)
8868
{
8869
  basic_block b;
8870
 
8871
  regmode_weight[0] = (short *) xcalloc (old_max_uid, sizeof (short));
8872
  regmode_weight[1] = (short *) xcalloc (old_max_uid, sizeof (short));
8873
 
8874
  FOR_EACH_BB_REVERSE (b)
8875
  {
8876
    find_regmode_weight (b, SImode);
8877
    find_regmode_weight (b, SFmode);
8878
  }
8879
 
8880
  CURR_REGMODE_PRESSURE (SImode) = 0;
8881
  CURR_REGMODE_PRESSURE (SFmode) = 0;
8882
 
8883
}
8884
 
8885
/* Cleanup.  */
8886
static void
8887
sh_md_finish_global (FILE *dump ATTRIBUTE_UNUSED,
8888
                     int verbose ATTRIBUTE_UNUSED)
8889
{
8890
  if (regmode_weight[0])
8891
    {
8892
      free (regmode_weight[0]);
8893
      regmode_weight[0] = NULL;
8894
    }
8895
  if (regmode_weight[1])
8896
    {
8897
      free (regmode_weight[1]);
8898
      regmode_weight[1] = NULL;
8899
    }
8900
}
8901
 
8902
/* Cache can_issue_more so that we can return it from reorder2.  Also,
   keep count of register pressure for SImode and SFmode.  */
8904
static int
8905
sh_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
8906
                   int sched_verbose ATTRIBUTE_UNUSED,
8907
                   rtx insn,
8908
                   int can_issue_more)
8909
{
8910
  if (GET_CODE (PATTERN (insn)) != USE
8911
      && GET_CODE (PATTERN (insn)) != CLOBBER)
8912
    cached_can_issue_more = can_issue_more - 1;
8913
  else
8914
    cached_can_issue_more = can_issue_more;
8915
 
8916
  if (reload_completed)
8917
    return cached_can_issue_more;
8918
 
8919
  CURR_REGMODE_PRESSURE (SImode) += INSN_REGMODE_WEIGHT (insn, SImode);
8920
  CURR_REGMODE_PRESSURE (SFmode) += INSN_REGMODE_WEIGHT (insn, SFmode);
8921
 
8922
  return cached_can_issue_more;
8923
}
8924
 
8925
static void
8926
sh_md_init (FILE *dump ATTRIBUTE_UNUSED,
8927
            int verbose ATTRIBUTE_UNUSED,
8928
            int veclen ATTRIBUTE_UNUSED)
8929
{
8930
  CURR_REGMODE_PRESSURE (SImode) = 0;
8931
  CURR_REGMODE_PRESSURE (SFmode) = 0;
8932
}
8933
 
8934
/* Some magic numbers.  */
8935
/* Pressure on register r0 can lead to spill failures, so avoid sched1 for
   functions that already have high pressure on r0.  */
8937
#define R0_MAX_LIFE_REGIONS 2
8938
#define R0_MAX_LIVE_LENGTH 12
8939
/* Register pressure thresholds for SImode and SFmode registers.  */
8940
#define SIMODE_MAX_WEIGHT 5
8941
#define SFMODE_MAX_WEIGHT 10
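/* Roughly, once more than SIMODE_MAX_WEIGHT SImode values or
   SFMODE_MAX_WEIGHT SFmode values are live at the current scheduling point,
   high_pressure below reports high pressure and the ready queue is reordered
   (sh_reorder) or cycles are skipped (sh_reorder2).  */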
 
8943
/* Return true if the pressure is high for MODE.  */
8944
static short
8945
high_pressure (enum machine_mode mode)
8946
{
8947
  /* Pressure on register r0 can lead to spill failures, so avoid sched1 for
     functions that already have high pressure on r0.  */
8949
  if ((REG_N_SETS (0) - REG_N_DEATHS (0)) >= R0_MAX_LIFE_REGIONS
8950
      && REG_LIVE_LENGTH (0) >= R0_MAX_LIVE_LENGTH)
8951
    return 1;
8952
 
8953
  if (mode == SFmode)
8954
    return (CURR_REGMODE_PRESSURE (SFmode) > SFMODE_MAX_WEIGHT);
8955
  else
8956
    return (CURR_REGMODE_PRESSURE (SImode) > SIMODE_MAX_WEIGHT);
8957
}
8958
 
8959
/* Reorder ready queue if register pressure is high.  */
8960
static int
8961
sh_reorder (FILE *dump ATTRIBUTE_UNUSED,
8962
            int sched_verbose ATTRIBUTE_UNUSED,
8963
            rtx *ready,
8964
            int *n_readyp,
8965
            int clock_var ATTRIBUTE_UNUSED)
8966
{
8967
  if (reload_completed)
8968
    return sh_issue_rate ();
8969
 
8970
  if (high_pressure (SFmode) || high_pressure (SImode))
8971
    {
8972
      ready_reorder (ready, *n_readyp);
8973
    }
8974
 
8975
  return sh_issue_rate ();
8976
}
8977
 
8978
/* Skip cycles if the current register pressure is high.  */
8979
static int
8980
sh_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
8981
             int sched_verbose ATTRIBUTE_UNUSED,
8982
             rtx *ready ATTRIBUTE_UNUSED,
8983
             int *n_readyp ATTRIBUTE_UNUSED,
8984
             int clock_var ATTRIBUTE_UNUSED)
8985
{
8986
  if (reload_completed)
8987
    return cached_can_issue_more;
8988
 
8989
  if (high_pressure(SFmode) || high_pressure (SImode))
8990
    skip_cycles = 1;
8991
 
8992
  return cached_can_issue_more;
8993
}
8994
 
8995
/* Skip cycles without sorting the ready queue.  This will move insns from
   Q -> R.  If this is the last cycle we are skipping, allow sorting of the
   ready queue by sh_reorder.  */
 
/* Generally, skipping this many cycles is sufficient for all insns to move
   from Q -> R.  */
9001
#define MAX_SKIPS 8
9002
 
9003
static int
9004
sh_dfa_new_cycle (FILE *sched_dump ATTRIBUTE_UNUSED,
9005
                  int sched_verbose ATTRIBUTE_UNUSED,
9006
                  rtx insn ATTRIBUTE_UNUSED,
9007
                  int last_clock_var,
9008
                  int clock_var,
9009
                  int *sort_p)
9010
{
9011
  if (reload_completed)
9012
    return 0;
9013
 
9014
  if (skip_cycles)
9015
    {
9016
      if ((clock_var - last_clock_var) < MAX_SKIPS)
9017
        {
9018
          *sort_p = 0;
9019
          return 1;
9020
        }
9021
      /* If this is the last cycle we are skipping, allow reordering of R.  */
9022
      if ((clock_var - last_clock_var) == MAX_SKIPS)
9023
        {
9024
          *sort_p = 1;
9025
          return 1;
9026
        }
9027
    }
9028
 
9029
  skip_cycles = 0;
9030
 
9031
  return 0;
9032
}
9033
 
9034
/* SHmedia requires registers for branches, so we can't generate new
9035
   branches past reload.  */
9036
static bool
9037
sh_cannot_modify_jumps_p (void)
9038
{
9039
  return (TARGET_SHMEDIA && (reload_in_progress || reload_completed));
9040
}
9041
 
9042
static int
9043
sh_target_reg_class (void)
9044
{
9045
  return TARGET_SHMEDIA ? TARGET_REGS : NO_REGS;
9046
}
9047
 
9048
static bool
9049
sh_optimize_target_register_callee_saved (bool after_prologue_epilogue_gen)
9050
{
9051
  HARD_REG_SET dummy;
9052
  rtx insn;
9053
 
9054
  if (! shmedia_space_reserved_for_target_registers)
9055
    return 0;
9056
  if (after_prologue_epilogue_gen && ! TARGET_SAVE_ALL_TARGET_REGS)
9057
    return 0;
9058
  if (calc_live_regs (&dummy) >= 6 * 8)
9059
    return 1;
9060
  /* This is a borderline case.  See if we got a nested loop, or a loop
9061
     with a call, or with more than 4 labels inside.  */
9062
  for (insn = get_insns(); insn; insn = NEXT_INSN (insn))
9063
    {
9064
      if (GET_CODE (insn) == NOTE
9065
          && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
9066
        {
9067
          int labels = 0;
9068
 
9069
          do
9070
            {
9071
              insn = NEXT_INSN (insn);
9072
              if ((GET_CODE (insn) == NOTE
9073
                   && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
9074
                  || GET_CODE (insn) == CALL_INSN
9075
                  || (GET_CODE (insn) == CODE_LABEL && ++labels > 4))
9076
                return 1;
9077
            }
9078
          while (GET_CODE (insn) != NOTE
9079
                 || NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_END);
9080
        }
9081
    }
9082
  return 0;
9083
}
9084
 
9085
static bool
9086
sh_ms_bitfield_layout_p (tree record_type ATTRIBUTE_UNUSED)
9087
{
9088
  return (TARGET_SH5 || TARGET_HITACHI || sh_attr_renesas_p (record_type));
9089
}
9090
 
9091
/*
9092
   On the SH1..SH4, the trampoline looks like
9093
   2 0002 D202                  mov.l   l2,r2
9094
   1 0000 D301                  mov.l   l1,r3
9095
   3 0004 422B                  jmp     @r2
9096
   4 0006 0009                  nop
9097
   5 0008 00000000      l1:     .long   area
9098
   6 000c 00000000      l2:     .long   function
9099
 
9100
   SH5 (compact) uses r1 instead of r3 for the static chain.  */
9101
 
9102
 
9103
/* Emit RTL insns to initialize the variable parts of a trampoline.
9104
   FNADDR is an RTX for the address of the function's pure code.
9105
   CXT is an RTX for the static chain value for the function.  */
9106
 
9107
void
9108
sh_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
9109
{
9110
  rtx tramp_mem = gen_frame_mem (BLKmode, tramp);
9111
 
9112
  if (TARGET_SHMEDIA64)
9113
    {
9114
      rtx tramp_templ;
9115
      int fixed_len;
9116
 
9117
      rtx movi1 = GEN_INT (0xcc000010);
9118
      rtx shori1 = GEN_INT (0xc8000010);
9119
      rtx src, dst;
9120
 
9121
      /* The following trampoline works within a +- 128 KB range for cxt:
9122
         ptb/u cxt,tr1; movi fnaddr >> 48,r0; shori fnaddr >> 32,r0;
9123
         shori fnaddr >> 16,r0; shori fnaddr,r0; ptabs/l r0,tr0
9124
         gettr tr1,r1; blink tr0,r63  */
9125
      /* Address rounding makes it hard to compute the exact bounds of the
9126
         offset for this trampoline, but we have a rather generous offset
9127
         range, so frame_offset should do fine as an upper bound.  */
9128
      if (cxt == virtual_stack_vars_rtx && frame_offset < 0x20000)
9129
        {
9130
          /* ??? could optimize this trampoline initialization
9131
             by writing DImode words with two insns each.  */
9132
          rtx mask = force_reg (DImode, GEN_INT (0x3fffc00));
9133
          rtx insn = gen_rtx_MINUS (DImode, cxt, tramp);
9134
          insn = gen_rtx_ASHIFT (DImode, insn, GEN_INT (10-2));
9135
          insn = gen_rtx_AND (DImode, insn, mask);
9136
          /* Or in ptb/u .,tr1 pattern */
9137
          insn = gen_rtx_IOR (DImode, insn, gen_int_mode (0xec000010, SImode));
9138
          insn = force_operand (insn, NULL_RTX);
9139
          insn = gen_lowpart (SImode, insn);
9140
          emit_move_insn (change_address (tramp_mem, SImode, NULL_RTX), insn);
9141
          insn = gen_rtx_LSHIFTRT (DImode, fnaddr, GEN_INT (38));
9142
          insn = gen_rtx_AND (DImode, insn, mask);
9143
          insn = force_operand (gen_rtx_IOR (DImode, movi1, insn), NULL_RTX);
9144
          insn = gen_lowpart (SImode, insn);
9145
          emit_move_insn (adjust_address (tramp_mem, SImode, 4), insn);
9146
          insn = gen_rtx_LSHIFTRT (DImode, fnaddr, GEN_INT (22));
9147
          insn = gen_rtx_AND (DImode, insn, mask);
9148
          insn = force_operand (gen_rtx_IOR (DImode, shori1, insn), NULL_RTX);
9149
          insn = gen_lowpart (SImode, insn);
9150
          emit_move_insn (adjust_address (tramp_mem, SImode, 8), insn);
9151
          insn = gen_rtx_LSHIFTRT (DImode, fnaddr, GEN_INT (6));
9152
          insn = gen_rtx_AND (DImode, insn, mask);
9153
          insn = force_operand (gen_rtx_IOR (DImode, shori1, insn), NULL_RTX);
9154
          insn = gen_lowpart (SImode, insn);
9155
          emit_move_insn (adjust_address (tramp_mem, SImode, 12), insn);
9156
          insn = gen_rtx_ASHIFT (DImode, fnaddr, GEN_INT (10));
9157
          insn = gen_rtx_AND (DImode, insn, mask);
9158
          insn = force_operand (gen_rtx_IOR (DImode, shori1, insn), NULL_RTX);
9159
          insn = gen_lowpart (SImode, insn);
9160
          emit_move_insn (adjust_address (tramp_mem, SImode, 16), insn);
9161
          emit_move_insn (adjust_address (tramp_mem, SImode, 20),
9162
                          GEN_INT (0x6bf10600));
9163
          emit_move_insn (adjust_address (tramp_mem, SImode, 24),
9164
                          GEN_INT (0x4415fc10));
9165
          emit_move_insn (adjust_address (tramp_mem, SImode, 28),
9166
                          GEN_INT (0x4401fff0));
9167
          emit_insn (gen_ic_invalidate_line (tramp));
9168
          return;
9169
        }
9170
      tramp_templ = gen_rtx_SYMBOL_REF (Pmode,"__GCC_nested_trampoline");
9171
      fixed_len = TRAMPOLINE_SIZE - 2 * GET_MODE_SIZE (Pmode);
9172
 
9173
      tramp_templ = gen_datalabel_ref (tramp_templ);
9174
      dst = tramp_mem;
9175
      src = gen_const_mem (BLKmode, tramp_templ);
9176
      set_mem_align (dst, 256);
9177
      set_mem_align (src, 64);
9178
      emit_block_move (dst, src, GEN_INT (fixed_len), BLOCK_OP_NORMAL);
9179
 
9180
      emit_move_insn (adjust_address (tramp_mem, Pmode, fixed_len), fnaddr);
9181
      emit_move_insn (adjust_address (tramp_mem, Pmode,
9182
                                      fixed_len + GET_MODE_SIZE (Pmode)),
9183
                      cxt);
9184
      emit_insn (gen_ic_invalidate_line (tramp));
9185
      return;
9186
    }
9187
  else if (TARGET_SHMEDIA)
9188
    {
9189
      /* movi fnaddr >> 16,r1; shori fnaddr,r1; ptabs/l r1,tr0
9190
         movi cxt >> 16,r1; shori cxt,r1; blink tr0,r63  */
9191
      rtx quad0 = gen_reg_rtx (DImode), cxtload = gen_reg_rtx (DImode);
9192
      rtx quad1 = gen_reg_rtx (DImode), quad2 = gen_reg_rtx (DImode);
9193
      /* movi 0,r1: 0xcc000010 shori 0,r1: c8000010  concatenated,
         rotated 10 right, and the high 16 bits of every 32 selected.  */
9195
      rtx movishori
9196
        = force_reg (V2HImode, (simplify_gen_subreg
9197
                                (V2HImode, GEN_INT (0x4330432), SImode, 0)));
9198
      rtx ptabs = force_reg (DImode, GEN_INT (0x6bf10600));
9199
      rtx blink = force_reg (DImode, GEN_INT (0x4401fff0));
9200
 
9201
      tramp = force_reg (Pmode, tramp);
9202
      fnaddr = force_reg (SImode, fnaddr);
9203
      cxt = force_reg (SImode, cxt);
9204
      emit_insn (gen_mshflo_w_x (gen_rtx_SUBREG (V4HImode, quad0, 0),
9205
                                 gen_rtx_SUBREG (V2HImode, fnaddr, 0),
9206
                                 movishori));
9207
      emit_insn (gen_rotrdi3_mextr (quad0, quad0,
9208
                                    GEN_INT (TARGET_LITTLE_ENDIAN ? 24 : 56)));
9209
      emit_insn (gen_ashldi3_media (quad0, quad0, const2_rtx));
9210
      emit_move_insn (change_address (tramp_mem, DImode, NULL_RTX), quad0);
9211
      emit_insn (gen_mshflo_w_x (gen_rtx_SUBREG (V4HImode, cxtload, 0),
9212
                                 gen_rtx_SUBREG (V2HImode, cxt, 0),
9213
                                 movishori));
9214
      emit_insn (gen_rotrdi3_mextr (cxtload, cxtload,
9215
                                    GEN_INT (TARGET_LITTLE_ENDIAN ? 24 : 56)));
9216
      emit_insn (gen_ashldi3_media (cxtload, cxtload, const2_rtx));
9217
      if (TARGET_LITTLE_ENDIAN)
9218
        {
9219
          emit_insn (gen_mshflo_l_di (quad1, ptabs, cxtload));
9220
          emit_insn (gen_mextr4 (quad2, cxtload, blink));
9221
        }
9222
      else
9223
        {
9224
          emit_insn (gen_mextr4 (quad1, cxtload, ptabs));
9225
          emit_insn (gen_mshflo_l_di (quad2, blink, cxtload));
9226
        }
9227
      emit_move_insn (adjust_address (tramp_mem, DImode, 8), quad1);
9228
      emit_move_insn (adjust_address (tramp_mem, DImode, 16), quad2);
9229
      emit_insn (gen_ic_invalidate_line (tramp));
9230
      return;
9231
    }
9232
  else if (TARGET_SHCOMPACT)
9233
    {
9234
      emit_insn (gen_initialize_trampoline (tramp, cxt, fnaddr));
9235
      return;
9236
    }
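  /* Fill in the fixed part of the SH1..SH4 trampoline shown above: the first
     SImode word holds the two mov.l opcodes (0xd202 and 0xd301), the second
     holds jmp @r2 (0x422b) and nop (0x0009); the constants are arranged so
     that the instruction stream comes out right for either endianness.  */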
  emit_move_insn (change_address (tramp_mem, SImode, NULL_RTX),
9238
                  gen_int_mode (TARGET_LITTLE_ENDIAN ? 0xd301d202 : 0xd202d301,
9239
                                SImode));
9240
  emit_move_insn (adjust_address (tramp_mem, SImode, 4),
9241
                  gen_int_mode (TARGET_LITTLE_ENDIAN ? 0x0009422b : 0x422b0009,
9242
                                SImode));
9243
  emit_move_insn (adjust_address (tramp_mem, SImode, 8), cxt);
9244
  emit_move_insn (adjust_address (tramp_mem, SImode, 12), fnaddr);
9245
  if (TARGET_HARVARD)
9246
    {
9247
      if (TARGET_USERMODE)
9248
        emit_library_call (function_symbol (NULL, "__ic_invalidate",
9249
                                            FUNCTION_ORDINARY),
9250
                           0, VOIDmode, 1, tramp, SImode);
9251
      else
9252
        emit_insn (gen_ic_invalidate_line (tramp));
9253
    }
9254
}
9255
 
9256
/* FIXME: This is overly conservative.  A SHcompact function that
9257
   receives arguments ``by reference'' will have them stored in its
9258
   own stack frame, so it must not pass pointers or references to
9259
   these arguments to other functions by means of sibling calls.  */
9260
/* If PIC, we cannot make sibling calls to global functions
9261
   because the PLT requires r12 to be live.  */
9262
static bool
9263
sh_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
9264
{
9265
  return (1
9266
          && (! TARGET_SHCOMPACT
9267
              || current_function_args_info.stack_regs == 0)
9268
          && ! sh_cfun_interrupt_handler_p ()
9269
          && (! flag_pic
9270
              || (decl && ! TREE_PUBLIC (decl))
9271
              || (decl && DECL_VISIBILITY (decl) != VISIBILITY_DEFAULT)));
9272
}
9273
 
9274
/* Machine specific built-in functions.  */
9275
 
9276
struct builtin_description
9277
{
9278
  const enum insn_code icode;
9279
  const char *const name;
9280
  int signature;
9281
};
9282
 
9283
/* Describe number and signedness of arguments; arg[0] == result
   (1: unsigned, 2: signed, 4: don't care, 8: pointer, 0: no argument).  */
/* 9: 64-bit pointer, 10: 32-bit pointer.  */
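/* For example, SH_BLTIN_MAC_HISI { 1, 4, 4, 1 } describes a builtin with an
   unsigned result, two "don't care" operands and an unsigned third operand,
   while SH_BLTIN_LDUA_L { 2, 10 } returns a signed value loaded through a
   32-bit pointer argument.  */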
static const char signature_args[][4] =
9287
{
9288
#define SH_BLTIN_V2SI2 0
9289
  { 4, 4 },
9290
#define SH_BLTIN_V4HI2 1
9291
  { 4, 4 },
9292
#define SH_BLTIN_V2SI3 2
9293
  { 4, 4, 4 },
9294
#define SH_BLTIN_V4HI3 3
9295
  { 4, 4, 4 },
9296
#define SH_BLTIN_V8QI3 4
9297
  { 4, 4, 4 },
9298
#define SH_BLTIN_MAC_HISI 5
9299
  { 1, 4, 4, 1 },
9300
#define SH_BLTIN_SH_HI 6
9301
  { 4, 4, 1 },
9302
#define SH_BLTIN_SH_SI 7
9303
  { 4, 4, 1 },
9304
#define SH_BLTIN_V4HI2V2SI 8
9305
  { 4, 4, 4 },
9306
#define SH_BLTIN_V4HI2V8QI 9
9307
  { 4, 4, 4 },
9308
#define SH_BLTIN_SISF 10
9309
  { 4, 2 },
9310
#define SH_BLTIN_LDUA_L 11
9311
  { 2, 10 },
9312
#define SH_BLTIN_LDUA_Q 12
9313
  { 1, 10 },
9314
#define SH_BLTIN_STUA_L 13
9315
  { 0, 10, 2 },
9316
#define SH_BLTIN_STUA_Q 14
9317
  { 0, 10, 1 },
9318
#define SH_BLTIN_LDUA_L64 15
9319
  { 2, 9 },
9320
#define SH_BLTIN_LDUA_Q64 16
9321
  { 1, 9 },
9322
#define SH_BLTIN_STUA_L64 17
9323
  { 0, 9, 2 },
9324
#define SH_BLTIN_STUA_Q64 18
9325
  { 0, 9, 1 },
9326
#define SH_BLTIN_NUM_SHARED_SIGNATURES 19
9327
#define SH_BLTIN_2 19
9328
#define SH_BLTIN_SU 19
9329
  { 1, 2 },
9330
#define SH_BLTIN_3 20
9331
#define SH_BLTIN_SUS 20
9332
  { 2, 2, 1 },
9333
#define SH_BLTIN_PSSV 21
9334
  { 0, 8, 2, 2 },
9335
#define SH_BLTIN_XXUU 22
9336
#define SH_BLTIN_UUUU 22
9337
  { 1, 1, 1, 1 },
9338
#define SH_BLTIN_PV 23
9339
  { 0, 8 },
9340
};
9341
/* mcmv: operands considered unsigned.  */
9342
/* mmulsum_wq, msad_ubq: result considered unsigned long long.  */
9343
/* mperm: control value considered unsigned int.  */
9344
/* mshalds, mshard, mshards, mshlld, mshlrd: shift count is unsigned int.  */
9345
/* mshards_q: returns signed short.  */
9346
/* nsb: takes long long arg, returns unsigned char.  */
9347
static const struct builtin_description bdesc[] =
9348
{
9349
  { CODE_FOR_absv2si2,  "__builtin_absv2si2", SH_BLTIN_V2SI2 },
9350
  { CODE_FOR_absv4hi2,  "__builtin_absv4hi2", SH_BLTIN_V4HI2 },
9351
  { CODE_FOR_addv2si3,  "__builtin_addv2si3", SH_BLTIN_V2SI3 },
9352
  { CODE_FOR_addv4hi3,  "__builtin_addv4hi3", SH_BLTIN_V4HI3 },
9353
  { CODE_FOR_ssaddv2si3,"__builtin_ssaddv2si3", SH_BLTIN_V2SI3 },
9354
  { CODE_FOR_usaddv8qi3,"__builtin_usaddv8qi3", SH_BLTIN_V8QI3 },
9355
  { CODE_FOR_ssaddv4hi3,"__builtin_ssaddv4hi3", SH_BLTIN_V4HI3 },
9356
  { CODE_FOR_alloco_i,  "__builtin_sh_media_ALLOCO", SH_BLTIN_PV },
9357
  { CODE_FOR_negcmpeqv8qi,"__builtin_sh_media_MCMPEQ_B", SH_BLTIN_V8QI3 },
9358
  { CODE_FOR_negcmpeqv2si,"__builtin_sh_media_MCMPEQ_L", SH_BLTIN_V2SI3 },
9359
  { CODE_FOR_negcmpeqv4hi,"__builtin_sh_media_MCMPEQ_W", SH_BLTIN_V4HI3 },
9360
  { CODE_FOR_negcmpgtuv8qi,"__builtin_sh_media_MCMPGT_UB", SH_BLTIN_V8QI3 },
9361
  { CODE_FOR_negcmpgtv2si,"__builtin_sh_media_MCMPGT_L", SH_BLTIN_V2SI3 },
9362
  { CODE_FOR_negcmpgtv4hi,"__builtin_sh_media_MCMPGT_W", SH_BLTIN_V4HI3 },
9363
  { CODE_FOR_mcmv,      "__builtin_sh_media_MCMV", SH_BLTIN_UUUU },
9364
  { CODE_FOR_mcnvs_lw,  "__builtin_sh_media_MCNVS_LW", SH_BLTIN_3 },
9365
  { CODE_FOR_mcnvs_wb,  "__builtin_sh_media_MCNVS_WB", SH_BLTIN_V4HI2V8QI },
9366
  { CODE_FOR_mcnvs_wub, "__builtin_sh_media_MCNVS_WUB", SH_BLTIN_V4HI2V8QI },
9367
  { CODE_FOR_mextr1,    "__builtin_sh_media_MEXTR1", SH_BLTIN_V8QI3 },
9368
  { CODE_FOR_mextr2,    "__builtin_sh_media_MEXTR2", SH_BLTIN_V8QI3 },
9369
  { CODE_FOR_mextr3,    "__builtin_sh_media_MEXTR3", SH_BLTIN_V8QI3 },
9370
  { CODE_FOR_mextr4,    "__builtin_sh_media_MEXTR4", SH_BLTIN_V8QI3 },
9371
  { CODE_FOR_mextr5,    "__builtin_sh_media_MEXTR5", SH_BLTIN_V8QI3 },
9372
  { CODE_FOR_mextr6,    "__builtin_sh_media_MEXTR6", SH_BLTIN_V8QI3 },
9373
  { CODE_FOR_mextr7,    "__builtin_sh_media_MEXTR7", SH_BLTIN_V8QI3 },
9374
  { CODE_FOR_mmacfx_wl, "__builtin_sh_media_MMACFX_WL", SH_BLTIN_MAC_HISI },
9375
  { CODE_FOR_mmacnfx_wl,"__builtin_sh_media_MMACNFX_WL", SH_BLTIN_MAC_HISI },
9376
  { CODE_FOR_mulv2si3,  "__builtin_mulv2si3", SH_BLTIN_V2SI3 },
9377
  { CODE_FOR_mulv4hi3,  "__builtin_mulv4hi3", SH_BLTIN_V4HI3 },
9378
  { CODE_FOR_mmulfx_l,  "__builtin_sh_media_MMULFX_L", SH_BLTIN_V2SI3 },
9379
  { CODE_FOR_mmulfx_w,  "__builtin_sh_media_MMULFX_W", SH_BLTIN_V4HI3 },
9380
  { CODE_FOR_mmulfxrp_w,"__builtin_sh_media_MMULFXRP_W", SH_BLTIN_V4HI3 },
9381
  { CODE_FOR_mmulhi_wl, "__builtin_sh_media_MMULHI_WL", SH_BLTIN_V4HI2V2SI },
9382
  { CODE_FOR_mmullo_wl, "__builtin_sh_media_MMULLO_WL", SH_BLTIN_V4HI2V2SI },
9383
  { CODE_FOR_mmulsum_wq,"__builtin_sh_media_MMULSUM_WQ", SH_BLTIN_XXUU },
9384
  { CODE_FOR_mperm_w,   "__builtin_sh_media_MPERM_W", SH_BLTIN_SH_HI },
9385
  { CODE_FOR_msad_ubq,  "__builtin_sh_media_MSAD_UBQ", SH_BLTIN_XXUU },
9386
  { CODE_FOR_mshalds_l, "__builtin_sh_media_MSHALDS_L", SH_BLTIN_SH_SI },
9387
  { CODE_FOR_mshalds_w, "__builtin_sh_media_MSHALDS_W", SH_BLTIN_SH_HI },
9388
  { CODE_FOR_ashrv2si3, "__builtin_ashrv2si3", SH_BLTIN_SH_SI },
9389
  { CODE_FOR_ashrv4hi3, "__builtin_ashrv4hi3", SH_BLTIN_SH_HI },
9390
  { CODE_FOR_mshards_q, "__builtin_sh_media_MSHARDS_Q", SH_BLTIN_SUS },
9391
  { CODE_FOR_mshfhi_b,  "__builtin_sh_media_MSHFHI_B", SH_BLTIN_V8QI3 },
9392
  { CODE_FOR_mshfhi_l,  "__builtin_sh_media_MSHFHI_L", SH_BLTIN_V2SI3 },
9393
  { CODE_FOR_mshfhi_w,  "__builtin_sh_media_MSHFHI_W", SH_BLTIN_V4HI3 },
9394
  { CODE_FOR_mshflo_b,  "__builtin_sh_media_MSHFLO_B", SH_BLTIN_V8QI3 },
9395
  { CODE_FOR_mshflo_l,  "__builtin_sh_media_MSHFLO_L", SH_BLTIN_V2SI3 },
9396
  { CODE_FOR_mshflo_w,  "__builtin_sh_media_MSHFLO_W", SH_BLTIN_V4HI3 },
9397
  { CODE_FOR_ashlv2si3, "__builtin_ashlv2si3", SH_BLTIN_SH_SI },
9398
  { CODE_FOR_ashlv4hi3, "__builtin_ashlv4hi3", SH_BLTIN_SH_HI },
9399
  { CODE_FOR_lshrv2si3, "__builtin_lshrv2si3", SH_BLTIN_SH_SI },
9400
  { CODE_FOR_lshrv4hi3, "__builtin_lshrv4hi3", SH_BLTIN_SH_HI },
9401
  { CODE_FOR_subv2si3,  "__builtin_subv2si3", SH_BLTIN_V2SI3 },
9402
  { CODE_FOR_subv4hi3,  "__builtin_subv4hi3", SH_BLTIN_V4HI3 },
9403
  { CODE_FOR_sssubv2si3,"__builtin_sssubv2si3", SH_BLTIN_V2SI3 },
9404
  { CODE_FOR_ussubv8qi3,"__builtin_ussubv8qi3", SH_BLTIN_V8QI3 },
9405
  { CODE_FOR_sssubv4hi3,"__builtin_sssubv4hi3", SH_BLTIN_V4HI3 },
9406
  { CODE_FOR_fcosa_s,   "__builtin_sh_media_FCOSA_S", SH_BLTIN_SISF },
9407
  { CODE_FOR_fsina_s,   "__builtin_sh_media_FSINA_S", SH_BLTIN_SISF },
9408
  { CODE_FOR_fipr,      "__builtin_sh_media_FIPR_S", SH_BLTIN_3 },
9409
  { CODE_FOR_ftrv,      "__builtin_sh_media_FTRV_S", SH_BLTIN_3 },
9410
  { CODE_FOR_mac_media, "__builtin_sh_media_FMAC_S", SH_BLTIN_3 },
9411
  { CODE_FOR_sqrtdf2,   "__builtin_sh_media_FSQRT_D", SH_BLTIN_2 },
9412
  { CODE_FOR_sqrtsf2,   "__builtin_sh_media_FSQRT_S", SH_BLTIN_2 },
9413
  { CODE_FOR_fsrra_s,   "__builtin_sh_media_FSRRA_S", SH_BLTIN_2 },
9414
  { CODE_FOR_ldhi_l,    "__builtin_sh_media_LDHI_L", SH_BLTIN_LDUA_L },
9415
  { CODE_FOR_ldhi_q,    "__builtin_sh_media_LDHI_Q", SH_BLTIN_LDUA_Q },
9416
  { CODE_FOR_ldlo_l,    "__builtin_sh_media_LDLO_L", SH_BLTIN_LDUA_L },
9417
  { CODE_FOR_ldlo_q,    "__builtin_sh_media_LDLO_Q", SH_BLTIN_LDUA_Q },
9418
  { CODE_FOR_sthi_l,    "__builtin_sh_media_STHI_L", SH_BLTIN_STUA_L },
9419
  { CODE_FOR_sthi_q,    "__builtin_sh_media_STHI_Q", SH_BLTIN_STUA_Q },
9420
  { CODE_FOR_stlo_l,    "__builtin_sh_media_STLO_L", SH_BLTIN_STUA_L },
9421
  { CODE_FOR_stlo_q,    "__builtin_sh_media_STLO_Q", SH_BLTIN_STUA_Q },
9422
  { CODE_FOR_ldhi_l64,  "__builtin_sh_media_LDHI_L", SH_BLTIN_LDUA_L64 },
9423
  { CODE_FOR_ldhi_q64,  "__builtin_sh_media_LDHI_Q", SH_BLTIN_LDUA_Q64 },
9424
  { CODE_FOR_ldlo_l64,  "__builtin_sh_media_LDLO_L", SH_BLTIN_LDUA_L64 },
9425
  { CODE_FOR_ldlo_q64,  "__builtin_sh_media_LDLO_Q", SH_BLTIN_LDUA_Q64 },
9426
  { CODE_FOR_sthi_l64,  "__builtin_sh_media_STHI_L", SH_BLTIN_STUA_L64 },
9427
  { CODE_FOR_sthi_q64,  "__builtin_sh_media_STHI_Q", SH_BLTIN_STUA_Q64 },
9428
  { CODE_FOR_stlo_l64,  "__builtin_sh_media_STLO_L", SH_BLTIN_STUA_L64 },
9429
  { CODE_FOR_stlo_q64,  "__builtin_sh_media_STLO_Q", SH_BLTIN_STUA_Q64 },
9430
  { CODE_FOR_nsb,       "__builtin_sh_media_NSB", SH_BLTIN_SU },
9431
  { CODE_FOR_byterev,   "__builtin_sh_media_BYTEREV", SH_BLTIN_2 },
9432
  { CODE_FOR_prefetch,  "__builtin_sh_media_PREFO", SH_BLTIN_PSSV },
9433
};
9434
 
9435
static void
9436
sh_media_init_builtins (void)
9437
{
9438
  tree shared[SH_BLTIN_NUM_SHARED_SIGNATURES];
9439
  const struct builtin_description *d;
9440
 
9441
  memset (shared, 0, sizeof shared);
9442
  for (d = bdesc; d - bdesc < (int) ARRAY_SIZE (bdesc); d++)
9443
    {
9444
      tree type, arg_type = 0;
9445
      int signature = d->signature;
9446
      int i;
9447
 
9448
      if (signature < SH_BLTIN_NUM_SHARED_SIGNATURES && shared[signature])
9449
        type = shared[signature];
9450
      else
9451
        {
9452
          int has_result = signature_args[signature][0] != 0;
9453
 
9454
          if ((signature_args[signature][1] & 8)
9455
              && (((signature_args[signature][1] & 1) && TARGET_SHMEDIA32)
9456
                  || ((signature_args[signature][1] & 2) && TARGET_SHMEDIA64)))
9457
            continue;
9458
          if (! TARGET_FPU_ANY
9459
              && FLOAT_MODE_P (insn_data[d->icode].operand[0].mode))
9460
            continue;
9461
          type = void_list_node;
9462
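          /* Build the argument list back to front, consing each argument
             type onto TYPE.  The list is rooted at void_list_node, so the
             resulting function type takes a fixed number of arguments.  At
             i == 0, ARG_TYPE holds the return type instead and the loop
             exits.  */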
          for (i = 3; ; i--)
9463
            {
9464
              int arg = signature_args[signature][i];
9465
              int opno = i - 1 + has_result;
9466
 
9467
              if (arg & 8)
9468
                arg_type = ptr_type_node;
9469
              else if (arg)
9470
                arg_type = (*lang_hooks.types.type_for_mode)
9471
                  (insn_data[d->icode].operand[opno].mode,
9472
                   (arg & 1));
9473
              else if (i)
9474
                continue;
9475
              else
9476
                arg_type = void_type_node;
9477
              if (i == 0)
9478
                break;
9479
              type = tree_cons (NULL_TREE, arg_type, type);
9480
            }
9481
          type = build_function_type (arg_type, type);
9482
          if (signature < SH_BLTIN_NUM_SHARED_SIGNATURES)
9483
            shared[signature] = type;
9484
        }
9485
      lang_hooks.builtin_function (d->name, type, d - bdesc, BUILT_IN_MD,
9486
                                   NULL, NULL_TREE);
9487
    }
9488
}
9489
 
9490
/* Implements target hook vector_mode_supported_p.  */
9491
bool
9492
sh_vector_mode_supported_p (enum machine_mode mode)
9493
{
9494
  if (TARGET_FPU_ANY
9495
      && ((mode == V2SFmode)
9496
          || (mode == V4SFmode)
9497
          || (mode == V16SFmode)))
9498
    return true;
9499
 
9500
  else if (TARGET_SHMEDIA
9501
           && ((mode == V8QImode)
9502
               || (mode == V2HImode)
9503
               || (mode == V4HImode)
9504
               || (mode == V2SImode)))
9505
    return true;
9506
 
9507
  return false;
9508
}
9509
 
9510
/* Implements target hook dwarf_calling_convention.  Return an enum
9511
   of dwarf_calling_convention.  */
9512
int
9513
sh_dwarf_calling_convention (tree func)
9514
{
9515
  if (sh_attr_renesas_p (func))
9516
    return DW_CC_GNU_renesas_sh;
9517
 
9518
  return DW_CC_normal;
9519
}
9520
 
9521
static void
9522
sh_init_builtins (void)
9523
{
9524
  if (TARGET_SHMEDIA)
9525
    sh_media_init_builtins ();
9526
}
9527
 
9528
/* Expand an expression EXP that calls a built-in function,
9529
   with result going to TARGET if that's convenient
9530
   (and in mode MODE if that's convenient).
9531
   SUBTARGET may be used as the target for computing one of EXP's operands.
9532
   IGNORE is nonzero if the value is to be ignored.  */
9533
 
9534
static rtx
9535
sh_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
9536
                   enum machine_mode mode ATTRIBUTE_UNUSED, int ignore)
9537
{
9538
  tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
9539
  tree arglist = TREE_OPERAND (exp, 1);
9540
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
9541
  const struct builtin_description *d = &bdesc[fcode];
9542
  enum insn_code icode = d->icode;
9543
  int signature = d->signature;
9544
  enum machine_mode tmode = VOIDmode;
9545
  int nop = 0, i;
9546
  rtx op[4];
9547
  rtx pat = 0;
9548
 
9549
  if (signature_args[signature][0])
9550
    {
9551
      if (ignore)
9552
        return 0;
9553
 
9554
      tmode = insn_data[icode].operand[0].mode;
9555
      if (! target
9556
          || GET_MODE (target) != tmode
9557
          || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
9558
        target = gen_reg_rtx (tmode);
9559
      op[nop++] = target;
9560
    }
9561
  else
9562
    target = 0;
9563
 
9564
  for (i = 1; i <= 3; i++, nop++)
9565
    {
9566
      tree arg;
9567
      enum machine_mode opmode, argmode;
9568
      tree optype;
9569
 
9570
      if (! signature_args[signature][i])
9571
        break;
9572
      arg = TREE_VALUE (arglist);
9573
      if (arg == error_mark_node)
9574
        return const0_rtx;
9575
      arglist = TREE_CHAIN (arglist);
9576
      if (signature_args[signature][i] & 8)
9577
        {
9578
          opmode = ptr_mode;
9579
          optype = ptr_type_node;
9580
        }
9581
      else
9582
        {
9583
          opmode = insn_data[icode].operand[nop].mode;
9584
          optype = (*lang_hooks.types.type_for_mode) (opmode, 0);
9585
        }
9586
      argmode = TYPE_MODE (TREE_TYPE (arg));
9587
      if (argmode != opmode)
9588
        arg = build1 (NOP_EXPR, optype, arg);
9589
      op[nop] = expand_expr (arg, NULL_RTX, opmode, 0);
9590
      if (! (*insn_data[icode].operand[nop].predicate) (op[nop], opmode))
9591
        op[nop] = copy_to_mode_reg (opmode, op[nop]);
9592
    }
9593
 
9594
  switch (nop)
9595
    {
9596
    case 1:
9597
      pat = (*insn_data[d->icode].genfun) (op[0]);
9598
      break;
9599
    case 2:
9600
      pat = (*insn_data[d->icode].genfun) (op[0], op[1]);
9601
      break;
9602
    case 3:
9603
      pat = (*insn_data[d->icode].genfun) (op[0], op[1], op[2]);
9604
      break;
9605
    case 4:
9606
      pat = (*insn_data[d->icode].genfun) (op[0], op[1], op[2], op[3]);
9607
      break;
9608
    default:
9609
      gcc_unreachable ();
9610
    }
9611
  if (! pat)
9612
    return 0;
9613
  emit_insn (pat);
9614
  return target;
9615
}
9616
 
9617
void
9618
sh_expand_unop_v2sf (enum rtx_code code, rtx op0, rtx op1)
9619
{
9620
  rtx sel0 = const0_rtx;
9621
  rtx sel1 = const1_rtx;
9622
  rtx (*fn) (rtx, rtx, rtx, rtx, rtx) = gen_unary_sf_op;
9623
  rtx op = gen_rtx_fmt_e (code, SFmode, op1);
9624
 
9625
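  /* Apply the scalar SFmode operation OP to each of the two vector elements
     in turn: once selecting element 0 of source and destination, and once
     selecting element 1.  */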
  emit_insn ((*fn) (op0, op1, op, sel0, sel0));
9626
  emit_insn ((*fn) (op0, op1, op, sel1, sel1));
9627
}
9628
 
9629
void
9630
sh_expand_binop_v2sf (enum rtx_code code, rtx op0, rtx op1, rtx op2)
9631
{
9632
  rtx sel0 = const0_rtx;
9633
  rtx sel1 = const1_rtx;
9634
  rtx (*fn) (rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx)
9635
    = gen_binary_sf_op;
9636
  rtx op = gen_rtx_fmt_ee (code, SFmode, op1, op2);
9637
 
9638
  emit_insn ((*fn) (op0, op1, op2, op, sel0, sel0, sel0, sel1));
9639
  emit_insn ((*fn) (op0, op1, op2, op, sel1, sel1, sel1, sel0));
9640
}
9641
 
9642
/* Return true if a mode change from FROM to TO is invalid for registers
   in class CLASS.  */
9644
bool
9645
sh_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
9646
                             enum reg_class class)
9647
{
9648
  /* We want to enable the use of SUBREGs as a means to
9649
     VEC_SELECT a single element of a vector.  */
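  /* For example, (subreg:SF (reg:V2SF) 0) extracts a single element, so we
     only reject the mode change for classes that overlap GENERAL_REGS.  */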
9650
  if (to == SFmode && VECTOR_MODE_P (from) && GET_MODE_INNER (from) == SFmode)
9651
    return (reg_classes_intersect_p (GENERAL_REGS, class));
9652
 
9653
  if (GET_MODE_SIZE (from) != GET_MODE_SIZE (to))
9654
    {
9655
      if (TARGET_LITTLE_ENDIAN)
9656
        {
9657
          if (GET_MODE_SIZE (to) < 8 || GET_MODE_SIZE (from) < 8)
9658
            return reg_classes_intersect_p (DF_REGS, class);
9659
        }
9660
      else
9661
        {
9662
          if (GET_MODE_SIZE (from) < 8)
9663
            return reg_classes_intersect_p (DF_HI_REGS, class);
9664
        }
9665
    }
9666
  return 0;
9667
}
9668
 
9669
 
9670
/* If ADDRESS refers to a CODE_LABEL, add NUSES to the number of times
9671
   that label is used.  */
9672
 
9673
void
9674
sh_mark_label (rtx address, int nuses)
9675
{
9676
  if (GOTOFF_P (address))
9677
    {
9678
      /* Extract the label or symbol.  */
9679
      address = XEXP (address, 0);
9680
      if (GET_CODE (address) == PLUS)
9681
        address = XEXP (address, 0);
9682
      address = XVECEXP (address, 0, 0);
9683
    }
9684
  if (GET_CODE (address) == LABEL_REF
9685
      && GET_CODE (XEXP (address, 0)) == CODE_LABEL)
9686
    LABEL_NUSES (XEXP (address, 0)) += nuses;
9687
}
9688
 
9689
/* Compute extra cost of moving data between one register class
9690
   and another.  */
9691
 
9692
/* If SECONDARY*_RELOAD_CLASS says something about the src/dst pair, regclass
9693
   uses this information.  Hence, the general register <-> floating point
9694
   register information here is not used for SFmode.  */
9695
 
9696
int
9697
sh_register_move_cost (enum machine_mode mode,
9698
                       enum reg_class srcclass, enum reg_class dstclass)
9699
{
9700
  if (dstclass == T_REGS || dstclass == PR_REGS)
9701
    return 10;
9702
 
9703
  if (dstclass == MAC_REGS && srcclass == MAC_REGS)
9704
    return 4;
9705
 
9706
  if (mode == SImode && ! TARGET_SHMEDIA && TARGET_FMOVD
9707
      && REGCLASS_HAS_FP_REG (srcclass)
9708
      && REGCLASS_HAS_FP_REG (dstclass))
9709
    return 4;
9710
 
9711
  if (REGCLASS_HAS_FP_REG (dstclass) && srcclass == T_REGS)
9712
    return ((TARGET_HARD_SH4 && !optimize_size) ? 10 : 7);
9713
 
9714
  if ((REGCLASS_HAS_FP_REG (dstclass) && srcclass == MAC_REGS)
9715
      || (dstclass == MAC_REGS && REGCLASS_HAS_FP_REG (srcclass)))
9716
    return 9;
9717
 
9718
  if ((REGCLASS_HAS_FP_REG (dstclass)
9719
       && REGCLASS_HAS_GENERAL_REG (srcclass))
9720
      || (REGCLASS_HAS_GENERAL_REG (dstclass)
9721
          && REGCLASS_HAS_FP_REG (srcclass)))
9722
    return ((TARGET_SHMEDIA ? 4 : TARGET_FMOVD ? 8 : 12)
9723
            * ((GET_MODE_SIZE (mode) + 7) / 8U));
9724
 
9725
  if ((dstclass == FPUL_REGS
9726
       && REGCLASS_HAS_GENERAL_REG (srcclass))
9727
      || (srcclass == FPUL_REGS
9728
          && REGCLASS_HAS_GENERAL_REG (dstclass)))
9729
    return 5;
9730
 
9731
  if ((dstclass == FPUL_REGS
9732
       && (srcclass == PR_REGS || srcclass == MAC_REGS || srcclass == T_REGS))
9733
      || (srcclass == FPUL_REGS
9734
          && (dstclass == PR_REGS || dstclass == MAC_REGS)))
9735
    return 7;
9736
 
9737
  if ((srcclass == TARGET_REGS && ! REGCLASS_HAS_GENERAL_REG (dstclass))
9738
      || ((dstclass) == TARGET_REGS && ! REGCLASS_HAS_GENERAL_REG (srcclass)))
9739
    return 20;
9740
 
9741
  /* ??? ptabs faults on (value & 0x3) == 0x3  */
9742
  if (TARGET_SHMEDIA
9743
      && ((srcclass) == TARGET_REGS || (srcclass) == SIBCALL_REGS))
9744
    {
9745
      if (sh_gettrcost >= 0)
9746
        return sh_gettrcost;
9747
      else if (!TARGET_PT_FIXED)
9748
        return 100;
9749
    }
9750
 
9751
  if ((srcclass == FPSCR_REGS && ! REGCLASS_HAS_GENERAL_REG (dstclass))
9752
      || (dstclass == FPSCR_REGS && ! REGCLASS_HAS_GENERAL_REG (srcclass)))
9753
    return 4;
9754
 
9755
  if (TARGET_SHMEDIA
9756
      || (TARGET_FMOVD
9757
          && ! REGCLASS_HAS_GENERAL_REG (srcclass)
9758
          && ! REGCLASS_HAS_GENERAL_REG (dstclass)))
9759
    return 2 * ((GET_MODE_SIZE (mode) + 7) / 8U);
9760
 
9761
  return 2 * ((GET_MODE_SIZE (mode) + 3) / 4U);
9762
}
9763
 
9764
static rtx emit_load_ptr (rtx, rtx);
9765
 
9766
static rtx
9767
emit_load_ptr (rtx reg, rtx addr)
9768
{
9769
  rtx mem = gen_const_mem (ptr_mode, addr);
9770
 
9771
  if (Pmode != ptr_mode)
9772
    mem = gen_rtx_SIGN_EXTEND (Pmode, mem);
9773
  return emit_move_insn (reg, mem);
9774
}
9775
 
9776
static void
9777
sh_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
9778
                    HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
9779
                    tree function)
9780
{
9781
  CUMULATIVE_ARGS cum;
9782
  int structure_value_byref = 0;
9783
  rtx this, this_value, sibcall, insns, funexp;
9784
  tree funtype = TREE_TYPE (function);
9785
  int simple_add = CONST_OK_FOR_ADD (delta);
9786
  int did_load = 0;
9787
  rtx scratch0, scratch1, scratch2;
9788
  unsigned i;
9789
 
9790
  reload_completed = 1;
9791
  epilogue_completed = 1;
9792
  no_new_pseudos = 1;
9793
  current_function_uses_only_leaf_regs = 1;
9794
  reset_block_changes ();
9795
 
9796
  emit_note (NOTE_INSN_PROLOGUE_END);
9797
 
9798
  /* Find the "this" pointer.  We have such a wide range of ABIs for the
9799
     SH that it's best to do this completely machine independently.
9800
     "this" is passed as first argument, unless a structure return pointer
9801
     comes first, in which case "this" comes second.  */
9802
  INIT_CUMULATIVE_ARGS (cum, funtype, NULL_RTX, 0, 1);
9803
#ifndef PCC_STATIC_STRUCT_RETURN
9804
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
9805
    structure_value_byref = 1;
9806
#endif /* not PCC_STATIC_STRUCT_RETURN */
9807
  if (structure_value_byref && sh_struct_value_rtx (function, 0) == 0)
9808
    {
9809
      tree ptype = build_pointer_type (TREE_TYPE (funtype));
9810
 
9811
      FUNCTION_ARG_ADVANCE (cum, Pmode, ptype, 1);
9812
    }
9813
  this = FUNCTION_ARG (cum, Pmode, ptr_type_node, 1);
9814
 
9815
  /* For SHcompact, we only have r0 for a scratch register: r1 is the
9816
     static chain pointer (even if you can't have nested virtual functions
9817
     right now, someone might implement them sometime), and the rest of the
9818
     registers are used for argument passing, are callee-saved, or reserved.  */
9819
  /* We need to check call_used_regs / fixed_regs in case -fcall-saved-reg /
     -ffixed-reg has been used.  */
9821
  if (! call_used_regs[0] || fixed_regs[0])
9822
    error ("r0 needs to be available as a call-clobbered register");
9823
  scratch0 = scratch1 = scratch2 = gen_rtx_REG (Pmode, 0);
9824
  if (! TARGET_SH5)
9825
    {
9826
      if (call_used_regs[1] && ! fixed_regs[1])
9827
        scratch1 = gen_rtx_REG (ptr_mode, 1);
9828
      /* N.B., if not TARGET_HITACHI, register 2 is used to pass the pointer
         to the location where struct values are returned.  */
9830
      if (call_used_regs[3] && ! fixed_regs[3])
9831
        scratch2 = gen_rtx_REG (Pmode, 3);
9832
    }
9833
  else if (TARGET_SHMEDIA)
9834
    {
9835
      for (i = FIRST_GENERAL_REG; i <= LAST_GENERAL_REG; i++)
9836
        if (i != REGNO (scratch0) &&
9837
            call_used_regs[i] && ! fixed_regs[i] && ! FUNCTION_ARG_REGNO_P (i))
9838
          {
9839
            scratch1 = gen_rtx_REG (ptr_mode, i);
9840
            break;
9841
          }
9842
      if (scratch1 == scratch0)
9843
        error ("Need a second call-clobbered general purpose register");
9844
      for (i = FIRST_TARGET_REG; i <= LAST_TARGET_REG; i++)
9845
        if (call_used_regs[i] && ! fixed_regs[i])
9846
          {
9847
            scratch2 = gen_rtx_REG (Pmode, i);
9848
            break;
9849
          }
9850
      if (scratch2 == scratch0)
9851
        error ("Need a call-clobbered target register");
9852
    }
9853
 
9854
  this_value = plus_constant (this, delta);
9855
  if (vcall_offset
9856
      && (simple_add || scratch0 != scratch1)
9857
      && strict_memory_address_p (ptr_mode, this_value))
9858
    {
9859
      emit_load_ptr (scratch0, this_value);
9860
      did_load = 1;
9861
    }
9862
 
9863
  if (!delta)
9864
    ; /* Do nothing.  */
9865
  else if (simple_add)
9866
    emit_move_insn (this, this_value);
9867
  else
9868
    {
9869
      emit_move_insn (scratch1, GEN_INT (delta));
9870
      emit_insn (gen_add2_insn (this, scratch1));
9871
    }
9872
 
9873
  if (vcall_offset)
9874
    {
9875
      rtx offset_addr;
9876
 
9877
      if (!did_load)
9878
        emit_load_ptr (scratch0, this);
9879
 
9880
      offset_addr = plus_constant (scratch0, vcall_offset);
9881
      if (strict_memory_address_p (ptr_mode, offset_addr))
9882
        ; /* Do nothing.  */
9883
      else if (! TARGET_SH5 && scratch0 != scratch1)
9884
        {
9885
          /* scratch0 != scratch1, and we have indexed loads.  Get better
9886
             schedule by loading the offset into r1 and using an indexed
9887
             load - then the load of r1 can issue before the load from
9888
             (this + delta) finishes.  */
9889
          emit_move_insn (scratch1, GEN_INT (vcall_offset));
9890
          offset_addr = gen_rtx_PLUS (Pmode, scratch0, scratch1);
9891
        }
9892
      else if (CONST_OK_FOR_ADD (vcall_offset))
9893
        {
9894
          emit_insn (gen_add2_insn (scratch0, GEN_INT (vcall_offset)));
9895
          offset_addr = scratch0;
9896
        }
9897
      else if (scratch0 != scratch1)
9898
        {
9899
          emit_move_insn (scratch1, GEN_INT (vcall_offset));
9900
          emit_insn (gen_add2_insn (scratch0, scratch1));
9901
          offset_addr = scratch0;
9902
        }
9903
      else
9904
        gcc_unreachable (); /* FIXME */
9905
      emit_load_ptr (scratch0, offset_addr);
9906
 
9907
      if (Pmode != ptr_mode)
9908
        scratch0 = gen_rtx_TRUNCATE (ptr_mode, scratch0);
9909
      emit_insn (gen_add2_insn (this, scratch0));
9910
    }
9911
 
9912
  /* Generate a tail call to the target function.  */
9913
  if (! TREE_USED (function))
9914
    {
9915
      assemble_external (function);
9916
      TREE_USED (function) = 1;
9917
    }
9918
  funexp = XEXP (DECL_RTL (function), 0);
9919
  /* If the function is overridden, so is the thunk, hence we don't
9920
     need GOT addressing even if this is a public symbol.  */
9921
#if 0
9922
  if (TARGET_SH1 && ! flag_weak)
9923
    sibcall = gen_sibcalli_thunk (funexp, const0_rtx);
9924
  else
9925
#endif
9926
  if (TARGET_SH2 && flag_pic)
9927
    {
9928
      sibcall = gen_sibcall_pcrel (funexp, const0_rtx);
9929
      XEXP (XVECEXP (sibcall, 0, 2), 0) = scratch2;
9930
    }
9931
  else
9932
    {
9933
      if (TARGET_SHMEDIA && flag_pic)
9934
        {
9935
          funexp = gen_sym2PIC (funexp);
9936
          PUT_MODE (funexp, Pmode);
9937
        }
9938
      emit_move_insn (scratch2, funexp);
9939
      funexp = gen_rtx_MEM (FUNCTION_MODE, scratch2);
9940
      sibcall = gen_sibcall (funexp, const0_rtx, NULL_RTX);
9941
    }
9942
  sibcall = emit_call_insn (sibcall);
9943
  SIBLING_CALL_P (sibcall) = 1;
9944
  use_reg (&CALL_INSN_FUNCTION_USAGE (sibcall), this);
9945
  emit_barrier ();
9946
 
9947
  /* Run just enough of rest_of_compilation to do scheduling and get
9948
     the insns emitted.  Note that use_thunk calls
9949
     assemble_start_function and assemble_end_function.  */
9950
 
9951
  insn_locators_initialize ();
9952
  insns = get_insns ();
9953
 
9954
  if (optimize > 0)
9955
    {
9956
      /* Initialize the bitmap obstacks.  */
9957
      bitmap_obstack_initialize (NULL);
9958
      bitmap_obstack_initialize (&reg_obstack);
9959
      if (! cfun->cfg)
9960
        init_flow ();
9961
      rtl_register_cfg_hooks ();
9962
      init_rtl_bb_info (ENTRY_BLOCK_PTR);
9963
      init_rtl_bb_info (EXIT_BLOCK_PTR);
9964
      ENTRY_BLOCK_PTR->flags |= BB_RTL;
9965
      EXIT_BLOCK_PTR->flags |= BB_RTL;
9966
      find_basic_blocks (insns);
9967
 
9968
      if (flag_schedule_insns_after_reload)
9969
        {
9970
          life_analysis (PROP_FINAL);
9971
 
9972
          split_all_insns (1);
9973
 
9974
          schedule_insns ();
9975
        }
9976
      /* We must split jmp insn in PIC case.  */
9977
      else if (flag_pic)
9978
        split_all_insns_noflow ();
9979
    }
9980
 
9981
  sh_reorg ();
9982
 
9983
  if (optimize > 0 && flag_delayed_branch)
9984
    dbr_schedule (insns);
9985
 
9986
  shorten_branches (insns);
9987
  final_start_function (insns, file, 1);
9988
  final (insns, file, 1);
9989
  final_end_function ();
9990
 
9991
  if (optimize > 0)
9992
    {
9993
      /* Release all memory allocated by flow.  */
9994
      free_basic_block_vars ();
9995
 
9996
      /* Release the bitmap obstacks.  */
9997
      bitmap_obstack_release (&reg_obstack);
9998
      bitmap_obstack_release (NULL);
9999
    }
10000
 
10001
  reload_completed = 0;
10002
  epilogue_completed = 0;
10003
  no_new_pseudos = 0;
10004
}
10005
 
10006
rtx
10007
function_symbol (rtx target, const char *name, enum sh_function_kind kind)
10008
{
10009
  rtx sym;
10010
 
10011
  /* If this is not an ordinary function, the name usually comes from a
10012
     string literal or an sprintf buffer.  Make sure we use the same
10013
     string consistently, so that cse will be able to unify address loads.  */
10014
  if (kind != FUNCTION_ORDINARY)
10015
    name = IDENTIFIER_POINTER (get_identifier (name));
10016
  sym = gen_rtx_SYMBOL_REF (Pmode, name);
10017
  SYMBOL_REF_FLAGS (sym) = SYMBOL_FLAG_FUNCTION;
10018
  if (flag_pic)
10019
    switch (kind)
10020
      {
10021
      case FUNCTION_ORDINARY:
10022
        break;
10023
      case SFUNC_GOT:
10024
        {
10025
          rtx reg = target ? target : gen_reg_rtx (Pmode);
10026
 
10027
          emit_insn (gen_symGOT2reg (reg, sym));
10028
          sym = reg;
10029
          break;
10030
        }
10031
      case SFUNC_STATIC:
10032
        {
10033
          /* ??? To allow cse to work, we use GOTOFF relocations.
             We could add combiner patterns to transform this into
             straight pc-relative calls with sym2PIC / bsrf when
             label load and function call are still 1:1 and in the
             same basic block during combine.  */
10038
          rtx reg = target ? target : gen_reg_rtx (Pmode);
10039
 
10040
          emit_insn (gen_symGOTOFF2reg (reg, sym));
10041
          sym = reg;
10042
          break;
10043
        }
10044
      }
10045
  if (target && sym != target)
10046
    {
10047
      emit_move_insn (target, sym);
10048
      return target;
10049
    }
10050
  return sym;
10051
}
10052
 
10053
/* Find the number of a general purpose register in S.  */
10054
static int
10055
scavenge_reg (HARD_REG_SET *s)
10056
{
10057
  int r;
10058
  for (r = FIRST_GENERAL_REG; r <= LAST_GENERAL_REG; r++)
10059
    if (TEST_HARD_REG_BIT (*s, r))
10060
      return r;
10061
  return -1;
10062
}
10063
 
10064
rtx
10065
sh_get_pr_initial_val (void)
10066
{
10067
  rtx val;
10068
 
10069
  /* ??? Unfortunately, get_hard_reg_initial_val doesn't always work for the
10070
     PR register on SHcompact, because it might be clobbered by the prologue.
10071
     We check first if that is known to be the case.  */
10072
  if (TARGET_SHCOMPACT
10073
      && ((current_function_args_info.call_cookie
10074
           & ~ CALL_COOKIE_RET_TRAMP (1))
10075
          || current_function_has_nonlocal_label))
10076
    return gen_frame_mem (SImode, return_address_pointer_rtx);
10077
 
10078
  /* If we haven't finished rtl generation, there might be a nonlocal label
     that we haven't seen yet.
     ??? get_hard_reg_initial_val fails if it is called while no_new_pseudos
     is set, unless it has been called before for the same register.  And even
     then, we end up in trouble if we didn't use the register in the same
     basic block before.  So call get_hard_reg_initial_val now and wrap it
     in an unspec if we might need to replace it.  */
  /* ??? We also must do this for TARGET_SH1 in general, because otherwise
     combine can put the pseudo returned by get_hard_reg_initial_val into
     instructions that need a general purpose register, which will fail to
     be recognized when the pseudo becomes allocated to PR.  */
10089
  val
10090
    = get_hard_reg_initial_val (Pmode, TARGET_SHMEDIA ? PR_MEDIA_REG : PR_REG);
10091
  if (TARGET_SH1)
10092
    return gen_rtx_UNSPEC (SImode, gen_rtvec (1, val), UNSPEC_RA);
10093
  return val;
10094
}
10095
 
10096
int
10097
sh_expand_t_scc (enum rtx_code code, rtx target)
10098
{
10099
  rtx result = target;
10100
  HOST_WIDE_INT val;
10101
 
10102
  if (GET_CODE (sh_compare_op0) != REG || REGNO (sh_compare_op0) != T_REG
10103
      || GET_CODE (sh_compare_op1) != CONST_INT)
10104
    return 0;
10105
  if (GET_CODE (result) != REG)
10106
    result = gen_reg_rtx (SImode);
10107
  val = INTVAL (sh_compare_op1);
10108
  if ((code == EQ && val == 1) || (code == NE && val == 0))
10109
    emit_insn (gen_movt (result));
10110
  else if ((code == EQ && val == 0) || (code == NE && val == 1))
10111
    {
10112
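      /* Compute the logical negation of the T bit: with identical operands,
         subc leaves -T in RESULT, and adding 1 then yields 1 - T.  The
         clobber tells flow that the previous contents of RESULT are dead.  */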
      emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
10113
      emit_insn (gen_subc (result, result, result));
10114
      emit_insn (gen_addsi3 (result, result, const1_rtx));
10115
    }
10116
  else if (code == EQ || code == NE)
10117
    emit_insn (gen_move_insn (result, GEN_INT (code == NE)));
10118
  else
10119
    return 0;
10120
  if (result != target)
10121
    emit_move_insn (target, result);
10122
  return 1;
10123
}
10124
 
10125
/* INSN is an sfunc; return the rtx that describes the address used.  */
10126
static rtx
10127
extract_sfunc_addr (rtx insn)
10128
{
10129
  rtx pattern, part = NULL_RTX;
10130
  int len, i;
10131
 
10132
  pattern = PATTERN (insn);
10133
  len = XVECLEN (pattern, 0);
10134
  for (i = 0; i < len; i++)
10135
    {
10136
      part = XVECEXP (pattern, 0, i);
10137
      if (GET_CODE (part) == USE && GET_MODE (XEXP (part, 0)) == Pmode
10138
          && GENERAL_REGISTER_P (true_regnum (XEXP (part, 0))))
10139
        return XEXP (part, 0);
10140
    }
10141
  gcc_assert (GET_CODE (XVECEXP (pattern, 0, 0)) == UNSPEC_VOLATILE);
10142
  return XVECEXP (XVECEXP (pattern, 0, 0), 0, 1);
10143
}
10144
 
10145
/* Verify that the register in use_sfunc_addr still agrees with the address
10146
   used in the sfunc.  This prevents fill_slots_from_thread from changing
10147
   use_sfunc_addr.
10148
   INSN is the use_sfunc_addr instruction, and REG is the register it
10149
   guards.  */
10150
int
10151
check_use_sfunc_addr (rtx insn, rtx reg)
10152
{
10153
  /* Search for the sfunc.  It should really come right after INSN.  */
10154
  while ((insn = NEXT_INSN (insn)))
10155
    {
10156
      if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
10157
        break;
10158
      if (! INSN_P (insn))
10159
        continue;
10160
 
10161
      if (GET_CODE (PATTERN (insn)) == SEQUENCE)
10162
        insn = XVECEXP (PATTERN (insn), 0, 0);
10163
      if (GET_CODE (PATTERN (insn)) != PARALLEL
10164
          || get_attr_type (insn) != TYPE_SFUNC)
10165
        continue;
10166
      return rtx_equal_p (extract_sfunc_addr (insn), reg);
10167
    }
10168
  gcc_unreachable ();
10169
}
10170
 
10171
/* This function returns a constant rtx that represents 2**15 / pi in
   SFmode.  It's used to scale SFmode angles, in radians, to a
   fixed-point signed 16.16-bit fraction of a full circle (i.e., 2*pi
   maps to 0x10000).  */
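/* 2**15 / pi = 32768 / 3.14159265...  ~= 10430.378350470453; multiplying an
   angle in radians by this constant maps 2*pi to 0x10000 and pi/2 to
   0x4000.  */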
 
10176
static GTY(()) rtx sh_fsca_sf2int_rtx;
10177
 
10178
rtx
10179
sh_fsca_sf2int (void)
10180
{
10181
  if (! sh_fsca_sf2int_rtx)
10182
    {
10183
      REAL_VALUE_TYPE rv;
10184
 
10185
      real_from_string (&rv, "10430.378350470453");
10186
      sh_fsca_sf2int_rtx = const_double_from_real_value (rv, SFmode);
10187
    }
10188
 
10189
  return sh_fsca_sf2int_rtx;
10190
}
10191
 
10192
/* This function returns a constant rtx that represents 2**15 / pi in
   DFmode.  It's used to scale DFmode angles, in radians, to a
   fixed-point signed 16.16-bit fraction of a full circle (i.e., 2*pi
   maps to 0x10000).  */
10196
 
10197
static GTY(()) rtx sh_fsca_df2int_rtx;
10198
 
10199
rtx
10200
sh_fsca_df2int (void)
10201
{
10202
  if (! sh_fsca_df2int_rtx)
10203
    {
10204
      REAL_VALUE_TYPE rv;
10205
 
10206
      real_from_string (&rv, "10430.378350470453");
10207
      sh_fsca_df2int_rtx = const_double_from_real_value (rv, DFmode);
10208
    }
10209
 
10210
  return sh_fsca_df2int_rtx;
10211
}
10212
 
10213
/* This function returns a constant rtx that represents pi / 2**15 in
   SFmode.  It's used to scale a fixed-point signed 16.16-bit fraction
   of a full circle back to an SFmode value (i.e., 0x10000 maps to
   2*pi).  */
10217
 
10218
static GTY(()) rtx sh_fsca_int2sf_rtx;
10219
 
10220
rtx
10221
sh_fsca_int2sf (void)
10222
{
10223
  if (! sh_fsca_int2sf_rtx)
10224
    {
10225
      REAL_VALUE_TYPE rv;
10226
 
10227
      real_from_string (&rv, "9.587379924285257e-5");
10228
      sh_fsca_int2sf_rtx = const_double_from_real_value (rv, SFmode);
10229
    }
10230
 
10231
  return sh_fsca_int2sf_rtx;
10232
}
10233
 
10234
/* Initialize the CUMULATIVE_ARGS structure.  */
10235
 
10236
void
10237
sh_init_cumulative_args (CUMULATIVE_ARGS *  pcum,
10238
                         tree               fntype,
10239
                         rtx                libname ATTRIBUTE_UNUSED,
10240
                         tree               fndecl,
10241
                         signed int         n_named_args,
10242
                         enum machine_mode  mode)
10243
{
10244
  pcum->arg_count [(int) SH_ARG_FLOAT] = 0;
10245
  pcum->free_single_fp_reg = 0;
10246
  pcum->stack_regs = 0;
10247
  pcum->byref_regs = 0;
10248
  pcum->byref = 0;
10249
  pcum->outgoing = (n_named_args == -1) ? 0 : 1;
10250
 
10251
  /* XXX - Should we check TARGET_HITACHI here ???  */
10252
  pcum->renesas_abi = sh_attr_renesas_p (fntype) ? 1 : 0;
10253
 
10254
  if (fntype)
10255
    {
10256
      pcum->force_mem = ((TARGET_HITACHI || pcum->renesas_abi)
10257
                         && aggregate_value_p (TREE_TYPE (fntype), fndecl));
10258
      pcum->prototype_p = TYPE_ARG_TYPES (fntype) ? TRUE : FALSE;
10259
      pcum->arg_count [(int) SH_ARG_INT]
10260
        = TARGET_SH5 && aggregate_value_p (TREE_TYPE (fntype), fndecl);
10261
 
10262
      pcum->call_cookie
10263
        = CALL_COOKIE_RET_TRAMP (TARGET_SHCOMPACT
10264
                                 && pcum->arg_count [(int) SH_ARG_INT] == 0
10265
                                 && (TYPE_MODE (TREE_TYPE (fntype)) == BLKmode
10266
                                     ? int_size_in_bytes (TREE_TYPE (fntype))
10267
                                     : GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (fntype)))) > 4
10268
                                 && (BASE_RETURN_VALUE_REG (TYPE_MODE (TREE_TYPE (fntype)))
10269
                                     == FIRST_RET_REG));
10270
    }
10271
  else
10272
    {
10273
      pcum->arg_count [(int) SH_ARG_INT] = 0;
10274
      pcum->prototype_p = FALSE;
10275
      if (mode != VOIDmode)
10276
        {
10277
          pcum->call_cookie =
10278
            CALL_COOKIE_RET_TRAMP (TARGET_SHCOMPACT
10279
                                   && GET_MODE_SIZE (mode) > 4
10280
                                   && BASE_RETURN_VALUE_REG (mode) == FIRST_RET_REG);
10281
 
10282
          /* If the default ABI is the Renesas ABI then all library
10283
             calls must assume that the library will be using the
10284
             Renesas ABI.  So if the function would return its result
10285
             in memory then we must force the address of this memory
10286
             block onto the stack.  Ideally we would like to call
10287
             targetm.calls.return_in_memory() here but we do not have
10288
             the TYPE or the FNDECL available so we synthesize the
10289
             contents of that function as best we can.  */
10290
          pcum->force_mem =
10291
            (TARGET_DEFAULT & MASK_HITACHI)
10292
            && (mode == BLKmode
10293
                || (GET_MODE_SIZE (mode) > 4
10294
                    && !(mode == DFmode
10295
                         && TARGET_FPU_DOUBLE)));
10296
        }
10297
      else
10298
        {
10299
          pcum->call_cookie = 0;
10300
          pcum->force_mem = FALSE;
10301
        }
10302
    }
10303
}
10304
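/* Sketch (not the actual sh.h text, which may differ in detail): a target's
   INIT_CUMULATIVE_ARGS and INIT_CUMULATIVE_LIBCALL_ARGS macros would
   typically forward to the function above roughly as follows.
   N_NAMED_ARGS is -1 when scanning incoming arguments, and the libcall
   variant passes the return MODE instead of a function type.  */

#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS) \
  sh_init_cumulative_args (&(CUM), (FNTYPE), (LIBNAME), (FNDECL), \
                           (N_NAMED_ARGS), VOIDmode)

#define INIT_CUMULATIVE_LIBCALL_ARGS(CUM, MODE, LIBNAME) \
  sh_init_cumulative_args (&(CUM), NULL_TREE, (LIBNAME), NULL_TREE, 0, (MODE))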
 
10305
/* Determine if two hard register sets intersect.
10306
   Return 1 if they do.  */
10307
 
10308
static int
10309
hard_regs_intersect_p (HARD_REG_SET *a, HARD_REG_SET *b)
10310
{
10311
  HARD_REG_SET c;
10312
  COPY_HARD_REG_SET (c, *a);
10313
  AND_HARD_REG_SET (c, *b);
10314
  GO_IF_HARD_REG_SUBSET (c, reg_class_contents[(int) NO_REGS], lose);
10315
  return 1;
10316
lose:
10317
  return 0;
10318
}
10319
 
10320
#ifdef TARGET_ADJUST_UNROLL_MAX
10321
static int
10322
sh_adjust_unroll_max (struct loop * loop, int insn_count,
10323
                      int max_unrolled_insns, int strength_reduce_p,
10324
                      int unroll_type)
10325
{
10326
/* This doesn't work in 4.0 because the old unroller & loop.h are gone.  */
10327
  if (TARGET_ADJUST_UNROLL && TARGET_SHMEDIA)
10328
    {
10329
      /* Throttle back loop unrolling so that the costs of using more
10330
         targets than the eight target registers we have don't outweigh
10331
         the benefits of unrolling.  */
10332
      rtx insn;
10333
      int n_labels = 0, n_calls = 0, n_exit_dest = 0, n_inner_loops = -1;
10334
      int n_barriers = 0;
10335
      rtx dest;
10336
      int i;
10337
      rtx exit_dest[8];
10338
      int threshold;
10339
      int unroll_benefit = 0, mem_latency = 0;
10340
      int base_cost, best_cost, cost;
10341
      int factor, best_factor;
10342
      int n_dest;
10343
      unsigned max_iterations = 32767;
10344
      int n_iterations;
10345
      int need_precond = 0, precond = 0;
10346
      basic_block * bbs = get_loop_body (loop);
10347
      struct niter_desc *desc;
10348
 
10349
      /* Assume that all labels inside the loop are used from inside the
10350
         loop.  If the loop has multiple entry points, it is unlikely to
10351
         be unrolled anyway.
10352
         Also assume that all calls are to different functions.  That is
10353
         somewhat pessimistic, but if you have lots of calls, unrolling the
10354
         loop is not likely to gain you much in the first place.  */
10355
      i = loop->num_nodes - 1;
10356
      for (insn = BB_HEAD (bbs[i]); ; )
10357
        {
10358
          if (GET_CODE (insn) == CODE_LABEL)
10359
            n_labels++;
10360
          else if (GET_CODE (insn) == CALL_INSN)
10361
            n_calls++;
10362
          else if (GET_CODE (insn) == NOTE
10363
                   && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
10364
            n_inner_loops++;
10365
          else if (GET_CODE (insn) == BARRIER)
10366
            n_barriers++;
10367
          if (insn != BB_END (bbs[i]))
10368
            insn = NEXT_INSN (insn);
10369
          else if (--i >= 0)
10370
            insn = BB_HEAD (bbs[i]);
10371
          else
10372
            break;
10373
        }
10374
      free (bbs);
10375
      /* One label for the loop top is normal, and it won't be duplicated by
10376
         unrolling.  */
10377
      if (n_labels <= 1)
10378
        return max_unrolled_insns;
10379
      if (n_inner_loops > 0)
10380
        return 0;
10381
      for (dest = loop->exit_labels; dest && n_exit_dest < 8;
10382
           dest = LABEL_NEXTREF (dest))
10383
        {
10384
          for (i = n_exit_dest - 1;
10385
               i >= 0 && XEXP (dest, 0) != XEXP (exit_dest[i], 0); i--);
10386
          if (i < 0)
10387
            exit_dest[n_exit_dest++] = dest;
10388
        }
10389
      /* If the loop top and call and exit destinations are enough to fill up
10390
         the target registers, we're unlikely to do any more damage by
10391
         unrolling.  */
10392
      if (n_calls + n_exit_dest >= 7)
10393
        return max_unrolled_insns;
10394
 
10395
      /* ??? In the new loop unroller, there is no longer any strength
10396
         reduction information available.  Thus, when it comes to unrolling,
10397
         we know the cost of everything, but we know the value of nothing.  */
10398
#if 0
10399
      if (strength_reduce_p
10400
          && (unroll_type == LPT_UNROLL_RUNTIME
10401
              || unroll_type == LPT_UNROLL_CONSTANT
10402
              || unroll_type == LPT_PEEL_COMPLETELY))
10403
        {
10404
          struct loop_ivs *ivs = LOOP_IVS (loop);
10405
          struct iv_class *bl;
10406
 
10407
          /* We'll save one compare-and-branch in each loop body copy
10408
             but the last one.  */
10409
          unroll_benefit = 1;
10410
          /* Assess the benefit of removing biv & giv updates.  */
10411
          for (bl = ivs->list; bl; bl = bl->next)
10412
            {
10413
              rtx increment = biv_total_increment (bl);
10414
              struct induction *v;
10415
 
10416
              if (increment && GET_CODE (increment) == CONST_INT)
10417
                {
10418
                  unroll_benefit++;
10419
                  for (v = bl->giv; v; v = v->next_iv)
10420
                    {
10421
                      if (! v->ignore && v->same == 0
10422
                          && GET_CODE (v->mult_val) == CONST_INT)
10423
                        unroll_benefit++;
10424
                      /* If this giv uses an array, try to determine
10425
                         a maximum iteration count from the size of the
10426
                         array.  This need not be correct all the time,
10427
                         but should not be too far off the mark too often.  */
10428
                      while (v->giv_type == DEST_ADDR)
10429
                        {
10430
                          rtx mem = PATTERN (v->insn);
10431
                          tree mem_expr, type, size_tree;
10432
 
10433
                          if (GET_CODE (SET_SRC (mem)) == MEM)
10434
                            mem = SET_SRC (mem);
10435
                          else if (GET_CODE (SET_DEST (mem)) == MEM)
10436
                            mem = SET_DEST (mem);
10437
                          else
10438
                            break;
10439
                          mem_expr = MEM_EXPR (mem);
10440
                          if (! mem_expr)
10441
                            break;
10442
                          type = TREE_TYPE (mem_expr);
10443
                          if (TREE_CODE (type) != ARRAY_TYPE
10444
                              || ! TYPE_SIZE (type) || ! TYPE_SIZE_UNIT (type))
10445
                            break;
10446
                          size_tree = fold_build2 (TRUNC_DIV_EXPR,
10447
                                                   bitsizetype,
10448
                                                   TYPE_SIZE (type),
10449
                                                   TYPE_SIZE_UNIT (type));
10450
                          if (TREE_CODE (size_tree) == INTEGER_CST
10451
                              && ! TREE_INT_CST_HIGH (size_tree)
10452
                              && TREE_INT_CST_LOW  (size_tree) < max_iterations)
10453
                            max_iterations = TREE_INT_CST_LOW  (size_tree);
10454
                          break;
10455
                        }
10456
                    }
10457
                }
10458
            }
10459
        }
10460
#else /* 0 */
10461
      /* Assume there is at least some benefit.  */
10462
      unroll_benefit = 1;
10463
#endif /* 0 */
10464
 
10465
      desc = get_simple_loop_desc (loop);
10466
      n_iterations = desc->const_iter ? desc->niter : 0;
10467
      max_iterations
10468
        = max_iterations < desc->niter_max ? max_iterations : desc->niter_max;
10469
 
10470
      if (! strength_reduce_p || ! n_iterations)
10471
        need_precond = 1;
10472
      if (! n_iterations)
10473
        {
10474
          n_iterations
10475
            = max_iterations < 3 ? max_iterations : max_iterations * 3 / 4;
10476
          if (! n_iterations)
10477
            return 0;
10478
        }
10479
#if 0 /* ??? See above - missing induction variable information.  */
10480
      while (unroll_benefit > 1) /* no loop */
10481
        {
10482
          /* We include the benefit of biv/giv updates.  Check if some or
10483
             all of these updates are likely to fit into a scheduling
10484
             bubble of a load.
10485
             We check for the following case:
10486
             - All the insns leading to the first JUMP_INSN are in a strict
10487
               dependency chain.
10488
             - there is at least one memory reference in them.
10489
 
10490
             When we find such a pattern, we assume that we can hide as many
10491
             updates as the total of the load latency is, if we have an
10492
             unroll factor of at least two.  We might or might not also do
10493
             this without unrolling, so rather than considering this as an
10494
             extra unroll benefit, discount it in the unroll benefits of unroll
10495
             factors higher than two.  */
10496
 
10497
          rtx set, last_set;
10498
 
10499
          insn = next_active_insn (loop->start);
10500
          last_set = single_set (insn);
10501
          if (! last_set)
10502
            break;
10503
          if (GET_CODE (SET_SRC (last_set)) == MEM)
10504
            mem_latency += 2;
10505
          for (insn = NEXT_INSN (insn); insn != end; insn = NEXT_INSN (insn))
10506
            {
10507
              if (! INSN_P (insn))
10508
                continue;
10509
              if (GET_CODE (insn) == JUMP_INSN)
10510
                break;
10511
              if (! reg_referenced_p (SET_DEST (last_set), PATTERN (insn)))
10512
                {
10513
                  /* Check if this is a to-be-reduced giv insn.  */
10514
                  struct loop_ivs *ivs = LOOP_IVS (loop);
10515
                  struct iv_class *bl;
10516
                  struct induction *v;
10517
                  for (bl = ivs->list; bl; bl = bl->next)
10518
                    {
10519
                      if (bl->biv->insn == insn)
10520
                        goto is_biv;
10521
                      for (v = bl->giv; v; v = v->next_iv)
10522
                        if (v->insn == insn)
10523
                          goto is_giv;
10524
                    }
10525
                  mem_latency--;
10526
                is_biv:
10527
                is_giv:
10528
                  continue;
10529
                }
10530
              set = single_set (insn);
10531
              if (! set)
10532
                continue;
10533
              if (GET_CODE (SET_SRC (set)) == MEM)
10534
                mem_latency += 2;
10535
              last_set = set;
10536
            }
10537
          if (mem_latency < 0)
10538
            mem_latency = 0;
10539
          else if (mem_latency > unroll_benefit - 1)
10540
            mem_latency = unroll_benefit - 1;
10541
          break;
10542
        }
10543
#endif /* 0 */
10544
      if (n_labels + (unroll_benefit + n_labels * 8) / n_iterations
10545
          <= unroll_benefit)
10546
        return max_unrolled_insns;
10547
 
10548
      n_dest = n_labels + n_calls + n_exit_dest;
10549
      base_cost = n_dest <= 8 ? 0 : n_dest - 7;
10550
      best_cost = 0;
10551
      best_factor = 1;
10552
      if (n_barriers * 2 > n_labels - 1)
10553
        n_barriers = (n_labels - 1) / 2;
10554
      for (factor = 2; factor <= 8; factor++)
10555
        {
10556
          /* Bump up preconditioning cost for each power of two.  */
10557
          if (! (factor & (factor-1)))
10558
            precond += 4;
10559
          /* When preconditioning, only powers of two will be considered.  */
10560
          else if (need_precond)
10561
            continue;
10562
          n_dest = ((unroll_type != LPT_PEEL_COMPLETELY)
10563
                    + (n_labels - 1) * factor + n_calls + n_exit_dest
10564
                    - (n_barriers * factor >> 1)
10565
                    + need_precond);
10566
          cost
10567
            = ((n_dest <= 8 ? 0 : n_dest - 7)
10568
               - base_cost * factor
10569
               - ((factor > 2 ? unroll_benefit - mem_latency : unroll_benefit)
10570
                  * (factor - (unroll_type != LPT_PEEL_COMPLETELY)))
10571
               + ((unroll_benefit + 1 + (n_labels - 1) * factor)
10572
                  / n_iterations));
10573
          if (need_precond)
10574
            cost += (precond + unroll_benefit * factor / 2) / n_iterations;
10575
          if (cost < best_cost)
10576
            {
10577
              best_cost = cost;
10578
              best_factor = factor;
10579
            }
10580
        }
10581
      threshold = best_factor * insn_count;
10582
      if (max_unrolled_insns > threshold)
10583
        max_unrolled_insns = threshold;
10584
    }
10585
  return max_unrolled_insns;
10586
}
10587
#endif /* TARGET_ADJUST_UNROLL_MAX */
10588
 
10589
/* Replace any occurrence of FROM(n) in X with TO(n).  The function does
10590
   not recurse into a CONST_DOUBLE when doing the replacement.
10591
 
10592
   Note that copying is not done, so X must not be shared unless all copies
10593
   are to be modified.
10594
 
10595
   This is like replace_rtx, except that we operate on N_REPLACEMENTS
10596
   replacements simultaneously - FROM(n) is replacements[n*2] and TO(n) is
10597
   replacements[n*2+1] - and that we take mode changes into account.
10598
 
10599
   If a replacement is ambiguous, return NULL_RTX.
10600
 
10601
   If MODIFY is zero, don't modify any rtl in place,
10602
   just return zero or nonzero for failure / success.  */
10603
 
10604
rtx
10605
replace_n_hard_rtx (rtx x, rtx *replacements, int n_replacements, int modify)
10606
{
10607
  int i, j;
10608
  const char *fmt;
10609
 
10610
  /* The following prevents an infinite loop when we change a MEM inside a
10611
     CONST_DOUBLE into the same CONST_DOUBLE.  */
10612
  if (x != 0 && GET_CODE (x) == CONST_DOUBLE)
10613
    return x;
10614
 
10615
  for (i = n_replacements - 1; i >= 0 ; i--)
10616
  if (x == replacements[i*2] && GET_MODE (x) == GET_MODE (replacements[i*2+1]))
10617
    return replacements[i*2+1];
10618
 
10619
  /* Allow this function to make replacements in EXPR_LISTs.  */
10620
  if (x == 0)
10621
    return 0;
10622
 
10623
  if (GET_CODE (x) == SUBREG)
10624
    {
10625
      rtx new = replace_n_hard_rtx (SUBREG_REG (x), replacements,
10626
                                    n_replacements, modify);
10627
 
10628
      if (GET_CODE (new) == CONST_INT)
10629
        {
10630
          x = simplify_subreg (GET_MODE (x), new,
10631
                               GET_MODE (SUBREG_REG (x)),
10632
                               SUBREG_BYTE (x));
10633
          if (! x)
10634
            abort ();
10635
        }
10636
      else if (modify)
10637
        SUBREG_REG (x) = new;
10638
 
10639
      return x;
10640
    }
10641
  else if (GET_CODE (x) == REG)
10642
    {
10643
      unsigned regno = REGNO (x);
10644
      unsigned nregs = (regno < FIRST_PSEUDO_REGISTER
10645
                        ? HARD_REGNO_NREGS (regno, GET_MODE (x)) : 1);
10646
      rtx result = NULL_RTX;
10647
 
10648
      for (i = n_replacements - 1; i >= 0; i--)
10649
        {
10650
          rtx from = replacements[i*2];
10651
          rtx to = replacements[i*2+1];
10652
          unsigned from_regno, from_nregs, to_regno, new_regno;
10653
 
10654
          if (GET_CODE (from) != REG)
10655
            continue;
10656
          from_regno = REGNO (from);
10657
          from_nregs = (from_regno < FIRST_PSEUDO_REGISTER
10658
                        ? HARD_REGNO_NREGS (from_regno, GET_MODE (from)) : 1);
10659
          if (regno < from_regno + from_nregs && regno + nregs > from_regno)
10660
            {
10661
              if (regno < from_regno
10662
                  || regno + nregs > from_regno + from_nregs
10663
                  || GET_CODE (to) != REG
10664
                  || result)
10665
                return NULL_RTX;
10666
              to_regno = REGNO (to);
10667
              if (to_regno < FIRST_PSEUDO_REGISTER)
10668
                {
10669
                  new_regno = regno + to_regno - from_regno;
10670
                  if ((unsigned) HARD_REGNO_NREGS (new_regno, GET_MODE (x))
10671
                      != nregs)
10672
                    return NULL_RTX;
10673
                  result = gen_rtx_REG (GET_MODE (x), new_regno);
10674
                }
10675
              else if (GET_MODE (x) <= GET_MODE (to))
10676
                result = gen_lowpart_common (GET_MODE (x), to);
10677
              else
10678
                result = gen_lowpart_SUBREG (GET_MODE (x), to);
10679
            }
10680
        }
10681
      return result ? result : x;
10682
    }
10683
  else if (GET_CODE (x) == ZERO_EXTEND)
10684
    {
10685
      rtx new = replace_n_hard_rtx (XEXP (x, 0), replacements,
10686
                                    n_replacements, modify);
10687
 
10688
      if (GET_CODE (new) == CONST_INT)
10689
        {
10690
          x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
10691
                                        new, GET_MODE (XEXP (x, 0)));
10692
          if (! x)
10693
            abort ();
10694
        }
10695
      else if (modify)
10696
        XEXP (x, 0) = new;
10697
 
10698
      return x;
10699
    }
10700
 
10701
  fmt = GET_RTX_FORMAT (GET_CODE (x));
10702
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
10703
    {
10704
      rtx new;
10705
 
10706
      if (fmt[i] == 'e')
10707
        {
10708
          new = replace_n_hard_rtx (XEXP (x, i), replacements,
10709
                                    n_replacements, modify);
10710
          if (!new)
10711
            return NULL_RTX;
10712
          if (modify)
10713
            XEXP (x, i) = new;
10714
        }
10715
      else if (fmt[i] == 'E')
10716
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
10717
          {
10718
            new = replace_n_hard_rtx (XVECEXP (x, i, j), replacements,
10719
                                      n_replacements, modify);
10720
            if (!new)
10721
              return NULL_RTX;
10722
            if (modify)
10723
              XVECEXP (x, i, j) = new;
10724
          }
10725
    }
10726
 
10727
  return x;
10728
}
10729
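/* Hypothetical usage sketch (not part of sh.c): substitute the hard
   register pairs r2 -> r6 and r3 -> r7 throughout PAT.  A first call with
   MODIFY == 0 acts as a dry run; per the comment above, it returns
   NULL_RTX if any replacement would be ambiguous, in which case PAT is
   left untouched.  The wrapper name below is invented for exposition.  */

static bool
swap_hard_regs_in_pattern (rtx pat)
{
  rtx repl[4];

  repl[0] = gen_rtx_REG (SImode, 2);    /* FROM(0) */
  repl[1] = gen_rtx_REG (SImode, 6);    /* TO(0)   */
  repl[2] = gen_rtx_REG (SImode, 3);    /* FROM(1) */
  repl[3] = gen_rtx_REG (SImode, 7);    /* TO(1)   */

  /* Dry run: check that every replacement is unambiguous.  */
  if (! replace_n_hard_rtx (pat, repl, 2, 0))
    return false;

  /* Commit the replacement in place.  */
  replace_n_hard_rtx (pat, repl, 2, 1);
  return true;
}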
 
10730
rtx
10731
sh_gen_truncate (enum machine_mode mode, rtx x, int need_sign_ext)
10732
{
10733
  enum rtx_code code = TRUNCATE;
10734
 
10735
  if (GET_CODE (x) == ZERO_EXTEND || GET_CODE (x) == SIGN_EXTEND)
10736
    {
10737
      rtx inner = XEXP (x, 0);
10738
      enum machine_mode inner_mode = GET_MODE (inner);
10739
 
10740
      if (inner_mode == mode)
10741
        return inner;
10742
      else if (GET_MODE_SIZE (inner_mode) >= GET_MODE_SIZE (mode))
10743
        x = inner;
10744
      else if (GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (mode)
10745
               && (! need_sign_ext || GET_CODE (x) == SIGN_EXTEND))
10746
        {
10747
          code = GET_CODE (x);
10748
          x = inner;
10749
        }
10750
    }
10751
  return gen_rtx_fmt_e (code, mode, x);
10752
}
10753
 
10754
/* Called via for_each_rtx after reload, to clean up truncates of
10755
   registers that span multiple actual hard registers.  */
10756
int
10757
shmedia_cleanup_truncate (rtx *p, void *n_changes)
10758
{
10759
  rtx x = *p, reg;
10760
 
10761
  if (GET_CODE (x) != TRUNCATE)
10762
    return 0;
10763
  reg = XEXP (x, 0);
10764
  if (GET_MODE_SIZE (GET_MODE (reg)) > 8 && GET_CODE (reg) == REG)
10765
    {
10766
      enum machine_mode reg_mode = GET_MODE (reg);
10767
      XEXP (x, 0) = simplify_subreg (DImode, reg, reg_mode,
10768
                                     subreg_lowpart_offset (DImode, reg_mode));
10769
      *(int*) n_changes += 1;
10770
      return -1;
10771
    }
10772
  return 0;
10773
}
10774
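/* Hypothetical call-site sketch (not part of sh.c): shmedia_cleanup_truncate
   is meant to be walked over an insn pattern with for_each_rtx, with a
   counter passed through the opaque pointer so the caller can tell whether
   anything was rewritten.  The wrapper name is invented for exposition.  */

static int
cleanup_truncates_in_insn (rtx insn)
{
  int n_changes = 0;

  for_each_rtx (&PATTERN (insn), shmedia_cleanup_truncate, &n_changes);
  return n_changes;
}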
 
10775
/* Load and store depend on the highpart of the address.  However,
10776
   set_attr_alternative does not give well-defined results before reload,
10777
   so we must look at the rtl ourselves to see if any of the feeding
10778
   registers is used in a memref.  */
10779
 
10780
/* Called by sh_contains_memref_p via for_each_rtx.  */
10781
static int
10782
sh_contains_memref_p_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
10783
{
10784
  return (GET_CODE (*loc) == MEM);
10785
}
10786
 
10787
/* Return nonzero iff INSN contains a MEM.  */
10788
int
10789
sh_contains_memref_p (rtx insn)
10790
{
10791
  return for_each_rtx (&PATTERN (insn), &sh_contains_memref_p_1, NULL);
10792
}
10793
 
10794
/* FNADDR is the MEM expression from a call expander.  Return an address
10795
   to use in an SHmedia insn pattern.  */
10796
rtx
10797
shmedia_prepare_call_address (rtx fnaddr, int is_sibcall)
10798
{
10799
  int is_sym;
10800
 
10801
  fnaddr = XEXP (fnaddr, 0);
10802
  is_sym = GET_CODE (fnaddr) == SYMBOL_REF;
10803
  if (flag_pic && is_sym)
10804
    {
10805
      if (! SYMBOL_REF_LOCAL_P (fnaddr))
10806
        {
10807
          rtx reg = gen_reg_rtx (Pmode);
10808
 
10809
          /* We must not use GOTPLT for sibcalls, because PIC_REG
10810
             must be restored before the PLT code gets to run.  */
10811
          if (is_sibcall)
10812
            emit_insn (gen_symGOT2reg (reg, fnaddr));
10813
          else
10814
            emit_insn (gen_symGOTPLT2reg (reg, fnaddr));
10815
          fnaddr = reg;
10816
        }
10817
      else
10818
        {
10819
          fnaddr = gen_sym2PIC (fnaddr);
10820
          PUT_MODE (fnaddr, Pmode);
10821
        }
10822
    }
10823
  /* If ptabs might trap, make this visible to the rest of the compiler.
10824
     We generally assume that symbols pertain to valid locations, but
10825
     it is possible to generate invalid symbols with asm or linker tricks.
10826
     In a list of functions where each returns its successor, an invalid
10827
     symbol might denote an empty list.  */
10828
  if (!TARGET_PT_FIXED
10829
      && (!is_sym || TARGET_INVALID_SYMBOLS)
10830
      && (!REG_P (fnaddr) || ! TARGET_REGISTER_P (REGNO (fnaddr))))
10831
    {
10832
      rtx tr = gen_reg_rtx (PDImode);
10833
 
10834
      emit_insn (gen_ptabs (tr, fnaddr));
10835
      fnaddr = tr;
10836
    }
10837
  else if (! target_reg_operand (fnaddr, Pmode))
10838
    fnaddr = copy_to_mode_reg (Pmode, fnaddr);
10839
  return fnaddr;
10840
}
10841
 
10842
enum reg_class
10843
sh_secondary_reload (bool in_p, rtx x, enum reg_class class,
10844
                     enum machine_mode mode, secondary_reload_info *sri)
10845
{
10846
  if (in_p)
10847
    {
10848
      if (REGCLASS_HAS_FP_REG (class)
10849
          && ! TARGET_SHMEDIA
10850
          && immediate_operand ((x), mode)
10851
          && ! ((fp_zero_operand (x) || fp_one_operand (x))
10852
                && mode == SFmode && fldi_ok ()))
10853
        switch (mode)
10854
          {
10855
          case SFmode:
10856
            sri->icode = CODE_FOR_reload_insf__frn;
10857
            return NO_REGS;
10858
          case DFmode:
10859
            sri->icode = CODE_FOR_reload_indf__frn;
10860
            return NO_REGS;
10861
          case SImode:
10862
            /* ??? If we knew that we are in the appropriate mode -
10863
               single precision - we could use a reload pattern directly.  */
10864
            return FPUL_REGS;
10865
          default:
10866
            abort ();
10867
          }
10868
      if (class == FPUL_REGS
10869
          && ((GET_CODE (x) == REG
10870
               && (REGNO (x) == MACL_REG || REGNO (x) == MACH_REG
10871
                   || REGNO (x) == T_REG))
10872
              || GET_CODE (x) == PLUS))
10873
        return GENERAL_REGS;
10874
      if (class == FPUL_REGS && immediate_operand (x, mode))
10875
        {
10876
          if (GET_CODE (x) == CONST_INT && CONST_OK_FOR_I08 (INTVAL (x)))
10877
            return GENERAL_REGS;
10878
          sri->icode = CODE_FOR_reload_insi__i_fpul;
10879
          return NO_REGS;
10880
        }
10881
      if (class == FPSCR_REGS
10882
          && ((GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER)
10883
              || (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == PLUS)))
10884
        return GENERAL_REGS;
10885
      if (REGCLASS_HAS_FP_REG (class)
10886
          && TARGET_SHMEDIA
10887
          && immediate_operand (x, mode)
10888
          && x != CONST0_RTX (GET_MODE (x))
10889
          && GET_MODE (x) != V4SFmode)
10890
        return GENERAL_REGS;
10891
      if ((mode == QImode || mode == HImode)
10892
          && TARGET_SHMEDIA && inqhi_operand (x, mode))
10893
        {
10894
          sri->icode = ((mode == QImode)
10895
                        ? CODE_FOR_reload_inqi : CODE_FOR_reload_inhi);
10896
          return NO_REGS;
10897
        }
10898
      if (TARGET_SHMEDIA && class == GENERAL_REGS
10899
          && (GET_CODE (x) == LABEL_REF || PIC_DIRECT_ADDR_P (x)))
10900
        return TARGET_REGS;
10901
    } /* end of input-only processing.  */
10902
 
10903
  if (((REGCLASS_HAS_FP_REG (class)
10904
        && (GET_CODE (x) == REG
10905
            && (GENERAL_OR_AP_REGISTER_P (REGNO (x))
10906
                || (FP_REGISTER_P (REGNO (x)) && mode == SImode
10907
                    && TARGET_FMOVD))))
10908
       || (REGCLASS_HAS_GENERAL_REG (class)
10909
           && GET_CODE (x) == REG
10910
           && FP_REGISTER_P (REGNO (x))))
10911
      && ! TARGET_SHMEDIA
10912
      && (mode == SFmode || mode == SImode))
10913
    return FPUL_REGS;
10914
  if ((class == FPUL_REGS
10915
       || (REGCLASS_HAS_FP_REG (class)
10916
           && ! TARGET_SHMEDIA && mode == SImode))
10917
      && (GET_CODE (x) == MEM
10918
          || (GET_CODE (x) == REG
10919
              && (REGNO (x) >= FIRST_PSEUDO_REGISTER
10920
                  || REGNO (x) == T_REG
10921
                  || system_reg_operand (x, VOIDmode)))))
10922
    {
10923
      if (class == FPUL_REGS)
10924
        return GENERAL_REGS;
10925
      return FPUL_REGS;
10926
    }
10927
  if ((class == TARGET_REGS
10928
       || (TARGET_SHMEDIA && class == SIBCALL_REGS))
10929
      && !EXTRA_CONSTRAINT_Csy (x)
10930
      && (GET_CODE (x) != REG || ! GENERAL_REGISTER_P (REGNO (x))))
10931
    return GENERAL_REGS;
10932
  if ((class == MAC_REGS || class == PR_REGS)
10933
      && GET_CODE (x) == REG && ! GENERAL_REGISTER_P (REGNO (x))
10934
      && class != REGNO_REG_CLASS (REGNO (x)))
10935
    return GENERAL_REGS;
10936
  if (class != GENERAL_REGS && GET_CODE (x) == REG
10937
      && TARGET_REGISTER_P (REGNO (x)))
10938
    return GENERAL_REGS;
10939
  return NO_REGS;
10940
}
10941
 
10942
enum sh_divide_strategy_e sh_div_strategy = SH_DIV_STRATEGY_DEFAULT;
10943
 
10944
#include "gt-sh.h"
