OpenCores Subversion repository: openrisc
URL: https://opencores.org/ocsvn/openrisc/openrisc/trunk
File: openrisc/trunk/gnu-old/gcc-4.2.2/gcc/config/sparc/sparc.c (blame for rev 154; lines from rev 38, author julius)
/* Subroutines for insn-output.c for SPARC.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com)
   64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
   at Cygnus Support.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "insn-codes.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "optabs.h"
#include "recog.h"
#include "toplev.h"
#include "ggc.h"
#include "tm_p.h"
#include "debug.h"
#include "target.h"
#include "target-def.h"
#include "cfglayout.h"
#include "tree-gimple.h"
#include "langhooks.h"

/* Processor costs */
static const
struct processor_costs cypress_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (2), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (5), /* fmov, fneg, fabs */
  COSTS_N_INSNS (5), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (7), /* fmul */
  COSTS_N_INSNS (37), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (63), /* fsqrts */
  COSTS_N_INSNS (63), /* fsqrtd */
  COSTS_N_INSNS (1), /* imul */
  COSTS_N_INSNS (1), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (1), /* idiv */
  COSTS_N_INSNS (1), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs supersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (0), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (3), /* fadd, fsub */
  COSTS_N_INSNS (3), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (3), /* fmul */
  COSTS_N_INSNS (6), /* fdivs */
  COSTS_N_INSNS (9), /* fdivd */
  COSTS_N_INSNS (12), /* fsqrts */
  COSTS_N_INSNS (12), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (4), /* idiv */
  COSTS_N_INSNS (4), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  1, /* shift penalty */
};

static const
struct processor_costs hypersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (8), /* fdivs */
  COSTS_N_INSNS (12), /* fdivd */
  COSTS_N_INSNS (17), /* fsqrts */
  COSTS_N_INSNS (17), /* fsqrtd */
  COSTS_N_INSNS (17), /* imul */
  COSTS_N_INSNS (17), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (17), /* idiv */
  COSTS_N_INSNS (17), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs sparclet_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (1), /* fdivs */
  COSTS_N_INSNS (1), /* fdivd */
  COSTS_N_INSNS (1), /* fsqrts */
  COSTS_N_INSNS (1), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (5), /* idiv */
  COSTS_N_INSNS (5), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs ultrasparc_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (2), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (13), /* fdivs */
  COSTS_N_INSNS (23), /* fdivd */
  COSTS_N_INSNS (13), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  2, /* imul bit factor */
  COSTS_N_INSNS (37), /* idiv */
  COSTS_N_INSNS (68), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  2, /* shift penalty */
};

static const
struct processor_costs ultrasparc3_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (5), /* fcmp */
  COSTS_N_INSNS (3), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (17), /* fdivs */
  COSTS_N_INSNS (20), /* fdivd */
  COSTS_N_INSNS (20), /* fsqrts */
  COSTS_N_INSNS (29), /* fsqrtd */
  COSTS_N_INSNS (6), /* imul */
  COSTS_N_INSNS (6), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (40), /* idiv */
  COSTS_N_INSNS (71), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (9), /* float load */
  COSTS_N_INSNS (8), /* fmov, fneg, fabs */
  COSTS_N_INSNS (8), /* fadd, fsub */
  COSTS_N_INSNS (26), /* fcmp */
  COSTS_N_INSNS (8), /* fmov, fmovr */
  COSTS_N_INSNS (29), /* fmul */
  COSTS_N_INSNS (54), /* fdivs */
  COSTS_N_INSNS (83), /* fdivd */
  COSTS_N_INSNS (100), /* fsqrts - not implemented in hardware */
  COSTS_N_INSNS (100), /* fsqrtd - not implemented in hardware */
  COSTS_N_INSNS (11), /* imul */
  COSTS_N_INSNS (11), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (72), /* idiv */
  COSTS_N_INSNS (72), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

const struct processor_costs *sparc_costs = &cypress_costs;
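/* All entries above are latencies expressed in COSTS_N_INSNS units.
   sparc_costs starts out pointing at the Cypress table; it is repointed
   at the end of sparc_override_options according to the processor
   selected by -mcpu/-mtune, and is consumed by the rtx-cost and
   scheduling hooks below.  */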

#ifdef HAVE_AS_RELAX_OPTION
/* If 'as' and 'ld' are relaxing tail call insns into branch always, use
   "or %o7,%g0,X; call Y; or X,%g0,%o7" always, so that it can be optimized.
   With sethi/jmp, neither 'as' nor 'ld' has an easy way to find out whether
   somebody branches between the sethi and jmp.  */
#define LEAF_SIBCALL_SLOT_RESERVED_P 1
#else
#define LEAF_SIBCALL_SLOT_RESERVED_P \
  ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
#endif

/* Global variables for machine-dependent things.  */

/* Size of frame.  Need to know this to emit return insns from leaf procedures.
   ACTUAL_FSIZE is set by sparc_compute_frame_size() which is called during the
   reload pass.  This is important as the value is later used for scheduling
   (to see what can go in a delay slot).
   APPARENT_FSIZE is the size of the stack less the register save area and less
   the outgoing argument area.  It is used when saving call preserved regs.  */
static HOST_WIDE_INT apparent_fsize;
static HOST_WIDE_INT actual_fsize;

/* Number of live general or floating point registers needed to be
   saved (as 4-byte quantities).  */
static int num_gfregs;

/* The alias set for prologue/epilogue register save/restore.  */
static GTY(()) int sparc_sr_alias_set;

/* The alias set for the structure return value.  */
static GTY(()) int struct_value_alias_set;

/* Save the operands last given to a compare for use when we
   generate a scc or bcc insn.  */
rtx sparc_compare_op0, sparc_compare_op1, sparc_compare_emitted;

/* Vector to say how input registers are mapped to output registers.
   HARD_FRAME_POINTER_REGNUM cannot be remapped by this function to
   eliminate it.  You must use -fomit-frame-pointer to get that.  */
char leaf_reg_remap[] =
{ 0, 1, 2, 3, 4, 5, 6, 7,
  -1, -1, -1, -1, -1, -1, 14, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  8, 9, 10, 11, 12, 13, -1, 15,

  32, 33, 34, 35, 36, 37, 38, 39,
  40, 41, 42, 43, 44, 45, 46, 47,
  48, 49, 50, 51, 52, 53, 54, 55,
  56, 57, 58, 59, 60, 61, 62, 63,
  64, 65, 66, 67, 68, 69, 70, 71,
  72, 73, 74, 75, 76, 77, 78, 79,
  80, 81, 82, 83, 84, 85, 86, 87,
  88, 89, 90, 91, 92, 93, 94, 95,
  96, 97, 98, 99, 100};
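/* Reading the table above: in a leaf function the register window is not
   shifted, so incoming registers %i0-%i5 and %i7 (hard regs 24-29 and 31)
   are renamed to the corresponding out registers %o0-%o5 and %o7 (8-13
   and 15), %sp (14) stays put, the globals and all float registers map
   to themselves, and -1 marks registers that must not appear.  */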

/* Vector, indexed by hard register number, which contains 1
   for a register that is allowable in a candidate for leaf
   function treatment.  */
char sparc_leaf_regs[] =
{ 1, 1, 1, 1, 1, 1, 1, 1,
  0, 0, 0, 0, 0, 0, 1, 0,
  0, 0, 0, 0, 0, 0, 0, 0,
  1, 1, 1, 1, 1, 1, 0, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1};

struct machine_function GTY(())
{
  /* Some local-dynamic TLS symbol name.  */
  const char *some_ld_name;

  /* True if the current function is leaf and uses only leaf regs,
     so that the SPARC leaf function optimization can be applied.
     Private version of current_function_uses_only_leaf_regs, see
     sparc_expand_prologue for the rationale.  */
  int leaf_function_p;

  /* True if the data calculated by sparc_expand_prologue are valid.  */
  bool prologue_data_valid_p;
};

#define sparc_leaf_function_p  cfun->machine->leaf_function_p
#define sparc_prologue_data_valid_p  cfun->machine->prologue_data_valid_p

/* Register we pretend to think the frame pointer is allocated to.
   Normally, this is %fp, but if we are in a leaf procedure, this
   is %sp+"something".  We record "something" separately as it may
   be too big for reg+constant addressing.  */
static rtx frame_base_reg;
static HOST_WIDE_INT frame_base_offset;

/* 1 if the next opcode is to be specially indented.  */
int sparc_indent_opcode = 0;

static bool sparc_handle_option (size_t, const char *, int);
static void sparc_init_modes (void);
static void scan_record_type (tree, int *, int *, int *);
static int function_arg_slotno (const CUMULATIVE_ARGS *, enum machine_mode,
                                tree, int, int, int *, int *);

static int supersparc_adjust_cost (rtx, rtx, rtx, int);
static int hypersparc_adjust_cost (rtx, rtx, rtx, int);

static void sparc_output_addr_vec (rtx);
static void sparc_output_addr_diff_vec (rtx);
static void sparc_output_deferred_case_vectors (void);
static rtx sparc_builtin_saveregs (void);
static int epilogue_renumber (rtx *, int);
static bool sparc_assemble_integer (rtx, unsigned int, int);
static int set_extends (rtx);
static void emit_pic_helper (void);
static void load_pic_register (bool);
static int save_or_restore_regs (int, int, rtx, int, int);
static void emit_save_or_restore_regs (int);
static void sparc_asm_function_prologue (FILE *, HOST_WIDE_INT);
static void sparc_asm_function_epilogue (FILE *, HOST_WIDE_INT);
#ifdef OBJECT_FORMAT_ELF
static void sparc_elf_asm_named_section (const char *, unsigned int, tree);
#endif

static int sparc_adjust_cost (rtx, rtx, rtx, int);
static int sparc_issue_rate (void);
static void sparc_sched_init (FILE *, int, int);
static int sparc_use_sched_lookahead (void);

static void emit_soft_tfmode_libcall (const char *, int, rtx *);
static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
static void emit_hard_tfmode_operation (enum rtx_code, rtx *);

static bool sparc_function_ok_for_sibcall (tree, tree);
static void sparc_init_libfuncs (void);
static void sparc_init_builtins (void);
static void sparc_vis_init_builtins (void);
static rtx sparc_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static tree sparc_fold_builtin (tree, tree, bool);
static int sparc_vis_mul8x16 (int, int);
static tree sparc_handle_vis_mul8x16 (int, tree, tree, tree);
static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
                                   HOST_WIDE_INT, tree);
static bool sparc_can_output_mi_thunk (tree, HOST_WIDE_INT,
                                       HOST_WIDE_INT, tree);
static struct machine_function * sparc_init_machine_status (void);
static bool sparc_cannot_force_const_mem (rtx);
static rtx sparc_tls_get_addr (void);
static rtx sparc_tls_got (void);
static const char *get_some_local_dynamic_name (void);
static int get_some_local_dynamic_name_1 (rtx *, void *);
static bool sparc_rtx_costs (rtx, int, int, int *);
static bool sparc_promote_prototypes (tree);
static rtx sparc_struct_value_rtx (tree, int);
static bool sparc_return_in_memory (tree, tree);
static bool sparc_strict_argument_naming (CUMULATIVE_ARGS *);
static tree sparc_gimplify_va_arg (tree, tree, tree *, tree *);
static bool sparc_vector_mode_supported_p (enum machine_mode);
static bool sparc_pass_by_reference (CUMULATIVE_ARGS *,
                                     enum machine_mode, tree, bool);
static int sparc_arg_partial_bytes (CUMULATIVE_ARGS *,
                                    enum machine_mode, tree, bool);
static void sparc_dwarf_handle_frame_unspec (const char *, rtx, int);
static void sparc_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static void sparc_file_end (void);
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
static const char *sparc_mangle_fundamental_type (tree);
#endif
#ifdef SUBTARGET_ATTRIBUTE_TABLE
const struct attribute_spec sparc_attribute_table[];
#endif

/* Option handling.  */

/* Parsed value.  */
enum cmodel sparc_cmodel;

char sparc_hard_reg_printed[8];

struct sparc_cpu_select sparc_select[] =
{
  /* switch     name,           tune    arch */
  { (char *)0,   "default",      1,      1 },
  { (char *)0,   "-mcpu=",       1,      1 },
  { (char *)0,   "-mtune=",      1,      0 },
  { 0, 0, 0, 0 }
};
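/* The third and fourth columns are set_tune_p and set_arch_p: -mcpu=
   selects both the scheduling model and the instruction set, while
   -mtune= changes only the scheduling model.  For example, -mcpu=v9
   turns on MASK_V9, whereas -mtune=ultrasparc alone merely sets
   sparc_cpu and leaves the ISA flags untouched.  */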

/* CPU type.  This is set from TARGET_CPU_DEFAULT and -m{cpu,tune}=xxx.  */
enum processor_type sparc_cpu;

/* Whether an FPU option was specified.  */
static bool fpu_option_set = false;

/* Initialize the GCC target structure.  */

/* The sparc default is to use .half rather than .short for aligned
   HI objects.  Use .word instead of .long on non-ELF systems.  */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#ifndef OBJECT_FORMAT_ELF
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#endif

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"

/* The target hook has to handle DI-mode values.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER sparc_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT sparc_sched_init
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS sparc_init_libfuncs
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS sparc_init_builtins

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN sparc_expand_builtin
#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN sparc_fold_builtin

#if TARGET_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS sparc_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_0

/* This is only needed for TARGET_ARCH64, but since PROMOTE_FUNCTION_MODE is a
   no-op for TARGET_ARCH32 this is ok.  Otherwise we'd need to add a runtime
   test for this value.  */
#undef TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true

/* This is only needed for TARGET_ARCH64, but since PROMOTE_FUNCTION_MODE is a
   no-op for TARGET_ARCH32 this is ok.  Otherwise we'd need to add a runtime
   test for this value.  */
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES sparc_promote_prototypes

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES sparc_arg_partial_bytes

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P sparc_vector_mode_supported_p

#undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
#define TARGET_DWARF_HANDLE_FRAME_UNSPEC sparc_dwarf_handle_frame_unspec

#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif

#ifdef SUBTARGET_ATTRIBUTE_TABLE
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
#endif

#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING SPARC_RELAXED_ORDERING

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS TARGET_DEFAULT
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION sparc_handle_option

#if TARGET_GNU_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL sparc_output_dwarf_dtprel
#endif

#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END sparc_file_end

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_FUNDAMENTAL_TYPE
#define TARGET_MANGLE_FUNDAMENTAL_TYPE sparc_mangle_fundamental_type
#endif

struct gcc_target targetm = TARGET_INITIALIZER;

/* Implement TARGET_HANDLE_OPTION.  */

static bool
sparc_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_mfpu:
    case OPT_mhard_float:
    case OPT_msoft_float:
      fpu_option_set = true;
      break;

    case OPT_mcpu_:
      sparc_select[1].string = arg;
      break;

    case OPT_mtune_:
      sparc_select[2].string = arg;
      break;
    }

  return true;
}
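/* For example, "-mcpu=niagara -mtune=ultrasparc3" records "niagara" in
   sparc_select[1] and "ultrasparc3" in sparc_select[2]; both strings
   are resolved against cpu_table in sparc_override_options below.
   Returning true means the option was accepted.  */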

/* Validate and override various options, and do some machine dependent
   initialization.  */

void
sparc_override_options (void)
{
  static struct code_model {
    const char *const name;
    const int value;
  } const cmodels[] = {
    { "32", CM_32 },
    { "medlow", CM_MEDLOW },
    { "medmid", CM_MEDMID },
    { "medany", CM_MEDANY },
    { "embmedany", CM_EMBMEDANY },
    { 0, 0 }
  };
  const struct code_model *cmodel;
  /* Map TARGET_CPU_DEFAULT to value for -m{arch,tune}=.  */
  static struct cpu_default {
    const int cpu;
    const char *const name;
  } const cpu_default[] = {
    /* There must be one entry here for each TARGET_CPU value.  */
    { TARGET_CPU_sparc, "cypress" },
    { TARGET_CPU_sparclet, "tsc701" },
    { TARGET_CPU_sparclite, "f930" },
    { TARGET_CPU_v8, "v8" },
    { TARGET_CPU_hypersparc, "hypersparc" },
    { TARGET_CPU_sparclite86x, "sparclite86x" },
    { TARGET_CPU_supersparc, "supersparc" },
    { TARGET_CPU_v9, "v9" },
    { TARGET_CPU_ultrasparc, "ultrasparc" },
    { TARGET_CPU_ultrasparc3, "ultrasparc3" },
    { TARGET_CPU_niagara, "niagara" },
    { 0, 0 }
  };
  const struct cpu_default *def;
  /* Table of values for -m{cpu,tune}=.  */
  static struct cpu_table {
    const char *const name;
    const enum processor_type processor;
    const int disable;
    const int enable;
  } const cpu_table[] = {
    { "v7",         PROCESSOR_V7, MASK_ISA, 0 },
    { "cypress",    PROCESSOR_CYPRESS, MASK_ISA, 0 },
    { "v8",         PROCESSOR_V8, MASK_ISA, MASK_V8 },
    /* TI TMS390Z55 supersparc */
    { "supersparc", PROCESSOR_SUPERSPARC, MASK_ISA, MASK_V8 },
    { "sparclite",  PROCESSOR_SPARCLITE, MASK_ISA, MASK_SPARCLITE },
    /* The Fujitsu MB86930 is the original sparclite chip, with no fpu.
       The Fujitsu MB86934 is the recent sparclite chip, with an fpu.  */
    { "f930",       PROCESSOR_F930, MASK_ISA|MASK_FPU, MASK_SPARCLITE },
    { "f934",       PROCESSOR_F934, MASK_ISA, MASK_SPARCLITE|MASK_FPU },
    { "hypersparc", PROCESSOR_HYPERSPARC, MASK_ISA, MASK_V8|MASK_FPU },
    { "sparclite86x",  PROCESSOR_SPARCLITE86X, MASK_ISA|MASK_FPU,
      MASK_SPARCLITE },
    { "sparclet",   PROCESSOR_SPARCLET, MASK_ISA, MASK_SPARCLET },
    /* TEMIC sparclet */
    { "tsc701",     PROCESSOR_TSC701, MASK_ISA, MASK_SPARCLET },
    { "v9",         PROCESSOR_V9, MASK_ISA, MASK_V9 },
    /* TI ultrasparc I, II, IIi */
    { "ultrasparc", PROCESSOR_ULTRASPARC, MASK_ISA, MASK_V9
    /* Although insns using %y are deprecated, it is a clear win on current
       ultrasparcs.  */
                                                    |MASK_DEPRECATED_V8_INSNS},
    /* TI ultrasparc III */
    /* ??? Check if %y issue still holds true in ultra3.  */
    { "ultrasparc3", PROCESSOR_ULTRASPARC3, MASK_ISA, MASK_V9|MASK_DEPRECATED_V8_INSNS},
    /* UltraSPARC T1 */
    { "niagara", PROCESSOR_NIAGARA, MASK_ISA, MASK_V9|MASK_DEPRECATED_V8_INSNS},
    { 0, 0, 0, 0 }
  };
  const struct cpu_table *cpu;
  const struct sparc_cpu_select *sel;
  int fpu;

#ifndef SPARC_BI_ARCH
  /* Check for unsupported architecture size.  */
  if (! TARGET_64BIT != DEFAULT_ARCH32_P)
    error ("%s is not supported by this configuration",
           DEFAULT_ARCH32_P ? "-m64" : "-m32");
#endif

  /* We force all 64bit archs to use 128 bit long double */
  if (TARGET_64BIT && ! TARGET_LONG_DOUBLE_128)
    {
      error ("-mlong-double-64 not allowed with -m64");
      target_flags |= MASK_LONG_DOUBLE_128;
    }

  /* Code model selection.  */
  sparc_cmodel = SPARC_DEFAULT_CMODEL;

#ifdef SPARC_BI_ARCH
  if (TARGET_ARCH32)
    sparc_cmodel = CM_32;
#endif

  if (sparc_cmodel_string != NULL)
    {
      if (TARGET_ARCH64)
        {
          for (cmodel = &cmodels[0]; cmodel->name; cmodel++)
            if (strcmp (sparc_cmodel_string, cmodel->name) == 0)
              break;
          if (cmodel->name == NULL)
            error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string);
          else
            sparc_cmodel = cmodel->value;
        }
      else
        error ("-mcmodel= is not supported on 32 bit systems");
    }

  fpu = target_flags & MASK_FPU; /* save current -mfpu status */

  /* Set the default CPU.  */
  for (def = &cpu_default[0]; def->name; ++def)
    if (def->cpu == TARGET_CPU_DEFAULT)
      break;
  gcc_assert (def->name);
  sparc_select[0].string = def->name;

  for (sel = &sparc_select[0]; sel->name; ++sel)
    {
      if (sel->string)
        {
          for (cpu = &cpu_table[0]; cpu->name; ++cpu)
            if (! strcmp (sel->string, cpu->name))
              {
                if (sel->set_tune_p)
                  sparc_cpu = cpu->processor;

                if (sel->set_arch_p)
                  {
                    target_flags &= ~cpu->disable;
                    target_flags |= cpu->enable;
                  }
                break;
              }

          if (! cpu->name)
            error ("bad value (%s) for %s switch", sel->string, sel->name);
        }
    }

  /* If -mfpu or -mno-fpu was explicitly used, don't override with
     the processor default.  */
  if (fpu_option_set)
    target_flags = (target_flags & ~MASK_FPU) | fpu;

  /* Don't allow -mvis if FPU is disabled.  */
  if (! TARGET_FPU)
    target_flags &= ~MASK_VIS;

  /* -mvis assumes UltraSPARC+, so we are sure v9 instructions
     are available.
     -m64 also implies v9.  */
  if (TARGET_VIS || TARGET_ARCH64)
    {
      target_flags |= MASK_V9;
      target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE);
    }

  /* Use the deprecated v8 insns for sparc64 in 32 bit mode.  */
  if (TARGET_V9 && TARGET_ARCH32)
    target_flags |= MASK_DEPRECATED_V8_INSNS;

  /* V8PLUS requires V9, makes no sense in 64 bit mode.  */
  if (! TARGET_V9 || TARGET_ARCH64)
    target_flags &= ~MASK_V8PLUS;

  /* Don't use stack biasing in 32 bit mode.  */
  if (TARGET_ARCH32)
    target_flags &= ~MASK_STACK_BIAS;

  /* Supply a default value for align_functions.  */
  if (align_functions == 0
      && (sparc_cpu == PROCESSOR_ULTRASPARC
          || sparc_cpu == PROCESSOR_ULTRASPARC3
          || sparc_cpu == PROCESSOR_NIAGARA))
    align_functions = 32;

  /* Validate PCC_STRUCT_RETURN.  */
  if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
    flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);

  /* Only use .uaxword when compiling for a 64-bit target.  */
  if (!TARGET_ARCH64)
    targetm.asm_out.unaligned_op.di = NULL;

  /* Do various machine dependent initializations.  */
  sparc_init_modes ();

  /* Acquire unique alias sets for our private stuff.  */
  sparc_sr_alias_set = new_alias_set ();
  struct_value_alias_set = new_alias_set ();

  /* Set up function hooks.  */
  init_machine_status = sparc_init_machine_status;

  switch (sparc_cpu)
    {
    case PROCESSOR_V7:
    case PROCESSOR_CYPRESS:
      sparc_costs = &cypress_costs;
      break;
    case PROCESSOR_V8:
    case PROCESSOR_SPARCLITE:
    case PROCESSOR_SUPERSPARC:
      sparc_costs = &supersparc_costs;
      break;
    case PROCESSOR_F930:
    case PROCESSOR_F934:
    case PROCESSOR_HYPERSPARC:
    case PROCESSOR_SPARCLITE86X:
      sparc_costs = &hypersparc_costs;
      break;
    case PROCESSOR_SPARCLET:
    case PROCESSOR_TSC701:
      sparc_costs = &sparclet_costs;
      break;
    case PROCESSOR_V9:
    case PROCESSOR_ULTRASPARC:
      sparc_costs = &ultrasparc_costs;
      break;
    case PROCESSOR_ULTRASPARC3:
      sparc_costs = &ultrasparc3_costs;
      break;
    case PROCESSOR_NIAGARA:
      sparc_costs = &niagara_costs;
      break;
    };

#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif
}

#ifdef SUBTARGET_ATTRIBUTE_TABLE
/* Table of valid machine attributes.  */
const struct attribute_spec sparc_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  SUBTARGET_ATTRIBUTE_TABLE,
  { NULL,        0, 0, false, false, false, NULL }
};
#endif

/* Miscellaneous utilities.  */

/* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
   or branch on register contents instructions.  */

int
v9_regcmp_p (enum rtx_code code)
{
  return (code == EQ || code == NE || code == GE || code == LT
          || code == LE || code == GT);
}
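/* These are exactly the signed comparisons against zero that the v9
   branch-on-register instructions (brz, brnz, brgez, brlz, brlez, brgz)
   and the movr conditional moves implement; unsigned comparisons have
   no register-contents form.  */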

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   sethi instruction.  */

int
fp_sethi_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && SPARC_SETHI_P (i);
    }

  return 0;
}

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   mov instruction.  */

int
fp_mov_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return SPARC_SIMM13_P (i);
    }

  return 0;
}

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a high/losum
   instruction sequence.  */

int
fp_high_losum_p (rtx op)
{
  /* The constraints calling this should only be in
     SFmode move insns, so any constant which cannot
     be moved using a single insn will do.  */
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && !SPARC_SETHI_P (i);
    }

  return 0;
}
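/* Taken together, the three predicates above classify an SFmode constant
   by the cheapest way to materialize its bit pattern in an integer
   register: a signed 13-bit immediate needs one mov, a value whose low
   10 bits are clear needs one sethi, and anything else needs the
   two-insn sethi/or sequence.  For instance, 1.0f is 0x3f800000, whose
   low 10 bits are zero, so fp_sethi_p accepts it.  */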

/* Expand a move instruction.  Return true if all work is done.  */

bool
sparc_expand_move (enum machine_mode mode, rtx *operands)
{
  /* Handle sets of MEM first.  */
  if (GET_CODE (operands[0]) == MEM)
    {
      /* 0 is a register (or a pair of registers) on SPARC.  */
      if (register_or_zero_operand (operands[1], mode))
        return false;

      if (!reload_in_progress)
        {
          operands[0] = validize_mem (operands[0]);
          operands[1] = force_reg (mode, operands[1]);
        }
    }

  /* Fixup TLS cases.  */
  if (TARGET_HAVE_TLS
      && CONSTANT_P (operands[1])
      && GET_CODE (operands[1]) != HIGH
      && sparc_tls_referenced_p (operands [1]))
    {
      rtx sym = operands[1];
      rtx addend = NULL;

      if (GET_CODE (sym) == CONST && GET_CODE (XEXP (sym, 0)) == PLUS)
        {
          addend = XEXP (XEXP (sym, 0), 1);
          sym = XEXP (XEXP (sym, 0), 0);
        }

      gcc_assert (SPARC_SYMBOL_REF_TLS_P (sym));

      sym = legitimize_tls_address (sym);
      if (addend)
        {
          sym = gen_rtx_PLUS (mode, sym, addend);
          sym = force_operand (sym, operands[0]);
        }
      operands[1] = sym;
    }

  /* Fixup PIC cases.  */
  if (flag_pic && CONSTANT_P (operands[1]))
    {
      if (pic_address_needs_scratch (operands[1]))
        operands[1] = legitimize_pic_address (operands[1], mode, 0);

      if (GET_CODE (operands[1]) == LABEL_REF && mode == SImode)
        {
          emit_insn (gen_movsi_pic_label_ref (operands[0], operands[1]));
          return true;
        }

      if (GET_CODE (operands[1]) == LABEL_REF && mode == DImode)
        {
          gcc_assert (TARGET_ARCH64);
          emit_insn (gen_movdi_pic_label_ref (operands[0], operands[1]));
          return true;
        }

      if (symbolic_operand (operands[1], mode))
        {
          operands[1] = legitimize_pic_address (operands[1],
                                                mode,
                                                (reload_in_progress ?
                                                 operands[0] :
                                                 NULL_RTX));
          return false;
        }
    }

  /* If we are trying to toss an integer constant into FP registers,
     or loading a FP or vector constant, force it into memory.  */
  if (CONSTANT_P (operands[1])
      && REG_P (operands[0])
      && (SPARC_FP_REG_P (REGNO (operands[0]))
          || SCALAR_FLOAT_MODE_P (mode)
          || VECTOR_MODE_P (mode)))
    {
      /* emit_group_store will send such bogosity to us when it is
         not storing directly into memory.  So fix this up to avoid
         crashes in output_constant_pool.  */
      if (operands [1] == const0_rtx)
        operands[1] = CONST0_RTX (mode);

      /* We can clear FP registers if TARGET_VIS, and always other regs.  */
      if ((TARGET_VIS || REGNO (operands[0]) < SPARC_FIRST_FP_REG)
          && const_zero_operand (operands[1], mode))
        return false;

      if (REGNO (operands[0]) < SPARC_FIRST_FP_REG
          /* We are able to build any SF constant in integer registers
             with at most 2 instructions.  */
          && (mode == SFmode
              /* And any DF constant in integer registers.  */
              || (mode == DFmode
                  && (reload_completed || reload_in_progress))))
        return false;

      operands[1] = force_const_mem (mode, operands[1]);
      if (!reload_in_progress)
        operands[1] = validize_mem (operands[1]);
      return false;
    }

  /* Accept non-constants and valid constants unmodified.  */
  if (!CONSTANT_P (operands[1])
      || GET_CODE (operands[1]) == HIGH
      || input_operand (operands[1], mode))
    return false;

  switch (mode)
    {
    case QImode:
      /* All QImode constants require only one insn, so proceed.  */
      break;

    case HImode:
    case SImode:
      sparc_emit_set_const32 (operands[0], operands[1]);
      return true;

    case DImode:
      /* input_operand should have filtered out 32-bit mode.  */
      sparc_emit_set_const64 (operands[0], operands[1]);
      return true;

    default:
      gcc_unreachable ();
    }

  return false;
}

/* Load OP1, a 32-bit constant, into OP0, a register.
   We know it can't be done in one insn when we get
   here, the move expander guarantees this.  */

void
sparc_emit_set_const32 (rtx op0, rtx op1)
{
  enum machine_mode mode = GET_MODE (op0);
  rtx temp;

  if (reload_in_progress || reload_completed)
    temp = op0;
  else
    temp = gen_reg_rtx (mode);

  if (GET_CODE (op1) == CONST_INT)
    {
      gcc_assert (!small_int_operand (op1, mode)
                  && !const_high_operand (op1, mode));

      /* Emit them as real moves instead of a HIGH/LO_SUM,
         this way CSE can see everything and reuse intermediate
         values if it wants.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
                              GEN_INT (INTVAL (op1)
                                & ~(HOST_WIDE_INT)0x3ff)));

      emit_insn (gen_rtx_SET (VOIDmode,
                              op0,
                              gen_rtx_IOR (mode, temp,
                                           GEN_INT (INTVAL (op1) & 0x3ff))));
    }
  else
    {
      /* A symbol, emit in the traditional way.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
                              gen_rtx_HIGH (mode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode,
                              op0, gen_rtx_LO_SUM (mode, temp, op1)));
    }
}
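/* Worked example: loading 0x12345678 first sets temp to
   0x12345678 & ~0x3ff == 0x12345400 (which matches a single sethi,
   since its low 10 bits are clear) and then ors in the remaining
   low 10 bits, 0x278.  */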

/* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
   If TEMP is nonzero, we are forbidden to use any other scratch
   registers.  Otherwise, we are allowed to generate them as needed.

   Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
   or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns).  */

void
sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
{
  rtx temp1, temp2, temp3, temp4, temp5;
  rtx ti_temp = 0;

  if (temp && GET_MODE (temp) == TImode)
    {
      ti_temp = temp;
      temp = gen_rtx_REG (DImode, REGNO (temp));
    }

  /* SPARC-V9 code-model support.  */
  switch (sparc_cmodel)
    {
    case CM_MEDLOW:
      /* The range spanned by all instructions in the object is less
         than 2^31 bytes (2GB) and the distance from any instruction
         to the location of the label _GLOBAL_OFFSET_TABLE_ is less
         than 2^31 bytes (2GB).

         The executable must be in the low 4TB of the virtual address
         space.

         sethi  %hi(symbol), %temp1
         or     %temp1, %lo(symbol), %reg  */
      if (temp)
        temp1 = temp;  /* op0 is allowed.  */
      else
        temp1 = gen_reg_rtx (DImode);

      emit_insn (gen_rtx_SET (VOIDmode, temp1, gen_rtx_HIGH (DImode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_LO_SUM (DImode, temp1, op1)));
      break;

    case CM_MEDMID:
      /* The range spanned by all instructions in the object is less
         than 2^31 bytes (2GB) and the distance from any instruction
         to the location of the label _GLOBAL_OFFSET_TABLE_ is less
         than 2^31 bytes (2GB).

         The executable must be in the low 16TB of the virtual address
         space.

         sethi  %h44(symbol), %temp1
         or     %temp1, %m44(symbol), %temp2
         sllx   %temp2, 12, %temp3
         or     %temp3, %l44(symbol), %reg  */
      if (temp)
        {
          temp1 = op0;
          temp2 = op0;
          temp3 = temp;  /* op0 is allowed.  */
        }
      else
        {
          temp1 = gen_reg_rtx (DImode);
          temp2 = gen_reg_rtx (DImode);
          temp3 = gen_reg_rtx (DImode);
        }

      emit_insn (gen_seth44 (temp1, op1));
      emit_insn (gen_setm44 (temp2, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp3,
                              gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12))));
      emit_insn (gen_setl44 (op0, temp3, op1));
      break;

    case CM_MEDANY:
      /* The range spanned by all instructions in the object is less
         than 2^31 bytes (2GB) and the distance from any instruction
         to the location of the label _GLOBAL_OFFSET_TABLE_ is less
         than 2^31 bytes (2GB).

         The executable can be placed anywhere in the virtual address
         space.

         sethi  %hh(symbol), %temp1
         sethi  %lm(symbol), %temp2
         or     %temp1, %hm(symbol), %temp3
         sllx   %temp3, 32, %temp4
         or     %temp4, %temp2, %temp5
         or     %temp5, %lo(symbol), %reg  */
      if (temp)
        {
          /* It is possible that one of the registers we got for operands[2]
             might coincide with that of operands[0] (which is why we made
             it TImode).  Pick the other one to use as our scratch.  */
          if (rtx_equal_p (temp, op0))
            {
              gcc_assert (ti_temp);
              temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
            }
          temp1 = op0;
          temp2 = temp;  /* op0 is _not_ allowed, see above.  */
          temp3 = op0;
          temp4 = op0;
          temp5 = op0;
        }
      else
        {
          temp1 = gen_reg_rtx (DImode);
          temp2 = gen_reg_rtx (DImode);
          temp3 = gen_reg_rtx (DImode);
          temp4 = gen_reg_rtx (DImode);
          temp5 = gen_reg_rtx (DImode);
        }

      emit_insn (gen_sethh (temp1, op1));
      emit_insn (gen_setlm (temp2, op1));
      emit_insn (gen_sethm (temp3, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp4,
                              gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
      emit_insn (gen_rtx_SET (VOIDmode, temp5,
                              gen_rtx_PLUS (DImode, temp4, temp2)));
      emit_insn (gen_setlo (op0, temp5, op1));
      break;

    case CM_EMBMEDANY:
      /* Old old old backwards compatibility kruft here.
         Essentially it is MEDLOW with a fixed 64-bit
         virtual base added to all data segment addresses.
         Text-segment stuff is computed like MEDANY, we can't
         reuse the code above because the relocation knobs
         look different.

         Data segment:  sethi   %hi(symbol), %temp1
                        add     %temp1, EMBMEDANY_BASE_REG, %temp2
                        or      %temp2, %lo(symbol), %reg  */
      if (data_segment_operand (op1, GET_MODE (op1)))
        {
          if (temp)
            {
              temp1 = temp;  /* op0 is allowed.  */
              temp2 = op0;
            }
          else
            {
              temp1 = gen_reg_rtx (DImode);
              temp2 = gen_reg_rtx (DImode);
            }

          emit_insn (gen_embmedany_sethi (temp1, op1));
          emit_insn (gen_embmedany_brsum (temp2, temp1));
          emit_insn (gen_embmedany_losum (op0, temp2, op1));
        }

      /* Text segment:  sethi   %uhi(symbol), %temp1
                        sethi   %hi(symbol), %temp2
                        or      %temp1, %ulo(symbol), %temp3
                        sllx    %temp3, 32, %temp4
                        or      %temp4, %temp2, %temp5
                        or      %temp5, %lo(symbol), %reg  */
      else
        {
          if (temp)
            {
              /* It is possible that one of the registers we got for operands[2]
                 might coincide with that of operands[0] (which is why we made
                 it TImode).  Pick the other one to use as our scratch.  */
              if (rtx_equal_p (temp, op0))
                {
                  gcc_assert (ti_temp);
                  temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
                }
              temp1 = op0;
              temp2 = temp;  /* op0 is _not_ allowed, see above.  */
              temp3 = op0;
              temp4 = op0;
              temp5 = op0;
            }
          else
            {
              temp1 = gen_reg_rtx (DImode);
              temp2 = gen_reg_rtx (DImode);
              temp3 = gen_reg_rtx (DImode);
              temp4 = gen_reg_rtx (DImode);
              temp5 = gen_reg_rtx (DImode);
            }

          emit_insn (gen_embmedany_textuhi (temp1, op1));
          emit_insn (gen_embmedany_texthi  (temp2, op1));
          emit_insn (gen_embmedany_textulo (temp3, temp1, op1));
          emit_insn (gen_rtx_SET (VOIDmode, temp4,
                                  gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
          emit_insn (gen_rtx_SET (VOIDmode, temp5,
                                  gen_rtx_PLUS (DImode, temp4, temp2)));
          emit_insn (gen_embmedany_textlo  (op0, temp5, op1));
        }
      break;

    default:
      gcc_unreachable ();
    }
}

#if HOST_BITS_PER_WIDE_INT == 32
void
sparc_emit_set_const64 (rtx op0 ATTRIBUTE_UNUSED, rtx op1 ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}
#else
/* These avoid problems when cross compiling.  If we do not
   go through all this hair then the optimizer will see
   invalid REG_EQUAL notes or in some cases none at all.  */
static rtx gen_safe_HIGH64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);

/* The optimizer is not to assume anything about exactly which bits are set
   for a HIGH; they are unspecified.  Unfortunately this leads to many missed
   optimizations during CSE.  We mask out the non-HIGH bits so the insn
   matches a plain movdi, to alleviate this problem.  */
static rtx
gen_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
{
  return gen_rtx_SET (VOIDmode, dest, GEN_INT (val & ~(HOST_WIDE_INT)0x3ff));
}

static rtx
gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
{
  return gen_rtx_SET (VOIDmode, dest, GEN_INT (val));
}

static rtx
gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
{
  return gen_rtx_IOR (DImode, src, GEN_INT (val));
}

static rtx
gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
{
  return gen_rtx_XOR (DImode, src, GEN_INT (val));
}

/* Worker routines for 64-bit constant formation on arch64.
   One of the key things to be doing in these emissions is
   to create as many temp REGs as possible.  This makes it
   possible for half-built constants to be used later when
   such values are similar to something required later on.
   Without doing this, the optimizer cannot see such
   opportunities.  */

static void sparc_emit_set_const64_quick1 (rtx, rtx,
                                           unsigned HOST_WIDE_INT, int);

static void
sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
                               unsigned HOST_WIDE_INT low_bits, int is_neg)
{
  unsigned HOST_WIDE_INT high_bits;

  if (is_neg)
    high_bits = (~low_bits) & 0xffffffff;
  else
    high_bits = low_bits;

  emit_insn (gen_safe_HIGH64 (temp, high_bits));
  if (!is_neg)
    {
      emit_insn (gen_rtx_SET (VOIDmode, op0,
                              gen_safe_OR64 (temp, (high_bits & 0x3ff))));
    }
  else
    {
      /* If we are XOR'ing with -1, then we should emit a one's complement
         instead.  This way the combiner will notice logical operations
         such as ANDN later on and substitute.  */
      if ((low_bits & 0x3ff) == 0x3ff)
        {
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_NOT (DImode, temp)));
        }
      else
        {
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_safe_XOR64 (temp,
                                                  (-(HOST_WIDE_INT)0x400
                                                   | (low_bits & 0x3ff)))));
        }
    }
}
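/* quick1 handles constants that sign-extend from 32 bits.  For the
   negative case the complement's upper bits go into TEMP via the sethi,
   and a single xor with a negative simm13 (whose sign extension sets
   bits 63..10) both merges the low 10 bits and flips everything back
   in one instruction.  */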

static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
                                           unsigned HOST_WIDE_INT, int);

static void
sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
                               unsigned HOST_WIDE_INT high_bits,
                               unsigned HOST_WIDE_INT low_immediate,
                               int shift_count)
{
  rtx temp2 = op0;

  if ((high_bits & 0xfffffc00) != 0)
    {
      emit_insn (gen_safe_HIGH64 (temp, high_bits));
      if ((high_bits & ~0xfffffc00) != 0)
        emit_insn (gen_rtx_SET (VOIDmode, op0,
                                gen_safe_OR64 (temp, (high_bits & 0x3ff))));
      else
        temp2 = temp;
    }
  else
    {
      emit_insn (gen_safe_SET64 (temp, high_bits));
      temp2 = temp;
    }

  /* Now shift it up into place.  */
  emit_insn (gen_rtx_SET (VOIDmode, op0,
                          gen_rtx_ASHIFT (DImode, temp2,
                                          GEN_INT (shift_count))));

  /* If there is a low immediate part piece, finish up by
     putting that in as well.  */
  if (low_immediate != 0)
    emit_insn (gen_rtx_SET (VOIDmode, op0,
                            gen_safe_OR64 (op0, low_immediate)));
}
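/* quick2 materializes HIGH_BITS with a sethi (plus an or if its low 10
   bits are nonzero, or a single mov if it fits in a simm13), shifts the
   result left by SHIFT_COUNT, and finally ors in LOW_IMMEDIATE: at most
   four instructions in total.  */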
1428
 
1429
static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
1430
                                            unsigned HOST_WIDE_INT);
1431
 
1432
/* Full 64-bit constant decomposition.  Even though this is the
1433
   'worst' case, we still optimize a few things away.  */
1434
static void
1435
sparc_emit_set_const64_longway (rtx op0, rtx temp,
1436
                                unsigned HOST_WIDE_INT high_bits,
1437
                                unsigned HOST_WIDE_INT low_bits)
1438
{
1439
  rtx sub_temp;
1440
 
1441
  if (reload_in_progress || reload_completed)
1442
    sub_temp = op0;
1443
  else
1444
    sub_temp = gen_reg_rtx (DImode);
1445
 
1446
  if ((high_bits & 0xfffffc00) != 0)
1447
    {
1448
      emit_insn (gen_safe_HIGH64 (temp, high_bits));
1449
      if ((high_bits & ~0xfffffc00) != 0)
1450
        emit_insn (gen_rtx_SET (VOIDmode,
1451
                                sub_temp,
1452
                                gen_safe_OR64 (temp, (high_bits & 0x3ff))));
1453
      else
1454
        sub_temp = temp;
1455
    }
1456
  else
1457
    {
1458
      emit_insn (gen_safe_SET64 (temp, high_bits));
1459
      sub_temp = temp;
1460
    }
1461
 
1462
  if (!reload_in_progress && !reload_completed)
1463
    {
1464
      rtx temp2 = gen_reg_rtx (DImode);
1465
      rtx temp3 = gen_reg_rtx (DImode);
1466
      rtx temp4 = gen_reg_rtx (DImode);
1467
 
1468
      emit_insn (gen_rtx_SET (VOIDmode, temp4,
1469
                              gen_rtx_ASHIFT (DImode, sub_temp,
1470
                                              GEN_INT (32))));
1471
 
1472
      emit_insn (gen_safe_HIGH64 (temp2, low_bits));
1473
      if ((low_bits & ~0xfffffc00) != 0)
1474
        {
1475
          emit_insn (gen_rtx_SET (VOIDmode, temp3,
1476
                                  gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
1477
          emit_insn (gen_rtx_SET (VOIDmode, op0,
1478
                                  gen_rtx_PLUS (DImode, temp4, temp3)));
1479
        }
1480
      else
1481
        {
1482
          emit_insn (gen_rtx_SET (VOIDmode, op0,
1483
                                  gen_rtx_PLUS (DImode, temp4, temp2)));
1484
        }
1485
    }
1486
  else
1487
    {
1488
      rtx low1 = GEN_INT ((low_bits >> (32 - 12))          & 0xfff);
1489
      rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12))     & 0xfff);
1490
      rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
1491
      int to_shift = 12;
1492
 
1493
      /* We are in the middle of reload, so this is really
1494
         painful.  However we do still make an attempt to
1495
         avoid emitting truly stupid code.  */
1496
      if (low1 != const0_rtx)
1497
        {
1498
          emit_insn (gen_rtx_SET (VOIDmode, op0,
1499
                                  gen_rtx_ASHIFT (DImode, sub_temp,
1500
                                                  GEN_INT (to_shift))));
1501
          emit_insn (gen_rtx_SET (VOIDmode, op0,
1502
                                  gen_rtx_IOR (DImode, op0, low1)));
1503
          sub_temp = op0;
1504
          to_shift = 12;
1505
        }
1506
      else
1507
        {
1508
          to_shift += 12;
1509
        }
1510
      if (low2 != const0_rtx)
1511
        {
1512
          emit_insn (gen_rtx_SET (VOIDmode, op0,
1513
                                  gen_rtx_ASHIFT (DImode, sub_temp,
1514
                                                  GEN_INT (to_shift))));
1515
          emit_insn (gen_rtx_SET (VOIDmode, op0,
1516
                                  gen_rtx_IOR (DImode, op0, low2)));
1517
          sub_temp = op0;
1518
          to_shift = 8;
1519
        }
1520
      else
1521
        {
1522
          to_shift += 8;
1523
        }
1524
      emit_insn (gen_rtx_SET (VOIDmode, op0,
1525
                              gen_rtx_ASHIFT (DImode, sub_temp,
1526
                                              GEN_INT (to_shift))));
1527
      if (low3 != const0_rtx)
1528
        emit_insn (gen_rtx_SET (VOIDmode, op0,
1529
                                gen_rtx_IOR (DImode, op0, low3)));
1530
      /* phew...  */
1531
    }
1532
}
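
/* A minimal, self-contained host-side sketch (not part of this file) of
   what the non-reload path above computes.  It mirrors the six emitted
   insns -- sethi/or on the high word, sllx by 32, sethi/or on the low
   word, final add -- in plain C arithmetic; all names are illustrative.  */
#include <stdint.h>

static uint64_t
model_set_const64_longway (uint64_t c)
{
  uint32_t high = (uint32_t) (c >> 32);
  uint32_t low = (uint32_t) c;
  uint64_t t = high & 0xfffffc00u;              /* sethi %hi(high_bits) */
  t |= high & 0x3ffu;                           /* or   t, %lo10, t     */
  uint64_t t4 = t << 32;                        /* sllx t, 32, t4       */
  uint64_t t3 = (uint64_t) (low & 0xfffffc00u)  /* sethi %hi(low_bits)  */
                | (low & 0x3ffu);               /* or   ..., %lo10, t3  */
  return t4 + t3;                               /* add  t4, t3, dest    */
}
/* For any C the identity model_set_const64_longway (C) == C holds,
   e.g. for 0x123456789abcdef0ull.  */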
1533
 
1534
/* Analyze a 64-bit constant for certain properties.  */
1535
static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
1536
                                    unsigned HOST_WIDE_INT,
1537
                                    int *, int *, int *);
1538
 
1539
static void
1540
analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
1541
                        unsigned HOST_WIDE_INT low_bits,
1542
                        int *hbsp, int *lbsp, int *abbasp)
1543
{
1544
  int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
1545
  int i;
1546
 
1547
  lowest_bit_set = highest_bit_set = -1;
1548
  i = 0;
1549
  do
1550
    {
1551
      if ((lowest_bit_set == -1)
1552
          && ((low_bits >> i) & 1))
1553
        lowest_bit_set = i;
1554
      if ((highest_bit_set == -1)
1555
          && ((high_bits >> (32 - i - 1)) & 1))
1556
        highest_bit_set = (64 - i - 1);
1557
    }
1558
  while (++i < 32
1559
         && ((highest_bit_set == -1)
1560
             || (lowest_bit_set == -1)));
1561
  if (i == 32)
1562
    {
1563
      i = 0;
1564
      do
1565
        {
1566
          if ((lowest_bit_set == -1)
1567
              && ((high_bits >> i) & 1))
1568
            lowest_bit_set = i + 32;
1569
          if ((highest_bit_set == -1)
1570
              && ((low_bits >> (32 - i - 1)) & 1))
1571
            highest_bit_set = 32 - i - 1;
1572
        }
1573
      while (++i < 32
1574
             && ((highest_bit_set == -1)
1575
                 || (lowest_bit_set == -1)));
1576
    }
1577
  /* If there are no bits set, this should have gone out
1578
     as one instruction!  */
1579
  gcc_assert (lowest_bit_set != -1 && highest_bit_set != -1);
1580
  all_bits_between_are_set = 1;
1581
  for (i = lowest_bit_set; i <= highest_bit_set; i++)
1582
    {
1583
      if (i < 32)
1584
        {
1585
          if ((low_bits & (1 << i)) != 0)
1586
            continue;
1587
        }
1588
      else
1589
        {
1590
          if ((high_bits & (1 << (i - 32))) != 0)
1591
            continue;
1592
        }
1593
      all_bits_between_are_set = 0;
1594
      break;
1595
    }
1596
  *hbsp = highest_bit_set;
1597
  *lbsp = lowest_bit_set;
1598
  *abbasp = all_bits_between_are_set;
1599
}
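
/* A minimal host-side sketch (not part of this file) of the same
   analysis using one 64-bit value instead of the high/low word pair;
   it assumes at least one bit is set, as the assert above guarantees.  */
#include <stdint.h>

static void
model_analyze_64bit_constant (uint64_t v, int *hbsp, int *lbsp, int *abbasp)
{
  int lo = 0, hi = 63;
  while (((v >> lo) & 1) == 0) lo++;            /* lowest set bit  */
  while (((v >> hi) & 1) == 0) hi--;            /* highest set bit */
  /* Mask covering bits lo..hi inclusive; the hi == 63 case is guarded
     to avoid an undefined 64-bit shift.  */
  uint64_t mask = (hi == 63 ? ~(uint64_t) 0
                            : (((uint64_t) 1 << (hi + 1)) - 1))
                  & ~(((uint64_t) 1 << lo) - 1);
  *hbsp = hi;
  *lbsp = lo;
  *abbasp = ((v & mask) == mask);               /* contiguous run? */
}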
1600
 
1601
static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);
1602
 
1603
static int
1604
const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
1605
                   unsigned HOST_WIDE_INT low_bits)
1606
{
1607
  int highest_bit_set, lowest_bit_set, all_bits_between_are_set;
1608
 
1609
  if (high_bits == 0
1610
      || high_bits == 0xffffffff)
1611
    return 1;
1612
 
1613
  analyze_64bit_constant (high_bits, low_bits,
1614
                          &highest_bit_set, &lowest_bit_set,
1615
                          &all_bits_between_are_set);
1616
 
1617
  if ((highest_bit_set == 63
1618
       || lowest_bit_set == 0)
1619
      && all_bits_between_are_set != 0)
1620
    return 1;
1621
 
1622
  if ((highest_bit_set - lowest_bit_set) < 21)
1623
    return 1;
1624
 
1625
  return 0;
1626
}
1627
 
1628
static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
1629
                                                        unsigned HOST_WIDE_INT,
1630
                                                        int, int);
1631
 
1632
static unsigned HOST_WIDE_INT
1633
create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
1634
                          unsigned HOST_WIDE_INT low_bits,
1635
                          int lowest_bit_set, int shift)
1636
{
1637
  HOST_WIDE_INT hi, lo;
1638
 
1639
  if (lowest_bit_set < 32)
1640
    {
1641
      lo = (low_bits >> lowest_bit_set) << shift;
1642
      hi = ((high_bits << (32 - lowest_bit_set)) << shift);
1643
    }
1644
  else
1645
    {
1646
      lo = 0;
1647
      hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
1648
    }
1649
  gcc_assert (! (hi & lo));
1650
  return (hi | lo);
1651
}
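
/* Worked example (illustrative): for the constant 0x0000003ff8000000 the
   set bits run from bit 27 up to bit 37, a span of 11 bits.  Shifting
   them down to bit 0 yields the focus value 0x7ff, which fits in a
   simm13, so the whole constant rebuilds with just
     mov  0x7ff, %reg
     sllx %reg, 27, %reg  */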
1652
 
1653
/* Here we are sure to be arch64 and this is an integer constant
1654
   being loaded into a register.  Emit the most efficient
1655
   insn sequence possible.  Detection of all the 1-insn cases
1656
   has been done already.  */
1657
void
1658
sparc_emit_set_const64 (rtx op0, rtx op1)
1659
{
1660
  unsigned HOST_WIDE_INT high_bits, low_bits;
1661
  int lowest_bit_set, highest_bit_set;
1662
  int all_bits_between_are_set;
1663
  rtx temp = 0;
1664
 
1665
  /* Sanity check that we know what we are working with.  */
1666
  gcc_assert (TARGET_ARCH64
1667
              && (GET_CODE (op0) == SUBREG
1668
                  || (REG_P (op0) && ! SPARC_FP_REG_P (REGNO (op0)))));
1669
 
1670
  if (reload_in_progress || reload_completed)
1671
    temp = op0;
1672
 
1673
  if (GET_CODE (op1) != CONST_INT)
1674
    {
1675
      sparc_emit_set_symbolic_const64 (op0, op1, temp);
1676
      return;
1677
    }
1678
 
1679
  if (! temp)
1680
    temp = gen_reg_rtx (DImode);
1681
 
1682
  high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
1683
  low_bits = (INTVAL (op1) & 0xffffffff);
1684
 
1685
  /* low_bits   bits 0  --> 31
1686
     high_bits  bits 32 --> 63  */
1687
 
1688
  analyze_64bit_constant (high_bits, low_bits,
1689
                          &highest_bit_set, &lowest_bit_set,
1690
                          &all_bits_between_are_set);
1691
 
1692
  /* First try for a 2-insn sequence.  */
1693
 
1694
  /* These situations are preferred because the optimizer can
1695
   * do more things with them:
1696
   * 1) mov     -1, %reg
1697
   *    sllx    %reg, shift, %reg
1698
   * 2) mov     -1, %reg
1699
   *    srlx    %reg, shift, %reg
1700
   * 3) mov     some_small_const, %reg
1701
   *    sllx    %reg, shift, %reg
1702
   */
1703
  if (((highest_bit_set == 63
1704
        || lowest_bit_set == 0)
1705
       && all_bits_between_are_set != 0)
1706
      || ((highest_bit_set - lowest_bit_set) < 12))
1707
    {
1708
      HOST_WIDE_INT the_const = -1;
1709
      int shift = lowest_bit_set;
1710
 
1711
      if ((highest_bit_set != 63
1712
           && lowest_bit_set != 0)
1713
          || all_bits_between_are_set == 0)
1714
        {
1715
          the_const =
1716
            create_simple_focus_bits (high_bits, low_bits,
1717
                                      lowest_bit_set, 0);
1718
        }
1719
      else if (lowest_bit_set == 0)
1720
        shift = -(63 - highest_bit_set);
1721
 
1722
      gcc_assert (SPARC_SIMM13_P (the_const));
1723
      gcc_assert (shift != 0);
1724
 
1725
      emit_insn (gen_safe_SET64 (temp, the_const));
1726
      if (shift > 0)
1727
        emit_insn (gen_rtx_SET (VOIDmode,
1728
                                op0,
1729
                                gen_rtx_ASHIFT (DImode,
1730
                                                temp,
1731
                                                GEN_INT (shift))));
1732
      else if (shift < 0)
1733
        emit_insn (gen_rtx_SET (VOIDmode,
1734
                                op0,
1735
                                gen_rtx_LSHIFTRT (DImode,
1736
                                                  temp,
1737
                                                  GEN_INT (-shift))));
1738
      return;
1739
    }
1740
 
1741
  /* Now a range of 22 or fewer bits set somewhere.
1742
   * 1) sethi   %hi(focus_bits), %reg
1743
   *    sllx    %reg, shift, %reg
1744
   * 2) sethi   %hi(focus_bits), %reg
1745
   *    srlx    %reg, shift, %reg
1746
   */
1747
  if ((highest_bit_set - lowest_bit_set) < 21)
1748
    {
1749
      unsigned HOST_WIDE_INT focus_bits =
1750
        create_simple_focus_bits (high_bits, low_bits,
1751
                                  lowest_bit_set, 10);
1752
 
1753
      gcc_assert (SPARC_SETHI_P (focus_bits));
1754
      gcc_assert (lowest_bit_set != 10);
1755
 
1756
      emit_insn (gen_safe_HIGH64 (temp, focus_bits));
1757
 
1758
      /* If lowest_bit_set == 10 then a sethi alone could have done it.  */
1759
      if (lowest_bit_set < 10)
1760
        emit_insn (gen_rtx_SET (VOIDmode,
1761
                                op0,
1762
                                gen_rtx_LSHIFTRT (DImode, temp,
1763
                                                  GEN_INT (10 - lowest_bit_set))));
1764
      else if (lowest_bit_set > 10)
1765
        emit_insn (gen_rtx_SET (VOIDmode,
1766
                                op0,
1767
                                gen_rtx_ASHIFT (DImode, temp,
1768
                                                GEN_INT (lowest_bit_set - 10))));
1769
      return;
1770
    }
1771
 
1772
  /* 1) sethi   %hi(low_bits), %reg
1773
   *    or      %reg, %lo(low_bits), %reg
1774
   * 2) sethi   %hi(~low_bits), %reg
1775
   *    xor     %reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
1776
   */
1777
  if (high_bits == 0
1778
      || high_bits == 0xffffffff)
1779
    {
1780
      sparc_emit_set_const64_quick1 (op0, temp, low_bits,
1781
                                     (high_bits == 0xffffffff));
1782
      return;
1783
    }
1784
 
1785
  /* Now, try 3-insn sequences.  */
1786
 
1787
  /* 1) sethi   %hi(high_bits), %reg
1788
   *    or      %reg, %lo(high_bits), %reg
1789
   *    sllx    %reg, 32, %reg
1790
   */
1791
  if (low_bits == 0)
1792
    {
1793
      sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
1794
      return;
1795
    }
1796
 
1797
  /* We may be able to do something quick
1798
     when the constant is negated, so try that.  */
1799
  if (const64_is_2insns ((~high_bits) & 0xffffffff,
1800
                         (~low_bits) & 0xfffffc00))
1801
    {
1802
      /* NOTE: The trailing bits get XOR'd so we need the
1803
         non-negated bits, not the negated ones.  */
1804
      unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;
1805
 
1806
      if ((((~high_bits) & 0xffffffff) == 0
1807
           && ((~low_bits) & 0x80000000) == 0)
1808
          || (((~high_bits) & 0xffffffff) == 0xffffffff
1809
              && ((~low_bits) & 0x80000000) != 0))
1810
        {
1811
          unsigned HOST_WIDE_INT fast_int = (~low_bits & 0xffffffff);
1812
 
1813
          if ((SPARC_SETHI_P (fast_int)
1814
               && (~high_bits & 0xffffffff) == 0)
1815
              || SPARC_SIMM13_P (fast_int))
1816
            emit_insn (gen_safe_SET64 (temp, fast_int));
1817
          else
1818
            sparc_emit_set_const64 (temp, GEN_INT (fast_int));
1819
        }
1820
      else
1821
        {
1822
          rtx negated_const;
1823
          negated_const = GEN_INT (((~low_bits) & 0xfffffc00) |
1824
                                   (((HOST_WIDE_INT)((~high_bits) & 0xffffffff))<<32));
1825
          sparc_emit_set_const64 (temp, negated_const);
1826
        }
1827
 
1828
      /* If we are XOR'ing with -1, then we should emit a one's complement
1829
         instead.  This way the combiner will notice logical operations
1830
         such as ANDN later on and substitute.  */
1831
      if (trailing_bits == 0x3ff)
1832
        {
1833
          emit_insn (gen_rtx_SET (VOIDmode, op0,
1834
                                  gen_rtx_NOT (DImode, temp)));
1835
        }
1836
      else
1837
        {
1838
          emit_insn (gen_rtx_SET (VOIDmode,
1839
                                  op0,
1840
                                  gen_safe_XOR64 (temp,
1841
                                                  (-0x400 | trailing_bits))));
1842
        }
1843
      return;
1844
    }
1845
 
1846
  /* 1) sethi   %hi(xxx), %reg
1847
   *    or      %reg, %lo(xxx), %reg
1848
   *    sllx    %reg, yyy, %reg
1849
   *
1850
   * ??? This is just a generalized version of the low_bits==0
1851
   * thing above, FIXME...
1852
   */
1853
  if ((highest_bit_set - lowest_bit_set) < 32)
1854
    {
1855
      unsigned HOST_WIDE_INT focus_bits =
1856
        create_simple_focus_bits (high_bits, low_bits,
1857
                                  lowest_bit_set, 0);
1858
 
1859
      /* We can't get here in this state.  */
1860
      gcc_assert (highest_bit_set >= 32 && lowest_bit_set < 32);
1861
 
1862
      /* So what we know is that the set bits straddle the
1863
         middle of the 64-bit word.  */
1864
      sparc_emit_set_const64_quick2 (op0, temp,
1865
                                     focus_bits, 0,
1866
                                     lowest_bit_set);
1867
      return;
1868
    }
1869
 
1870
  /* 1) sethi   %hi(high_bits), %reg
1871
   *    or      %reg, %lo(high_bits), %reg
1872
   *    sllx    %reg, 32, %reg
1873
   *    or      %reg, low_bits, %reg
1874
   */
1875
  if (SPARC_SIMM13_P(low_bits)
1876
      && ((int)low_bits > 0))
1877
    {
1878
      sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
1879
      return;
1880
    }
1881
 
1882
  /* The easiest way when all else fails, is full decomposition.  */
1883
#if 0
1884
  printf ("sparc_emit_set_const64: Hard constant [%08lx%08lx] neg[%08lx%08lx]\n",
1885
          high_bits, low_bits, ~high_bits, ~low_bits);
1886
#endif
1887
  sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
1888
}
1889
#endif /* HOST_BITS_PER_WIDE_INT == 32 */
1890
 
1891
/* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
1892
   return the mode to be used for the comparison.  For floating-point,
1893
   CCFP[E]mode is used.  CC_NOOVmode should be used when the first operand
1894
   is a PLUS, MINUS, NEG, or ASHIFT.  CCmode should be used when no special
1895
   processing is needed.  */
1896
 
1897
enum machine_mode
1898
select_cc_mode (enum rtx_code op, rtx x, rtx y ATTRIBUTE_UNUSED)
1899
{
1900
  if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
1901
    {
1902
      switch (op)
1903
        {
1904
        case EQ:
1905
        case NE:
1906
        case UNORDERED:
1907
        case ORDERED:
1908
        case UNLT:
1909
        case UNLE:
1910
        case UNGT:
1911
        case UNGE:
1912
        case UNEQ:
1913
        case LTGT:
1914
          return CCFPmode;
1915
 
1916
        case LT:
1917
        case LE:
1918
        case GT:
1919
        case GE:
1920
          return CCFPEmode;
1921
 
1922
        default:
1923
          gcc_unreachable ();
1924
        }
1925
    }
1926
  else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
1927
           || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
1928
    {
1929
      if (TARGET_ARCH64 && GET_MODE (x) == DImode)
1930
        return CCX_NOOVmode;
1931
      else
1932
        return CC_NOOVmode;
1933
    }
1934
  else
1935
    {
1936
      if (TARGET_ARCH64 && GET_MODE (x) == DImode)
1937
        return CCXmode;
1938
      else
1939
        return CCmode;
1940
    }
1941
}
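
/* Illustrative examples: on 64-bit, a DImode 'a > b' compare selects
   CCXmode; the same compare of an addition result against zero (first
   operand a PLUS) selects CCX_NOOVmode, since the overflow bit is
   meaningless there; and a float 'a > b' needs CCFPEmode because GT
   must signal on unordered operands, while the quiet UNGT gets
   CCFPmode.  */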
1942
 
1943
/* X and Y are two things to compare using CODE.  Emit the compare insn and
1944
   return the rtx for the cc reg in the proper mode.  */
1945
 
1946
rtx
1947
gen_compare_reg (enum rtx_code code)
1948
{
1949
  rtx x = sparc_compare_op0;
1950
  rtx y = sparc_compare_op1;
1951
  enum machine_mode mode = SELECT_CC_MODE (code, x, y);
1952
  rtx cc_reg;
1953
 
1954
  if (sparc_compare_emitted != NULL_RTX)
1955
    {
1956
      cc_reg = sparc_compare_emitted;
1957
      sparc_compare_emitted = NULL_RTX;
1958
      return cc_reg;
1959
    }
1960
 
1961
  /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
1962
     fcc regs (cse can't tell they're really call clobbered regs and will
1963
     remove a duplicate comparison even if there is an intervening function
1964
     call - it will then try to reload the cc reg via an int reg which is why
1965
     we need the movcc patterns).  It is possible to provide the movcc
1966
     patterns by using the ldxfsr/stxfsr v9 insns.  I tried it: you need two
1967
     registers (say %g1,%g5) and it takes about 6 insns.  A better fix would be
1968
     to tell cse that CCFPE mode registers (even pseudos) are call
1969
     clobbered.  */
1970
 
1971
  /* ??? This is an experiment.  Rather than making changes to cse which may
1972
     or may not be easy/clean, we do our own cse.  This is possible because
1973
     we will generate hard registers.  Cse knows they're call clobbered (it
1974
     doesn't know the same thing about pseudos). If we guess wrong, no big
1975
     deal, but if we win, great!  */
1976
 
1977
  if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
1978
#if 1 /* experiment */
1979
    {
1980
      int reg;
1981
      /* We cycle through the registers to ensure they're all exercised.  */
1982
      static int next_fcc_reg = 0;
1983
      /* Previous x,y for each fcc reg.  */
1984
      static rtx prev_args[4][2];
1985
 
1986
      /* Scan prev_args for x,y.  */
1987
      for (reg = 0; reg < 4; reg++)
1988
        if (prev_args[reg][0] == x && prev_args[reg][1] == y)
1989
          break;
1990
      if (reg == 4)
1991
        {
1992
          reg = next_fcc_reg;
1993
          prev_args[reg][0] = x;
1994
          prev_args[reg][1] = y;
1995
          next_fcc_reg = (next_fcc_reg + 1) & 3;
1996
        }
1997
      cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
1998
    }
1999
#else
2000
    cc_reg = gen_reg_rtx (mode);
2001
#endif /* ! experiment */
2002
  else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2003
    cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
2004
  else
2005
    cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
2006
 
2007
  emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
2008
                          gen_rtx_COMPARE (mode, x, y)));
2009
 
2010
  return cc_reg;
2011
}
2012
 
2013
/* This function is used for v9 only.
2014
   CODE is the code for an Scc's comparison.
2015
   OPERANDS[0] is the target of the Scc insn.
2016
   OPERANDS[1] is the value we compare against const0_rtx (which hasn't
2017
   been generated yet).
2018
 
2019
   This function is needed to turn
2020
 
2021
           (set (reg:SI 110)
2022
               (gt (reg:CCX 100 %icc)
2023
                   (const_int 0)))
2024
   into
2025
           (set (reg:SI 110)
2026
               (gt:DI (reg:CCX 100 %icc)
2027
                   (const_int 0)))
2028
 
2029
   I.e., the instruction recognizer needs to see the mode of the comparison to
2030
   find the right instruction. We could use "gt:DI" right in the
2031
   define_expand, but leaving it out allows us to handle DI, SI, etc.
2032
 
2033
   We refer to the global sparc compare operands sparc_compare_op0 and
2034
   sparc_compare_op1.  */
2035
 
2036
int
2037
gen_v9_scc (enum rtx_code compare_code, register rtx *operands)
2038
{
2039
  if (! TARGET_ARCH64
2040
      && (GET_MODE (sparc_compare_op0) == DImode
2041
          || GET_MODE (operands[0]) == DImode))
2042
    return 0;
2043
 
2044
  /* Try to use the movrCC insns.  */
2045
  if (TARGET_ARCH64
2046
      && GET_MODE_CLASS (GET_MODE (sparc_compare_op0)) == MODE_INT
2047
      && sparc_compare_op1 == const0_rtx
2048
      && v9_regcmp_p (compare_code))
2049
    {
2050
      rtx op0 = sparc_compare_op0;
2051
      rtx temp;
2052
 
2053
      /* Special case for op0 != 0.  This can be done with one instruction if
2054
         operands[0] == sparc_compare_op0.  */
2055
 
2056
      if (compare_code == NE
2057
          && GET_MODE (operands[0]) == DImode
2058
          && rtx_equal_p (op0, operands[0]))
2059
        {
2060
          emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2061
                              gen_rtx_IF_THEN_ELSE (DImode,
2062
                                       gen_rtx_fmt_ee (compare_code, DImode,
2063
                                                       op0, const0_rtx),
2064
                                       const1_rtx,
2065
                                       operands[0])));
2066
          return 1;
2067
        }
2068
 
2069
      if (reg_overlap_mentioned_p (operands[0], op0))
2070
        {
2071
          /* Handle the case where operands[0] == sparc_compare_op0.
2072
             We "early clobber" the result.  */
2073
          op0 = gen_reg_rtx (GET_MODE (sparc_compare_op0));
2074
          emit_move_insn (op0, sparc_compare_op0);
2075
        }
2076
 
2077
      emit_insn (gen_rtx_SET (VOIDmode, operands[0], const0_rtx));
2078
      if (GET_MODE (op0) != DImode)
2079
        {
2080
          temp = gen_reg_rtx (DImode);
2081
          convert_move (temp, op0, 0);
2082
        }
2083
      else
2084
        temp = op0;
2085
      emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2086
                          gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
2087
                                   gen_rtx_fmt_ee (compare_code, DImode,
2088
                                                   temp, const0_rtx),
2089
                                   const1_rtx,
2090
                                   operands[0])));
2091
      return 1;
2092
    }
2093
  else
2094
    {
2095
      operands[1] = gen_compare_reg (compare_code);
2096
 
2097
      switch (GET_MODE (operands[1]))
2098
        {
2099
          case CCmode :
2100
          case CCXmode :
2101
          case CCFPEmode :
2102
          case CCFPmode :
2103
            break;
2104
          default :
2105
            gcc_unreachable ();
2106
        }
2107
      emit_insn (gen_rtx_SET (VOIDmode, operands[0], const0_rtx));
2108
      emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2109
                          gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
2110
                                   gen_rtx_fmt_ee (compare_code,
2111
                                                   GET_MODE (operands[1]),
2112
                                                   operands[1], const0_rtx),
2113
                                    const1_rtx, operands[0])));
2114
      return 1;
2115
    }
2116
}
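
/* Illustrative example: with sparc_compare_op1 == const0_rtx and a
   DImode sparc_compare_op0, 'r = (x > 0)' comes out as the v9
   register-conditional pair
     mov    0, r
     movrgz x, 1, r
   never touching the condition codes.  */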
2117
 
2118
/* Emit a conditional jump insn for the v9 architecture using comparison code
2119
   CODE and jump target LABEL.
2120
   This function exists to take advantage of the v9 brxx insns.  */
2121
 
2122
void
2123
emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
2124
{
2125
  gcc_assert (sparc_compare_emitted == NULL_RTX);
2126
  emit_jump_insn (gen_rtx_SET (VOIDmode,
2127
                           pc_rtx,
2128
                           gen_rtx_IF_THEN_ELSE (VOIDmode,
2129
                                    gen_rtx_fmt_ee (code, GET_MODE (op0),
2130
                                                    op0, const0_rtx),
2131
                                    gen_rtx_LABEL_REF (VOIDmode, label),
2132
                                    pc_rtx)));
2133
}
2134
 
2135
/* Generate a DFmode part of a hard TFmode register.
2136
   REG is the TFmode hard register, LOW is 1 for the
2137
   low 64 bits of the register and 0 otherwise.
2138
 */
2139
rtx
2140
gen_df_reg (rtx reg, int low)
2141
{
2142
  int regno = REGNO (reg);
2143
 
2144
  if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
2145
    regno += (TARGET_ARCH64 && regno < 32) ? 1 : 2;
2146
  return gen_rtx_REG (DFmode, regno);
2147
}
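
/* Illustrative example: SPARC is big-endian, so for the TFmode register
   %f4 (regno 36) the high DFmode half is %f4 itself while the low half
   is regno 38, i.e. %f6; for TFmode held in integer registers on 64-bit
   the step is 1 instead of 2.  */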
2148
 
2149
/* Generate a call to FUNC with OPERANDS.  Operand 0 is the return value.
2150
   Unlike normal calls, TFmode operands are passed by reference.  It is
2151
   assumed that no more than 3 operands are required.  */
2152
 
2153
static void
2154
emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
2155
{
2156
  rtx ret_slot = NULL, arg[3], func_sym;
2157
  int i;
2158
 
2159
  /* We only expect to be called for conversions, unary, and binary ops.  */
2160
  gcc_assert (nargs == 2 || nargs == 3);
2161
 
2162
  for (i = 0; i < nargs; ++i)
2163
    {
2164
      rtx this_arg = operands[i];
2165
      rtx this_slot;
2166
 
2167
      /* TFmode arguments and return values are passed by reference.  */
2168
      if (GET_MODE (this_arg) == TFmode)
2169
        {
2170
          int force_stack_temp;
2171
 
2172
          force_stack_temp = 0;
2173
          if (TARGET_BUGGY_QP_LIB && i == 0)
2174
            force_stack_temp = 1;
2175
 
2176
          if (GET_CODE (this_arg) == MEM
2177
              && ! force_stack_temp)
2178
            this_arg = XEXP (this_arg, 0);
2179
          else if (CONSTANT_P (this_arg)
2180
                   && ! force_stack_temp)
2181
            {
2182
              this_slot = force_const_mem (TFmode, this_arg);
2183
              this_arg = XEXP (this_slot, 0);
2184
            }
2185
          else
2186
            {
2187
              this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
2188
 
2189
              /* Operand 0 is the return value.  We'll copy it out later.  */
2190
              if (i > 0)
2191
                emit_move_insn (this_slot, this_arg);
2192
              else
2193
                ret_slot = this_slot;
2194
 
2195
              this_arg = XEXP (this_slot, 0);
2196
            }
2197
        }
2198
 
2199
      arg[i] = this_arg;
2200
    }
2201
 
2202
  func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);
2203
 
2204
  if (GET_MODE (operands[0]) == TFmode)
2205
    {
2206
      if (nargs == 2)
2207
        emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 2,
2208
                           arg[0], GET_MODE (arg[0]),
2209
                           arg[1], GET_MODE (arg[1]));
2210
      else
2211
        emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 3,
2212
                           arg[0], GET_MODE (arg[0]),
2213
                           arg[1], GET_MODE (arg[1]),
2214
                           arg[2], GET_MODE (arg[2]));
2215
 
2216
      if (ret_slot)
2217
        emit_move_insn (operands[0], ret_slot);
2218
    }
2219
  else
2220
    {
2221
      rtx ret;
2222
 
2223
      gcc_assert (nargs == 2);
2224
 
2225
      ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
2226
                                     GET_MODE (operands[0]), 1,
2227
                                     arg[1], GET_MODE (arg[1]));
2228
 
2229
      if (ret != operands[0])
2230
        emit_move_insn (operands[0], ret);
2231
    }
2232
}
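
/* Illustrative sketch of the resulting ABI call: a TFmode addition
   'c = a + b' lowers to the pass-by-reference quad routine
     _Qp_add (&c, &a, &b);
   where the address of the result slot travels as the first argument
   and is copied back into operand 0 afterwards.  */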
2233
 
2234
/* Expand soft-float TFmode calls to sparc abi routines.  */
2235
 
2236
static void
2237
emit_soft_tfmode_binop (enum rtx_code code, rtx *operands)
2238
{
2239
  const char *func;
2240
 
2241
  switch (code)
2242
    {
2243
    case PLUS:
2244
      func = "_Qp_add";
2245
      break;
2246
    case MINUS:
2247
      func = "_Qp_sub";
2248
      break;
2249
    case MULT:
2250
      func = "_Qp_mul";
2251
      break;
2252
    case DIV:
2253
      func = "_Qp_div";
2254
      break;
2255
    default:
2256
      gcc_unreachable ();
2257
    }
2258
 
2259
  emit_soft_tfmode_libcall (func, 3, operands);
2260
}
2261
 
2262
static void
2263
emit_soft_tfmode_unop (enum rtx_code code, rtx *operands)
2264
{
2265
  const char *func;
2266
 
2267
  gcc_assert (code == SQRT);
2268
  func = "_Qp_sqrt";
2269
 
2270
  emit_soft_tfmode_libcall (func, 2, operands);
2271
}
2272
 
2273
static void
2274
emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands)
2275
{
2276
  const char *func;
2277
 
2278
  switch (code)
2279
    {
2280
    case FLOAT_EXTEND:
2281
      switch (GET_MODE (operands[1]))
2282
        {
2283
        case SFmode:
2284
          func = "_Qp_stoq";
2285
          break;
2286
        case DFmode:
2287
          func = "_Qp_dtoq";
2288
          break;
2289
        default:
2290
          gcc_unreachable ();
2291
        }
2292
      break;
2293
 
2294
    case FLOAT_TRUNCATE:
2295
      switch (GET_MODE (operands[0]))
2296
        {
2297
        case SFmode:
2298
          func = "_Qp_qtos";
2299
          break;
2300
        case DFmode:
2301
          func = "_Qp_qtod";
2302
          break;
2303
        default:
2304
          gcc_unreachable ();
2305
        }
2306
      break;
2307
 
2308
    case FLOAT:
2309
      switch (GET_MODE (operands[1]))
2310
        {
2311
        case SImode:
2312
          func = "_Qp_itoq";
2313
          break;
2314
        case DImode:
2315
          func = "_Qp_xtoq";
2316
          break;
2317
        default:
2318
          gcc_unreachable ();
2319
        }
2320
      break;
2321
 
2322
    case UNSIGNED_FLOAT:
2323
      switch (GET_MODE (operands[1]))
2324
        {
2325
        case SImode:
2326
          func = "_Qp_uitoq";
2327
          break;
2328
        case DImode:
2329
          func = "_Qp_uxtoq";
2330
          break;
2331
        default:
2332
          gcc_unreachable ();
2333
        }
2334
      break;
2335
 
2336
    case FIX:
2337
      switch (GET_MODE (operands[0]))
2338
        {
2339
        case SImode:
2340
          func = "_Qp_qtoi";
2341
          break;
2342
        case DImode:
2343
          func = "_Qp_qtox";
2344
          break;
2345
        default:
2346
          gcc_unreachable ();
2347
        }
2348
      break;
2349
 
2350
    case UNSIGNED_FIX:
2351
      switch (GET_MODE (operands[0]))
2352
        {
2353
        case SImode:
2354
          func = "_Qp_qtoui";
2355
          break;
2356
        case DImode:
2357
          func = "_Qp_qtoux";
2358
          break;
2359
        default:
2360
          gcc_unreachable ();
2361
        }
2362
      break;
2363
 
2364
    default:
2365
      gcc_unreachable ();
2366
    }
2367
 
2368
  emit_soft_tfmode_libcall (func, 2, operands);
2369
}
2370
 
2371
/* Expand a hard-float tfmode operation.  All arguments must be in
2372
   registers.  */
2373
 
2374
static void
2375
emit_hard_tfmode_operation (enum rtx_code code, rtx *operands)
2376
{
2377
  rtx op, dest;
2378
 
2379
  if (GET_RTX_CLASS (code) == RTX_UNARY)
2380
    {
2381
      operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2382
      op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
2383
    }
2384
  else
2385
    {
2386
      operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
2387
      operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
2388
      op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
2389
                           operands[1], operands[2]);
2390
    }
2391
 
2392
  if (register_operand (operands[0], VOIDmode))
2393
    dest = operands[0];
2394
  else
2395
    dest = gen_reg_rtx (GET_MODE (operands[0]));
2396
 
2397
  emit_insn (gen_rtx_SET (VOIDmode, dest, op));
2398
 
2399
  if (dest != operands[0])
2400
    emit_move_insn (operands[0], dest);
2401
}
2402
 
2403
void
2404
emit_tfmode_binop (enum rtx_code code, rtx *operands)
2405
{
2406
  if (TARGET_HARD_QUAD)
2407
    emit_hard_tfmode_operation (code, operands);
2408
  else
2409
    emit_soft_tfmode_binop (code, operands);
2410
}
2411
 
2412
void
2413
emit_tfmode_unop (enum rtx_code code, rtx *operands)
2414
{
2415
  if (TARGET_HARD_QUAD)
2416
    emit_hard_tfmode_operation (code, operands);
2417
  else
2418
    emit_soft_tfmode_unop (code, operands);
2419
}
2420
 
2421
void
2422
emit_tfmode_cvt (enum rtx_code code, rtx *operands)
2423
{
2424
  if (TARGET_HARD_QUAD)
2425
    emit_hard_tfmode_operation (code, operands);
2426
  else
2427
    emit_soft_tfmode_cvt (code, operands);
2428
}
2429
 
2430
/* Return nonzero if a branch/jump/call instruction will be emitting
2431
   a nop into its delay slot.  */
2432
 
2433
int
2434
empty_delay_slot (rtx insn)
2435
{
2436
  rtx seq;
2437
 
2438
  /* If there is no previous instruction (should not happen), return true.  */
2439
  if (PREV_INSN (insn) == NULL)
2440
    return 1;
2441
 
2442
  seq = NEXT_INSN (PREV_INSN (insn));
2443
  if (GET_CODE (PATTERN (seq)) == SEQUENCE)
2444
    return 0;
2445
 
2446
  return 1;
2447
}
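
/* Background sketch: when reorg fills a delay slot, the branch and its
   slot insn are bundled into a single SEQUENCE pattern, and INSN sits
   inside it; NEXT_INSN (PREV_INSN (insn)) then yields that enclosing
   SEQUENCE rather than INSN itself, which is the property tested
   above.  */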
2448
 
2449
/* Return nonzero if TRIAL can go into the call delay slot.  */
2450
 
2451
int
2452
tls_call_delay (rtx trial)
2453
{
2454
  rtx pat;
2455
 
2456
  /* Binutils allows
2457
       call __tls_get_addr, %tgd_call (foo)
2458
        add %l7, %o0, %o0, %tgd_add (foo)
2459
     while Sun as/ld does not.  */
2460
  if (TARGET_GNU_TLS || !TARGET_TLS)
2461
    return 1;
2462
 
2463
  pat = PATTERN (trial);
2464
 
2465
  /* We must reject tgd_add{32|64}, i.e.
2466
       (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSGD)))
2467
     and tldm_add{32|64}, i.e.
2468
       (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSLDM)))
2469
     for Sun as/ld.  */
2470
  if (GET_CODE (pat) == SET
2471
      && GET_CODE (SET_SRC (pat)) == PLUS)
2472
    {
2473
      rtx unspec = XEXP (SET_SRC (pat), 1);
2474
 
2475
      if (GET_CODE (unspec) == UNSPEC
2476
          && (XINT (unspec, 1) == UNSPEC_TLSGD
2477
              || XINT (unspec, 1) == UNSPEC_TLSLDM))
2478
        return 0;
2479
    }
2480
 
2481
  return 1;
2482
}
2483
 
2484
/* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
2485
   instruction.  RETURN_P is true if the v9 variant 'return' is to be
2486
   considered in the test too.
2487
 
2488
   TRIAL must be a SET whose destination is a REG appropriate for the
2489
   'restore' instruction or, if RETURN_P is true, for the 'return'
2490
   instruction.  */
2491
 
2492
static int
2493
eligible_for_restore_insn (rtx trial, bool return_p)
2494
{
2495
  rtx pat = PATTERN (trial);
2496
  rtx src = SET_SRC (pat);
2497
 
2498
  /* The 'restore src,%g0,dest' pattern for word mode and below.  */
2499
  if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2500
      && arith_operand (src, GET_MODE (src)))
2501
    {
2502
      if (TARGET_ARCH64)
2503
        return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2504
      else
2505
        return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
2506
    }
2507
 
2508
  /* The 'restore src,%g0,dest' pattern for double-word mode.  */
2509
  else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
2510
           && arith_double_operand (src, GET_MODE (src)))
2511
    return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
2512
 
2513
  /* The 'restore src,%g0,dest' pattern for float if no FPU.  */
2514
  else if (! TARGET_FPU && register_operand (src, SFmode))
2515
    return 1;
2516
 
2517
  /* The 'restore src,%g0,dest' pattern for double if no FPU.  */
2518
  else if (! TARGET_FPU && TARGET_ARCH64 && register_operand (src, DFmode))
2519
    return 1;
2520
 
2521
  /* If we have the 'return' instruction, anything that does not use
2522
     local or output registers and can go into a delay slot wins.  */
2523
  else if (return_p && TARGET_V9 && ! epilogue_renumber (&pat, 1)
2524
           && (get_attr_in_uncond_branch_delay (trial)
2525
               == IN_UNCOND_BRANCH_DELAY_TRUE))
2526
    return 1;
2527
 
2528
  /* The 'restore src1,src2,dest' pattern for SImode.  */
2529
  else if (GET_CODE (src) == PLUS
2530
           && register_operand (XEXP (src, 0), SImode)
2531
           && arith_operand (XEXP (src, 1), SImode))
2532
    return 1;
2533
 
2534
  /* The 'restore src1,src2,dest' pattern for DImode.  */
2535
  else if (GET_CODE (src) == PLUS
2536
           && register_operand (XEXP (src, 0), DImode)
2537
           && arith_double_operand (XEXP (src, 1), DImode))
2538
    return 1;
2539
 
2540
  /* The 'restore src1,%lo(src2),dest' pattern.  */
2541
  else if (GET_CODE (src) == LO_SUM
2542
           && ! TARGET_CM_MEDMID
2543
           && ((register_operand (XEXP (src, 0), SImode)
2544
                && immediate_operand (XEXP (src, 1), SImode))
2545
               || (TARGET_ARCH64
2546
                   && register_operand (XEXP (src, 0), DImode)
2547
                   && immediate_operand (XEXP (src, 1), DImode))))
2548
    return 1;
2549
 
2550
  /* The 'restore src,src,dest' pattern.  */
2551
  else if (GET_CODE (src) == ASHIFT
2552
           && (register_operand (XEXP (src, 0), SImode)
2553
               || register_operand (XEXP (src, 0), DImode))
2554
           && XEXP (src, 1) == const1_rtx)
2555
    return 1;
2556
 
2557
  return 0;
2558
}
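
/* Illustrative example: a function whose body ends in 'return x + y'
   can fold the addition into the epilogue via the
   'restore src1,src2,dest' form above:
     ret
     restore %i0, %i1, %o0
   saving a separate add before the return.  */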
2559
 
2560
/* Return nonzero if TRIAL can go into the function return's
2561
   delay slot.  */
2562
 
2563
int
2564
eligible_for_return_delay (rtx trial)
2565
{
2566
  rtx pat;
2567
 
2568
  if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2569
    return 0;
2570
 
2571
  if (get_attr_length (trial) != 1)
2572
    return 0;
2573
 
2574
  /* If there are any call-saved registers, we would have to check that
2575
     TRIAL does not reference them.  For now, just reject the slot.  */
2576
  if (num_gfregs)
2577
    return 0;
2578
 
2579
  /* If the function uses __builtin_eh_return, the eh_return machinery
2580
     occupies the delay slot.  */
2581
  if (current_function_calls_eh_return)
2582
    return 0;
2583
 
2584
  /* In the case of a true leaf function, anything can go into the slot.  */
2585
  if (sparc_leaf_function_p)
2586
    return get_attr_in_uncond_branch_delay (trial)
2587
           == IN_UNCOND_BRANCH_DELAY_TRUE;
2588
 
2589
  pat = PATTERN (trial);
2590
 
2591
  /* Otherwise, only operations which can be done in tandem with
2592
     a `restore' or `return' insn can go into the delay slot.  */
2593
  if (GET_CODE (SET_DEST (pat)) != REG
2594
      || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24))
2595
    return 0;
2596
 
2597
  /* If this instruction sets up a floating-point register and we have a return
2598
     instruction, it can probably go in.  But restore will not work
2599
     with FP_REGS.  */
2600
  if (REGNO (SET_DEST (pat)) >= 32)
2601
    return (TARGET_V9
2602
            && ! epilogue_renumber (&pat, 1)
2603
            && (get_attr_in_uncond_branch_delay (trial)
2604
                == IN_UNCOND_BRANCH_DELAY_TRUE));
2605
 
2606
  return eligible_for_restore_insn (trial, true);
2607
}
2608
 
2609
/* Return nonzero if TRIAL can go into the sibling call's
2610
   delay slot.  */
2611
 
2612
int
2613
eligible_for_sibcall_delay (rtx trial)
2614
{
2615
  rtx pat;
2616
 
2617
  if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
2618
    return 0;
2619
 
2620
  if (get_attr_length (trial) != 1)
2621
    return 0;
2622
 
2623
  pat = PATTERN (trial);
2624
 
2625
  if (sparc_leaf_function_p)
2626
    {
2627
      /* If the tail call is done using the call instruction,
2628
         we have to restore %o7 in the delay slot.  */
2629
      if (LEAF_SIBCALL_SLOT_RESERVED_P)
2630
        return 0;
2631
 
2632
      /* %g1 is used to build the function address.  */
2633
      if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
2634
        return 0;
2635
 
2636
      return 1;
2637
    }
2638
 
2639
  /* Otherwise, only operations which can be done in tandem with
2640
     a `restore' insn can go into the delay slot.  */
2641
  if (GET_CODE (SET_DEST (pat)) != REG
2642
      || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24)
2643
      || REGNO (SET_DEST (pat)) >= 32)
2644
    return 0;
2645
 
2646
  /* If it mentions %o7, it can't go in, because sibcall will clobber it
2647
     in most cases.  */
2648
  if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
2649
    return 0;
2650
 
2651
  return eligible_for_restore_insn (trial, false);
2652
}
2653
 
2654
int
2655
short_branch (int uid1, int uid2)
2656
{
2657
  int delta = INSN_ADDRESSES (uid1) - INSN_ADDRESSES (uid2);
2658
 
2659
  /* Leave a few words of "slop".  */
2660
  if (delta >= -1023 && delta <= 1022)
2661
    return 1;
2662
 
2663
  return 0;
2664
}
2665
 
2666
/* Return nonzero if REG is not used after INSN.
2667
   We assume REG is a reload reg, and therefore does
2668
   not live past labels or calls or jumps.  */
2669
int
2670
reg_unused_after (rtx reg, rtx insn)
2671
{
2672
  enum rtx_code code, prev_code = UNKNOWN;
2673
 
2674
  while ((insn = NEXT_INSN (insn)))
2675
    {
2676
      if (prev_code == CALL_INSN && call_used_regs[REGNO (reg)])
2677
        return 1;
2678
 
2679
      code = GET_CODE (insn);
2680
      if (GET_CODE (insn) == CODE_LABEL)
2681
        return 1;
2682
 
2683
      if (INSN_P (insn))
2684
        {
2685
          rtx set = single_set (insn);
2686
          int in_src = set && reg_overlap_mentioned_p (reg, SET_SRC (set));
2687
          if (set && in_src)
2688
            return 0;
2689
          if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
2690
            return 1;
2691
          if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
2692
            return 0;
2693
        }
2694
      prev_code = code;
2695
    }
2696
  return 1;
2697
}
2698
 
2699
/* Determine if it's legal to put X into the constant pool.  This
2700
   is not possible if X contains the address of a symbol that is
2701
   not constant (TLS) or not known at final link time (PIC).  */
2702
 
2703
static bool
2704
sparc_cannot_force_const_mem (rtx x)
2705
{
2706
  switch (GET_CODE (x))
2707
    {
2708
    case CONST_INT:
2709
    case CONST_DOUBLE:
2710
    case CONST_VECTOR:
2711
      /* Accept all non-symbolic constants.  */
2712
      return false;
2713
 
2714
    case LABEL_REF:
2715
      /* Labels are OK iff we are non-PIC.  */
2716
      return flag_pic != 0;
2717
 
2718
    case SYMBOL_REF:
2719
      /* 'Naked' TLS symbol references are never OK,
2720
         non-TLS symbols are OK iff we are non-PIC.  */
2721
      if (SYMBOL_REF_TLS_MODEL (x))
2722
        return true;
2723
      else
2724
        return flag_pic != 0;
2725
 
2726
    case CONST:
2727
      return sparc_cannot_force_const_mem (XEXP (x, 0));
2728
    case PLUS:
2729
    case MINUS:
2730
      return sparc_cannot_force_const_mem (XEXP (x, 0))
2731
         || sparc_cannot_force_const_mem (XEXP (x, 1));
2732
    case UNSPEC:
2733
      return true;
2734
    default:
2735
      gcc_unreachable ();
2736
    }
2737
}
2738
 
2739
/* PIC support.  */
2740
static GTY(()) char pic_helper_symbol_name[256];
2741
static GTY(()) rtx pic_helper_symbol;
2742
static GTY(()) bool pic_helper_emitted_p = false;
2743
static GTY(()) rtx global_offset_table;
2744
 
2745
/* Ensure that we are not using patterns that are not OK with PIC.  */
2746
 
2747
int
2748
check_pic (int i)
2749
{
2750
  switch (flag_pic)
2751
    {
2752
    case 1:
2753
      gcc_assert (GET_CODE (recog_data.operand[i]) != SYMBOL_REF
2754
                  && (GET_CODE (recog_data.operand[i]) != CONST
2755
                  || (GET_CODE (XEXP (recog_data.operand[i], 0)) == MINUS
2756
                      && (XEXP (XEXP (recog_data.operand[i], 0), 0)
2757
                          == global_offset_table)
2758
                      && (GET_CODE (XEXP (XEXP (recog_data.operand[i], 0), 1))
2759
                          == CONST))));
2760
    case 2:
2761
    default:
2762
      return 1;
2763
    }
2764
}
2765
 
2766
/* Return true if X is an address which needs a temporary register when
2767
   reloaded while generating PIC code.  */
2768
 
2769
int
2770
pic_address_needs_scratch (rtx x)
2771
{
2772
  /* An address which is a symbolic operand plus a non-SMALL_INT needs a temp reg.  */
2773
  if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
2774
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
2775
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2776
      && ! SMALL_INT (XEXP (XEXP (x, 0), 1)))
2777
    return 1;
2778
 
2779
  return 0;
2780
}
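
/* Illustrative example: 'sym + 100000' carries an offset outside the
   13-bit signed immediate range, so under PIC the offset must first be
   built in a scratch register and added to the GOT-loaded symbol
   address, whereas 'sym + 42' needs no scratch.  */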
2781
 
2782
/* Determine if a given RTX is a valid constant.  We already know this
2783
   satisfies CONSTANT_P.  */
2784
 
2785
bool
2786
legitimate_constant_p (rtx x)
2787
{
2788
  rtx inner;
2789
 
2790
  switch (GET_CODE (x))
2791
    {
2792
    case SYMBOL_REF:
2793
      /* TLS symbols are not constant.  */
2794
      if (SYMBOL_REF_TLS_MODEL (x))
2795
        return false;
2796
      break;
2797
 
2798
    case CONST:
2799
      inner = XEXP (x, 0);
2800
 
2801
      /* Offsets of TLS symbols are never valid.
2802
         Discourage CSE from creating them.  */
2803
      if (GET_CODE (inner) == PLUS
2804
          && SPARC_SYMBOL_REF_TLS_P (XEXP (inner, 0)))
2805
        return false;
2806
      break;
2807
 
2808
    case CONST_DOUBLE:
2809
      if (GET_MODE (x) == VOIDmode)
2810
        return true;
2811
 
2812
      /* Floating point constants are generally not ok.
2813
         The only exception is 0.0 in VIS.  */
2814
      if (TARGET_VIS
2815
          && SCALAR_FLOAT_MODE_P (GET_MODE (x))
2816
          && const_zero_operand (x, GET_MODE (x)))
2817
        return true;
2818
 
2819
      return false;
2820
 
2821
    case CONST_VECTOR:
2822
      /* Vector constants are generally not ok.
2823
         The only exception is 0 in VIS.  */
2824
      if (TARGET_VIS
2825
          && const_zero_operand (x, GET_MODE (x)))
2826
        return true;
2827
 
2828
      return false;
2829
 
2830
    default:
2831
      break;
2832
    }
2833
 
2834
  return true;
2835
}
2836
 
2837
/* Determine if a given RTX is a valid constant address.  */
2838
 
2839
bool
2840
constant_address_p (rtx x)
2841
{
2842
  switch (GET_CODE (x))
2843
    {
2844
    case LABEL_REF:
2845
    case CONST_INT:
2846
    case HIGH:
2847
      return true;
2848
 
2849
    case CONST:
2850
      if (flag_pic && pic_address_needs_scratch (x))
2851
        return false;
2852
      return legitimate_constant_p (x);
2853
 
2854
    case SYMBOL_REF:
2855
      return !flag_pic && legitimate_constant_p (x);
2856
 
2857
    default:
2858
      return false;
2859
    }
2860
}
2861
 
2862
/* Nonzero if the constant value X is a legitimate general operand
2863
   when generating PIC code.  It is given that flag_pic is on and
2864
   that X satisfies CONSTANT_P or is a CONST_DOUBLE.  */
2865
 
2866
bool
2867
legitimate_pic_operand_p (rtx x)
2868
{
2869
  if (pic_address_needs_scratch (x))
2870
    return false;
2871
  if (SPARC_SYMBOL_REF_TLS_P (x)
2872
      || (GET_CODE (x) == CONST
2873
          && GET_CODE (XEXP (x, 0)) == PLUS
2874
          && SPARC_SYMBOL_REF_TLS_P (XEXP (XEXP (x, 0), 0))))
2875
    return false;
2876
  return true;
2877
}
2878
 
2879
/* Return nonzero if ADDR is a valid memory address.
2880
   STRICT specifies whether strict register checking applies.  */
2881
 
2882
int
2883
legitimate_address_p (enum machine_mode mode, rtx addr, int strict)
2884
{
2885
  rtx rs1 = NULL, rs2 = NULL, imm1 = NULL;
2886
 
2887
  if (REG_P (addr) || GET_CODE (addr) == SUBREG)
2888
    rs1 = addr;
2889
  else if (GET_CODE (addr) == PLUS)
2890
    {
2891
      rs1 = XEXP (addr, 0);
2892
      rs2 = XEXP (addr, 1);
2893
 
2894
      /* Canonicalize.  REG comes first; if there are no regs,
2895
         LO_SUM comes first.  */
2896
      if (!REG_P (rs1)
2897
          && GET_CODE (rs1) != SUBREG
2898
          && (REG_P (rs2)
2899
              || GET_CODE (rs2) == SUBREG
2900
              || (GET_CODE (rs2) == LO_SUM && GET_CODE (rs1) != LO_SUM)))
2901
        {
2902
          rs1 = XEXP (addr, 1);
2903
          rs2 = XEXP (addr, 0);
2904
        }
2905
 
2906
      if ((flag_pic == 1
2907
           && rs1 == pic_offset_table_rtx
2908
           && !REG_P (rs2)
2909
           && GET_CODE (rs2) != SUBREG
2910
           && GET_CODE (rs2) != LO_SUM
2911
           && GET_CODE (rs2) != MEM
2912
           && ! SPARC_SYMBOL_REF_TLS_P (rs2)
2913
           && (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
2914
           && (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
2915
          || ((REG_P (rs1)
2916
               || GET_CODE (rs1) == SUBREG)
2917
              && RTX_OK_FOR_OFFSET_P (rs2)))
2918
        {
2919
          imm1 = rs2;
2920
          rs2 = NULL;
2921
        }
2922
      else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
2923
               && (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
2924
        {
2925
          /* We prohibit REG + REG for TFmode when there are no quad move insns
2926
             and we consequently need to split.  We do this because REG+REG
2927
             is not an offsettable address.  If we get the situation in reload
2928
             where source and destination of a movtf pattern are both MEMs with
2929
             REG+REG address, then only one of them gets converted to an
2930
             offsettable address.  */
2931
          if (mode == TFmode
2932
              && ! (TARGET_FPU && TARGET_ARCH64 && TARGET_HARD_QUAD))
2933
            return 0;
2934
 
2935
          /* We prohibit REG + REG for DFmode/DImode on ARCH32 when not
2936
             optimizing, because then mem_min_alignment is likely to be zero
2937
             after reload and the forced split would lack a matching splitter
2938
             pattern.  */
2939
          if (TARGET_ARCH32 && !optimize
2940
              && (mode == DFmode || mode == DImode))
2941
            return 0;
2942
        }
2943
      else if (USE_AS_OFFSETABLE_LO10
2944
               && GET_CODE (rs1) == LO_SUM
2945
               && TARGET_ARCH64
2946
               && ! TARGET_CM_MEDMID
2947
               && RTX_OK_FOR_OLO10_P (rs2))
2948
        {
2949
          rs2 = NULL;
2950
          imm1 = XEXP (rs1, 1);
2951
          rs1 = XEXP (rs1, 0);
2952
          if (! CONSTANT_P (imm1) || SPARC_SYMBOL_REF_TLS_P (rs1))
2953
            return 0;
2954
        }
2955
    }
2956
  else if (GET_CODE (addr) == LO_SUM)
2957
    {
2958
      rs1 = XEXP (addr, 0);
2959
      imm1 = XEXP (addr, 1);
2960
 
2961
      if (! CONSTANT_P (imm1) || SPARC_SYMBOL_REF_TLS_P (rs1))
2962
        return 0;
2963
 
2964
      /* We can't allow TFmode in 32-bit mode, because an offset greater
2965
         than the alignment (8) may cause the LO_SUM to overflow.  */
2966
      if (mode == TFmode && TARGET_ARCH32)
2967
        return 0;
2968
    }
2969
  else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
2970
    return 1;
2971
  else
2972
    return 0;
2973
 
2974
  if (GET_CODE (rs1) == SUBREG)
2975
    rs1 = SUBREG_REG (rs1);
2976
  if (!REG_P (rs1))
2977
    return 0;
2978
 
2979
  if (rs2)
2980
    {
2981
      if (GET_CODE (rs2) == SUBREG)
2982
        rs2 = SUBREG_REG (rs2);
2983
      if (!REG_P (rs2))
2984
        return 0;
2985
    }
2986
 
2987
  if (strict)
2988
    {
2989
      if (!REGNO_OK_FOR_BASE_P (REGNO (rs1))
2990
          || (rs2 && !REGNO_OK_FOR_BASE_P (REGNO (rs2))))
2991
        return 0;
2992
    }
2993
  else
2994
    {
2995
      if ((REGNO (rs1) >= 32
2996
           && REGNO (rs1) != FRAME_POINTER_REGNUM
2997
           && REGNO (rs1) < FIRST_PSEUDO_REGISTER)
2998
          || (rs2
2999
              && (REGNO (rs2) >= 32
3000
                  && REGNO (rs2) != FRAME_POINTER_REGNUM
3001
                  && REGNO (rs2) < FIRST_PSEUDO_REGISTER)))
3002
        return 0;
3003
    }
3004
  return 1;
3005
}
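
/* Illustrative examples: both 'ld [%l1 + 42], %o0' (REG plus simm13)
   and 'ld [%l1 + %l2], %o0' (REG plus REG) satisfy the checks above,
   while a TFmode REG + REG access is refused without hard-quad support
   because it must later split into two offsettable halves.  */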
3006
 
3007
/* Construct the SYMBOL_REF for the __tls_get_addr function.  */
3008
 
3009
static GTY(()) rtx sparc_tls_symbol;
3010
 
3011
static rtx
3012
sparc_tls_get_addr (void)
3013
{
3014
  if (!sparc_tls_symbol)
3015
    sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");
3016
 
3017
  return sparc_tls_symbol;
3018
}
3019
 
3020
static rtx
3021
sparc_tls_got (void)
3022
{
3023
  rtx temp;
3024
  if (flag_pic)
3025
    {
3026
      current_function_uses_pic_offset_table = 1;
3027
      return pic_offset_table_rtx;
3028
    }
3029
 
3030
  if (!global_offset_table)
3031
    global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3032
  temp = gen_reg_rtx (Pmode);
3033
  emit_move_insn (temp, global_offset_table);
3034
  return temp;
3035
}
3036
 
3037
/* Return 1 if *X is a thread-local symbol.  */
3038
 
3039
static int
3040
sparc_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
3041
{
3042
  return SPARC_SYMBOL_REF_TLS_P (*x);
3043
}
3044
 
3045
/* Return 1 if X contains a thread-local symbol.  */
3046
 
3047
bool
3048
sparc_tls_referenced_p (rtx x)
3049
{
3050
  if (!TARGET_HAVE_TLS)
3051
    return false;
3052
 
3053
  return for_each_rtx (&x, &sparc_tls_symbol_ref_1, 0);
3054
}
3055
 
3056
/* ADDR contains a thread-local SYMBOL_REF.  Generate code to compute
3057
   this (thread-local) address.  */
3058
 
3059
rtx
3060
legitimize_tls_address (rtx addr)
3061
{
3062
  rtx temp1, temp2, temp3, ret, o0, got, insn;
3063
 
3064
  gcc_assert (! no_new_pseudos);
3065
 
3066
  if (GET_CODE (addr) == SYMBOL_REF)
3067
    switch (SYMBOL_REF_TLS_MODEL (addr))
3068
      {
3069
      case TLS_MODEL_GLOBAL_DYNAMIC:
3070
        start_sequence ();
3071
        temp1 = gen_reg_rtx (SImode);
3072
        temp2 = gen_reg_rtx (SImode);
3073
        ret = gen_reg_rtx (Pmode);
3074
        o0 = gen_rtx_REG (Pmode, 8);
3075
        got = sparc_tls_got ();
3076
        emit_insn (gen_tgd_hi22 (temp1, addr));
3077
        emit_insn (gen_tgd_lo10 (temp2, temp1, addr));
3078
        if (TARGET_ARCH32)
3079
          {
3080
            emit_insn (gen_tgd_add32 (o0, got, temp2, addr));
3081
            insn = emit_call_insn (gen_tgd_call32 (o0, sparc_tls_get_addr (),
3082
                                                   addr, const1_rtx));
3083
          }
3084
        else
3085
          {
3086
            emit_insn (gen_tgd_add64 (o0, got, temp2, addr));
3087
            insn = emit_call_insn (gen_tgd_call64 (o0, sparc_tls_get_addr (),
3088
                                                   addr, const1_rtx));
3089
          }
3090
        CALL_INSN_FUNCTION_USAGE (insn)
3091
          = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
3092
                               CALL_INSN_FUNCTION_USAGE (insn));
3093
        insn = get_insns ();
3094
        end_sequence ();
3095
        emit_libcall_block (insn, ret, o0, addr);
        break;

      case TLS_MODEL_LOCAL_DYNAMIC:
        start_sequence ();
        temp1 = gen_reg_rtx (SImode);
        temp2 = gen_reg_rtx (SImode);
        temp3 = gen_reg_rtx (Pmode);
        ret = gen_reg_rtx (Pmode);
        o0 = gen_rtx_REG (Pmode, 8);
        got = sparc_tls_got ();
        emit_insn (gen_tldm_hi22 (temp1));
        emit_insn (gen_tldm_lo10 (temp2, temp1));
        if (TARGET_ARCH32)
          {
            emit_insn (gen_tldm_add32 (o0, got, temp2));
            insn = emit_call_insn (gen_tldm_call32 (o0, sparc_tls_get_addr (),
                                                    const1_rtx));
          }
        else
          {
            emit_insn (gen_tldm_add64 (o0, got, temp2));
            insn = emit_call_insn (gen_tldm_call64 (o0, sparc_tls_get_addr (),
                                                    const1_rtx));
          }
        CALL_INSN_FUNCTION_USAGE (insn)
          = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
                               CALL_INSN_FUNCTION_USAGE (insn));
        insn = get_insns ();
        end_sequence ();
        emit_libcall_block (insn, temp3, o0,
                            gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
                                            UNSPEC_TLSLD_BASE));
        temp1 = gen_reg_rtx (SImode);
        temp2 = gen_reg_rtx (SImode);
        emit_insn (gen_tldo_hix22 (temp1, addr));
        emit_insn (gen_tldo_lox10 (temp2, temp1, addr));
        if (TARGET_ARCH32)
          emit_insn (gen_tldo_add32 (ret, temp3, temp2, addr));
        else
          emit_insn (gen_tldo_add64 (ret, temp3, temp2, addr));
        break;

      case TLS_MODEL_INITIAL_EXEC:
        temp1 = gen_reg_rtx (SImode);
        temp2 = gen_reg_rtx (SImode);
        temp3 = gen_reg_rtx (Pmode);
        got = sparc_tls_got ();
        emit_insn (gen_tie_hi22 (temp1, addr));
        emit_insn (gen_tie_lo10 (temp2, temp1, addr));
        if (TARGET_ARCH32)
          emit_insn (gen_tie_ld32 (temp3, got, temp2, addr));
        else
          emit_insn (gen_tie_ld64 (temp3, got, temp2, addr));
        if (TARGET_SUN_TLS)
          {
            ret = gen_reg_rtx (Pmode);
            if (TARGET_ARCH32)
              emit_insn (gen_tie_add32 (ret, gen_rtx_REG (Pmode, 7),
                                        temp3, addr));
            else
              emit_insn (gen_tie_add64 (ret, gen_rtx_REG (Pmode, 7),
                                        temp3, addr));
          }
        else
          ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp3);
        break;

      case TLS_MODEL_LOCAL_EXEC:
        temp1 = gen_reg_rtx (Pmode);
        temp2 = gen_reg_rtx (Pmode);
        if (TARGET_ARCH32)
          {
            emit_insn (gen_tle_hix22_sp32 (temp1, addr));
            emit_insn (gen_tle_lox10_sp32 (temp2, temp1, addr));
          }
        else
          {
            emit_insn (gen_tle_hix22_sp64 (temp1, addr));
            emit_insn (gen_tle_lox10_sp64 (temp2, temp1, addr));
          }
        ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp2);
        break;

      default:
        gcc_unreachable ();
      }

  else
    gcc_unreachable ();  /* for now ... */

  return ret;
}
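
/* Illustrative sketch (not part of the original source): under the 32-bit
   local-exec model above, the emitted insns are expected to assemble to
   something like

        sethi   %tle_hix22(sym), %o0
        xor     %o0, %tle_lox10(sym), %o0
        add     %g7, %o0, %o0           ! %g7 is the thread pointer

   using the standard SPARC TLS relocation operators; the exact register
   choice and operand syntax here are assumptions.  */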


/* Legitimize PIC addresses.  If the address is already position-independent,
   we return ORIG.  Newly generated position-independent addresses go into a
   reg.  This is REG if nonzero, otherwise we allocate register(s) as
   necessary.  */

rtx
legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
                        rtx reg)
{
  if (GET_CODE (orig) == SYMBOL_REF)
    {
      rtx pic_ref, address;
      rtx insn;

      if (reg == 0)
        {
          gcc_assert (! reload_in_progress && ! reload_completed);
          reg = gen_reg_rtx (Pmode);
        }

      if (flag_pic == 2)
        {
          /* If not during reload, allocate another temp reg here for loading
             in the address, so that these instructions can be optimized
             properly.  */
          rtx temp_reg = ((reload_in_progress || reload_completed)
                          ? reg : gen_reg_rtx (Pmode));

          /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
             won't get confused into thinking that these two instructions
             are loading in the true address of the symbol.  If in the
             future a PIC rtx exists, that should be used instead.  */
          if (TARGET_ARCH64)
            {
              emit_insn (gen_movdi_high_pic (temp_reg, orig));
              emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
            }
          else
            {
              emit_insn (gen_movsi_high_pic (temp_reg, orig));
              emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
            }
          address = temp_reg;
        }
      else
        address = orig;

      pic_ref = gen_const_mem (Pmode,
                               gen_rtx_PLUS (Pmode,
                                             pic_offset_table_rtx, address));
      current_function_uses_pic_offset_table = 1;
      insn = emit_move_insn (reg, pic_ref);
      /* Put a REG_EQUAL note on this insn, so that it can be optimized
         by the loop optimizer.  */
      REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
                                  REG_NOTES (insn));
      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base, offset;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
          && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
        return orig;

      if (reg == 0)
        {
          gcc_assert (! reload_in_progress && ! reload_completed);
          reg = gen_reg_rtx (Pmode);
        }

      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
                                       base == reg ? 0 : reg);

      if (GET_CODE (offset) == CONST_INT)
        {
          if (SMALL_INT (offset))
            return plus_constant (base, INTVAL (offset));
          else if (! reload_in_progress && ! reload_completed)
            offset = force_reg (Pmode, offset);
          else
            /* If we reach here, then something is seriously wrong.  */
            gcc_unreachable ();
        }
      return gen_rtx_PLUS (Pmode, base, offset);
    }
  else if (GET_CODE (orig) == LABEL_REF)
    /* ??? Why do we do this?  */
    /* Now movsi_pic_label_ref uses it, but we ought to be checking that
       the register is live instead, in case it is eliminated.  */
    current_function_uses_pic_offset_table = 1;

  return orig;
}
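
/* Illustrative sketch (not part of the original source): with -fPIC
   (flag_pic == 2), the three insns built above for a global symbol are
   expected to assemble to roughly

        sethi   %hi(sym), %g1           ! movsi_high_pic
        or      %g1, %lo(sym), %g1      ! movsi_lo_sum_pic
        ld      [%l7 + %g1], %g1        ! the GOT load (pic_ref)

   where %l7 is the PIC register; the register choice and relocation
   syntax are assumptions of this sketch.  */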

/* Try machine-dependent ways of modifying an illegitimate address X
   to be legitimate.  If we find one, return the new, valid address.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   MODE is the mode of the operand pointed to by X.  */

rtx
legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, enum machine_mode mode)
{
  rtx orig_x = x;

  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
    x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
                      force_operand (XEXP (x, 0), NULL_RTX));
  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
    x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
                      force_operand (XEXP (x, 1), NULL_RTX));
  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
    x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
                      XEXP (x, 1));
  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
    x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
                      force_operand (XEXP (x, 1), NULL_RTX));

  if (x != orig_x && legitimate_address_p (mode, x, FALSE))
    return x;

  if (SPARC_SYMBOL_REF_TLS_P (x))
    x = legitimize_tls_address (x);
  else if (flag_pic)
    x = legitimize_pic_address (x, mode, 0);
  else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
    x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
                      copy_to_mode_reg (Pmode, XEXP (x, 1)));
  else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
    x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
                      copy_to_mode_reg (Pmode, XEXP (x, 0)));
  else if (GET_CODE (x) == SYMBOL_REF
           || GET_CODE (x) == CONST
           || GET_CODE (x) == LABEL_REF)
    x = copy_to_suggested_reg (x, NULL_RTX, Pmode);
  return x;
}

/* Emit the special PIC helper function.  */

static void
emit_pic_helper (void)
{
  const char *pic_name = reg_names[REGNO (pic_offset_table_rtx)];
  int align;

  switch_to_section (text_section);

  align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
  if (align > 0)
    ASM_OUTPUT_ALIGN (asm_out_file, align);
  ASM_OUTPUT_LABEL (asm_out_file, pic_helper_symbol_name);
  if (flag_delayed_branch)
    fprintf (asm_out_file, "\tjmp\t%%o7+8\n\t add\t%%o7, %s, %s\n",
            pic_name, pic_name);
  else
    fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp\t%%o7+8\n\t nop\n",
            pic_name, pic_name);

  pic_helper_emitted_p = true;
}

/* Emit code to load the PIC register.  */

static void
load_pic_register (bool delay_pic_helper)
{
  int orig_flag_pic = flag_pic;

  /* If we haven't initialized the special PIC symbols, do so now.  */
  if (!pic_helper_symbol_name[0])
    {
      ASM_GENERATE_INTERNAL_LABEL (pic_helper_symbol_name, "LADDPC", 0);
      pic_helper_symbol = gen_rtx_SYMBOL_REF (Pmode, pic_helper_symbol_name);
      global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
    }

  /* If we haven't emitted the special PIC helper function, do so now unless
     we are requested to delay it.  */
  if (!delay_pic_helper && !pic_helper_emitted_p)
    emit_pic_helper ();

  flag_pic = 0;
  if (TARGET_ARCH64)
    emit_insn (gen_load_pcrel_symdi (pic_offset_table_rtx, global_offset_table,
                                     pic_helper_symbol));
  else
    emit_insn (gen_load_pcrel_symsi (pic_offset_table_rtx, global_offset_table,
                                     pic_helper_symbol));
  flag_pic = orig_flag_pic;

  /* Need to emit this whether or not we obey regdecls,
     since setjmp/longjmp can cause life info to screw up.
     ??? In the case where we don't obey regdecls, this is not sufficient
     since we may not fall out the bottom.  */
  emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
}
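
/* Illustrative sketch (not part of the original source): combined with
   the helper emitted by emit_pic_helper, the 32-bit GOT pointer setup is
   expected to look roughly like

        sethi   %hi(_GLOBAL_OFFSET_TABLE_-4), %l7
        call    .LADDPC                 ! the PIC helper
         add    %l7, %lo(_GLOBAL_OFFSET_TABLE_+4), %l7

   with the helper adding %o7 (the address of the call) into %l7 before
   returning, so that %l7 ends up pointing at the GOT.  The label name
   and the -4/+4 adjustments are assumptions here.  */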

/* Return 1 if RTX is a MEM which is known to be aligned to at
   least a DESIRED byte boundary.  */

int
mem_min_alignment (rtx mem, int desired)
{
  rtx addr, base, offset;

  /* If it's not a MEM we can't accept it.  */
  if (GET_CODE (mem) != MEM)
    return 0;

  /* Obviously...  */
  if (!TARGET_UNALIGNED_DOUBLES
      && MEM_ALIGN (mem) / BITS_PER_UNIT >= (unsigned)desired)
    return 1;

  /* ??? The rest of the function predates MEM_ALIGN so
     there is probably a bit of redundancy.  */
  addr = XEXP (mem, 0);
  base = offset = NULL_RTX;
  if (GET_CODE (addr) == PLUS)
    {
      if (GET_CODE (XEXP (addr, 0)) == REG)
        {
          base = XEXP (addr, 0);

          /* What we are saying here is that if the base
             REG is aligned properly, the compiler will make
             sure any REG based index upon it will be so
             as well.  */
          if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
            offset = XEXP (addr, 1);
          else
            offset = const0_rtx;
        }
    }
  else if (GET_CODE (addr) == REG)
    {
      base = addr;
      offset = const0_rtx;
    }

  if (base != NULL_RTX)
    {
      int regno = REGNO (base);

      if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
        {
          /* Check if the compiler has recorded some information
             about the alignment of the base REG.  If reload has
             completed, we already matched with proper alignments.
             If not running global_alloc, reload might give us
             an unaligned pointer to the local stack, though.  */
          if (((cfun != 0
                && REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
               || (optimize && reload_completed))
              && (INTVAL (offset) & (desired - 1)) == 0)
            return 1;
        }
      else
        {
          if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
            return 1;
        }
    }
  else if (! TARGET_UNALIGNED_DOUBLES
           || CONSTANT_P (addr)
           || GET_CODE (addr) == LO_SUM)
    {
      /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
         is true, in which case we can only assume that an access is aligned if
         it is to a constant address, or the address involves a LO_SUM.  */
      return 1;
    }

  /* An obviously unaligned address.  */
  return 0;
}
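
/* Worked example (added for illustration): for a 64-bit access
   (mem:DF (plus (reg %sp) (const_int 2055))), the biased branch above
   checks (2055 - 2047) & 7 == 0, i.e. the true offset 8 is double-word
   aligned, so the function returns 1.  SPARC_STACK_BIAS is 2047 on
   64-bit targets and 0 on 32-bit ones.  */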


/* Vectors to keep interesting information about registers where it can easily
   be got.  We used to use the actual mode value as the bit number, but there
   are more than 32 modes now.  Instead we use two tables: one indexed by
   hard register number, and one indexed by mode.  */

/* The purpose of sparc_mode_class is to shrink the range of modes so that
   they all fit (as bit numbers) in a 32 bit word (again).  Each real mode is
   mapped into one sparc_mode_class mode.  */

enum sparc_mode_class {
  S_MODE, D_MODE, T_MODE, O_MODE,
  SF_MODE, DF_MODE, TF_MODE, OF_MODE,
  CC_MODE, CCFP_MODE
};

/* Modes for single-word and smaller quantities.  */
#define S_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))

/* Modes for double-word and smaller quantities.  */
#define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << DF_MODE))

/* Modes for quad-word and smaller quantities.  */
#define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))

/* Modes for 8-word and smaller quantities.  */
#define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))

/* Modes for single-float quantities.  We must allow any single word or
   smaller quantity.  This is because the fix/float conversion instructions
   take integer inputs/outputs from the float registers.  */
#define SF_MODES (S_MODES)

/* Modes for double-float and smaller quantities.  */
#define DF_MODES (S_MODES | D_MODES)

/* Modes for double-float only quantities.  */
#define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))

/* Modes for quad-float only quantities.  */
#define TF_ONLY_MODES (1 << (int) TF_MODE)

/* Modes for quad-float and smaller quantities.  */
#define TF_MODES (DF_MODES | TF_ONLY_MODES)

/* Modes for quad-float and double-float quantities.  */
#define TF_MODES_NO_S (DF_MODES_NO_S | TF_ONLY_MODES)

/* Modes for quad-float pair only quantities.  */
#define OF_ONLY_MODES (1 << (int) OF_MODE)

/* Modes for quad-float pairs and smaller quantities.  */
#define OF_MODES (TF_MODES | OF_ONLY_MODES)

#define OF_MODES_NO_S (TF_MODES_NO_S | OF_ONLY_MODES)

/* Modes for condition codes.  */
#define CC_MODES (1 << (int) CC_MODE)
#define CCFP_MODES (1 << (int) CCFP_MODE)
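
/* Illustrative note (not part of the original source): the two tables are
   combined with a bitwise AND; sparc.h is expected to define the
   register/mode predicate essentially as

     #define HARD_REGNO_MODE_OK(REGNO, MODE) \
       ((hard_regno_mode_classes[REGNO] & sparc_mode_class[MODE]) != 0)

   so e.g. DImode, mapped to 1 << D_MODE by sparc_init_modes below, is
   accepted by any register whose entry includes D_MODES.  The exact
   sparc.h definition may differ; this is a sketch of the mechanism.  */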

/* Value is 1 if register/mode pair is acceptable on sparc.
   The funny mixture of D and T modes is because integer operations
   do not specially operate on tetra quantities, so non-quad-aligned
   registers can hold quadword quantities (except %o4 and %i4 because
   they cross fixed registers).  */

/* This points to either the 32 bit or the 64 bit version.  */
const int *hard_regno_mode_classes;

static const int hard_32bit_mode_classes[] = {
  S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
  T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
  T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
  T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,

  OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
  OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
  OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
  OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,

  /* FP regs f32 to f63.  Only the even numbered registers actually exist,
     and none can hold SFmode/SImode values.  */
  OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
  OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
  OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
  OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,

  /* %fcc[0123] */
  CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,

  /* %icc */
  CC_MODES
};

static const int hard_64bit_mode_classes[] = {
  D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
  O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
  T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
  O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,

  OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
  OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
  OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
  OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,

  /* FP regs f32 to f63.  Only the even numbered registers actually exist,
     and none can hold SFmode/SImode values.  */
  OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
  OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
  OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
  OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,

  /* %fcc[0123] */
  CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,

  /* %icc */
  CC_MODES
};

int sparc_mode_class [NUM_MACHINE_MODES];

enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];

static void
sparc_init_modes (void)
{
  int i;

  for (i = 0; i < NUM_MACHINE_MODES; i++)
    {
      switch (GET_MODE_CLASS (i))
        {
        case MODE_INT:
        case MODE_PARTIAL_INT:
        case MODE_COMPLEX_INT:
          if (GET_MODE_SIZE (i) <= 4)
            sparc_mode_class[i] = 1 << (int) S_MODE;
          else if (GET_MODE_SIZE (i) == 8)
            sparc_mode_class[i] = 1 << (int) D_MODE;
          else if (GET_MODE_SIZE (i) == 16)
            sparc_mode_class[i] = 1 << (int) T_MODE;
          else if (GET_MODE_SIZE (i) == 32)
            sparc_mode_class[i] = 1 << (int) O_MODE;
          else
            sparc_mode_class[i] = 0;
          break;
        case MODE_VECTOR_INT:
          if (GET_MODE_SIZE (i) <= 4)
            sparc_mode_class[i] = 1 << (int) SF_MODE;
          else if (GET_MODE_SIZE (i) == 8)
            sparc_mode_class[i] = 1 << (int) DF_MODE;
          break;
        case MODE_FLOAT:
        case MODE_COMPLEX_FLOAT:
          if (GET_MODE_SIZE (i) <= 4)
            sparc_mode_class[i] = 1 << (int) SF_MODE;
          else if (GET_MODE_SIZE (i) == 8)
            sparc_mode_class[i] = 1 << (int) DF_MODE;
          else if (GET_MODE_SIZE (i) == 16)
            sparc_mode_class[i] = 1 << (int) TF_MODE;
          else if (GET_MODE_SIZE (i) == 32)
            sparc_mode_class[i] = 1 << (int) OF_MODE;
          else
            sparc_mode_class[i] = 0;
          break;
        case MODE_CC:
          if (i == (int) CCFPmode || i == (int) CCFPEmode)
            sparc_mode_class[i] = 1 << (int) CCFP_MODE;
          else
            sparc_mode_class[i] = 1 << (int) CC_MODE;
          break;
        default:
          sparc_mode_class[i] = 0;
          break;
        }
    }

  if (TARGET_ARCH64)
    hard_regno_mode_classes = hard_64bit_mode_classes;
  else
    hard_regno_mode_classes = hard_32bit_mode_classes;

  /* Initialize the array used by REGNO_REG_CLASS.  */
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      if (i < 16 && TARGET_V8PLUS)
        sparc_regno_reg_class[i] = I64_REGS;
      else if (i < 32 || i == FRAME_POINTER_REGNUM)
        sparc_regno_reg_class[i] = GENERAL_REGS;
      else if (i < 64)
        sparc_regno_reg_class[i] = FP_REGS;
      else if (i < 96)
        sparc_regno_reg_class[i] = EXTRA_FP_REGS;
      else if (i < 100)
        sparc_regno_reg_class[i] = FPCC_REGS;
      else
        sparc_regno_reg_class[i] = NO_REGS;
    }
}

/* Compute the frame size required by the function.  This function is called
   during the reload pass and also by sparc_expand_prologue.  */

HOST_WIDE_INT
sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function_p)
{
  int outgoing_args_size = (current_function_outgoing_args_size
                            + REG_PARM_STACK_SPACE (current_function_decl));
  int n_regs = 0;  /* N_REGS is the number of 4-byte regs saved thus far.  */
  int i;

  if (TARGET_ARCH64)
    {
      for (i = 0; i < 8; i++)
        if (regs_ever_live[i] && ! call_used_regs[i])
          n_regs += 2;
    }
  else
    {
      for (i = 0; i < 8; i += 2)
        if ((regs_ever_live[i] && ! call_used_regs[i])
            || (regs_ever_live[i+1] && ! call_used_regs[i+1]))
          n_regs += 2;
    }

  for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
    if ((regs_ever_live[i] && ! call_used_regs[i])
        || (regs_ever_live[i+1] && ! call_used_regs[i+1]))
      n_regs += 2;

  /* Set up values for use in prologue and epilogue.  */
  num_gfregs = n_regs;

  if (leaf_function_p
      && n_regs == 0
      && size == 0
      && current_function_outgoing_args_size == 0)
    actual_fsize = apparent_fsize = 0;
  else
    {
      /* We subtract STARTING_FRAME_OFFSET, remember it's negative.  */
      apparent_fsize = (size - STARTING_FRAME_OFFSET + 7) & -8;
      apparent_fsize += n_regs * 4;
      actual_fsize = apparent_fsize + ((outgoing_args_size + 7) & -8);
    }

  /* Make sure nothing can clobber our register windows.
     If a SAVE must be done, or there is a stack-local variable,
     the register window area must be allocated.  */
  if (! leaf_function_p || size > 0)
    actual_fsize += FIRST_PARM_OFFSET (current_function_decl);

  return SPARC_STACK_ALIGN (actual_fsize);
}
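
/* Worked example (added for illustration, assuming STARTING_FRAME_OFFSET
   is 0 and n_regs == 4): for 20 bytes of locals,

     apparent_fsize = ((20 + 7) & -8) + 4 * 4 = 24 + 16 = 40

   and a non-leaf function additionally adds the rounded outgoing-argument
   block plus FIRST_PARM_OFFSET, the fixed area that keeps the register
   window save area (and, on 32-bit, the hidden struct-return slot) from
   being clobbered; SPARC_STACK_ALIGN then rounds the total up to the
   required stack alignment.  */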

/* Output any necessary .register pseudo-ops.  */

void
sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
{
#ifdef HAVE_AS_REGISTER_PSEUDO_OP
  int i;

  if (TARGET_ARCH32)
    return;

  /* Check if %g[2367] were used without
     .register being printed for them already.  */
  for (i = 2; i < 8; i++)
    {
      if (regs_ever_live [i]
          && ! sparc_hard_reg_printed [i])
        {
          sparc_hard_reg_printed [i] = 1;
          /* %g7 is used as the TLS base register, so use #ignore
             for it instead of #scratch.  */
          fprintf (file, "\t.register\t%%g%d, #%s\n", i,
                   i == 7 ? "ignore" : "scratch");
        }
      if (i == 3) i = 5;
    }
#endif
}

/* Save/restore call-saved registers from LOW to HIGH at BASE+OFFSET
   as needed.  LOW should be double-word aligned for 32-bit registers.
   Return the new OFFSET.  */

#define SORR_SAVE    0
#define SORR_RESTORE 1

static int
save_or_restore_regs (int low, int high, rtx base, int offset, int action)
{
  rtx mem, insn;
  int i;

  if (TARGET_ARCH64 && high <= 32)
    {
      for (i = low; i < high; i++)
        {
          if (regs_ever_live[i] && ! call_used_regs[i])
            {
              mem = gen_rtx_MEM (DImode, plus_constant (base, offset));
              set_mem_alias_set (mem, sparc_sr_alias_set);
              if (action == SORR_SAVE)
                {
                  insn = emit_move_insn (mem, gen_rtx_REG (DImode, i));
                  RTX_FRAME_RELATED_P (insn) = 1;
                }
              else  /* action == SORR_RESTORE */
                emit_move_insn (gen_rtx_REG (DImode, i), mem);
              offset += 8;
            }
        }
    }
  else
    {
      for (i = low; i < high; i += 2)
        {
          bool reg0 = regs_ever_live[i] && ! call_used_regs[i];
          bool reg1 = regs_ever_live[i+1] && ! call_used_regs[i+1];
          enum machine_mode mode;
          int regno;

          if (reg0 && reg1)
            {
              mode = i < 32 ? DImode : DFmode;
              regno = i;
            }
          else if (reg0)
            {
              mode = i < 32 ? SImode : SFmode;
              regno = i;
            }
          else if (reg1)
            {
              mode = i < 32 ? SImode : SFmode;
              regno = i + 1;
              offset += 4;
            }
          else
            continue;

          mem = gen_rtx_MEM (mode, plus_constant (base, offset));
          set_mem_alias_set (mem, sparc_sr_alias_set);
          if (action == SORR_SAVE)
            {
              insn = emit_move_insn (mem, gen_rtx_REG (mode, regno));
              RTX_FRAME_RELATED_P (insn) = 1;
            }
          else  /* action == SORR_RESTORE */
            emit_move_insn (gen_rtx_REG (mode, regno), mem);

          /* Always preserve double-word alignment.  */
          offset = (offset + 7) & -8;
        }
    }

  return offset;
}

/* Emit code to save or restore call-saved registers.  */

static void
emit_save_or_restore_regs (int action)
{
  HOST_WIDE_INT offset;
  rtx base;

  offset = frame_base_offset - apparent_fsize;

  if (offset < -4096 || offset + num_gfregs * 4 > 4095)
    {
      /* ??? This might be optimized a little as %g1 might already have a
         value close enough that a single add insn will do.  */
      /* ??? Although, all of this is probably only a temporary fix
         because if %g1 can hold a function result, then
         sparc_expand_epilogue will lose (the result will be
         clobbered).  */
      base = gen_rtx_REG (Pmode, 1);
      emit_move_insn (base, GEN_INT (offset));
      emit_insn (gen_rtx_SET (VOIDmode,
                              base,
                              gen_rtx_PLUS (Pmode, frame_base_reg, base)));
      offset = 0;
    }
  else
    base = frame_base_reg;

  offset = save_or_restore_regs (0, 8, base, offset, action);
  save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, action);
}

/* Generate a save_register_window insn.  */

static rtx
gen_save_register_window (rtx increment)
{
  if (TARGET_ARCH64)
    return gen_save_register_windowdi (increment);
  else
    return gen_save_register_windowsi (increment);
}

/* Generate an increment for the stack pointer.  */

static rtx
gen_stack_pointer_inc (rtx increment)
{
  return gen_rtx_SET (VOIDmode,
                      stack_pointer_rtx,
                      gen_rtx_PLUS (Pmode,
                                    stack_pointer_rtx,
                                    increment));
}

/* Generate a decrement for the stack pointer.  */

static rtx
gen_stack_pointer_dec (rtx decrement)
{
  return gen_rtx_SET (VOIDmode,
                      stack_pointer_rtx,
                      gen_rtx_MINUS (Pmode,
                                     stack_pointer_rtx,
                                     decrement));
}
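
/* For illustration (not part of the original source): on a 32-bit target,
   gen_stack_pointer_inc (GEN_INT (-4096)) builds the RTL

     (set (reg:SI %sp) (plus:SI (reg:SI %sp) (const_int -4096)))

   which is ultimately rendered as "add %sp, -4096, %sp".  */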

/* Expand the function prologue.  The prologue is responsible for reserving
   storage for the frame, saving the call-saved registers and loading the
   PIC register if needed.  */

void
sparc_expand_prologue (void)
{
  rtx insn;
  int i;

  /* Compute a snapshot of current_function_uses_only_leaf_regs.  Relying
     on the final value of the flag means deferring the prologue/epilogue
     expansion until just before the second scheduling pass, which is too
     late to emit multiple epilogues or return insns.

     Of course we are making the assumption that the value of the flag
     will not change between now and its final value.  Of the three parts
     of the formula, only the last one can reasonably vary.  Let's take a
     closer look, after assuming that the first two are set to true
     (otherwise the last value is effectively silenced).

     If only_leaf_regs_used returns false, the global predicate will also
     be false so the actual frame size calculated below will be positive.
     As a consequence, the save_register_window insn will be emitted in
     the instruction stream; now this insn explicitly references %fp
     which is not a leaf register so only_leaf_regs_used will always
     return false subsequently.

     If only_leaf_regs_used returns true, we hope that the subsequent
     optimization passes won't cause non-leaf registers to pop up.  For
     example, the regrename pass has special provisions to not rename to
     non-leaf registers in a leaf function.  */
  sparc_leaf_function_p
    = optimize > 0 && leaf_function_p () && only_leaf_regs_used ();

  /* Need to use actual_fsize, since we are also allocating
     space for our callee (and our own register save area).  */
  actual_fsize
    = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);

  /* Advertise that the data calculated just above are now valid.  */
  sparc_prologue_data_valid_p = true;

  if (sparc_leaf_function_p)
    {
      frame_base_reg = stack_pointer_rtx;
      frame_base_offset = actual_fsize + SPARC_STACK_BIAS;
    }
  else
    {
      frame_base_reg = hard_frame_pointer_rtx;
      frame_base_offset = SPARC_STACK_BIAS;
    }

  if (actual_fsize == 0)
    /* do nothing.  */ ;
  else if (sparc_leaf_function_p)
    {
      if (actual_fsize <= 4096)
        insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-actual_fsize)));
      else if (actual_fsize <= 8192)
        {
          insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
          /* %sp is still the CFA register.  */
          RTX_FRAME_RELATED_P (insn) = 1;
          insn
            = emit_insn (gen_stack_pointer_inc (GEN_INT (4096-actual_fsize)));
        }
      else
        {
          rtx reg = gen_rtx_REG (Pmode, 1);
          emit_move_insn (reg, GEN_INT (-actual_fsize));
          insn = emit_insn (gen_stack_pointer_inc (reg));
          REG_NOTES (insn) =
            gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
                               gen_stack_pointer_inc (GEN_INT (-actual_fsize)),
                               REG_NOTES (insn));
        }

      RTX_FRAME_RELATED_P (insn) = 1;
    }
  else
    {
      if (actual_fsize <= 4096)
        insn = emit_insn (gen_save_register_window (GEN_INT (-actual_fsize)));
      else if (actual_fsize <= 8192)
        {
          insn = emit_insn (gen_save_register_window (GEN_INT (-4096)));
          /* %sp is not the CFA register anymore.  */
          emit_insn (gen_stack_pointer_inc (GEN_INT (4096-actual_fsize)));
        }
      else
        {
          rtx reg = gen_rtx_REG (Pmode, 1);
          emit_move_insn (reg, GEN_INT (-actual_fsize));
          insn = emit_insn (gen_save_register_window (reg));
        }

      RTX_FRAME_RELATED_P (insn) = 1;
      for (i=0; i < XVECLEN (PATTERN (insn), 0); i++)
        RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, i)) = 1;
    }

  if (num_gfregs)
    emit_save_or_restore_regs (SORR_SAVE);

  /* Load the PIC register if needed.  */
  if (flag_pic && current_function_uses_pic_offset_table)
    load_pic_register (false);
}
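
/* Illustrative sketch (not part of the original source): for a small
   non-leaf frame the window save above comes out as a single instruction,
   e.g.

        save    %sp, -104, %sp

   while a leaf function of the same size merely adjusts the stack pointer
   with "add %sp, -104, %sp".  Frames between 4096 and 8192 bytes use two
   adjustments so that each immediate fits in the 13-bit signed field;
   larger frames go through %g1.  */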

/* This function generates the assembly code for function entry, which boils
   down to emitting the necessary .register directives.  */

static void
sparc_asm_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  /* Check that the assumption we made in sparc_expand_prologue is valid.  */
  gcc_assert (sparc_leaf_function_p == current_function_uses_only_leaf_regs);

  sparc_output_scratch_registers (file);
}

/* Expand the function epilogue, either normal or part of a sibcall.
   We emit all the instructions except the return or the call.  */

void
sparc_expand_epilogue (void)
{
  if (num_gfregs)
    emit_save_or_restore_regs (SORR_RESTORE);

  if (actual_fsize == 0)
    /* do nothing.  */ ;
  else if (sparc_leaf_function_p)
    {
      if (actual_fsize <= 4096)
        emit_insn (gen_stack_pointer_dec (GEN_INT (- actual_fsize)));
      else if (actual_fsize <= 8192)
        {
          emit_insn (gen_stack_pointer_dec (GEN_INT (-4096)));
          emit_insn (gen_stack_pointer_dec (GEN_INT (4096 - actual_fsize)));
        }
      else
        {
          rtx reg = gen_rtx_REG (Pmode, 1);
          emit_move_insn (reg, GEN_INT (-actual_fsize));
          emit_insn (gen_stack_pointer_dec (reg));
        }
    }
}

/* Return true if it is appropriate to emit `return' instructions in the
   body of a function.  */

bool
sparc_can_use_return_insn_p (void)
{
  return sparc_prologue_data_valid_p
         && (actual_fsize == 0 || !sparc_leaf_function_p);
}

/* This function generates the assembly code for function exit.  */

static void
sparc_asm_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  /* If code does not drop into the epilogue, we still have to output
     a dummy nop for the sake of sane backtraces.  Otherwise, if the
     last two instructions of a function were "call foo; dslot;" this
     can make the return PC of foo (i.e. address of call instruction
     plus 8) point to the first instruction in the next function.  */

  rtx insn, last_real_insn;

  insn = get_last_insn ();

  last_real_insn = prev_real_insn (insn);
  if (last_real_insn
      && GET_CODE (last_real_insn) == INSN
      && GET_CODE (PATTERN (last_real_insn)) == SEQUENCE)
    last_real_insn = XVECEXP (PATTERN (last_real_insn), 0, 0);

  if (last_real_insn && GET_CODE (last_real_insn) == CALL_INSN)
    fputs("\tnop\n", file);

  sparc_output_deferred_case_vectors ();
}

/* Output a 'restore' instruction.  */

static void
output_restore (rtx pat)
{
  rtx operands[3];

  if (! pat)
    {
      fputs ("\t restore\n", asm_out_file);
      return;
    }

  gcc_assert (GET_CODE (pat) == SET);

  operands[0] = SET_DEST (pat);
  pat = SET_SRC (pat);

  switch (GET_CODE (pat))
    {
      case PLUS:
        operands[1] = XEXP (pat, 0);
        operands[2] = XEXP (pat, 1);
        output_asm_insn (" restore %r1, %2, %Y0", operands);
        break;
      case LO_SUM:
        operands[1] = XEXP (pat, 0);
        operands[2] = XEXP (pat, 1);
        output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands);
        break;
      case ASHIFT:
        operands[1] = XEXP (pat, 0);
        gcc_assert (XEXP (pat, 1) == const1_rtx);
        output_asm_insn (" restore %r1, %r1, %Y0", operands);
        break;
      default:
        operands[1] = pat;
        output_asm_insn (" restore %%g0, %1, %Y0", operands);
        break;
    }
}

/* Output a return.  */

const char *
output_return (rtx insn)
{
  if (sparc_leaf_function_p)
    {
      /* This is a leaf function so we don't have to bother restoring the
         register window, which frees us from dealing with the convoluted
         semantics of restore/return.  We simply output the jump to the
         return address and the insn in the delay slot (if any).  */

      gcc_assert (! current_function_calls_eh_return);

      return "jmp\t%%o7+%)%#";
    }
  else
    {
      /* This is a regular function so we have to restore the register window.
         We may have a pending insn for the delay slot, which will be either
         combined with the 'restore' instruction or put in the delay slot of
         the 'return' instruction.  */

      if (current_function_calls_eh_return)
        {
          /* If the function uses __builtin_eh_return, the eh_return
             machinery occupies the delay slot.  */
          gcc_assert (! final_sequence);

          if (! flag_delayed_branch)
            fputs ("\tadd\t%fp, %g1, %fp\n", asm_out_file);

          if (TARGET_V9)
            fputs ("\treturn\t%i7+8\n", asm_out_file);
          else
            fputs ("\trestore\n\tjmp\t%o7+8\n", asm_out_file);

          if (flag_delayed_branch)
            fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file);
          else
            fputs ("\t nop\n", asm_out_file);
        }
      else if (final_sequence)
        {
          rtx delay, pat;

          delay = NEXT_INSN (insn);
          gcc_assert (delay);

          pat = PATTERN (delay);

          if (TARGET_V9 && ! epilogue_renumber (&pat, 1))
            {
              epilogue_renumber (&pat, 0);
              return "return\t%%i7+%)%#";
            }
          else
            {
              output_asm_insn ("jmp\t%%i7+%)", NULL);
              output_restore (pat);
              PATTERN (delay) = gen_blockage ();
              INSN_CODE (delay) = -1;
            }
        }
      else
        {
          /* The delay slot is empty.  */
          if (TARGET_V9)
            return "return\t%%i7+%)\n\t nop";
          else if (flag_delayed_branch)
            return "jmp\t%%i7+%)\n\t restore";
          else
            return "restore\n\tjmp\t%%o7+%)\n\t nop";
        }
    }

  return "";
}
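
/* Illustrative note (not part of the original source): the %) output code
   prints the return-address offset, so the empty-delay-slot case above
   typically assembles to

        jmp     %i7+8
         restore

   or, on V9, to "return %i7+8" with a nop in the delay slot.  (On 32-bit,
   the offset is 12 instead of 8 for functions returning a structure, to
   skip the unimp insn after the call.)  */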

/* Output a sibling call.  */

const char *
output_sibcall (rtx insn, rtx call_operand)
{
  rtx operands[1];

  gcc_assert (flag_delayed_branch);

  operands[0] = call_operand;

  if (sparc_leaf_function_p)
    {
      /* This is a leaf function so we don't have to bother restoring the
         register window.  We simply output the jump to the function and
         the insn in the delay slot (if any).  */

      gcc_assert (!(LEAF_SIBCALL_SLOT_RESERVED_P && final_sequence));

      if (final_sequence)
        output_asm_insn ("sethi\t%%hi(%a0), %%g1\n\tjmp\t%%g1 + %%lo(%a0)%#",
                         operands);
      else
        /* Use or with rs2 %%g0 instead of mov, so that as/ld can optimize
           it into a branch if possible.  */
        output_asm_insn ("or\t%%o7, %%g0, %%g1\n\tcall\t%a0, 0\n\t or\t%%g1, %%g0, %%o7",
                         operands);
    }
  else
    {
      /* This is a regular function so we have to restore the register window.
         We may have a pending insn for the delay slot, which will be combined
         with the 'restore' instruction.  */

      output_asm_insn ("call\t%a0, 0", operands);

      if (final_sequence)
        {
          rtx delay = NEXT_INSN (insn);
          gcc_assert (delay);

          output_restore (PATTERN (delay));

          PATTERN (delay) = gen_blockage ();
          INSN_CODE (delay) = -1;
        }
      else
        output_restore (NULL_RTX);
    }

  return "";
}

/* Functions for handling argument passing.

   For 32-bit, the first 6 args are normally in registers and the rest are
   pushed.  Any arg that starts within the first 6 words is at least
   partially passed in a register unless its data type forbids.

   For 64-bit, the argument registers are laid out as an array of 16 elements
   and arguments are added sequentially.  The first 6 int args and up to the
   first 16 fp args (depending on size) are passed in regs.

   Slot    Stack   Integral   Float   Float in structure   Double   Long Double
   ----    -----   --------   -----   ------------------   ------   -----------
    15   [SP+248]              %f31       %f30,%f31         %d30
    14   [SP+240]              %f29       %f28,%f29         %d28       %q28
    13   [SP+232]              %f27       %f26,%f27         %d26
    12   [SP+224]              %f25       %f24,%f25         %d24       %q24
    11   [SP+216]              %f23       %f22,%f23         %d22
    10   [SP+208]              %f21       %f20,%f21         %d20       %q20
     9   [SP+200]              %f19       %f18,%f19         %d18
     8   [SP+192]              %f17       %f16,%f17         %d16       %q16
     7   [SP+184]              %f15       %f14,%f15         %d14
     6   [SP+176]              %f13       %f12,%f13         %d12       %q12
     5   [SP+168]     %o5      %f11       %f10,%f11         %d10
     4   [SP+160]     %o4       %f9        %f8,%f9           %d8        %q8
     3   [SP+152]     %o3       %f7        %f6,%f7           %d6
     2   [SP+144]     %o2       %f5        %f4,%f5           %d4        %q4
     1   [SP+136]     %o1       %f3        %f2,%f3           %d2
     0   [SP+128]     %o0       %f1        %f0,%f1           %d0        %q0

   Here SP = %sp if -mno-stack-bias or %sp+stack_bias otherwise.

   Integral arguments are always passed as 64-bit quantities appropriately
   extended.

   Passing of floating point values is handled as follows.
   If a prototype is in scope:
     If the value is in a named argument (i.e. not a stdarg function or a
     value not part of the `...') then the value is passed in the appropriate
     fp reg.
     If the value is part of the `...' and is passed in one of the first 6
     slots then the value is passed in the appropriate int reg.
     If the value is part of the `...' and is not passed in one of the first 6
     slots then the value is passed in memory.
   If a prototype is not in scope:
     If the value is one of the first 6 arguments the value is passed in the
     appropriate integer reg and the appropriate fp reg.
     If the value is not one of the first 6 arguments the value is passed in
     the appropriate fp reg and in memory.


   Summary of the calling conventions implemented by GCC on SPARC:

   32-bit ABI:
                                size      argument     return value

      small integer              <4       int. reg.      int. reg.
      word                        4       int. reg.      int. reg.
      double word                 8       int. reg.      int. reg.

      _Complex small integer     <8       int. reg.      int. reg.
      _Complex word               8       int. reg.      int. reg.
      _Complex double word       16        memory        int. reg.

      vector integer            <=8       int. reg.       FP reg.
      vector integer             >8        memory         memory

      float                       4       int. reg.       FP reg.
      double                      8       int. reg.       FP reg.
      long double                16        memory         memory

      _Complex float              8        memory         FP reg.
      _Complex double            16        memory         FP reg.
      _Complex long double       32        memory         FP reg.

      vector float              any        memory         memory

      aggregate                 any        memory         memory



    64-bit ABI:
                                size      argument     return value

      small integer              <8       int. reg.      int. reg.
      word                        8       int. reg.      int. reg.
      double word                16       int. reg.      int. reg.

      _Complex small integer    <16       int. reg.      int. reg.
      _Complex word              16       int. reg.      int. reg.
      _Complex double word       32        memory        int. reg.

      vector integer           <=16        FP reg.        FP reg.
      vector integer       16<s<=32        memory         FP reg.
      vector integer            >32        memory         memory

      float                       4        FP reg.        FP reg.
      double                      8        FP reg.        FP reg.
      long double                16        FP reg.        FP reg.

      _Complex float              8        FP reg.        FP reg.
      _Complex double            16        FP reg.        FP reg.
      _Complex long double       32        memory         FP reg.

      vector float             <=16        FP reg.        FP reg.
      vector float         16<s<=32        memory         FP reg.
      vector float              >32        memory         memory

      aggregate                <=16         reg.           reg.
      aggregate            16<s<=32        memory          reg.
      aggregate                 >32        memory         memory



Note #1: complex floating-point types follow the extended SPARC ABIs as
implemented by the Sun compiler.

Note #2: integral vector types follow the scalar floating-point types
conventions to match what is implemented by the Sun VIS SDK.

Note #3: floating-point vector types follow the aggregate types
conventions.  */
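
/* Worked example (added for illustration): for a prototyped call
   f (int a, double b), the 32-bit ABI passes A in %o0 and B in the
   %o1/%o2 register pair, while the 64-bit ABI assigns A to slot 0 (%o0)
   and B to slot 1, i.e. %d2 in the table above.  */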


/* Maximum number of int regs for args.  */
#define SPARC_INT_ARG_MAX 6
/* Maximum number of fp regs for args.  */
#define SPARC_FP_ARG_MAX 16

#define ROUND_ADVANCE(SIZE) (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)

/* Handle the INIT_CUMULATIVE_ARGS macro.
   Initialize a variable CUM of type CUMULATIVE_ARGS
   for a call to a function whose data type is FNTYPE.
   For a library call, FNTYPE is 0.  */

void
init_cumulative_args (struct sparc_args *cum, tree fntype,
                      rtx libname ATTRIBUTE_UNUSED,
                      tree fndecl ATTRIBUTE_UNUSED)
{
  cum->words = 0;
  cum->prototype_p = fntype && TYPE_ARG_TYPES (fntype);
  cum->libcall_p = fntype == 0;
}

/* Handle the TARGET_PROMOTE_PROTOTYPES target hook.
   When a prototype says `char' or `short', really pass an `int'.  */

static bool
sparc_promote_prototypes (tree fntype ATTRIBUTE_UNUSED)
{
  return TARGET_ARCH32 ? true : false;
}

/* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook.  */

static bool
sparc_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
{
  return TARGET_ARCH64 ? true : false;
}

/* Scan the record type TYPE and return the following predicates:
    - INTREGS_P: the record contains at least one field or sub-field
      that is eligible for promotion in integer registers.
    - FP_REGS_P: the record contains at least one field or sub-field
      that is eligible for promotion in floating-point registers.
    - PACKED_P: the record contains at least one field that is packed.

   Sub-fields are not taken into account for the PACKED_P predicate.  */

static void
scan_record_type (tree type, int *intregs_p, int *fpregs_p, int *packed_p)
{
  tree field;

  for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
    {
      if (TREE_CODE (field) == FIELD_DECL)
        {
          if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
            scan_record_type (TREE_TYPE (field), intregs_p, fpregs_p, 0);
          else if ((FLOAT_TYPE_P (TREE_TYPE (field))
                   || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
                  && TARGET_FPU)
            *fpregs_p = 1;
          else
            *intregs_p = 1;

          if (packed_p && DECL_PACKED (field))
            *packed_p = 1;
        }
    }
}

/* Compute the slot number to pass an argument in.
   Return the slot number or -1 if passing on the stack.

   CUM is a variable of type CUMULATIVE_ARGS which gives info about
    the preceding args and about the function being called.
   MODE is the argument's machine mode.
   TYPE is the data type of the argument (as a tree).
    This is null for libcalls where that information may
    not be available.
   NAMED is nonzero if this argument is a named parameter
    (otherwise it is an extra parameter matching an ellipsis).
   INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
   *PREGNO records the register number to use if scalar type.
   *PPADDING records the amount of padding needed in words.  */

static int
function_arg_slotno (const struct sparc_args *cum, enum machine_mode mode,
                     tree type, int named, int incoming_p,
                     int *pregno, int *ppadding)
{
  int regbase = (incoming_p
                 ? SPARC_INCOMING_INT_ARG_FIRST
                 : SPARC_OUTGOING_INT_ARG_FIRST);
  int slotno = cum->words;
  enum mode_class mclass;
  int regno;

  *ppadding = 0;

  if (type && TREE_ADDRESSABLE (type))
    return -1;

  if (TARGET_ARCH32
      && mode == BLKmode
      && type
      && TYPE_ALIGN (type) % PARM_BOUNDARY != 0)
    return -1;

  /* For SPARC64, objects requiring 16-byte alignment get it.  */
  if (TARGET_ARCH64
      && (type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode)) >= 128
      && (slotno & 1) != 0)
    slotno++, *ppadding = 1;

  mclass = GET_MODE_CLASS (mode);
  if (type && TREE_CODE (type) == VECTOR_TYPE)
    {
      /* Vector types deserve special treatment because they are
         polymorphic wrt their mode, depending upon whether VIS
         instructions are enabled.  */
      if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
        {
          /* The SPARC port defines no floating-point vector modes.  */
          gcc_assert (mode == BLKmode);
        }
      else
        {
          /* Integral vector types should either have a vector
             mode or an integral mode, because we are guaranteed
             by pass_by_reference that their size is not greater
             than 16 bytes and TImode is 16-byte wide.  */
          gcc_assert (mode != BLKmode);

          /* Vector integers are handled like floats according to
             the Sun VIS SDK.  */
          mclass = MODE_FLOAT;
        }
    }

  switch (mclass)
    {
    case MODE_FLOAT:
    case MODE_COMPLEX_FLOAT:
      if (TARGET_ARCH64 && TARGET_FPU && named)
        {
          if (slotno >= SPARC_FP_ARG_MAX)
            return -1;
          regno = SPARC_FP_ARG_FIRST + slotno * 2;
          /* Arguments filling only one single FP register are
             right-justified in the outer double FP register.  */
          if (GET_MODE_SIZE (mode) <= 4)
            regno++;
          break;
        }
      /* fallthrough */

    case MODE_INT:
    case MODE_COMPLEX_INT:
      if (slotno >= SPARC_INT_ARG_MAX)
        return -1;
      regno = regbase + slotno;
      break;

    case MODE_RANDOM:
      if (mode == VOIDmode)
        /* MODE is VOIDmode when generating the actual call.  */
        return -1;

      gcc_assert (mode == BLKmode);

      if (TARGET_ARCH32
          || !type
          || (TREE_CODE (type) != VECTOR_TYPE
              && TREE_CODE (type) != RECORD_TYPE))
        {
          if (slotno >= SPARC_INT_ARG_MAX)
            return -1;
          regno = regbase + slotno;
        }
      else  /* TARGET_ARCH64 && type */
        {
          int intregs_p = 0, fpregs_p = 0, packed_p = 0;

          /* First see what kinds of registers we would need.  */
          if (TREE_CODE (type) == VECTOR_TYPE)
            fpregs_p = 1;
          else
            scan_record_type (type, &intregs_p, &fpregs_p, &packed_p);

          /* The ABI obviously doesn't specify how packed structures
             are passed.  These are defined to be passed in int regs
             if possible, otherwise memory.  */
          if (packed_p || !named)
            fpregs_p = 0, intregs_p = 1;

          /* If all arg slots are filled, then must pass on stack.  */
          if (fpregs_p && slotno >= SPARC_FP_ARG_MAX)
            return -1;

          /* If there are only int args and all int arg slots are filled,
             then must pass on stack.  */
          if (!fpregs_p && intregs_p && slotno >= SPARC_INT_ARG_MAX)
            return -1;

          /* Note that even if all int arg slots are filled, fp members may
             still be passed in regs if such regs are available.
             *PREGNO isn't set because there may be more than one, it's up
             to the caller to compute them.  */
          return slotno;
        }
      break;

    default:
      gcc_unreachable ();
    }

  *pregno = regno;
  return slotno;
}
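
/* Worked example (added for illustration, assuming SPARC_FP_ARG_FIRST is
   hard register 32, i.e. %f0): a named float in 64-bit slot 2 first gets
   regno = 32 + 2*2 = 36 (%f4); being a 4-byte value, it is then
   right-justified to regno 37 (%f5), matching the %f5 entry for slot 2
   in the calling-convention table above.  */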
4610
 
4611
/* Handle recursive register counting for structure field layout.  */
4612
 
4613
struct function_arg_record_value_parms
4614
{
4615
  rtx ret;              /* return expression being built.  */
4616
  int slotno;           /* slot number of the argument.  */
4617
  int named;            /* whether the argument is named.  */
4618
  int regbase;          /* regno of the base register.  */
4619
  int stack;            /* 1 if part of the argument is on the stack.  */
4620
  int intoffset;        /* offset of the first pending integer field.  */
4621
  unsigned int nregs;   /* number of words passed in registers.  */
4622
};
4623
 
4624
static void function_arg_record_value_3
4625
 (HOST_WIDE_INT, struct function_arg_record_value_parms *);
4626
static void function_arg_record_value_2
4627
 (tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
4628
static void function_arg_record_value_1
4629
 (tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
4630
static rtx function_arg_record_value (tree, enum machine_mode, int, int, int);
4631
static rtx function_arg_union_value (int, enum machine_mode, int, int);
4632
 
4633
/* A subroutine of function_arg_record_value.  Traverse the structure
4634
   recursively and determine how many registers will be required.  */
4635
 
4636
static void
4637
function_arg_record_value_1 (tree type, HOST_WIDE_INT startbitpos,
4638
                             struct function_arg_record_value_parms *parms,
4639
                             bool packed_p)
4640
{
4641
  tree field;
4642
 
4643
  /* We need to compute how many registers are needed so we can
4644
     allocate the PARALLEL but before we can do that we need to know
4645
     whether there are any packed fields.  The ABI obviously doesn't
4646
     specify how structures are passed in this case, so they are
4647
     defined to be passed in int regs if possible, otherwise memory,
4648
     regardless of whether there are fp values present.  */
4649
 
4650
  if (! packed_p)
4651
    for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4652
      {
4653
        if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
4654
          {
4655
            packed_p = true;
4656
            break;
4657
          }
4658
      }
4659
 
4660
  /* Compute how many registers we need.  */
4661
  for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4662
    {
4663
      if (TREE_CODE (field) == FIELD_DECL)
4664
        {
4665
          HOST_WIDE_INT bitpos = startbitpos;
4666
 
4667
          if (DECL_SIZE (field) != 0)
4668
            {
4669
              if (integer_zerop (DECL_SIZE (field)))
4670
                continue;
4671
 
4672
              if (host_integerp (bit_position (field), 1))
4673
                bitpos += int_bit_position (field);
4674
            }
4675
 
4676
          /* ??? FIXME: else assume zero offset.  */
4677
 
4678
          if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4679
            function_arg_record_value_1 (TREE_TYPE (field),
4680
                                         bitpos,
4681
                                         parms,
4682
                                         packed_p);
4683
          else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4684
                    || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4685
                   && TARGET_FPU
4686
                   && parms->named
4687
                   && ! packed_p)
4688
            {
4689
              if (parms->intoffset != -1)
4690
                {
4691
                  unsigned int startbit, endbit;
4692
                  int intslots, this_slotno;
4693
 
4694
                  startbit = parms->intoffset & -BITS_PER_WORD;
4695
                  endbit   = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
4696
 
4697
                  intslots = (endbit - startbit) / BITS_PER_WORD;
4698
                  this_slotno = parms->slotno + parms->intoffset
4699
                    / BITS_PER_WORD;
4700
 
4701
                  if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
4702
                    {
4703
                      intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
4704
                      /* We need to pass this field on the stack.  */
4705
                      parms->stack = 1;
4706
                    }
4707
 
4708
                  parms->nregs += intslots;
4709
                  parms->intoffset = -1;
4710
                }
4711
 
4712
              /* There's no need to check this_slotno < SPARC_FP_ARG_MAX.
4713
                 If it wasn't true we wouldn't be here.  */
4714
              if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
4715
                  && DECL_MODE (field) == BLKmode)
4716
                parms->nregs += TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
4717
              else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
4718
                parms->nregs += 2;
4719
              else
4720
                parms->nregs += 1;
4721
            }
4722
          else
4723
            {
4724
              if (parms->intoffset == -1)
4725
                parms->intoffset = bitpos;
4726
            }
4727
        }
4728
    }
4729
}
4730
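/* Worked example of the slot arithmetic above (added for illustration,
   assuming TARGET_ARCH64 where BITS_PER_WORD is 64): for a record whose
   pending integer fields start at bit 0 and whose first FP field sits at
   bit 96, the code computes

     startbit = 0 & -64         =   0
     endbit   = (96 + 63) & -64 = 128
     intslots = (128 - 0) / 64  =   2

   so two integer argument slots are counted before the FP field claims
   its own register.  */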
 
4731
/* A subroutine of function_arg_record_value.  Assign the bits of the
4732
   structure between parms->intoffset and bitpos to integer registers.  */
4733
 
4734
static void
4735
function_arg_record_value_3 (HOST_WIDE_INT bitpos,
4736
                             struct function_arg_record_value_parms *parms)
4737
{
4738
  enum machine_mode mode;
4739
  unsigned int regno;
4740
  unsigned int startbit, endbit;
4741
  int this_slotno, intslots, intoffset;
4742
  rtx reg;
4743
 
4744
  if (parms->intoffset == -1)
4745
    return;
4746
 
4747
  intoffset = parms->intoffset;
4748
  parms->intoffset = -1;
4749
 
4750
  startbit = intoffset & -BITS_PER_WORD;
4751
  endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
4752
  intslots = (endbit - startbit) / BITS_PER_WORD;
4753
  this_slotno = parms->slotno + intoffset / BITS_PER_WORD;
4754
 
4755
  intslots = MIN (intslots, SPARC_INT_ARG_MAX - this_slotno);
4756
  if (intslots <= 0)
4757
    return;
4758
 
4759
  /* If this is the trailing part of a word, only load that much into
4760
     the register.  Otherwise load the whole register.  Note that in
4761
     the latter case we may pick up unwanted bits.  It's not a problem
4762
     at the moment, but we may wish to revisit it.  */
4763
 
4764
  if (intoffset % BITS_PER_WORD != 0)
4765
    mode = smallest_mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
4766
                                   MODE_INT);
4767
  else
4768
    mode = word_mode;
4769
 
4770
  intoffset /= BITS_PER_UNIT;
4771
  do
4772
    {
4773
      regno = parms->regbase + this_slotno;
4774
      reg = gen_rtx_REG (mode, regno);
4775
      XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
4776
        = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
4777
 
4778
      this_slotno += 1;
4779
      intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
4780
      mode = word_mode;
4781
      parms->nregs += 1;
4782
      intslots -= 1;
4783
    }
4784
  while (intslots > 0);
4785
}
4786
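/* Illustrative note on the loop above (an added sketch, not from the
   original): with 64-bit words, a pending integer run that starts at bit
   offset 32 within its word gets a first register in the 32-bit mode
   returned by smallest_mode_for_size; intoffset (now in bytes) is then
   rounded up to the next word boundary by (intoffset | (UNITS_PER_WORD-1))
   + 1, and every remaining slot is filled with a full word_mode
   register.  */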
 
4787
/* A subroutine of function_arg_record_value.  Traverse the structure
4788
   recursively and assign bits to floating point registers.  Track which
4789
   bits in between need integer registers; invoke function_arg_record_value_3
4790
   to make that happen.  */
4791
 
4792
static void
4793
function_arg_record_value_2 (tree type, HOST_WIDE_INT startbitpos,
4794
                             struct function_arg_record_value_parms *parms,
4795
                             bool packed_p)
4796
{
4797
  tree field;
4798
 
4799
  if (! packed_p)
4800
    for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4801
      {
4802
        if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
4803
          {
4804
            packed_p = true;
4805
            break;
4806
          }
4807
      }
4808
 
4809
  for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4810
    {
4811
      if (TREE_CODE (field) == FIELD_DECL)
4812
        {
4813
          HOST_WIDE_INT bitpos = startbitpos;
4814
 
4815
          if (DECL_SIZE (field) != 0)
4816
            {
4817
              if (integer_zerop (DECL_SIZE (field)))
4818
                continue;
4819
 
4820
              if (host_integerp (bit_position (field), 1))
4821
                bitpos += int_bit_position (field);
4822
            }
4823
 
4824
          /* ??? FIXME: else assume zero offset.  */
4825
 
4826
          if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4827
            function_arg_record_value_2 (TREE_TYPE (field),
4828
                                         bitpos,
4829
                                         parms,
4830
                                         packed_p);
4831
          else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4832
                    || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4833
                   && TARGET_FPU
4834
                   && parms->named
4835
                   && ! packed_p)
4836
            {
4837
              int this_slotno = parms->slotno + bitpos / BITS_PER_WORD;
4838
              int regno, nregs, pos;
4839
              enum machine_mode mode = DECL_MODE (field);
4840
              rtx reg;
4841
 
4842
              function_arg_record_value_3 (bitpos, parms);
4843
 
4844
              if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
4845
                  && mode == BLKmode)
4846
                {
4847
                  mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
4848
                  nregs = TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
4849
                }
4850
              else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
4851
                {
4852
                  mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
4853
                  nregs = 2;
4854
                }
4855
              else
4856
                nregs = 1;
4857
 
4858
              regno = SPARC_FP_ARG_FIRST + this_slotno * 2;
4859
              if (GET_MODE_SIZE (mode) <= 4 && (bitpos & 32) != 0)
4860
                regno++;
4861
              reg = gen_rtx_REG (mode, regno);
4862
              pos = bitpos / BITS_PER_UNIT;
4863
              XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
4864
                = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
4865
              parms->nregs += 1;
4866
              while (--nregs > 0)
4867
                {
4868
                  regno += GET_MODE_SIZE (mode) / 4;
4869
                  reg = gen_rtx_REG (mode, regno);
4870
                  pos += GET_MODE_SIZE (mode);
4871
                  XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
4872
                    = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
4873
                  parms->nregs += 1;
4874
                }
4875
            }
4876
          else
4877
            {
4878
              if (parms->intoffset == -1)
4879
                parms->intoffset = bitpos;
4880
            }
4881
        }
4882
    }
4883
}
4884
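/* Note on the regno computation above (illustrative; assumes the V9
   convention that each 8-byte argument slot maps to a pair of 4-byte FP
   registers, which is why this_slotno is scaled by 2): a single-precision
   field in the upper half of its slot has (bitpos & 32) set and lands in
   the odd register of the pair, e.g. a float at byte offset 4 of slot 0
   is passed in %f1 rather than %f0.  */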
 
4885
/* Used by function_arg and function_value to implement the complex
4886
   conventions of the 64-bit ABI for passing and returning structures.
4887
   Return an expression valid as a return value for the two macros
4888
   FUNCTION_ARG and FUNCTION_VALUE.
4889
 
4890
   TYPE is the data type of the argument (as a tree).
4891
    This is null for libcalls where that information may
4892
    not be available.
4893
   MODE is the argument's machine mode.
4894
   SLOTNO is the index number of the argument's slot in the parameter array.
4895
   NAMED is nonzero if this argument is a named parameter
4896
    (otherwise it is an extra parameter matching an ellipsis).
4897
   REGBASE is the regno of the base register for the parameter array.  */
4898
 
4899
static rtx
4900
function_arg_record_value (tree type, enum machine_mode mode,
4901
                           int slotno, int named, int regbase)
4902
{
4903
  HOST_WIDE_INT typesize = int_size_in_bytes (type);
4904
  struct function_arg_record_value_parms parms;
4905
  unsigned int nregs;
4906
 
4907
  parms.ret = NULL_RTX;
4908
  parms.slotno = slotno;
4909
  parms.named = named;
4910
  parms.regbase = regbase;
4911
  parms.stack = 0;
4912
 
4913
  /* Compute how many registers we need.  */
4914
  parms.nregs = 0;
4915
  parms.intoffset = 0;
4916
  function_arg_record_value_1 (type, 0, &parms, false);
4917
 
4918
  /* Take into account pending integer fields.  */
4919
  if (parms.intoffset != -1)
4920
    {
4921
      unsigned int startbit, endbit;
4922
      int intslots, this_slotno;
4923
 
4924
      startbit = parms.intoffset & -BITS_PER_WORD;
4925
      endbit = (typesize*BITS_PER_UNIT + BITS_PER_WORD - 1) & -BITS_PER_WORD;
4926
      intslots = (endbit - startbit) / BITS_PER_WORD;
4927
      this_slotno = slotno + parms.intoffset / BITS_PER_WORD;
4928
 
4929
      if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
4930
        {
4931
          intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
4932
          /* We need to pass this field on the stack.  */
4933
          parms.stack = 1;
4934
        }
4935
 
4936
      parms.nregs += intslots;
4937
    }
4938
  nregs = parms.nregs;
4939
 
4940
  /* Allocate the vector and handle some annoying special cases.  */
4941
  if (nregs == 0)
4942
    {
4943
      /* ??? Empty structure has no value?  Duh?  */
4944
      if (typesize <= 0)
4945
        {
4946
          /* Though there's nothing really to store, return a word register
4947
             anyway so the rest of gcc doesn't go nuts.  Returning a PARALLEL
4948
             leads to breakage due to the fact that there are zero bytes to
4949
             load.  */
4950
          return gen_rtx_REG (mode, regbase);
4951
        }
4952
      else
4953
        {
4954
          /* ??? C++ has structures with no fields, and yet a size.  Give up
4955
             for now and pass everything back in integer registers.  */
4956
          nregs = (typesize + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
4957
        }
4958
      if (nregs + slotno > SPARC_INT_ARG_MAX)
4959
        nregs = SPARC_INT_ARG_MAX - slotno;
4960
    }
4961
  gcc_assert (nregs != 0);
4962
 
4963
  parms.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (parms.stack + nregs));
4964
 
4965
  /* If at least one field must be passed on the stack, generate
4966
     (parallel [(expr_list (nil) ...) ...]) so that all fields will
4967
     also be passed on the stack.  We can't do much better because the
4968
     semantics of TARGET_ARG_PARTIAL_BYTES doesn't handle the case
4969
     of structures for which the fields passed exclusively in registers
4970
     are not at the beginning of the structure.  */
4971
  if (parms.stack)
4972
    XVECEXP (parms.ret, 0, 0)
4973
      = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
4974
 
4975
  /* Fill in the entries.  */
4976
  parms.nregs = 0;
4977
  parms.intoffset = 0;
4978
  function_arg_record_value_2 (type, 0, &parms, false);
4979
  function_arg_record_value_3 (typesize * BITS_PER_UNIT, &parms);
4980
 
4981
  gcc_assert (parms.nregs == nregs);
4982
 
4983
  return parms.ret;
4984
}
4985
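/* Illustrative sketch of the result (added; the exact RTL shape is an
   assumption, not quoted from elsewhere in the source): on TARGET_ARCH64,
   passing  struct { double d; long l; }  in slot 0 of an outgoing call
   yields roughly

     (parallel [(expr_list (reg:DF %f0) (const_int 0))
                (expr_list (reg:DI %o1) (const_int 8))])

   i.e. the FP member travels in an FP register while the trailing
   integer member uses the integer register of its slot, each tagged with
   its byte offset into the structure.  */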
 
4986
/* Used by function_arg and function_value to implement the conventions
4987
   of the 64-bit ABI for passing and returning unions.
4988
   Return an expression valid as a return value for the two macros
4989
   FUNCTION_ARG and FUNCTION_VALUE.
4990
 
4991
   SIZE is the size in bytes of the union.
4992
   MODE is the argument's machine mode.
4993
   SLOTNO is the index number of the argument's slot in the parameter array.
   REGNO is the hard register the union will be passed in.  */
4994
 
4995
static rtx
4996
function_arg_union_value (int size, enum machine_mode mode, int slotno,
4997
                          int regno)
4998
{
4999
  int nwords = ROUND_ADVANCE (size), i;
5000
  rtx regs;
5001
 
5002
  /* See comment in previous function for empty structures.  */
5003
  if (nwords == 0)
5004
    return gen_rtx_REG (mode, regno);
5005
 
5006
  if (slotno == SPARC_INT_ARG_MAX - 1)
5007
    nwords = 1;
5008
 
5009
  regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));
5010
 
5011
  for (i = 0; i < nwords; i++)
5012
    {
5013
      /* Unions are passed left-justified.  */
5014
      XVECEXP (regs, 0, i)
5015
        = gen_rtx_EXPR_LIST (VOIDmode,
5016
                             gen_rtx_REG (word_mode, regno),
5017
                             GEN_INT (UNITS_PER_WORD * i));
5018
      regno++;
5019
    }
5020
 
5021
  return regs;
5022
}
5023
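/* Sketch of the result (an illustrative assumption): a 16-byte union
   starting in slot 0 of an outgoing call becomes

     (parallel [(expr_list (reg:DI %o0) (const_int 0))
                (expr_list (reg:DI %o1) (const_int 8))])

   passed left-justified, with the special case above trimming it to a
   single word when the union begins in the last register slot.  */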
 
5024
/* Used by function_arg and function_value to implement the conventions
5025
   for passing and returning large (BLKmode) vectors.
5026
   Return an expression valid as a return value for the two macros
5027
   FUNCTION_ARG and FUNCTION_VALUE.
5028
 
5029
   SIZE is the size in bytes of the vector.
5030
   BASE_MODE is the argument's base machine mode.
5031
   REGNO is the FP hard register the vector will be passed in.  */
5032
 
5033
static rtx
5034
function_arg_vector_value (int size, enum machine_mode base_mode, int regno)
5035
{
5036
  unsigned short base_mode_size = GET_MODE_SIZE (base_mode);
5037
  int nregs = size / base_mode_size, i;
5038
  rtx regs;
5039
 
5040
  regs = gen_rtx_PARALLEL (BLKmode, rtvec_alloc (nregs));
5041
 
5042
  for (i = 0; i < nregs; i++)
5043
    {
5044
      XVECEXP (regs, 0, i)
5045
        = gen_rtx_EXPR_LIST (VOIDmode,
5046
                             gen_rtx_REG (base_mode, regno),
5047
                             GEN_INT (base_mode_size * i));
5048
      regno += base_mode_size / 4;
5049
    }
5050
 
5051
  return regs;
5052
}
5053
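/* Sketch of the result (an illustrative assumption): an 8-byte vector of
   two SFmode elements passed starting at %f0 becomes

     (parallel:BLK [(expr_list (reg:SF %f0) (const_int 0))
                    (expr_list (reg:SF %f1) (const_int 4))])

   since regno advances by base_mode_size / 4, i.e. one 4-byte FP
   register per element here.  */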
 
5054
/* Handle the FUNCTION_ARG macro.
5055
   Determine where to put an argument to a function.
5056
   Value is zero to push the argument on the stack,
5057
   or a hard register in which to store the argument.
5058
 
5059
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
5060
    the preceding args and about the function being called.
5061
   MODE is the argument's machine mode.
5062
   TYPE is the data type of the argument (as a tree).
5063
    This is null for libcalls where that information may
5064
    not be available.
5065
   NAMED is nonzero if this argument is a named parameter
5066
    (otherwise it is an extra parameter matching an ellipsis).
5067
   INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.  */
5068
 
5069
rtx
5070
function_arg (const struct sparc_args *cum, enum machine_mode mode,
5071
              tree type, int named, int incoming_p)
5072
{
5073
  int regbase = (incoming_p
5074
                 ? SPARC_INCOMING_INT_ARG_FIRST
5075
                 : SPARC_OUTGOING_INT_ARG_FIRST);
5076
  int slotno, regno, padding;
5077
  enum mode_class mclass = GET_MODE_CLASS (mode);
5078
 
5079
  slotno = function_arg_slotno (cum, mode, type, named, incoming_p,
5080
                                &regno, &padding);
5081
  if (slotno == -1)
5082
    return 0;
5083
 
5084
  /* Vector types deserve special treatment because they are polymorphic wrt
5085
     their mode, depending upon whether VIS instructions are enabled.  */
5086
  if (type && TREE_CODE (type) == VECTOR_TYPE)
5087
    {
5088
      HOST_WIDE_INT size = int_size_in_bytes (type);
5089
      gcc_assert ((TARGET_ARCH32 && size <= 8)
5090
                  || (TARGET_ARCH64 && size <= 16));
5091
 
5092
      if (mode == BLKmode)
5093
        return function_arg_vector_value (size,
5094
                                          TYPE_MODE (TREE_TYPE (type)),
5095
                                          SPARC_FP_ARG_FIRST + 2*slotno);
5096
      else
5097
        mclass = MODE_FLOAT;
5098
    }
5099
 
5100
  if (TARGET_ARCH32)
5101
    return gen_rtx_REG (mode, regno);
5102
 
5103
  /* Structures up to 16 bytes in size are passed in arg slots on the stack
5104
     and are promoted to registers if possible.  */
5105
  if (type && TREE_CODE (type) == RECORD_TYPE)
5106
    {
5107
      HOST_WIDE_INT size = int_size_in_bytes (type);
5108
      gcc_assert (size <= 16);
5109
 
5110
      return function_arg_record_value (type, mode, slotno, named, regbase);
5111
    }
5112
 
5113
  /* Unions up to 16 bytes in size are passed in integer registers.  */
5114
  else if (type && TREE_CODE (type) == UNION_TYPE)
5115
    {
5116
      HOST_WIDE_INT size = int_size_in_bytes (type);
5117
      gcc_assert (size <= 16);
5118
 
5119
      return function_arg_union_value (size, mode, slotno, regno);
5120
    }
5121
 
5122
  /* v9 fp args in reg slots beyond the int reg slots get passed in regs
5123
     but also have the slot allocated for them.
5124
     If no prototype is in scope, fp values in register slots get passed
5125
     in two places: either fp regs and int regs, or fp regs and memory.  */
5126
  else if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
5127
           && SPARC_FP_REG_P (regno))
5128
    {
5129
      rtx reg = gen_rtx_REG (mode, regno);
5130
      if (cum->prototype_p || cum->libcall_p)
5131
        {
5132
          /* "* 2" because fp reg numbers are recorded in 4 byte
5133
             quantities.  */
5134
#if 0
5135
          /* ??? This will cause the value to be passed in the fp reg and
5136
             in the stack.  When a prototype exists we want to pass the
5137
             value in the reg but reserve space on the stack.  That's an
5138
             optimization, and is deferred [for a bit].  */
5139
          if ((regno - SPARC_FP_ARG_FIRST) >= SPARC_INT_ARG_MAX * 2)
5140
            return gen_rtx_PARALLEL (mode,
5141
                            gen_rtvec (2,
5142
                                       gen_rtx_EXPR_LIST (VOIDmode,
5143
                                                NULL_RTX, const0_rtx),
5144
                                       gen_rtx_EXPR_LIST (VOIDmode,
5145
                                                reg, const0_rtx)));
5146
          else
5147
#else
5148
          /* ??? It seems that passing back a register even when past
5149
             the area declared by REG_PARM_STACK_SPACE will allocate
5150
             space appropriately, and will not copy the data onto the
5151
             stack, exactly as we desire.
5152
 
5153
             This is due to locate_and_pad_parm being called in
5154
             expand_call whenever reg_parm_stack_space > 0, which
5155
             while beneficial to our example here, would seem to be
5156
             in error from what had been intended.  Ho hum...  -- r~ */
5157
#endif
5158
            return reg;
5159
        }
5160
      else
5161
        {
5162
          rtx v0, v1;
5163
 
5164
          if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
5165
            {
5166
              int intreg;
5167
 
5168
              /* On incoming, we don't need to know that the value
5169
                 is passed in %f0 and %i0, and it confuses other parts
5170
                 causing needless spillage even on the simplest cases.  */
5171
              if (incoming_p)
5172
                return reg;
5173
 
5174
              intreg = (SPARC_OUTGOING_INT_ARG_FIRST
5175
                        + (regno - SPARC_FP_ARG_FIRST) / 2);
5176
 
5177
              v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5178
              v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
5179
                                      const0_rtx);
5180
              return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5181
            }
5182
          else
5183
            {
5184
              v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5185
              v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5186
              return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5187
            }
5188
        }
5189
    }
5190
 
5191
  /* All other aggregate types are passed in an integer register in a mode
5192
     corresponding to the size of the type.  */
5193
  else if (type && AGGREGATE_TYPE_P (type))
5194
    {
5195
      HOST_WIDE_INT size = int_size_in_bytes (type);
5196
      gcc_assert (size <= 16);
5197
 
5198
      mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
5199
    }
5200
 
5201
  return gen_rtx_REG (mode, regno);
5202
}
5203
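/* Illustration of the unprototyped FP case above (an added sketch): a
   double in slot 0 with no prototype in scope is emitted as

     (parallel [(expr_list (reg:DF %f0) (const_int 0))
                (expr_list (reg:DF %o0) (const_int 0))])

   so the value is passed both in the FP register and in the integer
   register, whichever representation the callee turns out to read.  */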
 
5204
/* For an arg passed partly in registers and partly in memory,
5205
   this is the number of bytes of registers used.
5206
   For args passed entirely in registers or entirely in memory, zero.
5207
 
5208
   Any arg that starts in the first 6 regs but won't entirely fit in them
5209
   needs partial registers on v8.  On v9, structures with integer
5210
   values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
5211
   values that begin in the last fp reg [where "last fp reg" varies with the
5212
   mode] will be split between that reg and memory.  */
5213
 
5214
static int
5215
sparc_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5216
                         tree type, bool named)
5217
{
5218
  int slotno, regno, padding;
5219
 
5220
  /* We pass 0 for incoming_p here; it doesn't matter.  */
5221
  slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
5222
 
5223
  if (slotno == -1)
5224
    return 0;
5225
 
5226
  if (TARGET_ARCH32)
5227
    {
5228
      if ((slotno + (mode == BLKmode
5229
                     ? ROUND_ADVANCE (int_size_in_bytes (type))
5230
                     : ROUND_ADVANCE (GET_MODE_SIZE (mode))))
5231
          > SPARC_INT_ARG_MAX)
5232
        return (SPARC_INT_ARG_MAX - slotno) * UNITS_PER_WORD;
5233
    }
5234
  else
5235
    {
5236
      /* We are guaranteed by pass_by_reference that the size of the
5237
         argument is not greater than 16 bytes, so we only need to return
5238
         one word if the argument is partially passed in registers.  */
5239
 
5240
      if (type && AGGREGATE_TYPE_P (type))
5241
        {
5242
          int size = int_size_in_bytes (type);
5243
 
5244
          if (size > UNITS_PER_WORD
5245
              && slotno == SPARC_INT_ARG_MAX - 1)
5246
            return UNITS_PER_WORD;
5247
        }
5248
      else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
5249
               || (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
5250
                   && ! (TARGET_FPU && named)))
5251
        {
5252
          /* The complex types are passed as packed types.  */
5253
          if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
5254
              && slotno == SPARC_INT_ARG_MAX - 1)
5255
            return UNITS_PER_WORD;
5256
        }
5257
      else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5258
        {
5259
          if ((slotno + GET_MODE_SIZE (mode) / UNITS_PER_WORD)
5260
              > SPARC_FP_ARG_MAX)
5261
            return UNITS_PER_WORD;
5262
        }
5263
    }
5264
 
5265
  return 0;
5266
}
5267
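/* Worked example (illustrative): on TARGET_ARCH32 a DImode argument
   assigned to slot 5 needs slots 5 and 6, but only SPARC_INT_ARG_MAX (6)
   slots exist, so the function returns (6 - 5) * UNITS_PER_WORD = 4:
   one word goes in %o5 and the remaining 4 bytes spill to the stack.  */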
 
5268
/* Handle the TARGET_PASS_BY_REFERENCE target hook.
5269
   Specify whether to pass the argument by reference.  */
5270
 
5271
static bool
5272
sparc_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5273
                         enum machine_mode mode, tree type,
5274
                         bool named ATTRIBUTE_UNUSED)
5275
{
5276
  if (TARGET_ARCH32)
5277
    /* Original SPARC 32-bit ABI says that structures and unions,
5278
       and quad-precision floats are passed by reference.  For Pascal,
5279
       also pass arrays by reference.  All other base types are passed
5280
       in registers.
5281
 
5282
       Extended ABI (as implemented by the Sun compiler) says that all
5283
       complex floats are passed by reference.  Pass complex integers
5284
       in registers up to 8 bytes.  More generally, enforce the 2-word
5285
       cap for passing arguments in registers.
5286
 
5287
       Vector ABI (as implemented by the Sun VIS SDK) says that vector
5288
       integers are passed like floats of the same size, that is in
5289
       registers up to 8 bytes.  Pass all vector floats by reference
5290
       like structure and unions.  */
5291
    return ((type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
5292
            || mode == SCmode
5293
            /* Catch CDImode, TFmode, DCmode and TCmode.  */
5294
            || GET_MODE_SIZE (mode) > 8
5295
            || (type
5296
                && TREE_CODE (type) == VECTOR_TYPE
5297
                && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
5298
  else
5299
    /* Original SPARC 64-bit ABI says that structures and unions
5300
       smaller than 16 bytes are passed in registers, as well as
5301
       all other base types.
5302
 
5303
       Extended ABI (as implemented by the Sun compiler) says that
5304
       complex floats are passed in registers up to 16 bytes.  Pass
5305
       all complex integers in registers up to 16 bytes.  More generally,
5306
       enforce the 2-word cap for passing arguments in registers.
5307
 
5308
       Vector ABI (as implemented by the Sun VIS SDK) says that vector
5309
       integers are passed like floats of the same size, that is in
5310
       registers (up to 16 bytes).  Pass all vector floats like structure
5311
       and unions.  */
5312
    return ((type
5313
             && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == VECTOR_TYPE)
5314
             && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
5315
            /* Catch CTImode and TCmode.  */
5316
            || GET_MODE_SIZE (mode) > 16);
5317
}
5318
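/* Quick examples of the rules above (added for clarity): on TARGET_ARCH32
   every aggregate, every _Complex float (SCmode) and anything wider than
   8 bytes (TFmode, CDImode, DCmode, TCmode) goes by reference, while a
   plain long long still travels in registers; on TARGET_ARCH64 only
   aggregates and vectors larger than 16 bytes, or modes wider than 16
   bytes (CTImode, TCmode), are passed by reference.  */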
 
5319
/* Handle the FUNCTION_ARG_ADVANCE macro.
5320
   Update the data in CUM to advance over an argument
5321
   of mode MODE and data type TYPE.
5322
   TYPE is null for libcalls where that information may not be available.  */
5323
 
5324
void
5325
function_arg_advance (struct sparc_args *cum, enum machine_mode mode,
5326
                      tree type, int named)
5327
{
5328
  int slotno, regno, padding;
5329
 
5330
  /* We pass 0 for incoming_p here; it doesn't matter.  */
5331
  slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
5332
 
5333
  /* If the argument required leading padding, add it.  */
5334
  if (slotno != -1)
5335
    cum->words += padding;
5336
 
5337
  if (TARGET_ARCH32)
5338
    {
5339
      cum->words += (mode != BLKmode
5340
                     ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5341
                     : ROUND_ADVANCE (int_size_in_bytes (type)));
5342
    }
5343
  else
5344
    {
5345
      if (type && AGGREGATE_TYPE_P (type))
5346
        {
5347
          int size = int_size_in_bytes (type);
5348
 
5349
          if (size <= 8)
5350
            ++cum->words;
5351
          else if (size <= 16)
5352
            cum->words += 2;
5353
          else /* passed by reference */
5354
            ++cum->words;
5355
        }
5356
      else
5357
        {
5358
          cum->words += (mode != BLKmode
5359
                         ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5360
                         : ROUND_ADVANCE (int_size_in_bytes (type)));
5361
        }
5362
    }
5363
}
5364
 
5365
/* Handle the FUNCTION_ARG_PADDING macro.
5366
   For the 64-bit ABI, structs are always stored left-justified in their
5367
   argument slot.  */
5368
 
5369
enum direction
5370
function_arg_padding (enum machine_mode mode, tree type)
5371
{
5372
  if (TARGET_ARCH64 && type != 0 && AGGREGATE_TYPE_P (type))
5373
    return upward;
5374
 
5375
  /* Fall back to the default.  */
5376
  return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
5377
}
5378
 
5379
/* Handle the TARGET_RETURN_IN_MEMORY target hook.
5380
   Specify whether to return the return value in memory.  */
5381
 
5382
static bool
5383
sparc_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED)
5384
{
5385
  if (TARGET_ARCH32)
5386
    /* Original SPARC 32-bit ABI says that structures and unions,
5387
       and quad-precision floats are returned in memory.  All other
5388
       base types are returned in registers.
5389
 
5390
       Extended ABI (as implemented by the Sun compiler) says that
5391
       all complex floats are returned in registers (8 FP registers
5392
       at most for '_Complex long double').  Return all complex integers
5393
       in registers (4 at most for '_Complex long long').
5394
 
5395
       Vector ABI (as implemented by the Sun VIS SDK) says that vector
5396
       integers are returned like floats of the same size, that is in
5397
       registers up to 8 bytes and in memory otherwise.  Return all
5398
       vector floats in memory like structure and unions; note that
5399
       they always have BLKmode like the latter.  */
5400
    return (TYPE_MODE (type) == BLKmode
5401
            || TYPE_MODE (type) == TFmode
5402
            || (TREE_CODE (type) == VECTOR_TYPE
5403
                && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
5404
  else
5405
    /* Original SPARC 64-bit ABI says that structures and unions
5406
       smaller than 32 bytes are returned in registers, as well as
5407
       all other base types.
5408
 
5409
       Extended ABI (as implemented by the Sun compiler) says that all
5410
       complex floats are returned in registers (8 FP registers at most
5411
       for '_Complex long double').  Return all complex integers in
5412
       registers (4 at most for '_Complex TItype').
5413
 
5414
       Vector ABI (as implemented by the Sun VIS SDK) says that vector
5415
       integers are returned like floats of the same size, that is in
5416
       registers.  Return all vector floats like structure and unions;
5417
       note that they always have BLKmode like the latter.  */
5418
    return ((TYPE_MODE (type) == BLKmode
5419
             && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32));
5420
}
5421
 
5422
/* Handle the TARGET_STRUCT_VALUE target hook.
5423
   Return where to find the structure return value address.  */
5424
 
5425
static rtx
5426
sparc_struct_value_rtx (tree fndecl, int incoming)
5427
{
5428
  if (TARGET_ARCH64)
5429
    return 0;
5430
  else
5431
    {
5432
      rtx mem;
5433
 
5434
      if (incoming)
5435
        mem = gen_rtx_MEM (Pmode, plus_constant (frame_pointer_rtx,
5436
                                                 STRUCT_VALUE_OFFSET));
5437
      else
5438
        mem = gen_rtx_MEM (Pmode, plus_constant (stack_pointer_rtx,
5439
                                                 STRUCT_VALUE_OFFSET));
5440
 
5441
      /* Only follow the SPARC ABI for fixed-size structure returns.
5442
         Variable-size structure returns are handled per the normal
5443
         procedures in GCC.  This is enabled by -mstd-struct-return.  */
5444
      if (incoming == 2
5445
          && sparc_std_struct_return
5446
          && TYPE_SIZE_UNIT (TREE_TYPE (fndecl))
5447
          && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (fndecl))) == INTEGER_CST)
5448
        {
5449
          /* We must check and adjust the return address, as it is
5450
             optional as to whether the return object is really
5451
             provided.  */
5452
          rtx ret_rtx = gen_rtx_REG (Pmode, 31);
5453
          rtx scratch = gen_reg_rtx (SImode);
5454
          rtx endlab = gen_label_rtx ();
5455
 
5456
          /* Calculate the return object size.  */
5457
          tree size = TYPE_SIZE_UNIT (TREE_TYPE (fndecl));
5458
          rtx size_rtx = GEN_INT (TREE_INT_CST_LOW (size) & 0xfff);
5459
          /* Construct a temporary return value.  */
5460
          rtx temp_val = assign_stack_local (Pmode, TREE_INT_CST_LOW (size), 0);
5461
 
5462
          /* Implement the SPARC 32-bit psABI callee struct-return checking
5463
             requirements:
5464
 
5465
              Fetch the instruction where we will return to and see if
5466
             it's an unimp instruction (the most significant 10 bits
5467
             will be zero).  */
5468
          emit_move_insn (scratch, gen_rtx_MEM (SImode,
5469
                                                plus_constant (ret_rtx, 8)));
5470
          /* Assume the size is valid and pre-adjust.  */
5471
          emit_insn (gen_add3_insn (ret_rtx, ret_rtx, GEN_INT (4)));
5472
          emit_cmp_and_jump_insns (scratch, size_rtx, EQ, const0_rtx, SImode, 0, endlab);
5473
          emit_insn (gen_sub3_insn (ret_rtx, ret_rtx, GEN_INT (4)));
5474
          /* Assign stack temp:
5475
             Write the address of the memory pointed to by temp_val into
5476
             the memory pointed to by mem.  */
5477
          emit_move_insn (mem, XEXP (temp_val, 0));
5478
          emit_label (endlab);
5479
        }
5480
 
5481
      set_mem_alias_set (mem, struct_value_alias_set);
5482
      return mem;
5483
    }
5484
}
5485
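/* Background on the check above (a sketch of the 32-bit psABI convention
   being implemented; the encoding details are assumptions): a caller
   expecting a struct return places an "unimp <size>" word after the
   call's delay slot.  The callee fetches that word at return-address + 8;
   if its low 12 bits match the expected object size, the return address
   is left pre-adjusted by 4 so the return skips the unimp word, otherwise
   the adjustment is undone and the address of a local temporary is
   substituted for the missing return object.  */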
 
5486
/* Handle FUNCTION_VALUE, FUNCTION_OUTGOING_VALUE, and LIBCALL_VALUE macros.
5487
   For v9, function return values are subject to the same rules as arguments,
5488
   except that up to 32 bytes may be returned in registers.  */
5489
 
5490
rtx
5491
function_value (tree type, enum machine_mode mode, int incoming_p)
5492
{
5493
  /* Beware that the two values are swapped here wrt function_arg.  */
5494
  int regbase = (incoming_p
5495
                 ? SPARC_OUTGOING_INT_ARG_FIRST
5496
                 : SPARC_INCOMING_INT_ARG_FIRST);
5497
  enum mode_class mclass = GET_MODE_CLASS (mode);
5498
  int regno;
5499
 
5500
  /* Vector types deserve special treatment because they are polymorphic wrt
5501
     their mode, depending upon whether VIS instructions are enabled.  */
5502
  if (type && TREE_CODE (type) == VECTOR_TYPE)
5503
    {
5504
      HOST_WIDE_INT size = int_size_in_bytes (type);
5505
      gcc_assert ((TARGET_ARCH32 && size <= 8)
5506
                  || (TARGET_ARCH64 && size <= 32));
5507
 
5508
      if (mode == BLKmode)
5509
        return function_arg_vector_value (size,
5510
                                          TYPE_MODE (TREE_TYPE (type)),
5511
                                          SPARC_FP_ARG_FIRST);
5512
      else
5513
        mclass = MODE_FLOAT;
5514
    }
5515
 
5516
  if (TARGET_ARCH64 && type)
5517
    {
5518
      /* Structures up to 32 bytes in size are returned in registers.  */
5519
      if (TREE_CODE (type) == RECORD_TYPE)
5520
        {
5521
          HOST_WIDE_INT size = int_size_in_bytes (type);
5522
          gcc_assert (size <= 32);
5523
 
5524
          return function_arg_record_value (type, mode, 0, 1, regbase);
5525
        }
5526
 
5527
      /* Unions up to 32 bytes in size are returned in integer registers.  */
5528
      else if (TREE_CODE (type) == UNION_TYPE)
5529
        {
5530
          HOST_WIDE_INT size = int_size_in_bytes (type);
5531
          gcc_assert (size <= 32);
5532
 
5533
          return function_arg_union_value (size, mode, 0, regbase);
5534
        }
5535
 
5536
      /* Objects that require it are returned in FP registers.  */
5537
      else if (mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
5538
        ;
5539
 
5540
      /* All other aggregate types are returned in an integer register in a
5541
         mode corresponding to the size of the type.  */
5542
      else if (AGGREGATE_TYPE_P (type))
5543
        {
5544
          /* All other aggregate types are passed in an integer register
5545
             in a mode corresponding to the size of the type.  */
5546
          HOST_WIDE_INT size = int_size_in_bytes (type);
5547
          gcc_assert (size <= 32);
5548
 
5549
          mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
5550
 
5551
          /* ??? We probably should have made the same ABI change in
5552
             3.4.0 as the one we made for unions.   The latter was
5553
             required by the SCD though, while the former is not
5554
             specified, so we favored compatibility and efficiency.
5555
 
5556
             Now we're stuck for aggregates larger than 16 bytes,
5557
             because OImode vanished in the meantime.  Let's not
5558
             try to be unduly clever, and simply follow the ABI
5559
             for unions in that case.  */
5560
          if (mode == BLKmode)
5561
            return function_arg_union_value (size, mode, 0, regbase);
5562
          else
5563
            mclass = MODE_INT;
5564
        }
5565
 
5566
      /* This must match PROMOTE_FUNCTION_MODE.  */
5567
      else if (mclass == MODE_INT && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5568
        mode = word_mode;
5569
    }
5570
 
5571
  if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT) && TARGET_FPU)
5572
    regno = SPARC_FP_ARG_FIRST;
5573
  else
5574
    regno = regbase;
5575
 
5576
  return gen_rtx_REG (mode, regno);
5577
}
5578
 
5579
/* Do what is necessary for `va_start'.  We look at the current function
5580
   to determine if stdarg or varargs is used and return the address of
5581
   the first unnamed parameter.  */
5582
 
5583
static rtx
5584
sparc_builtin_saveregs (void)
5585
{
5586
  int first_reg = current_function_args_info.words;
5587
  rtx address;
5588
  int regno;
5589
 
5590
  for (regno = first_reg; regno < SPARC_INT_ARG_MAX; regno++)
5591
    emit_move_insn (gen_rtx_MEM (word_mode,
5592
                                 gen_rtx_PLUS (Pmode,
5593
                                               frame_pointer_rtx,
5594
                                               GEN_INT (FIRST_PARM_OFFSET (0)
5595
                                                        + (UNITS_PER_WORD
5596
                                                           * regno)))),
5597
                    gen_rtx_REG (word_mode,
5598
                                 SPARC_INCOMING_INT_ARG_FIRST + regno));
5599
 
5600
  address = gen_rtx_PLUS (Pmode,
5601
                          frame_pointer_rtx,
5602
                          GEN_INT (FIRST_PARM_OFFSET (0)
5603
                                   + UNITS_PER_WORD * first_reg));
5604
 
5605
  return address;
5606
}
5607
 
5608
/* Implement `va_start' for stdarg.  */
5609
 
5610
void
5611
sparc_va_start (tree valist, rtx nextarg)
5612
{
5613
  nextarg = expand_builtin_saveregs ();
5614
  std_expand_builtin_va_start (valist, nextarg);
5615
}
5616
 
5617
/* Implement `va_arg' for stdarg.  */
5618
 
5619
static tree
5620
sparc_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
5621
{
5622
  HOST_WIDE_INT size, rsize, align;
5623
  tree addr, incr;
5624
  bool indirect;
5625
  tree ptrtype = build_pointer_type (type);
5626
 
5627
  if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
5628
    {
5629
      indirect = true;
5630
      size = rsize = UNITS_PER_WORD;
5631
      align = 0;
5632
    }
5633
  else
5634
    {
5635
      indirect = false;
5636
      size = int_size_in_bytes (type);
5637
      rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
5638
      align = 0;
5639
 
5640
      if (TARGET_ARCH64)
5641
        {
5642
          /* For SPARC64, objects requiring 16-byte alignment get it.  */
5643
          if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
5644
            align = 2 * UNITS_PER_WORD;
5645
 
5646
          /* SPARC-V9 ABI states that structures up to 16 bytes in size
5647
             are left-justified in their slots.  */
5648
          if (AGGREGATE_TYPE_P (type))
5649
            {
5650
              if (size == 0)
5651
                size = rsize = UNITS_PER_WORD;
5652
              else
5653
                size = rsize;
5654
            }
5655
        }
5656
    }
5657
 
5658
  incr = valist;
5659
  if (align)
5660
    {
5661
      incr = fold (build2 (PLUS_EXPR, ptr_type_node, incr,
5662
                           ssize_int (align - 1)));
5663
      incr = fold (build2 (BIT_AND_EXPR, ptr_type_node, incr,
5664
                           ssize_int (-align)));
5665
    }
5666
 
5667
  gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
5668
  addr = incr;
5669
 
5670
  if (BYTES_BIG_ENDIAN && size < rsize)
5671
    addr = fold (build2 (PLUS_EXPR, ptr_type_node, incr,
5672
                         ssize_int (rsize - size)));
5673
 
5674
  if (indirect)
5675
    {
5676
      addr = fold_convert (build_pointer_type (ptrtype), addr);
5677
      addr = build_va_arg_indirect_ref (addr);
5678
    }
5679
  /* If the address isn't aligned properly for the type,
5680
     we may need to copy to a temporary.
5681
     FIXME: This is inefficient.  Usually we can do this
5682
     in registers.  */
5683
  else if (align == 0
5684
           && TYPE_ALIGN (type) > BITS_PER_WORD)
5685
    {
5686
      tree tmp = create_tmp_var (type, "va_arg_tmp");
5687
      tree dest_addr = build_fold_addr_expr (tmp);
5688
 
5689
      tree copy = build_function_call_expr
5690
        (implicit_built_in_decls[BUILT_IN_MEMCPY],
5691
         tree_cons (NULL_TREE, dest_addr,
5692
                    tree_cons (NULL_TREE, addr,
5693
                               tree_cons (NULL_TREE, size_int (rsize),
5694
                                          NULL_TREE))));
5695
 
5696
      gimplify_and_add (copy, pre_p);
5697
      addr = dest_addr;
5698
    }
5699
  else
5700
    addr = fold_convert (ptrtype, addr);
5701
 
5702
  incr = fold (build2 (PLUS_EXPR, ptr_type_node, incr, ssize_int (rsize)));
5703
  incr = build2 (MODIFY_EXPR, ptr_type_node, valist, incr);
5704
  gimplify_and_add (incr, post_p);
5705
 
5706
  return build_va_arg_indirect_ref (addr);
5707
}
5708
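/* A minimal C sketch of the va_arg sequence built above (added for
   illustration; the names are hypothetical and TYPE stands for the
   requested type):

     char *p = valist;
     if (align)
       p = (char *) (((uintptr_t) p + align - 1) & -(uintptr_t) align);
     char *addr = p;
     if (BYTES_BIG_ENDIAN && size < rsize)
       addr += rsize - size;          // small args are right-justified
     valist = p + rsize;              // step past the padded slot
     result = indirect ? **(TYPE **) addr : *(TYPE *) addr;

   i.e. round the cursor up to the required alignment, read at the
   adjusted address, and advance by the rounded size; the misaligned case
   above additionally copies the value to a temporary via memcpy.  */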
 
5709
/* Implement the TARGET_VECTOR_MODE_SUPPORTED_P target hook.
5710
   Specify whether the vector mode is supported by the hardware.  */
5711
 
5712
static bool
5713
sparc_vector_mode_supported_p (enum machine_mode mode)
5714
{
5715
  return TARGET_VIS && VECTOR_MODE_P (mode);
5716
}
5717
 
5718
/* Return the string to output an unconditional branch to LABEL, which is
5719
   the operand number of the label.
5720
 
5721
   DEST is the destination insn (i.e. the label), INSN is the source.  */
5722
 
5723
const char *
5724
output_ubranch (rtx dest, int label, rtx insn)
5725
{
5726
  static char string[64];
5727
  bool v9_form = false;
5728
  char *p;
5729
 
5730
  if (TARGET_V9 && INSN_ADDRESSES_SET_P ())
5731
    {
5732
      int delta = (INSN_ADDRESSES (INSN_UID (dest))
5733
                   - INSN_ADDRESSES (INSN_UID (insn)));
5734
      /* Leave some instructions for "slop".  */
5735
      if (delta >= -260000 && delta < 260000)
5736
        v9_form = true;
5737
    }
5738
 
5739
  if (v9_form)
5740
    strcpy (string, "ba%*,pt\t%%xcc, ");
5741
  else
5742
    strcpy (string, "b%*\t");
5743
 
5744
  p = strchr (string, '\0');
5745
  *p++ = '%';
5746
  *p++ = 'l';
5747
  *p++ = '0' + label;
5748
  *p++ = '%';
5749
  *p++ = '(';
5750
  *p = '\0';
5751
 
5752
  return string;
5753
}
5754
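/* For reference (illustrative): after operand substitution the two
   templates above produce assembler of the form

     ba,pt  %xcc, .LC30        ! V9 form, chosen when the target is near
     b      .LC30              ! V8 form, with a wider +-8MB reach

   where %*, %( and %l0 are SPARC print_operand codes handling the annul
   flag, delay-slot annotation and the label operand.  */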
 
5755
/* Return the string to output a conditional branch to LABEL, which is
5756
   the operand number of the label.  OP is the conditional expression.
5757
   XEXP (OP, 0) is assumed to be a condition code register (integer or
5758
   floating point) and its mode specifies what kind of comparison we made.
5759
 
5760
   DEST is the destination insn (i.e. the label), INSN is the source.
5761
 
5762
   REVERSED is nonzero if we should reverse the sense of the comparison.
5763
 
5764
   ANNUL is nonzero if we should generate an annulling branch.  */
5765
 
5766
const char *
5767
output_cbranch (rtx op, rtx dest, int label, int reversed, int annul,
5768
                rtx insn)
5769
{
5770
  static char string[64];
5771
  enum rtx_code code = GET_CODE (op);
5772
  rtx cc_reg = XEXP (op, 0);
5773
  enum machine_mode mode = GET_MODE (cc_reg);
5774
  const char *labelno, *branch;
5775
  int spaces = 8, far;
5776
  char *p;
5777
 
5778
  /* v9 branches are limited to +-1MB.  If it is too far away,
5779
     change
5780
 
5781
     bne,pt %xcc, .LC30
5782
 
5783
     to
5784
 
5785
     be,pn %xcc, .+12
5786
      nop
5787
     ba .LC30
5788
 
5789
     and
5790
 
5791
     fbne,a,pn %fcc2, .LC29
5792
 
5793
     to
5794
 
5795
     fbe,pt %fcc2, .+16
5796
      nop
5797
     ba .LC29  */
5798
 
5799
  far = TARGET_V9 && (get_attr_length (insn) >= 3);
5800
  if (reversed ^ far)
5801
    {
5802
      /* Reversal of FP compares takes care -- an ordered compare
5803
         becomes an unordered compare and vice versa.  */
5804
      if (mode == CCFPmode || mode == CCFPEmode)
5805
        code = reverse_condition_maybe_unordered (code);
5806
      else
5807
        code = reverse_condition (code);
5808
    }
5809
 
5810
  /* Start by writing the branch condition.  */
5811
  if (mode == CCFPmode || mode == CCFPEmode)
5812
    {
5813
      switch (code)
5814
        {
5815
        case NE:
5816
          branch = "fbne";
5817
          break;
5818
        case EQ:
5819
          branch = "fbe";
5820
          break;
5821
        case GE:
5822
          branch = "fbge";
5823
          break;
5824
        case GT:
5825
          branch = "fbg";
5826
          break;
5827
        case LE:
5828
          branch = "fble";
5829
          break;
5830
        case LT:
5831
          branch = "fbl";
5832
          break;
5833
        case UNORDERED:
5834
          branch = "fbu";
5835
          break;
5836
        case ORDERED:
5837
          branch = "fbo";
5838
          break;
5839
        case UNGT:
5840
          branch = "fbug";
5841
          break;
5842
        case UNLT:
5843
          branch = "fbul";
5844
          break;
5845
        case UNEQ:
5846
          branch = "fbue";
5847
          break;
5848
        case UNGE:
5849
          branch = "fbuge";
5850
          break;
5851
        case UNLE:
5852
          branch = "fbule";
5853
          break;
5854
        case LTGT:
5855
          branch = "fblg";
5856
          break;
5857
 
5858
        default:
5859
          gcc_unreachable ();
5860
        }
5861
 
5862
      /* ??? !v9: FP branches cannot be preceded by another floating point
5863
         insn.  Because there is currently no concept of pre-delay slots,
5864
         we can fix this only by always emitting a nop before a floating
5865
         point branch.  */
5866
 
5867
      string[0] = '\0';
5868
      if (! TARGET_V9)
5869
        strcpy (string, "nop\n\t");
5870
      strcat (string, branch);
5871
    }
5872
  else
5873
    {
5874
      switch (code)
5875
        {
5876
        case NE:
5877
          branch = "bne";
5878
          break;
5879
        case EQ:
5880
          branch = "be";
5881
          break;
5882
        case GE:
5883
          if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
5884
            branch = "bpos";
5885
          else
5886
            branch = "bge";
5887
          break;
5888
        case GT:
5889
          branch = "bg";
5890
          break;
5891
        case LE:
5892
          branch = "ble";
5893
          break;
5894
        case LT:
5895
          if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
5896
            branch = "bneg";
5897
          else
5898
            branch = "bl";
5899
          break;
5900
        case GEU:
5901
          branch = "bgeu";
5902
          break;
5903
        case GTU:
5904
          branch = "bgu";
5905
          break;
5906
        case LEU:
5907
          branch = "bleu";
5908
          break;
5909
        case LTU:
5910
          branch = "blu";
5911
          break;
5912
 
5913
        default:
5914
          gcc_unreachable ();
5915
        }
5916
      strcpy (string, branch);
5917
    }
5918
  spaces -= strlen (branch);
5919
  p = strchr (string, '\0');
5920
 
5921
  /* Now add the annulling, the label, and a possible nop.  */
5922
  if (annul && ! far)
5923
    {
5924
      strcpy (p, ",a");
5925
      p += 2;
5926
      spaces -= 2;
5927
    }
5928
 
5929
  if (TARGET_V9)
5930
    {
5931
      rtx note;
5932
      int v8 = 0;
5933
 
5934
      if (! far && insn && INSN_ADDRESSES_SET_P ())
5935
        {
5936
          int delta = (INSN_ADDRESSES (INSN_UID (dest))
5937
                       - INSN_ADDRESSES (INSN_UID (insn)));
5938
          /* Leave some instructions for "slop".  */
5939
          if (delta < -260000 || delta >= 260000)
5940
            v8 = 1;
5941
        }
5942
 
5943
      if (mode == CCFPmode || mode == CCFPEmode)
5944
        {
5945
          static char v9_fcc_labelno[] = "%%fccX, ";
5946
          /* Set the char indicating the number of the fcc reg to use.  */
5947
          v9_fcc_labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
5948
          labelno = v9_fcc_labelno;
5949
          if (v8)
5950
            {
5951
              gcc_assert (REGNO (cc_reg) == SPARC_FCC_REG);
5952
              labelno = "";
5953
            }
5954
        }
5955
      else if (mode == CCXmode || mode == CCX_NOOVmode)
5956
        {
5957
          labelno = "%%xcc, ";
5958
          gcc_assert (! v8);
5959
        }
5960
      else
5961
        {
5962
          labelno = "%%icc, ";
5963
          if (v8)
5964
            labelno = "";
5965
        }
5966
 
5967
      if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
5968
        {
5969
          strcpy (p,
5970
                  ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
5971
                  ? ",pt" : ",pn");
5972
          p += 3;
5973
          spaces -= 3;
5974
        }
5975
    }
5976
  else
5977
    labelno = "";
5978
 
5979
  if (spaces > 0)
5980
    *p++ = '\t';
5981
  else
5982
    *p++ = ' ';
5983
  strcpy (p, labelno);
5984
  p = strchr (p, '\0');
5985
  if (far)
5986
    {
5987
      strcpy (p, ".+12\n\t nop\n\tb\t");
5988
      /* Skip the next insn if requested or
5989
         if we know that it will be a nop.  */
5990
      if (annul || ! final_sequence)
5991
        p[3] = '6';
5992
      p += 14;
5993
    }
5994
  *p++ = '%';
5995
  *p++ = 'l';
5996
  *p++ = label + '0';
5997
  *p++ = '%';
5998
  *p++ = '#';
5999
  *p = '\0';
6000
 
6001
  return string;
6002
}
6003
 
6004
/* Emit a library call comparison between floating point X and Y.
6005
   COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
6006
   TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
6007
   values as arguments instead of the TFmode registers themselves,
6008
   which is why we cannot call emit_float_lib_cmp.  */
6009
void
6010
sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
6011
{
6012
  const char *qpfunc;
6013
  rtx slot0, slot1, result, tem, tem2;
6014
  enum machine_mode mode;
6015
 
6016
  switch (comparison)
6017
    {
6018
    case EQ:
6019
      qpfunc = (TARGET_ARCH64) ? "_Qp_feq" : "_Q_feq";
6020
      break;
6021
 
6022
    case NE:
6023
      qpfunc = (TARGET_ARCH64) ? "_Qp_fne" : "_Q_fne";
6024
      break;
6025
 
6026
    case GT:
6027
      qpfunc = (TARGET_ARCH64) ? "_Qp_fgt" : "_Q_fgt";
6028
      break;
6029
 
6030
    case GE:
6031
      qpfunc = (TARGET_ARCH64) ? "_Qp_fge" : "_Q_fge";
6032
      break;
6033
 
6034
    case LT:
6035
      qpfunc = (TARGET_ARCH64) ? "_Qp_flt" : "_Q_flt";
6036
      break;
6037
 
6038
    case LE:
6039
      qpfunc = (TARGET_ARCH64) ? "_Qp_fle" : "_Q_fle";
6040
      break;
6041
 
6042
    case ORDERED:
6043
    case UNORDERED:
6044
    case UNGT:
6045
    case UNLT:
6046
    case UNEQ:
6047
    case UNGE:
6048
    case UNLE:
6049
    case LTGT:
6050
      qpfunc = (TARGET_ARCH64) ? "_Qp_cmp" : "_Q_cmp";
6051
      break;
6052
 
6053
    default:
6054
      gcc_unreachable ();
6055
    }
6056
 
6057
  if (TARGET_ARCH64)
6058
    {
6059
      if (GET_CODE (x) != MEM)
6060
        {
6061
          slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
6062
          emit_move_insn (slot0, x);
6063
        }
6064
      else
6065
        slot0 = x;
6066
 
6067
      if (GET_CODE (y) != MEM)
6068
        {
6069
          slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);
6070
          emit_move_insn (slot1, y);
6071
        }
6072
      else
6073
        slot1 = y;
6074
 
6075
      emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), LCT_NORMAL,
6076
                         DImode, 2,
6077
                         XEXP (slot0, 0), Pmode,
6078
                         XEXP (slot1, 0), Pmode);
6079
 
6080
      mode = DImode;
6081
    }
6082
  else
6083
    {
6084
      emit_library_call (gen_rtx_SYMBOL_REF (Pmode, qpfunc), LCT_NORMAL,
6085
                         SImode, 2,
6086
                         x, TFmode, y, TFmode);
6087
 
6088
      mode = SImode;
6089
    }
6090
 
6091
 
6092
  /* Immediately move the result of the libcall into a pseudo
6093
     register so reload doesn't clobber the value if it needs
6094
     the return register for a spill reg.  */
6095
  result = gen_reg_rtx (mode);
6096
  emit_move_insn (result, hard_libcall_value (mode));
6097
 
6098
  switch (comparison)
6099
    {
6100
    default:
6101
      emit_cmp_insn (result, const0_rtx, NE, NULL_RTX, mode, 0);
6102
      break;
6103
    case ORDERED:
6104
    case UNORDERED:
6105
      emit_cmp_insn (result, GEN_INT (3), comparison == UNORDERED ? EQ : NE,
6106
                     NULL_RTX, mode, 0);
6107
      break;
6108
    case UNGT:
6109
    case UNGE:
6110
      emit_cmp_insn (result, const1_rtx,
6111
                     comparison == UNGT ? GT : NE, NULL_RTX, mode, 0);
6112
      break;
6113
    case UNLE:
6114
      emit_cmp_insn (result, const2_rtx, NE, NULL_RTX, mode, 0);
6115
      break;
6116
    case UNLT:
6117
      tem = gen_reg_rtx (mode);
6118
      if (TARGET_ARCH32)
6119
        emit_insn (gen_andsi3 (tem, result, const1_rtx));
6120
      else
6121
        emit_insn (gen_anddi3 (tem, result, const1_rtx));
6122
      emit_cmp_insn (tem, const0_rtx, NE, NULL_RTX, mode, 0);
6123
      break;
6124
    case UNEQ:
6125
    case LTGT:
6126
      tem = gen_reg_rtx (mode);
6127
      if (TARGET_ARCH32)
6128
        emit_insn (gen_addsi3 (tem, result, const1_rtx));
6129
      else
6130
        emit_insn (gen_adddi3 (tem, result, const1_rtx));
6131
      tem2 = gen_reg_rtx (mode);
6132
      if (TARGET_ARCH32)
6133
        emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
6134
      else
6135
        emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
6136
      emit_cmp_insn (tem2, const0_rtx, comparison == UNEQ ? EQ : NE,
6137
                     NULL_RTX, mode, 0);
6138
      break;
6139
    }
6140
}
6141
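/* Decoding note (an assumption about the libcall's return convention,
   consistent with the compares above): _Q_cmp/_Qp_cmp is taken to return

     0  equal      1  less      2  greater      3  unordered

   so e.g. UNORDERED tests result == 3, UNGT tests result > 1 (greater or
   unordered), UNLT masks bit 0 (less or unordered), and UNEQ/LTGT use
   (result + 1) & 2 to isolate the {less, greater} pair.  */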
 
6142
/* Generate an unsigned DImode to FP conversion.  This is the same code
6143
   optabs would emit if we didn't have TFmode patterns.  */
6144
 
6145
void
6146
sparc_emit_floatunsdi (rtx *operands, enum machine_mode mode)
6147
{
6148
  rtx neglab, donelab, i0, i1, f0, in, out;
6149
 
6150
  out = operands[0];
6151
  in = force_reg (DImode, operands[1]);
6152
  neglab = gen_label_rtx ();
6153
  donelab = gen_label_rtx ();
6154
  i0 = gen_reg_rtx (DImode);
6155
  i1 = gen_reg_rtx (DImode);
6156
  f0 = gen_reg_rtx (mode);
6157
 
6158
  emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);
6159
 
6160
  emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
6161
  emit_jump_insn (gen_jump (donelab));
6162
  emit_barrier ();
6163
 
6164
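  /* The value has its top bit set, so a direct signed conversion is not
     possible.  Halve it, folding the discarded low bit back in so the
     final rounding is unaffected, convert, and double the result.  */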
  emit_label (neglab);

  emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
  emit_insn (gen_anddi3 (i1, in, const1_rtx));
  emit_insn (gen_iordi3 (i0, i0, i1));
  emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
  emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));

  emit_label (donelab);
}

/* Generate an FP to unsigned DImode conversion.  This is the same code
   optabs would emit if we didn't have TFmode patterns.  */

void
sparc_emit_fixunsdi (rtx *operands, enum machine_mode mode)
{
  rtx neglab, donelab, i0, i1, f0, in, out, limit;

  out = operands[0];
  in = force_reg (mode, operands[1]);
  neglab = gen_label_rtx ();
  donelab = gen_label_rtx ();
  i0 = gen_reg_rtx (DImode);
  i1 = gen_reg_rtx (DImode);
  limit = gen_reg_rtx (mode);
  f0 = gen_reg_rtx (mode);

  emit_move_insn (limit,
                  CONST_DOUBLE_FROM_REAL_VALUE (
                    REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
  emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);

  emit_insn (gen_rtx_SET (VOIDmode,
                          out,
                          gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
  emit_jump_insn (gen_jump (donelab));
  emit_barrier ();

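  /* The operand is at least 2^63, outside signed DImode range.  Subtract
     2^63 (the limit) before converting, then put the sign bit of the
     integer result back with an XOR.  */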
  emit_label (neglab);

  emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_MINUS (mode, in, limit)));
  emit_insn (gen_rtx_SET (VOIDmode,
                          i0,
                          gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
  emit_insn (gen_movdi (i1, const1_rtx));
  emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
  emit_insn (gen_xordi3 (out, i0, i1));

  emit_label (donelab);
}

/* Return the string to output a conditional branch to LABEL, testing
   register REG.  LABEL is the operand number of the label; REG is the
   operand number of the reg.  OP is the conditional expression.  The mode
   of REG says what kind of comparison we made.

   DEST is the destination insn (i.e. the label), INSN is the source.

   REVERSED is nonzero if we should reverse the sense of the comparison.

   ANNUL is nonzero if we should generate an annulling branch.  */

const char *
output_v9branch (rtx op, rtx dest, int reg, int label, int reversed,
                 int annul, rtx insn)
{
  static char string[64];
  enum rtx_code code = GET_CODE (op);
  enum machine_mode mode = GET_MODE (XEXP (op, 0));
  rtx note;
  int far;
  char *p;

  /* Branch-on-register instructions are limited to +-128KB.  If the
     target is too far away, change

     brnz,pt %g1, .LC30

     to

     brz,pn %g1, .+12
      nop
     ba,pt %xcc, .LC30

     and

     brgez,a,pn %o1, .LC29

     to

     brlz,pt %o1, .+16
      nop
     ba,pt %xcc, .LC29  */

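  /* The length attribute appears to be counted in instruction words here,
     so three or more means the long sequence above is required.  */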
  far = get_attr_length (insn) >= 3;

  /* If not floating-point or if EQ or NE, we can just reverse the code.  */
  if (reversed ^ far)
    code = reverse_condition (code);

  /* Only 64 bit versions of these instructions exist.  */
  gcc_assert (mode == DImode);

  /* Start by writing the branch condition.  */

  switch (code)
    {
    case NE:
      strcpy (string, "brnz");
      break;

    case EQ:
      strcpy (string, "brz");
      break;

    case GE:
      strcpy (string, "brgez");
      break;

    case LT:
      strcpy (string, "brlz");
      break;

    case LE:
      strcpy (string, "brlez");
      break;

    case GT:
      strcpy (string, "brgz");
      break;

    default:
      gcc_unreachable ();
    }

  p = strchr (string, '\0');

  /* Now add the annulling, reg, label, and nop.  */
  if (annul && ! far)
    {
      strcpy (p, ",a");
      p += 2;
    }

  if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
    {
      strcpy (p,
              ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
              ? ",pt" : ",pn");
      p += 3;
    }

  *p = p < string + 8 ? '\t' : ' ';
  p++;
  *p++ = '%';
  *p++ = '0' + reg;
  *p++ = ',';
  *p++ = ' ';
  if (far)
    {
      int veryfar = 1, delta;

      if (INSN_ADDRESSES_SET_P ())
        {
          delta = (INSN_ADDRESSES (INSN_UID (dest))
                   - INSN_ADDRESSES (INSN_UID (insn)));
          /* Leave some instructions for "slop".  */
          if (delta >= -260000 && delta < 260000)
            veryfar = 0;
        }

      strcpy (p, ".+12\n\t nop\n\t");
      /* Skip the next insn if requested or
         if we know that it will be a nop.  */
      if (annul || ! final_sequence)
        p[3] = '6';
      p += 12;
      if (veryfar)
        {
          strcpy (p, "b\t");
          p += 2;
        }
      else
        {
          strcpy (p, "ba,pt\t%%xcc, ");
          p += 13;
        }
    }
  *p++ = '%';
  *p++ = 'l';
  *p++ = '0' + label;
  *p++ = '%';
  *p++ = '#';
  *p = '\0';

  return string;
}

/* Return 1 if any of the registers of the instruction are %l[0-7] or
   %o[0-7].  Such instructions cannot be used in the delay slot of a
   return insn on V9.  If TEST is 0, also rename all %i[0-7] registers
   to their %o[0-7] counterparts.  */

static int
epilogue_renumber (register rtx *where, int test)
{
  register const char *fmt;
  register int i;
  register enum rtx_code code;

  if (*where == 0)
    return 0;

  code = GET_CODE (*where);

  switch (code)
    {
    case REG:
      if (REGNO (*where) >= 8 && REGNO (*where) < 24)      /* oX or lX */
        return 1;
      if (! test && REGNO (*where) >= 24 && REGNO (*where) < 32)
        *where = gen_rtx_REG (GET_MODE (*where), OUTGOING_REGNO (REGNO (*where)));
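      /* Fall through.  */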
    case SCRATCH:
    case CC0:
    case PC:
    case CONST_INT:
    case CONST_DOUBLE:
      return 0;

      /* Do not replace the frame pointer with the stack pointer because
         it can cause the delayed instruction to load below the stack.
         This occurs when instructions like:

         (set (reg/i:SI 24 %i0)
             (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
                       (const_int -20 [0xffffffec])) 0))

         are in the return insn's delay slot.  */
    case PLUS:
      if (GET_CODE (XEXP (*where, 0)) == REG
          && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM
          && (GET_CODE (XEXP (*where, 1)) != CONST_INT
              || INTVAL (XEXP (*where, 1)) < SPARC_STACK_BIAS))
        return 1;
      break;

    case MEM:
      if (SPARC_STACK_BIAS
          && GET_CODE (XEXP (*where, 0)) == REG
          && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM)
        return 1;
      break;

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
        {
          register int j;
          for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
            if (epilogue_renumber (&(XVECEXP (*where, i, j)), test))
              return 1;
        }
      else if (fmt[i] == 'e'
               && epilogue_renumber (&(XEXP (*where, i)), test))
        return 1;
    }
  return 0;
}

/* Leaf functions and non-leaf functions have different needs.  */

static const int
reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;

static const int
reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;

static const int *const reg_alloc_orders[] = {
  reg_leaf_alloc_order,
  reg_nonleaf_alloc_order};

void
order_regs_for_local_alloc (void)
{
  static int last_order_nonleaf = 1;

  if (regs_ever_live[15] != last_order_nonleaf)
    {
      last_order_nonleaf = !last_order_nonleaf;
      memcpy ((char *) reg_alloc_order,
              (const char *) reg_alloc_orders[last_order_nonleaf],
              FIRST_PSEUDO_REGISTER * sizeof (int));
    }
}

/* Return 1 if REG and MEM are legitimate enough to allow the various
   mem<-->reg splits to be run.  */

int
sparc_splitdi_legitimate (rtx reg, rtx mem)
{
  /* Punt if we are here by mistake.  */
  gcc_assert (reload_completed);

  /* We must have an offsettable memory reference.  */
  if (! offsettable_memref_p (mem))
    return 0;

  /* If we have legitimate args for ldd/std, we do not want
     the split to happen.  */
  if ((REGNO (reg) % 2) == 0
      && mem_min_alignment (mem, 8))
    return 0;

  /* Success.  */
  return 1;
}

/* Return 1 if x and y are some kind of REG and they refer to
   different hard registers.  This test is guaranteed to be
   run after reload.  */

int
sparc_absnegfloat_split_legitimate (rtx x, rtx y)
{
  if (GET_CODE (x) != REG)
    return 0;
  if (GET_CODE (y) != REG)
    return 0;
  if (REGNO (x) == REGNO (y))
    return 0;
  return 1;
}

/* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
   This makes them candidates for using ldd and std insns.

   Note reg1 and reg2 *must* be hard registers.  */

int
registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
{
  /* We might have been passed a SUBREG.  */
  if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
    return 0;

  if (REGNO (reg1) % 2 != 0)
    return 0;

  /* Integer ldd is deprecated in SPARC V9.  */
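  /* Hard registers 0-31 are the integer registers.  */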
  if (TARGET_V9 && REGNO (reg1) < 32)
    return 0;

  return (REGNO (reg1) == REGNO (reg2) - 1);
}

/* Return 1 if the addresses in mem1 and mem2 are suitable for use in
   an ldd or std insn.

   This can only happen when addr1 and addr2, the addresses in mem1
   and mem2, are consecutive memory locations (addr1 + 4 == addr2).
   addr1 must also be aligned on a 64-bit boundary.

   Also, if dependent_reg_rtx is not null, it should not be used to
   compute the address for mem1, i.e. we cannot optimize a sequence
   like:
        ld [%o0], %o0
        ld [%o0 + 4], %o1
   to
        ldd [%o0], %o0
   nor:
        ld [%g3 + 4], %g3
        ld [%g3], %g2
   to
        ldd [%g3], %g2

   But, note that the transformation from:
        ld [%g2 + 4], %g3
        ld [%g2], %g2
   to
        ldd [%g2], %g2
   is perfectly fine.  Thus, the peephole2 patterns always pass us
   the destination register of the first load, never the second one.

   For stores we don't have a similar problem, so dependent_reg_rtx is
   NULL_RTX.  */

int
mems_ok_for_ldd_peep (rtx mem1, rtx mem2, rtx dependent_reg_rtx)
{
  rtx addr1, addr2;
  unsigned int reg1;
  HOST_WIDE_INT offset1;

  /* The mems cannot be volatile.  */
  if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
    return 0;

  /* MEM1 should be aligned on a 64-bit boundary.  */
  if (MEM_ALIGN (mem1) < 64)
    return 0;

  addr1 = XEXP (mem1, 0);
  addr2 = XEXP (mem2, 0);

  /* Extract a register number and offset (if used) from the first addr.  */
  if (GET_CODE (addr1) == PLUS)
    {
      /* If not a REG, return zero.  */
      if (GET_CODE (XEXP (addr1, 0)) != REG)
        return 0;
      else
        {
          reg1 = REGNO (XEXP (addr1, 0));
          /* The offset must be constant!  */
          if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
            return 0;
          offset1 = INTVAL (XEXP (addr1, 1));
        }
    }
  else if (GET_CODE (addr1) != REG)
    return 0;
  else
    {
      reg1 = REGNO (addr1);
      /* This was a simple (mem (reg)) expression.  Offset is 0.  */
      offset1 = 0;
    }

  /* Make sure the second address is of the form (plus (reg) (const_int)).  */
  if (GET_CODE (addr2) != PLUS)
    return 0;

  if (GET_CODE (XEXP (addr2, 0)) != REG
      || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
    return 0;

  if (reg1 != REGNO (XEXP (addr2, 0)))
    return 0;

  if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
    return 0;

  /* The first offset must be evenly divisible by 8 to ensure the
     address is 64 bit aligned.  */
  if (offset1 % 8 != 0)
    return 0;

  /* The offset for the second addr must be 4 more than the first addr.  */
  if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
    return 0;

  /* All the tests passed.  addr1 and addr2 are valid for ldd and std
     instructions.  */
  return 1;
}

/* Return 1 if reg is a pseudo, or is the first register in
   a hard register pair.  This makes it a candidate for use in
   ldd and std insns.  */

int
register_ok_for_ldd (rtx reg)
{
  /* We might have been passed a SUBREG.  */
  if (GET_CODE (reg) != REG)
    return 0;

  if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
    return (REGNO (reg) % 2 == 0);
  else
    return 1;
}

/* Print operand X (an rtx) in assembler syntax to file FILE.
   CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
   For `%' followed by punctuation, CODE is the punctuation and X is null.  */

void
print_operand (FILE *file, rtx x, int code)
{
  switch (code)
    {
    case '#':
      /* Output an insn in a delay slot.  */
      if (final_sequence)
        sparc_indent_opcode = 1;
      else
        fputs ("\n\t nop", file);
      return;
    case '*':
      /* Output an annul flag if there's nothing for the delay slot and we
         are optimizing.  This is always used with '(' below.
         Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
         this is a dbx bug.  So, we only do this when optimizing.
         On UltraSPARC, a branch in a delay slot causes a pipeline flush.
         Always emit a nop in case the next instruction is a branch.  */
      if (! final_sequence && (optimize && (int) sparc_cpu < PROCESSOR_V9))
        fputs (",a", file);
      return;
    case '(':
      /* Output a 'nop' if there's nothing for the delay slot and we are
         not optimizing.  This is always used with '*' above.  */
      if (! final_sequence && ! (optimize && (int) sparc_cpu < PROCESSOR_V9))
        fputs ("\n\t nop", file);
      else if (final_sequence)
        sparc_indent_opcode = 1;
      return;
    case ')':
      /* Output the right displacement from the saved PC on function return.
         The caller may have placed an "unimp" insn immediately after the call
         so we have to account for it.  This insn is used in the 32-bit ABI
         when calling a function that returns a non-zero-sized structure.  The
         64-bit ABI doesn't have it.  Be careful to have this test be the same
         as that used on the call.  The exception here is that when
         sparc_std_struct_return is enabled, the psABI is followed exactly
         and the adjustment is made by the code in sparc_struct_value_rtx.
         The call emitted is the same when sparc_std_struct_return is
         present.  */
      if (! TARGET_ARCH64
          && current_function_returns_struct
          && ! sparc_std_struct_return
          && (TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
              == INTEGER_CST)
          && ! integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))))
        fputs ("12", file);
      else
        fputc ('8', file);
      return;
    case '_':
      /* Output the Embedded Medium/Anywhere code model base register.  */
      fputs (EMBMEDANY_BASE_REG, file);
      return;
    case '&':
      /* Print some local dynamic TLS name.  */
      assemble_name (file, get_some_local_dynamic_name ());
      return;

    case 'Y':
      /* Adjust the operand to take into account a RESTORE operation.  */
      if (GET_CODE (x) == CONST_INT)
        break;
      else if (GET_CODE (x) != REG)
        output_operand_lossage ("invalid %%Y operand");
      else if (REGNO (x) < 8)
        fputs (reg_names[REGNO (x)], file);
      else if (REGNO (x) >= 24 && REGNO (x) < 32)
        fputs (reg_names[REGNO (x)-16], file);
      else
        output_operand_lossage ("invalid %%Y operand");
      return;
    case 'L':
      /* Print out the low order register name of a register pair.  */
      if (WORDS_BIG_ENDIAN)
        fputs (reg_names[REGNO (x)+1], file);
      else
        fputs (reg_names[REGNO (x)], file);
      return;
    case 'H':
      /* Print out the high order register name of a register pair.  */
      if (WORDS_BIG_ENDIAN)
        fputs (reg_names[REGNO (x)], file);
      else
        fputs (reg_names[REGNO (x)+1], file);
      return;
    case 'R':
      /* Print out the second register name of a register pair or quad.
         I.e., R (%o0) => %o1.  */
      fputs (reg_names[REGNO (x)+1], file);
      return;
    case 'S':
      /* Print out the third register name of a register quad.
         I.e., S (%o0) => %o2.  */
      fputs (reg_names[REGNO (x)+2], file);
      return;
    case 'T':
      /* Print out the fourth register name of a register quad.
         I.e., T (%o0) => %o3.  */
      fputs (reg_names[REGNO (x)+3], file);
      return;
    case 'x':
      /* Print a condition code register.  */
      if (REGNO (x) == SPARC_ICC_REG)
        {
          /* We don't handle CC[X]_NOOVmode because they're not supposed
             to occur here.  */
          if (GET_MODE (x) == CCmode)
            fputs ("%icc", file);
          else if (GET_MODE (x) == CCXmode)
            fputs ("%xcc", file);
          else
            gcc_unreachable ();
        }
      else
        /* %fccN register */
        fputs (reg_names[REGNO (x)], file);
      return;
    case 'm':
      /* Print the operand's address only.  */
      output_address (XEXP (x, 0));
      return;
    case 'r':
      /* In this case we need a register.  Use %g0 if the
         operand is const0_rtx.  */
      if (x == const0_rtx
          || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
        {
          fputs ("%g0", file);
          return;
        }
      else
        break;

    case 'A':
      switch (GET_CODE (x))
        {
        case IOR: fputs ("or", file); break;
        case AND: fputs ("and", file); break;
        case XOR: fputs ("xor", file); break;
        default: output_operand_lossage ("invalid %%A operand");
        }
      return;

    case 'B':
      switch (GET_CODE (x))
        {
        case IOR: fputs ("orn", file); break;
        case AND: fputs ("andn", file); break;
        case XOR: fputs ("xnor", file); break;
        default: output_operand_lossage ("invalid %%B operand");
        }
      return;

      /* These are used by the conditional move instructions.  */
    case 'c':
    case 'C':
      {
        enum rtx_code rc = GET_CODE (x);

        if (code == 'c')
          {
            enum machine_mode mode = GET_MODE (XEXP (x, 0));
            if (mode == CCFPmode || mode == CCFPEmode)
              rc = reverse_condition_maybe_unordered (GET_CODE (x));
            else
              rc = reverse_condition (GET_CODE (x));
          }
        switch (rc)
          {
          case NE: fputs ("ne", file); break;
          case EQ: fputs ("e", file); break;
          case GE: fputs ("ge", file); break;
          case GT: fputs ("g", file); break;
          case LE: fputs ("le", file); break;
          case LT: fputs ("l", file); break;
          case GEU: fputs ("geu", file); break;
          case GTU: fputs ("gu", file); break;
          case LEU: fputs ("leu", file); break;
          case LTU: fputs ("lu", file); break;
          case LTGT: fputs ("lg", file); break;
          case UNORDERED: fputs ("u", file); break;
          case ORDERED: fputs ("o", file); break;
          case UNLT: fputs ("ul", file); break;
          case UNLE: fputs ("ule", file); break;
          case UNGT: fputs ("ug", file); break;
          case UNGE: fputs ("uge", file); break;
          case UNEQ: fputs ("ue", file); break;
          default: output_operand_lossage (code == 'c'
                                           ? "invalid %%c operand"
                                           : "invalid %%C operand");
          }
        return;
      }

      /* These are used by the movr instruction pattern.  */
    case 'd':
    case 'D':
      {
        enum rtx_code rc = (code == 'd'
                            ? reverse_condition (GET_CODE (x))
                            : GET_CODE (x));
        switch (rc)
          {
          case NE: fputs ("ne", file); break;
          case EQ: fputs ("e", file); break;
          case GE: fputs ("gez", file); break;
          case LT: fputs ("lz", file); break;
          case LE: fputs ("lez", file); break;
          case GT: fputs ("gz", file); break;
          default: output_operand_lossage (code == 'd'
                                           ? "invalid %%d operand"
                                           : "invalid %%D operand");
          }
        return;
      }

    case 'b':
      {
        /* Print a sign-extended character.  */
        int i = trunc_int_for_mode (INTVAL (x), QImode);
        fprintf (file, "%d", i);
        return;
      }

    case 'f':
      /* Operand must be a MEM; write its address.  */
      if (GET_CODE (x) != MEM)
        output_operand_lossage ("invalid %%f operand");
      output_address (XEXP (x, 0));
      return;

    case 's':
      {
        /* Print a sign-extended 32-bit value.  */
        HOST_WIDE_INT i;
        if (GET_CODE (x) == CONST_INT)
          i = INTVAL (x);
        else if (GET_CODE (x) == CONST_DOUBLE)
          i = CONST_DOUBLE_LOW (x);
        else
          {
            output_operand_lossage ("invalid %%s operand");
            return;
          }
        i = trunc_int_for_mode (i, SImode);
        fprintf (file, HOST_WIDE_INT_PRINT_DEC, i);
        return;
      }

    case 0:
      /* Do nothing special.  */
      break;

    default:
      /* Undocumented flag.  */
      output_operand_lossage ("invalid operand output code");
    }

  if (GET_CODE (x) == REG)
    fputs (reg_names[REGNO (x)], file);
  else if (GET_CODE (x) == MEM)
    {
      fputc ('[', file);
      /* Poor Sun assembler doesn't understand absolute addressing.  */
      if (CONSTANT_P (XEXP (x, 0)))
        fputs ("%g0+", file);
      output_address (XEXP (x, 0));
      fputc (']', file);
    }
  else if (GET_CODE (x) == HIGH)
    {
      fputs ("%hi(", file);
      output_addr_const (file, XEXP (x, 0));
      fputc (')', file);
    }
  else if (GET_CODE (x) == LO_SUM)
    {
      print_operand (file, XEXP (x, 0), 0);
      if (TARGET_CM_MEDMID)
        fputs ("+%l44(", file);
      else
        fputs ("+%lo(", file);
      output_addr_const (file, XEXP (x, 1));
      fputc (')', file);
    }
  else if (GET_CODE (x) == CONST_DOUBLE
           && (GET_MODE (x) == VOIDmode
               || GET_MODE_CLASS (GET_MODE (x)) == MODE_INT))
    {
      if (CONST_DOUBLE_HIGH (x) == 0)
        fprintf (file, "%u", (unsigned int) CONST_DOUBLE_LOW (x));
      else if (CONST_DOUBLE_HIGH (x) == -1
               && CONST_DOUBLE_LOW (x) < 0)
        fprintf (file, "%d", (int) CONST_DOUBLE_LOW (x));
      else
        output_operand_lossage ("long long constant not a valid immediate operand");
    }
  else if (GET_CODE (x) == CONST_DOUBLE)
    output_operand_lossage ("floating point constant not a valid immediate operand");
  else
    output_addr_const (file, x);
}

/* Target hook for assembling integer objects.  The sparc version has
   special handling for aligned DI-mode objects.  */

static bool
sparc_assemble_integer (rtx x, unsigned int size, int aligned_p)
{
  /* ??? We only output .xword's for symbols and only then in environments
     where the assembler can handle them.  */
  if (aligned_p && size == 8
      && (GET_CODE (x) != CONST_INT && GET_CODE (x) != CONST_DOUBLE))
    {
      if (TARGET_V9)
        {
          assemble_integer_with_op ("\t.xword\t", x);
          return true;
        }
      else
        {
          assemble_aligned_integer (4, const0_rtx);
          assemble_aligned_integer (4, x);
          return true;
        }
    }
  return default_assemble_integer (x, size, aligned_p);
}

/* Return the value of a code used in the .proc pseudo-op that says
   what kind of result this function returns.  For non-C types, we pick
   the closest C type.  */
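/* Judging from the code below, the encoding packs a basic type code in
   the low bits and a series of 2-bit derivation codes (1 = pointer,
   2 = function, 3 = array), outermost first, in bits 6 through 29.  */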
 
#ifndef SHORT_TYPE_SIZE
#define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
#endif

#ifndef INT_TYPE_SIZE
#define INT_TYPE_SIZE BITS_PER_WORD
#endif

#ifndef LONG_TYPE_SIZE
#define LONG_TYPE_SIZE BITS_PER_WORD
#endif

#ifndef LONG_LONG_TYPE_SIZE
#define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
#endif

#ifndef FLOAT_TYPE_SIZE
#define FLOAT_TYPE_SIZE BITS_PER_WORD
#endif

#ifndef DOUBLE_TYPE_SIZE
#define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
#endif

#ifndef LONG_DOUBLE_TYPE_SIZE
#define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
#endif

unsigned long
sparc_type_code (register tree type)
{
  register unsigned long qualifiers = 0;
  register unsigned shift;

  /* Only the first 30 bits of the qualifier are valid.  We must refrain from
     setting more, since some assemblers will give an error for this.  Also,
     we must be careful to avoid shifts of 32 bits or more to avoid getting
     unpredictable results.  */

  for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
    {
      switch (TREE_CODE (type))
        {
        case ERROR_MARK:
          return qualifiers;

        case ARRAY_TYPE:
          qualifiers |= (3 << shift);
          break;

        case FUNCTION_TYPE:
        case METHOD_TYPE:
          qualifiers |= (2 << shift);
          break;

        case POINTER_TYPE:
        case REFERENCE_TYPE:
        case OFFSET_TYPE:
          qualifiers |= (1 << shift);
          break;

        case RECORD_TYPE:
          return (qualifiers | 8);

        case UNION_TYPE:
        case QUAL_UNION_TYPE:
          return (qualifiers | 9);

        case ENUMERAL_TYPE:
          return (qualifiers | 10);

        case VOID_TYPE:
          return (qualifiers | 16);

        case INTEGER_TYPE:
          /* If this is a range type, consider it to be the underlying
             type.  */
          if (TREE_TYPE (type) != 0)
            break;

          /* Carefully distinguish all the standard types of C,
             without messing up if the language is not C.  We do this by
             testing TYPE_PRECISION and TYPE_UNSIGNED.  The old code used to
             look at both the names and the above fields, but that's redundant.
             Any type whose size is between two C types will be considered
             to be the wider of the two types.  Also, we do not have a
             special code to use for "long long", so anything wider than
             long is treated the same.  Note that we can't distinguish
             between "int" and "long" in this code if they are the same
             size, but that's fine, since neither can the assembler.  */

          if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
            return (qualifiers | (TYPE_UNSIGNED (type) ? 12 : 2));

          else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
            return (qualifiers | (TYPE_UNSIGNED (type) ? 13 : 3));

          else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
            return (qualifiers | (TYPE_UNSIGNED (type) ? 14 : 4));

          else
            return (qualifiers | (TYPE_UNSIGNED (type) ? 15 : 5));

        case REAL_TYPE:
          /* If this is a range type, consider it to be the underlying
             type.  */
          if (TREE_TYPE (type) != 0)
            break;

          /* Carefully distinguish all the standard types of C,
             without messing up if the language is not C.  */

          if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
            return (qualifiers | 6);

          else
            return (qualifiers | 7);

        case COMPLEX_TYPE:      /* GNU Fortran COMPLEX type.  */
          /* ??? We need to distinguish between double and float complex types,
             but I don't know how yet because I can't reach this code from
             existing front-ends.  */
          return (qualifiers | 7);      /* Who knows? */

        case VECTOR_TYPE:
        case BOOLEAN_TYPE:      /* Boolean truth value type.  */
        case LANG_TYPE:         /* ? */
          return qualifiers;

        default:
          gcc_unreachable ();           /* Not a type! */
        }
    }

  return qualifiers;
}

/* Nested function support.  */

/* Emit RTL insns to initialize the variable parts of a trampoline.
   FNADDR is an RTX for the address of the function's pure code.
   CXT is an RTX for the static chain value for the function.

   This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
   (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
   (to store insns).  This is a bit excessive.  Perhaps a different
   mechanism would be better here.

   Emit enough FLUSH insns to synchronize the data and instruction caches.  */

void
sparc_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
{
  /* SPARC 32-bit trampoline:

        sethi   %hi(fn), %g1
        sethi   %hi(static), %g2
        jmp     %g1+%lo(fn)
        or      %g2, %lo(static), %g2

    SETHI i,r  = 00rr rrr1 00ii iiii iiii iiii iiii iiii
    JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii
   */
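  /* For example, 0x03000000 below is the SETHI opcode with rd = %g1 and a
     zero immediate; OR-ing in the top 22 bits of FNADDR (FNADDR >> 10)
     completes "sethi %hi(fn), %g1".  Likewise 0x05000000 is a sethi
     targeting %g2, while 0x81c06000 and 0x8410a000 are the jmp and or
     instructions awaiting their low 10 address bits (the 0x3ff masks).  */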
 
  emit_move_insn
    (gen_rtx_MEM (SImode, plus_constant (tramp, 0)),
     expand_binop (SImode, ior_optab,
                   expand_shift (RSHIFT_EXPR, SImode, fnaddr,
                                 size_int (10), 0, 1),
                   GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
                   NULL_RTX, 1, OPTAB_DIRECT));

  emit_move_insn
    (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
     expand_binop (SImode, ior_optab,
                   expand_shift (RSHIFT_EXPR, SImode, cxt,
                                 size_int (10), 0, 1),
                   GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
                   NULL_RTX, 1, OPTAB_DIRECT));

  emit_move_insn
    (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
     expand_binop (SImode, ior_optab,
                   expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
                   GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
                   NULL_RTX, 1, OPTAB_DIRECT));

  emit_move_insn
    (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
     expand_binop (SImode, ior_optab,
                   expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
                   GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
                   NULL_RTX, 1, OPTAB_DIRECT));

  /* On UltraSPARC a flush flushes an entire cache line.  The trampoline is
     aligned on a 16 byte boundary so one flush clears it all.  */
  emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode, tramp))));
  if (sparc_cpu != PROCESSOR_ULTRASPARC
      && sparc_cpu != PROCESSOR_ULTRASPARC3
      && sparc_cpu != PROCESSOR_NIAGARA)
    emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode,
                                                     plus_constant (tramp, 8)))));

  /* Call __enable_execute_stack after writing onto the stack to make sure
     the stack address is accessible.  */
#ifdef ENABLE_EXECUTE_STACK
  emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
                     LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
#endif
}

/* The 64-bit version is simpler because it makes more sense to load the
   values as "immediate" data out of the trampoline.  It's also easier since
   we can read the PC without clobbering a register.  */

void
sparc64_initialize_trampoline (rtx tramp, rtx fnaddr, rtx cxt)
{
  /* SPARC 64-bit trampoline:

        rd      %pc, %g1
        ldx     [%g1+24], %g5
        jmp     %g5
        ldx     [%g1+16], %g5
        +16 bytes data
   */

  emit_move_insn (gen_rtx_MEM (SImode, tramp),
                  GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
  emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
                  GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
  emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
                  GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
  emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
                  GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
  emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 16)), cxt);
  emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 24)), fnaddr);
  emit_insn (gen_flushdi (validize_mem (gen_rtx_MEM (DImode, tramp))));

  if (sparc_cpu != PROCESSOR_ULTRASPARC
      && sparc_cpu != PROCESSOR_ULTRASPARC3
      && sparc_cpu != PROCESSOR_NIAGARA)
    emit_insn (gen_flushdi (validize_mem (gen_rtx_MEM (DImode, plus_constant (tramp, 8)))));

  /* Call __enable_execute_stack after writing onto the stack to make sure
     the stack address is accessible.  */
#ifdef ENABLE_EXECUTE_STACK
  emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
                     LCT_NORMAL, VOIDmode, 1, tramp, Pmode);
#endif
}

/* Adjust the cost of a scheduling dependency.  Return the new cost of
   a dependency LINK of INSN on DEP_INSN.  COST is the current cost.  */

static int
supersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
{
  enum attr_type insn_type;

  if (! recog_memoized (insn))
    return 0;

  insn_type = get_attr_type (insn);

  if (REG_NOTE_KIND (link) == 0)
    {
      /* Data dependency; DEP_INSN writes a register that INSN reads some
         cycles later.  */

      /* If a load, then the dependence must be on the memory address;
         add an extra "cycle".  Note that the cost could be two cycles
         if the reg was written late in an instruction group; we cannot
         tell here.  */
      if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
        return cost + 3;

      /* Get the delay only if the address of the store is the dependence.  */
      if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
        {
          rtx pat = PATTERN (insn);
          rtx dep_pat = PATTERN (dep_insn);

          if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
            return cost;  /* This should not happen!  */

          /* The dependency between the two instructions was on the data that
             is being stored.  Assume that this implies that the address of the
             store is not dependent.  */
          if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
            return cost;

          return cost + 3;  /* An approximation.  */
        }

      /* A shift instruction cannot receive its data from an instruction
         in the same cycle; add a one cycle penalty.  */
      if (insn_type == TYPE_SHIFT)
        return cost + 3;   /* Split before cascade into shift.  */
    }
  else
    {
      /* Anti- or output- dependency; DEP_INSN reads/writes a register that
         INSN writes some cycles later.  */

      /* These are only significant for the fpu unit; writing a fp reg before
         the fpu has finished with it stalls the processor.  */

      /* Reusing an integer register causes no problems.  */
      if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
        return 0;
    }

  return cost;
}

static int
hypersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
{
  enum attr_type insn_type, dep_type;
  rtx pat = PATTERN (insn);
  rtx dep_pat = PATTERN (dep_insn);

  if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
    return cost;

  insn_type = get_attr_type (insn);
  dep_type = get_attr_type (dep_insn);

  switch (REG_NOTE_KIND (link))
    {
    case 0:
      /* Data dependency; DEP_INSN writes a register that INSN reads some
         cycles later.  */

      switch (insn_type)
        {
        case TYPE_STORE:
        case TYPE_FPSTORE:
          /* Get the delay iff the address of the store is the dependence.  */
          if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
            return cost;

          if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
            return cost;
          return cost + 3;

        case TYPE_LOAD:
        case TYPE_SLOAD:
        case TYPE_FPLOAD:
          /* If a load, then the dependence must be on the memory address.  If
             the addresses aren't equal, then it might be a false dependency.  */
          if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
            {
              if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
                  || GET_CODE (SET_DEST (dep_pat)) != MEM
                  || GET_CODE (SET_SRC (pat)) != MEM
                  || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
                                    XEXP (SET_SRC (pat), 0)))
                return cost + 2;

              return cost + 8;
            }
          break;

        case TYPE_BRANCH:
          /* Compare to branch latency is 0.  There is no benefit from
             separating compare and branch.  */
          if (dep_type == TYPE_COMPARE)
            return 0;
          /* Floating point compare to branch latency is less than
             compare to conditional move.  */
          if (dep_type == TYPE_FPCMP)
            return cost - 1;
          break;
        default:
          break;
        }
      break;

    case REG_DEP_ANTI:
      /* Anti-dependencies only penalize the fpu unit.  */
      if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
        return 0;
      break;

    default:
      break;
    }

  return cost;
}

static int
sparc_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
{
  switch (sparc_cpu)
    {
    case PROCESSOR_SUPERSPARC:
      cost = supersparc_adjust_cost (insn, link, dep, cost);
      break;
    case PROCESSOR_HYPERSPARC:
    case PROCESSOR_SPARCLITE86X:
      cost = hypersparc_adjust_cost (insn, link, dep, cost);
      break;
    default:
      break;
    }
  return cost;
}

static void
sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
                  int sched_verbose ATTRIBUTE_UNUSED,
                  int max_ready ATTRIBUTE_UNUSED)
{
}

static int
sparc_use_sched_lookahead (void)
{
  if (sparc_cpu == PROCESSOR_NIAGARA)
    return 0;
  if (sparc_cpu == PROCESSOR_ULTRASPARC
      || sparc_cpu == PROCESSOR_ULTRASPARC3)
    return 4;
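  /* Test the remaining processors in one go, using sparc_cpu as a bit
     index into a mask.  */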
  if ((1 << sparc_cpu) &
      ((1 << PROCESSOR_SUPERSPARC) | (1 << PROCESSOR_HYPERSPARC) |
       (1 << PROCESSOR_SPARCLITE86X)))
    return 3;
  return 0;
}

static int
sparc_issue_rate (void)
{
  switch (sparc_cpu)
    {
    case PROCESSOR_NIAGARA:
    default:
      return 1;
    case PROCESSOR_V9:
      /* Assume V9 processors are capable of at least dual-issue.  */
      return 2;
    case PROCESSOR_SUPERSPARC:
      return 3;
    case PROCESSOR_HYPERSPARC:
    case PROCESSOR_SPARCLITE86X:
      return 2;
    case PROCESSOR_ULTRASPARC:
    case PROCESSOR_ULTRASPARC3:
      return 4;
    }
}

static int
set_extends (rtx insn)
{
  register rtx pat = PATTERN (insn);

  switch (GET_CODE (SET_SRC (pat)))
    {
      /* Load and some shift instructions zero extend.  */
    case MEM:
    case ZERO_EXTEND:
      /* sethi clears the high bits.  */
    case HIGH:
      /* LO_SUM is used with sethi.  sethi cleared the high
         bits and the values used with lo_sum are positive.  */
    case LO_SUM:
      /* Store flag stores 0 or 1.  */
    case LT: case LTU:
    case GT: case GTU:
    case LE: case LEU:
    case GE: case GEU:
    case EQ:
    case NE:
      return 1;
    case AND:
      {
        rtx op0 = XEXP (SET_SRC (pat), 0);
        rtx op1 = XEXP (SET_SRC (pat), 1);
        if (GET_CODE (op1) == CONST_INT)
          return INTVAL (op1) >= 0;
        if (GET_CODE (op0) != REG)
          return 0;
        if (sparc_check_64 (op0, insn) == 1)
          return 1;
        return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
      }
    case IOR:
    case XOR:
      {
        rtx op0 = XEXP (SET_SRC (pat), 0);
        rtx op1 = XEXP (SET_SRC (pat), 1);
        if (GET_CODE (op0) != REG || sparc_check_64 (op0, insn) <= 0)
          return 0;
        if (GET_CODE (op1) == CONST_INT)
          return INTVAL (op1) >= 0;
        return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
      }
    case LSHIFTRT:
      return GET_MODE (SET_SRC (pat)) == SImode;
      /* Positive integers leave the high bits zero.  */
    case CONST_DOUBLE:
      return ! (CONST_DOUBLE_LOW (SET_SRC (pat)) & 0x80000000);
    case CONST_INT:
      return ! (INTVAL (SET_SRC (pat)) & 0x80000000);
    case ASHIFTRT:
    case SIGN_EXTEND:
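      /* These return -1 for an SImode source: the value is known to be
         sign-extended (see sparc_check_64).  */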
      return - (GET_MODE (SET_SRC (pat)) == SImode);
    case REG:
      return sparc_check_64 (SET_SRC (pat), insn);
    default:
      return 0;
    }
}

/* We _ought_ to have only one kind per function, but...  */
static GTY(()) rtx sparc_addr_diff_list;
static GTY(()) rtx sparc_addr_list;

void
sparc_defer_case_vector (rtx lab, rtx vec, int diff)
{
  vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
  if (diff)
    sparc_addr_diff_list
      = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_diff_list);
  else
    sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
}

static void
sparc_output_addr_vec (rtx vec)
{
  rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
  int idx, vlen = XVECLEN (body, 0);

#ifdef ASM_OUTPUT_ADDR_VEC_START
  ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
#endif

#ifdef ASM_OUTPUT_CASE_LABEL
  ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
                         NEXT_INSN (lab));
#else
  (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
#endif

  for (idx = 0; idx < vlen; idx++)
    {
      ASM_OUTPUT_ADDR_VEC_ELT
        (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
    }

#ifdef ASM_OUTPUT_ADDR_VEC_END
  ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
#endif
}

static void
sparc_output_addr_diff_vec (rtx vec)
{
  rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
  rtx base = XEXP (XEXP (body, 0), 0);
  int idx, vlen = XVECLEN (body, 1);

#ifdef ASM_OUTPUT_ADDR_VEC_START
  ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
#endif

#ifdef ASM_OUTPUT_CASE_LABEL
  ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
                         NEXT_INSN (lab));
#else
  (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
#endif

  for (idx = 0; idx < vlen; idx++)
    {
      ASM_OUTPUT_ADDR_DIFF_ELT
        (asm_out_file,
         body,
         CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
         CODE_LABEL_NUMBER (base));
    }

#ifdef ASM_OUTPUT_ADDR_VEC_END
  ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
#endif
}

static void
sparc_output_deferred_case_vectors (void)
{
  rtx t;
  int align;

  if (sparc_addr_list == NULL_RTX
      && sparc_addr_diff_list == NULL_RTX)
    return;

  /* Align to cache line in the function's code section.  */
  switch_to_section (current_function_section ());

  align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
  if (align > 0)
    ASM_OUTPUT_ALIGN (asm_out_file, align);

  for (t = sparc_addr_list; t ; t = XEXP (t, 1))
    sparc_output_addr_vec (XEXP (t, 0));
  for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
    sparc_output_addr_diff_vec (XEXP (t, 0));

  sparc_addr_list = sparc_addr_diff_list = NULL_RTX;
}

/* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
   unknown.  Return 1 if the high bits are zero, -1 if the register is
   sign extended.  */
int
sparc_check_64 (rtx x, rtx insn)
{
  /* If a register is set only once it is safe to ignore insns this
     code does not know how to handle.  The loop will either recognize
     the single set and return the correct value or fail to recognize
     it and return 0.  */
  int set_once = 0;
  rtx y = x;

  gcc_assert (GET_CODE (x) == REG);

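  /* Make Y the SImode low word of X; with WORDS_BIG_ENDIAN set, that is
     the second register of the pair.  */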
  if (GET_MODE (x) == DImode)
    y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);

  if (flag_expensive_optimizations
      && REG_N_SETS (REGNO (y)) == 1)
    set_once = 1;

  if (insn == 0)
    {
      if (set_once)
        insn = get_last_insn_anywhere ();
      else
        return 0;
    }

  while ((insn = PREV_INSN (insn)))
    {
      switch (GET_CODE (insn))
        {
        case JUMP_INSN:
        case NOTE:
          break;
        case CODE_LABEL:
        case CALL_INSN:
        default:
          if (! set_once)
            return 0;
          break;
        case INSN:
          {
            rtx pat = PATTERN (insn);
            if (GET_CODE (pat) != SET)
              return 0;
            if (rtx_equal_p (x, SET_DEST (pat)))
              return set_extends (insn);
            if (y && rtx_equal_p (y, SET_DEST (pat)))
              return set_extends (insn);
            if (reg_overlap_mentioned_p (SET_DEST (pat), y))
              return 0;
          }
        }
    }
  return 0;
}

/* Returns assembly code to perform a DImode shift using
   a 64-bit global or out register on SPARC-V8+.  */
const char *
output_v8plus_shift (rtx *operands, rtx insn, const char *opcode)
{
  static char asm_code[60];

  /* The scratch register is only required when the destination
     register is not a 64-bit global or out register.  */
  if (which_alternative != 2)
    operands[3] = operands[0];
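  /* The sequences below first assemble the full 64-bit value from the
     %H1/%L1 halves, perform the shift in a single 64-bit register, and
     then split the result back into a high and a low word.  */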
 
7674
  /* We can only shift by constants <= 63. */
7675
  if (GET_CODE (operands[2]) == CONST_INT)
7676
    operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
7677
 
7678
  if (GET_CODE (operands[1]) == CONST_INT)
7679
    {
7680
      output_asm_insn ("mov\t%1, %3", operands);
7681
    }
7682
  else
7683
    {
7684
      output_asm_insn ("sllx\t%H1, 32, %3", operands);
7685
      if (sparc_check_64 (operands[1], insn) <= 0)
7686
        output_asm_insn ("srl\t%L1, 0, %L1", operands);
7687
      output_asm_insn ("or\t%L1, %3, %3", operands);
7688
    }
7689
 
7690
  strcpy(asm_code, opcode);
7691
 
7692
  if (which_alternative != 2)
7693
    return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
7694
  else
7695
    return strcat (asm_code, "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
7696
}
7697
 
7698
/* Output rtl to increment the profiler label LABELNO
7699
   for profiling a function entry.  */
7700
 
7701
void
7702
sparc_profile_hook (int labelno)
7703
{
7704
  char buf[32];
7705
  rtx lab, fun;
7706
 
7707
  ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
7708
  lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
7709
  fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
7710
 
7711
  emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lab, Pmode);
7712
}
7713
 
7714
#ifdef OBJECT_FORMAT_ELF
7715
static void
7716
sparc_elf_asm_named_section (const char *name, unsigned int flags,
7717
                             tree decl)
7718
{
7719
  if (flags & SECTION_MERGE)
7720
    {
7721
      /* entsize cannot be expressed in this section attributes
7722
         encoding style.  */
7723
      default_elf_asm_named_section (name, flags, decl);
7724
      return;
7725
    }
7726
 
7727
  fprintf (asm_out_file, "\t.section\t\"%s\"", name);
7728
 
7729
  if (!(flags & SECTION_DEBUG))
7730
    fputs (",#alloc", asm_out_file);
7731
  if (flags & SECTION_WRITE)
7732
    fputs (",#write", asm_out_file);
7733
  if (flags & SECTION_TLS)
7734
    fputs (",#tls", asm_out_file);
7735
  if (flags & SECTION_CODE)
7736
    fputs (",#execinstr", asm_out_file);
7737
 
7738
  /* ??? Handle SECTION_BSS.  */
7739
 
7740
  fputc ('\n', asm_out_file);
7741
}
#endif /* OBJECT_FORMAT_ELF */

/* We do not allow indirect calls to be optimized into sibling calls.

   We cannot use sibling calls when delayed branches are disabled
   because they will likely require the call delay slot to be filled.

   Also, on SPARC 32-bit we cannot emit a sibling call when the
   current function returns a structure.  This is because the "unimp
   after call" convention would cause the callee to return to the
   wrong place.  The generic code already disallows cases where the
   function being called returns a structure.

   It may seem strange how this last case could occur.  Usually there
   is code after the call which jumps to epilogue code which dumps the
   return value into the struct return area.  That ought to invalidate
   the sibling call right?  Well, in the C++ case we can end up passing
   the pointer to the struct return area to a constructor (which returns
   void) and then nothing else happens.  Such a sibling call would look
   valid without the added check here.  */
static bool
sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
  return (decl
          && flag_delayed_branch
          && (TARGET_ARCH64 || ! current_function_returns_struct));
}

/* libfunc renaming.  */
#include "config/gofast.h"

static void
sparc_init_libfuncs (void)
{
  if (TARGET_ARCH32)
    {
      /* Use the subroutines that Sun's library provides for integer
         multiply and divide.  The `*' prevents an underscore from
         being prepended by the compiler.  .umul is a little faster
         than .mul.  */
      set_optab_libfunc (smul_optab, SImode, "*.umul");
      set_optab_libfunc (sdiv_optab, SImode, "*.div");
      set_optab_libfunc (udiv_optab, SImode, "*.udiv");
      set_optab_libfunc (smod_optab, SImode, "*.rem");
      set_optab_libfunc (umod_optab, SImode, "*.urem");

      /* TFmode arithmetic.  These names are part of the SPARC 32-bit ABI.  */
      set_optab_libfunc (add_optab, TFmode, "_Q_add");
      set_optab_libfunc (sub_optab, TFmode, "_Q_sub");
      set_optab_libfunc (neg_optab, TFmode, "_Q_neg");
      set_optab_libfunc (smul_optab, TFmode, "_Q_mul");
      set_optab_libfunc (sdiv_optab, TFmode, "_Q_div");

      /* We can define the TFmode sqrt optab only if TARGET_FPU.  This
         is because with soft-float, the SFmode and DFmode sqrt
         instructions will be absent, and the compiler will notice and
         try to use the TFmode sqrt instruction for calls to the
         builtin function sqrt, but this fails.  */
      if (TARGET_FPU)
        set_optab_libfunc (sqrt_optab, TFmode, "_Q_sqrt");

      set_optab_libfunc (eq_optab, TFmode, "_Q_feq");
      set_optab_libfunc (ne_optab, TFmode, "_Q_fne");
      set_optab_libfunc (gt_optab, TFmode, "_Q_fgt");
      set_optab_libfunc (ge_optab, TFmode, "_Q_fge");
      set_optab_libfunc (lt_optab, TFmode, "_Q_flt");
      set_optab_libfunc (le_optab, TFmode, "_Q_fle");

      set_conv_libfunc (sext_optab,   TFmode, SFmode, "_Q_stoq");
      set_conv_libfunc (sext_optab,   TFmode, DFmode, "_Q_dtoq");
      set_conv_libfunc (trunc_optab,  SFmode, TFmode, "_Q_qtos");
      set_conv_libfunc (trunc_optab,  DFmode, TFmode, "_Q_qtod");

      set_conv_libfunc (sfix_optab,   SImode, TFmode, "_Q_qtoi");
      set_conv_libfunc (ufix_optab,   SImode, TFmode, "_Q_qtou");
      set_conv_libfunc (sfloat_optab, TFmode, SImode, "_Q_itoq");
      set_conv_libfunc (ufloat_optab, TFmode, SImode, "_Q_utoq");

      if (DITF_CONVERSION_LIBFUNCS)
        {
          set_conv_libfunc (sfix_optab,   DImode, TFmode, "_Q_qtoll");
          set_conv_libfunc (ufix_optab,   DImode, TFmode, "_Q_qtoull");
          set_conv_libfunc (sfloat_optab, TFmode, DImode, "_Q_lltoq");
          set_conv_libfunc (ufloat_optab, TFmode, DImode, "_Q_ulltoq");
        }

      if (SUN_CONVERSION_LIBFUNCS)
        {
          set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftoll");
          set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoull");
          set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtoll");
          set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoull");
        }
    }
  if (TARGET_ARCH64)
    {
      /* In the SPARC 64-bit ABI, SImode multiply and divide functions
         do not exist in the library.  Make sure the compiler does not
         emit calls to them by accident.  (It should always use the
         hardware instructions.)  */
      set_optab_libfunc (smul_optab, SImode, 0);
      set_optab_libfunc (sdiv_optab, SImode, 0);
      set_optab_libfunc (udiv_optab, SImode, 0);
      set_optab_libfunc (smod_optab, SImode, 0);
      set_optab_libfunc (umod_optab, SImode, 0);

      if (SUN_INTEGER_MULTIPLY_64)
        {
          set_optab_libfunc (smul_optab, DImode, "__mul64");
          set_optab_libfunc (sdiv_optab, DImode, "__div64");
          set_optab_libfunc (udiv_optab, DImode, "__udiv64");
          set_optab_libfunc (smod_optab, DImode, "__rem64");
          set_optab_libfunc (umod_optab, DImode, "__urem64");
        }

      if (SUN_CONVERSION_LIBFUNCS)
        {
          set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftol");
          set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoul");
          set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtol");
          set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoul");
        }
    }

  gofast_maybe_init_libfuncs ();
}
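
/* Illustration (a sketch, assuming TFmode long double on 32-bit):

       long double f (long double a, long double b) { return a * b; }

   expands to a call to _Q_mul under the renaming registered above,
   and a == b likewise becomes a call to _Q_feq.  */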

#define def_builtin(NAME, CODE, TYPE) \
  lang_hooks.builtin_function((NAME), (TYPE), (CODE), BUILT_IN_MD, NULL, \
                              NULL_TREE)

/* Implement the TARGET_INIT_BUILTINS target hook.
   Create builtin functions for special SPARC instructions.  */

static void
sparc_init_builtins (void)
{
  if (TARGET_VIS)
    sparc_vis_init_builtins ();
}

/* Create builtin functions for VIS 1.0 instructions.  */

static void
sparc_vis_init_builtins (void)
{
  tree v4qi = build_vector_type (unsigned_intQI_type_node, 4);
  tree v8qi = build_vector_type (unsigned_intQI_type_node, 8);
  tree v4hi = build_vector_type (intHI_type_node, 4);
  tree v2hi = build_vector_type (intHI_type_node, 2);
  tree v2si = build_vector_type (intSI_type_node, 2);

  tree v4qi_ftype_v4hi = build_function_type_list (v4qi, v4hi, 0);
  tree v8qi_ftype_v2si_v8qi = build_function_type_list (v8qi, v2si, v8qi, 0);
  tree v2hi_ftype_v2si = build_function_type_list (v2hi, v2si, 0);
  tree v4hi_ftype_v4qi = build_function_type_list (v4hi, v4qi, 0);
  tree v8qi_ftype_v4qi_v4qi = build_function_type_list (v8qi, v4qi, v4qi, 0);
  tree v4hi_ftype_v4qi_v4hi = build_function_type_list (v4hi, v4qi, v4hi, 0);
  tree v4hi_ftype_v4qi_v2hi = build_function_type_list (v4hi, v4qi, v2hi, 0);
  tree v2si_ftype_v4qi_v2hi = build_function_type_list (v2si, v4qi, v2hi, 0);
  tree v4hi_ftype_v8qi_v4hi = build_function_type_list (v4hi, v8qi, v4hi, 0);
  tree v4hi_ftype_v4hi_v4hi = build_function_type_list (v4hi, v4hi, v4hi, 0);
  tree v2si_ftype_v2si_v2si = build_function_type_list (v2si, v2si, v2si, 0);
  tree v8qi_ftype_v8qi_v8qi = build_function_type_list (v8qi, v8qi, v8qi, 0);
  tree di_ftype_v8qi_v8qi_di = build_function_type_list (intDI_type_node,
                                                         v8qi, v8qi,
                                                         intDI_type_node, 0);
  tree di_ftype_di_di = build_function_type_list (intDI_type_node,
                                                  intDI_type_node,
                                                  intDI_type_node, 0);
  tree ptr_ftype_ptr_si = build_function_type_list (ptr_type_node,
                                                    ptr_type_node,
                                                    intSI_type_node, 0);
  tree ptr_ftype_ptr_di = build_function_type_list (ptr_type_node,
                                                    ptr_type_node,
                                                    intDI_type_node, 0);

  /* Packing and expanding vectors.  */
  def_builtin ("__builtin_vis_fpack16", CODE_FOR_fpack16_vis, v4qi_ftype_v4hi);
  def_builtin ("__builtin_vis_fpack32", CODE_FOR_fpack32_vis,
               v8qi_ftype_v2si_v8qi);
  def_builtin ("__builtin_vis_fpackfix", CODE_FOR_fpackfix_vis,
               v2hi_ftype_v2si);
  def_builtin ("__builtin_vis_fexpand", CODE_FOR_fexpand_vis, v4hi_ftype_v4qi);
  def_builtin ("__builtin_vis_fpmerge", CODE_FOR_fpmerge_vis,
               v8qi_ftype_v4qi_v4qi);

  /* Multiplications.  */
  def_builtin ("__builtin_vis_fmul8x16", CODE_FOR_fmul8x16_vis,
               v4hi_ftype_v4qi_v4hi);
  def_builtin ("__builtin_vis_fmul8x16au", CODE_FOR_fmul8x16au_vis,
               v4hi_ftype_v4qi_v2hi);
  def_builtin ("__builtin_vis_fmul8x16al", CODE_FOR_fmul8x16al_vis,
               v4hi_ftype_v4qi_v2hi);
  def_builtin ("__builtin_vis_fmul8sux16", CODE_FOR_fmul8sux16_vis,
               v4hi_ftype_v8qi_v4hi);
  def_builtin ("__builtin_vis_fmul8ulx16", CODE_FOR_fmul8ulx16_vis,
               v4hi_ftype_v8qi_v4hi);
  def_builtin ("__builtin_vis_fmuld8sux16", CODE_FOR_fmuld8sux16_vis,
               v2si_ftype_v4qi_v2hi);
  def_builtin ("__builtin_vis_fmuld8ulx16", CODE_FOR_fmuld8ulx16_vis,
               v2si_ftype_v4qi_v2hi);

  /* Data aligning.  */
  def_builtin ("__builtin_vis_faligndatav4hi", CODE_FOR_faligndatav4hi_vis,
               v4hi_ftype_v4hi_v4hi);
  def_builtin ("__builtin_vis_faligndatav8qi", CODE_FOR_faligndatav8qi_vis,
               v8qi_ftype_v8qi_v8qi);
  def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis,
               v2si_ftype_v2si_v2si);
  def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatadi_vis,
               di_ftype_di_di);
  if (TARGET_ARCH64)
    def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrdi_vis,
                 ptr_ftype_ptr_di);
  else
    def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrsi_vis,
                 ptr_ftype_ptr_si);

  /* Pixel distance.  */
  def_builtin ("__builtin_vis_pdist", CODE_FOR_pdist_vis,
               di_ftype_v8qi_v8qi_di);
}
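
/* User-level sketch (the typedefs are illustrative, nothing here
   defines them):

       typedef short v4hi __attribute__ ((vector_size (8)));
       typedef unsigned char v4qi __attribute__ ((vector_size (4)));

       v4qi pack (v4hi x) { return __builtin_vis_fpack16 (x); }

   With -mvis this should compile down to a single fpack16
   instruction.  */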

/* Handle TARGET_EXPAND_BUILTIN target hook.
   Expand builtin functions for SPARC intrinsics.  */

static rtx
sparc_expand_builtin (tree exp, rtx target,
                      rtx subtarget ATTRIBUTE_UNUSED,
                      enum machine_mode tmode ATTRIBUTE_UNUSED,
                      int ignore ATTRIBUTE_UNUSED)
{
  tree arglist;
  tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
  unsigned int icode = DECL_FUNCTION_CODE (fndecl);
  rtx pat, op[4];
  enum machine_mode mode[4];
  int arg_count = 0;

  mode[0] = insn_data[icode].operand[0].mode;
  if (!target
      || GET_MODE (target) != mode[0]
      || ! (*insn_data[icode].operand[0].predicate) (target, mode[0]))
    op[0] = gen_reg_rtx (mode[0]);
  else
    op[0] = target;

  for (arglist = TREE_OPERAND (exp, 1); arglist;
       arglist = TREE_CHAIN (arglist))
    {
      tree arg = TREE_VALUE (arglist);

      arg_count++;
      mode[arg_count] = insn_data[icode].operand[arg_count].mode;
      op[arg_count] = expand_normal (arg);

      if (! (*insn_data[icode].operand[arg_count].predicate) (op[arg_count],
                                                              mode[arg_count]))
        op[arg_count] = copy_to_mode_reg (mode[arg_count], op[arg_count]);
    }

  switch (arg_count)
    {
    case 1:
      pat = GEN_FCN (icode) (op[0], op[1]);
      break;
    case 2:
      pat = GEN_FCN (icode) (op[0], op[1], op[2]);
      break;
    case 3:
      pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
      break;
    default:
      gcc_unreachable ();
    }

  if (!pat)
    return NULL_RTX;

  emit_insn (pat);

  return op[0];
}

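/* Scale E8 * E16 the way the VIS fmul8x16 instructions do: keep the
   high 16 bits of the 8.16 fixed-point product, rounded to nearest.
   E.g. sparc_vis_mul8x16 (64, 512) == (64*512 + 128) / 256 == 128.  */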
static int
sparc_vis_mul8x16 (int e8, int e16)
{
  return (e8 * e16 + 128) / 256;
}

/* Multiply the vector elements in ELTS0 by the elements in ELTS1 as specified
   by FNCODE.  All of the elements in the ELTS0 and ELTS1 lists must be integer
   constants.  A tree list with the results of the multiplications is returned,
   and each element in the list is of INNER_TYPE.  */

static tree
sparc_handle_vis_mul8x16 (int fncode, tree inner_type, tree elts0, tree elts1)
{
  tree n_elts = NULL_TREE;
  int scale;

  switch (fncode)
    {
    case CODE_FOR_fmul8x16_vis:
      for (; elts0 && elts1;
           elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
        {
          int val
            = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
                                 TREE_INT_CST_LOW (TREE_VALUE (elts1)));
          n_elts = tree_cons (NULL_TREE,
                              build_int_cst (inner_type, val),
                              n_elts);
        }
      break;

    case CODE_FOR_fmul8x16au_vis:
      scale = TREE_INT_CST_LOW (TREE_VALUE (elts1));

      for (; elts0; elts0 = TREE_CHAIN (elts0))
        {
          int val
            = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
                                 scale);
          n_elts = tree_cons (NULL_TREE,
                              build_int_cst (inner_type, val),
                              n_elts);
        }
      break;

    case CODE_FOR_fmul8x16al_vis:
      scale = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (elts1)));

      for (; elts0; elts0 = TREE_CHAIN (elts0))
        {
          int val
            = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
                                 scale);
          n_elts = tree_cons (NULL_TREE,
                              build_int_cst (inner_type, val),
                              n_elts);
        }
      break;

    default:
      gcc_unreachable ();
    }

  return nreverse (n_elts);
}

/* Handle TARGET_FOLD_BUILTIN target hook.
   Fold builtin functions for SPARC intrinsics.  If IGNORE is true the
   result of the function call is ignored.  NULL_TREE is returned if the
   function could not be folded.  */

static tree
sparc_fold_builtin (tree fndecl, tree arglist, bool ignore)
{
  tree arg0, arg1, arg2;
  tree rtype = TREE_TYPE (TREE_TYPE (fndecl));

  if (ignore
      && DECL_FUNCTION_CODE (fndecl) != CODE_FOR_alignaddrsi_vis
      && DECL_FUNCTION_CODE (fndecl) != CODE_FOR_alignaddrdi_vis)
    return fold_convert (rtype, integer_zero_node);

  switch (DECL_FUNCTION_CODE (fndecl))
    {
    case CODE_FOR_fexpand_vis:
      arg0 = TREE_VALUE (arglist);
      STRIP_NOPS (arg0);

      if (TREE_CODE (arg0) == VECTOR_CST)
        {
          tree inner_type = TREE_TYPE (rtype);
          tree elts = TREE_VECTOR_CST_ELTS (arg0);
          tree n_elts = NULL_TREE;

          for (; elts; elts = TREE_CHAIN (elts))
            {
              unsigned int val = TREE_INT_CST_LOW (TREE_VALUE (elts)) << 4;
              n_elts = tree_cons (NULL_TREE,
                                  build_int_cst (inner_type, val),
                                  n_elts);
            }
          return build_vector (rtype, nreverse (n_elts));
        }
      break;

    case CODE_FOR_fmul8x16_vis:
    case CODE_FOR_fmul8x16au_vis:
    case CODE_FOR_fmul8x16al_vis:
      arg0 = TREE_VALUE (arglist);
      arg1 = TREE_VALUE (TREE_CHAIN (arglist));
      STRIP_NOPS (arg0);
      STRIP_NOPS (arg1);

      if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
        {
          tree inner_type = TREE_TYPE (rtype);
          tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
          tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
          tree n_elts = sparc_handle_vis_mul8x16 (DECL_FUNCTION_CODE (fndecl),
                                                  inner_type, elts0, elts1);

          return build_vector (rtype, n_elts);
        }
      break;

    case CODE_FOR_fpmerge_vis:
      arg0 = TREE_VALUE (arglist);
      arg1 = TREE_VALUE (TREE_CHAIN (arglist));
      STRIP_NOPS (arg0);
      STRIP_NOPS (arg1);

      if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
        {
          tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
          tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
          tree n_elts = NULL_TREE;

          for (; elts0 && elts1;
               elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
            {
              n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts0), n_elts);
              n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts1), n_elts);
            }

          return build_vector (rtype, nreverse (n_elts));
        }
      break;

    case CODE_FOR_pdist_vis:
      arg0 = TREE_VALUE (arglist);
      arg1 = TREE_VALUE (TREE_CHAIN (arglist));
      arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
      STRIP_NOPS (arg0);
      STRIP_NOPS (arg1);
      STRIP_NOPS (arg2);

      if (TREE_CODE (arg0) == VECTOR_CST
          && TREE_CODE (arg1) == VECTOR_CST
          && TREE_CODE (arg2) == INTEGER_CST)
        {
          int overflow = 0;
          unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (arg2);
          HOST_WIDE_INT high = TREE_INT_CST_HIGH (arg2);
          tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
          tree elts1 = TREE_VECTOR_CST_ELTS (arg1);

          for (; elts0 && elts1;
               elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
            {
              unsigned HOST_WIDE_INT
                low0 = TREE_INT_CST_LOW (TREE_VALUE (elts0)),
                low1 = TREE_INT_CST_LOW (TREE_VALUE (elts1));
              HOST_WIDE_INT high0 = TREE_INT_CST_HIGH (TREE_VALUE (elts0));
              HOST_WIDE_INT high1 = TREE_INT_CST_HIGH (TREE_VALUE (elts1));

              unsigned HOST_WIDE_INT l;
              HOST_WIDE_INT h;

              overflow |= neg_double (low1, high1, &l, &h);
              overflow |= add_double (low0, high0, l, h, &l, &h);
              if (h < 0)
                overflow |= neg_double (l, h, &l, &h);

              overflow |= add_double (low, high, l, h, &low, &high);
            }

          gcc_assert (overflow == 0);

          return build_int_cst_wide (rtype, low, high);
        }

    default:
      break;
    }

  return NULL_TREE;
}
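
/* E.g. folding __builtin_vis_fexpand on the constant vector
   { 0x12, 0x34, 0x56, 0x78 } yields { 0x120, 0x340, 0x560, 0x780 }:
   each 8-bit element is widened to 16 bits and shifted left by 4, as
   in the CODE_FOR_fexpand_vis case above.  */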

int
sparc_extra_constraint_check (rtx op, int c, int strict)
{
  int reload_ok_mem;

  if (TARGET_ARCH64
      && (c == 'T' || c == 'U'))
    return 0;

  switch (c)
    {
    case 'Q':
      return fp_sethi_p (op);

    case 'R':
      return fp_mov_p (op);

    case 'S':
      return fp_high_losum_p (op);

    case 'U':
      if (! strict
          || (GET_CODE (op) == REG
              && (REGNO (op) < FIRST_PSEUDO_REGISTER
                  || reg_renumber[REGNO (op)] >= 0)))
        return register_ok_for_ldd (op);

      return 0;

    case 'W':
    case 'T':
      break;

    case 'Y':
      return const_zero_operand (op, GET_MODE (op));

    default:
      return 0;
    }

  /* Our memory extra constraints have to emulate the
     behavior of 'm' and 'o' in order for reload to work
     correctly.  */
  if (GET_CODE (op) == MEM)
    {
      reload_ok_mem = 0;
      if ((TARGET_ARCH64 || mem_min_alignment (op, 8))
          && (! strict
              || strict_memory_address_p (Pmode, XEXP (op, 0))))
        reload_ok_mem = 1;
    }
  else
    {
      reload_ok_mem = (reload_in_progress
                       && GET_CODE (op) == REG
                       && REGNO (op) >= FIRST_PSEUDO_REGISTER
                       && reg_renumber [REGNO (op)] < 0);
    }

  return reload_ok_mem;
}

/* ??? This duplicates information provided to the compiler by the
   ??? scheduler description.  Some day, teach genautomata to output
   ??? the latencies and then CSE will just use that.  */

static bool
sparc_rtx_costs (rtx x, int code, int outer_code, int *total)
{
  enum machine_mode mode = GET_MODE (x);
  bool float_mode_p = FLOAT_MODE_P (mode);

  switch (code)
    {
    case CONST_INT:
      if (INTVAL (x) < 0x1000 && INTVAL (x) >= -0x1000)
        {
          *total = 0;
          return true;
        }
      /* FALLTHRU */

    case HIGH:
      *total = 2;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 4;
      return true;

    case CONST_DOUBLE:
      if (GET_MODE (x) == VOIDmode
          && ((CONST_DOUBLE_HIGH (x) == 0
               && CONST_DOUBLE_LOW (x) < 0x1000)
              || (CONST_DOUBLE_HIGH (x) == -1
                  && CONST_DOUBLE_LOW (x) < 0
                  && CONST_DOUBLE_LOW (x) >= -0x1000)))
        *total = 0;
      else
        *total = 8;
      return true;

    case MEM:
      /* If outer-code was a sign or zero extension, a cost
         of COSTS_N_INSNS (1) was already added in.  This is
         why we are subtracting it back out.  */
      if (outer_code == ZERO_EXTEND)
        {
          *total = sparc_costs->int_zload - COSTS_N_INSNS (1);
        }
      else if (outer_code == SIGN_EXTEND)
        {
          *total = sparc_costs->int_sload - COSTS_N_INSNS (1);
        }
      else if (float_mode_p)
        {
          *total = sparc_costs->float_load;
        }
      else
        {
          *total = sparc_costs->int_load;
        }

      return true;

    case PLUS:
    case MINUS:
      if (float_mode_p)
        *total = sparc_costs->float_plusminus;
      else
        *total = COSTS_N_INSNS (1);
      return false;

    case MULT:
      if (float_mode_p)
        *total = sparc_costs->float_mul;
      else if (! TARGET_HARD_MUL)
        *total = COSTS_N_INSNS (25);
      else
        {
          int bit_cost;

          bit_cost = 0;
          if (sparc_costs->int_mul_bit_factor)
            {
              int nbits;

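              /* Count the set bits of the constant multiplier with the
                 classic v &= v - 1 trick: each iteration clears the
                 lowest set bit, so the loops below run once per 1 bit.  */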
              if (GET_CODE (XEXP (x, 1)) == CONST_INT)
                {
                  unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
                  for (nbits = 0; value != 0; value &= value - 1)
                    nbits++;
                }
              else if (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
                       && GET_MODE (XEXP (x, 1)) == VOIDmode)
                {
                  rtx x1 = XEXP (x, 1);
                  unsigned HOST_WIDE_INT value1 = CONST_DOUBLE_LOW (x1);
                  unsigned HOST_WIDE_INT value2 = CONST_DOUBLE_HIGH (x1);

                  for (nbits = 0; value1 != 0; value1 &= value1 - 1)
                    nbits++;
                  for (; value2 != 0; value2 &= value2 - 1)
                    nbits++;
                }
              else
                nbits = 7;

              if (nbits < 3)
                nbits = 3;
              bit_cost = (nbits - 3) / sparc_costs->int_mul_bit_factor;
              bit_cost = COSTS_N_INSNS (bit_cost);
            }

          if (mode == DImode)
            *total = sparc_costs->int_mulX + bit_cost;
          else
            *total = sparc_costs->int_mul + bit_cost;
        }
      return false;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (1) + sparc_costs->shift_penalty;
      return false;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      if (float_mode_p)
        {
          if (mode == DFmode)
            *total = sparc_costs->float_div_df;
          else
            *total = sparc_costs->float_div_sf;
        }
      else
        {
          if (mode == DImode)
            *total = sparc_costs->int_divX;
          else
            *total = sparc_costs->int_div;
        }
      return false;

    case NEG:
      if (! float_mode_p)
        {
          *total = COSTS_N_INSNS (1);
          return false;
        }
      /* FALLTHRU */

    case ABS:
    case FLOAT:
    case UNSIGNED_FLOAT:
    case FIX:
    case UNSIGNED_FIX:
    case FLOAT_EXTEND:
    case FLOAT_TRUNCATE:
      *total = sparc_costs->float_move;
      return false;

    case SQRT:
      if (mode == DFmode)
        *total = sparc_costs->float_sqrt_df;
      else
        *total = sparc_costs->float_sqrt_sf;
      return false;

    case COMPARE:
      if (float_mode_p)
        *total = sparc_costs->float_cmp;
      else
        *total = COSTS_N_INSNS (1);
      return false;

    case IF_THEN_ELSE:
      if (float_mode_p)
        *total = sparc_costs->float_cmove;
      else
        *total = sparc_costs->int_cmove;
      return false;

    case IOR:
      /* Handle the NAND vector patterns.  */
      if (sparc_vector_mode_supported_p (GET_MODE (x))
          && GET_CODE (XEXP (x, 0)) == NOT
          && GET_CODE (XEXP (x, 1)) == NOT)
        {
          *total = COSTS_N_INSNS (1);
          return true;
        }
      else
        return false;

    default:
      return false;
    }
}
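
/* Note on the returns above: returning true tells the generic rtx-cost
   walker that *TOTAL is final for this expression, while returning
   false lets it recurse and add in the costs of the sub-expressions.  */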

/* Emit the sequence of insns SEQ while preserving the registers REG and REG2.
   This is achieved by means of a manual dynamic stack space allocation in
   the current frame.  We make the assumption that SEQ doesn't contain any
   function calls, with the possible exception of calls to the PIC helper.  */

static void
emit_and_preserve (rtx seq, rtx reg, rtx reg2)
{
  /* We must preserve the lowest 16 words for the register save area.  */
  HOST_WIDE_INT offset = 16*UNITS_PER_WORD;
  /* We really need only 2 words of fresh stack space.  */
  HOST_WIDE_INT size = SPARC_STACK_ALIGN (offset + 2*UNITS_PER_WORD);

  rtx slot
    = gen_rtx_MEM (word_mode, plus_constant (stack_pointer_rtx,
                                             SPARC_STACK_BIAS + offset));

  emit_insn (gen_stack_pointer_dec (GEN_INT (size)));
  emit_insn (gen_rtx_SET (VOIDmode, slot, reg));
  if (reg2)
    emit_insn (gen_rtx_SET (VOIDmode,
                            adjust_address (slot, word_mode, UNITS_PER_WORD),
                            reg2));
  emit_insn (seq);
  if (reg2)
    emit_insn (gen_rtx_SET (VOIDmode,
                            reg2,
                            adjust_address (slot, word_mode, UNITS_PER_WORD)));
  emit_insn (gen_rtx_SET (VOIDmode, reg, slot));
  emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
}

/* Output the assembler code for a thunk function.  THUNK_DECL is the
   declaration for the thunk function itself, FUNCTION is the decl for
   the target function.  DELTA is an immediate constant offset to be
   added to THIS.  If VCALL_OFFSET is nonzero, the word at address
   (*THIS + VCALL_OFFSET) should be additionally added to THIS.  */

static void
sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
                       HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
                       tree function)
{
  rtx this, insn, funexp;
  unsigned int int_arg_first;

  reload_completed = 1;
  epilogue_completed = 1;
  no_new_pseudos = 1;
  reset_block_changes ();

  emit_note (NOTE_INSN_PROLOGUE_END);

  if (flag_delayed_branch)
    {
      /* We will emit a regular sibcall below, so we need to instruct
         output_sibcall that we are in a leaf function.  */
      sparc_leaf_function_p = current_function_uses_only_leaf_regs = 1;

      /* This will cause final.c to invoke leaf_renumber_regs so we
         must behave as if we were in a not-yet-leafified function.  */
      int_arg_first = SPARC_INCOMING_INT_ARG_FIRST;
    }
  else
    {
      /* We will emit the sibcall manually below, so we will need to
         manually spill non-leaf registers.  */
      sparc_leaf_function_p = current_function_uses_only_leaf_regs = 0;

      /* We really are in a leaf function.  */
      int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
    }

  /* Find the "this" pointer.  Normally in %o0, but in ARCH64 if the function
     returns a structure, the structure return pointer is there instead.  */
  if (TARGET_ARCH64 && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    this = gen_rtx_REG (Pmode, int_arg_first + 1);
  else
    this = gen_rtx_REG (Pmode, int_arg_first);

  /* Add DELTA.  When possible use a plain add, otherwise load it into
     a register first.  */
  if (delta)
    {
      rtx delta_rtx = GEN_INT (delta);

      if (! SPARC_SIMM13_P (delta))
        {
          rtx scratch = gen_rtx_REG (Pmode, 1);
          emit_move_insn (scratch, delta_rtx);
          delta_rtx = scratch;
        }

      /* THIS += DELTA.  */
      emit_insn (gen_add2_insn (this, delta_rtx));
    }

  /* Add the word at address (*THIS + VCALL_OFFSET).  */
  if (vcall_offset)
    {
      rtx vcall_offset_rtx = GEN_INT (vcall_offset);
      rtx scratch = gen_rtx_REG (Pmode, 1);

      gcc_assert (vcall_offset < 0);

      /* SCRATCH = *THIS.  */
      emit_move_insn (scratch, gen_rtx_MEM (Pmode, this));

      /* Prepare for adding VCALL_OFFSET.  The difficulty is that we
         may not have any available scratch register at this point.  */
      if (SPARC_SIMM13_P (vcall_offset))
        ;
      /* This is the case if ARCH64 (unless -ffixed-g5 is passed).  */
      else if (! fixed_regs[5]
               /* The below sequence is made up of at least 2 insns,
                  while the default method may need only one.  */
               && vcall_offset < -8192)
        {
          rtx scratch2 = gen_rtx_REG (Pmode, 5);
          emit_move_insn (scratch2, vcall_offset_rtx);
          vcall_offset_rtx = scratch2;
        }
      else
        {
          rtx increment = GEN_INT (-4096);

          /* VCALL_OFFSET is a negative number whose typical range can be
             estimated as -32768..0 in 32-bit mode.  In almost all cases
             it is therefore cheaper to emit multiple add insns than
             spilling and loading the constant into a register (at least
             6 insns).  */
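          /* Worked example (a sketch): VCALL_OFFSET == -32768 emits
             seven adds of -4096 each, leaving -4096, which is SIMM13
             and gets folded into the memory address below.  */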
          while (! SPARC_SIMM13_P (vcall_offset))
            {
              emit_insn (gen_add2_insn (scratch, increment));
              vcall_offset += 4096;
            }
          vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0 */
        }

      /* SCRATCH = *(*THIS + VCALL_OFFSET).  */
      emit_move_insn (scratch, gen_rtx_MEM (Pmode,
                                            gen_rtx_PLUS (Pmode,
                                                          scratch,
                                                          vcall_offset_rtx)));

      /* THIS += *(*THIS + VCALL_OFFSET).  */
      emit_insn (gen_add2_insn (this, scratch));
    }

  /* Generate a tail call to the target function.  */
  if (! TREE_USED (function))
    {
      assemble_external (function);
      TREE_USED (function) = 1;
    }
  funexp = XEXP (DECL_RTL (function), 0);

  if (flag_delayed_branch)
    {
      funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
      insn = emit_call_insn (gen_sibcall (funexp));
      SIBLING_CALL_P (insn) = 1;
    }
  else
    {
      /* The hoops we have to jump through in order to generate a sibcall
         without using delay slots...  */
      rtx spill_reg, spill_reg2, seq, scratch = gen_rtx_REG (Pmode, 1);

      if (flag_pic)
        {
          spill_reg = gen_rtx_REG (word_mode, 15);  /* %o7 */
          spill_reg2 = gen_rtx_REG (word_mode, PIC_OFFSET_TABLE_REGNUM);
          start_sequence ();
          /* Delay emitting the PIC helper function because it needs to
             change the section and we are emitting assembly code.  */
          load_pic_register (true);  /* clobbers %o7 */
          scratch = legitimize_pic_address (funexp, Pmode, scratch);
          seq = get_insns ();
          end_sequence ();
          emit_and_preserve (seq, spill_reg, spill_reg2);
        }
      else if (TARGET_ARCH32)
        {
          emit_insn (gen_rtx_SET (VOIDmode,
                                  scratch,
                                  gen_rtx_HIGH (SImode, funexp)));
          emit_insn (gen_rtx_SET (VOIDmode,
                                  scratch,
                                  gen_rtx_LO_SUM (SImode, scratch, funexp)));
        }
      else  /* TARGET_ARCH64 */
        {
          switch (sparc_cmodel)
            {
            case CM_MEDLOW:
            case CM_MEDMID:
              /* The destination can serve as a temporary.  */
              sparc_emit_set_symbolic_const64 (scratch, funexp, scratch);
              break;

            case CM_MEDANY:
            case CM_EMBMEDANY:
              /* The destination cannot serve as a temporary.  */
              spill_reg = gen_rtx_REG (DImode, 15);  /* %o7 */
              start_sequence ();
              sparc_emit_set_symbolic_const64 (scratch, funexp, spill_reg);
              seq = get_insns ();
              end_sequence ();
              emit_and_preserve (seq, spill_reg, 0);
              break;

            default:
              gcc_unreachable ();
            }
        }

      emit_jump_insn (gen_indirect_jump (scratch));
    }

  emit_barrier ();

  /* Run just enough of rest_of_compilation to get the insns emitted.
     There's not really enough bulk here to make other passes such as
     instruction scheduling worthwhile.  Note that use_thunk calls
     assemble_start_function and assemble_end_function.  */
  insn = get_insns ();
  insn_locators_initialize ();
  shorten_branches (insn);
  final_start_function (insn, file, 1);
  final (insn, file, 1);
  final_end_function ();

  reload_completed = 0;
  epilogue_completed = 0;
  no_new_pseudos = 0;
}

/* Return true if sparc_output_mi_thunk would be able to output the
   assembler code for the thunk function specified by the arguments
   it is passed, and false otherwise.  */
static bool
sparc_can_output_mi_thunk (tree thunk_fndecl ATTRIBUTE_UNUSED,
                           HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
                           HOST_WIDE_INT vcall_offset,
                           tree function ATTRIBUTE_UNUSED)
{
  /* Bound the loop used in the default method above.  */
  return (vcall_offset >= -32768 || ! fixed_regs[5]);
}

/* How to allocate a 'struct machine_function'.  */

static struct machine_function *
sparc_init_machine_status (void)
{
  return ggc_alloc_cleared (sizeof (struct machine_function));
}

/* Locate some local-dynamic symbol still in use by this function
   so that we can print its name in local-dynamic base patterns.  */

static const char *
get_some_local_dynamic_name (void)
{
  rtx insn;

  if (cfun->machine->some_ld_name)
    return cfun->machine->some_ld_name;

  for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
    if (INSN_P (insn)
        && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
      return cfun->machine->some_ld_name;

  gcc_unreachable ();
}

static int
get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  if (x
      && GET_CODE (x) == SYMBOL_REF
      && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
    {
      cfun->machine->some_ld_name = XSTR (x, 0);
      return 1;
    }

  return 0;
}

/* Handle the TARGET_DWARF_HANDLE_FRAME_UNSPEC hook.
   This is called from dwarf2out.c to emit call frame instructions
   for frame-related insns containing UNSPECs and UNSPEC_VOLATILEs.  */
static void
sparc_dwarf_handle_frame_unspec (const char *label,
                                 rtx pattern ATTRIBUTE_UNUSED,
                                 int index ATTRIBUTE_UNUSED)
{
  gcc_assert (index == UNSPECV_SAVEW);
  dwarf2out_window_save (label);
}

/* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
   We need to emit DTP-relative relocations.  */

static void
sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
{
  switch (size)
    {
    case 4:
      fputs ("\t.word\t%r_tls_dtpoff32(", file);
      break;
    case 8:
      fputs ("\t.xword\t%r_tls_dtpoff64(", file);
      break;
    default:
      gcc_unreachable ();
    }
  output_addr_const (file, x);
  fputs (")", file);
}
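
/* For instance, a 4-byte entry referring to a symbol x is printed as

       .word %r_tls_dtpoff32(x)

   which the assembler turns into an R_SPARC_TLS_DTPOFF32 relocation
   against x.  */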

/* Do whatever processing is required at the end of a file.  */

static void
sparc_file_end (void)
{
  /* If we haven't emitted the special PIC helper function, do so now.  */
  if (pic_helper_symbol_name[0] && !pic_helper_emitted_p)
    emit_pic_helper ();

  if (NEED_INDICATE_EXEC_STACK)
    file_end_indicate_exec_stack ();
}

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
/* Implement TARGET_MANGLE_FUNDAMENTAL_TYPE.  */

static const char *
sparc_mangle_fundamental_type (tree type)
{
  if (!TARGET_64BIT
      && TYPE_MAIN_VARIANT (type) == long_double_type_node
      && TARGET_LONG_DOUBLE_128)
    return "g";

  /* For all other types, use normal C++ mangling.  */
  return NULL;
}
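
/* A sketch of the effect: f(long double) then mangles as _Z1fg rather
   than the default _Z1fe, 'g' being the Itanium-ABI code otherwise
   used for __float128.  */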
#endif

/* Expand code to perform an 8- or 16-bit compare and swap by doing a 32-bit
   compare and swap on the word containing the byte or half-word.  */
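
/* Illustration (SPARC is big-endian): a QImode byte at offset 0 in its
   word occupies bits 31..24, so OFF below is computed as
   ((addr & 3) ^ 3) * 8 == 24 for that byte; HImode XORs with 2
   instead.  */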

void
sparc_expand_compare_and_swap_12 (rtx result, rtx mem, rtx oldval, rtx newval)
{
  rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
  rtx addr = gen_reg_rtx (Pmode);
  rtx off = gen_reg_rtx (SImode);
  rtx oldv = gen_reg_rtx (SImode);
  rtx newv = gen_reg_rtx (SImode);
  rtx oldvalue = gen_reg_rtx (SImode);
  rtx newvalue = gen_reg_rtx (SImode);
  rtx res = gen_reg_rtx (SImode);
  rtx resv = gen_reg_rtx (SImode);
  rtx memsi, val, mask, end_label, loop_label, cc;

  emit_insn (gen_rtx_SET (VOIDmode, addr,
                          gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));

  if (Pmode != SImode)
    addr1 = gen_lowpart (SImode, addr1);
  emit_insn (gen_rtx_SET (VOIDmode, off,
                          gen_rtx_AND (SImode, addr1, GEN_INT (3))));

  memsi = gen_rtx_MEM (SImode, addr);
  set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
  MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);

  val = force_reg (SImode, memsi);

  emit_insn (gen_rtx_SET (VOIDmode, off,
                          gen_rtx_XOR (SImode, off,
                                       GEN_INT (GET_MODE (mem) == QImode
                                                ? 3 : 2))));

  emit_insn (gen_rtx_SET (VOIDmode, off,
                          gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));

  if (GET_MODE (mem) == QImode)
    mask = force_reg (SImode, GEN_INT (0xff));
  else
    mask = force_reg (SImode, GEN_INT (0xffff));

  emit_insn (gen_rtx_SET (VOIDmode, mask,
                          gen_rtx_ASHIFT (SImode, mask, off)));

  emit_insn (gen_rtx_SET (VOIDmode, val,
                          gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
                                       val)));

  oldval = gen_lowpart (SImode, oldval);
  emit_insn (gen_rtx_SET (VOIDmode, oldv,
                          gen_rtx_ASHIFT (SImode, oldval, off)));

  newval = gen_lowpart_common (SImode, newval);
  emit_insn (gen_rtx_SET (VOIDmode, newv,
                          gen_rtx_ASHIFT (SImode, newval, off)));

  emit_insn (gen_rtx_SET (VOIDmode, oldv,
                          gen_rtx_AND (SImode, oldv, mask)));

  emit_insn (gen_rtx_SET (VOIDmode, newv,
                          gen_rtx_AND (SImode, newv, mask)));

  end_label = gen_label_rtx ();
  loop_label = gen_label_rtx ();
  emit_label (loop_label);

  emit_insn (gen_rtx_SET (VOIDmode, oldvalue,
                          gen_rtx_IOR (SImode, oldv, val)));

  emit_insn (gen_rtx_SET (VOIDmode, newvalue,
                          gen_rtx_IOR (SImode, newv, val)));

  emit_insn (gen_sync_compare_and_swapsi (res, memsi, oldvalue, newvalue));

  emit_cmp_and_jump_insns (res, oldvalue, EQ, NULL, SImode, 0, end_label);

  emit_insn (gen_rtx_SET (VOIDmode, resv,
                          gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
                                       res)));

  sparc_compare_op0 = resv;
  sparc_compare_op1 = val;
  cc = gen_compare_reg (NE);

  emit_insn (gen_rtx_SET (VOIDmode, val, resv));

  sparc_compare_emitted = cc;
  emit_jump_insn (gen_bne (loop_label));

  emit_label (end_label);

  emit_insn (gen_rtx_SET (VOIDmode, res,
                          gen_rtx_AND (SImode, res, mask)));

  emit_insn (gen_rtx_SET (VOIDmode, res,
                          gen_rtx_LSHIFTRT (SImode, res, off)));

  emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
}

#include "gt-sparc.h"