OpenCores
URL https://opencores.org/ocsvn/openrisc_2011-10-31/openrisc_2011-10-31/trunk

Subversion Repositories openrisc_2011-10-31

openrisc/trunk/gnu-src/gcc-4.5.1/gcc/config/sparc/sparc.c (rev 378)
/* Subroutines for insn-output.c for SPARC.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com)
   64-bit SPARC-V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
   at Cygnus Support.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "insn-codes.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "optabs.h"
#include "recog.h"
#include "toplev.h"
#include "ggc.h"
#include "tm_p.h"
#include "debug.h"
#include "target.h"
#include "target-def.h"
#include "cfglayout.h"
#include "gimple.h"
#include "langhooks.h"
#include "params.h"
#include "df.h"
#include "dwarf2out.h"

/* Processor costs */
static const
struct processor_costs cypress_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (2), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (5), /* fmov, fneg, fabs */
  COSTS_N_INSNS (5), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (7), /* fmul */
  COSTS_N_INSNS (37), /* fdivs */
  COSTS_N_INSNS (37), /* fdivd */
  COSTS_N_INSNS (63), /* fsqrts */
  COSTS_N_INSNS (63), /* fsqrtd */
  COSTS_N_INSNS (1), /* imul */
  COSTS_N_INSNS (1), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (1), /* idiv */
  COSTS_N_INSNS (1), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs supersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (0), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (3), /* fadd, fsub */
  COSTS_N_INSNS (3), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (3), /* fmul */
  COSTS_N_INSNS (6), /* fdivs */
  COSTS_N_INSNS (9), /* fdivd */
  COSTS_N_INSNS (12), /* fsqrts */
  COSTS_N_INSNS (12), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (4), /* idiv */
  COSTS_N_INSNS (4), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  1, /* shift penalty */
};

static const
struct processor_costs hypersparc_costs = {
  COSTS_N_INSNS (1), /* int load */
  COSTS_N_INSNS (1), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (8), /* fdivs */
  COSTS_N_INSNS (12), /* fdivd */
  COSTS_N_INSNS (17), /* fsqrts */
  COSTS_N_INSNS (17), /* fsqrtd */
  COSTS_N_INSNS (17), /* imul */
  COSTS_N_INSNS (17), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (17), /* idiv */
  COSTS_N_INSNS (17), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs sparclet_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (1), /* int zeroed load */
  COSTS_N_INSNS (1), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (1), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (1), /* fmov, fmovr */
  COSTS_N_INSNS (1), /* fmul */
  COSTS_N_INSNS (1), /* fdivs */
  COSTS_N_INSNS (1), /* fdivd */
  COSTS_N_INSNS (1), /* fsqrts */
  COSTS_N_INSNS (1), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (5), /* idiv */
  COSTS_N_INSNS (5), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs ultrasparc_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (2), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (1), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (1), /* fcmp */
  COSTS_N_INSNS (2), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (13), /* fdivs */
  COSTS_N_INSNS (23), /* fdivd */
  COSTS_N_INSNS (13), /* fsqrts */
  COSTS_N_INSNS (23), /* fsqrtd */
  COSTS_N_INSNS (4), /* imul */
  COSTS_N_INSNS (4), /* imulX */
  2, /* imul bit factor */
  COSTS_N_INSNS (37), /* idiv */
  COSTS_N_INSNS (68), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  2, /* shift penalty */
};

static const
struct processor_costs ultrasparc3_costs = {
  COSTS_N_INSNS (2), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (2), /* float load */
  COSTS_N_INSNS (3), /* fmov, fneg, fabs */
  COSTS_N_INSNS (4), /* fadd, fsub */
  COSTS_N_INSNS (5), /* fcmp */
  COSTS_N_INSNS (3), /* fmov, fmovr */
  COSTS_N_INSNS (4), /* fmul */
  COSTS_N_INSNS (17), /* fdivs */
  COSTS_N_INSNS (20), /* fdivd */
  COSTS_N_INSNS (20), /* fsqrts */
  COSTS_N_INSNS (29), /* fsqrtd */
  COSTS_N_INSNS (6), /* imul */
  COSTS_N_INSNS (6), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (40), /* idiv */
  COSTS_N_INSNS (71), /* idivX */
  COSTS_N_INSNS (2), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (9), /* float load */
  COSTS_N_INSNS (8), /* fmov, fneg, fabs */
  COSTS_N_INSNS (8), /* fadd, fsub */
  COSTS_N_INSNS (26), /* fcmp */
  COSTS_N_INSNS (8), /* fmov, fmovr */
  COSTS_N_INSNS (29), /* fmul */
  COSTS_N_INSNS (54), /* fdivs */
  COSTS_N_INSNS (83), /* fdivd */
  COSTS_N_INSNS (100), /* fsqrts - not implemented in hardware */
  COSTS_N_INSNS (100), /* fsqrtd - not implemented in hardware */
  COSTS_N_INSNS (11), /* imul */
  COSTS_N_INSNS (11), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (72), /* idiv */
  COSTS_N_INSNS (72), /* idivX */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

static const
struct processor_costs niagara2_costs = {
  COSTS_N_INSNS (3), /* int load */
  COSTS_N_INSNS (3), /* int signed load */
  COSTS_N_INSNS (3), /* int zeroed load */
  COSTS_N_INSNS (3), /* float load */
  COSTS_N_INSNS (6), /* fmov, fneg, fabs */
  COSTS_N_INSNS (6), /* fadd, fsub */
  COSTS_N_INSNS (6), /* fcmp */
  COSTS_N_INSNS (6), /* fmov, fmovr */
  COSTS_N_INSNS (6), /* fmul */
  COSTS_N_INSNS (19), /* fdivs */
  COSTS_N_INSNS (33), /* fdivd */
  COSTS_N_INSNS (19), /* fsqrts */
  COSTS_N_INSNS (33), /* fsqrtd */
  COSTS_N_INSNS (5), /* imul */
  COSTS_N_INSNS (5), /* imulX */
  0, /* imul bit factor */
  COSTS_N_INSNS (31), /* idiv, average of 12 - 41 cycle range */
  COSTS_N_INSNS (31), /* idivX, average of 12 - 41 cycle range */
  COSTS_N_INSNS (1), /* movcc/movr */
  0, /* shift penalty */
};

const struct processor_costs *sparc_costs = &cypress_costs;
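
/* [Editor's note] COSTS_N_INSNS comes from rtl.h and just scales its
   argument into instruction units (in GCC of this vintage it expands
   to (N) * 4, so COSTS_N_INSNS (37) for fdivs on cypress is 148).
   The cypress table is only the static default here: the switch near
   the end of sparc_override_options () below repoints sparc_costs at
   whichever table matches the processor chosen by -mcpu/-mtune.  */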

#ifdef HAVE_AS_RELAX_OPTION
/* If 'as' and 'ld' are relaxing tail call insns into branch always, use
   "or %o7,%g0,X; call Y; or X,%g0,%o7" always, so that it can be optimized.
   With sethi/jmp, neither 'as' nor 'ld' has an easy way to find out whether
   anything branches between the sethi and jmp.  */
#define LEAF_SIBCALL_SLOT_RESERVED_P 1
#else
#define LEAF_SIBCALL_SLOT_RESERVED_P \
  ((TARGET_ARCH64 && !TARGET_CM_MEDLOW) || flag_pic)
#endif

/* Global variables for machine-dependent things.  */

/* Size of frame.  Need to know this to emit return insns from leaf procedures.
   ACTUAL_FSIZE is set by sparc_compute_frame_size() which is called during the
   reload pass.  This is important as the value is later used for scheduling
   (to see what can go in a delay slot).
   APPARENT_FSIZE is the size of the stack less the register save area and less
   the outgoing argument area.  It is used when saving call preserved regs.  */
static HOST_WIDE_INT apparent_fsize;
static HOST_WIDE_INT actual_fsize;

/* Number of live general or floating point registers needed to be
   saved (as 4-byte quantities).  */
static int num_gfregs;

/* The alias set for prologue/epilogue register save/restore.  */
static GTY(()) alias_set_type sparc_sr_alias_set;

/* The alias set for the structure return value.  */
static GTY(()) alias_set_type struct_value_alias_set;

/* Vector to say how input registers are mapped to output registers.
   HARD_FRAME_POINTER_REGNUM cannot be remapped by this function to
   eliminate it.  You must use -fomit-frame-pointer to get that.  */
char leaf_reg_remap[] =
{ 0, 1, 2, 3, 4, 5, 6, 7,
  -1, -1, -1, -1, -1, -1, 14, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  8, 9, 10, 11, 12, 13, -1, 15,

  32, 33, 34, 35, 36, 37, 38, 39,
  40, 41, 42, 43, 44, 45, 46, 47,
  48, 49, 50, 51, 52, 53, 54, 55,
  56, 57, 58, 59, 60, 61, 62, 63,
  64, 65, 66, 67, 68, 69, 70, 71,
  72, 73, 74, 75, 76, 77, 78, 79,
  80, 81, 82, 83, 84, 85, 86, 87,
  88, 89, 90, 91, 92, 93, 94, 95,
  96, 97, 98, 99, 100};

/* Vector, indexed by hard register number, which contains 1
   for a register that is allowable in a candidate for leaf
   function treatment.  */
char sparc_leaf_regs[] =
{ 1, 1, 1, 1, 1, 1, 1, 1,
  0, 0, 0, 0, 0, 0, 1, 0,
  0, 0, 0, 0, 0, 0, 0, 0,
  1, 1, 1, 1, 1, 1, 0, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1, 1, 1};
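
/* [Editor's note] Reading the two tables together: a SPARC leaf
   function executes no SAVE, so incoming arguments are still in the
   caller's %o registers.  leaf_reg_remap therefore renames each
   usable %i register to its %o counterpart, e.g. entry 24 (%i0) maps
   to 8 (%o0) and entry 31 (%i7) to 15 (%o7), while -1 marks registers
   (the %l locals, %fp, and most %o regs) that disqualify a leaf
   candidate -- exactly the registers given 0 in sparc_leaf_regs.  */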

struct GTY(()) machine_function
{
  /* Some local-dynamic TLS symbol name.  */
  const char *some_ld_name;

  /* True if the current function is leaf and uses only leaf regs,
     so that the SPARC leaf function optimization can be applied.
     Private version of current_function_uses_only_leaf_regs, see
     sparc_expand_prologue for the rationale.  */
  int leaf_function_p;

  /* True if the data calculated by sparc_expand_prologue are valid.  */
  bool prologue_data_valid_p;
};

#define sparc_leaf_function_p  cfun->machine->leaf_function_p
#define sparc_prologue_data_valid_p  cfun->machine->prologue_data_valid_p

/* Register we pretend to think the frame pointer is allocated to.
   Normally, this is %fp, but if we are in a leaf procedure, this
   is %sp+"something".  We record "something" separately as it may
   be too big for reg+constant addressing.  */
static rtx frame_base_reg;
static HOST_WIDE_INT frame_base_offset;

/* 1 if the next opcode is to be specially indented.  */
int sparc_indent_opcode = 0;

static bool sparc_handle_option (size_t, const char *, int);
static void sparc_init_modes (void);
static void scan_record_type (tree, int *, int *, int *);
static int function_arg_slotno (const CUMULATIVE_ARGS *, enum machine_mode,
                                tree, int, int, int *, int *);

static int supersparc_adjust_cost (rtx, rtx, rtx, int);
static int hypersparc_adjust_cost (rtx, rtx, rtx, int);

static void sparc_output_addr_vec (rtx);
static void sparc_output_addr_diff_vec (rtx);
static void sparc_output_deferred_case_vectors (void);
static bool sparc_legitimate_address_p (enum machine_mode, rtx, bool);
static rtx sparc_builtin_saveregs (void);
static int epilogue_renumber (rtx *, int);
static bool sparc_assemble_integer (rtx, unsigned int, int);
static int set_extends (rtx);
static void load_pic_register (void);
static int save_or_restore_regs (int, int, rtx, int, int);
static void emit_save_or_restore_regs (int);
static void sparc_asm_function_prologue (FILE *, HOST_WIDE_INT);
static void sparc_asm_function_epilogue (FILE *, HOST_WIDE_INT);
static void sparc_solaris_elf_asm_named_section (const char *, unsigned int,
                                                 tree) ATTRIBUTE_UNUSED;
static int sparc_adjust_cost (rtx, rtx, rtx, int);
static int sparc_issue_rate (void);
static void sparc_sched_init (FILE *, int, int);
static int sparc_use_sched_lookahead (void);

static void emit_soft_tfmode_libcall (const char *, int, rtx *);
static void emit_soft_tfmode_binop (enum rtx_code, rtx *);
static void emit_soft_tfmode_unop (enum rtx_code, rtx *);
static void emit_soft_tfmode_cvt (enum rtx_code, rtx *);
static void emit_hard_tfmode_operation (enum rtx_code, rtx *);

static bool sparc_function_ok_for_sibcall (tree, tree);
static void sparc_init_libfuncs (void);
static void sparc_init_builtins (void);
static void sparc_vis_init_builtins (void);
static rtx sparc_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static tree sparc_fold_builtin (tree, tree, bool);
static int sparc_vis_mul8x16 (int, int);
static tree sparc_handle_vis_mul8x16 (int, tree, tree, tree);
static void sparc_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
                                   HOST_WIDE_INT, tree);
static bool sparc_can_output_mi_thunk (const_tree, HOST_WIDE_INT,
                                       HOST_WIDE_INT, const_tree);
static struct machine_function * sparc_init_machine_status (void);
static bool sparc_cannot_force_const_mem (rtx);
static rtx sparc_tls_get_addr (void);
static rtx sparc_tls_got (void);
static const char *get_some_local_dynamic_name (void);
static int get_some_local_dynamic_name_1 (rtx *, void *);
static bool sparc_rtx_costs (rtx, int, int, int *, bool);
static bool sparc_promote_prototypes (const_tree);
static rtx sparc_struct_value_rtx (tree, int);
static enum machine_mode sparc_promote_function_mode (const_tree, enum machine_mode,
                                                      int *, const_tree, int);
static bool sparc_return_in_memory (const_tree, const_tree);
static bool sparc_strict_argument_naming (CUMULATIVE_ARGS *);
static void sparc_va_start (tree, rtx);
static tree sparc_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
static bool sparc_vector_mode_supported_p (enum machine_mode);
static bool sparc_tls_referenced_p (rtx);
static rtx legitimize_tls_address (rtx);
static rtx legitimize_pic_address (rtx, rtx);
static rtx sparc_legitimize_address (rtx, rtx, enum machine_mode);
static bool sparc_pass_by_reference (CUMULATIVE_ARGS *,
                                     enum machine_mode, const_tree, bool);
static int sparc_arg_partial_bytes (CUMULATIVE_ARGS *,
                                    enum machine_mode, tree, bool);
static void sparc_dwarf_handle_frame_unspec (const char *, rtx, int);
static void sparc_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static void sparc_file_end (void);
static bool sparc_frame_pointer_required (void);
static bool sparc_can_eliminate (const int, const int);
#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
static const char *sparc_mangle_type (const_tree);
#endif
static void sparc_trampoline_init (rtx, tree, rtx);

#ifdef SUBTARGET_ATTRIBUTE_TABLE
/* Table of valid machine attributes.  */
static const struct attribute_spec sparc_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  SUBTARGET_ATTRIBUTE_TABLE,
  { NULL,        0, 0, false, false, false, NULL }
};
#endif

/* Option handling.  */

/* Parsed value.  */
enum cmodel sparc_cmodel;

char sparc_hard_reg_printed[8];

struct sparc_cpu_select sparc_select[] =
{
  /* switch     name,           tune    arch */
  { (char *)0,   "default",      1,      1 },
  { (char *)0,   "-mcpu=",       1,      1 },
  { (char *)0,   "-mtune=",      1,      0 },
  { 0, 0, 0, 0 }
};

/* CPU type.  This is set from TARGET_CPU_DEFAULT and -m{cpu,tune}=xxx.  */
enum processor_type sparc_cpu;

/* Whetheran FPU option was specified.  */
static bool fpu_option_set = false;

/* Initialize the GCC target structure.  */

/* The default is to use .half rather than .short for aligned HI objects.  */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.uahalf\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.uaword\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.uaxword\t"

/* The target hook has to handle DI-mode values.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER sparc_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE sparc_asm_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE sparc_asm_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST sparc_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE sparc_issue_rate
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT sparc_sched_init
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD sparc_use_sched_lookahead

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL sparc_function_ok_for_sibcall

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS sparc_init_libfuncs
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS sparc_init_builtins

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS sparc_legitimize_address

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN sparc_expand_builtin
#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN sparc_fold_builtin

#if TARGET_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM sparc_cannot_force_const_mem

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK sparc_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK sparc_can_output_mi_thunk

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS sparc_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_bool_0

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE sparc_promote_function_mode

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES sparc_promote_prototypes

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX sparc_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY sparc_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE sparc_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES sparc_arg_partial_bytes

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS sparc_builtin_saveregs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING sparc_strict_argument_naming

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START sparc_va_start
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR sparc_gimplify_va_arg

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P sparc_vector_mode_supported_p

#undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
#define TARGET_DWARF_HANDLE_FRAME_UNSPEC sparc_dwarf_handle_frame_unspec

#ifdef SUBTARGET_INSERT_ATTRIBUTES
#undef TARGET_INSERT_ATTRIBUTES
#define TARGET_INSERT_ATTRIBUTES SUBTARGET_INSERT_ATTRIBUTES
#endif

#ifdef SUBTARGET_ATTRIBUTE_TABLE
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE sparc_attribute_table
#endif

#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING SPARC_RELAXED_ORDERING

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS TARGET_DEFAULT
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION sparc_handle_option

#if TARGET_GNU_TLS && defined(HAVE_AS_SPARC_UA_PCREL)
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL sparc_output_dwarf_dtprel
#endif

#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END sparc_file_end

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED sparc_frame_pointer_required

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE sparc_can_eliminate

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE sparc_mangle_type
#endif

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P sparc_legitimate_address_p

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT sparc_trampoline_init

struct gcc_target targetm = TARGET_INITIALIZER;

/* Implement TARGET_HANDLE_OPTION.  */

static bool
sparc_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_mfpu:
    case OPT_mhard_float:
    case OPT_msoft_float:
      fpu_option_set = true;
      break;

    case OPT_mcpu_:
      sparc_select[1].string = arg;
      break;

    case OPT_mtune_:
      sparc_select[2].string = arg;
      break;
    }

  return true;
}
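
/* [Editor's note] Sketch of how a command line flows through here:
   with "-mcpu=ultrasparc -mtune=niagara", sparc_handle_option ()
   stores "ultrasparc" in sparc_select[1].string and "niagara" in
   sparc_select[2].string.  sparc_override_options () below then walks
   sparc_select[] in order, looks each string up in cpu_table, applies
   both the mask changes and sparc_cpu for entry 1 (tune and arch both
   set) but only sparc_cpu for entry 2 (tune only), so the ultrasparc
   ISA masks stick while niagara wins for scheduling.  */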

/* Validate and override various options, and do some machine dependent
   initialization.  */

void
sparc_override_options (void)
{
  static struct code_model {
    const char *const name;
    const enum cmodel value;
  } const cmodels[] = {
    { "32", CM_32 },
    { "medlow", CM_MEDLOW },
    { "medmid", CM_MEDMID },
    { "medany", CM_MEDANY },
    { "embmedany", CM_EMBMEDANY },
    { NULL, (enum cmodel) 0 }
  };
  const struct code_model *cmodel;
  /* Map TARGET_CPU_DEFAULT to value for -m{arch,tune}=.  */
  static struct cpu_default {
    const int cpu;
    const char *const name;
  } const cpu_default[] = {
    /* There must be one entry here for each TARGET_CPU value.  */
    { TARGET_CPU_sparc, "cypress" },
    { TARGET_CPU_sparclet, "tsc701" },
    { TARGET_CPU_sparclite, "f930" },
    { TARGET_CPU_v8, "v8" },
    { TARGET_CPU_hypersparc, "hypersparc" },
    { TARGET_CPU_sparclite86x, "sparclite86x" },
    { TARGET_CPU_supersparc, "supersparc" },
    { TARGET_CPU_v9, "v9" },
    { TARGET_CPU_ultrasparc, "ultrasparc" },
    { TARGET_CPU_ultrasparc3, "ultrasparc3" },
    { TARGET_CPU_niagara, "niagara" },
    { TARGET_CPU_niagara2, "niagara2" },
    { 0, 0 }
  };
  const struct cpu_default *def;
  /* Table of values for -m{cpu,tune}=.  */
  static struct cpu_table {
    const char *const name;
    const enum processor_type processor;
    const int disable;
    const int enable;
  } const cpu_table[] = {
    { "v7",         PROCESSOR_V7, MASK_ISA, 0 },
    { "cypress",    PROCESSOR_CYPRESS, MASK_ISA, 0 },
    { "v8",         PROCESSOR_V8, MASK_ISA, MASK_V8 },
    /* TI TMS390Z55 supersparc */
    { "supersparc", PROCESSOR_SUPERSPARC, MASK_ISA, MASK_V8 },
    { "sparclite",  PROCESSOR_SPARCLITE, MASK_ISA, MASK_SPARCLITE },
    /* The Fujitsu MB86930 is the original sparclite chip, with no fpu.
       The Fujitsu MB86934 is the recent sparclite chip, with an fpu.  */
    { "f930",       PROCESSOR_F930, MASK_ISA|MASK_FPU, MASK_SPARCLITE },
    { "f934",       PROCESSOR_F934, MASK_ISA, MASK_SPARCLITE|MASK_FPU },
    { "hypersparc", PROCESSOR_HYPERSPARC, MASK_ISA, MASK_V8|MASK_FPU },
    { "sparclite86x",  PROCESSOR_SPARCLITE86X, MASK_ISA|MASK_FPU,
      MASK_SPARCLITE },
    { "sparclet",   PROCESSOR_SPARCLET, MASK_ISA, MASK_SPARCLET },
    /* TEMIC sparclet */
    { "tsc701",     PROCESSOR_TSC701, MASK_ISA, MASK_SPARCLET },
    { "v9",         PROCESSOR_V9, MASK_ISA, MASK_V9 },
    /* TI ultrasparc I, II, IIi */
    { "ultrasparc", PROCESSOR_ULTRASPARC, MASK_ISA, MASK_V9
    /* Although insns using %y are deprecated, it is a clear win on current
       ultrasparcs.  */
                                                    |MASK_DEPRECATED_V8_INSNS},
    /* TI ultrasparc III */
    /* ??? Check if %y issue still holds true in ultra3.  */
    { "ultrasparc3", PROCESSOR_ULTRASPARC3, MASK_ISA, MASK_V9|MASK_DEPRECATED_V8_INSNS},
    /* UltraSPARC T1 */
    { "niagara", PROCESSOR_NIAGARA, MASK_ISA, MASK_V9|MASK_DEPRECATED_V8_INSNS},
    /* UltraSPARC T2 */
    { "niagara2", PROCESSOR_NIAGARA2, MASK_ISA, MASK_V9},
    { 0, (enum processor_type) 0, 0, 0 }
  };
  const struct cpu_table *cpu;
  const struct sparc_cpu_select *sel;
  int fpu;

#ifndef SPARC_BI_ARCH
  /* Check for unsupported architecture size.  */
  if (! TARGET_64BIT != DEFAULT_ARCH32_P)
    error ("%s is not supported by this configuration",
           DEFAULT_ARCH32_P ? "-m64" : "-m32");
#endif

  /* We force all 64bit archs to use 128 bit long double */
  if (TARGET_64BIT && ! TARGET_LONG_DOUBLE_128)
    {
      error ("-mlong-double-64 not allowed with -m64");
      target_flags |= MASK_LONG_DOUBLE_128;
    }

  /* Code model selection.  */
  sparc_cmodel = SPARC_DEFAULT_CMODEL;

#ifdef SPARC_BI_ARCH
  if (TARGET_ARCH32)
    sparc_cmodel = CM_32;
#endif

  if (sparc_cmodel_string != NULL)
    {
      if (TARGET_ARCH64)
        {
          for (cmodel = &cmodels[0]; cmodel->name; cmodel++)
            if (strcmp (sparc_cmodel_string, cmodel->name) == 0)
              break;
          if (cmodel->name == NULL)
            error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string);
          else
            sparc_cmodel = cmodel->value;
        }
      else
        error ("-mcmodel= is not supported on 32 bit systems");
    }

  fpu = target_flags & MASK_FPU; /* save current -mfpu status */

  /* Set the default CPU.  */
  for (def = &cpu_default[0]; def->name; ++def)
    if (def->cpu == TARGET_CPU_DEFAULT)
      break;
  gcc_assert (def->name);
  sparc_select[0].string = def->name;

  for (sel = &sparc_select[0]; sel->name; ++sel)
    {
      if (sel->string)
        {
          for (cpu = &cpu_table[0]; cpu->name; ++cpu)
            if (! strcmp (sel->string, cpu->name))
              {
                if (sel->set_tune_p)
                  sparc_cpu = cpu->processor;

                if (sel->set_arch_p)
                  {
                    target_flags &= ~cpu->disable;
                    target_flags |= cpu->enable;
                  }
                break;
              }

          if (! cpu->name)
            error ("bad value (%s) for %s switch", sel->string, sel->name);
        }
    }

  /* If -mfpu or -mno-fpu was explicitly used, don't override with
     the processor default.  */
  if (fpu_option_set)
    target_flags = (target_flags & ~MASK_FPU) | fpu;

  /* Don't allow -mvis if FPU is disabled.  */
  if (! TARGET_FPU)
    target_flags &= ~MASK_VIS;

  /* -mvis assumes UltraSPARC+, so we are sure v9 instructions
     are available.
     -m64 also implies v9.  */
  if (TARGET_VIS || TARGET_ARCH64)
    {
      target_flags |= MASK_V9;
      target_flags &= ~(MASK_V8 | MASK_SPARCLET | MASK_SPARCLITE);
    }

  /* Use the deprecated v8 insns for sparc64 in 32 bit mode.  */
  if (TARGET_V9 && TARGET_ARCH32)
    target_flags |= MASK_DEPRECATED_V8_INSNS;

  /* V8PLUS requires V9, makes no sense in 64 bit mode.  */
  if (! TARGET_V9 || TARGET_ARCH64)
    target_flags &= ~MASK_V8PLUS;

  /* Don't use stack biasing in 32 bit mode.  */
  if (TARGET_ARCH32)
    target_flags &= ~MASK_STACK_BIAS;

  /* Supply a default value for align_functions.  */
  if (align_functions == 0
      && (sparc_cpu == PROCESSOR_ULTRASPARC
          || sparc_cpu == PROCESSOR_ULTRASPARC3
          || sparc_cpu == PROCESSOR_NIAGARA
          || sparc_cpu == PROCESSOR_NIAGARA2))
    align_functions = 32;

  /* Validate PCC_STRUCT_RETURN.  */
  if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
    flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);

  /* Only use .uaxword when compiling for a 64-bit target.  */
  if (!TARGET_ARCH64)
    targetm.asm_out.unaligned_op.di = NULL;

  /* Do various machine dependent initializations.  */
  sparc_init_modes ();

  /* Acquire unique alias sets for our private stuff.  */
  sparc_sr_alias_set = new_alias_set ();
  struct_value_alias_set = new_alias_set ();

  /* Set up function hooks.  */
  init_machine_status = sparc_init_machine_status;

  switch (sparc_cpu)
    {
    case PROCESSOR_V7:
    case PROCESSOR_CYPRESS:
      sparc_costs = &cypress_costs;
      break;
    case PROCESSOR_V8:
    case PROCESSOR_SPARCLITE:
    case PROCESSOR_SUPERSPARC:
      sparc_costs = &supersparc_costs;
      break;
    case PROCESSOR_F930:
    case PROCESSOR_F934:
    case PROCESSOR_HYPERSPARC:
    case PROCESSOR_SPARCLITE86X:
      sparc_costs = &hypersparc_costs;
      break;
    case PROCESSOR_SPARCLET:
    case PROCESSOR_TSC701:
      sparc_costs = &sparclet_costs;
      break;
    case PROCESSOR_V9:
    case PROCESSOR_ULTRASPARC:
      sparc_costs = &ultrasparc_costs;
      break;
    case PROCESSOR_ULTRASPARC3:
      sparc_costs = &ultrasparc3_costs;
      break;
    case PROCESSOR_NIAGARA:
      sparc_costs = &niagara_costs;
      break;
    case PROCESSOR_NIAGARA2:
      sparc_costs = &niagara2_costs;
      break;
    };

#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
  if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
    target_flags |= MASK_LONG_DOUBLE_128;
#endif

  if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
    set_param_value ("simultaneous-prefetches",
                     ((sparc_cpu == PROCESSOR_ULTRASPARC
                       || sparc_cpu == PROCESSOR_NIAGARA
                       || sparc_cpu == PROCESSOR_NIAGARA2)
                      ? 2
                      : (sparc_cpu == PROCESSOR_ULTRASPARC3
                         ? 8 : 3)));
  if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
    set_param_value ("l1-cache-line-size",
                     ((sparc_cpu == PROCESSOR_ULTRASPARC
                       || sparc_cpu == PROCESSOR_ULTRASPARC3
                       || sparc_cpu == PROCESSOR_NIAGARA
                       || sparc_cpu == PROCESSOR_NIAGARA2)
                      ? 64 : 32));
}
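
/* [Editor's note] Worked example of the flag interplay above, assuming
   a 32-bit (TARGET_ARCH32) configuration: "-m32 -mcpu=v9" first gets
   MASK_V9 from cpu_table, the TARGET_V9 && TARGET_ARCH32 test then
   adds MASK_DEPRECATED_V8_INSNS, stack biasing is stripped by the
   TARGET_ARCH32 test, and sparc_costs ends up pointing at
   ultrasparc_costs through the PROCESSOR_V9 case of the final
   switch.  */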

/* Miscellaneous utilities.  */

/* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
   or branch on register contents instructions.  */

int
v9_regcmp_p (enum rtx_code code)
{
  return (code == EQ || code == NE || code == GE || code == LT
          || code == LE || code == GT);
}
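
/* [Editor's note] These six codes are exactly the conditions the V9
   branch-on-register-contents instructions (brz, brlez, brlz, brnz,
   brgz, brgez) and the corresponding movr variants can test.  They
   compare a register against zero, which is why the unsigned codes
   (GEU, LTU, etc.) are deliberately absent here.  */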

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   sethi instruction.  */

int
fp_sethi_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && SPARC_SETHI_P (i);
    }

  return 0;
}

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a single
   mov instruction.  */

int
fp_mov_p (rtx op)
{
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return SPARC_SIMM13_P (i);
    }

  return 0;
}

/* Nonzero if OP is a floating point constant which can
   be loaded into an integer register using a high/losum
   instruction sequence.  */

int
fp_high_losum_p (rtx op)
{
  /* The constraints calling this should only be in
     SFmode move insns, so any constant which cannot
     be moved using a single insn will do.  */
  if (GET_CODE (op) == CONST_DOUBLE)
    {
      REAL_VALUE_TYPE r;
      long i;

      REAL_VALUE_FROM_CONST_DOUBLE (r, op);
      REAL_VALUE_TO_TARGET_SINGLE (r, i);
      return !SPARC_SIMM13_P (i) && !SPARC_SETHI_P (i);
    }

  return 0;
}
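
/* [Editor's note] The three predicates above partition SFmode
   constants by the cheapest integer-register sequence for their bit
   pattern.  An illustrative split using IEEE single encodings: 0.0f
   has bits 0x00000000, a valid simm13, so fp_mov_p holds (one mov);
   1.0f has bits 0x3f800000 with the low 10 bits clear, so fp_sethi_p
   holds (one sethi); a pattern needing both halves, such as
   0x3f800278, falls to fp_high_losum_p (sethi plus or).  */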

/* Expand a move instruction.  Return true if all work is done.  */

bool
sparc_expand_move (enum machine_mode mode, rtx *operands)
{
  /* Handle sets of MEM first.  */
  if (GET_CODE (operands[0]) == MEM)
    {
      /* 0 is a register (or a pair of registers) on SPARC.  */
      if (register_or_zero_operand (operands[1], mode))
        return false;

      if (!reload_in_progress)
        {
          operands[0] = validize_mem (operands[0]);
          operands[1] = force_reg (mode, operands[1]);
        }
    }

  /* Fixup TLS cases.  */
  if (TARGET_HAVE_TLS
      && CONSTANT_P (operands[1])
      && sparc_tls_referenced_p (operands [1]))
    {
      operands[1] = legitimize_tls_address (operands[1]);
      return false;
    }

  /* Fixup PIC cases.  */
  if (flag_pic && CONSTANT_P (operands[1]))
    {
      if (pic_address_needs_scratch (operands[1]))
        operands[1] = legitimize_pic_address (operands[1], NULL_RTX);

      /* VxWorks does not impose a fixed gap between segments; the run-time
         gap can be different from the object-file gap.  We therefore can't
         assume X - _GLOBAL_OFFSET_TABLE_ is a link-time constant unless we
         are absolutely sure that X is in the same segment as the GOT.
         Unfortunately, the flexibility of linker scripts means that we
         can't be sure of that in general, so assume that _G_O_T_-relative
         accesses are never valid on VxWorks.  */
      if (GET_CODE (operands[1]) == LABEL_REF && !TARGET_VXWORKS_RTP)
        {
          if (mode == SImode)
            {
              emit_insn (gen_movsi_pic_label_ref (operands[0], operands[1]));
              return true;
            }

          if (mode == DImode)
            {
              gcc_assert (TARGET_ARCH64);
              emit_insn (gen_movdi_pic_label_ref (operands[0], operands[1]));
              return true;
            }
        }

      if (symbolic_operand (operands[1], mode))
        {
          operands[1] = legitimize_pic_address (operands[1],
                                                reload_in_progress
                                                ? operands[0] : NULL_RTX);
          return false;
        }
    }

  /* If we are trying to toss an integer constant into FP registers,
     or loading a FP or vector constant, force it into memory.  */
  if (CONSTANT_P (operands[1])
      && REG_P (operands[0])
      && (SPARC_FP_REG_P (REGNO (operands[0]))
          || SCALAR_FLOAT_MODE_P (mode)
          || VECTOR_MODE_P (mode)))
    {
      /* emit_group_store will send such bogosity to us when it is
         not storing directly into memory.  So fix this up to avoid
         crashes in output_constant_pool.  */
      if (operands [1] == const0_rtx)
        operands[1] = CONST0_RTX (mode);

      /* We can clear FP registers if TARGET_VIS, and always other regs.  */
      if ((TARGET_VIS || REGNO (operands[0]) < SPARC_FIRST_FP_REG)
          && const_zero_operand (operands[1], mode))
        return false;

      if (REGNO (operands[0]) < SPARC_FIRST_FP_REG
          /* We are able to build any SF constant in integer registers
             with at most 2 instructions.  */
          && (mode == SFmode
              /* And any DF constant in integer registers.  */
              || (mode == DFmode
                  && (reload_completed || reload_in_progress))))
        return false;

      operands[1] = force_const_mem (mode, operands[1]);
      if (!reload_in_progress)
        operands[1] = validize_mem (operands[1]);
      return false;
    }

  /* Accept non-constants and valid constants unmodified.  */
  if (!CONSTANT_P (operands[1])
      || GET_CODE (operands[1]) == HIGH
      || input_operand (operands[1], mode))
    return false;

  switch (mode)
    {
    case QImode:
      /* All QImode constants require only one insn, so proceed.  */
      break;

    case HImode:
    case SImode:
      sparc_emit_set_const32 (operands[0], operands[1]);
      return true;

    case DImode:
      /* input_operand should have filtered out 32-bit mode.  */
      sparc_emit_set_const64 (operands[0], operands[1]);
      return true;

    default:
      gcc_unreachable ();
    }

  return false;
}

/* Load OP1, a 32-bit constant, into OP0, a register.
   We know it can't be done in one insn when we get
   here, the move expander guarantees this.  */

void
sparc_emit_set_const32 (rtx op0, rtx op1)
{
  enum machine_mode mode = GET_MODE (op0);
  rtx temp;

  if (reload_in_progress || reload_completed)
    temp = op0;
  else
    temp = gen_reg_rtx (mode);

  if (GET_CODE (op1) == CONST_INT)
    {
      gcc_assert (!small_int_operand (op1, mode)
                  && !const_high_operand (op1, mode));

      /* Emit them as real moves instead of a HIGH/LO_SUM,
         this way CSE can see everything and reuse intermediate
         values if it wants.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
                              GEN_INT (INTVAL (op1)
                                & ~(HOST_WIDE_INT)0x3ff)));

      emit_insn (gen_rtx_SET (VOIDmode,
                              op0,
                              gen_rtx_IOR (mode, temp,
                                           GEN_INT (INTVAL (op1) & 0x3ff))));
    }
  else
    {
      /* A symbol, emit in the traditional way.  */
      emit_insn (gen_rtx_SET (VOIDmode, temp,
                              gen_rtx_HIGH (mode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode,
                              op0, gen_rtx_LO_SUM (mode, temp, op1)));
    }
}
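
/* [Editor's note] Illustrative expansion of sparc_emit_set_const32 for
   op1 == 0x12345678, a constant that passes neither small_int_operand
   nor const_high_operand:

     (set temp (const_int 0x12345400))        ; INTVAL (op1) & ~0x3ff
     (set op0 (ior temp (const_int 0x278)))   ; INTVAL (op1) & 0x3ff

   i.e. two plain SETs that CSE can look through, rather than an
   opaque HIGH/LO_SUM pair.  */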

/* Load OP1, a symbolic 64-bit constant, into OP0, a DImode register.
   If TEMP is nonzero, we are forbidden to use any other scratch
   registers.  Otherwise, we are allowed to generate them as needed.

   Note that TEMP may have TImode if the code model is TARGET_CM_MEDANY
   or TARGET_CM_EMBMEDANY (see the reload_indi and reload_outdi patterns).  */

void
sparc_emit_set_symbolic_const64 (rtx op0, rtx op1, rtx temp)
{
  rtx temp1, temp2, temp3, temp4, temp5;
  rtx ti_temp = 0;

  if (temp && GET_MODE (temp) == TImode)
    {
      ti_temp = temp;
      temp = gen_rtx_REG (DImode, REGNO (temp));
    }

  /* SPARC-V9 code-model support.  */
  switch (sparc_cmodel)
    {
    case CM_MEDLOW:
      /* The range spanned by all instructions in the object is less
         than 2^31 bytes (2GB) and the distance from any instruction
         to the location of the label _GLOBAL_OFFSET_TABLE_ is less
         than 2^31 bytes (2GB).

         The executable must be in the low 4TB of the virtual address
         space.

         sethi  %hi(symbol), %temp1
         or     %temp1, %lo(symbol), %reg  */
      if (temp)
        temp1 = temp;  /* op0 is allowed.  */
      else
        temp1 = gen_reg_rtx (DImode);

      emit_insn (gen_rtx_SET (VOIDmode, temp1, gen_rtx_HIGH (DImode, op1)));
      emit_insn (gen_rtx_SET (VOIDmode, op0, gen_rtx_LO_SUM (DImode, temp1, op1)));
      break;

    case CM_MEDMID:
      /* The range spanned by all instructions in the object is less
         than 2^31 bytes (2GB) and the distance from any instruction
         to the location of the label _GLOBAL_OFFSET_TABLE_ is less
         than 2^31 bytes (2GB).

         The executable must be in the low 16TB of the virtual address
         space.

         sethi  %h44(symbol), %temp1
         or     %temp1, %m44(symbol), %temp2
         sllx   %temp2, 12, %temp3
         or     %temp3, %l44(symbol), %reg  */
      if (temp)
        {
          temp1 = op0;
          temp2 = op0;
          temp3 = temp;  /* op0 is allowed.  */
        }
      else
        {
          temp1 = gen_reg_rtx (DImode);
          temp2 = gen_reg_rtx (DImode);
          temp3 = gen_reg_rtx (DImode);
        }

      emit_insn (gen_seth44 (temp1, op1));
      emit_insn (gen_setm44 (temp2, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp3,
                              gen_rtx_ASHIFT (DImode, temp2, GEN_INT (12))));
      emit_insn (gen_setl44 (op0, temp3, op1));
      break;

    case CM_MEDANY:
      /* The range spanned by all instructions in the object is less
         than 2^31 bytes (2GB) and the distance from any instruction
         to the location of the label _GLOBAL_OFFSET_TABLE_ is less
         than 2^31 bytes (2GB).

         The executable can be placed anywhere in the virtual address
         space.

         sethi  %hh(symbol), %temp1
         sethi  %lm(symbol), %temp2
         or     %temp1, %hm(symbol), %temp3
         sllx   %temp3, 32, %temp4
         or     %temp4, %temp2, %temp5
         or     %temp5, %lo(symbol), %reg  */
      if (temp)
        {
          /* It is possible that one of the registers we got for operands[2]
             might coincide with that of operands[0] (which is why we made
             it TImode).  Pick the other one to use as our scratch.  */
          if (rtx_equal_p (temp, op0))
            {
              gcc_assert (ti_temp);
              temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
            }
          temp1 = op0;
          temp2 = temp;  /* op0 is _not_ allowed, see above.  */
          temp3 = op0;
          temp4 = op0;
          temp5 = op0;
        }
      else
        {
          temp1 = gen_reg_rtx (DImode);
          temp2 = gen_reg_rtx (DImode);
          temp3 = gen_reg_rtx (DImode);
          temp4 = gen_reg_rtx (DImode);
          temp5 = gen_reg_rtx (DImode);
        }

      emit_insn (gen_sethh (temp1, op1));
      emit_insn (gen_setlm (temp2, op1));
      emit_insn (gen_sethm (temp3, temp1, op1));
      emit_insn (gen_rtx_SET (VOIDmode, temp4,
                              gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
      emit_insn (gen_rtx_SET (VOIDmode, temp5,
                              gen_rtx_PLUS (DImode, temp4, temp2)));
      emit_insn (gen_setlo (op0, temp5, op1));
      break;

    case CM_EMBMEDANY:
      /* Old old old backwards compatibility kruft here.
         Essentially it is MEDLOW with a fixed 64-bit
         virtual base added to all data segment addresses.
         Text-segment stuff is computed like MEDANY, we can't
         reuse the code above because the relocation knobs
         look different.

         Data segment:  sethi   %hi(symbol), %temp1
                        add     %temp1, EMBMEDANY_BASE_REG, %temp2
                        or      %temp2, %lo(symbol), %reg  */
      if (data_segment_operand (op1, GET_MODE (op1)))
        {
          if (temp)
            {
              temp1 = temp;  /* op0 is allowed.  */
              temp2 = op0;
            }
          else
            {
              temp1 = gen_reg_rtx (DImode);
              temp2 = gen_reg_rtx (DImode);
            }

          emit_insn (gen_embmedany_sethi (temp1, op1));
          emit_insn (gen_embmedany_brsum (temp2, temp1));
          emit_insn (gen_embmedany_losum (op0, temp2, op1));
        }

      /* Text segment:  sethi   %uhi(symbol), %temp1
                        sethi   %hi(symbol), %temp2
                        or      %temp1, %ulo(symbol), %temp3
                        sllx    %temp3, 32, %temp4
                        or      %temp4, %temp2, %temp5
                        or      %temp5, %lo(symbol), %reg  */
      else
        {
          if (temp)
            {
              /* It is possible that one of the registers we got for operands[2]
                 might coincide with that of operands[0] (which is why we made
                 it TImode).  Pick the other one to use as our scratch.  */
              if (rtx_equal_p (temp, op0))
                {
                  gcc_assert (ti_temp);
                  temp = gen_rtx_REG (DImode, REGNO (temp) + 1);
                }
              temp1 = op0;
              temp2 = temp;  /* op0 is _not_ allowed, see above.  */
              temp3 = op0;
              temp4 = op0;
              temp5 = op0;
            }
          else
            {
              temp1 = gen_reg_rtx (DImode);
              temp2 = gen_reg_rtx (DImode);
              temp3 = gen_reg_rtx (DImode);
              temp4 = gen_reg_rtx (DImode);
              temp5 = gen_reg_rtx (DImode);
            }

          emit_insn (gen_embmedany_textuhi (temp1, op1));
          emit_insn (gen_embmedany_texthi  (temp2, op1));
          emit_insn (gen_embmedany_textulo (temp3, temp1, op1));
          emit_insn (gen_rtx_SET (VOIDmode, temp4,
                                  gen_rtx_ASHIFT (DImode, temp3, GEN_INT (32))));
          emit_insn (gen_rtx_SET (VOIDmode, temp5,
                                  gen_rtx_PLUS (DImode, temp4, temp2)));
          emit_insn (gen_embmedany_textlo  (op0, temp5, op1));
        }
      break;

    default:
      gcc_unreachable ();
    }
}
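
/* [Editor's note] Instruction-count summary of the cases above, per
   symbolic constant: CM_MEDLOW emits 2 insns (sethi + or), CM_MEDMID
   4, CM_MEDANY 6, and CM_EMBMEDANY 3 for data-segment or 6 for
   text-segment addresses.  That cost gradient is why the smaller code
   models exist even though CM_MEDANY places no constraint on where
   the executable is loaded.  */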

#if HOST_BITS_PER_WIDE_INT == 32
void
sparc_emit_set_const64 (rtx op0 ATTRIBUTE_UNUSED, rtx op1 ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}
#else
/* These avoid problems when cross compiling.  If we do not
   go through all this hair then the optimizer will see
   invalid REG_EQUAL notes or in some cases none at all.  */
static rtx gen_safe_HIGH64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_SET64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_OR64 (rtx, HOST_WIDE_INT);
static rtx gen_safe_XOR64 (rtx, HOST_WIDE_INT);

/* The optimizer is not to assume anything about exactly
   which bits are set for a HIGH; they are unspecified.
   Unfortunately this leads to many missed optimizations
   during CSE.  We mask out the non-HIGH bits and match
   a plain movdi, to alleviate this problem.  */
static rtx
gen_safe_HIGH64 (rtx dest, HOST_WIDE_INT val)
{
  return gen_rtx_SET (VOIDmode, dest, GEN_INT (val & ~(HOST_WIDE_INT)0x3ff));
}

static rtx
gen_safe_SET64 (rtx dest, HOST_WIDE_INT val)
{
  return gen_rtx_SET (VOIDmode, dest, GEN_INT (val));
}

static rtx
gen_safe_OR64 (rtx src, HOST_WIDE_INT val)
{
  return gen_rtx_IOR (DImode, src, GEN_INT (val));
}

static rtx
gen_safe_XOR64 (rtx src, HOST_WIDE_INT val)
{
  return gen_rtx_XOR (DImode, src, GEN_INT (val));
}

/* Worker routines for 64-bit constant formation on arch64.
   One of the key things to be doing in these emissions is
   to create as many temp REGs as possible.  This makes it
   possible for half-built constants to be used later when
   such values are similar to something required later on.
   Without doing this, the optimizer cannot see such
   opportunities.  */

static void sparc_emit_set_const64_quick1 (rtx, rtx,
                                           unsigned HOST_WIDE_INT, int);

static void
sparc_emit_set_const64_quick1 (rtx op0, rtx temp,
                               unsigned HOST_WIDE_INT low_bits, int is_neg)
{
  unsigned HOST_WIDE_INT high_bits;

  if (is_neg)
    high_bits = (~low_bits) & 0xffffffff;
  else
    high_bits = low_bits;

  emit_insn (gen_safe_HIGH64 (temp, high_bits));
  if (!is_neg)
    {
      emit_insn (gen_rtx_SET (VOIDmode, op0,
                              gen_safe_OR64 (temp, (high_bits & 0x3ff))));
    }
  else
    {
      /* If we are XOR'ing with -1, then we should emit a one's complement
         instead.  This way the combiner will notice logical operations
         such as ANDN later on and substitute.  */
      if ((low_bits & 0x3ff) == 0x3ff)
        {
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_NOT (DImode, temp)));
        }
      else
        {
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_safe_XOR64 (temp,
                                                  (-(HOST_WIDE_INT)0x400
                                                   | (low_bits & 0x3ff)))));
        }
    }
}
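
/* [Editor's note] The is_neg path above leans on two's complement:
   temp holds the HIGH bits of ~low_bits, and XOR'ing with the
   negative simm13 (-0x400 | (low_bits & 0x3ff)) both restores the
   low 10 bits and, through sign extension of the immediate, turns on
   every bit above bit 31, yielding the sign-extended 64-bit value
   with a single extra instruction.  */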
1434
 
1435
static void sparc_emit_set_const64_quick2 (rtx, rtx, unsigned HOST_WIDE_INT,
1436
                                           unsigned HOST_WIDE_INT, int);
1437
 
1438
static void
1439
sparc_emit_set_const64_quick2 (rtx op0, rtx temp,
1440
                               unsigned HOST_WIDE_INT high_bits,
1441
                               unsigned HOST_WIDE_INT low_immediate,
1442
                               int shift_count)
1443
{
1444
  rtx temp2 = op0;
1445
 
1446
  if ((high_bits & 0xfffffc00) != 0)
1447
    {
1448
      emit_insn (gen_safe_HIGH64 (temp, high_bits));
1449
      if ((high_bits & ~0xfffffc00) != 0)
1450
        emit_insn (gen_rtx_SET (VOIDmode, op0,
1451
                                gen_safe_OR64 (temp, (high_bits & 0x3ff))));
1452
      else
1453
        temp2 = temp;
1454
    }
1455
  else
1456
    {
1457
      emit_insn (gen_safe_SET64 (temp, high_bits));
1458
      temp2 = temp;
1459
    }
1460
 
1461
  /* Now shift it up into place.  */
1462
  emit_insn (gen_rtx_SET (VOIDmode, op0,
1463
                          gen_rtx_ASHIFT (DImode, temp2,
1464
                                          GEN_INT (shift_count))));
1465
 
1466
  /* If there is a low immediate part piece, finish up by
1467
     putting that in as well.  */
1468
  if (low_immediate != 0)
1469
    emit_insn (gen_rtx_SET (VOIDmode, op0,
1470
                            gen_safe_OR64 (op0, low_immediate)));
1471
}

static void sparc_emit_set_const64_longway (rtx, rtx, unsigned HOST_WIDE_INT,
                                            unsigned HOST_WIDE_INT);

/* Full 64-bit constant decomposition.  Even though this is the
   'worst' case, we still optimize a few things away.  */
static void
sparc_emit_set_const64_longway (rtx op0, rtx temp,
                                unsigned HOST_WIDE_INT high_bits,
                                unsigned HOST_WIDE_INT low_bits)
{
  rtx sub_temp;

  if (reload_in_progress || reload_completed)
    sub_temp = op0;
  else
    sub_temp = gen_reg_rtx (DImode);

  if ((high_bits & 0xfffffc00) != 0)
    {
      emit_insn (gen_safe_HIGH64 (temp, high_bits));
      if ((high_bits & ~0xfffffc00) != 0)
        emit_insn (gen_rtx_SET (VOIDmode,
                                sub_temp,
                                gen_safe_OR64 (temp, (high_bits & 0x3ff))));
      else
        sub_temp = temp;
    }
  else
    {
      emit_insn (gen_safe_SET64 (temp, high_bits));
      sub_temp = temp;
    }

  if (!reload_in_progress && !reload_completed)
    {
      rtx temp2 = gen_reg_rtx (DImode);
      rtx temp3 = gen_reg_rtx (DImode);
      rtx temp4 = gen_reg_rtx (DImode);

      emit_insn (gen_rtx_SET (VOIDmode, temp4,
                              gen_rtx_ASHIFT (DImode, sub_temp,
                                              GEN_INT (32))));

      emit_insn (gen_safe_HIGH64 (temp2, low_bits));
      if ((low_bits & ~0xfffffc00) != 0)
        {
          emit_insn (gen_rtx_SET (VOIDmode, temp3,
                                  gen_safe_OR64 (temp2, (low_bits & 0x3ff))));
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_PLUS (DImode, temp4, temp3)));
        }
      else
        {
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_PLUS (DImode, temp4, temp2)));
        }
    }
  else
    {
      rtx low1 = GEN_INT ((low_bits >> (32 - 12))          & 0xfff);
      rtx low2 = GEN_INT ((low_bits >> (32 - 12 - 12))     & 0xfff);
      rtx low3 = GEN_INT ((low_bits >> (32 - 12 - 12 - 8)) & 0x0ff);
      int to_shift = 12;

      /* We are in the middle of reload, so this is really
         painful.  However we do still make an attempt to
         avoid emitting truly stupid code.  */
      if (low1 != const0_rtx)
        {
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_ASHIFT (DImode, sub_temp,
                                                  GEN_INT (to_shift))));
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_IOR (DImode, op0, low1)));
          sub_temp = op0;
          to_shift = 12;
        }
      else
        {
          to_shift += 12;
        }
      if (low2 != const0_rtx)
        {
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_ASHIFT (DImode, sub_temp,
                                                  GEN_INT (to_shift))));
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_IOR (DImode, op0, low2)));
          sub_temp = op0;
          to_shift = 8;
        }
      else
        {
          to_shift += 8;
        }
      emit_insn (gen_rtx_SET (VOIDmode, op0,
                              gen_rtx_ASHIFT (DImode, sub_temp,
                                              GEN_INT (to_shift))));
      if (low3 != const0_rtx)
        emit_insn (gen_rtx_SET (VOIDmode, op0,
                                gen_rtx_IOR (DImode, op0, low3)));
      /* phew...  */
    }
}
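/* As an illustration, outside of reload the constant
   0x123456789abcdef0 decomposes into sethi/or for 0x12345678, an
   sllx by 32, sethi/or for 0x9abcdef0 and a final add, six
   instructions in total.  */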

/* Analyze a 64-bit constant for certain properties.  */
static void analyze_64bit_constant (unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT,
                                    int *, int *, int *);

static void
analyze_64bit_constant (unsigned HOST_WIDE_INT high_bits,
                        unsigned HOST_WIDE_INT low_bits,
                        int *hbsp, int *lbsp, int *abbasp)
{
  int lowest_bit_set, highest_bit_set, all_bits_between_are_set;
  int i;

  lowest_bit_set = highest_bit_set = -1;
  i = 0;
  do
    {
      if ((lowest_bit_set == -1)
          && ((low_bits >> i) & 1))
        lowest_bit_set = i;
      if ((highest_bit_set == -1)
          && ((high_bits >> (32 - i - 1)) & 1))
        highest_bit_set = (64 - i - 1);
    }
  while (++i < 32
         && ((highest_bit_set == -1)
             || (lowest_bit_set == -1)));
  if (i == 32)
    {
      i = 0;
      do
        {
          if ((lowest_bit_set == -1)
              && ((high_bits >> i) & 1))
            lowest_bit_set = i + 32;
          if ((highest_bit_set == -1)
              && ((low_bits >> (32 - i - 1)) & 1))
            highest_bit_set = 32 - i - 1;
        }
      while (++i < 32
             && ((highest_bit_set == -1)
                 || (lowest_bit_set == -1)));
    }
  /* If there are no bits set this should have gone out
     as one instruction!  */
  gcc_assert (lowest_bit_set != -1 && highest_bit_set != -1);
  all_bits_between_are_set = 1;
  for (i = lowest_bit_set; i <= highest_bit_set; i++)
    {
      if (i < 32)
        {
          if ((low_bits & (1 << i)) != 0)
            continue;
        }
      else
        {
          if ((high_bits & (1 << (i - 32))) != 0)
            continue;
        }
      all_bits_between_are_set = 0;
      break;
    }
  *hbsp = highest_bit_set;
  *lbsp = lowest_bit_set;
  *abbasp = all_bits_between_are_set;
}
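/* Example with an illustrative constant: for 0x0000003ffffffc00
   (high_bits 0x3f, low_bits 0xfffffc00) the scans above produce
   lowest_bit_set == 10, highest_bit_set == 37 and
   all_bits_between_are_set == 1.  */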

static int const64_is_2insns (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT);

static int
const64_is_2insns (unsigned HOST_WIDE_INT high_bits,
                   unsigned HOST_WIDE_INT low_bits)
{
  int highest_bit_set, lowest_bit_set, all_bits_between_are_set;

  if (high_bits == 0
      || high_bits == 0xffffffff)
    return 1;

  analyze_64bit_constant (high_bits, low_bits,
                          &highest_bit_set, &lowest_bit_set,
                          &all_bits_between_are_set);

  if ((highest_bit_set == 63
       || lowest_bit_set == 0)
      && all_bits_between_are_set != 0)
    return 1;

  if ((highest_bit_set - lowest_bit_set) < 21)
    return 1;

  return 0;
}
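/* For instance, the illustrative constant 0x00000007ffe00000 has
   bits 21..34 set, a span of 14 bits, so this returns 1: such a
   field fits in the 22-bit sethi immediate once shifted into place,
   leaving a sethi/sllx pair.  */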

static unsigned HOST_WIDE_INT create_simple_focus_bits (unsigned HOST_WIDE_INT,
                                                        unsigned HOST_WIDE_INT,
                                                        int, int);

static unsigned HOST_WIDE_INT
create_simple_focus_bits (unsigned HOST_WIDE_INT high_bits,
                          unsigned HOST_WIDE_INT low_bits,
                          int lowest_bit_set, int shift)
{
  HOST_WIDE_INT hi, lo;

  if (lowest_bit_set < 32)
    {
      lo = (low_bits >> lowest_bit_set) << shift;
      hi = ((high_bits << (32 - lowest_bit_set)) << shift);
    }
  else
    {
      lo = 0;
      hi = ((high_bits >> (lowest_bit_set - 32)) << shift);
    }
  gcc_assert (! (hi & lo));
  return (hi | lo);
}
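/* Continuing the illustrative constant above: with high_bits 0x7,
   low_bits 0xffe00000, lowest_bit_set 21 and shift 10, the 14 set
   bits are re-based to bit 10 and the function returns 0xfffc00,
   ready for use as a sethi immediate.  */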

/* Here we are sure to be arch64 and this is an integer constant
   being loaded into a register.  Emit the most efficient
   insn sequence possible.  Detection of all the 1-insn cases
   has been done already.  */
void
sparc_emit_set_const64 (rtx op0, rtx op1)
{
  unsigned HOST_WIDE_INT high_bits, low_bits;
  int lowest_bit_set, highest_bit_set;
  int all_bits_between_are_set;
  rtx temp = 0;

  /* Sanity check that we know what we are working with.  */
  gcc_assert (TARGET_ARCH64
              && (GET_CODE (op0) == SUBREG
                  || (REG_P (op0) && ! SPARC_FP_REG_P (REGNO (op0)))));

  if (reload_in_progress || reload_completed)
    temp = op0;

  if (GET_CODE (op1) != CONST_INT)
    {
      sparc_emit_set_symbolic_const64 (op0, op1, temp);
      return;
    }

  if (! temp)
    temp = gen_reg_rtx (DImode);

  high_bits = ((INTVAL (op1) >> 32) & 0xffffffff);
  low_bits = (INTVAL (op1) & 0xffffffff);

  /* low_bits   bits 0  --> 31
     high_bits  bits 32 --> 63  */

  analyze_64bit_constant (high_bits, low_bits,
                          &highest_bit_set, &lowest_bit_set,
                          &all_bits_between_are_set);

  /* First try for a 2-insn sequence.  */

  /* These situations are preferred because the optimizer can
   * do more things with them:
   * 1) mov     -1, %reg
   *    sllx    %reg, shift, %reg
   * 2) mov     -1, %reg
   *    srlx    %reg, shift, %reg
   * 3) mov     some_small_const, %reg
   *    sllx    %reg, shift, %reg
   */
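  /* For instance, the illustrative constant 0xffffffff00000000
     (bits 32..63 set) is case 1 above and becomes
     mov -1, %reg; sllx %reg, 32, %reg.  */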
  if (((highest_bit_set == 63
        || lowest_bit_set == 0)
       && all_bits_between_are_set != 0)
      || ((highest_bit_set - lowest_bit_set) < 12))
    {
      HOST_WIDE_INT the_const = -1;
      int shift = lowest_bit_set;

      if ((highest_bit_set != 63
           && lowest_bit_set != 0)
          || all_bits_between_are_set == 0)
        {
          the_const =
            create_simple_focus_bits (high_bits, low_bits,
                                      lowest_bit_set, 0);
        }
      else if (lowest_bit_set == 0)
        shift = -(63 - highest_bit_set);

      gcc_assert (SPARC_SIMM13_P (the_const));
      gcc_assert (shift != 0);

      emit_insn (gen_safe_SET64 (temp, the_const));
      if (shift > 0)
        emit_insn (gen_rtx_SET (VOIDmode,
                                op0,
                                gen_rtx_ASHIFT (DImode,
                                                temp,
                                                GEN_INT (shift))));
      else if (shift < 0)
        emit_insn (gen_rtx_SET (VOIDmode,
                                op0,
                                gen_rtx_LSHIFTRT (DImode,
                                                  temp,
                                                  GEN_INT (-shift))));
      return;
    }

  /* Now a range of 22 or less bits set somewhere.
   * 1) sethi   %hi(focus_bits), %reg
   *    sllx    %reg, shift, %reg
   * 2) sethi   %hi(focus_bits), %reg
   *    srlx    %reg, shift, %reg
   */
  if ((highest_bit_set - lowest_bit_set) < 21)
    {
      unsigned HOST_WIDE_INT focus_bits =
        create_simple_focus_bits (high_bits, low_bits,
                                  lowest_bit_set, 10);

      gcc_assert (SPARC_SETHI_P (focus_bits));
      gcc_assert (lowest_bit_set != 10);

      emit_insn (gen_safe_HIGH64 (temp, focus_bits));

      /* If lowest_bit_set == 10 then a sethi alone could have done it.  */
      if (lowest_bit_set < 10)
        emit_insn (gen_rtx_SET (VOIDmode,
                                op0,
                                gen_rtx_LSHIFTRT (DImode, temp,
                                                  GEN_INT (10 - lowest_bit_set))));
      else if (lowest_bit_set > 10)
        emit_insn (gen_rtx_SET (VOIDmode,
                                op0,
                                gen_rtx_ASHIFT (DImode, temp,
                                                GEN_INT (lowest_bit_set - 10))));
      return;
    }
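  /* For instance, the illustrative constant 0x00000007ffe00000 takes
     the case above: sethi %hi(0xfffc00), %reg places the 14-bit field
     at bit 10, and sllx %reg, 11, %reg moves it up to bit 21.  */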

  /* 1) sethi   %hi(low_bits), %reg
   *    or      %reg, %lo(low_bits), %reg
   * 2) sethi   %hi(~low_bits), %reg
   *    xor     %reg, %lo(-0x400 | (low_bits & 0x3ff)), %reg
   */
  if (high_bits == 0
      || high_bits == 0xffffffff)
    {
      sparc_emit_set_const64_quick1 (op0, temp, low_bits,
                                     (high_bits == 0xffffffff));
      return;
    }

  /* Now, try 3-insn sequences.  */

  /* 1) sethi   %hi(high_bits), %reg
   *    or      %reg, %lo(high_bits), %reg
   *    sllx    %reg, 32, %reg
   */
  if (low_bits == 0)
    {
      sparc_emit_set_const64_quick2 (op0, temp, high_bits, 0, 32);
      return;
    }

  /* We may be able to do something quick
     when the constant is negated, so try that.  */
  if (const64_is_2insns ((~high_bits) & 0xffffffff,
                         (~low_bits) & 0xfffffc00))
    {
      /* NOTE: The trailing bits get XOR'd so we need the
         non-negated bits, not the negated ones.  */
      unsigned HOST_WIDE_INT trailing_bits = low_bits & 0x3ff;

      if ((((~high_bits) & 0xffffffff) == 0
           && ((~low_bits) & 0x80000000) == 0)
          || (((~high_bits) & 0xffffffff) == 0xffffffff
              && ((~low_bits) & 0x80000000) != 0))
        {
          unsigned HOST_WIDE_INT fast_int = (~low_bits & 0xffffffff);

          if ((SPARC_SETHI_P (fast_int)
               && (~high_bits & 0xffffffff) == 0)
              || SPARC_SIMM13_P (fast_int))
            emit_insn (gen_safe_SET64 (temp, fast_int));
          else
            sparc_emit_set_const64 (temp, GEN_INT (fast_int));
        }
      else
        {
          rtx negated_const;
          negated_const = GEN_INT (((~low_bits) & 0xfffffc00) |
                                   (((HOST_WIDE_INT)((~high_bits) & 0xffffffff))<<32));
          sparc_emit_set_const64 (temp, negated_const);
        }

      /* If we are XOR'ing with -1, then we should emit a one's complement
         instead.  This way the combiner will notice logical operations
         such as ANDN later on and substitute.  */
      if (trailing_bits == 0x3ff)
        {
          emit_insn (gen_rtx_SET (VOIDmode, op0,
                                  gen_rtx_NOT (DImode, temp)));
        }
      else
        {
          emit_insn (gen_rtx_SET (VOIDmode,
                                  op0,
                                  gen_safe_XOR64 (temp,
                                                  (-0x400 | trailing_bits))));
        }
      return;
    }

  /* 1) sethi   %hi(xxx), %reg
   *    or      %reg, %lo(xxx), %reg
   *    sllx    %reg, yyy, %reg
   *
   * ??? This is just a generalized version of the low_bits==0
   * thing above, FIXME...
   */
  if ((highest_bit_set - lowest_bit_set) < 32)
    {
      unsigned HOST_WIDE_INT focus_bits =
        create_simple_focus_bits (high_bits, low_bits,
                                  lowest_bit_set, 0);

      /* We can't get here in this state.  */
      gcc_assert (highest_bit_set >= 32 && lowest_bit_set < 32);

      /* So what we know is that the set bits straddle the
         middle of the 64-bit word.  */
      sparc_emit_set_const64_quick2 (op0, temp,
                                     focus_bits, 0,
                                     lowest_bit_set);
      return;
    }

  /* 1) sethi   %hi(high_bits), %reg
   *    or      %reg, %lo(high_bits), %reg
   *    sllx    %reg, 32, %reg
   *    or      %reg, low_bits, %reg
   */
  if (SPARC_SIMM13_P(low_bits)
      && ((int)low_bits > 0))
    {
      sparc_emit_set_const64_quick2 (op0, temp, high_bits, low_bits, 32);
      return;
    }

  /* The easiest way, when all else fails, is full decomposition.  */
#if 0
  printf ("sparc_emit_set_const64: Hard constant [%08lx%08lx] neg[%08lx%08lx]\n",
          high_bits, low_bits, ~high_bits, ~low_bits);
#endif
  sparc_emit_set_const64_longway (op0, temp, high_bits, low_bits);
}
#endif /* HOST_BITS_PER_WIDE_INT == 32 */

/* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
   return the mode to be used for the comparison.  For floating-point,
   CCFP[E]mode is used.  CC_NOOVmode should be used when the first operand
   is a PLUS, MINUS, NEG, or ASHIFT.  CCmode should be used when no special
   processing is needed.  */

enum machine_mode
select_cc_mode (enum rtx_code op, rtx x, rtx y ATTRIBUTE_UNUSED)
{
  if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
    {
      switch (op)
        {
        case EQ:
        case NE:
        case UNORDERED:
        case ORDERED:
        case UNLT:
        case UNLE:
        case UNGT:
        case UNGE:
        case UNEQ:
        case LTGT:
          return CCFPmode;

        case LT:
        case LE:
        case GT:
        case GE:
          return CCFPEmode;

        default:
          gcc_unreachable ();
        }
    }
  else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
           || GET_CODE (x) == NEG || GET_CODE (x) == ASHIFT)
    {
      if (TARGET_ARCH64 && GET_MODE (x) == DImode)
        return CCX_NOOVmode;
      else
        return CC_NOOVmode;
    }
  else
    {
      if (TARGET_ARCH64 && GET_MODE (x) == DImode)
        return CCXmode;
      else
        return CCmode;
    }
}
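/* For example, (gt:DF x y) selects CCFPEmode because GT must signal
   on unordered operands, while (uneq:DF x y) selects CCFPmode; a
   PLUS first operand selects CC_NOOVmode, recording that the
   overflow bit cannot be trusted when the flags come from an add.  */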

/* Emit the compare insn and return the CC reg for a CODE comparison
   with operands X and Y.  */

static rtx
gen_compare_reg_1 (enum rtx_code code, rtx x, rtx y)
{
  enum machine_mode mode;
  rtx cc_reg;

  if (GET_MODE_CLASS (GET_MODE (x)) == MODE_CC)
    return x;

  mode = SELECT_CC_MODE (code, x, y);

  /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
     fcc regs (cse can't tell they're really call clobbered regs and will
     remove a duplicate comparison even if there is an intervening function
     call - it will then try to reload the cc reg via an int reg which is why
     we need the movcc patterns).  It is possible to provide the movcc
     patterns by using the ldxfsr/stxfsr v9 insns.  I tried it: you need two
     registers (say %g1,%g5) and it takes about 6 insns.  A better fix would be
     to tell cse that CCFPE mode registers (even pseudos) are call
     clobbered.  */

  /* ??? This is an experiment.  Rather than making changes to cse which may
     or may not be easy/clean, we do our own cse.  This is possible because
     we will generate hard registers.  Cse knows they're call clobbered (it
     doesn't know the same thing about pseudos).  If we guess wrong, no big
     deal, but if we win, great!  */

  if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
#if 1 /* experiment */
    {
      int reg;
      /* We cycle through the registers to ensure they're all exercised.  */
      static int next_fcc_reg = 0;
      /* Previous x,y for each fcc reg.  */
      static rtx prev_args[4][2];

      /* Scan prev_args for x,y.  */
      for (reg = 0; reg < 4; reg++)
        if (prev_args[reg][0] == x && prev_args[reg][1] == y)
          break;
      if (reg == 4)
        {
          reg = next_fcc_reg;
          prev_args[reg][0] = x;
          prev_args[reg][1] = y;
          next_fcc_reg = (next_fcc_reg + 1) & 3;
        }
      cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
    }
#else
    cc_reg = gen_reg_rtx (mode);
#endif /* ! experiment */
  else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
    cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
  else
    cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);

  /* We shouldn't get here for TFmode if !TARGET_HARD_QUAD.  If we do, this
     will only result in an unrecognizable insn so no point in asserting.  */
  emit_insn (gen_rtx_SET (VOIDmode, cc_reg, gen_rtx_COMPARE (mode, x, y)));

  return cc_reg;
}


/* Emit the compare insn and return the CC reg for the comparison in CMP.  */

rtx
gen_compare_reg (rtx cmp)
{
  return gen_compare_reg_1 (GET_CODE (cmp), XEXP (cmp, 0), XEXP (cmp, 1));
}

/* This function is used for v9 only.
   DEST is the target of the Scc insn.
   CODE is the code for an Scc's comparison.
   X and Y are the values we compare.

   This function is needed to turn

           (set (reg:SI 110)
               (gt (reg:CCX 100 %icc)
                   (const_int 0)))
   into
           (set (reg:SI 110)
               (gt:DI (reg:CCX 100 %icc)
                   (const_int 0)))

   I.e., the instruction recognizer needs to see the mode of the comparison
   to find the right instruction.  We could use "gt:DI" right in the
   define_expand, but leaving it out allows us to handle DI, SI, etc.  */

static int
gen_v9_scc (rtx dest, enum rtx_code compare_code, rtx x, rtx y)
{
  if (! TARGET_ARCH64
      && (GET_MODE (x) == DImode
          || GET_MODE (dest) == DImode))
    return 0;

  /* Try to use the movrCC insns.  */
  if (TARGET_ARCH64
      && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
      && y == const0_rtx
      && v9_regcmp_p (compare_code))
    {
      rtx op0 = x;
      rtx temp;

      /* Special case for op0 != 0.  This can be done with one instruction if
         dest == x.  */

      if (compare_code == NE
          && GET_MODE (dest) == DImode
          && rtx_equal_p (op0, dest))
        {
          emit_insn (gen_rtx_SET (VOIDmode, dest,
                              gen_rtx_IF_THEN_ELSE (DImode,
                                       gen_rtx_fmt_ee (compare_code, DImode,
                                                       op0, const0_rtx),
                                       const1_rtx,
                                       dest)));
          return 1;
        }

      if (reg_overlap_mentioned_p (dest, op0))
        {
          /* Handle the case where dest == x.
             We "early clobber" the result.  */
          op0 = gen_reg_rtx (GET_MODE (x));
          emit_move_insn (op0, x);
        }

      emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
      if (GET_MODE (op0) != DImode)
        {
          temp = gen_reg_rtx (DImode);
          convert_move (temp, op0, 0);
        }
      else
        temp = op0;
      emit_insn (gen_rtx_SET (VOIDmode, dest,
                          gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
                                   gen_rtx_fmt_ee (compare_code, DImode,
                                                   temp, const0_rtx),
                                   const1_rtx,
                                   dest)));
      return 1;
    }
  else
    {
      x = gen_compare_reg_1 (compare_code, x, y);
      y = const0_rtx;

      gcc_assert (GET_MODE (x) != CC_NOOVmode
                  && GET_MODE (x) != CCX_NOOVmode);

      emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
      emit_insn (gen_rtx_SET (VOIDmode, dest,
                          gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
                                   gen_rtx_fmt_ee (compare_code,
                                                   GET_MODE (x), x, y),
                                    const1_rtx, dest)));
      return 1;
    }
}


/* Emit an scc insn.  For seq, sne, sgeu, and sltu, we can do this
   without jumps using the addx/subx instructions.  */

bool
emit_scc_insn (rtx operands[])
{
  rtx tem;
  rtx x;
  rtx y;
  enum rtx_code code;

  /* The quad-word fp compare library routines all return nonzero to indicate
     true, which is different from the equivalent libgcc routines, so we must
     handle them specially here.  */
  if (GET_MODE (operands[2]) == TFmode && ! TARGET_HARD_QUAD)
    {
      operands[1] = sparc_emit_float_lib_cmp (operands[2], operands[3],
                                              GET_CODE (operands[1]));
      operands[2] = XEXP (operands[1], 0);
      operands[3] = XEXP (operands[1], 1);
    }

  code = GET_CODE (operands[1]);
  x = operands[2];
  y = operands[3];

  /* For seq/sne on v9 we use the same code as v8 (the addx/subx method has
     more applications).  The exception to this is "reg != 0" which can
     be done in one instruction on v9 (so we do it).  */
  if (code == EQ)
    {
      if (GET_MODE (x) == SImode)
        {
          rtx pat = gen_seqsi_special (operands[0], x, y);
          emit_insn (pat);
          return true;
        }
      else if (GET_MODE (x) == DImode)
        {
          rtx pat = gen_seqdi_special (operands[0], x, y);
          emit_insn (pat);
          return true;
        }
    }

  if (code == NE)
    {
      if (GET_MODE (x) == SImode)
        {
          rtx pat = gen_snesi_special (operands[0], x, y);
          emit_insn (pat);
          return true;
        }
      else if (GET_MODE (x) == DImode)
        {
          rtx pat = gen_snedi_special (operands[0], x, y);
          emit_insn (pat);
          return true;
        }
    }

  /* For the rest, on v9 we can use conditional moves.  */

  if (TARGET_V9)
    {
      if (gen_v9_scc (operands[0], code, x, y))
        return true;
    }

  /* We can do LTU and GEU using the addx/subx instructions too.  And
     for GTU/LEU, if both operands are registers swap them and fall
     back to the easy case.  */
  if (code == GTU || code == LEU)
    {
      if ((GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
          && (GET_CODE (y) == REG || GET_CODE (y) == SUBREG))
        {
          tem = x;
          x = y;
          y = tem;
          code = swap_condition (code);
        }
    }

  if (code == LTU || code == GEU)
    {
      emit_insn (gen_rtx_SET (VOIDmode, operands[0],
                              gen_rtx_fmt_ee (code, SImode,
                                              gen_compare_reg_1 (code, x, y),
                                              const0_rtx)));
      return true;
    }

  /* Nope, do branches.  */
  return false;
}
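/* For example, an unsigned "dest = (x < y)" takes the LTU case above:
   the comparison leaves the result in the carry bit, which the
   matching scc pattern then copies into DEST with an addx, avoiding
   any branch.  */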

/* Emit a conditional jump insn for the v9 architecture using comparison code
   CODE and jump target LABEL.
   This function exists to take advantage of the v9 brxx insns.  */

static void
emit_v9_brxx_insn (enum rtx_code code, rtx op0, rtx label)
{
  emit_jump_insn (gen_rtx_SET (VOIDmode,
                           pc_rtx,
                           gen_rtx_IF_THEN_ELSE (VOIDmode,
                                    gen_rtx_fmt_ee (code, GET_MODE (op0),
                                                    op0, const0_rtx),
                                    gen_rtx_LABEL_REF (VOIDmode, label),
                                    pc_rtx)));
}

void
emit_conditional_branch_insn (rtx operands[])
{
  /* The quad-word fp compare library routines all return nonzero to indicate
     true, which is different from the equivalent libgcc routines, so we must
     handle them specially here.  */
  if (GET_MODE (operands[1]) == TFmode && ! TARGET_HARD_QUAD)
    {
      operands[0] = sparc_emit_float_lib_cmp (operands[1], operands[2],
                                              GET_CODE (operands[0]));
      operands[1] = XEXP (operands[0], 0);
      operands[2] = XEXP (operands[0], 1);
    }

  if (TARGET_ARCH64 && operands[2] == const0_rtx
      && GET_CODE (operands[1]) == REG
      && GET_MODE (operands[1]) == DImode)
    {
      emit_v9_brxx_insn (GET_CODE (operands[0]), operands[1], operands[3]);
      return;
    }

  operands[1] = gen_compare_reg (operands[0]);
  operands[2] = const0_rtx;
  operands[0] = gen_rtx_fmt_ee (GET_CODE (operands[0]), VOIDmode,
                                operands[1], operands[2]);
  emit_jump_insn (gen_cbranchcc4 (operands[0], operands[1], operands[2],
                                  operands[3]));
}


/* Generate a DFmode part of a hard TFmode register.
   REG is the TFmode hard register, LOW is 1 for the
   low 64 bits of the register and 0 otherwise.  */

rtx
gen_df_reg (rtx reg, int low)
{
  int regno = REGNO (reg);

  if ((WORDS_BIG_ENDIAN == 0) ^ (low != 0))
    regno += (TARGET_ARCH64 && regno < 32) ? 1 : 2;
  return gen_rtx_REG (DFmode, regno);
}
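/* For example, SPARC is big-endian, so for a TFmode value in the quad
   register starting at %f0, gen_df_reg (reg, 1) returns the DFmode
   register %f2 (the low-order half) while gen_df_reg (reg, 0) returns
   %f0 itself.  */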

/* Generate a call to FUNC with OPERANDS.  Operand 0 is the return value.
   Unlike normal calls, TFmode operands are passed by reference.  It is
   assumed that no more than 3 operands are required.  */

static void
emit_soft_tfmode_libcall (const char *func_name, int nargs, rtx *operands)
{
  rtx ret_slot = NULL, arg[3], func_sym;
  int i;

  /* We only expect to be called for conversions, unary, and binary ops.  */
  gcc_assert (nargs == 2 || nargs == 3);

  for (i = 0; i < nargs; ++i)
    {
      rtx this_arg = operands[i];
      rtx this_slot;

      /* TFmode arguments and return values are passed by reference.  */
      if (GET_MODE (this_arg) == TFmode)
        {
          int force_stack_temp;

          force_stack_temp = 0;
          if (TARGET_BUGGY_QP_LIB && i == 0)
            force_stack_temp = 1;

          if (GET_CODE (this_arg) == MEM
              && ! force_stack_temp)
            this_arg = XEXP (this_arg, 0);
          else if (CONSTANT_P (this_arg)
                   && ! force_stack_temp)
            {
              this_slot = force_const_mem (TFmode, this_arg);
              this_arg = XEXP (this_slot, 0);
            }
          else
            {
              this_slot = assign_stack_temp (TFmode, GET_MODE_SIZE (TFmode), 0);

              /* Operand 0 is the return value.  We'll copy it out later.  */
              if (i > 0)
                emit_move_insn (this_slot, this_arg);
              else
                ret_slot = this_slot;

              this_arg = XEXP (this_slot, 0);
            }
        }

      arg[i] = this_arg;
    }

  func_sym = gen_rtx_SYMBOL_REF (Pmode, func_name);

  if (GET_MODE (operands[0]) == TFmode)
    {
      if (nargs == 2)
        emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 2,
                           arg[0], GET_MODE (arg[0]),
                           arg[1], GET_MODE (arg[1]));
      else
        emit_library_call (func_sym, LCT_NORMAL, VOIDmode, 3,
                           arg[0], GET_MODE (arg[0]),
                           arg[1], GET_MODE (arg[1]),
                           arg[2], GET_MODE (arg[2]));

      if (ret_slot)
        emit_move_insn (operands[0], ret_slot);
    }
  else
    {
      rtx ret;

      gcc_assert (nargs == 2);

      ret = emit_library_call_value (func_sym, operands[0], LCT_NORMAL,
                                     GET_MODE (operands[0]), 1,
                                     arg[1], GET_MODE (arg[1]));

      if (ret != operands[0])
        emit_move_insn (operands[0], ret);
    }
}

/* Expand soft-float TFmode calls to SPARC ABI routines.  */

static void
emit_soft_tfmode_binop (enum rtx_code code, rtx *operands)
{
  const char *func;

  switch (code)
    {
    case PLUS:
      func = "_Qp_add";
      break;
    case MINUS:
      func = "_Qp_sub";
      break;
    case MULT:
      func = "_Qp_mul";
      break;
    case DIV:
      func = "_Qp_div";
      break;
    default:
      gcc_unreachable ();
    }

  emit_soft_tfmode_libcall (func, 3, operands);
}
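/* For example, a TFmode addition without hard quad support becomes a
   call to the SPARC ABI routine _Qp_add (&result, &a, &b), all three
   quad operands passed by reference as arranged by
   emit_soft_tfmode_libcall above.  */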

static void
emit_soft_tfmode_unop (enum rtx_code code, rtx *operands)
{
  const char *func;

  gcc_assert (code == SQRT);
  func = "_Qp_sqrt";

  emit_soft_tfmode_libcall (func, 2, operands);
}

static void
emit_soft_tfmode_cvt (enum rtx_code code, rtx *operands)
{
  const char *func;

  switch (code)
    {
    case FLOAT_EXTEND:
      switch (GET_MODE (operands[1]))
        {
        case SFmode:
          func = "_Qp_stoq";
          break;
        case DFmode:
          func = "_Qp_dtoq";
          break;
        default:
          gcc_unreachable ();
        }
      break;

    case FLOAT_TRUNCATE:
      switch (GET_MODE (operands[0]))
        {
        case SFmode:
          func = "_Qp_qtos";
          break;
        case DFmode:
          func = "_Qp_qtod";
          break;
        default:
          gcc_unreachable ();
        }
      break;

    case FLOAT:
      switch (GET_MODE (operands[1]))
        {
        case SImode:
          func = "_Qp_itoq";
          if (TARGET_ARCH64)
            operands[1] = gen_rtx_SIGN_EXTEND (DImode, operands[1]);
          break;
        case DImode:
          func = "_Qp_xtoq";
          break;
        default:
          gcc_unreachable ();
        }
      break;

    case UNSIGNED_FLOAT:
      switch (GET_MODE (operands[1]))
        {
        case SImode:
          func = "_Qp_uitoq";
          if (TARGET_ARCH64)
            operands[1] = gen_rtx_ZERO_EXTEND (DImode, operands[1]);
          break;
        case DImode:
          func = "_Qp_uxtoq";
          break;
        default:
          gcc_unreachable ();
        }
      break;

    case FIX:
      switch (GET_MODE (operands[0]))
        {
        case SImode:
          func = "_Qp_qtoi";
          break;
        case DImode:
          func = "_Qp_qtox";
          break;
        default:
          gcc_unreachable ();
        }
      break;

    case UNSIGNED_FIX:
      switch (GET_MODE (operands[0]))
        {
        case SImode:
          func = "_Qp_qtoui";
          break;
        case DImode:
          func = "_Qp_qtoux";
          break;
        default:
          gcc_unreachable ();
        }
      break;

    default:
      gcc_unreachable ();
    }

  emit_soft_tfmode_libcall (func, 2, operands);
}

/* Expand a hard-float tfmode operation.  All arguments must be in
   registers.  */

static void
emit_hard_tfmode_operation (enum rtx_code code, rtx *operands)
{
  rtx op, dest;

  if (GET_RTX_CLASS (code) == RTX_UNARY)
    {
      operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
      op = gen_rtx_fmt_e (code, GET_MODE (operands[0]), operands[1]);
    }
  else
    {
      operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
      operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
      op = gen_rtx_fmt_ee (code, GET_MODE (operands[0]),
                           operands[1], operands[2]);
    }

  if (register_operand (operands[0], VOIDmode))
    dest = operands[0];
  else
    dest = gen_reg_rtx (GET_MODE (operands[0]));

  emit_insn (gen_rtx_SET (VOIDmode, dest, op));

  if (dest != operands[0])
    emit_move_insn (operands[0], dest);
}

void
emit_tfmode_binop (enum rtx_code code, rtx *operands)
{
  if (TARGET_HARD_QUAD)
    emit_hard_tfmode_operation (code, operands);
  else
    emit_soft_tfmode_binop (code, operands);
}

void
emit_tfmode_unop (enum rtx_code code, rtx *operands)
{
  if (TARGET_HARD_QUAD)
    emit_hard_tfmode_operation (code, operands);
  else
    emit_soft_tfmode_unop (code, operands);
}

void
emit_tfmode_cvt (enum rtx_code code, rtx *operands)
{
  if (TARGET_HARD_QUAD)
    emit_hard_tfmode_operation (code, operands);
  else
    emit_soft_tfmode_cvt (code, operands);
}

/* Return nonzero if a branch/jump/call instruction will be emitting a
   nop into its delay slot.  */

int
empty_delay_slot (rtx insn)
{
  rtx seq;

  /* If no previous instruction (should not happen), return true.  */
  if (PREV_INSN (insn) == NULL)
    return 1;

  seq = NEXT_INSN (PREV_INSN (insn));
  if (GET_CODE (PATTERN (seq)) == SEQUENCE)
    return 0;

  return 1;
}

/* Return nonzero if TRIAL can go into the call delay slot.  */

int
tls_call_delay (rtx trial)
{
  rtx pat;

  /* Binutils allows
       call __tls_get_addr, %tgd_call (foo)
        add %l7, %o0, %o0, %tgd_add (foo)
     while Sun as/ld does not.  */
  if (TARGET_GNU_TLS || !TARGET_TLS)
    return 1;

  pat = PATTERN (trial);

  /* We must reject tgd_add{32|64}, i.e.
       (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSGD)))
     and tldm_add{32|64}, i.e.
       (set (reg) (plus (reg) (unspec [(reg) (symbol_ref)] UNSPEC_TLSLDM)))
     for Sun as/ld.  */
  if (GET_CODE (pat) == SET
      && GET_CODE (SET_SRC (pat)) == PLUS)
    {
      rtx unspec = XEXP (SET_SRC (pat), 1);

      if (GET_CODE (unspec) == UNSPEC
          && (XINT (unspec, 1) == UNSPEC_TLSGD
              || XINT (unspec, 1) == UNSPEC_TLSLDM))
        return 0;
    }

  return 1;
}

/* Return nonzero if TRIAL, an insn, can be combined with a 'restore'
   instruction.  RETURN_P is true if the v9 variant 'return' is to be
   considered in the test too.

   TRIAL must be a SET whose destination is a REG appropriate for the
   'restore' instruction or, if RETURN_P is true, for the 'return'
   instruction.  */

static int
eligible_for_restore_insn (rtx trial, bool return_p)
{
  rtx pat = PATTERN (trial);
  rtx src = SET_SRC (pat);

  /* The 'restore src,%g0,dest' pattern for word mode and below.  */
  if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
      && arith_operand (src, GET_MODE (src)))
    {
      if (TARGET_ARCH64)
        return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
      else
        return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
    }

  /* The 'restore src,%g0,dest' pattern for double-word mode.  */
  else if (GET_MODE_CLASS (GET_MODE (src)) != MODE_FLOAT
           && arith_double_operand (src, GET_MODE (src)))
    return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);

  /* The 'restore src,%g0,dest' pattern for float if no FPU.  */
  else if (! TARGET_FPU && register_operand (src, SFmode))
    return 1;

  /* The 'restore src,%g0,dest' pattern for double if no FPU.  */
  else if (! TARGET_FPU && TARGET_ARCH64 && register_operand (src, DFmode))
    return 1;

  /* If we have the 'return' instruction, anything that does not use
     local or output registers and can go into a delay slot wins.  */
  else if (return_p && TARGET_V9 && ! epilogue_renumber (&pat, 1)
           && (get_attr_in_uncond_branch_delay (trial)
               == IN_UNCOND_BRANCH_DELAY_TRUE))
    return 1;

  /* The 'restore src1,src2,dest' pattern for SImode.  */
  else if (GET_CODE (src) == PLUS
           && register_operand (XEXP (src, 0), SImode)
           && arith_operand (XEXP (src, 1), SImode))
    return 1;

  /* The 'restore src1,src2,dest' pattern for DImode.  */
  else if (GET_CODE (src) == PLUS
           && register_operand (XEXP (src, 0), DImode)
           && arith_double_operand (XEXP (src, 1), DImode))
    return 1;

  /* The 'restore src1,%lo(src2),dest' pattern.  */
  else if (GET_CODE (src) == LO_SUM
           && ! TARGET_CM_MEDMID
           && ((register_operand (XEXP (src, 0), SImode)
                && immediate_operand (XEXP (src, 1), SImode))
               || (TARGET_ARCH64
                   && register_operand (XEXP (src, 0), DImode)
                   && immediate_operand (XEXP (src, 1), DImode))))
    return 1;

  /* The 'restore src,src,dest' pattern.  */
  else if (GET_CODE (src) == ASHIFT
           && (register_operand (XEXP (src, 0), SImode)
               || register_operand (XEXP (src, 0), DImode))
           && XEXP (src, 1) == const1_rtx)
    return 1;

  return 0;
}

/* Return nonzero if TRIAL can go into the function return's
   delay slot.  */

int
eligible_for_return_delay (rtx trial)
{
  rtx pat;

  if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
    return 0;

  if (get_attr_length (trial) != 1)
    return 0;

  /* If there are any call-saved registers, we should scan TRIAL to see
     whether it references them.  For now just make it easy.  */
  if (num_gfregs)
    return 0;

  /* If the function uses __builtin_eh_return, the eh_return machinery
     occupies the delay slot.  */
  if (crtl->calls_eh_return)
    return 0;

  /* In the case of a true leaf function, anything can go into the slot.  */
  if (sparc_leaf_function_p)
    return get_attr_in_uncond_branch_delay (trial)
           == IN_UNCOND_BRANCH_DELAY_TRUE;

  pat = PATTERN (trial);

  /* Otherwise, only operations which can be done in tandem with
     a `restore' or `return' insn can go into the delay slot.  */
  if (GET_CODE (SET_DEST (pat)) != REG
      || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24))
    return 0;

  /* If this instruction sets up a floating point register and we have a
     return instruction, it can probably go in.  But restore will not
     work with FP_REGS.  */
  if (REGNO (SET_DEST (pat)) >= 32)
    return (TARGET_V9
            && ! epilogue_renumber (&pat, 1)
            && (get_attr_in_uncond_branch_delay (trial)
                == IN_UNCOND_BRANCH_DELAY_TRUE));

  return eligible_for_restore_insn (trial, true);
}

/* Return nonzero if TRIAL can go into the sibling call's
   delay slot.  */

int
eligible_for_sibcall_delay (rtx trial)
{
  rtx pat;

  if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
    return 0;

  if (get_attr_length (trial) != 1)
    return 0;

  pat = PATTERN (trial);

  if (sparc_leaf_function_p)
    {
      /* If the tail call is done using the call instruction,
         we have to restore %o7 in the delay slot.  */
      if (LEAF_SIBCALL_SLOT_RESERVED_P)
        return 0;

      /* %g1 is used to build the function address.  */
      if (reg_mentioned_p (gen_rtx_REG (Pmode, 1), pat))
        return 0;

      return 1;
    }

  /* Otherwise, only operations which can be done in tandem with
     a `restore' insn can go into the delay slot.  */
  if (GET_CODE (SET_DEST (pat)) != REG
      || (REGNO (SET_DEST (pat)) >= 8 && REGNO (SET_DEST (pat)) < 24)
      || REGNO (SET_DEST (pat)) >= 32)
    return 0;

  /* If it mentions %o7, it can't go in, because sibcall will clobber it
     in most cases.  */
  if (reg_mentioned_p (gen_rtx_REG (Pmode, 15), pat))
    return 0;

  return eligible_for_restore_insn (trial, false);
}

int
short_branch (int uid1, int uid2)
{
  int delta = INSN_ADDRESSES (uid1) - INSN_ADDRESSES (uid2);

  /* Leave a few words of "slop".  */
  if (delta >= -1023 && delta <= 1022)
    return 1;

  return 0;
}

/* Return nonzero if REG is not used after INSN.
   We assume REG is a reload reg, and therefore does
   not live past labels or calls or jumps.  */
int
reg_unused_after (rtx reg, rtx insn)
{
  enum rtx_code code, prev_code = UNKNOWN;

  while ((insn = NEXT_INSN (insn)))
    {
      if (prev_code == CALL_INSN && call_used_regs[REGNO (reg)])
        return 1;

      code = GET_CODE (insn);
      if (GET_CODE (insn) == CODE_LABEL)
        return 1;

      if (INSN_P (insn))
        {
          rtx set = single_set (insn);
          int in_src = set && reg_overlap_mentioned_p (reg, SET_SRC (set));
          if (set && in_src)
            return 0;
          if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
            return 1;
          if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
            return 0;
        }
      prev_code = code;
    }
  return 1;
}

/* Determine if it's legal to put X into the constant pool.  This
   is not possible if X contains the address of a symbol that is
   not constant (TLS) or not known at final link time (PIC).  */

static bool
sparc_cannot_force_const_mem (rtx x)
{
  switch (GET_CODE (x))
    {
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_VECTOR:
      /* Accept all non-symbolic constants.  */
      return false;

    case LABEL_REF:
      /* Labels are OK iff we are non-PIC.  */
      return flag_pic != 0;

    case SYMBOL_REF:
      /* 'Naked' TLS symbol references are never OK,
         non-TLS symbols are OK iff we are non-PIC.  */
      if (SYMBOL_REF_TLS_MODEL (x))
        return true;
      else
        return flag_pic != 0;

    case CONST:
      return sparc_cannot_force_const_mem (XEXP (x, 0));
    case PLUS:
    case MINUS:
      return sparc_cannot_force_const_mem (XEXP (x, 0))
         || sparc_cannot_force_const_mem (XEXP (x, 1));
    case UNSPEC:
      return true;
    default:
      gcc_unreachable ();
    }
}
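/* For example, a thread-local SYMBOL_REF can never be spilled to the
   constant pool, and under -fpic even (const (plus (symbol_ref "foo")
   (const_int 8))) is rejected via the recursive CONST/PLUS cases.  */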

/* PIC support.  */
static GTY(()) bool pic_helper_needed = false;
static GTY(()) rtx pic_helper_symbol;
static GTY(()) rtx global_offset_table;

/* Ensure that we are not using patterns that are not OK with PIC.  */

int
check_pic (int i)
{
  switch (flag_pic)
    {
    case 1:
      gcc_assert (GET_CODE (recog_data.operand[i]) != SYMBOL_REF
                  && (GET_CODE (recog_data.operand[i]) != CONST
                  || (GET_CODE (XEXP (recog_data.operand[i], 0)) == MINUS
                      && (XEXP (XEXP (recog_data.operand[i], 0), 0)
                          == global_offset_table)
                      && (GET_CODE (XEXP (XEXP (recog_data.operand[i], 0), 1))
                          == CONST))));
    case 2:
    default:
      return 1;
    }
}

/* Return true if X is an address which needs a temporary register when
   reloaded while generating PIC code.  */

int
pic_address_needs_scratch (rtx x)
{
  /* An address which is a symbolic plus a non SMALL_INT needs a temp reg.  */
  if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && ! SMALL_INT (XEXP (XEXP (x, 0), 1)))
    return 1;

  return 0;
}
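/* For example, (const (plus (symbol_ref "foo") (const_int 8192)))
   needs a scratch register, because 8192 lies outside the signed
   13-bit range accepted by SMALL_INT.  */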

/* Determine if a given RTX is a valid constant.  We already know this
   satisfies CONSTANT_P.  */

bool
legitimate_constant_p (rtx x)
{
  switch (GET_CODE (x))
    {
    case CONST:
    case SYMBOL_REF:
      if (sparc_tls_referenced_p (x))
        return false;
      break;

    case CONST_DOUBLE:
      if (GET_MODE (x) == VOIDmode)
        return true;

      /* Floating point constants are generally not ok.
         The only exception is 0.0 in VIS.  */
      if (TARGET_VIS
          && SCALAR_FLOAT_MODE_P (GET_MODE (x))
          && const_zero_operand (x, GET_MODE (x)))
        return true;

      return false;

    case CONST_VECTOR:
      /* Vector constants are generally not ok.
         The only exception is 0 in VIS.  */
      if (TARGET_VIS
          && const_zero_operand (x, GET_MODE (x)))
        return true;

      return false;

    default:
      break;
    }

  return true;
}

/* Determine if a given RTX is a valid constant address.  */

bool
constant_address_p (rtx x)
{
  switch (GET_CODE (x))
    {
    case LABEL_REF:
    case CONST_INT:
    case HIGH:
      return true;

    case CONST:
      if (flag_pic && pic_address_needs_scratch (x))
        return false;
      return legitimate_constant_p (x);

    case SYMBOL_REF:
      return !flag_pic && legitimate_constant_p (x);

    default:
      return false;
    }
}

/* Nonzero if the constant value X is a legitimate general operand
   when generating PIC code.  It is given that flag_pic is on and
   that X satisfies CONSTANT_P or is a CONST_DOUBLE.  */

bool
legitimate_pic_operand_p (rtx x)
{
  if (pic_address_needs_scratch (x))
    return false;
  if (sparc_tls_referenced_p (x))
    return false;
  return true;
}

/* Return nonzero if ADDR is a valid memory address.
   STRICT specifies whether strict register checking applies.  */

static bool
sparc_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
{
  rtx rs1 = NULL, rs2 = NULL, imm1 = NULL;

  if (REG_P (addr) || GET_CODE (addr) == SUBREG)
    rs1 = addr;
  else if (GET_CODE (addr) == PLUS)
    {
      rs1 = XEXP (addr, 0);
      rs2 = XEXP (addr, 1);

      /* Canonicalize.  REG comes first; if there are no regs,
         LO_SUM comes first.  */
      if (!REG_P (rs1)
          && GET_CODE (rs1) != SUBREG
          && (REG_P (rs2)
              || GET_CODE (rs2) == SUBREG
              || (GET_CODE (rs2) == LO_SUM && GET_CODE (rs1) != LO_SUM)))
        {
          rs1 = XEXP (addr, 1);
          rs2 = XEXP (addr, 0);
        }

      if ((flag_pic == 1
           && rs1 == pic_offset_table_rtx
           && !REG_P (rs2)
           && GET_CODE (rs2) != SUBREG
           && GET_CODE (rs2) != LO_SUM
           && GET_CODE (rs2) != MEM
           && !(GET_CODE (rs2) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs2))
           && (! symbolic_operand (rs2, VOIDmode) || mode == Pmode)
           && (GET_CODE (rs2) != CONST_INT || SMALL_INT (rs2)))
          || ((REG_P (rs1)
               || GET_CODE (rs1) == SUBREG)
              && RTX_OK_FOR_OFFSET_P (rs2)))
        {
          imm1 = rs2;
          rs2 = NULL;
        }
      else if ((REG_P (rs1) || GET_CODE (rs1) == SUBREG)
               && (REG_P (rs2) || GET_CODE (rs2) == SUBREG))
        {
          /* We prohibit REG + REG for TFmode when there are no quad move insns
             and we consequently need to split.  We do this because REG+REG
             is not an offsettable address.  If we get the situation in reload
             where source and destination of a movtf pattern are both MEMs with
             REG+REG address, then only one of them gets converted to an
             offsettable address.  */
          if (mode == TFmode
              && ! (TARGET_FPU && TARGET_ARCH64 && TARGET_HARD_QUAD))
            return 0;

          /* We prohibit REG + REG on ARCH32 if not optimizing for
             DFmode/DImode because then mem_min_alignment is likely to be zero
             after reload and the forced split would lack a matching splitter
             pattern.  */
          if (TARGET_ARCH32 && !optimize
              && (mode == DFmode || mode == DImode))
            return 0;
        }
      else if (USE_AS_OFFSETABLE_LO10
               && GET_CODE (rs1) == LO_SUM
               && TARGET_ARCH64
               && ! TARGET_CM_MEDMID
               && RTX_OK_FOR_OLO10_P (rs2))
        {
          rs2 = NULL;
          imm1 = XEXP (rs1, 1);
          rs1 = XEXP (rs1, 0);
          if (!CONSTANT_P (imm1)
              || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
            return 0;
        }
    }
  else if (GET_CODE (addr) == LO_SUM)
    {
      rs1 = XEXP (addr, 0);
      imm1 = XEXP (addr, 1);

      if (!CONSTANT_P (imm1)
          || (GET_CODE (rs1) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (rs1)))
        return 0;

      /* We can't allow TFmode in 32-bit mode, because an offset greater
         than the alignment (8) may cause the LO_SUM to overflow.  */
      if (mode == TFmode && TARGET_ARCH32)
        return 0;
    }
  else if (GET_CODE (addr) == CONST_INT && SMALL_INT (addr))
    return 1;
  else
    return 0;

  if (GET_CODE (rs1) == SUBREG)
    rs1 = SUBREG_REG (rs1);
  if (!REG_P (rs1))
    return 0;

  if (rs2)
    {
      if (GET_CODE (rs2) == SUBREG)
        rs2 = SUBREG_REG (rs2);
      if (!REG_P (rs2))
        return 0;
    }

  if (strict)
    {
      if (!REGNO_OK_FOR_BASE_P (REGNO (rs1))
          || (rs2 && !REGNO_OK_FOR_BASE_P (REGNO (rs2))))
        return 0;
    }
  else
    {
      if ((REGNO (rs1) >= 32
           && REGNO (rs1) != FRAME_POINTER_REGNUM
           && REGNO (rs1) < FIRST_PSEUDO_REGISTER)
          || (rs2
              && (REGNO (rs2) >= 32
                  && REGNO (rs2) != FRAME_POINTER_REGNUM
                  && REGNO (rs2) < FIRST_PSEUDO_REGISTER)))
        return 0;
    }
  return 1;
}
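/* For example, (plus (reg %l0) (const_int 100)) and
   (plus (reg %l0) (reg %l1)) are legitimate addresses, while
   (plus (reg %l0) (const_int 8192)) is not, since the offset must fit
   in a signed 13-bit immediate; REG+REG is additionally rejected for
   TFmode without hard quad support, as explained above.  */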
3163
 
3164
/* Construct the SYMBOL_REF for the tls_get_offset function.  */

static GTY(()) rtx sparc_tls_symbol;

static rtx
sparc_tls_get_addr (void)
{
  if (!sparc_tls_symbol)
    sparc_tls_symbol = gen_rtx_SYMBOL_REF (Pmode, "__tls_get_addr");

  return sparc_tls_symbol;
}

static rtx
sparc_tls_got (void)
{
  rtx temp;
  if (flag_pic)
    {
      crtl->uses_pic_offset_table = 1;
      return pic_offset_table_rtx;
    }

  if (!global_offset_table)
    global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
  temp = gen_reg_rtx (Pmode);
  emit_move_insn (temp, global_offset_table);
  return temp;
}

/* Return true if X contains a thread-local symbol.  */

static bool
sparc_tls_referenced_p (rtx x)
{
  if (!TARGET_HAVE_TLS)
    return false;

  if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS)
    x = XEXP (XEXP (x, 0), 0);

  if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x))
    return true;

  /* That's all we handle in legitimize_tls_address for now.  */
  return false;
}

/* ADDR contains a thread-local SYMBOL_REF.  Generate code to compute
   this (thread-local) address.  */

static rtx
legitimize_tls_address (rtx addr)
{
  rtx temp1, temp2, temp3, ret, o0, got, insn;

  gcc_assert (can_create_pseudo_p ());

  if (GET_CODE (addr) == SYMBOL_REF)
    switch (SYMBOL_REF_TLS_MODEL (addr))
      {
      case TLS_MODEL_GLOBAL_DYNAMIC:
        start_sequence ();
        temp1 = gen_reg_rtx (SImode);
        temp2 = gen_reg_rtx (SImode);
        ret = gen_reg_rtx (Pmode);
        o0 = gen_rtx_REG (Pmode, 8);
        got = sparc_tls_got ();
        emit_insn (gen_tgd_hi22 (temp1, addr));
        emit_insn (gen_tgd_lo10 (temp2, temp1, addr));
        if (TARGET_ARCH32)
          {
            emit_insn (gen_tgd_add32 (o0, got, temp2, addr));
            insn = emit_call_insn (gen_tgd_call32 (o0, sparc_tls_get_addr (),
                                                   addr, const1_rtx));
          }
        else
          {
            emit_insn (gen_tgd_add64 (o0, got, temp2, addr));
            insn = emit_call_insn (gen_tgd_call64 (o0, sparc_tls_get_addr (),
                                                   addr, const1_rtx));
          }
        CALL_INSN_FUNCTION_USAGE (insn)
          = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
                               CALL_INSN_FUNCTION_USAGE (insn));
        insn = get_insns ();
        end_sequence ();
        emit_libcall_block (insn, ret, o0, addr);
        break;

      case TLS_MODEL_LOCAL_DYNAMIC:
        start_sequence ();
        temp1 = gen_reg_rtx (SImode);
        temp2 = gen_reg_rtx (SImode);
        temp3 = gen_reg_rtx (Pmode);
        ret = gen_reg_rtx (Pmode);
        o0 = gen_rtx_REG (Pmode, 8);
        got = sparc_tls_got ();
        emit_insn (gen_tldm_hi22 (temp1));
        emit_insn (gen_tldm_lo10 (temp2, temp1));
        if (TARGET_ARCH32)
          {
            emit_insn (gen_tldm_add32 (o0, got, temp2));
            insn = emit_call_insn (gen_tldm_call32 (o0, sparc_tls_get_addr (),
                                                    const1_rtx));
          }
        else
          {
            emit_insn (gen_tldm_add64 (o0, got, temp2));
            insn = emit_call_insn (gen_tldm_call64 (o0, sparc_tls_get_addr (),
                                                    const1_rtx));
          }
        CALL_INSN_FUNCTION_USAGE (insn)
          = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_USE (VOIDmode, o0),
                               CALL_INSN_FUNCTION_USAGE (insn));
        insn = get_insns ();
        end_sequence ();
        emit_libcall_block (insn, temp3, o0,
                            gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
                                            UNSPEC_TLSLD_BASE));
        temp1 = gen_reg_rtx (SImode);
        temp2 = gen_reg_rtx (SImode);
        emit_insn (gen_tldo_hix22 (temp1, addr));
        emit_insn (gen_tldo_lox10 (temp2, temp1, addr));
        if (TARGET_ARCH32)
          emit_insn (gen_tldo_add32 (ret, temp3, temp2, addr));
        else
          emit_insn (gen_tldo_add64 (ret, temp3, temp2, addr));
        break;

      case TLS_MODEL_INITIAL_EXEC:
        temp1 = gen_reg_rtx (SImode);
        temp2 = gen_reg_rtx (SImode);
        temp3 = gen_reg_rtx (Pmode);
        got = sparc_tls_got ();
        emit_insn (gen_tie_hi22 (temp1, addr));
        emit_insn (gen_tie_lo10 (temp2, temp1, addr));
        if (TARGET_ARCH32)
          emit_insn (gen_tie_ld32 (temp3, got, temp2, addr));
        else
          emit_insn (gen_tie_ld64 (temp3, got, temp2, addr));
        if (TARGET_SUN_TLS)
          {
            ret = gen_reg_rtx (Pmode);
            if (TARGET_ARCH32)
              emit_insn (gen_tie_add32 (ret, gen_rtx_REG (Pmode, 7),
                                        temp3, addr));
            else
              emit_insn (gen_tie_add64 (ret, gen_rtx_REG (Pmode, 7),
                                        temp3, addr));
          }
        else
          ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp3);
        break;

      case TLS_MODEL_LOCAL_EXEC:
        temp1 = gen_reg_rtx (Pmode);
        temp2 = gen_reg_rtx (Pmode);
        if (TARGET_ARCH32)
          {
            emit_insn (gen_tle_hix22_sp32 (temp1, addr));
            emit_insn (gen_tle_lox10_sp32 (temp2, temp1, addr));
          }
        else
          {
            emit_insn (gen_tle_hix22_sp64 (temp1, addr));
            emit_insn (gen_tle_lox10_sp64 (temp2, temp1, addr));
          }
        ret = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, 7), temp2);
        break;

      default:
        gcc_unreachable ();
      }

  else if (GET_CODE (addr) == CONST)
    {
      rtx base, offset;

      gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS);

      base = legitimize_tls_address (XEXP (XEXP (addr, 0), 0));
      offset = XEXP (XEXP (addr, 0), 1);

      base = force_operand (base, NULL_RTX);
      if (!(GET_CODE (offset) == CONST_INT && SMALL_INT (offset)))
        offset = force_reg (Pmode, offset);
      ret = gen_rtx_PLUS (Pmode, base, offset);
    }

  else
    gcc_unreachable ();  /* for now ... */

  return ret;
}
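
/* For illustration (a sketch, not the authoritative template -- see the
   tgd_* patterns in sparc.md): on 32-bit targets the global dynamic case
   above produces a sequence along these lines, assuming %l7 holds the
   GOT pointer:

        sethi   %tgd_hi22(sym), %o0
        add     %o0, %tgd_lo10(sym), %o0
        add     %l7, %o0, %o0, %tgd_add(sym)
        call    __tls_get_addr, %tgd_call(sym)
         nop

   The %tgd_* annotations let the linker relax this into the cheaper
   initial-exec or local-exec forms when the symbol turns out to bind
   locally.  */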

/* Legitimize PIC addresses.  If the address is already position-independent,
   we return ORIG.  Newly generated position-independent addresses go into a
   reg.  This is REG if nonzero, otherwise we allocate register(s) as
   necessary.  */

static rtx
legitimize_pic_address (rtx orig, rtx reg)
{
  bool gotdata_op = false;

  if (GET_CODE (orig) == SYMBOL_REF
      /* See the comment in sparc_expand_move.  */
      || (TARGET_VXWORKS_RTP && GET_CODE (orig) == LABEL_REF))
    {
      rtx pic_ref, address;
      rtx insn;

      if (reg == 0)
        {
          gcc_assert (! reload_in_progress && ! reload_completed);
          reg = gen_reg_rtx (Pmode);
        }

      if (flag_pic == 2)
        {
          /* If not during reload, allocate another temp reg here for loading
             in the address, so that these instructions can be optimized
             properly.  */
          rtx temp_reg = ((reload_in_progress || reload_completed)
                          ? reg : gen_reg_rtx (Pmode));

          /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
             won't get confused into thinking that these two instructions
             are loading in the true address of the symbol.  If in the
             future a PIC rtx exists, that should be used instead.  */
          if (TARGET_ARCH64)
            {
              emit_insn (gen_movdi_high_pic (temp_reg, orig));
              emit_insn (gen_movdi_lo_sum_pic (temp_reg, temp_reg, orig));
            }
          else
            {
              emit_insn (gen_movsi_high_pic (temp_reg, orig));
              emit_insn (gen_movsi_lo_sum_pic (temp_reg, temp_reg, orig));
            }
          address = temp_reg;
          gotdata_op = true;
        }
      else
        address = orig;

      crtl->uses_pic_offset_table = 1;
      if (gotdata_op)
        {
          if (TARGET_ARCH64)
            insn = emit_insn (gen_movdi_pic_gotdata_op (reg, pic_offset_table_rtx,
                                                        address, orig));
          else
            insn = emit_insn (gen_movsi_pic_gotdata_op (reg, pic_offset_table_rtx,
                                                        address, orig));
        }
      else
        {
          pic_ref = gen_const_mem (Pmode,
                                   gen_rtx_PLUS (Pmode,
                                                 pic_offset_table_rtx, address));
          insn = emit_move_insn (reg, pic_ref);
        }
      /* Put a REG_EQUAL note on this insn, so that it can be optimized
         by loop.  */
      set_unique_reg_note (insn, REG_EQUAL, orig);
      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base, offset;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
          && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
        return orig;

      if (reg == 0)
        {
          gcc_assert (! reload_in_progress && ! reload_completed);
          reg = gen_reg_rtx (Pmode);
        }

      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), reg);
      offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
                                       base == reg ? NULL_RTX : reg);

      if (GET_CODE (offset) == CONST_INT)
        {
          if (SMALL_INT (offset))
            return plus_constant (base, INTVAL (offset));
          else if (! reload_in_progress && ! reload_completed)
            offset = force_reg (Pmode, offset);
          else
            /* If we reach here, then something is seriously wrong.  */
            gcc_unreachable ();
        }
      return gen_rtx_PLUS (Pmode, base, offset);
    }
  else if (GET_CODE (orig) == LABEL_REF)
    /* ??? Why do we do this?  */
    /* Now movsi_pic_label_ref uses it, but we ought to be checking that
       the register is live instead, in case it is eliminated.  */
    crtl->uses_pic_offset_table = 1;

  return orig;
}
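
/* For illustration (a sketch, not the authoritative output -- see the
   movsi_high_pic, movsi_lo_sum_pic and movsi_pic_gotdata_op patterns in
   sparc.md): on 32-bit targets the flag_pic == 2 path above produces
   roughly

        sethi   %gdop_hix22(sym), %g1
        xor     %g1, %gdop_lox10(sym), %g1
        ld      [%l7 + %g1], %g1, %gdop(sym)

   where the %gdop annotations allow GOT-data-aware linkers to relax the
   GOT load into a direct address computation when SYM binds locally.  */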

/* Try machine-dependent ways of modifying an illegitimate address X
   to be legitimate.  If we find one, return the new, valid address.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   MODE is the mode of the operand pointed to by X.

   On SPARC, change REG+N into REG+REG, and REG+(X*Y) into REG+REG.  */

static rtx
sparc_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
                          enum machine_mode mode)
{
  rtx orig_x = x;

  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT)
    x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
                      force_operand (XEXP (x, 0), NULL_RTX));
  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == MULT)
    x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
                      force_operand (XEXP (x, 1), NULL_RTX));
  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS)
    x = gen_rtx_PLUS (Pmode, force_operand (XEXP (x, 0), NULL_RTX),
                      XEXP (x, 1));
  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == PLUS)
    x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
                      force_operand (XEXP (x, 1), NULL_RTX));

  if (x != orig_x && sparc_legitimate_address_p (mode, x, FALSE))
    return x;

  if (sparc_tls_referenced_p (x))
    x = legitimize_tls_address (x);
  else if (flag_pic)
    x = legitimize_pic_address (x, NULL_RTX);
  else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 1)))
    x = gen_rtx_PLUS (Pmode, XEXP (x, 0),
                      copy_to_mode_reg (Pmode, XEXP (x, 1)));
  else if (GET_CODE (x) == PLUS && CONSTANT_ADDRESS_P (XEXP (x, 0)))
    x = gen_rtx_PLUS (Pmode, XEXP (x, 1),
                      copy_to_mode_reg (Pmode, XEXP (x, 0)));
  else if (GET_CODE (x) == SYMBOL_REF
           || GET_CODE (x) == CONST
           || GET_CODE (x) == LABEL_REF)
    x = copy_to_suggested_reg (x, NULL_RTX, Pmode);

  return x;
}

#ifdef HAVE_GAS_HIDDEN
# define USE_HIDDEN_LINKONCE 1
#else
# define USE_HIDDEN_LINKONCE 0
#endif

static void
get_pc_thunk_name (char name[32], unsigned int regno)
{
  const char *pic_name = reg_names[regno];

  /* Skip the leading '%' as that cannot be used in a
     symbol name.  */
  pic_name += 1;

  if (USE_HIDDEN_LINKONCE)
    sprintf (name, "__sparc_get_pc_thunk.%s", pic_name);
  else
    ASM_GENERATE_INTERNAL_LABEL (name, "LADDPC", regno);
}
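
/* For example, with the usual PIC register %l7 and USE_HIDDEN_LINKONCE,
   the buffer receives "__sparc_get_pc_thunk.l7"; otherwise an internal
   LADDPC label keyed by the register number is generated instead.  */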

/* Emit code to load the PIC register.  */

static void
load_pic_register (void)
{
  int orig_flag_pic = flag_pic;

  if (TARGET_VXWORKS_RTP)
    {
      emit_insn (gen_vxworks_load_got ());
      emit_use (pic_offset_table_rtx);
      return;
    }

  /* If we haven't initialized the special PIC symbols, do so now.  */
  if (!pic_helper_needed)
    {
      char name[32];

      pic_helper_needed = true;

      get_pc_thunk_name (name, REGNO (pic_offset_table_rtx));
      pic_helper_symbol = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));

      global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
    }

  flag_pic = 0;
  if (TARGET_ARCH64)
    emit_insn (gen_load_pcrel_symdi (pic_offset_table_rtx, global_offset_table,
                                     pic_helper_symbol));
  else
    emit_insn (gen_load_pcrel_symsi (pic_offset_table_rtx, global_offset_table,
                                     pic_helper_symbol));
  flag_pic = orig_flag_pic;

  /* Need to emit this whether or not we obey regdecls,
     since setjmp/longjmp can cause life info to screw up.
     ??? In the case where we don't obey regdecls, this is not sufficient
     since we may not fall out the bottom.  */
  emit_use (pic_offset_table_rtx);
}

/* Emit a call instruction with the pattern given by PAT.  ADDR is the
   address of the call target.  */

void
sparc_emit_call_insn (rtx pat, rtx addr)
{
  rtx insn;

  insn = emit_call_insn (pat);

  /* The PIC register is live on entry to VxWorks PIC PLT entries.  */
  if (TARGET_VXWORKS_RTP
      && flag_pic
      && GET_CODE (addr) == SYMBOL_REF
      && (SYMBOL_REF_DECL (addr)
          ? !targetm.binds_local_p (SYMBOL_REF_DECL (addr))
          : !SYMBOL_REF_LOCAL_P (addr)))
    {
      use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
      crtl->uses_pic_offset_table = 1;
    }
}

/* Return 1 if RTX is a MEM which is known to be aligned to at
   least a DESIRED byte boundary.  */

int
mem_min_alignment (rtx mem, int desired)
{
  rtx addr, base, offset;

  /* If it's not a MEM we can't accept it.  */
  if (GET_CODE (mem) != MEM)
    return 0;

  /* Obviously...  */
  if (!TARGET_UNALIGNED_DOUBLES
      && MEM_ALIGN (mem) / BITS_PER_UNIT >= (unsigned)desired)
    return 1;

  /* ??? The rest of the function predates MEM_ALIGN so
     there is probably a bit of redundancy.  */
  addr = XEXP (mem, 0);
  base = offset = NULL_RTX;
  if (GET_CODE (addr) == PLUS)
    {
      if (GET_CODE (XEXP (addr, 0)) == REG)
        {
          base = XEXP (addr, 0);

          /* What we are saying here is that if the base
             REG is aligned properly, the compiler will make
             sure any REG based index upon it will be so
             as well.  */
          if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
            offset = XEXP (addr, 1);
          else
            offset = const0_rtx;
        }
    }
  else if (GET_CODE (addr) == REG)
    {
      base = addr;
      offset = const0_rtx;
    }

  if (base != NULL_RTX)
    {
      int regno = REGNO (base);

      if (regno != HARD_FRAME_POINTER_REGNUM && regno != STACK_POINTER_REGNUM)
        {
          /* Check if the compiler has recorded some information
             about the alignment of the base REG.  If reload has
             completed, we already matched with proper alignments.
             If not running global_alloc, reload might give us
             unaligned pointer to local stack though.  */
          if (((cfun != 0
                && REGNO_POINTER_ALIGN (regno) >= desired * BITS_PER_UNIT)
               || (optimize && reload_completed))
              && (INTVAL (offset) & (desired - 1)) == 0)
            return 1;
        }
      else
        {
          if (((INTVAL (offset) - SPARC_STACK_BIAS) & (desired - 1)) == 0)
            return 1;
        }
    }
  else if (! TARGET_UNALIGNED_DOUBLES
           || CONSTANT_P (addr)
           || GET_CODE (addr) == LO_SUM)
    {
      /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
         is true, in which case we can only assume that an access is aligned if
         it is to a constant address, or the address involves a LO_SUM.  */
      return 1;
    }

  /* An obviously unaligned address.  */
  return 0;
}
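
/* For illustration: a typical caller asks

     if (mem_min_alignment (mem, 8))
       ... use a single doubleword ldd/std ...

   i.e. the predicate guards doubleword loads/stores that would trap on
   a misaligned address.  */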


/* Vectors to keep interesting information about registers where it can easily
   be got.  We used to use the actual mode value as the bit number, but there
   are more than 32 modes now.  Instead we use two tables: one indexed by
   hard register number, and one indexed by mode.  */

/* The purpose of sparc_mode_class is to shrink the range of modes so that
   they all fit (as bit numbers) in a 32-bit word (again).  Each real mode is
   mapped into one sparc_mode_class mode.  */

enum sparc_mode_class {
  S_MODE, D_MODE, T_MODE, O_MODE,
  SF_MODE, DF_MODE, TF_MODE, OF_MODE,
  CC_MODE, CCFP_MODE
};

/* Modes for single-word and smaller quantities.  */
#define S_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))

/* Modes for double-word and smaller quantities.  */
#define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << DF_MODE))

/* Modes for quad-word and smaller quantities.  */
#define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))

/* Modes for 8-word and smaller quantities.  */
#define O_MODES (T_MODES | (1 << (int) O_MODE) | (1 << (int) OF_MODE))

/* Modes for single-float quantities.  We must allow any single word or
   smaller quantity.  This is because the fix/float conversion instructions
   take integer inputs/outputs from the float registers.  */
#define SF_MODES (S_MODES)

/* Modes for double-float and smaller quantities.  */
#define DF_MODES (D_MODES)

/* Modes for quad-float and smaller quantities.  */
#define TF_MODES (DF_MODES | (1 << (int) TF_MODE))

/* Modes for quad-float pairs and smaller quantities.  */
#define OF_MODES (TF_MODES | (1 << (int) OF_MODE))

/* Modes for double-float only quantities.  */
#define DF_MODES_NO_S ((1 << (int) D_MODE) | (1 << (int) DF_MODE))

/* Modes for quad-float and double-float only quantities.  */
#define TF_MODES_NO_S (DF_MODES_NO_S | (1 << (int) TF_MODE))

/* Modes for quad-float pairs and double-float only quantities.  */
#define OF_MODES_NO_S (TF_MODES_NO_S | (1 << (int) OF_MODE))

/* Modes for condition codes.  */
#define CC_MODES (1 << (int) CC_MODE)
#define CCFP_MODES (1 << (int) CCFP_MODE)
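
/* For illustration, with the enumeration above (S_MODE = 0, ...,
   CCFP_MODE = 9) the cumulative masks work out to:

     S_MODES = 0x011   bits S_MODE and SF_MODE
     D_MODES = 0x033   plus D_MODE and DF_MODE
     T_MODES = 0x077   plus T_MODE and TF_MODE
     O_MODES = 0x0ff   plus O_MODE and OF_MODE

   A register accepts a mode when its entry in hard_regno_mode_classes
   below ANDed with sparc_mode_class[MODE] is nonzero.  */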

/* Value is 1 if register/mode pair is acceptable on sparc.
   The funny mixture of D and T modes is because integer operations
   do not specially operate on tetra quantities, so non-quad-aligned
   registers can hold quadword quantities (except %o4 and %i4 because
   they cross fixed registers).  */

/* This points to either the 32 bit or the 64 bit version.  */
const int *hard_regno_mode_classes;

static const int hard_32bit_mode_classes[] = {
  S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
  T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
  T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
  T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,

  OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
  OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
  OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
  OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,

  /* FP regs f32 to f63.  Only the even numbered registers actually exist,
     and none can hold SFmode/SImode values.  */
  OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
  OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
  OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
  OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,

  /* %fcc[0123] */
  CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,

  /* %icc */
  CC_MODES
};

static const int hard_64bit_mode_classes[] = {
  D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
  O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
  T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
  O_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,

  OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
  OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
  OF_MODES, SF_MODES, DF_MODES, SF_MODES, OF_MODES, SF_MODES, DF_MODES, SF_MODES,
  OF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,

  /* FP regs f32 to f63.  Only the even numbered registers actually exist,
     and none can hold SFmode/SImode values.  */
  OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
  OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
  OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, OF_MODES_NO_S, 0, DF_MODES_NO_S, 0,
  OF_MODES_NO_S, 0, DF_MODES_NO_S, 0, TF_MODES_NO_S, 0, DF_MODES_NO_S, 0,

  /* %fcc[0123] */
  CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,

  /* %icc */
  CC_MODES
};

int sparc_mode_class [NUM_MACHINE_MODES];

enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];

static void
sparc_init_modes (void)
{
  int i;

  for (i = 0; i < NUM_MACHINE_MODES; i++)
    {
      switch (GET_MODE_CLASS (i))
        {
        case MODE_INT:
        case MODE_PARTIAL_INT:
        case MODE_COMPLEX_INT:
          if (GET_MODE_SIZE (i) <= 4)
            sparc_mode_class[i] = 1 << (int) S_MODE;
          else if (GET_MODE_SIZE (i) == 8)
            sparc_mode_class[i] = 1 << (int) D_MODE;
          else if (GET_MODE_SIZE (i) == 16)
            sparc_mode_class[i] = 1 << (int) T_MODE;
          else if (GET_MODE_SIZE (i) == 32)
            sparc_mode_class[i] = 1 << (int) O_MODE;
          else
            sparc_mode_class[i] = 0;
          break;
        case MODE_VECTOR_INT:
          if (GET_MODE_SIZE (i) <= 4)
            sparc_mode_class[i] = 1 << (int)SF_MODE;
          else if (GET_MODE_SIZE (i) == 8)
            sparc_mode_class[i] = 1 << (int)DF_MODE;
          break;
        case MODE_FLOAT:
        case MODE_COMPLEX_FLOAT:
          if (GET_MODE_SIZE (i) <= 4)
            sparc_mode_class[i] = 1 << (int) SF_MODE;
          else if (GET_MODE_SIZE (i) == 8)
            sparc_mode_class[i] = 1 << (int) DF_MODE;
          else if (GET_MODE_SIZE (i) == 16)
            sparc_mode_class[i] = 1 << (int) TF_MODE;
          else if (GET_MODE_SIZE (i) == 32)
            sparc_mode_class[i] = 1 << (int) OF_MODE;
          else
            sparc_mode_class[i] = 0;
          break;
        case MODE_CC:
          if (i == (int) CCFPmode || i == (int) CCFPEmode)
            sparc_mode_class[i] = 1 << (int) CCFP_MODE;
          else
            sparc_mode_class[i] = 1 << (int) CC_MODE;
          break;
        default:
          sparc_mode_class[i] = 0;
          break;
        }
    }

  if (TARGET_ARCH64)
    hard_regno_mode_classes = hard_64bit_mode_classes;
  else
    hard_regno_mode_classes = hard_32bit_mode_classes;

  /* Initialize the array used by REGNO_REG_CLASS.  */
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      if (i < 16 && TARGET_V8PLUS)
        sparc_regno_reg_class[i] = I64_REGS;
      else if (i < 32 || i == FRAME_POINTER_REGNUM)
        sparc_regno_reg_class[i] = GENERAL_REGS;
      else if (i < 64)
        sparc_regno_reg_class[i] = FP_REGS;
      else if (i < 96)
        sparc_regno_reg_class[i] = EXTRA_FP_REGS;
      else if (i < 100)
        sparc_regno_reg_class[i] = FPCC_REGS;
      else
        sparc_regno_reg_class[i] = NO_REGS;
    }
}

/* Compute the frame size required by the function.  This function is called
   during the reload pass and also by sparc_expand_prologue.  */

HOST_WIDE_INT
sparc_compute_frame_size (HOST_WIDE_INT size, int leaf_function_p)
{
  int outgoing_args_size = (crtl->outgoing_args_size
                            + REG_PARM_STACK_SPACE (current_function_decl));
  int n_regs = 0;  /* N_REGS is the number of 4-byte regs saved thus far.  */
  int i;

  if (TARGET_ARCH64)
    {
      for (i = 0; i < 8; i++)
        if (df_regs_ever_live_p (i) && ! call_used_regs[i])
          n_regs += 2;
    }
  else
    {
      for (i = 0; i < 8; i += 2)
        if ((df_regs_ever_live_p (i) && ! call_used_regs[i])
            || (df_regs_ever_live_p (i+1) && ! call_used_regs[i+1]))
          n_regs += 2;
    }

  for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
    if ((df_regs_ever_live_p (i) && ! call_used_regs[i])
        || (df_regs_ever_live_p (i+1) && ! call_used_regs[i+1]))
      n_regs += 2;

  /* Set up values for use in prologue and epilogue.  */
  num_gfregs = n_regs;

  if (leaf_function_p
      && n_regs == 0
      && size == 0
      && crtl->outgoing_args_size == 0)
    actual_fsize = apparent_fsize = 0;
  else
    {
      /* We subtract STARTING_FRAME_OFFSET, remember it's negative.  */
      apparent_fsize = (size - STARTING_FRAME_OFFSET + 7) & -8;
      apparent_fsize += n_regs * 4;
      actual_fsize = apparent_fsize + ((outgoing_args_size + 7) & -8);
    }

  /* Make sure nothing can clobber our register windows.
     If a SAVE must be done, or there is a stack-local variable,
     the register window area must be allocated.  */
  if (! leaf_function_p || size > 0)
    actual_fsize += FIRST_PARM_OFFSET (current_function_decl);

  return SPARC_STACK_ALIGN (actual_fsize);
}
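
/* For illustration: a leaf function with no saved registers, no locals
   and no outgoing arguments takes the first branch above and gets a
   frame size of 0, so no SAVE is needed.  Any non-leaf function pays at
   least FIRST_PARM_OFFSET -- the 16-word register window save area plus
   the hidden parameter slot -- before SPARC_STACK_ALIGN rounds the total
   up to the required stack boundary.  */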

/* Output any necessary .register pseudo-ops.  */

void
sparc_output_scratch_registers (FILE *file ATTRIBUTE_UNUSED)
{
#ifdef HAVE_AS_REGISTER_PSEUDO_OP
  int i;

  if (TARGET_ARCH32)
    return;

  /* Check if %g[2367] were used without
     .register being printed for them already.  */
  for (i = 2; i < 8; i++)
    {
      if (df_regs_ever_live_p (i)
          && ! sparc_hard_reg_printed [i])
        {
          sparc_hard_reg_printed [i] = 1;
          /* %g7 is used as TLS base register, use #ignore
             for it instead of #scratch.  */
          fprintf (file, "\t.register\t%%g%d, #%s\n", i,
                   i == 7 ? "ignore" : "scratch");
        }
      if (i == 3) i = 5;
    }
#endif
}

/* Save/restore call-saved registers from LOW to HIGH at BASE+OFFSET
   as needed.  LOW should be double-word aligned for 32-bit registers.
   Return the new OFFSET.  */

#define SORR_SAVE    0
#define SORR_RESTORE 1

static int
save_or_restore_regs (int low, int high, rtx base, int offset, int action)
{
  rtx mem, insn;
  int i;

  if (TARGET_ARCH64 && high <= 32)
    {
      for (i = low; i < high; i++)
        {
          if (df_regs_ever_live_p (i) && ! call_used_regs[i])
            {
              mem = gen_rtx_MEM (DImode, plus_constant (base, offset));
              set_mem_alias_set (mem, sparc_sr_alias_set);
              if (action == SORR_SAVE)
                {
                  insn = emit_move_insn (mem, gen_rtx_REG (DImode, i));
                  RTX_FRAME_RELATED_P (insn) = 1;
                }
              else  /* action == SORR_RESTORE */
                emit_move_insn (gen_rtx_REG (DImode, i), mem);
              offset += 8;
            }
        }
    }
  else
    {
      for (i = low; i < high; i += 2)
        {
          bool reg0 = df_regs_ever_live_p (i) && ! call_used_regs[i];
          bool reg1 = df_regs_ever_live_p (i+1) && ! call_used_regs[i+1];
          enum machine_mode mode;
          int regno;

          if (reg0 && reg1)
            {
              mode = i < 32 ? DImode : DFmode;
              regno = i;
            }
          else if (reg0)
            {
              mode = i < 32 ? SImode : SFmode;
              regno = i;
            }
          else if (reg1)
            {
              mode = i < 32 ? SImode : SFmode;
              regno = i + 1;
              offset += 4;
            }
          else
            continue;

          mem = gen_rtx_MEM (mode, plus_constant (base, offset));
          set_mem_alias_set (mem, sparc_sr_alias_set);
          if (action == SORR_SAVE)
            {
              insn = emit_move_insn (mem, gen_rtx_REG (mode, regno));
              RTX_FRAME_RELATED_P (insn) = 1;
            }
          else  /* action == SORR_RESTORE */
            emit_move_insn (gen_rtx_REG (mode, regno), mem);

          /* Always preserve double-word alignment.  */
          offset = (offset + 7) & -8;
        }
    }

  return offset;
}

/* Emit code to save call-saved registers.  */

static void
emit_save_or_restore_regs (int action)
{
  HOST_WIDE_INT offset;
  rtx base;

  offset = frame_base_offset - apparent_fsize;

  if (offset < -4096 || offset + num_gfregs * 4 > 4095)
    {
      /* ??? This might be optimized a little as %g1 might already have a
         value close enough that a single add insn will do.  */
      /* ??? Although, all of this is probably only a temporary fix
         because if %g1 can hold a function result, then
         sparc_expand_epilogue will lose (the result will be
         clobbered).  */
      base = gen_rtx_REG (Pmode, 1);
      emit_move_insn (base, GEN_INT (offset));
      emit_insn (gen_rtx_SET (VOIDmode,
                              base,
                              gen_rtx_PLUS (Pmode, frame_base_reg, base)));
      offset = 0;
    }
  else
    base = frame_base_reg;

  offset = save_or_restore_regs (0, 8, base, offset, action);
  save_or_restore_regs (32, TARGET_V9 ? 96 : 64, base, offset, action);
}

/* Generate a save_register_window insn.  */

static rtx
gen_save_register_window (rtx increment)
{
  if (TARGET_ARCH64)
    return gen_save_register_windowdi (increment);
  else
    return gen_save_register_windowsi (increment);
}

/* Generate an increment for the stack pointer.  */

static rtx
gen_stack_pointer_inc (rtx increment)
{
  return gen_rtx_SET (VOIDmode,
                      stack_pointer_rtx,
                      gen_rtx_PLUS (Pmode,
                                    stack_pointer_rtx,
                                    increment));
}

/* Generate a decrement for the stack pointer.  */

static rtx
gen_stack_pointer_dec (rtx decrement)
{
  return gen_rtx_SET (VOIDmode,
                      stack_pointer_rtx,
                      gen_rtx_MINUS (Pmode,
                                     stack_pointer_rtx,
                                     decrement));
}

/* Expand the function prologue.  The prologue is responsible for reserving
   storage for the frame, saving the call-saved registers and loading the
   PIC register if needed.  */

void
sparc_expand_prologue (void)
{
  rtx insn;
  int i;

  /* Compute a snapshot of current_function_uses_only_leaf_regs.  Relying
     on the final value of the flag means deferring the prologue/epilogue
     expansion until just before the second scheduling pass, which is too
     late to emit multiple epilogues or return insns.

     Of course we are making the assumption that the value of the flag
     will not change between now and its final value.  Of the three parts
     of the formula, only the last one can reasonably vary.  Let's take a
     closer look, after assuming that the first two ones are set to true
     (otherwise the last value is effectively silenced).

     If only_leaf_regs_used returns false, the global predicate will also
     be false so the actual frame size calculated below will be positive.
     As a consequence, the save_register_window insn will be emitted in
     the instruction stream; now this insn explicitly references %fp
     which is not a leaf register so only_leaf_regs_used will always
     return false subsequently.

     If only_leaf_regs_used returns true, we hope that the subsequent
     optimization passes won't cause non-leaf registers to pop up.  For
     example, the regrename pass has special provisions to not rename to
     non-leaf registers in a leaf function.  */
  sparc_leaf_function_p
    = optimize > 0 && leaf_function_p () && only_leaf_regs_used ();

  /* Need to use actual_fsize, since we are also allocating
     space for our callee (and our own register save area).  */
  actual_fsize
    = sparc_compute_frame_size (get_frame_size(), sparc_leaf_function_p);

  /* Advertise that the data calculated just above are now valid.  */
  sparc_prologue_data_valid_p = true;

  if (sparc_leaf_function_p)
    {
      frame_base_reg = stack_pointer_rtx;
      frame_base_offset = actual_fsize + SPARC_STACK_BIAS;
    }
  else
    {
      frame_base_reg = hard_frame_pointer_rtx;
      frame_base_offset = SPARC_STACK_BIAS;
    }

  if (actual_fsize == 0)
    /* do nothing.  */ ;
  else if (sparc_leaf_function_p)
    {
      if (actual_fsize <= 4096)
        insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-actual_fsize)));
      else if (actual_fsize <= 8192)
        {
          insn = emit_insn (gen_stack_pointer_inc (GEN_INT (-4096)));
          /* %sp is still the CFA register.  */
          RTX_FRAME_RELATED_P (insn) = 1;
          insn
            = emit_insn (gen_stack_pointer_inc (GEN_INT (4096-actual_fsize)));
        }
      else
        {
          rtx reg = gen_rtx_REG (Pmode, 1);
          emit_move_insn (reg, GEN_INT (-actual_fsize));
          insn = emit_insn (gen_stack_pointer_inc (reg));
          add_reg_note (insn, REG_FRAME_RELATED_EXPR,
                        gen_stack_pointer_inc (GEN_INT (-actual_fsize)));
        }

      RTX_FRAME_RELATED_P (insn) = 1;
    }
  else
    {
      if (actual_fsize <= 4096)
        insn = emit_insn (gen_save_register_window (GEN_INT (-actual_fsize)));
      else if (actual_fsize <= 8192)
        {
          insn = emit_insn (gen_save_register_window (GEN_INT (-4096)));
          /* %sp is not the CFA register anymore.  */
          emit_insn (gen_stack_pointer_inc (GEN_INT (4096-actual_fsize)));
        }
      else
        {
          rtx reg = gen_rtx_REG (Pmode, 1);
          emit_move_insn (reg, GEN_INT (-actual_fsize));
          insn = emit_insn (gen_save_register_window (reg));
        }

      RTX_FRAME_RELATED_P (insn) = 1;
      for (i=0; i < XVECLEN (PATTERN (insn), 0); i++)
        RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, i)) = 1;
    }

  if (num_gfregs)
    emit_save_or_restore_regs (SORR_SAVE);

  /* Load the PIC register if needed.  */
  if (flag_pic && crtl->uses_pic_offset_table)
    load_pic_register ();
}
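
/* For illustration (a sketch, not the authoritative output): a small
   non-leaf frame comes out as a single

        save    %sp, -framesize, %sp

   while a frame larger than 8192 bytes first materializes the size in
   %g1, roughly

        sethi   %hi(-framesize), %g1
        or      %g1, %lo(-framesize), %g1
        save    %sp, %g1, %sp

   via the emit_move_insn / gen_save_register_window pair above.  */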

/* This function generates the assembly code for function entry, which boils
   down to emitting the necessary .register directives.  */

static void
sparc_asm_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  /* Check that the assumption we made in sparc_expand_prologue is valid.  */
  gcc_assert (sparc_leaf_function_p == current_function_uses_only_leaf_regs);

  sparc_output_scratch_registers (file);
}

/* Expand the function epilogue, either normal or part of a sibcall.
   We emit all the instructions except the return or the call.  */

void
sparc_expand_epilogue (void)
{
  if (num_gfregs)
    emit_save_or_restore_regs (SORR_RESTORE);

  if (actual_fsize == 0)
    /* do nothing.  */ ;
  else if (sparc_leaf_function_p)
    {
      if (actual_fsize <= 4096)
        emit_insn (gen_stack_pointer_dec (GEN_INT (- actual_fsize)));
      else if (actual_fsize <= 8192)
        {
          emit_insn (gen_stack_pointer_dec (GEN_INT (-4096)));
          emit_insn (gen_stack_pointer_dec (GEN_INT (4096 - actual_fsize)));
        }
      else
        {
          rtx reg = gen_rtx_REG (Pmode, 1);
          emit_move_insn (reg, GEN_INT (-actual_fsize));
          emit_insn (gen_stack_pointer_dec (reg));
        }
    }
}

/* Return true if it is appropriate to emit `return' instructions in the
   body of a function.  */

bool
sparc_can_use_return_insn_p (void)
{
  return sparc_prologue_data_valid_p
         && (actual_fsize == 0 || !sparc_leaf_function_p);
}

/* This function generates the assembly code for function exit.  */

static void
sparc_asm_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  /* If code does not drop into the epilogue, we still have to output
     a dummy nop for the sake of sane backtraces.  Otherwise, if the
     last two instructions of a function were "call foo; dslot;" this
     can make the return PC of foo (i.e. address of call instruction
     plus 8) point to the first instruction in the next function.  */

  rtx insn, last_real_insn;

  insn = get_last_insn ();

  last_real_insn = prev_real_insn (insn);
  if (last_real_insn
      && GET_CODE (last_real_insn) == INSN
      && GET_CODE (PATTERN (last_real_insn)) == SEQUENCE)
    last_real_insn = XVECEXP (PATTERN (last_real_insn), 0, 0);

  if (last_real_insn && GET_CODE (last_real_insn) == CALL_INSN)
    fputs("\tnop\n", file);

  sparc_output_deferred_case_vectors ();
}

/* Output a 'restore' instruction.  */

static void
output_restore (rtx pat)
{
  rtx operands[3];

  if (! pat)
    {
      fputs ("\t restore\n", asm_out_file);
      return;
    }

  gcc_assert (GET_CODE (pat) == SET);

  operands[0] = SET_DEST (pat);
  pat = SET_SRC (pat);

  switch (GET_CODE (pat))
    {
      case PLUS:
        operands[1] = XEXP (pat, 0);
        operands[2] = XEXP (pat, 1);
        output_asm_insn (" restore %r1, %2, %Y0", operands);
        break;
      case LO_SUM:
        operands[1] = XEXP (pat, 0);
        operands[2] = XEXP (pat, 1);
        output_asm_insn (" restore %r1, %%lo(%a2), %Y0", operands);
        break;
      case ASHIFT:
        operands[1] = XEXP (pat, 0);
        gcc_assert (XEXP (pat, 1) == const1_rtx);
        output_asm_insn (" restore %r1, %r1, %Y0", operands);
        break;
      default:
        operands[1] = pat;
        output_asm_insn (" restore %%g0, %1, %Y0", operands);
        break;
    }
}
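
/* For illustration: output_restore (NULL_RTX) prints a bare "restore",
   while a SET taken from the delay slot is folded into the three-operand
   form " restore src1, src2, dst" using the PLUS, LO_SUM, ASHIFT or
   plain-move shapes handled above, so the delay-slot computation and the
   window restore cost a single instruction.  */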

/* Output a return.  */

const char *
output_return (rtx insn)
{
  if (sparc_leaf_function_p)
    {
      /* This is a leaf function so we don't have to bother restoring the
         register window, which frees us from dealing with the convoluted
         semantics of restore/return.  We simply output the jump to the
         return address and the insn in the delay slot (if any).  */

      gcc_assert (! crtl->calls_eh_return);

      return "jmp\t%%o7+%)%#";
    }
  else
    {
      /* This is a regular function so we have to restore the register window.
         We may have a pending insn for the delay slot, which will be either
         combined with the 'restore' instruction or put in the delay slot of
         the 'return' instruction.  */

      if (crtl->calls_eh_return)
        {
          /* If the function uses __builtin_eh_return, the eh_return
             machinery occupies the delay slot.  */
          gcc_assert (! final_sequence);

          if (! flag_delayed_branch)
            fputs ("\tadd\t%fp, %g1, %fp\n", asm_out_file);

          if (TARGET_V9)
            fputs ("\treturn\t%i7+8\n", asm_out_file);
          else
            fputs ("\trestore\n\tjmp\t%o7+8\n", asm_out_file);

          if (flag_delayed_branch)
            fputs ("\t add\t%sp, %g1, %sp\n", asm_out_file);
          else
            fputs ("\t nop\n", asm_out_file);
        }
      else if (final_sequence)
        {
          rtx delay, pat;

          delay = NEXT_INSN (insn);
          gcc_assert (delay);

          pat = PATTERN (delay);

          if (TARGET_V9 && ! epilogue_renumber (&pat, 1))
            {
              epilogue_renumber (&pat, 0);
              return "return\t%%i7+%)%#";
            }
          else
            {
              output_asm_insn ("jmp\t%%i7+%)", NULL);
              output_restore (pat);
              PATTERN (delay) = gen_blockage ();
              INSN_CODE (delay) = -1;
            }
        }
      else
        {
          /* The delay slot is empty.  */
          if (TARGET_V9)
            return "return\t%%i7+%)\n\t nop";
          else if (flag_delayed_branch)
            return "jmp\t%%i7+%)\n\t restore";
          else
            return "restore\n\tjmp\t%%o7+%)\n\t nop";
        }
    }

  return "";
}
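
/* For illustration: the typical V9 non-leaf case with an empty delay
   slot therefore prints

        return  %i7+8
         nop

   where the offset printed by the %) code is normally 8 (skipping the
   call and its delay slot) and becomes 12 when the 32-bit caller is
   expected to follow the call with a struct-return unimp word.  */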

/* Output a sibling call.  */

const char *
output_sibcall (rtx insn, rtx call_operand)
{
  rtx operands[1];

  gcc_assert (flag_delayed_branch);

  operands[0] = call_operand;

  if (sparc_leaf_function_p)
    {
      /* This is a leaf function so we don't have to bother restoring the
         register window.  We simply output the jump to the function and
         the insn in the delay slot (if any).  */

      gcc_assert (!(LEAF_SIBCALL_SLOT_RESERVED_P && final_sequence));

      if (final_sequence)
        output_asm_insn ("sethi\t%%hi(%a0), %%g1\n\tjmp\t%%g1 + %%lo(%a0)%#",
                         operands);
      else
        /* Use or with rs2 %%g0 instead of mov, so that as/ld can optimize
           it into branch if possible.  */
        output_asm_insn ("or\t%%o7, %%g0, %%g1\n\tcall\t%a0, 0\n\t or\t%%g1, %%g0, %%o7",
                         operands);
    }
  else
    {
      /* This is a regular function so we have to restore the register window.
         We may have a pending insn for the delay slot, which will be combined
         with the 'restore' instruction.  */

      output_asm_insn ("call\t%a0, 0", operands);

      if (final_sequence)
        {
          rtx delay = NEXT_INSN (insn);
          gcc_assert (delay);

          output_restore (PATTERN (delay));

          PATTERN (delay) = gen_blockage ();
          INSN_CODE (delay) = -1;
        }
      else
        output_restore (NULL_RTX);
    }

  return "";
}

/* Functions for handling argument passing.

   For 32-bit, the first 6 args are normally in registers and the rest are
   pushed.  Any arg that starts within the first 6 words is at least
   partially passed in a register unless its data type forbids.

   For 64-bit, the argument registers are laid out as an array of 16 elements
   and arguments are added sequentially.  The first 6 int args and up to the
   first 16 fp args (depending on size) are passed in regs.

   Slot    Stack   Integral   Float   Float in structure   Double   Long Double
   ----    -----   --------   -----   ------------------   ------   -----------
    15   [SP+248]              %f31       %f30,%f31         %d30
    14   [SP+240]              %f29       %f28,%f29         %d28       %q28
    13   [SP+232]              %f27       %f26,%f27         %d26
    12   [SP+224]              %f25       %f24,%f25         %d24       %q24
    11   [SP+216]              %f23       %f22,%f23         %d22
    10   [SP+208]              %f21       %f20,%f21         %d20       %q20
     9   [SP+200]              %f19       %f18,%f19         %d18
     8   [SP+192]              %f17       %f16,%f17         %d16       %q16
     7   [SP+184]              %f15       %f14,%f15         %d14
     6   [SP+176]              %f13       %f12,%f13         %d12       %q12
     5   [SP+168]     %o5      %f11       %f10,%f11         %d10
     4   [SP+160]     %o4       %f9        %f8,%f9           %d8        %q8
     3   [SP+152]     %o3       %f7        %f6,%f7           %d6
     2   [SP+144]     %o2       %f5        %f4,%f5           %d4        %q4
     1   [SP+136]     %o1       %f3        %f2,%f3           %d2
     0   [SP+128]     %o0       %f1        %f0,%f1           %d0        %q0

   Here SP = %sp if -mno-stack-bias or %sp+stack_bias otherwise.

   Integral arguments are always passed as 64-bit quantities appropriately
   extended.

   Passing of floating point values is handled as follows.
   If a prototype is in scope:
     If the value is in a named argument (i.e. not a stdarg function or a
     value not part of the `...') then the value is passed in the appropriate
     fp reg.
     If the value is part of the `...' and is passed in one of the first 6
     slots then the value is passed in the appropriate int reg.
     If the value is part of the `...' and is not passed in one of the first 6
     slots then the value is passed in memory.
   If a prototype is not in scope:
     If the value is one of the first 6 arguments the value is passed in the
     appropriate integer reg and the appropriate fp reg.
     If the value is not one of the first 6 arguments the value is passed in
     the appropriate fp reg and in memory.


   Summary of the calling conventions implemented by GCC on the SPARC:

   32-bit ABI:
                                size      argument     return value

      small integer              <4       int. reg.      int. reg.
      word                        4       int. reg.      int. reg.
      double word                 8       int. reg.      int. reg.

      _Complex small integer     <8       int. reg.      int. reg.
      _Complex word               8       int. reg.      int. reg.
      _Complex double word       16        memory        int. reg.

      vector integer            <=8       int. reg.       FP reg.
      vector integer             >8        memory         memory

      float                       4       int. reg.       FP reg.
      double                      8       int. reg.       FP reg.
      long double                16        memory         memory

      _Complex float              8        memory         FP reg.
      _Complex double            16        memory         FP reg.
      _Complex long double       32        memory         FP reg.

      vector float              any        memory         memory

      aggregate                 any        memory         memory



    64-bit ABI:
                                size      argument     return value

      small integer              <8       int. reg.      int. reg.
      word                        8       int. reg.      int. reg.
      double word                16       int. reg.      int. reg.

      _Complex small integer    <16       int. reg.      int. reg.
      _Complex word              16       int. reg.      int. reg.
      _Complex double word       32        memory        int. reg.

      vector integer           <=16        FP reg.        FP reg.
      vector integer       16<s<=32        memory         FP reg.
      vector integer            >32        memory         memory

      float                       4        FP reg.        FP reg.
      double                      8        FP reg.        FP reg.
      long double                16        FP reg.        FP reg.

      _Complex float              8        FP reg.        FP reg.
      _Complex double            16        FP reg.        FP reg.
      _Complex long double       32        memory         FP reg.

      vector float             <=16        FP reg.        FP reg.
      vector float         16<s<=32        memory         FP reg.
      vector float              >32        memory         memory

      aggregate                <=16         reg.           reg.
      aggregate            16<s<=32        memory          reg.
      aggregate                 >32        memory         memory



Note #1: complex floating-point types follow the extended SPARC ABIs as
implemented by the Sun compiler.

Note #2: integral vector types follow the scalar floating-point types
conventions to match what is implemented by the Sun VIS SDK.

Note #3: floating-point vector types follow the aggregate types
conventions.  */


/* Maximum number of int regs for args.  */
#define SPARC_INT_ARG_MAX 6
/* Maximum number of fp regs for args.  */
#define SPARC_FP_ARG_MAX 16

#define ROUND_ADVANCE(SIZE) (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
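
/* For illustration, ROUND_ADVANCE counts argument slots by rounding up:
   with UNITS_PER_WORD == 4 (ARCH32) a 10-byte argument occupies
   ROUND_ADVANCE (10) == (10 + 3) / 4 == 3 words, while with
   UNITS_PER_WORD == 8 (ARCH64) it occupies 2.  */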

/* Handle the INIT_CUMULATIVE_ARGS macro.
   Initialize a variable CUM of type CUMULATIVE_ARGS
   for a call to a function whose data type is FNTYPE.
   For a library call, FNTYPE is 0.  */

void
init_cumulative_args (struct sparc_args *cum, tree fntype,
                      rtx libname ATTRIBUTE_UNUSED,
                      tree fndecl ATTRIBUTE_UNUSED)
{
  cum->words = 0;
  cum->prototype_p = fntype && TYPE_ARG_TYPES (fntype);
  cum->libcall_p = fntype == 0;
}

/* Handle the TARGET_PROMOTE_PROTOTYPES target hook.
   When a prototype says `char' or `short', really pass an `int'.  */

static bool
sparc_promote_prototypes (const_tree fntype ATTRIBUTE_UNUSED)
{
  return TARGET_ARCH32 ? true : false;
}

/* Handle promotion of pointer and integer arguments.  */

static enum machine_mode
sparc_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
                             enum machine_mode mode,
                             int *punsignedp ATTRIBUTE_UNUSED,
                             const_tree fntype ATTRIBUTE_UNUSED,
                             int for_return ATTRIBUTE_UNUSED)
{
  if (POINTER_TYPE_P (type))
    {
      *punsignedp = POINTERS_EXTEND_UNSIGNED;
      return Pmode;
    }

  /* For TARGET_ARCH64 we need this, as we don't have instructions
     for arithmetic operations which do zero/sign extension at the same
     time, so without this we would end up with a srl/sra after every
     assignment to a user variable, which means very bad code.  */
4648
  if (TARGET_ARCH64
4649
      && GET_MODE_CLASS (mode) == MODE_INT
4650
      && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
4651
    return word_mode;
4652
 
4653
  return mode;
4654
}
4655
 
4656
/* Handle the TARGET_STRICT_ARGUMENT_NAMING target hook.  */
4657
 
4658
static bool
4659
sparc_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
4660
{
4661
  return TARGET_ARCH64 ? true : false;
4662
}
4663
 
4664
/* Scan the record type TYPE and return the following predicates:
4665
    - INTREGS_P: the record contains at least one field or sub-field
4666
      that is eligible for promotion in integer registers.
4667
    - FP_REGS_P: the record contains at least one field or sub-field
4668
      that is eligible for promotion in floating-point registers.
4669
    - PACKED_P: the record contains at least one field that is packed.
4670
 
4671
   Sub-fields are not taken into account for the PACKED_P predicate.  */
4672
 
4673
static void
4674
scan_record_type (tree type, int *intregs_p, int *fpregs_p, int *packed_p)
4675
{
4676
  tree field;
4677
 
4678
  for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4679
    {
4680
      if (TREE_CODE (field) == FIELD_DECL)
4681
        {
4682
          if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4683
            scan_record_type (TREE_TYPE (field), intregs_p, fpregs_p, 0);
4684
          else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4685
                   || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4686
                  && TARGET_FPU)
4687
            *fpregs_p = 1;
4688
          else
4689
            *intregs_p = 1;
4690
 
4691
          if (packed_p && DECL_PACKED (field))
4692
            *packed_p = 1;
4693
        }
4694
    }
4695
}
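/* Illustrative record for the predicates above (hypothetical type; the
   field notes assume TARGET_FPU is set):  */

struct sketch_record
{
  int i;                     /* sets *intregs_p                       */
  float f;                   /* sets *fpregs_p                        */
  struct { double d; } sub;  /* scanned recursively; d counts as fp   */
  int p __attribute__ ((packed));  /* sets *packed_p at the top level */
};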
4696
 
4697
/* Compute the slot number to pass an argument in.
4698
   Return the slot number or -1 if passing on the stack.
4699
 
4700
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
4701
    the preceding args and about the function being called.
4702
   MODE is the argument's machine mode.
4703
   TYPE is the data type of the argument (as a tree).
4704
    This is null for libcalls where that information may
4705
    not be available.
4706
   NAMED is nonzero if this argument is a named parameter
4707
    (otherwise it is an extra parameter matching an ellipsis).
4708
   INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
4709
   *PREGNO records the register number to use if scalar type.
4710
   *PPADDING records the amount of padding needed in words.  */
4711
 
4712
static int
4713
function_arg_slotno (const struct sparc_args *cum, enum machine_mode mode,
4714
                     tree type, int named, int incoming_p,
4715
                     int *pregno, int *ppadding)
4716
{
4717
  int regbase = (incoming_p
4718
                 ? SPARC_INCOMING_INT_ARG_FIRST
4719
                 : SPARC_OUTGOING_INT_ARG_FIRST);
4720
  int slotno = cum->words;
4721
  enum mode_class mclass;
4722
  int regno;
4723
 
4724
  *ppadding = 0;
4725
 
4726
  if (type && TREE_ADDRESSABLE (type))
4727
    return -1;
4728
 
4729
  if (TARGET_ARCH32
4730
      && mode == BLKmode
4731
      && type
4732
      && TYPE_ALIGN (type) % PARM_BOUNDARY != 0)
4733
    return -1;
4734
 
4735
  /* For SPARC64, objects requiring 16-byte alignment get it.  */
4736
  if (TARGET_ARCH64
4737
      && (type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode)) >= 128
4738
      && (slotno & 1) != 0)
4739
    slotno++, *ppadding = 1;
4740
 
4741
  mclass = GET_MODE_CLASS (mode);
4742
  if (type && TREE_CODE (type) == VECTOR_TYPE)
4743
    {
4744
      /* Vector types deserve special treatment because they are
4745
         polymorphic wrt their mode, depending upon whether VIS
4746
         instructions are enabled.  */
4747
      if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
4748
        {
4749
          /* The SPARC port defines no floating-point vector modes.  */
4750
          gcc_assert (mode == BLKmode);
4751
        }
4752
      else
4753
        {
4754
          /* Integral vector types should either have a vector
4755
             mode or an integral mode, because we are guaranteed
4756
             by pass_by_reference that their size is not greater
4757
             than 16 bytes and TImode is 16-byte wide.  */
4758
          gcc_assert (mode != BLKmode);
4759
 
4760
          /* Vector integers are handled like floats according to
4761
             the Sun VIS SDK.  */
4762
          mclass = MODE_FLOAT;
4763
        }
4764
    }
4765
 
4766
  switch (mclass)
4767
    {
4768
    case MODE_FLOAT:
4769
    case MODE_COMPLEX_FLOAT:
4770
    case MODE_VECTOR_INT:
4771
      if (TARGET_ARCH64 && TARGET_FPU && named)
4772
        {
4773
          if (slotno >= SPARC_FP_ARG_MAX)
4774
            return -1;
4775
          regno = SPARC_FP_ARG_FIRST + slotno * 2;
4776
          /* Arguments filling only a single FP register are
4777
             right-justified in the outer double FP register.  */
4778
          if (GET_MODE_SIZE (mode) <= 4)
4779
            regno++;
4780
          break;
4781
        }
4782
      /* fallthrough */
4783
 
4784
    case MODE_INT:
4785
    case MODE_COMPLEX_INT:
4786
      if (slotno >= SPARC_INT_ARG_MAX)
4787
        return -1;
4788
      regno = regbase + slotno;
4789
      break;
4790
 
4791
    case MODE_RANDOM:
4792
      if (mode == VOIDmode)
4793
        /* MODE is VOIDmode when generating the actual call.  */
4794
        return -1;
4795
 
4796
      gcc_assert (mode == BLKmode);
4797
 
4798
      if (TARGET_ARCH32
4799
          || !type
4800
          || (TREE_CODE (type) != VECTOR_TYPE
4801
              && TREE_CODE (type) != RECORD_TYPE))
4802
        {
4803
          if (slotno >= SPARC_INT_ARG_MAX)
4804
            return -1;
4805
          regno = regbase + slotno;
4806
        }
4807
      else  /* TARGET_ARCH64 && type */
4808
        {
4809
          int intregs_p = 0, fpregs_p = 0, packed_p = 0;
4810
 
4811
          /* First see what kinds of registers we would need.  */
4812
          if (TREE_CODE (type) == VECTOR_TYPE)
4813
            fpregs_p = 1;
4814
          else
4815
            scan_record_type (type, &intregs_p, &fpregs_p, &packed_p);
4816
 
4817
          /* The ABI obviously doesn't specify how packed structures
4818
             are passed.  These are defined to be passed in int regs
4819
             if possible, otherwise memory.  */
4820
          if (packed_p || !named)
4821
            fpregs_p = 0, intregs_p = 1;
4822
 
4823
          /* If all arg slots are filled, then must pass on stack.  */
4824
          if (fpregs_p && slotno >= SPARC_FP_ARG_MAX)
4825
            return -1;
4826
 
4827
          /* If there are only int args and all int arg slots are filled,
4828
             then must pass on stack.  */
4829
          if (!fpregs_p && intregs_p && slotno >= SPARC_INT_ARG_MAX)
4830
            return -1;
4831
 
4832
          /* Note that even if all int arg slots are filled, fp members may
4833
             still be passed in regs if such regs are available.
4834
             *PREGNO isn't set because there may be more than one; it's up
4835
             to the caller to compute them.  */
4836
          return slotno;
4837
        }
4838
      break;
4839
 
4840
    default :
4841
      gcc_unreachable ();
4842
    }
4843
 
4844
  *pregno = regno;
4845
  return slotno;
4846
}
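/* Standalone sketch of the scalar slot arithmetic above for the 64-bit
   ABI (sketch_next_slot is hypothetical; 8-byte slots and the 16-byte
   alignment padding rule are assumed):  */

static int
sketch_next_slot (int *words, int size, int align_bytes)
{
  int slotno = *words;
  if (align_bytes >= 16 && (slotno & 1) != 0)  /* pad to an even slot */
    slotno++;
  *words = slotno + ROUND_ADVANCE (size);
  return slotno;
}

/* Starting from 0 words: an 8-byte argument gets slot 0, a following
   16-byte-aligned argument is padded to slot 2, and the next 8-byte
   argument gets slot 4.  */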
4847
 
4848
/* Handle recursive register counting for structure field layout.  */
4849
 
4850
struct function_arg_record_value_parms
4851
{
4852
  rtx ret;              /* return expression being built.  */
4853
  int slotno;           /* slot number of the argument.  */
4854
  int named;            /* whether the argument is named.  */
4855
  int regbase;          /* regno of the base register.  */
4856
  int stack;            /* 1 if part of the argument is on the stack.  */
4857
  int intoffset;        /* offset of the first pending integer field.  */
4858
  unsigned int nregs;   /* number of words passed in registers.  */
4859
};
4860
 
4861
static void function_arg_record_value_3
4862
 (HOST_WIDE_INT, struct function_arg_record_value_parms *);
4863
static void function_arg_record_value_2
4864
 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
4865
static void function_arg_record_value_1
4866
 (const_tree, HOST_WIDE_INT, struct function_arg_record_value_parms *, bool);
4867
static rtx function_arg_record_value (const_tree, enum machine_mode, int, int, int);
4868
static rtx function_arg_union_value (int, enum machine_mode, int, int);
4869
 
4870
/* A subroutine of function_arg_record_value.  Traverse the structure
4871
   recursively and determine how many registers will be required.  */
4872
 
4873
static void
4874
function_arg_record_value_1 (const_tree type, HOST_WIDE_INT startbitpos,
4875
                             struct function_arg_record_value_parms *parms,
4876
                             bool packed_p)
4877
{
4878
  tree field;
4879
 
4880
  /* We need to compute how many registers are needed so we can
4881
     allocate the PARALLEL but before we can do that we need to know
4882
     whether there are any packed fields.  The ABI obviously doesn't
4883
     specify how structures are passed in this case, so they are
4884
     defined to be passed in int regs if possible, otherwise memory,
4885
     regardless of whether there are fp values present.  */
4886
 
4887
  if (! packed_p)
4888
    for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4889
      {
4890
        if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
4891
          {
4892
            packed_p = true;
4893
            break;
4894
          }
4895
      }
4896
 
4897
  /* Compute how many registers we need.  */
4898
  for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4899
    {
4900
      if (TREE_CODE (field) == FIELD_DECL)
4901
        {
4902
          HOST_WIDE_INT bitpos = startbitpos;
4903
 
4904
          if (DECL_SIZE (field) != 0)
4905
            {
4906
              if (integer_zerop (DECL_SIZE (field)))
4907
                continue;
4908
 
4909
              if (host_integerp (bit_position (field), 1))
4910
                bitpos += int_bit_position (field);
4911
            }
4912
 
4913
          /* ??? FIXME: else assume zero offset.  */
4914
 
4915
          if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
4916
            function_arg_record_value_1 (TREE_TYPE (field),
4917
                                         bitpos,
4918
                                         parms,
4919
                                         packed_p);
4920
          else if ((FLOAT_TYPE_P (TREE_TYPE (field))
4921
                    || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
4922
                   && TARGET_FPU
4923
                   && parms->named
4924
                   && ! packed_p)
4925
            {
4926
              if (parms->intoffset != -1)
4927
                {
4928
                  unsigned int startbit, endbit;
4929
                  int intslots, this_slotno;
4930
 
4931
                  startbit = parms->intoffset & -BITS_PER_WORD;
4932
                  endbit   = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
4933
 
4934
                  intslots = (endbit - startbit) / BITS_PER_WORD;
4935
                  this_slotno = parms->slotno + parms->intoffset
4936
                    / BITS_PER_WORD;
4937
 
4938
                  if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
4939
                    {
4940
                      intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
4941
                      /* We need to pass this field on the stack.  */
4942
                      parms->stack = 1;
4943
                    }
4944
 
4945
                  parms->nregs += intslots;
4946
                  parms->intoffset = -1;
4947
                }
4948
 
4949
              /* There's no need to check this_slotno < SPARC_FP_ARG_MAX.
4950
                 If it wasn't true we wouldn't be here.  */
4951
              if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
4952
                  && DECL_MODE (field) == BLKmode)
4953
                parms->nregs += TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
4954
              else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
4955
                parms->nregs += 2;
4956
              else
4957
                parms->nregs += 1;
4958
            }
4959
          else
4960
            {
4961
              if (parms->intoffset == -1)
4962
                parms->intoffset = bitpos;
4963
            }
4964
        }
4965
    }
4966
}
4967
 
4968
/* A subroutine of function_arg_record_value.  Assign the bits of the
4969
   structure between parms->intoffset and bitpos to integer registers.  */
4970
 
4971
static void
4972
function_arg_record_value_3 (HOST_WIDE_INT bitpos,
4973
                             struct function_arg_record_value_parms *parms)
4974
{
4975
  enum machine_mode mode;
4976
  unsigned int regno;
4977
  unsigned int startbit, endbit;
4978
  int this_slotno, intslots, intoffset;
4979
  rtx reg;
4980
 
4981
  if (parms->intoffset == -1)
4982
    return;
4983
 
4984
  intoffset = parms->intoffset;
4985
  parms->intoffset = -1;
4986
 
4987
  startbit = intoffset & -BITS_PER_WORD;
4988
  endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
4989
  intslots = (endbit - startbit) / BITS_PER_WORD;
4990
  this_slotno = parms->slotno + intoffset / BITS_PER_WORD;
4991
 
4992
  intslots = MIN (intslots, SPARC_INT_ARG_MAX - this_slotno);
4993
  if (intslots <= 0)
4994
    return;
4995
 
4996
  /* If this is the trailing part of a word, only load that much into
4997
     the register.  Otherwise load the whole register.  Note that in
4998
     the latter case we may pick up unwanted bits.  It's not a problem
4999
     at the moment but we may wish to revisit this.  */
5000
 
5001
  if (intoffset % BITS_PER_WORD != 0)
5002
    mode = smallest_mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
5003
                                   MODE_INT);
5004
  else
5005
    mode = word_mode;
5006
 
5007
  intoffset /= BITS_PER_UNIT;
5008
  do
5009
    {
5010
      regno = parms->regbase + this_slotno;
5011
      reg = gen_rtx_REG (mode, regno);
5012
      XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5013
        = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
5014
 
5015
      this_slotno += 1;
5016
      intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
5017
      mode = word_mode;
5018
      parms->nregs += 1;
5019
      intslots -= 1;
5020
    }
5021
  while (intslots > 0);
5022
}
5023
 
5024
/* A subroutine of function_arg_record_value.  Traverse the structure
5025
   recursively and assign bits to floating point registers.  Track which
5026
   bits in between need integer registers; invoke function_arg_record_value_3
5027
   to make that happen.  */
5028
 
5029
static void
5030
function_arg_record_value_2 (const_tree type, HOST_WIDE_INT startbitpos,
5031
                             struct function_arg_record_value_parms *parms,
5032
                             bool packed_p)
5033
{
5034
  tree field;
5035
 
5036
  if (! packed_p)
5037
    for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5038
      {
5039
        if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
5040
          {
5041
            packed_p = true;
5042
            break;
5043
          }
5044
      }
5045
 
5046
  for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
5047
    {
5048
      if (TREE_CODE (field) == FIELD_DECL)
5049
        {
5050
          HOST_WIDE_INT bitpos = startbitpos;
5051
 
5052
          if (DECL_SIZE (field) != 0)
5053
            {
5054
              if (integer_zerop (DECL_SIZE (field)))
5055
                continue;
5056
 
5057
              if (host_integerp (bit_position (field), 1))
5058
                bitpos += int_bit_position (field);
5059
            }
5060
 
5061
          /* ??? FIXME: else assume zero offset.  */
5062
 
5063
          if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
5064
            function_arg_record_value_2 (TREE_TYPE (field),
5065
                                         bitpos,
5066
                                         parms,
5067
                                         packed_p);
5068
          else if ((FLOAT_TYPE_P (TREE_TYPE (field))
5069
                    || TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
5070
                   && TARGET_FPU
5071
                   && parms->named
5072
                   && ! packed_p)
5073
            {
5074
              int this_slotno = parms->slotno + bitpos / BITS_PER_WORD;
5075
              int regno, nregs, pos;
5076
              enum machine_mode mode = DECL_MODE (field);
5077
              rtx reg;
5078
 
5079
              function_arg_record_value_3 (bitpos, parms);
5080
 
5081
              if (TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE
5082
                  && mode == BLKmode)
5083
                {
5084
                  mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
5085
                  nregs = TYPE_VECTOR_SUBPARTS (TREE_TYPE (field));
5086
                }
5087
              else if (TREE_CODE (TREE_TYPE (field)) == COMPLEX_TYPE)
5088
                {
5089
                  mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (field)));
5090
                  nregs = 2;
5091
                }
5092
              else
5093
                nregs = 1;
5094
 
5095
              regno = SPARC_FP_ARG_FIRST + this_slotno * 2;
5096
              if (GET_MODE_SIZE (mode) <= 4 && (bitpos & 32) != 0)
5097
                regno++;
5098
              reg = gen_rtx_REG (mode, regno);
5099
              pos = bitpos / BITS_PER_UNIT;
5100
              XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5101
                = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
5102
              parms->nregs += 1;
5103
              while (--nregs > 0)
5104
                {
5105
                  regno += GET_MODE_SIZE (mode) / 4;
5106
                  reg = gen_rtx_REG (mode, regno);
5107
                  pos += GET_MODE_SIZE (mode);
5108
                  XVECEXP (parms->ret, 0, parms->stack + parms->nregs)
5109
                    = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (pos));
5110
                  parms->nregs += 1;
5111
                }
5112
            }
5113
          else
5114
            {
5115
              if (parms->intoffset == -1)
5116
                parms->intoffset = bitpos;
5117
            }
5118
        }
5119
    }
5120
}
5121
 
5122
/* Used by function_arg and function_value to implement the complex
5123
   conventions of the 64-bit ABI for passing and returning structures.
5124
   Return an expression valid as a return value for the two macros
5125
   FUNCTION_ARG and FUNCTION_VALUE.
5126
 
5127
   TYPE is the data type of the argument (as a tree).
5128
    This is null for libcalls where that information may
5129
    not be available.
5130
   MODE is the argument's machine mode.
5131
   SLOTNO is the index number of the argument's slot in the parameter array.
5132
   NAMED is nonzero if this argument is a named parameter
5133
    (otherwise it is an extra parameter matching an ellipsis).
5134
   REGBASE is the regno of the base register for the parameter array.  */
5135
 
5136
static rtx
5137
function_arg_record_value (const_tree type, enum machine_mode mode,
5138
                           int slotno, int named, int regbase)
5139
{
5140
  HOST_WIDE_INT typesize = int_size_in_bytes (type);
5141
  struct function_arg_record_value_parms parms;
5142
  unsigned int nregs;
5143
 
5144
  parms.ret = NULL_RTX;
5145
  parms.slotno = slotno;
5146
  parms.named = named;
5147
  parms.regbase = regbase;
5148
  parms.stack = 0;
5149
 
5150
  /* Compute how many registers we need.  */
5151
  parms.nregs = 0;
5152
  parms.intoffset = 0;
5153
  function_arg_record_value_1 (type, 0, &parms, false);
5154
 
5155
  /* Take into account pending integer fields.  */
5156
  if (parms.intoffset != -1)
5157
    {
5158
      unsigned int startbit, endbit;
5159
      int intslots, this_slotno;
5160
 
5161
      startbit = parms.intoffset & -BITS_PER_WORD;
5162
      endbit = (typesize*BITS_PER_UNIT + BITS_PER_WORD - 1) & -BITS_PER_WORD;
5163
      intslots = (endbit - startbit) / BITS_PER_WORD;
5164
      this_slotno = slotno + parms.intoffset / BITS_PER_WORD;
5165
 
5166
      if (intslots > 0 && intslots > SPARC_INT_ARG_MAX - this_slotno)
5167
        {
5168
          intslots = MAX (0, SPARC_INT_ARG_MAX - this_slotno);
5169
          /* We need to pass this field on the stack.  */
5170
          parms.stack = 1;
5171
        }
5172
 
5173
      parms.nregs += intslots;
5174
    }
5175
  nregs = parms.nregs;
5176
 
5177
  /* Allocate the vector and handle some annoying special cases.  */
5178
  if (nregs == 0)
5179
    {
5180
      /* ??? Empty structure has no value?  Duh?  */
5181
      if (typesize <= 0)
5182
        {
5183
          /* Though there's nothing really to store, return a word register
5184
             anyway so the rest of gcc doesn't go nuts.  Returning a PARALLEL
5185
             leads to breakage due to the fact that there are zero bytes to
5186
             load.  */
5187
          return gen_rtx_REG (mode, regbase);
5188
        }
5189
      else
5190
        {
5191
          /* ??? C++ has structures with no fields, and yet a size.  Give up
5192
             for now and pass everything back in integer registers.  */
5193
          nregs = (typesize + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5194
        }
5195
      if (nregs + slotno > SPARC_INT_ARG_MAX)
5196
        nregs = SPARC_INT_ARG_MAX - slotno;
5197
    }
5198
  gcc_assert (nregs != 0);
5199
 
5200
  parms.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (parms.stack + nregs));
5201
 
5202
  /* If at least one field must be passed on the stack, generate
5203
     (parallel [(expr_list (nil) ...) ...]) so that all fields will
5204
     also be passed on the stack.  We can't do much better because the
5205
     semantics of TARGET_ARG_PARTIAL_BYTES doesn't handle the case
5206
     of structures for which the fields passed exclusively in registers
5207
     are not at the beginning of the structure.  */
5208
  if (parms.stack)
5209
    XVECEXP (parms.ret, 0, 0)
5210
      = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5211
 
5212
  /* Fill in the entries.  */
5213
  parms.nregs = 0;
5214
  parms.intoffset = 0;
5215
  function_arg_record_value_2 (type, 0, &parms, false);
5216
  function_arg_record_value_3 (typesize * BITS_PER_UNIT, &parms);
5217
 
5218
  gcc_assert (parms.nregs == nregs);
5219
 
5220
  return parms.ret;
5221
}
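/* For instance, under the code above a named V9 argument of this
   hypothetical type becomes a PARALLEL of two expr_lists: the double
   lands in an FP register and the long in the integer register of its
   slot, the exact numbers depending on SLOTNO.  */

struct sketch_mixed { double d; long l; };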
5222
 
5223
/* Used by function_arg and function_value to implement the conventions
5224
   of the 64-bit ABI for passing and returning unions.
5225
   Return an expression valid as a return value for the two macros
5226
   FUNCTION_ARG and FUNCTION_VALUE.
5227
 
5228
   SIZE is the size in bytes of the union.
5229
   MODE is the argument's machine mode.
5230
   SLOTNO is the index number of the argument's slot in the parameter array.
   REGNO is the hard register the union will be passed in.  */
5231
 
5232
static rtx
5233
function_arg_union_value (int size, enum machine_mode mode, int slotno,
5234
                          int regno)
5235
{
5236
  int nwords = ROUND_ADVANCE (size), i;
5237
  rtx regs;
5238
 
5239
  /* See comment in previous function for empty structures.  */
5240
  if (nwords == 0)
5241
    return gen_rtx_REG (mode, regno);
5242
 
5243
  if (slotno == SPARC_INT_ARG_MAX - 1)
5244
    nwords = 1;
5245
 
5246
  regs = gen_rtx_PARALLEL (mode, rtvec_alloc (nwords));
5247
 
5248
  for (i = 0; i < nwords; i++)
5249
    {
5250
      /* Unions are passed left-justified.  */
5251
      XVECEXP (regs, 0, i)
5252
        = gen_rtx_EXPR_LIST (VOIDmode,
5253
                             gen_rtx_REG (word_mode, regno),
5254
                             GEN_INT (UNITS_PER_WORD * i));
5255
      regno++;
5256
    }
5257
 
5258
  return regs;
5259
}
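/* Illustration: a 12-byte union such as this hypothetical one spans
   ROUND_ADVANCE (12) == 2 integer registers, left-justified, with
   bytes 0-7 in the first register and bytes 8-11 in the second.  */

union sketch_union { char bytes[12]; long l; };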
5260
 
5261
/* Used by function_arg and function_value to implement the conventions
5262
   for passing and returning large (BLKmode) vectors.
5263
   Return an expression valid as a return value for the two macros
5264
   FUNCTION_ARG and FUNCTION_VALUE.
5265
 
5266
   SIZE is the size in bytes of the vector (at least 8 bytes).
5267
   REGNO is the FP hard register the vector will be passed in.  */
5268
 
5269
static rtx
5270
function_arg_vector_value (int size, int regno)
5271
{
5272
  int i, nregs = size / 8;
5273
  rtx regs;
5274
 
5275
  regs = gen_rtx_PARALLEL (BLKmode, rtvec_alloc (nregs));
5276
 
5277
  for (i = 0; i < nregs; i++)
5278
    {
5279
      XVECEXP (regs, 0, i)
5280
        = gen_rtx_EXPR_LIST (VOIDmode,
5281
                             gen_rtx_REG (DImode, regno + 2*i),
5282
                             GEN_INT (i*8));
5283
    }
5284
 
5285
  return regs;
5286
}
5287
 
5288
/* Handle the FUNCTION_ARG macro.
5289
   Determine where to put an argument to a function.
5290
   Value is zero to push the argument on the stack,
5291
   or a hard register in which to store the argument.
5292
 
5293
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
5294
    the preceding args and about the function being called.
5295
   MODE is the argument's machine mode.
5296
   TYPE is the data type of the argument (as a tree).
5297
    This is null for libcalls where that information may
5298
    not be available.
5299
   NAMED is nonzero if this argument is a named parameter
5300
    (otherwise it is an extra parameter matching an ellipsis).
5301
   INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.  */
5302
 
5303
rtx
5304
function_arg (const struct sparc_args *cum, enum machine_mode mode,
5305
              tree type, int named, int incoming_p)
5306
{
5307
  int regbase = (incoming_p
5308
                 ? SPARC_INCOMING_INT_ARG_FIRST
5309
                 : SPARC_OUTGOING_INT_ARG_FIRST);
5310
  int slotno, regno, padding;
5311
  enum mode_class mclass = GET_MODE_CLASS (mode);
5312
 
5313
  slotno = function_arg_slotno (cum, mode, type, named, incoming_p,
5314
                                &regno, &padding);
5315
  if (slotno == -1)
5316
    return 0;
5317
 
5318
  /* Vector types deserve special treatment because they are polymorphic wrt
5319
     their mode, depending upon whether VIS instructions are enabled.  */
5320
  if (type && TREE_CODE (type) == VECTOR_TYPE)
5321
    {
5322
      HOST_WIDE_INT size = int_size_in_bytes (type);
5323
      gcc_assert ((TARGET_ARCH32 && size <= 8)
5324
                  || (TARGET_ARCH64 && size <= 16));
5325
 
5326
      if (mode == BLKmode)
5327
        return function_arg_vector_value (size,
5328
                                          SPARC_FP_ARG_FIRST + 2*slotno);
5329
      else
5330
        mclass = MODE_FLOAT;
5331
    }
5332
 
5333
  if (TARGET_ARCH32)
5334
    return gen_rtx_REG (mode, regno);
5335
 
5336
  /* Structures up to 16 bytes in size are passed in arg slots on the stack
5337
     and are promoted to registers if possible.  */
5338
  if (type && TREE_CODE (type) == RECORD_TYPE)
5339
    {
5340
      HOST_WIDE_INT size = int_size_in_bytes (type);
5341
      gcc_assert (size <= 16);
5342
 
5343
      return function_arg_record_value (type, mode, slotno, named, regbase);
5344
    }
5345
 
5346
  /* Unions up to 16 bytes in size are passed in integer registers.  */
5347
  else if (type && TREE_CODE (type) == UNION_TYPE)
5348
    {
5349
      HOST_WIDE_INT size = int_size_in_bytes (type);
5350
      gcc_assert (size <= 16);
5351
 
5352
      return function_arg_union_value (size, mode, slotno, regno);
5353
    }
5354
 
5355
  /* v9 fp args in reg slots beyond the int reg slots get passed in regs
5356
     but also have the slot allocated for them.
5357
     If no prototype is in scope fp values in register slots get passed
5358
     in two places, either fp regs and int regs or fp regs and memory.  */
5359
  else if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
5360
           && SPARC_FP_REG_P (regno))
5361
    {
5362
      rtx reg = gen_rtx_REG (mode, regno);
5363
      if (cum->prototype_p || cum->libcall_p)
5364
        {
5365
          /* "* 2" because fp reg numbers are recorded in 4 byte
5366
             quantities.  */
5367
#if 0
5368
          /* ??? This will cause the value to be passed in the fp reg and
5369
             in the stack.  When a prototype exists we want to pass the
5370
             value in the reg but reserve space on the stack.  That's an
5371
             optimization, and is deferred [for a bit].  */
5372
          if ((regno - SPARC_FP_ARG_FIRST) >= SPARC_INT_ARG_MAX * 2)
5373
            return gen_rtx_PARALLEL (mode,
5374
                            gen_rtvec (2,
5375
                                       gen_rtx_EXPR_LIST (VOIDmode,
5376
                                                NULL_RTX, const0_rtx),
5377
                                       gen_rtx_EXPR_LIST (VOIDmode,
5378
                                                reg, const0_rtx)));
5379
          else
5380
#else
5381
          /* ??? It seems that passing back a register even when past
5382
             the area declared by REG_PARM_STACK_SPACE will allocate
5383
             space appropriately, and will not copy the data onto the
5384
             stack, exactly as we desire.
5385
 
5386
             This is due to locate_and_pad_parm being called in
5387
             expand_call whenever reg_parm_stack_space > 0, which
5388
             while beneficial to our example here, would seem to be
5389
             in error from what had been intended.  Ho hum...  -- r~ */
5390
#endif
5391
            return reg;
5392
        }
5393
      else
5394
        {
5395
          rtx v0, v1;
5396
 
5397
          if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
5398
            {
5399
              int intreg;
5400
 
5401
              /* On incoming, we don't need to know that the value
5402
                 is passed in %f0 and %i0, and it confuses other parts
5403
                 causing needless spillage even on the simplest cases.  */
5404
              if (incoming_p)
5405
                return reg;
5406
 
5407
              intreg = (SPARC_OUTGOING_INT_ARG_FIRST
5408
                        + (regno - SPARC_FP_ARG_FIRST) / 2);
5409
 
5410
              v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5411
              v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
5412
                                      const0_rtx);
5413
              return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5414
            }
5415
          else
5416
            {
5417
              v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
5418
              v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
5419
              return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
5420
            }
5421
        }
5422
    }
5423
 
5424
  /* All other aggregate types are passed in an integer register in a mode
5425
     corresponding to the size of the type.  */
5426
  else if (type && AGGREGATE_TYPE_P (type))
5427
    {
5428
      HOST_WIDE_INT size = int_size_in_bytes (type);
5429
      gcc_assert (size <= 16);
5430
 
5431
      mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
5432
    }
5433
 
5434
  return gen_rtx_REG (mode, regno);
5435
}
5436
 
5437
/* For an arg passed partly in registers and partly in memory,
5438
   this is the number of bytes of registers used.
5439
   For args passed entirely in registers or entirely in memory, zero.
5440
 
5441
   Any arg that starts in the first 6 regs but won't entirely fit in them
5442
   needs partial registers on v8.  On v9, structures with integer
5443
   values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
5444
   values that begin in the last fp reg [where "last fp reg" varies with the
5445
   mode] will be split between that reg and memory.  */
5446
 
5447
static int
5448
sparc_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
5449
                         tree type, bool named)
5450
{
5451
  int slotno, regno, padding;
5452
 
5453
  /* We pass 0 for incoming_p here; it doesn't matter.  */
5454
  slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
5455
 
5456
  if (slotno == -1)
5457
    return 0;
5458
 
5459
  if (TARGET_ARCH32)
5460
    {
5461
      if ((slotno + (mode == BLKmode
5462
                     ? ROUND_ADVANCE (int_size_in_bytes (type))
5463
                     : ROUND_ADVANCE (GET_MODE_SIZE (mode))))
5464
          > SPARC_INT_ARG_MAX)
5465
        return (SPARC_INT_ARG_MAX - slotno) * UNITS_PER_WORD;
5466
    }
5467
  else
5468
    {
5469
      /* We are guaranteed by pass_by_reference that the size of the
5470
         argument is not greater than 16 bytes, so we only need to return
5471
         one word if the argument is partially passed in registers.  */
5472
 
5473
      if (type && AGGREGATE_TYPE_P (type))
5474
        {
5475
          int size = int_size_in_bytes (type);
5476
 
5477
          if (size > UNITS_PER_WORD
5478
              && slotno == SPARC_INT_ARG_MAX - 1)
5479
            return UNITS_PER_WORD;
5480
        }
5481
      else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
5482
               || (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
5483
                   && ! (TARGET_FPU && named)))
5484
        {
5485
          /* The complex types are passed as packed types.  */
5486
          if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
5487
              && slotno == SPARC_INT_ARG_MAX - 1)
5488
            return UNITS_PER_WORD;
5489
        }
5490
      else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5491
        {
5492
          if ((slotno + GET_MODE_SIZE (mode) / UNITS_PER_WORD)
5493
              > SPARC_FP_ARG_MAX)
5494
            return UNITS_PER_WORD;
5495
        }
5496
    }
5497
 
5498
  return 0;
5499
}
5500
 
5501
/* Handle the TARGET_PASS_BY_REFERENCE target hook.
5502
   Specify whether to pass the argument by reference.  */
5503
 
5504
static bool
5505
sparc_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
5506
                         enum machine_mode mode, const_tree type,
5507
                         bool named ATTRIBUTE_UNUSED)
5508
{
5509
  if (TARGET_ARCH32)
5510
    /* Original SPARC 32-bit ABI says that structures and unions,
5511
       and quad-precision floats are passed by reference.  For Pascal,
5512
       also pass arrays by reference.  All other base types are passed
5513
       in registers.
5514
 
5515
       Extended ABI (as implemented by the Sun compiler) says that all
5516
       complex floats are passed by reference.  Pass complex integers
5517
       in registers up to 8 bytes.  More generally, enforce the 2-word
5518
       cap for passing arguments in registers.
5519
 
5520
       Vector ABI (as implemented by the Sun VIS SDK) says that vector
5521
       integers are passed like floats of the same size, that is in
5522
       registers up to 8 bytes.  Pass all vector floats by reference
5523
       like structure and unions.  */
5524
    return ((type && (AGGREGATE_TYPE_P (type) || VECTOR_FLOAT_TYPE_P (type)))
5525
            || mode == SCmode
5526
            /* Catch CDImode, TFmode, DCmode and TCmode.  */
5527
            || GET_MODE_SIZE (mode) > 8
5528
            || (type
5529
                && TREE_CODE (type) == VECTOR_TYPE
5530
                && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
5531
  else
5532
    /* Original SPARC 64-bit ABI says that structures and unions
5533
       smaller than 16 bytes are passed in registers, as well as
5534
       all other base types.
5535
 
5536
       Extended ABI (as implemented by the Sun compiler) says that
5537
       complex floats are passed in registers up to 16 bytes.  Pass
5538
       all complex integers in registers up to 16 bytes.  More generally,
5539
       enforce the 2-word cap for passing arguments in registers.
5540
 
5541
       Vector ABI (as implemented by the Sun VIS SDK) says that vector
5542
       integers are passed like floats of the same size, that is in
5543
       registers (up to 16 bytes).  Pass all vector floats like structure
5544
       and unions.  */
5545
    return ((type
5546
             && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == VECTOR_TYPE)
5547
             && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 16)
5548
            /* Catch CTImode and TCmode.  */
5549
            || GET_MODE_SIZE (mode) > 16);
5550
}
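/* Compilable examples of the 32-bit rules above (hypothetical types;
   long double is assumed to be quad precision, as on SPARC):  */

struct sketch_pair { int a, b; };     /* aggregate:          by reference */
typedef _Complex float sketch_sc_t;   /* SCmode:             by reference */
typedef long double sketch_tf_t;      /* TFmode, 16 bytes:   by reference */
typedef long long sketch_di_t;        /* DImode, 8 bytes:    in registers */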
5551
 
5552
/* Handle the FUNCTION_ARG_ADVANCE macro.
5553
   Update the data in CUM to advance over an argument
5554
   of mode MODE and data type TYPE.
5555
   TYPE is null for libcalls where that information may not be available.  */
5556
 
5557
void
5558
function_arg_advance (struct sparc_args *cum, enum machine_mode mode,
5559
                      tree type, int named)
5560
{
5561 378 julius
  int regno, padding;
5562 282 jeremybenn
 
5563
  /* We pass 0 for incoming_p here; it doesn't matter.  */
5564 378 julius
  function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
5565 282 jeremybenn
 
5566 378 julius
  /* If the argument requires leading padding, add it.  */
5567
  cum->words += padding;
5568 282 jeremybenn
 
5569
  if (TARGET_ARCH32)
5570
    {
5571
      cum->words += (mode != BLKmode
5572
                     ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5573
                     : ROUND_ADVANCE (int_size_in_bytes (type)));
5574
    }
5575
  else
5576
    {
5577
      if (type && AGGREGATE_TYPE_P (type))
5578
        {
5579
          int size = int_size_in_bytes (type);
5580
 
5581
          if (size <= 8)
5582
            ++cum->words;
5583
          else if (size <= 16)
5584
            cum->words += 2;
5585
          else /* passed by reference */
5586
            ++cum->words;
5587
        }
5588
      else
5589
        {
5590
          cum->words += (mode != BLKmode
5591
                         ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
5592
                         : ROUND_ADVANCE (int_size_in_bytes (type)));
5593
        }
5594
    }
5595
}
5596
 
5597
/* Handle the FUNCTION_ARG_PADDING macro.
5598
   For the 64-bit ABI, structs are always stored left-justified in their
5599
   argument slot.  */
5600
 
5601
enum direction
5602
function_arg_padding (enum machine_mode mode, const_tree type)
5603
{
5604
  if (TARGET_ARCH64 && type != 0 && AGGREGATE_TYPE_P (type))
5605
    return upward;
5606
 
5607
  /* Fall back to the default.  */
5608
  return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
5609
}
5610
 
5611
/* Handle the TARGET_RETURN_IN_MEMORY target hook.
5612
   Specify whether to return the return value in memory.  */
5613
 
5614
static bool
5615
sparc_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
5616
{
5617
  if (TARGET_ARCH32)
5618
    /* Original SPARC 32-bit ABI says that structures and unions,
5619
       and quad-precision floats are returned in memory.  All other
5620
       base types are returned in registers.
5621
 
5622
       Extended ABI (as implemented by the Sun compiler) says that
5623
       all complex floats are returned in registers (8 FP registers
5624
       at most for '_Complex long double').  Return all complex integers
5625
       in registers (4 at most for '_Complex long long').
5626
 
5627
       Vector ABI (as implemented by the Sun VIS SDK) says that vector
5628
       integers are returned like floats of the same size, that is in
5629
       registers up to 8 bytes and in memory otherwise.  Return all
5630
       vector floats in memory like structure and unions; note that
5631
       they always have BLKmode like the latter.  */
5632
    return (TYPE_MODE (type) == BLKmode
5633
            || TYPE_MODE (type) == TFmode
5634
            || (TREE_CODE (type) == VECTOR_TYPE
5635
                && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8));
5636
  else
5637
    /* Original SPARC 64-bit ABI says that structures and unions
5638
       smaller than 32 bytes are returned in registers, as well as
5639
       all other base types.
5640
 
5641
       Extended ABI (as implemented by the Sun compiler) says that all
5642
       complex floats are returned in registers (8 FP registers at most
5643
       for '_Complex long double').  Return all complex integers in
5644
       registers (4 at most for '_Complex TItype').
5645
 
5646
       Vector ABI (as implemented by the Sun VIS SDK) says that vector
5647
       integers are returned like floats of the same size, that is in
5648
       registers.  Return all vector floats like structure and unions;
5649
       note that they always have BLKmode like the latter.  */
5650
    return ((TYPE_MODE (type) == BLKmode
5651
             && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 32));
5652
}
5653
 
5654
/* Handle the TARGET_STRUCT_VALUE target hook.
5655
   Return where to find the structure return value address.  */
5656
 
5657
static rtx
5658
sparc_struct_value_rtx (tree fndecl, int incoming)
5659
{
5660
  if (TARGET_ARCH64)
5661
    return 0;
5662
  else
5663
    {
5664
      rtx mem;
5665
 
5666
      if (incoming)
5667
        mem = gen_rtx_MEM (Pmode, plus_constant (frame_pointer_rtx,
5668
                                                 STRUCT_VALUE_OFFSET));
5669
      else
5670
        mem = gen_rtx_MEM (Pmode, plus_constant (stack_pointer_rtx,
5671
                                                 STRUCT_VALUE_OFFSET));
5672
 
5673
      /* Only follow the SPARC ABI for fixed-size structure returns.
5674
         Variable-size structure returns are handled per the normal
5675
         procedures in GCC.  This is enabled by -mstd-struct-return.  */
5676
      if (incoming == 2
5677
          && sparc_std_struct_return
5678
          && TYPE_SIZE_UNIT (TREE_TYPE (fndecl))
5679
          && TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (fndecl))) == INTEGER_CST)
5680
        {
5681
          /* We must check and adjust the return address, since it is
5682
             optional whether the return object is actually
5683
             provided.  */
5684
          rtx ret_rtx = gen_rtx_REG (Pmode, 31);
5685
          rtx scratch = gen_reg_rtx (SImode);
5686
          rtx endlab = gen_label_rtx ();
5687
 
5688
          /* Calculate the return object size.  */
5689
          tree size = TYPE_SIZE_UNIT (TREE_TYPE (fndecl));
5690
          rtx size_rtx = GEN_INT (TREE_INT_CST_LOW (size) & 0xfff);
5691
          /* Construct a temporary return value.  */
5692
          rtx temp_val = assign_stack_local (Pmode, TREE_INT_CST_LOW (size), 0);
5693
 
5694
          /* Implement the SPARC 32-bit psABI callee struct return checking
5695
             requirements:
5696
 
5697
              Fetch the instruction where we will return to and see if
5698
             it's an unimp instruction (the most significant 10 bits
5699
             will be zero).  */
5700
          emit_move_insn (scratch, gen_rtx_MEM (SImode,
5701
                                                plus_constant (ret_rtx, 8)));
5702
          /* Assume the size is valid and pre-adjust.  */
5703
          emit_insn (gen_add3_insn (ret_rtx, ret_rtx, GEN_INT (4)));
5704
          emit_cmp_and_jump_insns (scratch, size_rtx, EQ, const0_rtx, SImode, 0, endlab);
5705
          emit_insn (gen_sub3_insn (ret_rtx, ret_rtx, GEN_INT (4)));
5706
          /* Assign stack temp:
5707
             Write the address of the memory pointed to by temp_val into
5708
             the memory pointed to by mem.  */
5709
          emit_move_insn (mem, XEXP (temp_val, 0));
5710
          emit_label (endlab);
5711
        }
5712
 
5713
      set_mem_alias_set (mem, struct_value_alias_set);
5714
      return mem;
5715
    }
5716
}
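/* Caller-side shape of the convention checked above (sketch; f and the
   size 12 are illustrative):

       call    f
        nop
       unimp   12      ! low 12 bits: size of the returned struct

   The callee loads the word at the return address + 8 and compares it
   with the expected size; on a match it returns 4 bytes further, past
   the unimp, and otherwise it substitutes a local temporary as the
   return object, as the sequence above does.  */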
5717
 
5718
/* Handle FUNCTION_VALUE, FUNCTION_OUTGOING_VALUE, and LIBCALL_VALUE macros.
5719
   For v9, function return values are subject to the same rules as arguments,
5720
   except that up to 32 bytes may be returned in registers.  */
5721
 
5722
rtx
5723
function_value (const_tree type, enum machine_mode mode, int incoming_p)
5724
{
5725
  /* Beware that the two values are swapped here wrt function_arg.  */
5726
  int regbase = (incoming_p
5727
                 ? SPARC_OUTGOING_INT_ARG_FIRST
5728
                 : SPARC_INCOMING_INT_ARG_FIRST);
5729
  enum mode_class mclass = GET_MODE_CLASS (mode);
5730
  int regno;
5731
 
5732
  /* Vector types deserve special treatment because they are polymorphic wrt
5733
     their mode, depending upon whether VIS instructions are enabled.  */
5734
  if (type && TREE_CODE (type) == VECTOR_TYPE)
5735
    {
5736
      HOST_WIDE_INT size = int_size_in_bytes (type);
5737
      gcc_assert ((TARGET_ARCH32 && size <= 8)
5738
                  || (TARGET_ARCH64 && size <= 32));
5739
 
5740
      if (mode == BLKmode)
5741
        return function_arg_vector_value (size,
5742
                                          SPARC_FP_ARG_FIRST);
5743
      else
5744
        mclass = MODE_FLOAT;
5745
    }
5746
 
5747
  if (TARGET_ARCH64 && type)
5748
    {
5749
      /* Structures up to 32 bytes in size are returned in registers.  */
5750
      if (TREE_CODE (type) == RECORD_TYPE)
5751
        {
5752
          HOST_WIDE_INT size = int_size_in_bytes (type);
5753
          gcc_assert (size <= 32);
5754
 
5755
          return function_arg_record_value (type, mode, 0, 1, regbase);
5756
        }
5757
 
5758
      /* Unions up to 32 bytes in size are returned in integer registers.  */
5759
      else if (TREE_CODE (type) == UNION_TYPE)
5760
        {
5761
          HOST_WIDE_INT size = int_size_in_bytes (type);
5762
          gcc_assert (size <= 32);
5763
 
5764
          return function_arg_union_value (size, mode, 0, regbase);
5765
        }
5766
 
5767
      /* Objects that require it are returned in FP registers.  */
5768
      else if (mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT)
5769
        ;
5770
 
5771
      /* All other aggregate types are returned in an integer register in a
5772
         mode corresponding to the size of the type.  */
5773
      else if (AGGREGATE_TYPE_P (type))
5774
        {
5775
          /* All other aggregate types are passed in an integer register
5776
             in a mode corresponding to the size of the type.  */
5777
          HOST_WIDE_INT size = int_size_in_bytes (type);
5778
          gcc_assert (size <= 32);
5779
 
5780
          mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
5781
 
5782
          /* ??? We probably should have made the same ABI change in
5783
             3.4.0 as the one we made for unions.   The latter was
5784
             required by the SCD though, while the former is not
5785
             specified, so we favored compatibility and efficiency.
5786
 
5787
             Now we're stuck for aggregates larger than 16 bytes,
5788
             because OImode vanished in the meantime.  Let's not
5789
             try to be unduly clever, and simply follow the ABI
5790
             for unions in that case.  */
5791
          if (mode == BLKmode)
5792
            return function_arg_union_value (size, mode, 0, regbase);
5793
          else
5794
            mclass = MODE_INT;
5795
        }
5796
 
5797
      /* This must match sparc_promote_function_mode.
5798
         ??? Maybe 32-bit pointers should actually remain in Pmode?  */
5799
      else if (mclass == MODE_INT && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
5800
        mode = word_mode;
5801
    }
5802
 
5803
  if ((mclass == MODE_FLOAT || mclass == MODE_COMPLEX_FLOAT) && TARGET_FPU)
5804
    regno = SPARC_FP_ARG_FIRST;
5805
  else
5806
    regno = regbase;
5807
 
5808
  return gen_rtx_REG (mode, regno);
5809
}
5810
 
5811
/* Do what is necessary for `va_start'.  We look at the current function
5812
   to determine if stdarg or varargs is used and return the address of
5813
   the first unnamed parameter.  */
5814
 
5815
static rtx
5816
sparc_builtin_saveregs (void)
5817
{
5818
  int first_reg = crtl->args.info.words;
5819
  rtx address;
5820
  int regno;
5821
 
5822
  for (regno = first_reg; regno < SPARC_INT_ARG_MAX; regno++)
5823
    emit_move_insn (gen_rtx_MEM (word_mode,
5824
                                 gen_rtx_PLUS (Pmode,
5825
                                               frame_pointer_rtx,
5826
                                               GEN_INT (FIRST_PARM_OFFSET (0)
5827
                                                        + (UNITS_PER_WORD
5828
                                                           * regno)))),
5829
                    gen_rtx_REG (word_mode,
5830
                                 SPARC_INCOMING_INT_ARG_FIRST + regno));
5831
 
5832
  address = gen_rtx_PLUS (Pmode,
5833
                          frame_pointer_rtx,
5834
                          GEN_INT (FIRST_PARM_OFFSET (0)
5835
                                   + UNITS_PER_WORD * first_reg));
5836
 
5837
  return address;
5838
}
5839
 
5840
/* Implement `va_start' for stdarg.  */
5841
 
5842
static void
5843
sparc_va_start (tree valist, rtx nextarg)
5844
{
5845
  nextarg = expand_builtin_saveregs ();
5846
  std_expand_builtin_va_start (valist, nextarg);
5847
}
5848
 
5849
/* Implement `va_arg' for stdarg.  */
5850
 
5851
static tree
5852
sparc_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
5853
                       gimple_seq *post_p)
5854
{
5855
  HOST_WIDE_INT size, rsize, align;
5856
  tree addr, incr;
5857
  bool indirect;
5858
  tree ptrtype = build_pointer_type (type);
5859
 
5860
  if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
5861
    {
5862
      indirect = true;
5863
      size = rsize = UNITS_PER_WORD;
5864
      align = 0;
5865
    }
5866
  else
5867
    {
5868
      indirect = false;
5869
      size = int_size_in_bytes (type);
5870
      rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
5871
      align = 0;
5872
 
5873
      if (TARGET_ARCH64)
5874
        {
5875
          /* For SPARC64, objects requiring 16-byte alignment get it.  */
5876
          if (TYPE_ALIGN (type) >= 2 * (unsigned) BITS_PER_WORD)
5877
            align = 2 * UNITS_PER_WORD;
5878
 
5879
          /* SPARC-V9 ABI states that structures up to 16 bytes in size
5880
             are left-justified in their slots.  */
5881
          if (AGGREGATE_TYPE_P (type))
5882
            {
5883
              if (size == 0)
5884
                size = rsize = UNITS_PER_WORD;
5885
              else
5886
                size = rsize;
5887
            }
5888
        }
5889
    }
5890
 
5891
  incr = valist;
5892
  if (align)
5893
    {
5894
      incr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr,
5895
                          size_int (align - 1));
5896
      incr = fold_convert (sizetype, incr);
5897
      incr = fold_build2 (BIT_AND_EXPR, sizetype, incr,
5898
                          size_int (-align));
5899
      incr = fold_convert (ptr_type_node, incr);
5900
    }
5901
 
5902
  gimplify_expr (&incr, pre_p, post_p, is_gimple_val, fb_rvalue);
5903
  addr = incr;
5904
 
5905
  if (BYTES_BIG_ENDIAN && size < rsize)
5906
    addr = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr,
5907
                        size_int (rsize - size));
5908
 
5909
  if (indirect)
5910
    {
5911
      addr = fold_convert (build_pointer_type (ptrtype), addr);
5912
      addr = build_va_arg_indirect_ref (addr);
5913
    }
5914
 
5915
  /* If the address isn't aligned properly for the type, we need a temporary.
5916
     FIXME: This is inefficient, usually we can do this in registers.  */
5917
  else if (align == 0 && TYPE_ALIGN (type) > BITS_PER_WORD)
5918
    {
5919
      tree tmp = create_tmp_var (type, "va_arg_tmp");
5920
      tree dest_addr = build_fold_addr_expr (tmp);
5921
      tree copy = build_call_expr (implicit_built_in_decls[BUILT_IN_MEMCPY],
5922
                                   3, dest_addr, addr, size_int (rsize));
5923
      TREE_ADDRESSABLE (tmp) = 1;
5924
      gimplify_and_add (copy, pre_p);
5925
      addr = dest_addr;
5926
    }
5927
 
5928
  else
5929
    addr = fold_convert (ptrtype, addr);
5930
 
5931
  incr
5932
    = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node, incr, size_int (rsize));
5933
  gimplify_assign (valist, incr, post_p);
5934
 
5935
  return build_va_arg_indirect_ref (addr);
5936
}
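/* The POINTER_PLUS/BIT_AND pair above is the usual power-of-two
   round-up; the same arithmetic as a standalone sketch
   (sketch_align_up is hypothetical, ALIGN must be a power of two):  */

static unsigned long
sketch_align_up (unsigned long addr, unsigned long align)
{
  return (addr + align - 1) & -align;  /* e.g. 0x1004 -> 0x1010 for 16 */
}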
5937
 
5938
/* Implement the TARGET_VECTOR_MODE_SUPPORTED_P target hook.
5939
   Specify whether the vector mode is supported by the hardware.  */
5940
 
5941
static bool
5942
sparc_vector_mode_supported_p (enum machine_mode mode)
5943
{
5944
  return TARGET_VIS && VECTOR_MODE_P (mode) ? true : false;
5945
}
5946
 
5947
/* Return the string to output an unconditional branch to LABEL, which is
5948
   the operand number of the label.
5949
 
5950
   DEST is the destination insn (i.e. the label), INSN is the source.  */
5951
 
5952
const char *
5953
output_ubranch (rtx dest, int label, rtx insn)
5954
{
5955
  static char string[64];
5956
  bool v9_form = false;
5957
  char *p;
5958
 
5959
  if (TARGET_V9 && INSN_ADDRESSES_SET_P ())
5960
    {
5961
      int delta = (INSN_ADDRESSES (INSN_UID (dest))
5962
                   - INSN_ADDRESSES (INSN_UID (insn)));
5963
      /* Leave some instructions for "slop".  */
5964
      if (delta >= -260000 && delta < 260000)
5965
        v9_form = true;
5966
    }
5967
 
5968
  if (v9_form)
5969
    strcpy (string, "ba%*,pt\t%%xcc, ");
5970
  else
5971
    strcpy (string, "b%*\t");
5972
 
5973
  p = strchr (string, '\0');
5974
  *p++ = '%';
5975
  *p++ = 'l';
5976
  *p++ = '0' + label;
5977
  *p++ = '%';
5978
  *p++ = '(';
5979
  *p = '\0';
5980
 
5981
  return string;
5982
}
5983
 
5984
/* Return the string to output a conditional branch to LABEL, which is
5985
   the operand number of the label.  OP is the conditional expression.
5986
   XEXP (OP, 0) is assumed to be a condition code register (integer or
5987
   floating point) and its mode specifies what kind of comparison we made.
5988
 
5989
   DEST is the destination insn (i.e. the label), INSN is the source.
5990
 
5991
   REVERSED is nonzero if we should reverse the sense of the comparison.
5992
 
5993
   ANNUL is nonzero if we should generate an annulling branch.  */
5994
 
5995
const char *
5996
output_cbranch (rtx op, rtx dest, int label, int reversed, int annul,
5997
                rtx insn)
5998
{
5999
  static char string[64];
6000
  enum rtx_code code = GET_CODE (op);
6001
  rtx cc_reg = XEXP (op, 0);
6002
  enum machine_mode mode = GET_MODE (cc_reg);
6003
  const char *labelno, *branch;
6004
  int spaces = 8, far;
6005
  char *p;
6006
 
6007
  /* v9 branches are limited to +-1MB.  If it is too far away,
6008
     change
6009
 
6010
     bne,pt %xcc, .LC30
6011
 
6012
     to
6013
 
6014
     be,pn %xcc, .+12
6015
      nop
6016
     ba .LC30
6017
 
6018
     and
6019
 
6020
     fbne,a,pn %fcc2, .LC29
6021
 
6022
     to
6023
 
6024
     fbe,pt %fcc2, .+16
6025
      nop
6026
     ba .LC29  */
6027
 
6028
  far = TARGET_V9 && (get_attr_length (insn) >= 3);
6029
  if (reversed ^ far)
6030
    {
6031
      /* Reversal of FP compares requires care -- an ordered compare
6032
         becomes an unordered compare and vice versa.  */
6033
      if (mode == CCFPmode || mode == CCFPEmode)
6034
        code = reverse_condition_maybe_unordered (code);
6035
      else
6036
        code = reverse_condition (code);
6037
    }
6038
 
6039
  /* Start by writing the branch condition.  */
6040
  if (mode == CCFPmode || mode == CCFPEmode)
6041
    {
6042
      switch (code)
6043
        {
6044
        case NE:
6045
          branch = "fbne";
6046
          break;
6047
        case EQ:
6048
          branch = "fbe";
6049
          break;
6050
        case GE:
6051
          branch = "fbge";
6052
          break;
6053
        case GT:
6054
          branch = "fbg";
6055
          break;
6056
        case LE:
6057
          branch = "fble";
6058
          break;
6059
        case LT:
6060
          branch = "fbl";
6061
          break;
6062
        case UNORDERED:
6063
          branch = "fbu";
6064
          break;
6065
        case ORDERED:
6066
          branch = "fbo";
6067
          break;
6068
        case UNGT:
6069
          branch = "fbug";
6070
          break;
6071
        case UNLT:
6072
          branch = "fbul";
6073
          break;
6074
        case UNEQ:
6075
          branch = "fbue";
6076
          break;
6077
        case UNGE:
6078
          branch = "fbuge";
6079
          break;
6080
        case UNLE:
6081
          branch = "fbule";
6082
          break;
6083
        case LTGT:
6084
          branch = "fblg";
6085
          break;
6086
 
6087
        default:
6088
          gcc_unreachable ();
6089
        }
6090
 
6091
      /* ??? !v9: FP branches cannot be preceded by another floating point
6092
         insn.  Because there is currently no concept of pre-delay slots,
6093
         we can fix this only by always emitting a nop before a floating
6094
         point branch.  */
6095
 
6096
      string[0] = '\0';
6097
      if (! TARGET_V9)
6098
        strcpy (string, "nop\n\t");
6099
      strcat (string, branch);
6100
    }
6101
  else
6102
    {
6103
      switch (code)
6104
        {
6105
        case NE:
6106
          branch = "bne";
6107
          break;
6108
        case EQ:
6109
          branch = "be";
6110
          break;
6111
        case GE:
6112
          if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
6113
            branch = "bpos";
6114
          else
6115
            branch = "bge";
6116
          break;
6117
        case GT:
6118
          branch = "bg";
6119
          break;
6120
        case LE:
6121
          branch = "ble";
6122
          break;
6123
        case LT:
6124
          if (mode == CC_NOOVmode || mode == CCX_NOOVmode)
6125
            branch = "bneg";
6126
          else
6127
            branch = "bl";
6128
          break;
6129
        case GEU:
6130
          branch = "bgeu";
6131
          break;
6132
        case GTU:
6133
          branch = "bgu";
6134
          break;
6135
        case LEU:
6136
          branch = "bleu";
6137
          break;
6138
        case LTU:
6139
          branch = "blu";
6140
          break;
6141
 
6142
        default:
6143
          gcc_unreachable ();
6144
        }
6145
      strcpy (string, branch);
6146
    }
6147
  spaces -= strlen (branch);
6148
  p = strchr (string, '\0');
6149
 
6150
  /* Now add the annulling, the label, and a possible noop.  */
6151
  if (annul && ! far)
6152
    {
6153
      strcpy (p, ",a");
6154
      p += 2;
6155
      spaces -= 2;
6156
    }
6157
 
6158
  if (TARGET_V9)
6159
    {
6160
      rtx note;
6161
      int v8 = 0;
6162
 
6163
      if (! far && insn && INSN_ADDRESSES_SET_P ())
6164
        {
6165
          int delta = (INSN_ADDRESSES (INSN_UID (dest))
6166
                       - INSN_ADDRESSES (INSN_UID (insn)));
6167
          /* Leave some instructions for "slop".  */
6168
          if (delta < -260000 || delta >= 260000)
6169
            v8 = 1;
6170
        }
6171
 
6172
      if (mode == CCFPmode || mode == CCFPEmode)
6173
        {
6174
          static char v9_fcc_labelno[] = "%%fccX, ";
6175
          /* Set the char indicating the number of the fcc reg to use.  */
6176
          v9_fcc_labelno[5] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
6177
          labelno = v9_fcc_labelno;
6178
          if (v8)
6179
            {
6180
              gcc_assert (REGNO (cc_reg) == SPARC_FCC_REG);
6181
              labelno = "";
6182
            }
6183
        }
6184
      else if (mode == CCXmode || mode == CCX_NOOVmode)
6185
        {
6186
          labelno = "%%xcc, ";
6187
          gcc_assert (! v8);
6188
        }
6189
      else
6190
        {
6191
          labelno = "%%icc, ";
6192
          if (v8)
6193
            labelno = "";
6194
        }
6195
 
6196
      if (*labelno && insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
6197
        {
6198
          strcpy (p,
6199
                  ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
6200
                  ? ",pt" : ",pn");
6201
          p += 3;
6202
          spaces -= 3;
6203
        }
6204
    }
6205
  else
6206
    labelno = "";
6207
 
6208
  if (spaces > 0)
6209
    *p++ = '\t';
6210
  else
6211
    *p++ = ' ';
6212
  strcpy (p, labelno);
6213
  p = strchr (p, '\0');
6214
  if (far)
6215
    {
6216
      strcpy (p, ".+12\n\t nop\n\tb\t");
6217
      /* Skip the next insn if requested or
6218
         if we know that it will be a nop.  */
6219
      if (annul || ! final_sequence)
6220
        p[3] = '6';
6221
      p += 14;
6222
    }
6223
  *p++ = '%';
6224
  *p++ = 'l';
6225
  *p++ = label + '0';
6226
  *p++ = '%';
6227
  *p++ = '#';
6228
  *p = '\0';
6229
 
6230
  return string;
6231
}
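
/* Editor's illustration (not in the original source): for an integer
   equality test on V9 with a branch predicted taken, the code above
   assembles the template "be,pt\t%%icc, %l0%#"; final then substitutes
   the label operand and the %# delay-slot marker (handled by
   print_operand below), yielding e.g. "be,pt %icc, .LL5".  */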

/* Emit a library call comparison between floating point X and Y.
   COMPARISON is the operator to compare with (EQ, NE, GT, etc).
   Return the new operator to be used in the comparison sequence.

   TARGET_ARCH64 uses _Qp_* functions, which use pointers to TFmode
   values as arguments instead of the TFmode registers themselves,
   that's why we cannot call emit_float_lib_cmp.  */

rtx
sparc_emit_float_lib_cmp (rtx x, rtx y, enum rtx_code comparison)
{
  const char *qpfunc;
  rtx slot0, slot1, result, tem, tem2, libfunc;
  enum machine_mode mode;
  enum rtx_code new_comparison;

  switch (comparison)
    {
    case EQ:
      qpfunc = (TARGET_ARCH64 ? "_Qp_feq" : "_Q_feq");
      break;

    case NE:
      qpfunc = (TARGET_ARCH64 ? "_Qp_fne" : "_Q_fne");
      break;

    case GT:
      qpfunc = (TARGET_ARCH64 ? "_Qp_fgt" : "_Q_fgt");
      break;

    case GE:
      qpfunc = (TARGET_ARCH64 ? "_Qp_fge" : "_Q_fge");
      break;

    case LT:
      qpfunc = (TARGET_ARCH64 ? "_Qp_flt" : "_Q_flt");
      break;

    case LE:
      qpfunc = (TARGET_ARCH64 ? "_Qp_fle" : "_Q_fle");
      break;

    case ORDERED:
    case UNORDERED:
    case UNGT:
    case UNLT:
    case UNEQ:
    case UNGE:
    case UNLE:
    case LTGT:
      qpfunc = (TARGET_ARCH64 ? "_Qp_cmp" : "_Q_cmp");
      break;

    default:
      gcc_unreachable ();
    }

  if (TARGET_ARCH64)
    {
      if (MEM_P (x))
        slot0 = x;
      else
        {
          slot0 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
          emit_move_insn (slot0, x);
        }

      if (MEM_P (y))
        slot1 = y;
      else
        {
          slot1 = assign_stack_temp (TFmode, GET_MODE_SIZE(TFmode), 0);
          emit_move_insn (slot1, y);
        }

      libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
      emit_library_call (libfunc, LCT_NORMAL,
                         DImode, 2,
                         XEXP (slot0, 0), Pmode,
                         XEXP (slot1, 0), Pmode);
      mode = DImode;
    }
  else
    {
      libfunc = gen_rtx_SYMBOL_REF (Pmode, qpfunc);
      emit_library_call (libfunc, LCT_NORMAL,
                         SImode, 2,
                         x, TFmode, y, TFmode);
      mode = SImode;
    }

  /* Immediately move the result of the libcall into a pseudo
     register so reload doesn't clobber the value if it needs
     the return register for a spill reg.  */
  result = gen_reg_rtx (mode);
  emit_move_insn (result, hard_libcall_value (mode, libfunc));

  switch (comparison)
    {
    default:
      return gen_rtx_NE (VOIDmode, result, const0_rtx);
    case ORDERED:
    case UNORDERED:
      new_comparison = (comparison == UNORDERED ? EQ : NE);
      return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, GEN_INT(3));
    case UNGT:
    case UNGE:
      new_comparison = (comparison == UNGT ? GT : NE);
      return gen_rtx_fmt_ee (new_comparison, VOIDmode, result, const1_rtx);
    case UNLE:
      return gen_rtx_NE (VOIDmode, result, const2_rtx);
    case UNLT:
      tem = gen_reg_rtx (mode);
      if (TARGET_ARCH32)
        emit_insn (gen_andsi3 (tem, result, const1_rtx));
      else
        emit_insn (gen_anddi3 (tem, result, const1_rtx));
      return gen_rtx_NE (VOIDmode, tem, const0_rtx);
    case UNEQ:
    case LTGT:
      tem = gen_reg_rtx (mode);
      if (TARGET_ARCH32)
        emit_insn (gen_addsi3 (tem, result, const1_rtx));
      else
        emit_insn (gen_adddi3 (tem, result, const1_rtx));
      tem2 = gen_reg_rtx (mode);
      if (TARGET_ARCH32)
        emit_insn (gen_andsi3 (tem2, tem, const2_rtx));
      else
        emit_insn (gen_anddi3 (tem2, tem, const2_rtx));
      new_comparison = (comparison == UNEQ ? EQ : NE);
      return gen_rtx_fmt_ee (new_comparison, VOIDmode, tem2, const0_rtx);
    }

  gcc_unreachable ();
}
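
/* Editor's note, inferred from the result decoding above (not original
   text): _Q_cmp/_Qp_cmp evidently return 0 for equal, 1 for less,
   2 for greater and 3 for unordered, which is why e.g. UNLE becomes
   "result != 2" and UNORDERED becomes "result == 3".  */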

/* Generate an unsigned DImode to FP conversion.  This is the same code
   optabs would emit if we didn't have TFmode patterns.  */

void
sparc_emit_floatunsdi (rtx *operands, enum machine_mode mode)
{
  rtx neglab, donelab, i0, i1, f0, in, out;

  out = operands[0];
  in = force_reg (DImode, operands[1]);
  neglab = gen_label_rtx ();
  donelab = gen_label_rtx ();
  i0 = gen_reg_rtx (DImode);
  i1 = gen_reg_rtx (DImode);
  f0 = gen_reg_rtx (mode);

  emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab);

  emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_FLOAT (mode, in)));
  emit_jump_insn (gen_jump (donelab));
  emit_barrier ();

  emit_label (neglab);

  emit_insn (gen_lshrdi3 (i0, in, const1_rtx));
  emit_insn (gen_anddi3 (i1, in, const1_rtx));
  emit_insn (gen_iordi3 (i0, i0, i1));
  emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_FLOAT (mode, i0)));
  emit_insn (gen_rtx_SET (VOIDmode, out, gen_rtx_PLUS (mode, f0, f0)));

  emit_label (donelab);
}
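
/* Editor's sketch of the negative path above in plain C (illustration
   only, not original source):

     if ((long long) in < 0)
       {
         unsigned long long i0 = (in >> 1) | (in & 1);  /* halve, keep sticky bit */
         double f0 = (double) (long long) i0;           /* now fits as signed */
         out = f0 + f0;                                 /* double it back */
       }

   Folding the discarded low bit back into i0 keeps the final rounding
   correct.  */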

/* Generate an FP to unsigned DImode conversion.  This is the same code
   optabs would emit if we didn't have TFmode patterns.  */

void
sparc_emit_fixunsdi (rtx *operands, enum machine_mode mode)
{
  rtx neglab, donelab, i0, i1, f0, in, out, limit;

  out = operands[0];
  in = force_reg (mode, operands[1]);
  neglab = gen_label_rtx ();
  donelab = gen_label_rtx ();
  i0 = gen_reg_rtx (DImode);
  i1 = gen_reg_rtx (DImode);
  limit = gen_reg_rtx (mode);
  f0 = gen_reg_rtx (mode);

  emit_move_insn (limit,
                  CONST_DOUBLE_FROM_REAL_VALUE (
                    REAL_VALUE_ATOF ("9223372036854775808.0", mode), mode));
  emit_cmp_and_jump_insns (in, limit, GE, NULL_RTX, mode, 0, neglab);

  emit_insn (gen_rtx_SET (VOIDmode,
                          out,
                          gen_rtx_FIX (DImode, gen_rtx_FIX (mode, in))));
  emit_jump_insn (gen_jump (donelab));
  emit_barrier ();

  emit_label (neglab);

  emit_insn (gen_rtx_SET (VOIDmode, f0, gen_rtx_MINUS (mode, in, limit)));
  emit_insn (gen_rtx_SET (VOIDmode,
                          i0,
                          gen_rtx_FIX (DImode, gen_rtx_FIX (mode, f0))));
  emit_insn (gen_movdi (i1, const1_rtx));
  emit_insn (gen_ashldi3 (i1, i1, GEN_INT (63)));
  emit_insn (gen_xordi3 (out, i0, i1));

  emit_label (donelab);
}
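
/* Editor's sketch of the out-of-range path above in plain C
   (illustration only, not original source):

     if (in >= 9223372036854775808.0)   /* 2^63: too big for signed */
       out = (unsigned long long)
               (long long) (in - 9223372036854775808.0)
             ^ (1ULL << 63);            /* add 2^63 back via XOR */

   Subtracting 2^63 brings the value into signed range for the fix
   insn; XORing the sign bit into the result restores the offset.  */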

/* Return the string to output a conditional branch to LABEL, testing
   register REG.  LABEL is the operand number of the label; REG is the
   operand number of the reg.  OP is the conditional expression.  The mode
   of REG says what kind of comparison we made.

   DEST is the destination insn (i.e. the label), INSN is the source.

   REVERSED is nonzero if we should reverse the sense of the comparison.

   ANNUL is nonzero if we should generate an annulling branch.  */

const char *
output_v9branch (rtx op, rtx dest, int reg, int label, int reversed,
                 int annul, rtx insn)
{
  static char string[64];
  enum rtx_code code = GET_CODE (op);
  enum machine_mode mode = GET_MODE (XEXP (op, 0));
  rtx note;
  int far;
  char *p;

  /* Branches on a register are limited to +-128KB.  If it is too far away,
     change

     brnz,pt %g1, .LC30

     to

     brz,pn %g1, .+12
      nop
     ba,pt %xcc, .LC30

     and

     brgez,a,pn %o1, .LC29

     to

     brlz,pt %o1, .+16
      nop
     ba,pt %xcc, .LC29  */

  far = get_attr_length (insn) >= 3;

  /* If not floating-point or if EQ or NE, we can just reverse the code.  */
  if (reversed ^ far)
    code = reverse_condition (code);

  /* Only 64 bit versions of these instructions exist.  */
  gcc_assert (mode == DImode);

  /* Start by writing the branch condition.  */

  switch (code)
    {
    case NE:
      strcpy (string, "brnz");
      break;

    case EQ:
      strcpy (string, "brz");
      break;

    case GE:
      strcpy (string, "brgez");
      break;

    case LT:
      strcpy (string, "brlz");
      break;

    case LE:
      strcpy (string, "brlez");
      break;

    case GT:
      strcpy (string, "brgz");
      break;

    default:
      gcc_unreachable ();
    }

  p = strchr (string, '\0');

  /* Now add the annulling, reg, label, and nop.  */
  if (annul && ! far)
    {
      strcpy (p, ",a");
      p += 2;
    }

  if (insn && (note = find_reg_note (insn, REG_BR_PROB, NULL_RTX)))
    {
      strcpy (p,
              ((INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2) ^ far)
              ? ",pt" : ",pn");
      p += 3;
    }

  *p = p < string + 8 ? '\t' : ' ';
  p++;
  *p++ = '%';
  *p++ = '0' + reg;
  *p++ = ',';
  *p++ = ' ';
  if (far)
    {
      int veryfar = 1, delta;

      if (INSN_ADDRESSES_SET_P ())
        {
          delta = (INSN_ADDRESSES (INSN_UID (dest))
                   - INSN_ADDRESSES (INSN_UID (insn)));
          /* Leave some instructions for "slop".  */
          if (delta >= -260000 && delta < 260000)
            veryfar = 0;
        }

      strcpy (p, ".+12\n\t nop\n\t");
      /* Skip the next insn if requested or
         if we know that it will be a nop.  */
      if (annul || ! final_sequence)
        p[3] = '6';
      p += 12;
      if (veryfar)
        {
          strcpy (p, "b\t");
          p += 2;
        }
      else
        {
          strcpy (p, "ba,pt\t%%xcc, ");
          p += 13;
        }
    }
  *p++ = '%';
  *p++ = 'l';
  *p++ = '0' + label;
  *p++ = '%';
  *p++ = '#';
  *p = '\0';

  return string;
}

/* Return 1 if any of the registers of the instruction are %l[0-7] or %o[0-7].
   Such instructions cannot be used in the delay slot of a return insn on V9.
   If TEST is 0, also rename all %i[0-7] registers to their %o[0-7]
   counterparts.  */

static int
epilogue_renumber (register rtx *where, int test)
{
  register const char *fmt;
  register int i;
  register enum rtx_code code;

  if (*where == 0)
    return 0;

  code = GET_CODE (*where);

  switch (code)
    {
    case REG:
      if (REGNO (*where) >= 8 && REGNO (*where) < 24)      /* oX or lX */
        return 1;
      if (! test && REGNO (*where) >= 24 && REGNO (*where) < 32)
        *where = gen_rtx_REG (GET_MODE (*where), OUTGOING_REGNO (REGNO(*where)));
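      /* Fall through: the cases below simply return 0, which is also
         correct for a REG handled here.  (Comment added for clarity;
         the fall-through is intentional in the original code.)  */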
    case SCRATCH:
    case CC0:
    case PC:
    case CONST_INT:
    case CONST_DOUBLE:
      return 0;

      /* Do not replace the frame pointer with the stack pointer because
         it can cause the delayed instruction to load below the stack.
         This occurs when instructions like:

         (set (reg/i:SI 24 %i0)
             (mem/f:SI (plus:SI (reg/f:SI 30 %fp)
                       (const_int -20 [0xffffffec])) 0))

         are in the return delay slot.  */
    case PLUS:
      if (GET_CODE (XEXP (*where, 0)) == REG
          && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM
          && (GET_CODE (XEXP (*where, 1)) != CONST_INT
              || INTVAL (XEXP (*where, 1)) < SPARC_STACK_BIAS))
        return 1;
      break;

    case MEM:
      if (SPARC_STACK_BIAS
          && GET_CODE (XEXP (*where, 0)) == REG
          && REGNO (XEXP (*where, 0)) == HARD_FRAME_POINTER_REGNUM)
        return 1;
      break;

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
        {
          register int j;
          for (j = XVECLEN (*where, i) - 1; j >= 0; j--)
            if (epilogue_renumber (&(XVECEXP (*where, i, j)), test))
              return 1;
        }
      else if (fmt[i] == 'e'
               && epilogue_renumber (&(XEXP (*where, i)), test))
        return 1;
    }
  return 0;
}

/* Leaf functions and non-leaf functions have different needs.  */

static const int
reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;

static const int
reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;

static const int *const reg_alloc_orders[] = {
  reg_leaf_alloc_order,
  reg_nonleaf_alloc_order};

void
order_regs_for_local_alloc (void)
{
  static int last_order_nonleaf = 1;

  if (df_regs_ever_live_p (15) != last_order_nonleaf)
    {
      last_order_nonleaf = !last_order_nonleaf;
      memcpy ((char *) reg_alloc_order,
              (const char *) reg_alloc_orders[last_order_nonleaf],
              FIRST_PSEUDO_REGISTER * sizeof (int));
    }
}
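
/* Editor's note (added for clarity): hard register 15 is %o7, which
   receives the return address on a call, so df_regs_ever_live_p (15)
   is in effect a "this function makes calls" test that selects the
   non-leaf allocation order.  */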

/* Return 1 if REG and MEM are legitimate enough to allow the various
   mem<-->reg splits to be run.  */

int
sparc_splitdi_legitimate (rtx reg, rtx mem)
{
  /* Punt if we are here by mistake.  */
  gcc_assert (reload_completed);

  /* We must have an offsettable memory reference.  */
  if (! offsettable_memref_p (mem))
    return 0;

  /* If we have legitimate args for ldd/std, we do not want
     the split to happen.  */
  if ((REGNO (reg) % 2) == 0
      && mem_min_alignment (mem, 8))
    return 0;

  /* Success.  */
  return 1;
}

/* Return 1 if x and y are some kind of REG and they refer to
   different hard registers.  This test is guaranteed to be
   run after reload.  */

int
sparc_absnegfloat_split_legitimate (rtx x, rtx y)
{
  if (GET_CODE (x) != REG)
    return 0;
  if (GET_CODE (y) != REG)
    return 0;
  if (REGNO (x) == REGNO (y))
    return 0;
  return 1;
}

/* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
   This makes them candidates for using ldd and std insns.

   Note reg1 and reg2 *must* be hard registers.  */

int
registers_ok_for_ldd_peep (rtx reg1, rtx reg2)
{
  /* We might have been passed a SUBREG.  */
  if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
    return 0;

  if (REGNO (reg1) % 2 != 0)
    return 0;

  /* Integer ldd is deprecated in SPARC V9.  */
  if (TARGET_V9 && REGNO (reg1) < 32)
    return 0;

  return (REGNO (reg1) == REGNO (reg2) - 1);
}

/* Return 1 if the addresses in mem1 and mem2 are suitable for use in
   an ldd or std insn.

   This can only happen when addr1 and addr2, the addresses in mem1
   and mem2, are consecutive memory locations (addr1 + 4 == addr2).
   addr1 must also be aligned on a 64-bit boundary.

   Also if dependent_reg_rtx is not null it should not be used to
   compute the address for mem1, i.e. we cannot optimize a sequence
   like:
        ld [%o0], %o0
        ld [%o0 + 4], %o1
   to
        ldd [%o0], %o0
   nor:
        ld [%g3 + 4], %g3
        ld [%g3], %g2
   to
        ldd [%g3], %g2

   But, note that the transformation from:
        ld [%g2 + 4], %g3
        ld [%g2], %g2
   to
        ldd [%g2], %g2
   is perfectly fine.  Thus, the peephole2 patterns always pass us
   the destination register of the first load, never the second one.

   For stores we don't have a similar problem, so dependent_reg_rtx is
   NULL_RTX.  */

int
mems_ok_for_ldd_peep (rtx mem1, rtx mem2, rtx dependent_reg_rtx)
{
  rtx addr1, addr2;
  unsigned int reg1;
  HOST_WIDE_INT offset1;

  /* The mems cannot be volatile.  */
  if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
    return 0;

  /* MEM1 should be aligned on a 64-bit boundary.  */
  if (MEM_ALIGN (mem1) < 64)
    return 0;

  addr1 = XEXP (mem1, 0);
  addr2 = XEXP (mem2, 0);

  /* Extract a register number and offset (if used) from the first addr.  */
  if (GET_CODE (addr1) == PLUS)
    {
      /* If not a REG, return zero.  */
      if (GET_CODE (XEXP (addr1, 0)) != REG)
        return 0;
      else
        {
          reg1 = REGNO (XEXP (addr1, 0));
          /* The offset must be constant!  */
          if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
            return 0;
          offset1 = INTVAL (XEXP (addr1, 1));
        }
    }
  else if (GET_CODE (addr1) != REG)
    return 0;
  else
    {
      reg1 = REGNO (addr1);
      /* This was a simple (mem (reg)) expression.  Offset is 0.  */
      offset1 = 0;
    }

  /* Make sure the second address is a (mem (plus (reg) (const_int))).  */
  if (GET_CODE (addr2) != PLUS)
    return 0;

  if (GET_CODE (XEXP (addr2, 0)) != REG
      || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
    return 0;

  if (reg1 != REGNO (XEXP (addr2, 0)))
    return 0;

  if (dependent_reg_rtx != NULL_RTX && reg1 == REGNO (dependent_reg_rtx))
    return 0;

  /* The first offset must be evenly divisible by 8 to ensure the
     address is 64 bit aligned.  */
  if (offset1 % 8 != 0)
    return 0;

  /* The offset for the second addr must be 4 more than the first addr.  */
  if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
    return 0;

  /* All the tests passed.  addr1 and addr2 are valid for ldd and std
     instructions.  */
  return 1;
}

/* Return 1 if reg is a pseudo, or is the first register in
   a hard register pair.  This makes it suitable for use in
   ldd and std insns.  */

int
register_ok_for_ldd (rtx reg)
{
  /* We might have been passed a SUBREG.  */
  if (!REG_P (reg))
    return 0;

  if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
    return (REGNO (reg) % 2 == 0);

  return 1;
}

/* Return 1 if OP is a memory whose address is known to be
   aligned to an 8-byte boundary, or a pseudo during reload.
   This makes it suitable for use in ldd and std insns.  */

int
memory_ok_for_ldd (rtx op)
{
  if (MEM_P (op))
    {
      /* In 64-bit mode, we assume that the address is word-aligned.  */
      if (TARGET_ARCH32 && !mem_min_alignment (op, 8))
        return 0;

      if ((reload_in_progress || reload_completed)
          && !strict_memory_address_p (Pmode, XEXP (op, 0)))
        return 0;
    }
  else if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
    {
      if (!(reload_in_progress && reg_renumber [REGNO (op)] < 0))
        return 0;
    }
  else
    return 0;

  return 1;
}

/* Print operand X (an rtx) in assembler syntax to file FILE.
   CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
   For `%' followed by punctuation, CODE is the punctuation and X is null.  */

void
print_operand (FILE *file, rtx x, int code)
{
  switch (code)
    {
    case '#':
      /* Output an insn in a delay slot.  */
      if (final_sequence)
        sparc_indent_opcode = 1;
      else
        fputs ("\n\t nop", file);
      return;
    case '*':
      /* Output an annul flag if there's nothing for the delay slot and we
         are optimizing.  This is always used with '(' below.
         Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
         this is a dbx bug.  So, we only do this when optimizing.
         On UltraSPARC, a branch in a delay slot causes a pipeline flush.
         Always emit a nop in case the next instruction is a branch.  */
      if (! final_sequence && (optimize && (int)sparc_cpu < PROCESSOR_V9))
        fputs (",a", file);
      return;
    case '(':
      /* Output a 'nop' if there's nothing for the delay slot and we are
         not optimizing.  This is always used with '*' above.  */
      if (! final_sequence && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
        fputs ("\n\t nop", file);
      else if (final_sequence)
        sparc_indent_opcode = 1;
      return;
    case ')':
      /* Output the right displacement from the saved PC on function return.
         The caller may have placed an "unimp" insn immediately after the call
         so we have to account for it.  This insn is used in the 32-bit ABI
         when calling a function that returns a non zero-sized structure.  The
         64-bit ABI doesn't have it.  Be careful to have this test be the same
         as that for the call.  The exception is when sparc_std_struct_return
         is enabled, the psABI is followed exactly and the adjustment is made
         by the code in sparc_struct_value_rtx.  The call emitted is the same
         when sparc_std_struct_return is enabled.  */
      if (!TARGET_ARCH64
          && cfun->returns_struct
          && !sparc_std_struct_return
          && DECL_SIZE (DECL_RESULT (current_function_decl))
          && TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl)))
             == INTEGER_CST
          && !integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))))
        fputs ("12", file);
      else
        fputc ('8', file);
      return;
    case '_':
      /* Output the Embedded Medium/Anywhere code model base register.  */
      fputs (EMBMEDANY_BASE_REG, file);
      return;
    case '&':
      /* Print some local dynamic TLS name.  */
      assemble_name (file, get_some_local_dynamic_name ());
      return;

    case 'Y':
      /* Adjust the operand to take into account a RESTORE operation.  */
      if (GET_CODE (x) == CONST_INT)
        break;
      else if (GET_CODE (x) != REG)
        output_operand_lossage ("invalid %%Y operand");
      else if (REGNO (x) < 8)
        fputs (reg_names[REGNO (x)], file);
      else if (REGNO (x) >= 24 && REGNO (x) < 32)
        fputs (reg_names[REGNO (x)-16], file);
      else
        output_operand_lossage ("invalid %%Y operand");
      return;
    case 'L':
      /* Print out the low order register name of a register pair.  */
      if (WORDS_BIG_ENDIAN)
        fputs (reg_names[REGNO (x)+1], file);
      else
        fputs (reg_names[REGNO (x)], file);
      return;
    case 'H':
      /* Print out the high order register name of a register pair.  */
      if (WORDS_BIG_ENDIAN)
        fputs (reg_names[REGNO (x)], file);
      else
        fputs (reg_names[REGNO (x)+1], file);
      return;
    case 'R':
      /* Print out the second register name of a register pair or quad.
         I.e., R (%o0) => %o1.  */
      fputs (reg_names[REGNO (x)+1], file);
      return;
    case 'S':
      /* Print out the third register name of a register quad.
         I.e., S (%o0) => %o2.  */
      fputs (reg_names[REGNO (x)+2], file);
      return;
    case 'T':
      /* Print out the fourth register name of a register quad.
         I.e., T (%o0) => %o3.  */
      fputs (reg_names[REGNO (x)+3], file);
      return;
    case 'x':
      /* Print a condition code register.  */
      if (REGNO (x) == SPARC_ICC_REG)
        {
          /* We don't handle CC[X]_NOOVmode because they're not supposed
             to occur here.  */
          if (GET_MODE (x) == CCmode)
            fputs ("%icc", file);
          else if (GET_MODE (x) == CCXmode)
            fputs ("%xcc", file);
          else
            gcc_unreachable ();
        }
      else
        /* %fccN register */
        fputs (reg_names[REGNO (x)], file);
      return;
    case 'm':
      /* Print the operand's address only.  */
      output_address (XEXP (x, 0));
      return;
    case 'r':
      /* In this case we need a register.  Use %g0 if the
         operand is const0_rtx.  */
      if (x == const0_rtx
          || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
        {
          fputs ("%g0", file);
          return;
        }
      else
        break;

    case 'A':
      switch (GET_CODE (x))
        {
        case IOR: fputs ("or", file); break;
        case AND: fputs ("and", file); break;
        case XOR: fputs ("xor", file); break;
        default: output_operand_lossage ("invalid %%A operand");
        }
      return;

    case 'B':
      switch (GET_CODE (x))
        {
        case IOR: fputs ("orn", file); break;
        case AND: fputs ("andn", file); break;
        case XOR: fputs ("xnor", file); break;
        default: output_operand_lossage ("invalid %%B operand");
        }
      return;

      /* These are used by the conditional move instructions.  */
    case 'c':
    case 'C':
      {
        enum rtx_code rc = GET_CODE (x);

        if (code == 'c')
          {
            enum machine_mode mode = GET_MODE (XEXP (x, 0));
            if (mode == CCFPmode || mode == CCFPEmode)
              rc = reverse_condition_maybe_unordered (GET_CODE (x));
            else
              rc = reverse_condition (GET_CODE (x));
          }
        switch (rc)
          {
          case NE: fputs ("ne", file); break;
          case EQ: fputs ("e", file); break;
          case GE: fputs ("ge", file); break;
          case GT: fputs ("g", file); break;
          case LE: fputs ("le", file); break;
          case LT: fputs ("l", file); break;
          case GEU: fputs ("geu", file); break;
          case GTU: fputs ("gu", file); break;
          case LEU: fputs ("leu", file); break;
          case LTU: fputs ("lu", file); break;
          case LTGT: fputs ("lg", file); break;
          case UNORDERED: fputs ("u", file); break;
          case ORDERED: fputs ("o", file); break;
          case UNLT: fputs ("ul", file); break;
          case UNLE: fputs ("ule", file); break;
          case UNGT: fputs ("ug", file); break;
          case UNGE: fputs ("uge", file); break;
          case UNEQ: fputs ("ue", file); break;
          default: output_operand_lossage (code == 'c'
                                           ? "invalid %%c operand"
                                           : "invalid %%C operand");
          }
        return;
      }

      /* These are used by the movr instruction pattern.  */
    case 'd':
    case 'D':
      {
        enum rtx_code rc = (code == 'd'
                            ? reverse_condition (GET_CODE (x))
                            : GET_CODE (x));
        switch (rc)
          {
          case NE: fputs ("ne", file); break;
          case EQ: fputs ("e", file); break;
          case GE: fputs ("gez", file); break;
          case LT: fputs ("lz", file); break;
          case LE: fputs ("lez", file); break;
          case GT: fputs ("gz", file); break;
          default: output_operand_lossage (code == 'd'
                                           ? "invalid %%d operand"
                                           : "invalid %%D operand");
          }
        return;
      }

    case 'b':
      {
        /* Print a sign-extended character.  */
        int i = trunc_int_for_mode (INTVAL (x), QImode);
        fprintf (file, "%d", i);
        return;
      }

    case 'f':
      /* Operand must be a MEM; write its address.  */
      if (GET_CODE (x) != MEM)
        output_operand_lossage ("invalid %%f operand");
      output_address (XEXP (x, 0));
      return;

    case 's':
      {
        /* Print a sign-extended 32-bit value.  */
        HOST_WIDE_INT i;
        if (GET_CODE (x) == CONST_INT)
          i = INTVAL (x);
        else if (GET_CODE (x) == CONST_DOUBLE)
          i = CONST_DOUBLE_LOW (x);
        else
          {
            output_operand_lossage ("invalid %%s operand");
            return;
          }
        i = trunc_int_for_mode (i, SImode);
        fprintf (file, HOST_WIDE_INT_PRINT_DEC, i);
        return;
      }

    case 0:
      /* Do nothing special.  */
      break;

    default:
      /* Undocumented flag.  */
      output_operand_lossage ("invalid operand output code");
    }

  if (GET_CODE (x) == REG)
    fputs (reg_names[REGNO (x)], file);
  else if (GET_CODE (x) == MEM)
    {
      fputc ('[', file);
      /* Poor Sun assembler doesn't understand absolute addressing.  */
      if (CONSTANT_P (XEXP (x, 0)))
        fputs ("%g0+", file);
      output_address (XEXP (x, 0));
      fputc (']', file);
    }
  else if (GET_CODE (x) == HIGH)
    {
      fputs ("%hi(", file);
      output_addr_const (file, XEXP (x, 0));
      fputc (')', file);
    }
  else if (GET_CODE (x) == LO_SUM)
    {
      print_operand (file, XEXP (x, 0), 0);
      if (TARGET_CM_MEDMID)
        fputs ("+%l44(", file);
      else
        fputs ("+%lo(", file);
      output_addr_const (file, XEXP (x, 1));
      fputc (')', file);
    }
  else if (GET_CODE (x) == CONST_DOUBLE
           && (GET_MODE (x) == VOIDmode
               || GET_MODE_CLASS (GET_MODE (x)) == MODE_INT))
    {
      if (CONST_DOUBLE_HIGH (x) == 0)
        fprintf (file, "%u", (unsigned int) CONST_DOUBLE_LOW (x));
      else if (CONST_DOUBLE_HIGH (x) == -1
               && CONST_DOUBLE_LOW (x) < 0)
        fprintf (file, "%d", (int) CONST_DOUBLE_LOW (x));
      else
        output_operand_lossage ("long long constant not a valid immediate operand");
    }
  else if (GET_CODE (x) == CONST_DOUBLE)
    output_operand_lossage ("floating point constant not a valid immediate operand");
  else
    output_addr_const (file, x);
}
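
/* Editor's illustration (not in the original source): the punctuation
   codes above combine in branch templates such as "b%*\t%l0%(".  With
   an empty delay slot, no optimization and a pre-V9 CPU this prints
   "b .LL4" followed by an explicit nop via %(; with optimization it
   prints the annulled form "b,a .LL4" via %* and omits the nop.  */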

/* Target hook for assembling integer objects.  The sparc version has
   special handling for aligned DI-mode objects.  */

static bool
sparc_assemble_integer (rtx x, unsigned int size, int aligned_p)
{
  /* ??? We only output .xword's for symbols and only then in environments
     where the assembler can handle them.  */
  if (aligned_p && size == 8
      && (GET_CODE (x) != CONST_INT && GET_CODE (x) != CONST_DOUBLE))
    {
      if (TARGET_V9)
        {
          assemble_integer_with_op ("\t.xword\t", x);
          return true;
        }
      else
        {
          assemble_aligned_integer (4, const0_rtx);
          assemble_aligned_integer (4, x);
          return true;
        }
    }
  return default_assemble_integer (x, size, aligned_p);
}

/* Return the value of a code used in the .proc pseudo-op that says
   what kind of result this function returns.  For non-C types, we pick
   the closest C type.  */

#ifndef SHORT_TYPE_SIZE
#define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
#endif

#ifndef INT_TYPE_SIZE
#define INT_TYPE_SIZE BITS_PER_WORD
#endif

#ifndef LONG_TYPE_SIZE
#define LONG_TYPE_SIZE BITS_PER_WORD
#endif

#ifndef LONG_LONG_TYPE_SIZE
#define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
#endif

#ifndef FLOAT_TYPE_SIZE
#define FLOAT_TYPE_SIZE BITS_PER_WORD
#endif

#ifndef DOUBLE_TYPE_SIZE
#define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
#endif

#ifndef LONG_DOUBLE_TYPE_SIZE
#define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
#endif

unsigned long
sparc_type_code (register tree type)
{
  register unsigned long qualifiers = 0;
  register unsigned shift;

  /* Only the first 30 bits of the qualifier are valid.  We must refrain from
     setting more, since some assemblers will give an error for this.  Also,
     we must be careful to avoid shifts of 32 bits or more to avoid getting
     unpredictable results.  */

  for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
    {
      switch (TREE_CODE (type))
        {
        case ERROR_MARK:
          return qualifiers;

        case ARRAY_TYPE:
          qualifiers |= (3 << shift);
          break;

        case FUNCTION_TYPE:
        case METHOD_TYPE:
          qualifiers |= (2 << shift);
          break;

        case POINTER_TYPE:
        case REFERENCE_TYPE:
        case OFFSET_TYPE:
          qualifiers |= (1 << shift);
          break;

        case RECORD_TYPE:
          return (qualifiers | 8);

        case UNION_TYPE:
        case QUAL_UNION_TYPE:
          return (qualifiers | 9);

        case ENUMERAL_TYPE:
          return (qualifiers | 10);

        case VOID_TYPE:
          return (qualifiers | 16);

        case INTEGER_TYPE:
          /* If this is a range type, consider it to be the underlying
             type.  */
          if (TREE_TYPE (type) != 0)
            break;

          /* Carefully distinguish all the standard types of C,
             without messing up if the language is not C.  We do this by
             testing TYPE_PRECISION and TYPE_UNSIGNED.  The old code used to
             look at both the names and the above fields, but that's redundant.
             Any type whose size is between two C types will be considered
             to be the wider of the two types.  Also, we do not have a
             special code to use for "long long", so anything wider than
             long is treated the same.  Note that we can't distinguish
             between "int" and "long" in this code if they are the same
             size, but that's fine, since neither can the assembler.  */

          if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
            return (qualifiers | (TYPE_UNSIGNED (type) ? 12 : 2));

          else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
            return (qualifiers | (TYPE_UNSIGNED (type) ? 13 : 3));

          else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
            return (qualifiers | (TYPE_UNSIGNED (type) ? 14 : 4));

          else
            return (qualifiers | (TYPE_UNSIGNED (type) ? 15 : 5));

        case REAL_TYPE:
          /* If this is a range type, consider it to be the underlying
             type.  */
          if (TREE_TYPE (type) != 0)
            break;

          /* Carefully distinguish all the standard types of C,
             without messing up if the language is not C.  */

          if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
            return (qualifiers | 6);

          else
            return (qualifiers | 7);

        case COMPLEX_TYPE:      /* GNU Fortran COMPLEX type.  */
          /* ??? We need to distinguish between double and float complex types,
             but I don't know how yet because I can't reach this code from
             existing front-ends.  */
          return (qualifiers | 7);      /* Who knows? */

        case VECTOR_TYPE:
        case BOOLEAN_TYPE:      /* Boolean truth value type.  */
        case LANG_TYPE:         /* ? */
          return qualifiers;

        default:
          gcc_unreachable ();           /* Not a type! */
        }
    }

  return qualifiers;
}
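
/* Editor's worked example (inferred from the loop above, not original
   text): for "int *" the first iteration sees POINTER_TYPE and sets
   1 << 6, the second reaches the int itself and returns code 4, giving
   0x44 overall; each additional level of array/function/pointer
   nesting shifts its 2-bit code left by another two bits.  */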

/* Nested function support.  */

/* Emit RTL insns to initialize the variable parts of a trampoline.
   FNADDR is an RTX for the address of the function's pure code.
   CXT is an RTX for the static chain value for the function.

   This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
   (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
   (to store insns).  This is a bit excessive.  Perhaps a different
   mechanism would be better here.

   Emit enough FLUSH insns to synchronize the data and instruction caches.  */

static void
sparc32_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
{
  /* SPARC 32-bit trampoline:

        sethi   %hi(fn), %g1
        sethi   %hi(static), %g2
        jmp     %g1+%lo(fn)
        or      %g2, %lo(static), %g2

    SETHI i,r  = 00rr rrr1 00ii iiii iiii iiii iiii iiii
    JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii
   */

  emit_move_insn
    (adjust_address (m_tramp, SImode, 0),
     expand_binop (SImode, ior_optab,
                   expand_shift (RSHIFT_EXPR, SImode, fnaddr,
                                 size_int (10), 0, 1),
                   GEN_INT (trunc_int_for_mode (0x03000000, SImode)),
                   NULL_RTX, 1, OPTAB_DIRECT));

  emit_move_insn
    (adjust_address (m_tramp, SImode, 4),
     expand_binop (SImode, ior_optab,
                   expand_shift (RSHIFT_EXPR, SImode, cxt,
                                 size_int (10), 0, 1),
                   GEN_INT (trunc_int_for_mode (0x05000000, SImode)),
                   NULL_RTX, 1, OPTAB_DIRECT));

  emit_move_insn
    (adjust_address (m_tramp, SImode, 8),
     expand_binop (SImode, ior_optab,
                   expand_and (SImode, fnaddr, GEN_INT (0x3ff), NULL_RTX),
                   GEN_INT (trunc_int_for_mode (0x81c06000, SImode)),
                   NULL_RTX, 1, OPTAB_DIRECT));

  emit_move_insn
    (adjust_address (m_tramp, SImode, 12),
     expand_binop (SImode, ior_optab,
                   expand_and (SImode, cxt, GEN_INT (0x3ff), NULL_RTX),
                   GEN_INT (trunc_int_for_mode (0x8410a000, SImode)),
                   NULL_RTX, 1, OPTAB_DIRECT));

  /* On UltraSPARC a flush flushes an entire cache line.  The trampoline is
     aligned on a 16 byte boundary so one flush clears it all.  */
  emit_insn (gen_flush (validize_mem (adjust_address (m_tramp, SImode, 0))));
  if (sparc_cpu != PROCESSOR_ULTRASPARC
      && sparc_cpu != PROCESSOR_ULTRASPARC3
      && sparc_cpu != PROCESSOR_NIAGARA
      && sparc_cpu != PROCESSOR_NIAGARA2)
    emit_insn (gen_flush (validize_mem (adjust_address (m_tramp, SImode, 8))));

  /* Call __enable_execute_stack after writing onto the stack to make sure
     the stack address is accessible.  */
#ifdef ENABLE_EXECUTE_STACK
  emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
                     LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
#endif
}
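
/* Editor's note (added for clarity): the magic constants above are the
   instruction templates from the comment with their register fields
   filled in -- 0x03000000 is "sethi 0, %g1", so OR-ing it with
   fnaddr >> 10 forms "sethi %hi(fn), %g1", and 0x81c06000 is
   "jmp %g1 + 0" awaiting the low 10 address bits.  */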

/* The 64-bit version is simpler because it makes more sense to load the
   values as "immediate" data out of the trampoline.  It's also easier since
   we can read the PC without clobbering a register.  */

static void
sparc64_initialize_trampoline (rtx m_tramp, rtx fnaddr, rtx cxt)
{
  /* SPARC 64-bit trampoline:

        rd      %pc, %g1
        ldx     [%g1+24], %g5
        jmp     %g5
        ldx     [%g1+16], %g5
        +16 bytes data
   */

  emit_move_insn (adjust_address (m_tramp, SImode, 0),
                  GEN_INT (trunc_int_for_mode (0x83414000, SImode)));
  emit_move_insn (adjust_address (m_tramp, SImode, 4),
                  GEN_INT (trunc_int_for_mode (0xca586018, SImode)));
  emit_move_insn (adjust_address (m_tramp, SImode, 8),
                  GEN_INT (trunc_int_for_mode (0x81c14000, SImode)));
  emit_move_insn (adjust_address (m_tramp, SImode, 12),
                  GEN_INT (trunc_int_for_mode (0xca586010, SImode)));
  emit_move_insn (adjust_address (m_tramp, DImode, 16), cxt);
  emit_move_insn (adjust_address (m_tramp, DImode, 24), fnaddr);
  emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 0))));

  if (sparc_cpu != PROCESSOR_ULTRASPARC
      && sparc_cpu != PROCESSOR_ULTRASPARC3
      && sparc_cpu != PROCESSOR_NIAGARA
      && sparc_cpu != PROCESSOR_NIAGARA2)
    emit_insn (gen_flushdi (validize_mem (adjust_address (m_tramp, DImode, 8))));

  /* Call __enable_execute_stack after writing onto the stack to make sure
     the stack address is accessible.  */
#ifdef ENABLE_EXECUTE_STACK
  emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__enable_execute_stack"),
                     LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
#endif
}

/* Worker for TARGET_TRAMPOLINE_INIT.  */

static void
sparc_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
{
  rtx fnaddr = force_reg (Pmode, XEXP (DECL_RTL (fndecl), 0));
  cxt = force_reg (Pmode, cxt);
  if (TARGET_ARCH64)
    sparc64_initialize_trampoline (m_tramp, fnaddr, cxt);
  else
    sparc32_initialize_trampoline (m_tramp, fnaddr, cxt);
}

/* Adjust the cost of a scheduling dependency.  Return the new cost of
   a dependency LINK or INSN on DEP_INSN.  COST is the current cost.  */

static int
supersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
{
  enum attr_type insn_type;

  if (! recog_memoized (insn))
    return 0;

  insn_type = get_attr_type (insn);

  if (REG_NOTE_KIND (link) == 0)
    {
      /* Data dependency; DEP_INSN writes a register that INSN reads some
         cycles later.  */

      /* If a load, then the dependence must be on the memory address; add
         an extra "cycle".  Note that the cost could be two cycles if the
         reg was written late in an instruction group; we cannot tell
         here.  */
      if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
        return cost + 3;

      /* Get the delay only if the address of the store is the dependence.  */
      if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
        {
          rtx pat = PATTERN (insn);
          rtx dep_pat = PATTERN (dep_insn);

          if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
            return cost;  /* This should not happen!  */

          /* The dependency between the two instructions was on the data that
             is being stored.  Assume that this implies that the address of the
             store is not dependent.  */
          if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
            return cost;

          return cost + 3;  /* An approximation.  */
        }

      /* A shift instruction cannot receive its data from an instruction
         in the same cycle; add a one cycle penalty.  */
      if (insn_type == TYPE_SHIFT)
        return cost + 3;   /* Split before cascade into shift.  */
    }
  else
    {
      /* Anti- or output-dependency; DEP_INSN reads/writes a register that
         INSN writes some cycles later.  */

      /* These are only significant for the fpu unit; writing a fp reg before
         the fpu has finished with it stalls the processor.  */

      /* Reusing an integer register causes no problems.  */
      if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
        return 0;
    }

  return cost;
}

static int
hypersparc_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
{
  enum attr_type insn_type, dep_type;
  rtx pat = PATTERN (insn);
  rtx dep_pat = PATTERN (dep_insn);

  if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
    return cost;

  insn_type = get_attr_type (insn);
  dep_type = get_attr_type (dep_insn);

  switch (REG_NOTE_KIND (link))
    {
    case 0:
      /* Data dependency; DEP_INSN writes a register that INSN reads some
         cycles later.  */

      switch (insn_type)
        {
        case TYPE_STORE:
        case TYPE_FPSTORE:
          /* Get the delay iff the address of the store is the dependence.  */
          if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
            return cost;

          if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
            return cost;
          return cost + 3;

        case TYPE_LOAD:
        case TYPE_SLOAD:
        case TYPE_FPLOAD:
          /* If a load, then the dependence must be on the memory address.  If
             the addresses aren't equal, then it might be a false dependency.  */
          if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
            {
              if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
                  || GET_CODE (SET_DEST (dep_pat)) != MEM
                  || GET_CODE (SET_SRC (pat)) != MEM
                  || ! rtx_equal_p (XEXP (SET_DEST (dep_pat), 0),
                                    XEXP (SET_SRC (pat), 0)))
                return cost + 2;

              return cost + 8;
            }
          break;

        case TYPE_BRANCH:
          /* Compare to branch latency is 0.  There is no benefit from
             separating compare and branch.  */
          if (dep_type == TYPE_COMPARE)
            return 0;
          /* Floating point compare to branch latency is less than
             compare to conditional move.  */
          if (dep_type == TYPE_FPCMP)
            return cost - 1;
          break;

        default:
          break;
        }
      break;

    case REG_DEP_ANTI:
      /* Anti-dependencies only penalize the fpu unit.  */
      if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
        return 0;
      break;

    default:
      break;
    }

  return cost;
}

static int
sparc_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
{
  switch (sparc_cpu)
    {
    case PROCESSOR_SUPERSPARC:
      cost = supersparc_adjust_cost (insn, link, dep, cost);
      break;
    case PROCESSOR_HYPERSPARC:
    case PROCESSOR_SPARCLITE86X:
      cost = hypersparc_adjust_cost (insn, link, dep, cost);
      break;
    default:
      break;
    }
  return cost;
}

static void
sparc_sched_init (FILE *dump ATTRIBUTE_UNUSED,
                  int sched_verbose ATTRIBUTE_UNUSED,
                  int max_ready ATTRIBUTE_UNUSED)
{}

static int
sparc_use_sched_lookahead (void)
{
  if (sparc_cpu == PROCESSOR_NIAGARA
      || sparc_cpu == PROCESSOR_NIAGARA2)
    return 0;
  if (sparc_cpu == PROCESSOR_ULTRASPARC
      || sparc_cpu == PROCESSOR_ULTRASPARC3)
    return 4;
  if ((1 << sparc_cpu) &
      ((1 << PROCESSOR_SUPERSPARC) | (1 << PROCESSOR_HYPERSPARC) |
       (1 << PROCESSOR_SPARCLITE86X)))
    return 3;
  return 0;
}
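
/* Editor's note (added for clarity): the (1 << sparc_cpu) & (...) test
   above is a compact set-membership check -- it yields a lookahead of
   3 when sparc_cpu is any of SUPERSPARC, HYPERSPARC or
   SPARCLITE86X.  */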

static int
sparc_issue_rate (void)
{
  switch (sparc_cpu)
    {
    case PROCESSOR_NIAGARA:
    case PROCESSOR_NIAGARA2:
    default:
      return 1;
    case PROCESSOR_V9:
      /* Assume V9 processors are capable of at least dual-issue.  */
      return 2;
    case PROCESSOR_SUPERSPARC:
      return 3;
    case PROCESSOR_HYPERSPARC:
    case PROCESSOR_SPARCLITE86X:
      return 2;
    case PROCESSOR_ULTRASPARC:
    case PROCESSOR_ULTRASPARC3:
      return 4;
    }
}

static int
set_extends (rtx insn)
{
  register rtx pat = PATTERN (insn);

  switch (GET_CODE (SET_SRC (pat)))
    {
      /* Load and some shift instructions zero extend.  */
    case MEM:
    case ZERO_EXTEND:
      /* sethi clears the high bits.  */
    case HIGH:
      /* LO_SUM is used with sethi.  sethi cleared the high
         bits and the values used with lo_sum are positive.  */
    case LO_SUM:
      /* Store flag stores 0 or 1.  */
    case LT: case LTU:
    case GT: case GTU:
    case LE: case LEU:
    case GE: case GEU:
    case EQ:
    case NE:
      return 1;
    case AND:
      {
        rtx op0 = XEXP (SET_SRC (pat), 0);
        rtx op1 = XEXP (SET_SRC (pat), 1);
        if (GET_CODE (op1) == CONST_INT)
          return INTVAL (op1) >= 0;
        if (GET_CODE (op0) != REG)
          return 0;
        if (sparc_check_64 (op0, insn) == 1)
          return 1;
        return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
      }
    case IOR:
    case XOR:
      {
        rtx op0 = XEXP (SET_SRC (pat), 0);
        rtx op1 = XEXP (SET_SRC (pat), 1);
        if (GET_CODE (op0) != REG || sparc_check_64 (op0, insn) <= 0)
          return 0;
        if (GET_CODE (op1) == CONST_INT)
          return INTVAL (op1) >= 0;
        return (GET_CODE (op1) == REG && sparc_check_64 (op1, insn) == 1);
      }
    case LSHIFTRT:
      return GET_MODE (SET_SRC (pat)) == SImode;
      /* Positive integers leave the high bits zero.  */
    case CONST_DOUBLE:
      return ! (CONST_DOUBLE_LOW (SET_SRC (pat)) & 0x80000000);
    case CONST_INT:
      return ! (INTVAL (SET_SRC (pat)) & 0x80000000);
    case ASHIFTRT:
    case SIGN_EXTEND:
      return - (GET_MODE (SET_SRC (pat)) == SImode);
    case REG:
      return sparc_check_64 (SET_SRC (pat), insn);
    default:
      return 0;
    }
}
7773
 
7774
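/* For example, a SET whose source is (and (reg) (const_int 0xff))
   satisfies the AND case above: ANDing with a nonnegative constant
   such as 0xff clears all of the high bits, so the result is known
   to be zero-extended.  */
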
/* We _ought_ to have only one kind per function, but...  */
static GTY(()) rtx sparc_addr_diff_list;
static GTY(()) rtx sparc_addr_list;

void
sparc_defer_case_vector (rtx lab, rtx vec, int diff)
{
  vec = gen_rtx_EXPR_LIST (VOIDmode, lab, vec);
  if (diff)
    sparc_addr_diff_list
      = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_diff_list);
  else
    sparc_addr_list = gen_rtx_EXPR_LIST (VOIDmode, vec, sparc_addr_list);
}

static void
sparc_output_addr_vec (rtx vec)
{
  rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
  int idx, vlen = XVECLEN (body, 0);

#ifdef ASM_OUTPUT_ADDR_VEC_START
  ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
#endif

#ifdef ASM_OUTPUT_CASE_LABEL
  ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
                         NEXT_INSN (lab));
#else
  (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
#endif

  for (idx = 0; idx < vlen; idx++)
    {
      ASM_OUTPUT_ADDR_VEC_ELT
        (asm_out_file, CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 0, idx), 0)));
    }

#ifdef ASM_OUTPUT_ADDR_VEC_END
  ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
#endif
}

static void
sparc_output_addr_diff_vec (rtx vec)
{
  rtx lab = XEXP (vec, 0), body = XEXP (vec, 1);
  rtx base = XEXP (XEXP (body, 0), 0);
  int idx, vlen = XVECLEN (body, 1);

#ifdef ASM_OUTPUT_ADDR_VEC_START
  ASM_OUTPUT_ADDR_VEC_START (asm_out_file);
#endif

#ifdef ASM_OUTPUT_CASE_LABEL
  ASM_OUTPUT_CASE_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (lab),
                         NEXT_INSN (lab));
#else
  (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (lab));
#endif

  for (idx = 0; idx < vlen; idx++)
    {
      ASM_OUTPUT_ADDR_DIFF_ELT
        (asm_out_file,
         body,
         CODE_LABEL_NUMBER (XEXP (XVECEXP (body, 1, idx), 0)),
         CODE_LABEL_NUMBER (base));
    }

#ifdef ASM_OUTPUT_ADDR_VEC_END
  ASM_OUTPUT_ADDR_VEC_END (asm_out_file);
#endif
}

static void
sparc_output_deferred_case_vectors (void)
{
  rtx t;
  int align;

  if (sparc_addr_list == NULL_RTX
      && sparc_addr_diff_list == NULL_RTX)
    return;

  /* Align to cache line in the function's code section.  */
  switch_to_section (current_function_section ());

  align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
  if (align > 0)
    ASM_OUTPUT_ALIGN (asm_out_file, align);

  for (t = sparc_addr_list; t ; t = XEXP (t, 1))
    sparc_output_addr_vec (XEXP (t, 0));
  for (t = sparc_addr_diff_list; t ; t = XEXP (t, 1))
    sparc_output_addr_diff_vec (XEXP (t, 0));

  sparc_addr_list = sparc_addr_diff_list = NULL_RTX;
}

/* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
   unknown.  Return 1 if the high bits are zero, -1 if the register is
   sign extended.  */
int
sparc_check_64 (rtx x, rtx insn)
{
  /* If a register is set only once it is safe to ignore insns this
     code does not know how to handle.  The loop will either recognize
     the single set and return the correct value or fail to recognize
     it and return 0.  */
  int set_once = 0;
  rtx y = x;

  gcc_assert (GET_CODE (x) == REG);

  if (GET_MODE (x) == DImode)
    y = gen_rtx_REG (SImode, REGNO (x) + WORDS_BIG_ENDIAN);

  if (flag_expensive_optimizations
      && df && DF_REG_DEF_COUNT (REGNO (y)) == 1)
    set_once = 1;

  if (insn == 0)
    {
      if (set_once)
        insn = get_last_insn_anywhere ();
      else
        return 0;
    }

  while ((insn = PREV_INSN (insn)))
    {
      switch (GET_CODE (insn))
        {
        case JUMP_INSN:
        case NOTE:
          break;
        case CODE_LABEL:
        case CALL_INSN:
        default:
          if (! set_once)
            return 0;
          break;
        case INSN:
          {
            rtx pat = PATTERN (insn);
            if (GET_CODE (pat) != SET)
              return 0;
            if (rtx_equal_p (x, SET_DEST (pat)))
              return set_extends (insn);
            if (y && rtx_equal_p (y, SET_DEST (pat)))
              return set_extends (insn);
            if (reg_overlap_mentioned_p (SET_DEST (pat), y))
              return 0;
          }
        }
    }
  return 0;
}

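/* Note the WORDS_BIG_ENDIAN trick above: for a DImode value held in a
   register pair on this big-endian target, the low-order SImode word
   is the higher-numbered hard register, so Y names register
   REGNO (x) + 1.  */
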
/* Returns assembly code to perform a DImode shift using
   a 64-bit global or out register on SPARC-V8+.  */
const char *
output_v8plus_shift (rtx *operands, rtx insn, const char *opcode)
{
  static char asm_code[60];

  /* The scratch register is only required when the destination
     register is not a 64-bit global or out register.  */
  if (which_alternative != 2)
    operands[3] = operands[0];

  /* We can only shift by constants <= 63.  */
  if (GET_CODE (operands[2]) == CONST_INT)
    operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);

  if (GET_CODE (operands[1]) == CONST_INT)
    {
      output_asm_insn ("mov\t%1, %3", operands);
    }
  else
    {
      output_asm_insn ("sllx\t%H1, 32, %3", operands);
      if (sparc_check_64 (operands[1], insn) <= 0)
        output_asm_insn ("srl\t%L1, 0, %L1", operands);
      output_asm_insn ("or\t%L1, %3, %3", operands);
    }

  strcpy (asm_code, opcode);

  if (which_alternative != 2)
    return strcat (asm_code, "\t%0, %2, %L0\n\tsrlx\t%L0, 32, %H0");
  else
    return strcat (asm_code, "\t%3, %2, %3\n\tsrlx\t%3, 32, %H0\n\tmov\t%3, %L0");
}

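/* Illustrative only: for a non-constant source and OPCODE "sllx", the
   routine above emits a sequence along the lines of

     sllx  %H1, 32, %3    ! build the full 64-bit value in %3
     srl   %L1, 0, %L1    ! zero-extend the low word if not known clean
     or    %L1, %3, %3
     sllx  %0, %2, %L0    ! the actual shift (%3 is %0 here)
     srlx  %L0, 32, %H0   ! split the result back into two halves  */
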
/* Output rtl to increment the profiler label LABELNO
   for profiling a function entry.  */

void
sparc_profile_hook (int labelno)
{
  char buf[32];
  rtx lab, fun;

  fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_FUNCTION);
  if (NO_PROFILE_COUNTERS)
    {
      emit_library_call (fun, LCT_NORMAL, VOIDmode, 0);
    }
  else
    {
      ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
      lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
      emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, lab, Pmode);
    }
}

/* Solaris implementation of TARGET_ASM_NAMED_SECTION.  */

static void
sparc_solaris_elf_asm_named_section (const char *name, unsigned int flags,
                                     tree decl ATTRIBUTE_UNUSED)
{
  fprintf (asm_out_file, "\t.section\t\"%s\"", name);

  if (!(flags & SECTION_DEBUG))
    fputs (",#alloc", asm_out_file);
  if (flags & SECTION_WRITE)
    fputs (",#write", asm_out_file);
  if (flags & SECTION_TLS)
    fputs (",#tls", asm_out_file);
  if (flags & SECTION_CODE)
    fputs (",#execinstr", asm_out_file);

  /* ??? Handle SECTION_BSS.  */

  fputc ('\n', asm_out_file);
}

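/* For instance, a writable non-debug section named "my_data" (an
   illustrative name only) comes out of the hook above as the
   Solaris-style directive

     .section  "my_data",#alloc,#write  */
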
/* We do not allow indirect calls to be optimized into sibling calls.

   We cannot use sibling calls when delayed branches are disabled
   because they will likely require the call delay slot to be filled.

   Also, on SPARC 32-bit we cannot emit a sibling call when the
   current function returns a structure.  This is because the "unimp
   after call" convention would cause the callee to return to the
   wrong place.  The generic code already disallows cases where the
   function being called returns a structure.

   It may seem strange how this last case could occur.  Usually there
   is code after the call which jumps to epilogue code which dumps the
   return value into the struct return area.  That ought to invalidate
   the sibling call right?  Well, in the C++ case we can end up passing
   the pointer to the struct return area to a constructor (which returns
   void) and then nothing else happens.  Such a sibling call would look
   valid without the added check here.

   VxWorks PIC PLT entries require the global pointer to be initialized
   on entry.  We therefore can't emit sibling calls to them.  */
static bool
sparc_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
  return (decl
          && flag_delayed_branch
          && (TARGET_ARCH64 || ! cfun->returns_struct)
          && !(TARGET_VXWORKS_RTP
               && flag_pic
               && !targetm.binds_local_p (decl)));
}

/* libfunc renaming.  */
#include "config/gofast.h"

static void
sparc_init_libfuncs (void)
{
  if (TARGET_ARCH32)
    {
      /* Use the subroutines that Sun's library provides for integer
         multiply and divide.  The `*' prevents an underscore from
         being prepended by the compiler.  .umul is a little faster
         than .mul.  */
      set_optab_libfunc (smul_optab, SImode, "*.umul");
      set_optab_libfunc (sdiv_optab, SImode, "*.div");
      set_optab_libfunc (udiv_optab, SImode, "*.udiv");
      set_optab_libfunc (smod_optab, SImode, "*.rem");
      set_optab_libfunc (umod_optab, SImode, "*.urem");

      /* TFmode arithmetic.  These names are part of the SPARC 32-bit ABI.  */
      set_optab_libfunc (add_optab, TFmode, "_Q_add");
      set_optab_libfunc (sub_optab, TFmode, "_Q_sub");
      set_optab_libfunc (neg_optab, TFmode, "_Q_neg");
      set_optab_libfunc (smul_optab, TFmode, "_Q_mul");
      set_optab_libfunc (sdiv_optab, TFmode, "_Q_div");

      /* We can define the TFmode sqrt optab only if TARGET_FPU.  This
         is because with soft-float, the SFmode and DFmode sqrt
         instructions will be absent, and the compiler will notice and
         try to use the TFmode sqrt instruction for calls to the
         builtin function sqrt, but this fails.  */
      if (TARGET_FPU)
        set_optab_libfunc (sqrt_optab, TFmode, "_Q_sqrt");

      set_optab_libfunc (eq_optab, TFmode, "_Q_feq");
      set_optab_libfunc (ne_optab, TFmode, "_Q_fne");
      set_optab_libfunc (gt_optab, TFmode, "_Q_fgt");
      set_optab_libfunc (ge_optab, TFmode, "_Q_fge");
      set_optab_libfunc (lt_optab, TFmode, "_Q_flt");
      set_optab_libfunc (le_optab, TFmode, "_Q_fle");

      set_conv_libfunc (sext_optab,   TFmode, SFmode, "_Q_stoq");
      set_conv_libfunc (sext_optab,   TFmode, DFmode, "_Q_dtoq");
      set_conv_libfunc (trunc_optab,  SFmode, TFmode, "_Q_qtos");
      set_conv_libfunc (trunc_optab,  DFmode, TFmode, "_Q_qtod");

      set_conv_libfunc (sfix_optab,   SImode, TFmode, "_Q_qtoi");
      set_conv_libfunc (ufix_optab,   SImode, TFmode, "_Q_qtou");
      set_conv_libfunc (sfloat_optab, TFmode, SImode, "_Q_itoq");
      set_conv_libfunc (ufloat_optab, TFmode, SImode, "_Q_utoq");

      if (DITF_CONVERSION_LIBFUNCS)
        {
          set_conv_libfunc (sfix_optab,   DImode, TFmode, "_Q_qtoll");
          set_conv_libfunc (ufix_optab,   DImode, TFmode, "_Q_qtoull");
          set_conv_libfunc (sfloat_optab, TFmode, DImode, "_Q_lltoq");
          set_conv_libfunc (ufloat_optab, TFmode, DImode, "_Q_ulltoq");
        }

      if (SUN_CONVERSION_LIBFUNCS)
        {
          set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftoll");
          set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoull");
          set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtoll");
          set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoull");
        }
    }
  if (TARGET_ARCH64)
    {
      /* In the SPARC 64-bit ABI, SImode multiply and divide functions
         do not exist in the library.  Make sure the compiler does not
         emit calls to them by accident.  (It should always use the
         hardware instructions.)  */
      set_optab_libfunc (smul_optab, SImode, 0);
      set_optab_libfunc (sdiv_optab, SImode, 0);
      set_optab_libfunc (udiv_optab, SImode, 0);
      set_optab_libfunc (smod_optab, SImode, 0);
      set_optab_libfunc (umod_optab, SImode, 0);

      if (SUN_INTEGER_MULTIPLY_64)
        {
          set_optab_libfunc (smul_optab, DImode, "__mul64");
          set_optab_libfunc (sdiv_optab, DImode, "__div64");
          set_optab_libfunc (udiv_optab, DImode, "__udiv64");
          set_optab_libfunc (smod_optab, DImode, "__rem64");
          set_optab_libfunc (umod_optab, DImode, "__urem64");
        }

      if (SUN_CONVERSION_LIBFUNCS)
        {
          set_conv_libfunc (sfix_optab, DImode, SFmode, "__ftol");
          set_conv_libfunc (ufix_optab, DImode, SFmode, "__ftoul");
          set_conv_libfunc (sfix_optab, DImode, DFmode, "__dtol");
          set_conv_libfunc (ufix_optab, DImode, DFmode, "__dtoul");
        }
    }

  gofast_maybe_init_libfuncs ();
}

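/* Illustrative only: with -m32 (and the usual soft quad-float
   configuration), a TFmode multiply such as

     long double f (long double a, long double b) { return a * b; }

   is emitted as a call to _Q_mul by way of the optab entry registered
   above.  */
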
#define def_builtin(NAME, CODE, TYPE) \
  add_builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD, NULL, \
                        NULL_TREE)

/* Implement the TARGET_INIT_BUILTINS target hook.
   Create builtin functions for special SPARC instructions.  */

static void
sparc_init_builtins (void)
{
  if (TARGET_VIS)
    sparc_vis_init_builtins ();
}

/* Create builtin functions for VIS 1.0 instructions.  */

static void
sparc_vis_init_builtins (void)
{
  tree v4qi = build_vector_type (unsigned_intQI_type_node, 4);
  tree v8qi = build_vector_type (unsigned_intQI_type_node, 8);
  tree v4hi = build_vector_type (intHI_type_node, 4);
  tree v2hi = build_vector_type (intHI_type_node, 2);
  tree v2si = build_vector_type (intSI_type_node, 2);

  tree v4qi_ftype_v4hi = build_function_type_list (v4qi, v4hi, 0);
  tree v8qi_ftype_v2si_v8qi = build_function_type_list (v8qi, v2si, v8qi, 0);
  tree v2hi_ftype_v2si = build_function_type_list (v2hi, v2si, 0);
  tree v4hi_ftype_v4qi = build_function_type_list (v4hi, v4qi, 0);
  tree v8qi_ftype_v4qi_v4qi = build_function_type_list (v8qi, v4qi, v4qi, 0);
  tree v4hi_ftype_v4qi_v4hi = build_function_type_list (v4hi, v4qi, v4hi, 0);
  tree v4hi_ftype_v4qi_v2hi = build_function_type_list (v4hi, v4qi, v2hi, 0);
  tree v2si_ftype_v4qi_v2hi = build_function_type_list (v2si, v4qi, v2hi, 0);
  tree v4hi_ftype_v8qi_v4hi = build_function_type_list (v4hi, v8qi, v4hi, 0);
  tree v4hi_ftype_v4hi_v4hi = build_function_type_list (v4hi, v4hi, v4hi, 0);
  tree v2si_ftype_v2si_v2si = build_function_type_list (v2si, v2si, v2si, 0);
  tree v8qi_ftype_v8qi_v8qi = build_function_type_list (v8qi, v8qi, v8qi, 0);
  tree di_ftype_v8qi_v8qi_di = build_function_type_list (intDI_type_node,
                                                         v8qi, v8qi,
                                                         intDI_type_node, 0);
  tree di_ftype_di_di = build_function_type_list (intDI_type_node,
                                                  intDI_type_node,
                                                  intDI_type_node, 0);
  tree ptr_ftype_ptr_si = build_function_type_list (ptr_type_node,
                                                    ptr_type_node,
                                                    intSI_type_node, 0);
  tree ptr_ftype_ptr_di = build_function_type_list (ptr_type_node,
                                                    ptr_type_node,
                                                    intDI_type_node, 0);

  /* Packing and expanding vectors.  */
  def_builtin ("__builtin_vis_fpack16", CODE_FOR_fpack16_vis, v4qi_ftype_v4hi);
  def_builtin ("__builtin_vis_fpack32", CODE_FOR_fpack32_vis,
               v8qi_ftype_v2si_v8qi);
  def_builtin ("__builtin_vis_fpackfix", CODE_FOR_fpackfix_vis,
               v2hi_ftype_v2si);
  def_builtin ("__builtin_vis_fexpand", CODE_FOR_fexpand_vis, v4hi_ftype_v4qi);
  def_builtin ("__builtin_vis_fpmerge", CODE_FOR_fpmerge_vis,
               v8qi_ftype_v4qi_v4qi);

  /* Multiplications.  */
  def_builtin ("__builtin_vis_fmul8x16", CODE_FOR_fmul8x16_vis,
               v4hi_ftype_v4qi_v4hi);
  def_builtin ("__builtin_vis_fmul8x16au", CODE_FOR_fmul8x16au_vis,
               v4hi_ftype_v4qi_v2hi);
  def_builtin ("__builtin_vis_fmul8x16al", CODE_FOR_fmul8x16al_vis,
               v4hi_ftype_v4qi_v2hi);
  def_builtin ("__builtin_vis_fmul8sux16", CODE_FOR_fmul8sux16_vis,
               v4hi_ftype_v8qi_v4hi);
  def_builtin ("__builtin_vis_fmul8ulx16", CODE_FOR_fmul8ulx16_vis,
               v4hi_ftype_v8qi_v4hi);
  def_builtin ("__builtin_vis_fmuld8sux16", CODE_FOR_fmuld8sux16_vis,
               v2si_ftype_v4qi_v2hi);
  def_builtin ("__builtin_vis_fmuld8ulx16", CODE_FOR_fmuld8ulx16_vis,
               v2si_ftype_v4qi_v2hi);

  /* Data aligning.  */
  def_builtin ("__builtin_vis_faligndatav4hi", CODE_FOR_faligndatav4hi_vis,
               v4hi_ftype_v4hi_v4hi);
  def_builtin ("__builtin_vis_faligndatav8qi", CODE_FOR_faligndatav8qi_vis,
               v8qi_ftype_v8qi_v8qi);
  def_builtin ("__builtin_vis_faligndatav2si", CODE_FOR_faligndatav2si_vis,
               v2si_ftype_v2si_v2si);
  def_builtin ("__builtin_vis_faligndatadi", CODE_FOR_faligndatadi_vis,
               di_ftype_di_di);
  if (TARGET_ARCH64)
    def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrdi_vis,
                 ptr_ftype_ptr_di);
  else
    def_builtin ("__builtin_vis_alignaddr", CODE_FOR_alignaddrsi_vis,
                 ptr_ftype_ptr_si);

  /* Pixel distance.  */
  def_builtin ("__builtin_vis_pdist", CODE_FOR_pdist_vis,
               di_ftype_v8qi_v8qi_di);
}

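/* Illustrative usage of the builtins registered above (hypothetical
   user code, compiled with -mvis):

     typedef short v4hi __attribute__ ((vector_size (8)));
     typedef unsigned char v4qi __attribute__ ((vector_size (4)));

     v4qi pack (v4hi x)
     {
       return __builtin_vis_fpack16 (x);  -- maps to the fpack16 insn
     }  */
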
/* Handle TARGET_EXPAND_BUILTIN target hook.
   Expand builtin functions for SPARC intrinsics.  */

static rtx
sparc_expand_builtin (tree exp, rtx target,
                      rtx subtarget ATTRIBUTE_UNUSED,
                      enum machine_mode tmode ATTRIBUTE_UNUSED,
                      int ignore ATTRIBUTE_UNUSED)
{
  tree arg;
  call_expr_arg_iterator iter;
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int icode = DECL_FUNCTION_CODE (fndecl);
  rtx pat, op[4];
  enum machine_mode mode[4];
  int arg_count = 0;

  mode[0] = insn_data[icode].operand[0].mode;
  if (!target
      || GET_MODE (target) != mode[0]
      || ! (*insn_data[icode].operand[0].predicate) (target, mode[0]))
    op[0] = gen_reg_rtx (mode[0]);
  else
    op[0] = target;

  FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
    {
      arg_count++;
      mode[arg_count] = insn_data[icode].operand[arg_count].mode;
      op[arg_count] = expand_normal (arg);

      if (! (*insn_data[icode].operand[arg_count].predicate) (op[arg_count],
                                                              mode[arg_count]))
        op[arg_count] = copy_to_mode_reg (mode[arg_count], op[arg_count]);
    }

  switch (arg_count)
    {
    case 1:
      pat = GEN_FCN (icode) (op[0], op[1]);
      break;
    case 2:
      pat = GEN_FCN (icode) (op[0], op[1], op[2]);
      break;
    case 3:
      pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
      break;
    default:
      gcc_unreachable ();
    }

  if (!pat)
    return NULL_RTX;

  emit_insn (pat);

  return op[0];
}

static int
sparc_vis_mul8x16 (int e8, int e16)
{
  return (e8 * e16 + 128) / 256;
}

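/* For example, sparc_vis_mul8x16 (100, 200) evaluates to
   (100 * 200 + 128) / 256 = 20128 / 256 = 78: the 8-bit by 16-bit
   product scaled back down by 256, rounded to nearest.  */
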
/* Multiply the vector elements in ELTS0 by the elements in ELTS1 as
   specified by FNCODE.  All of the elements in the ELTS0 and ELTS1
   lists must be integer constants.  A tree list with the results of
   the multiplications is returned, and each element in the list is of
   INNER_TYPE.  */

static tree
sparc_handle_vis_mul8x16 (int fncode, tree inner_type, tree elts0, tree elts1)
{
  tree n_elts = NULL_TREE;
  int scale;

  switch (fncode)
    {
    case CODE_FOR_fmul8x16_vis:
      for (; elts0 && elts1;
           elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
        {
          int val
            = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
                                 TREE_INT_CST_LOW (TREE_VALUE (elts1)));
          n_elts = tree_cons (NULL_TREE,
                              build_int_cst (inner_type, val),
                              n_elts);
        }
      break;

    case CODE_FOR_fmul8x16au_vis:
      scale = TREE_INT_CST_LOW (TREE_VALUE (elts1));

      for (; elts0; elts0 = TREE_CHAIN (elts0))
        {
          int val
            = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
                                 scale);
          n_elts = tree_cons (NULL_TREE,
                              build_int_cst (inner_type, val),
                              n_elts);
        }
      break;

    case CODE_FOR_fmul8x16al_vis:
      scale = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (elts1)));

      for (; elts0; elts0 = TREE_CHAIN (elts0))
        {
          int val
            = sparc_vis_mul8x16 (TREE_INT_CST_LOW (TREE_VALUE (elts0)),
                                 scale);
          n_elts = tree_cons (NULL_TREE,
                              build_int_cst (inner_type, val),
                              n_elts);
        }
      break;

    default:
      gcc_unreachable ();
    }

  return nreverse (n_elts);
}

/* Handle TARGET_FOLD_BUILTIN target hook.
   Fold builtin functions for SPARC intrinsics.  If IGNORE is true the
   result of the function call is ignored.  NULL_TREE is returned if the
   function could not be folded.  */

static tree
sparc_fold_builtin (tree fndecl, tree arglist, bool ignore)
{
  tree arg0, arg1, arg2;
  tree rtype = TREE_TYPE (TREE_TYPE (fndecl));
  enum insn_code icode = (enum insn_code) DECL_FUNCTION_CODE (fndecl);

  if (ignore
      && icode != CODE_FOR_alignaddrsi_vis
      && icode != CODE_FOR_alignaddrdi_vis)
    return fold_convert (rtype, integer_zero_node);

  switch (icode)
    {
    case CODE_FOR_fexpand_vis:
      arg0 = TREE_VALUE (arglist);
      STRIP_NOPS (arg0);

      if (TREE_CODE (arg0) == VECTOR_CST)
        {
          tree inner_type = TREE_TYPE (rtype);
          tree elts = TREE_VECTOR_CST_ELTS (arg0);
          tree n_elts = NULL_TREE;

          for (; elts; elts = TREE_CHAIN (elts))
            {
              unsigned int val = TREE_INT_CST_LOW (TREE_VALUE (elts)) << 4;
              n_elts = tree_cons (NULL_TREE,
                                  build_int_cst (inner_type, val),
                                  n_elts);
            }
          return build_vector (rtype, nreverse (n_elts));
        }
      break;

    case CODE_FOR_fmul8x16_vis:
    case CODE_FOR_fmul8x16au_vis:
    case CODE_FOR_fmul8x16al_vis:
      arg0 = TREE_VALUE (arglist);
      arg1 = TREE_VALUE (TREE_CHAIN (arglist));
      STRIP_NOPS (arg0);
      STRIP_NOPS (arg1);

      if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
        {
          tree inner_type = TREE_TYPE (rtype);
          tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
          tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
          tree n_elts = sparc_handle_vis_mul8x16 (icode, inner_type, elts0,
                                                  elts1);

          return build_vector (rtype, n_elts);
        }
      break;

    case CODE_FOR_fpmerge_vis:
      arg0 = TREE_VALUE (arglist);
      arg1 = TREE_VALUE (TREE_CHAIN (arglist));
      STRIP_NOPS (arg0);
      STRIP_NOPS (arg1);

      if (TREE_CODE (arg0) == VECTOR_CST && TREE_CODE (arg1) == VECTOR_CST)
        {
          tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
          tree elts1 = TREE_VECTOR_CST_ELTS (arg1);
          tree n_elts = NULL_TREE;

          for (; elts0 && elts1;
               elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
            {
              n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts0), n_elts);
              n_elts = tree_cons (NULL_TREE, TREE_VALUE (elts1), n_elts);
            }

          return build_vector (rtype, nreverse (n_elts));
        }
      break;

    case CODE_FOR_pdist_vis:
      arg0 = TREE_VALUE (arglist);
      arg1 = TREE_VALUE (TREE_CHAIN (arglist));
      arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
      STRIP_NOPS (arg0);
      STRIP_NOPS (arg1);
      STRIP_NOPS (arg2);

      if (TREE_CODE (arg0) == VECTOR_CST
          && TREE_CODE (arg1) == VECTOR_CST
          && TREE_CODE (arg2) == INTEGER_CST)
        {
          int overflow = 0;
          unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (arg2);
          HOST_WIDE_INT high = TREE_INT_CST_HIGH (arg2);
          tree elts0 = TREE_VECTOR_CST_ELTS (arg0);
          tree elts1 = TREE_VECTOR_CST_ELTS (arg1);

          for (; elts0 && elts1;
               elts0 = TREE_CHAIN (elts0), elts1 = TREE_CHAIN (elts1))
            {
              unsigned HOST_WIDE_INT
                low0 = TREE_INT_CST_LOW (TREE_VALUE (elts0)),
                low1 = TREE_INT_CST_LOW (TREE_VALUE (elts1));
              HOST_WIDE_INT high0 = TREE_INT_CST_HIGH (TREE_VALUE (elts0));
              HOST_WIDE_INT high1 = TREE_INT_CST_HIGH (TREE_VALUE (elts1));

              unsigned HOST_WIDE_INT l;
              HOST_WIDE_INT h;

              overflow |= neg_double (low1, high1, &l, &h);
              overflow |= add_double (low0, high0, l, h, &l, &h);
              if (h < 0)
                overflow |= neg_double (l, h, &l, &h);

              overflow |= add_double (low, high, l, h, &low, &high);
            }

          gcc_assert (overflow == 0);

          return build_int_cst_wide (rtype, low, high);
        }

    default:
      break;
    }

  return NULL_TREE;
}

/* ??? This duplicates information provided to the compiler by the
   ??? scheduler description.  Some day, teach genautomata to output
   ??? the latencies and then CSE will just use that.  */

static bool
sparc_rtx_costs (rtx x, int code, int outer_code, int *total,
                 bool speed ATTRIBUTE_UNUSED)
{
  enum machine_mode mode = GET_MODE (x);
  bool float_mode_p = FLOAT_MODE_P (mode);

  switch (code)
    {
    case CONST_INT:
      if (INTVAL (x) < 0x1000 && INTVAL (x) >= -0x1000)
        {
          *total = 0;
          return true;
        }
      /* FALLTHRU */

    case HIGH:
      *total = 2;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 4;
      return true;

    case CONST_DOUBLE:
      if (GET_MODE (x) == VOIDmode
          && ((CONST_DOUBLE_HIGH (x) == 0
               && CONST_DOUBLE_LOW (x) < 0x1000)
              || (CONST_DOUBLE_HIGH (x) == -1
                  && CONST_DOUBLE_LOW (x) < 0
                  && CONST_DOUBLE_LOW (x) >= -0x1000)))
        *total = 0;
      else
        *total = 8;
      return true;

    case MEM:
      /* If outer-code was a sign or zero extension, a cost
         of COSTS_N_INSNS (1) was already added in.  This is
         why we are subtracting it back out.  */
      if (outer_code == ZERO_EXTEND)
        {
          *total = sparc_costs->int_zload - COSTS_N_INSNS (1);
        }
      else if (outer_code == SIGN_EXTEND)
        {
          *total = sparc_costs->int_sload - COSTS_N_INSNS (1);
        }
      else if (float_mode_p)
        {
          *total = sparc_costs->float_load;
        }
      else
        {
          *total = sparc_costs->int_load;
        }

      return true;

    case PLUS:
    case MINUS:
      if (float_mode_p)
        *total = sparc_costs->float_plusminus;
      else
        *total = COSTS_N_INSNS (1);
      return false;

    case MULT:
      if (float_mode_p)
        *total = sparc_costs->float_mul;
      else if (! TARGET_HARD_MUL)
        *total = COSTS_N_INSNS (25);
      else
        {
          int bit_cost;

          bit_cost = 0;
          if (sparc_costs->int_mul_bit_factor)
            {
              int nbits;

              if (GET_CODE (XEXP (x, 1)) == CONST_INT)
                {
                  unsigned HOST_WIDE_INT value = INTVAL (XEXP (x, 1));
                  for (nbits = 0; value != 0; value &= value - 1)
                    nbits++;
                }
              else if (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
                       && GET_MODE (XEXP (x, 1)) == VOIDmode)
                {
                  rtx x1 = XEXP (x, 1);
                  unsigned HOST_WIDE_INT value1 = CONST_DOUBLE_LOW (x1);
                  unsigned HOST_WIDE_INT value2 = CONST_DOUBLE_HIGH (x1);

                  for (nbits = 0; value1 != 0; value1 &= value1 - 1)
                    nbits++;
                  for (; value2 != 0; value2 &= value2 - 1)
                    nbits++;
                }
              else
                nbits = 7;

              if (nbits < 3)
                nbits = 3;
              bit_cost = (nbits - 3) / sparc_costs->int_mul_bit_factor;
              bit_cost = COSTS_N_INSNS (bit_cost);
            }

          if (mode == DImode)
            *total = sparc_costs->int_mulX + bit_cost;
          else
            *total = sparc_costs->int_mul + bit_cost;
        }
      return false;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (1) + sparc_costs->shift_penalty;
      return false;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      if (float_mode_p)
        {
          if (mode == DFmode)
            *total = sparc_costs->float_div_df;
          else
            *total = sparc_costs->float_div_sf;
        }
      else
        {
          if (mode == DImode)
            *total = sparc_costs->int_divX;
          else
            *total = sparc_costs->int_div;
        }
      return false;

    case NEG:
      if (! float_mode_p)
        {
          *total = COSTS_N_INSNS (1);
          return false;
        }
      /* FALLTHRU */

    case ABS:
    case FLOAT:
    case UNSIGNED_FLOAT:
    case FIX:
    case UNSIGNED_FIX:
    case FLOAT_EXTEND:
    case FLOAT_TRUNCATE:
      *total = sparc_costs->float_move;
      return false;

    case SQRT:
      if (mode == DFmode)
        *total = sparc_costs->float_sqrt_df;
      else
        *total = sparc_costs->float_sqrt_sf;
      return false;

    case COMPARE:
      if (float_mode_p)
        *total = sparc_costs->float_cmp;
      else
        *total = COSTS_N_INSNS (1);
      return false;

    case IF_THEN_ELSE:
      if (float_mode_p)
        *total = sparc_costs->float_cmove;
      else
        *total = sparc_costs->int_cmove;
      return false;

    case IOR:
      /* Handle the NAND vector patterns.  */
      if (sparc_vector_mode_supported_p (GET_MODE (x))
          && GET_CODE (XEXP (x, 0)) == NOT
          && GET_CODE (XEXP (x, 1)) == NOT)
        {
          *total = COSTS_N_INSNS (1);
          return true;
        }
      else
        return false;

    default:
      return false;
    }
}

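/* The MULT case above counts the set bits of a constant multiplier
   with the classic v &= v - 1 trick (each iteration clears the lowest
   set bit).  For instance, a multiplier of 0x90001 has three set bits,
   so nbits = 3 and the extra bit_cost is zero for that operand.  */
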
/* Emit the sequence of insns SEQ while preserving the registers REG and REG2.
   This is achieved by means of a manual dynamic stack space allocation in
   the current frame.  We make the assumption that SEQ doesn't contain any
   function calls, with the possible exception of calls to the PIC helper.  */

static void
emit_and_preserve (rtx seq, rtx reg, rtx reg2)
{
  /* We must preserve the lowest 16 words for the register save area.  */
  HOST_WIDE_INT offset = 16*UNITS_PER_WORD;
  /* We really need only 2 words of fresh stack space.  */
  HOST_WIDE_INT size = SPARC_STACK_ALIGN (offset + 2*UNITS_PER_WORD);

  rtx slot
    = gen_rtx_MEM (word_mode, plus_constant (stack_pointer_rtx,
                                             SPARC_STACK_BIAS + offset));

  emit_insn (gen_stack_pointer_dec (GEN_INT (size)));
  emit_insn (gen_rtx_SET (VOIDmode, slot, reg));
  if (reg2)
    emit_insn (gen_rtx_SET (VOIDmode,
                            adjust_address (slot, word_mode, UNITS_PER_WORD),
                            reg2));
  emit_insn (seq);
  if (reg2)
    emit_insn (gen_rtx_SET (VOIDmode,
                            reg2,
                            adjust_address (slot, word_mode, UNITS_PER_WORD)));
  emit_insn (gen_rtx_SET (VOIDmode, reg, slot));
  emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
}

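/* As a worked example, on a 32-bit target (UNITS_PER_WORD == 4) the
   code above keeps offset = 64 bytes free for the register save area
   and allocates SPARC_STACK_ALIGN (64 + 8) bytes, so REG and REG2 are
   spilled to the two words just above the save area for the duration
   of SEQ.  */
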
/* Output the assembler code for a thunk function.  THUNK_DECL is the
   declaration for the thunk function itself, FUNCTION is the decl for
   the target function.  DELTA is an immediate constant offset to be
   added to THIS.  If VCALL_OFFSET is nonzero, the word at address
   (*THIS + VCALL_OFFSET) should be additionally added to THIS.  */

static void
sparc_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
                       HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
                       tree function)
{
  rtx this_rtx, insn, funexp;
  unsigned int int_arg_first;

  reload_completed = 1;
  epilogue_completed = 1;

  emit_note (NOTE_INSN_PROLOGUE_END);

  if (flag_delayed_branch)
    {
      /* We will emit a regular sibcall below, so we need to instruct
         output_sibcall that we are in a leaf function.  */
      sparc_leaf_function_p = current_function_uses_only_leaf_regs = 1;

      /* This will cause final.c to invoke leaf_renumber_regs so we
         must behave as if we were in a not-yet-leafified function.  */
      int_arg_first = SPARC_INCOMING_INT_ARG_FIRST;
    }
  else
    {
      /* We will emit the sibcall manually below, so we will need to
         manually spill non-leaf registers.  */
      sparc_leaf_function_p = current_function_uses_only_leaf_regs = 0;

      /* We really are in a leaf function.  */
      int_arg_first = SPARC_OUTGOING_INT_ARG_FIRST;
    }

  /* Find the "this" pointer.  Normally in %o0, but in ARCH64 if the function
     returns a structure, the structure return pointer is there instead.  */
  if (TARGET_ARCH64
      && aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    this_rtx = gen_rtx_REG (Pmode, int_arg_first + 1);
  else
    this_rtx = gen_rtx_REG (Pmode, int_arg_first);

  /* Add DELTA.  When possible use a plain add, otherwise load it into
     a register first.  */
  if (delta)
    {
      rtx delta_rtx = GEN_INT (delta);

      if (! SPARC_SIMM13_P (delta))
        {
          rtx scratch = gen_rtx_REG (Pmode, 1);
          emit_move_insn (scratch, delta_rtx);
          delta_rtx = scratch;
        }

      /* THIS_RTX += DELTA.  */
      emit_insn (gen_add2_insn (this_rtx, delta_rtx));
    }

  /* Add the word at address (*THIS_RTX + VCALL_OFFSET).  */
  if (vcall_offset)
    {
      rtx vcall_offset_rtx = GEN_INT (vcall_offset);
      rtx scratch = gen_rtx_REG (Pmode, 1);

      gcc_assert (vcall_offset < 0);

      /* SCRATCH = *THIS_RTX.  */
      emit_move_insn (scratch, gen_rtx_MEM (Pmode, this_rtx));

      /* Prepare for adding VCALL_OFFSET.  The difficulty is that we
         may not have any available scratch register at this point.  */
      if (SPARC_SIMM13_P (vcall_offset))
        ;
      /* This is the case if ARCH64 (unless -ffixed-g5 is passed).  */
      else if (! fixed_regs[5]
               /* The below sequence is made up of at least 2 insns,
                  while the default method may need only one.  */
               && vcall_offset < -8192)
        {
          rtx scratch2 = gen_rtx_REG (Pmode, 5);
          emit_move_insn (scratch2, vcall_offset_rtx);
          vcall_offset_rtx = scratch2;
        }
      else
        {
          rtx increment = GEN_INT (-4096);

          /* VCALL_OFFSET is a negative number whose typical range can be
             estimated as -32768..0 in 32-bit mode.  In almost all cases
             it is therefore cheaper to emit multiple add insns than
             spilling and loading the constant into a register (at least
             6 insns).  */
          while (! SPARC_SIMM13_P (vcall_offset))
            {
              emit_insn (gen_add2_insn (scratch, increment));
              vcall_offset += 4096;
            }
          vcall_offset_rtx = GEN_INT (vcall_offset); /* cannot be 0 */
        }

      /* SCRATCH = *(*THIS_RTX + VCALL_OFFSET).  */
      emit_move_insn (scratch, gen_rtx_MEM (Pmode,
                                            gen_rtx_PLUS (Pmode,
                                                          scratch,
                                                          vcall_offset_rtx)));

      /* THIS_RTX += *(*THIS_RTX + VCALL_OFFSET).  */
      emit_insn (gen_add2_insn (this_rtx, scratch));
    }

  /* Generate a tail call to the target function.  */
  if (! TREE_USED (function))
    {
      assemble_external (function);
      TREE_USED (function) = 1;
    }
  funexp = XEXP (DECL_RTL (function), 0);

  if (flag_delayed_branch)
    {
      funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
      insn = emit_call_insn (gen_sibcall (funexp));
      SIBLING_CALL_P (insn) = 1;
    }
  else
    {
      /* The hoops we have to jump through in order to generate a sibcall
         without using delay slots...  */
      rtx spill_reg, spill_reg2, seq, scratch = gen_rtx_REG (Pmode, 1);

      if (flag_pic)
        {
          spill_reg = gen_rtx_REG (word_mode, 15);  /* %o7 */
          spill_reg2 = gen_rtx_REG (word_mode, PIC_OFFSET_TABLE_REGNUM);
          start_sequence ();
          /* Delay emitting the PIC helper function because it needs to
             change the section and we are emitting assembly code.  */
          load_pic_register ();  /* clobbers %o7 */
          scratch = legitimize_pic_address (funexp, scratch);
          seq = get_insns ();
          end_sequence ();
          emit_and_preserve (seq, spill_reg, spill_reg2);
        }
      else if (TARGET_ARCH32)
        {
          emit_insn (gen_rtx_SET (VOIDmode,
                                  scratch,
                                  gen_rtx_HIGH (SImode, funexp)));
          emit_insn (gen_rtx_SET (VOIDmode,
                                  scratch,
                                  gen_rtx_LO_SUM (SImode, scratch, funexp)));
        }
      else  /* TARGET_ARCH64 */
        {
          switch (sparc_cmodel)
            {
            case CM_MEDLOW:
            case CM_MEDMID:
              /* The destination can serve as a temporary.  */
              sparc_emit_set_symbolic_const64 (scratch, funexp, scratch);
              break;

            case CM_MEDANY:
            case CM_EMBMEDANY:
              /* The destination cannot serve as a temporary.  */
              spill_reg = gen_rtx_REG (DImode, 15);  /* %o7 */
              start_sequence ();
              sparc_emit_set_symbolic_const64 (scratch, funexp, spill_reg);
              seq = get_insns ();
              end_sequence ();
              emit_and_preserve (seq, spill_reg, 0);
              break;

            default:
              gcc_unreachable ();
            }
        }

      emit_jump_insn (gen_indirect_jump (scratch));
    }

  emit_barrier ();

  /* Run just enough of rest_of_compilation to get the insns emitted.
     There's not really enough bulk here to make other passes such as
     instruction scheduling worthwhile.  Note that use_thunk calls
     assemble_start_function and assemble_end_function.  */
  insn = get_insns ();
  insn_locators_alloc ();
  shorten_branches (insn);
  final_start_function (insn, file, 1);
  final (insn, file, 1);
  final_end_function ();

  reload_completed = 0;
  epilogue_completed = 0;
}

/* Return true if sparc_output_mi_thunk would be able to output the
   assembler code for the thunk function specified by the arguments
   it is passed, and false otherwise.  */

static bool
sparc_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
                           HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
                           HOST_WIDE_INT vcall_offset,
                           const_tree function ATTRIBUTE_UNUSED)
{
  /* Bound the loop used in the default method above.  */
  return (vcall_offset >= -32768 || ! fixed_regs[5]);
}

/* How to allocate a 'struct machine_function'.  */

static struct machine_function *
sparc_init_machine_status (void)
{
  return GGC_CNEW (struct machine_function);
}

/* Locate some local-dynamic symbol still in use by this function
   so that we can print its name in local-dynamic base patterns.  */

static const char *
get_some_local_dynamic_name (void)
{
  rtx insn;

  if (cfun->machine->some_ld_name)
    return cfun->machine->some_ld_name;

  for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
    if (INSN_P (insn)
        && for_each_rtx (&PATTERN (insn), get_some_local_dynamic_name_1, 0))
      return cfun->machine->some_ld_name;

  gcc_unreachable ();
}

static int
get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  if (x
      && GET_CODE (x) == SYMBOL_REF
      && SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
    {
      cfun->machine->some_ld_name = XSTR (x, 0);
      return 1;
    }

  return 0;
}

/* Handle the TARGET_DWARF_HANDLE_FRAME_UNSPEC hook.
   This is called from dwarf2out.c to emit call frame instructions
   for frame-related insns containing UNSPECs and UNSPEC_VOLATILEs.  */

static void
sparc_dwarf_handle_frame_unspec (const char *label,
                                 rtx pattern ATTRIBUTE_UNUSED,
                                 int index ATTRIBUTE_UNUSED)
{
  gcc_assert (index == UNSPECV_SAVEW);
  dwarf2out_window_save (label);
}

/* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
   We need to emit DTP-relative relocations.  */

static void
sparc_output_dwarf_dtprel (FILE *file, int size, rtx x)
{
  switch (size)
    {
    case 4:
      fputs ("\t.word\t%r_tls_dtpoff32(", file);
      break;
    case 8:
      fputs ("\t.xword\t%r_tls_dtpoff64(", file);
      break;
    default:
      gcc_unreachable ();
    }
  output_addr_const (file, x);
  fputs (")", file);
}

/* Do whatever processing is required at the end of a file.  */

static void
sparc_file_end (void)
{
  /* If we need to emit the special PIC helper function, do so now.  */
  if (pic_helper_needed)
    {
      unsigned int regno = REGNO (pic_offset_table_rtx);
      const char *pic_name = reg_names[regno];
      char name[32];
#ifdef DWARF2_UNWIND_INFO
      bool do_cfi;
#endif

      get_pc_thunk_name (name, regno);
      if (USE_HIDDEN_LINKONCE)
        {
          tree decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
                                  get_identifier (name),
                                  build_function_type (void_type_node,
                                                       void_list_node));
          DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
                                           NULL_TREE, void_type_node);
          TREE_STATIC (decl) = 1;
          make_decl_one_only (decl, DECL_ASSEMBLER_NAME (decl));
          DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
          DECL_VISIBILITY_SPECIFIED (decl) = 1;
          allocate_struct_function (decl, true);
          current_function_decl = decl;
          init_varasm_status ();
          assemble_start_function (decl, name);
        }
      else
        {
          const int align = floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT);
          switch_to_section (text_section);
          if (align > 0)
            ASM_OUTPUT_ALIGN (asm_out_file, align);
          ASM_OUTPUT_LABEL (asm_out_file, name);
        }

#ifdef DWARF2_UNWIND_INFO
      do_cfi = dwarf2out_do_cfi_asm ();
      if (do_cfi)
        fprintf (asm_out_file, "\t.cfi_startproc\n");
#endif
      if (flag_delayed_branch)
        fprintf (asm_out_file, "\tjmp\t%%o7+8\n\t add\t%%o7, %s, %s\n",
                 pic_name, pic_name);
      else
        fprintf (asm_out_file, "\tadd\t%%o7, %s, %s\n\tjmp\t%%o7+8\n\t nop\n",
                 pic_name, pic_name);
#ifdef DWARF2_UNWIND_INFO
      if (do_cfi)
        fprintf (asm_out_file, "\t.cfi_endproc\n");
#endif
    }

  if (NEED_INDICATE_EXEC_STACK)
    file_end_indicate_exec_stack ();
}

#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING
/* Implement TARGET_MANGLE_TYPE.  */

static const char *
sparc_mangle_type (const_tree type)
{
  if (!TARGET_64BIT
      && TYPE_MAIN_VARIANT (type) == long_double_type_node
      && TARGET_LONG_DOUBLE_128)
    return "g";

  /* For all other types, use normal C++ mangling.  */
  return NULL;
}
#endif

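/* Illustrative only: with the hook above in effect, a 32-bit
   declaration such as void f (long double) mangles as _Z1fg ("g",
   the 128-bit float code) instead of the usual _Z1fe for long
   double.  */
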
/* Expand code to perform an 8- or 16-bit compare and swap by doing 32-bit
   compare and swap on the word containing the byte or half-word.  */

void
sparc_expand_compare_and_swap_12 (rtx result, rtx mem, rtx oldval, rtx newval)
{
  rtx addr1 = force_reg (Pmode, XEXP (mem, 0));
  rtx addr = gen_reg_rtx (Pmode);
  rtx off = gen_reg_rtx (SImode);
  rtx oldv = gen_reg_rtx (SImode);
  rtx newv = gen_reg_rtx (SImode);
  rtx oldvalue = gen_reg_rtx (SImode);
  rtx newvalue = gen_reg_rtx (SImode);
  rtx res = gen_reg_rtx (SImode);
  rtx resv = gen_reg_rtx (SImode);
  rtx memsi, val, mask, end_label, loop_label, cc;

  emit_insn (gen_rtx_SET (VOIDmode, addr,
                          gen_rtx_AND (Pmode, addr1, GEN_INT (-4))));

  if (Pmode != SImode)
    addr1 = gen_lowpart (SImode, addr1);
  emit_insn (gen_rtx_SET (VOIDmode, off,
                          gen_rtx_AND (SImode, addr1, GEN_INT (3))));

  memsi = gen_rtx_MEM (SImode, addr);
  set_mem_alias_set (memsi, ALIAS_SET_MEMORY_BARRIER);
  MEM_VOLATILE_P (memsi) = MEM_VOLATILE_P (mem);

  val = force_reg (SImode, memsi);

  emit_insn (gen_rtx_SET (VOIDmode, off,
                          gen_rtx_XOR (SImode, off,
                                       GEN_INT (GET_MODE (mem) == QImode
                                                ? 3 : 2))));

  emit_insn (gen_rtx_SET (VOIDmode, off,
                          gen_rtx_ASHIFT (SImode, off, GEN_INT (3))));

  if (GET_MODE (mem) == QImode)
    mask = force_reg (SImode, GEN_INT (0xff));
  else
    mask = force_reg (SImode, GEN_INT (0xffff));

  emit_insn (gen_rtx_SET (VOIDmode, mask,
                          gen_rtx_ASHIFT (SImode, mask, off)));

  emit_insn (gen_rtx_SET (VOIDmode, val,
                          gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
                                       val)));

  oldval = gen_lowpart (SImode, oldval);
  emit_insn (gen_rtx_SET (VOIDmode, oldv,
                          gen_rtx_ASHIFT (SImode, oldval, off)));

  newval = gen_lowpart_common (SImode, newval);
  emit_insn (gen_rtx_SET (VOIDmode, newv,
                          gen_rtx_ASHIFT (SImode, newval, off)));

  emit_insn (gen_rtx_SET (VOIDmode, oldv,
                          gen_rtx_AND (SImode, oldv, mask)));

  emit_insn (gen_rtx_SET (VOIDmode, newv,
                          gen_rtx_AND (SImode, newv, mask)));

  end_label = gen_label_rtx ();
  loop_label = gen_label_rtx ();
  emit_label (loop_label);

  emit_insn (gen_rtx_SET (VOIDmode, oldvalue,
                          gen_rtx_IOR (SImode, oldv, val)));

  emit_insn (gen_rtx_SET (VOIDmode, newvalue,
                          gen_rtx_IOR (SImode, newv, val)));

  emit_insn (gen_sync_compare_and_swapsi (res, memsi, oldvalue, newvalue));

  emit_cmp_and_jump_insns (res, oldvalue, EQ, NULL, SImode, 0, end_label);

  emit_insn (gen_rtx_SET (VOIDmode, resv,
                          gen_rtx_AND (SImode, gen_rtx_NOT (SImode, mask),
                                       res)));

  cc = gen_compare_reg_1 (NE, resv, val);
  emit_insn (gen_rtx_SET (VOIDmode, val, resv));

  /* Use cbranchcc4 to separate the compare and branch!  */
  emit_jump_insn (gen_cbranchcc4 (gen_rtx_NE (VOIDmode, cc, const0_rtx),
                                  cc, const0_rtx, loop_label));

  emit_label (end_label);

  emit_insn (gen_rtx_SET (VOIDmode, res,
                          gen_rtx_AND (SImode, res, mask)));

  emit_insn (gen_rtx_SET (VOIDmode, res,
                          gen_rtx_LSHIFTRT (SImode, res, off)));

  emit_move_insn (result, gen_lowpart (GET_MODE (result), res));
}

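/* Worked example of the address arithmetic above (QImode): for a byte
   whose address A satisfies A & 3 == 2, we get
   off = ((A & 3) ^ 3) << 3 = 8, so on this big-endian target the byte
   occupies bits 15:8 of the aligned word at A & -4, MASK becomes
   0xff << 8, and the loop performs the 32-bit CAS on that word.  */
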
/* Implement TARGET_FRAME_POINTER_REQUIRED.  */

bool
sparc_frame_pointer_required (void)
{
  return !(leaf_function_p () && only_leaf_regs_used ());
}

/* The way this is structured, we can't eliminate SFP in favor of SP
   if the frame pointer is required: we want to use the SFP->HFP elimination
   in that case.  But the test in update_eliminables doesn't know we are
   assuming below that we only do the former elimination.  */

bool
sparc_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
{
  return (to == HARD_FRAME_POINTER_REGNUM
          || !targetm.frame_pointer_required ());
}

#include "gt-sparc.h"
